diff --git a/go.mod b/go.mod index 3f4f47568cd..3503519cdd6 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,6 @@ module github.com/TykTechnologies/tyk go 1.16 require ( - github.com/HdrHistogram/hdrhistogram-go v1.1.2 // indirect github.com/Jeffail/gabs v1.4.0 github.com/Jeffail/tunny v0.1.4 github.com/Masterminds/sprig/v3 v3.2.2 @@ -22,53 +21,38 @@ require ( github.com/akutz/memconn v0.1.0 github.com/bshuster-repo/logrus-logstash-hook v0.4.1 github.com/buger/jsonparser v1.1.1 - github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23 github.com/cenk/backoff v2.2.1+incompatible github.com/cenkalti/backoff/v4 v4.0.2 - github.com/certifi/gocertifi v0.0.0-20190905060710-a5e0173ced67 // indirect github.com/clbanning/mxj v1.8.4 github.com/evalphobia/logrus_sentry v0.8.2 - github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a // indirect - github.com/franela/goblin v0.0.0-20181003173013-ead4ad1d2727 // indirect - github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8 github.com/garyburd/redigo v1.6.2 github.com/gemnasium/logrus-graylog-hook v2.0.7+incompatible github.com/getkin/kin-openapi v0.89.0 - github.com/getsentry/raven-go v0.2.0 // indirect github.com/go-redis/redis/v8 v8.11.5 github.com/gocraft/health v0.0.0-20170925182251-8675af27fef0 github.com/gofrs/uuid v3.3.0+incompatible github.com/golang-jwt/jwt/v4 v4.4.2 github.com/golang/protobuf v1.5.2 - github.com/google/uuid v1.3.0 // indirect github.com/gorilla/mux v1.8.0 github.com/gorilla/websocket v1.5.0 github.com/hashicorp/consul/api v1.3.0 - github.com/hashicorp/go-msgpack v0.5.4 // indirect github.com/hashicorp/go-multierror v1.1.1 github.com/hashicorp/go-retryablehttp v0.5.4 github.com/hashicorp/go-version v1.4.0 github.com/hashicorp/vault/api v1.0.4 - github.com/huandu/xstrings v1.3.2 // indirect - github.com/imdario/mergo v0.3.12 // indirect github.com/jensneuse/abstractlogger v0.0.4 github.com/justinas/alice v1.2.0 github.com/kelseyhightower/envconfig v1.4.0 - github.com/lonelycode/go-uuid v0.0.0-20141202165402-ed3ca8a15a93 // indirect github.com/lonelycode/osin v0.0.0-20160423095202-da239c9dacb6 - github.com/mavricknz/asn1-ber v0.0.0-20151103223136-b9df1c2f4213 // indirect github.com/mavricknz/ldap v0.0.0-20160227184754-f5a958005e43 github.com/miekg/dns v1.0.14 - github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.1 - github.com/nats-io/nats-server/v2 v2.3.4 // indirect github.com/newrelic/go-agent v2.13.0+incompatible github.com/nsf/jsondiff v0.0.0-20210303162244-6ea32392771e // test github.com/opentracing/opentracing-go v1.2.0 github.com/openzipkin/zipkin-go v0.2.2 github.com/oschwald/maxminddb-golang v1.5.0 github.com/paulbellamy/ratecounter v0.2.0 - github.com/peterbourgon/g2s v0.0.0-20170223122336-d4e7ad98afea // indirect github.com/pires/go-proxyproto v0.0.0-20190615163442-2c19fd512994 github.com/pmylund/go-cache v2.1.0+incompatible github.com/robertkrimen/otto v0.0.0-20180617131154-15f95af6e78d @@ -78,25 +62,41 @@ require ( github.com/square/go-jose v2.4.1+incompatible github.com/stretchr/testify v1.8.1 // test github.com/uber/jaeger-client-go v2.30.1-0.20220110192849-8d8e8fcfd04d+incompatible - github.com/uber/jaeger-lib v2.4.2-0.20210604143007-135cf5605a6d+incompatible // indirect github.com/valyala/fasthttp v1.43.0 // test github.com/vmihailenco/msgpack v4.0.4+incompatible - github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonschema v1.2.0 golang.org/x/crypto v0.0.0-20220513210258-46612604a0f9 - 
golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect golang.org/x/net v0.0.0-20220906165146-f3363e06e74c golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 google.golang.org/grpc v1.36.0 google.golang.org/grpc/examples v0.0.0-20220317213542-f95b001a48df // test gopkg.in/alecthomas/kingpin.v2 v2.2.6 gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22 - gopkg.in/sourcemap.v1 v1.0.5 // indirect gopkg.in/vmihailenco/msgpack.v2 v2.9.1 gopkg.in/xmlpath.v2 v2.0.0-20150820204837-860cbeca3ebc gopkg.in/yaml.v3 v3.0.1 ) +require ( + github.com/HdrHistogram/hdrhistogram-go v1.1.2 // indirect + github.com/certifi/gocertifi v0.0.0-20190905060710-a5e0173ced67 // indirect + github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a // indirect + github.com/getsentry/raven-go v0.2.0 // indirect + github.com/google/uuid v1.3.0 // indirect + github.com/hashicorp/go-msgpack v0.5.4 // indirect + github.com/huandu/xstrings v1.3.2 // indirect + github.com/imdario/mergo v0.3.12 // indirect + github.com/lonelycode/go-uuid v0.0.0-20141202165402-ed3ca8a15a93 // indirect + github.com/mavricknz/asn1-ber v0.0.0-20151103223136-b9df1c2f4213 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/nats-io/nats-server/v2 v2.3.4 // indirect + github.com/peterbourgon/g2s v0.0.0-20170223122336-d4e7ad98afea // indirect + github.com/uber/jaeger-lib v2.4.2-0.20210604143007-135cf5605a6d+incompatible // indirect + github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect + golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect + gopkg.in/sourcemap.v1 v1.0.5 // indirect +) + replace gorm.io/gorm => github.com/TykTechnologies/gorm v1.20.7-0.20210409171139-b5c340f85ed0 //replace github.com/TykTechnologies/graphql-go-tools => ../graphql-go-tools diff --git a/go.sum b/go.sum index 590ac6852de..6314b4a047f 100644 --- a/go.sum +++ b/go.sum @@ -392,7 +392,6 @@ github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtng github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= -github.com/hashicorp/go-hclog v0.8.0 h1:z3ollgGRg8RjfJH6UVBaG54R70GFd++QOkvnJH3VSBY= github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= diff --git a/vendor/github.com/Jeffail/gabs/LICENSE b/vendor/github.com/Jeffail/gabs/LICENSE new file mode 100644 index 00000000000..99a62c6298f --- /dev/null +++ b/vendor/github.com/Jeffail/gabs/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2014 Ashley Jeffs + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/Jeffail/gabs/README.md b/vendor/github.com/Jeffail/gabs/README.md
new file mode 100644
index 00000000000..962ba686483
--- /dev/null
+++ b/vendor/github.com/Jeffail/gabs/README.md
@@ -0,0 +1,290 @@
+![Gabs](gabs_logo.png "Gabs")
+
+Gabs is a small utility for dealing with dynamic or unknown JSON structures in
+golang. It's pretty much just a helpful wrapper around the golang
+`json.Marshal/json.Unmarshal` behaviour and `map[string]interface{}` objects.
+It does nothing spectacular except for being fabulous.
+
+https://godoc.org/github.com/Jeffail/gabs
+
+## Install
+
+``` bash
+go get github.com/Jeffail/gabs
+```
+
+## Use
+
+### Parsing and searching JSON
+
+``` go
+jsonParsed, err := gabs.ParseJSON([]byte(`{
+	"outter":{
+		"inner":{
+			"value1":10,
+			"value2":22
+		},
+		"alsoInner":{
+			"value1":20,
+			"array1":[
+				30, 40
+			]
+		}
+	}
+}`))
+
+var value float64
+var ok bool
+
+value, ok = jsonParsed.Path("outter.inner.value1").Data().(float64)
+// value == 10.0, ok == true
+
+value, ok = jsonParsed.Search("outter", "inner", "value1").Data().(float64)
+// value == 10.0, ok == true
+
+gObj, err := jsonParsed.JSONPointer("/outter/alsoInner/array1/1")
+if err != nil {
+	panic(err)
+}
+value, ok = gObj.Data().(float64)
+// value == 40.0, ok == true
+
+value, ok = jsonParsed.Path("does.not.exist").Data().(float64)
+// value == 0.0, ok == false
+
+exists := jsonParsed.Exists("outter", "inner", "value1")
+// exists == true
+
+exists = jsonParsed.ExistsP("does.not.exist")
+// exists == false
+```
+
+### Iterating objects
+
+``` go
+jsonParsed, _ := gabs.ParseJSON([]byte(`{"object":{ "first": 1, "second": 2, "third": 3 }}`))
+
+// S is shorthand for Search
+children, _ := jsonParsed.S("object").ChildrenMap()
+for key, child := range children {
+	fmt.Printf("key: %v, value: %v\n", key, child.Data().(float64))
+}
+```
+
+### Iterating arrays
+
+``` go
+jsonParsed, err := gabs.ParseJSON([]byte(`{"array":[ "first", "second", "third" ]}`))
+if err != nil {
+	panic(err)
+}
+
+// S is shorthand for Search
+children, err := jsonParsed.S("array").Children()
+if err != nil {
+	panic(err)
+}
+
+for _, child := range children {
+	fmt.Println(child.Data().(string))
+}
+```
+
+Will print:
+
+```
+first
+second
+third
+```
+
+Children() will return all children of an array in order. This also works on
+objects, however, the children will be returned in a random order.
+
+### Searching through arrays
+
+If your JSON structure contains arrays you can still search the fields of the
+objects within the array; this returns a JSON array containing the results for
+each element.
+
+``` go
+jsonParsed, err := gabs.ParseJSON([]byte(`{"array":[ {"value":1}, {"value":2}, {"value":3} ]}`))
+if err != nil {
+	panic(err)
+}
+fmt.Println(jsonParsed.Path("array.value").String())
+```
+
+Will print:
+
+```
+[1,2,3]
+```
+
+### Generating JSON
+
+``` go
+jsonObj := gabs.New()
+// or gabs.Consume(jsonObject) to work on an existing map[string]interface{}
+
+jsonObj.Set(10, "outter", "inner", "value")
+jsonObj.SetP(20, "outter.inner.value2")
+jsonObj.Set(30, "outter", "inner2", "value3")
+
+fmt.Println(jsonObj.String())
+```
+
+Will print:
+
+```
+{"outter":{"inner":{"value":10,"value2":20},"inner2":{"value3":30}}}
+```
+
+To pretty-print:
+
+``` go
+fmt.Println(jsonObj.StringIndent("", "  "))
+```
+
+Will print:
+
+```
+{
+  "outter": {
+    "inner": {
+      "value": 10,
+      "value2": 20
+    },
+    "inner2": {
+      "value3": 30
+    }
+  }
+}
+```
+
+### Generating Arrays
+
+``` go
+jsonObj := gabs.New()
+
+jsonObj.Array("foo", "array")
+// Or .ArrayP("foo.array")
+
+jsonObj.ArrayAppend(10, "foo", "array")
+jsonObj.ArrayAppend(20, "foo", "array")
+jsonObj.ArrayAppend(30, "foo", "array")
+
+fmt.Println(jsonObj.String())
+```
+
+Will print:
+
+```
+{"foo":{"array":[10,20,30]}}
+```
+
+Working with arrays by index:
+
+``` go
+jsonObj := gabs.New()
+
+// Create an array with the length of 3
+jsonObj.ArrayOfSize(3, "foo")
+
+jsonObj.S("foo").SetIndex("test1", 0)
+jsonObj.S("foo").SetIndex("test2", 1)
+
+// Create an embedded array with the length of 3
+jsonObj.S("foo").ArrayOfSizeI(3, 2)
+
+jsonObj.S("foo").Index(2).SetIndex(1, 0)
+jsonObj.S("foo").Index(2).SetIndex(2, 1)
+jsonObj.S("foo").Index(2).SetIndex(3, 2)
+
+fmt.Println(jsonObj.String())
+```
+
+Will print:
+
+```
+{"foo":["test1","test2",[1,2,3]]}
+```
+
+### Converting back to JSON
+
+This is the easiest part:
+
+``` go
+jsonParsedObj, _ := gabs.ParseJSON([]byte(`{
+	"outter":{
+		"values":{
+			"first":10,
+			"second":11
+		}
+	},
+	"outter2":"hello world"
+}`))
+
+jsonOutput := jsonParsedObj.String()
+// Becomes `{"outter":{"values":{"first":10,"second":11}},"outter2":"hello world"}`
+```
+
+And serializing a specific segment is as simple as:
+
+``` go
+jsonParsedObj, _ := gabs.ParseJSON([]byte(`{
+	"outter":{
+		"values":{
+			"first":10,
+			"second":11
+		}
+	},
+	"outter2":"hello world"
+}`))
+
+jsonOutput := jsonParsedObj.Search("outter").String()
+// Becomes `{"values":{"first":10,"second":11}}`
+```
+
+### Merge two containers
+
+You can merge a JSON structure into an existing one, where collisions will be
+converted into a JSON array.
+
+``` go
+jsonParsed1, _ := gabs.ParseJSON([]byte(`{"outter": {"value1": "one"}}`))
+jsonParsed2, _ := gabs.ParseJSON([]byte(`{"outter": {"inner": {"value3": "three"}}, "outter2": {"value2": "two"}}`))
+
+jsonParsed1.Merge(jsonParsed2)
+// Becomes `{"outter":{"inner":{"value3":"three"},"value1":"one"},"outter2":{"value2":"two"}}`
+```
+
+Arrays are merged:
+
+``` go
+jsonParsed1, _ := gabs.ParseJSON([]byte(`{"array": ["one"]}`))
+jsonParsed2, _ := gabs.ParseJSON([]byte(`{"array": ["two"]}`))
+
+jsonParsed1.Merge(jsonParsed2)
+// Becomes `{"array":["one", "two"]}`
+```
+
+### Parsing Numbers
+
+Gabs uses the `json` package under the bonnet, which by default will parse all
+number values into `float64`. If you need to parse `Int` values then you should
+use a `json.Decoder` (https://golang.org/pkg/encoding/json/#Decoder):
+
+``` go
+sample := []byte(`{"test":{"int":10, "float":6.66}}`)
+dec := json.NewDecoder(bytes.NewReader(sample))
+dec.UseNumber()
+
+val, err := gabs.ParseJSONDecoder(dec)
+if err != nil {
+	panic(err)
+}
+
+intValue, err := val.Path("test.int").Data().(json.Number).Int64()
+```
diff --git a/vendor/github.com/Jeffail/gabs/gabs.go b/vendor/github.com/Jeffail/gabs/gabs.go
new file mode 100644
index 00000000000..011c4c39241
--- /dev/null
+++ b/vendor/github.com/Jeffail/gabs/gabs.go
@@ -0,0 +1,727 @@
+/*
+Copyright (c) 2014 Ashley Jeffs
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+*/
+
+// Package gabs implements a simplified wrapper around creating and parsing
+// unknown or dynamic JSON.
+package gabs
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"strconv"
+	"strings"
+)
+
+//------------------------------------------------------------------------------
+
+var (
+	// ErrOutOfBounds indicates an index was out of bounds.
+	ErrOutOfBounds = errors.New("out of bounds")
+
+	// ErrNotObjOrArray is returned when a target is not an object or array type
+	// but needs to be for the intended operation.
+	ErrNotObjOrArray = errors.New("not an object or array")
+
+	// ErrNotObj is returned when a target is not an object but needs to be for
+	// the intended operation.
+	ErrNotObj = errors.New("not an object")
+
+	// ErrNotArray is returned when a target is not an array but needs to be for
+	// the intended operation.
+	ErrNotArray = errors.New("not an array")
+
+	// ErrPathCollision is returned when creating a path failed because an
+	// element collided with an existing value.
+	ErrPathCollision = errors.New("encountered value collision whilst building path")
+
+	// ErrInvalidInputObj is returned when the input value was not a
+	// map[string]interface{}.
+	ErrInvalidInputObj = errors.New("invalid input object")
+
+	// ErrInvalidInputText is returned when the input data could not be parsed.
+	ErrInvalidInputText = errors.New("input text could not be parsed")
+
+	// ErrInvalidPath is returned when the filepath was not valid.
+	ErrInvalidPath = errors.New("invalid file path")
+
+	// ErrInvalidBuffer is returned when the input buffer contained an invalid
+	// JSON string.
+ ErrInvalidBuffer = errors.New("input buffer contained invalid JSON") +) + +//------------------------------------------------------------------------------ + +func resolveJSONPointerHierarchy(path string) ([]string, error) { + if len(path) < 1 { + return nil, errors.New("failed to resolve JSON pointer: path must not be empty") + } + if path[0] != '/' { + return nil, errors.New("failed to resolve JSON pointer: path must begin with '/'") + } + hierarchy := strings.Split(path, "/")[1:] + for i, v := range hierarchy { + v = strings.Replace(v, "~1", "/", -1) + v = strings.Replace(v, "~0", "~", -1) + hierarchy[i] = v + } + return hierarchy, nil +} + +//------------------------------------------------------------------------------ + +// Container references a specific element within a JSON structure. +type Container struct { + object interface{} +} + +// Data returns the underlying interface{} of the target element in the JSON +// structure. +func (g *Container) Data() interface{} { + if g == nil { + return nil + } + return g.object +} + +//------------------------------------------------------------------------------ + +// Path searches the JSON structure following a path in dot notation. +func (g *Container) Path(path string) *Container { + return g.Search(strings.Split(path, ".")...) +} + +// Search attempts to find and return an object within the JSON structure by +// following a provided hierarchy of field names to locate the target. If the +// search encounters an array and has not reached the end target then it will +// iterate each object of the array for the target and return all of the results +// in a JSON array. +func (g *Container) Search(hierarchy ...string) *Container { + var object interface{} + + object = g.Data() + for target := 0; target < len(hierarchy); target++ { + if mmap, ok := object.(map[string]interface{}); ok { + object, ok = mmap[hierarchy[target]] + if !ok { + return nil + } + } else if marray, ok := object.([]interface{}); ok { + tmpArray := []interface{}{} + for _, val := range marray { + tmpGabs := &Container{val} + res := tmpGabs.Search(hierarchy[target:]...) + if res != nil { + tmpArray = append(tmpArray, res.Data()) + } + } + if len(tmpArray) == 0 { + return nil + } + return &Container{tmpArray} + } else { + return nil + } + } + return &Container{object} +} + +// JSONPointer parses a JSON pointer path (https://tools.ietf.org/html/rfc6901) +// and either returns a *gabs.Container containing the result or an error if the +// referenced item could not be found. 
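+//
+// A minimal usage sketch (illustrative, not part of the upstream docs):
+//
+//	c, _ := gabs.ParseJSON([]byte(`{"a":{"b":["x","y"]}}`))
+//	v, err := c.JSONPointer("/a/b/1")
+//	// v.Data() == "y", err == nil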
+func (g *Container) JSONPointer(path string) (*Container, error) { + hierarchy, err := resolveJSONPointerHierarchy(path) + if err != nil { + return nil, err + } + + object := g.Data() + for target := 0; target < len(hierarchy); target++ { + pathSeg := hierarchy[target] + if mmap, ok := object.(map[string]interface{}); ok { + object, ok = mmap[pathSeg] + if !ok { + return nil, fmt.Errorf("failed to resolve JSON pointer: index '%v' value '%v' was not found", target, pathSeg) + } + } else if marray, ok := object.([]interface{}); ok { + index, err := strconv.Atoi(pathSeg) + if err != nil { + return nil, fmt.Errorf("failed to resolve JSON pointer: could not parse index '%v' value '%v' into array index: %v", target, pathSeg, err) + } + if len(marray) <= index { + return nil, fmt.Errorf("failed to resolve JSON pointer: index '%v' value '%v' exceeded target array size of '%v'", target, pathSeg, len(marray)) + } + object = marray[index] + } else { + return &Container{nil}, fmt.Errorf("failed to resolve JSON pointer: index '%v' field '%v' was not found", target, pathSeg) + } + } + return &Container{object}, nil +} + +// S is a shorthand alias for Search. +func (g *Container) S(hierarchy ...string) *Container { + return g.Search(hierarchy...) +} + +// Exists checks whether a path exists. +func (g *Container) Exists(hierarchy ...string) bool { + return g.Search(hierarchy...) != nil +} + +// ExistsP checks whether a dot notation path exists. +func (g *Container) ExistsP(path string) bool { + return g.Exists(strings.Split(path, ".")...) +} + +// Index attempts to find and return an element within a JSON array by an index. +func (g *Container) Index(index int) *Container { + if array, ok := g.Data().([]interface{}); ok { + if index >= len(array) { + return &Container{nil} + } + return &Container{array[index]} + } + return &Container{nil} +} + +// Children returns a slice of all children of an array element. This also works +// for objects, however, the children returned for an object will be in a random +// order and you lose the names of the returned objects this way. +func (g *Container) Children() ([]*Container, error) { + if array, ok := g.Data().([]interface{}); ok { + children := make([]*Container, len(array)) + for i := 0; i < len(array); i++ { + children[i] = &Container{array[i]} + } + return children, nil + } + if mmap, ok := g.Data().(map[string]interface{}); ok { + children := []*Container{} + for _, obj := range mmap { + children = append(children, &Container{obj}) + } + return children, nil + } + return nil, ErrNotObjOrArray +} + +// ChildrenMap returns a map of all the children of an object element. +func (g *Container) ChildrenMap() (map[string]*Container, error) { + if mmap, ok := g.Data().(map[string]interface{}); ok { + children := map[string]*Container{} + for name, obj := range mmap { + children[name] = &Container{obj} + } + return children, nil + } + return nil, ErrNotObj +} + +//------------------------------------------------------------------------------ + +// Set the value of a field at a JSON path, any parts of the path that do not +// exist will be constructed, and if a collision occurs with a non object type +// whilst iterating the path an error is returned. 
+func (g *Container) Set(value interface{}, path ...string) (*Container, error) { + if len(path) == 0 { + g.object = value + return g, nil + } + var object interface{} + if g.object == nil { + g.object = map[string]interface{}{} + } + object = g.object + for target := 0; target < len(path); target++ { + if mmap, ok := object.(map[string]interface{}); ok { + if target == len(path)-1 { + mmap[path[target]] = value + } else if mmap[path[target]] == nil { + mmap[path[target]] = map[string]interface{}{} + } + object = mmap[path[target]] + } else { + return &Container{nil}, ErrPathCollision + } + } + return &Container{object}, nil +} + +// SetP sets the value of a field at a JSON path using dot notation, any parts +// of the path that do not exist will be constructed, and if a collision occurs +// with a non object type whilst iterating the path an error is returned. +func (g *Container) SetP(value interface{}, path string) (*Container, error) { + return g.Set(value, strings.Split(path, ".")...) +} + +// SetIndex attempts to set a value of an array element based on an index. +func (g *Container) SetIndex(value interface{}, index int) (*Container, error) { + if array, ok := g.Data().([]interface{}); ok { + if index >= len(array) { + return &Container{nil}, ErrOutOfBounds + } + array[index] = value + return &Container{array[index]}, nil + } + return &Container{nil}, ErrNotArray +} + +// SetJSONPointer parses a JSON pointer path +// (https://tools.ietf.org/html/rfc6901) and sets the leaf to a value. Returns +// an error if the pointer could not be resolved due to missing fields. +func (g *Container) SetJSONPointer(value interface{}, path string) error { + hierarchy, err := resolveJSONPointerHierarchy(path) + if err != nil { + return err + } + + if len(hierarchy) == 0 { + g.object = value + return nil + } + + object := g.object + + for target := 0; target < len(hierarchy); target++ { + pathSeg := hierarchy[target] + if mmap, ok := object.(map[string]interface{}); ok { + if target == len(hierarchy)-1 { + object = value + mmap[pathSeg] = object + } else if object = mmap[pathSeg]; object == nil { + return fmt.Errorf("failed to resolve JSON pointer: index '%v' value '%v' was not found", target, pathSeg) + } + } else if marray, ok := object.([]interface{}); ok { + index, err := strconv.Atoi(pathSeg) + if err != nil { + return fmt.Errorf("failed to resolve JSON pointer: could not parse index '%v' value '%v' into array index: %v", target, pathSeg, err) + } + if len(marray) <= index { + return fmt.Errorf("failed to resolve JSON pointer: index '%v' value '%v' exceeded target array size of '%v'", target, pathSeg, len(marray)) + } + if target == len(hierarchy)-1 { + object = value + marray[index] = object + } else if object = marray[index]; object == nil { + return fmt.Errorf("failed to resolve JSON pointer: index '%v' value '%v' was not found", target, pathSeg) + } + } else { + return fmt.Errorf("failed to resolve JSON pointer: index '%v' value '%v' was not found", target, pathSeg) + } + } + return nil +} + +// Object creates a new JSON object at a target path. Returns an error if the +// path contains a collision with a non object type. +func (g *Container) Object(path ...string) (*Container, error) { + return g.Set(map[string]interface{}{}, path...) +} + +// ObjectP creates a new JSON object at a target path using dot notation. +// Returns an error if the path contains a collision with a non object type. 
+func (g *Container) ObjectP(path string) (*Container, error) {
+	return g.Object(strings.Split(path, ".")...)
+}
+
+// ObjectI creates a new JSON object at an array index. Returns an error if the
+// object is not an array or the index is out of bounds.
+func (g *Container) ObjectI(index int) (*Container, error) {
+	return g.SetIndex(map[string]interface{}{}, index)
+}
+
+// Array creates a new JSON array at a path. Returns an error if the path
+// contains a collision with a non object type.
+func (g *Container) Array(path ...string) (*Container, error) {
+	return g.Set([]interface{}{}, path...)
+}
+
+// ArrayP creates a new JSON array at a path using dot notation. Returns an
+// error if the path contains a collision with a non object type.
+func (g *Container) ArrayP(path string) (*Container, error) {
+	return g.Array(strings.Split(path, ".")...)
+}
+
+// ArrayI creates a new JSON array within an array at an index. Returns an error
+// if the element is not an array or the index is out of bounds.
+func (g *Container) ArrayI(index int) (*Container, error) {
+	return g.SetIndex([]interface{}{}, index)
+}
+
+// ArrayOfSize creates a new JSON array of a particular size at a path. Returns
+// an error if the path contains a collision with a non object type.
+func (g *Container) ArrayOfSize(size int, path ...string) (*Container, error) {
+	a := make([]interface{}, size)
+	return g.Set(a, path...)
+}
+
+// ArrayOfSizeP creates a new JSON array of a particular size at a path using
+// dot notation. Returns an error if the path contains a collision with a non
+// object type.
+func (g *Container) ArrayOfSizeP(size int, path string) (*Container, error) {
+	return g.ArrayOfSize(size, strings.Split(path, ".")...)
+}
+
+// ArrayOfSizeI creates a new JSON array of a particular size within an array at
+// an index. Returns an error if the element is not an array or the index is out
+// of bounds.
+func (g *Container) ArrayOfSizeI(size, index int) (*Container, error) {
+	a := make([]interface{}, size)
+	return g.SetIndex(a, index)
+}
+
+// Delete removes an element at a path; an error is returned if the element
+// does not exist.
+func (g *Container) Delete(path ...string) error {
+	var object interface{}
+
+	if g.object == nil {
+		return ErrNotObj
+	}
+	object = g.object
+	for target := 0; target < len(path); target++ {
+		if mmap, ok := object.(map[string]interface{}); ok {
+			if target == len(path)-1 {
+				if _, ok := mmap[path[target]]; ok {
+					delete(mmap, path[target])
+				} else {
+					return ErrNotObj
+				}
+			}
+			object = mmap[path[target]]
+		} else {
+			return ErrNotObj
+		}
+	}
+	return nil
+}
+
+// DeleteP deletes an element at a path using dot notation; an error is
+// returned if the element does not exist.
+func (g *Container) DeleteP(path string) error {
+	return g.Delete(strings.Split(path, ".")...)
+}
+
+// MergeFn merges two objects using a provided function to resolve collisions.
+//
+// The collision function receives two interface{} arguments, destination (the
+// original object) and source (the object being merged into the destination).
+// Whichever value is returned becomes the new value in the destination object
+// at the location of the collision.
+func (g *Container) MergeFn(source *Container, collisionFn func(destination, source interface{}) interface{}) error {
+	var recursiveFnc func(map[string]interface{}, []string) error
+	recursiveFnc = func(mmap map[string]interface{}, path []string) error {
+		for key, value := range mmap {
+			newPath := append(path, key)
+			if g.Exists(newPath...) {
+				existingData := g.Search(newPath...).Data()
+				switch t := value.(type) {
+				case map[string]interface{}:
+					switch existingVal := existingData.(type) {
+					case map[string]interface{}:
+						if err := recursiveFnc(t, newPath); err != nil {
+							return err
+						}
+					default:
+						if _, err := g.Set(collisionFn(existingVal, t), newPath...); err != nil {
+							return err
+						}
+					}
+				default:
+					if _, err := g.Set(collisionFn(existingData, t), newPath...); err != nil {
+						return err
+					}
+				}
+			} else {
+				// path doesn't exist. So set the value
+				if _, err := g.Set(value, newPath...); err != nil {
+					return err
+				}
+			}
+		}
+		return nil
+	}
+	if mmap, ok := source.Data().(map[string]interface{}); ok {
+		return recursiveFnc(mmap, []string{})
+	}
+	return nil
+}
+
+// Merge a source object into an existing destination object. When a collision
+// is found within the merged structures (both a source and destination object
+// contain the same non-object keys) the result will be an array containing both
+// values, where values that are already arrays will be expanded into the
+// resulting array.
+//
+// It is possible to merge structures with different collision behaviours using
+// MergeFn.
+func (g *Container) Merge(source *Container) error {
+	return g.MergeFn(source, func(dest, source interface{}) interface{} {
+		destArr, destIsArray := dest.([]interface{})
+		sourceArr, sourceIsArray := source.([]interface{})
+		if destIsArray {
+			if sourceIsArray {
+				return append(destArr, sourceArr...)
+			}
+			return append(destArr, source)
+		}
+		if sourceIsArray {
+			return append(append([]interface{}{}, dest), sourceArr...)
+		}
+		return []interface{}{dest, source}
+	})
+}
+
+//------------------------------------------------------------------------------
+
+/*
+Array modification/search - Keeping these options simple right now, no need for
+anything more complicated since you can just cast to []interface{}, modify and
+then reassign with Set.
+*/
+
+// ArrayAppend attempts to append a value onto a JSON array at a path. If the
+// target is not a JSON array then it will be converted into one, with its
+// original contents set to the first element of the array.
+func (g *Container) ArrayAppend(value interface{}, path ...string) error {
+	if array, ok := g.Search(path...).Data().([]interface{}); ok {
+		array = append(array, value)
+		_, err := g.Set(array, path...)
+		return err
+	}
+
+	newArray := []interface{}{}
+	if d := g.Search(path...).Data(); d != nil {
+		newArray = append(newArray, d)
+	}
+	newArray = append(newArray, value)
+
+	_, err := g.Set(newArray, path...)
+	return err
+}
+
+// ArrayAppendP attempts to append a value onto a JSON array at a path using dot
+// notation. If the target is not a JSON array then it will be converted into
+// one, with its original contents set to the first element of the array.
+func (g *Container) ArrayAppendP(value interface{}, path string) error {
+	return g.ArrayAppend(value, strings.Split(path, ".")...)
+}
+
+// ArrayRemove attempts to remove an element identified by an index from a JSON
+// array at a path.
+func (g *Container) ArrayRemove(index int, path ...string) error {
+	if index < 0 {
+		return ErrOutOfBounds
+	}
+	array, ok := g.Search(path...).Data().([]interface{})
+	if !ok {
+		return ErrNotArray
+	}
+	if index < len(array) {
+		array = append(array[:index], array[index+1:]...)
+	} else {
+		return ErrOutOfBounds
+	}
+	_, err := g.Set(array, path...)
+	return err
+}
+
+// ArrayRemoveP attempts to remove an element identified by an index from a JSON
+// array at a path using dot notation.
+func (g *Container) ArrayRemoveP(index int, path string) error {
+	return g.ArrayRemove(index, strings.Split(path, ".")...)
+}
+
+// ArrayElement attempts to access an element by an index from a JSON array at a
+// path.
+func (g *Container) ArrayElement(index int, path ...string) (*Container, error) {
+	if index < 0 {
+		return &Container{nil}, ErrOutOfBounds
+	}
+	array, ok := g.Search(path...).Data().([]interface{})
+	if !ok {
+		return &Container{nil}, ErrNotArray
+	}
+	if index < len(array) {
+		return &Container{array[index]}, nil
+	}
+	return &Container{nil}, ErrOutOfBounds
+}
+
+// ArrayElementP attempts to access an element by an index from a JSON array at
+// a path using dot notation.
+func (g *Container) ArrayElementP(index int, path string) (*Container, error) {
+	return g.ArrayElement(index, strings.Split(path, ".")...)
+}
+
+// ArrayCount counts the number of elements in a JSON array at a path.
+func (g *Container) ArrayCount(path ...string) (int, error) {
+	if array, ok := g.Search(path...).Data().([]interface{}); ok {
+		return len(array), nil
+	}
+	return 0, ErrNotArray
+}
+
+// ArrayCountP counts the number of elements in a JSON array at a path using dot
+// notation.
+func (g *Container) ArrayCountP(path string) (int, error) {
+	return g.ArrayCount(strings.Split(path, ".")...)
+}
+
+//------------------------------------------------------------------------------
+
+// Bytes marshals an element to a JSON []byte blob.
+func (g *Container) Bytes() []byte {
+	if g.Data() != nil {
+		if bytes, err := json.Marshal(g.object); err == nil {
+			return bytes
+		}
+	}
+	return []byte("{}")
+}
+
+// BytesIndent marshals an element to a JSON []byte blob formatted with a prefix
+// and indent string.
+func (g *Container) BytesIndent(prefix string, indent string) []byte {
+	if g.object != nil {
+		if bytes, err := json.MarshalIndent(g.object, prefix, indent); err == nil {
+			return bytes
+		}
+	}
+	return []byte("{}")
+}
+
+// String marshals an element to a JSON formatted string.
+func (g *Container) String() string {
+	return string(g.Bytes())
+}
+
+// StringIndent marshals an element to a JSON string formatted with a prefix and
+// indent string.
+func (g *Container) StringIndent(prefix string, indent string) string {
+	return string(g.BytesIndent(prefix, indent))
+}
+
+// EncodeOpt is a functional option for the EncodeJSON method.
+type EncodeOpt func(e *json.Encoder)
+
+// EncodeOptHTMLEscape sets the encoder to escape the JSON for html.
+func EncodeOptHTMLEscape(doEscape bool) EncodeOpt {
+	return func(e *json.Encoder) {
+		e.SetEscapeHTML(doEscape)
+	}
+}
+
+// EncodeOptIndent sets the encoder to indent the JSON output.
+func EncodeOptIndent(prefix string, indent string) EncodeOpt {
+	return func(e *json.Encoder) {
+		e.SetIndent(prefix, indent)
+	}
+}
+
+// EncodeJSON marshals an element to a JSON formatted []byte using a variadic
+// list of modifier functions for the encoder being used. Functions for
+// modifying the output are prefixed with EncodeOpt, e.g. EncodeOptHTMLEscape.
+func (g *Container) EncodeJSON(encodeOpts ...EncodeOpt) []byte {
+	var b bytes.Buffer
+	encoder := json.NewEncoder(&b)
+	encoder.SetEscapeHTML(false) // Do not escape by default.
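+	// Apply any caller-supplied options; these may override the default above.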
+ for _, opt := range encodeOpts { + opt(encoder) + } + if err := encoder.Encode(g.object); err != nil { + return []byte("{}") + } + result := b.Bytes() + if len(result) > 0 { + result = result[:len(result)-1] + } + return result +} + +// New creates a new gabs JSON object. +func New() *Container { + return &Container{map[string]interface{}{}} +} + +// Consume an already unmarshalled JSON object (or a new map[string]interface{}) +// into a *Container. +func Consume(root interface{}) (*Container, error) { + return &Container{root}, nil +} + +// ParseJSON unmarshals a JSON byte slice into a *Container. +func ParseJSON(sample []byte) (*Container, error) { + var gabs Container + + if err := json.Unmarshal(sample, &gabs.object); err != nil { + return nil, err + } + + return &gabs, nil +} + +// ParseJSONDecoder applies a json.Decoder to a *Container. +func ParseJSONDecoder(decoder *json.Decoder) (*Container, error) { + var gabs Container + + if err := decoder.Decode(&gabs.object); err != nil { + return nil, err + } + + return &gabs, nil +} + +// ParseJSONFile reads a file and unmarshals the contents into a *Container. +func ParseJSONFile(path string) (*Container, error) { + if len(path) > 0 { + cBytes, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + + container, err := ParseJSON(cBytes) + if err != nil { + return nil, err + } + + return container, nil + } + return nil, ErrInvalidPath +} + +// ParseJSONBuffer reads a buffer and unmarshals the contents into a *Container. +func ParseJSONBuffer(buffer io.Reader) (*Container, error) { + var gabs Container + jsonDecoder := json.NewDecoder(buffer) + if err := jsonDecoder.Decode(&gabs.object); err != nil { + return nil, err + } + + return &gabs, nil +} + +//------------------------------------------------------------------------------ diff --git a/vendor/github.com/Jeffail/gabs/gabs_logo.png b/vendor/github.com/Jeffail/gabs/gabs_logo.png new file mode 100644 index 00000000000..b6c1fad9931 Binary files /dev/null and b/vendor/github.com/Jeffail/gabs/gabs_logo.png differ diff --git a/vendor/github.com/Jeffail/gabs/go.mod b/vendor/github.com/Jeffail/gabs/go.mod new file mode 100644 index 00000000000..ff2fc976614 --- /dev/null +++ b/vendor/github.com/Jeffail/gabs/go.mod @@ -0,0 +1 @@ +module github.com/Jeffail/gabs diff --git a/vendor/github.com/Jeffail/tunny/LICENSE b/vendor/github.com/Jeffail/tunny/LICENSE new file mode 100644 index 00000000000..99a62c6298f --- /dev/null +++ b/vendor/github.com/Jeffail/tunny/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2014 Ashley Jeffs + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/Jeffail/tunny/README.md b/vendor/github.com/Jeffail/tunny/README.md
new file mode 100644
index 00000000000..fa8ef45e91c
--- /dev/null
+++ b/vendor/github.com/Jeffail/tunny/README.md
@@ -0,0 +1,134 @@
+![Tunny](tunny_logo.png "Tunny")
+
+[![godoc for Jeffail/tunny][1]][2]
+[![goreportcard for Jeffail/tunny][3]][4]
+
+Tunny is a Golang library for spawning and managing a goroutine pool, allowing
+you to limit work coming from any number of goroutines with a synchronous API.
+
+A fixed goroutine pool is helpful when you have work coming from an arbitrary
+number of asynchronous sources, but a limited capacity for parallel processing.
+For example, when processing jobs from HTTP requests that are CPU heavy you can
+create a pool with a size that matches your CPU count.
+
+## Install
+
+``` sh
+go get github.com/Jeffail/tunny
+```
+
+Or, using dep:
+
+``` sh
+dep ensure -add github.com/Jeffail/tunny
+```
+
+## Use
+
+For most cases your heavy work can be expressed in a simple `func()`, where you
+can use `NewFunc`. Let's see how this looks using our HTTP requests to CPU count
+example:
+
+``` go
+package main
+
+import (
+	"io/ioutil"
+	"net/http"
+	"runtime"
+
+	"github.com/Jeffail/tunny"
+)
+
+func main() {
+	numCPUs := runtime.NumCPU()
+
+	pool := tunny.NewFunc(numCPUs, func(payload interface{}) interface{} {
+		var result []byte
+
+		// TODO: Something CPU heavy with payload
+
+		return result
+	})
+	defer pool.Close()
+
+	http.HandleFunc("/work", func(w http.ResponseWriter, r *http.Request) {
+		input, err := ioutil.ReadAll(r.Body)
+		if err != nil {
+			http.Error(w, "Internal error", http.StatusInternalServerError)
+			return
+		}
+		defer r.Body.Close()
+
+		// Funnel this work into our pool. This call is synchronous and will
+		// block until the job is completed.
+		result := pool.Process(input)
+
+		w.Write(result.([]byte))
+	})
+
+	http.ListenAndServe(":8080", nil)
+}
+```
+
+Tunny also supports timeouts. You can replace the `Process` call above with the
+following:
+
+``` go
+result, err := pool.ProcessTimed(input, time.Second*5)
+if err == tunny.ErrJobTimedOut {
+	http.Error(w, "Request timed out", http.StatusRequestTimeout)
+}
+```
+
+You can also use the context from the request (or any other context) to handle
+timeouts and deadlines. Simply replace the `Process` call with the following:
+
+``` go
+result, err := pool.ProcessCtx(r.Context(), input)
+if err == context.DeadlineExceeded {
+	http.Error(w, "Request timed out", http.StatusRequestTimeout)
+}
+```
+
+## Changing Pool Size
+
+The size of a Tunny pool can be changed at any time with `SetSize(int)`:
+
+``` go
+pool.SetSize(10)  // 10 goroutines
+pool.SetSize(100) // 100 goroutines
+```
+
+This is safe to perform from any goroutine even if others are still processing.
+
+## Goroutines With State
+
+Sometimes each goroutine within a Tunny pool will require its own managed state.
+In this case you should implement [`tunny.Worker`][tunny-worker], which includes
+calls for terminating, interrupting (in case a job times out and is no longer
+needed) and blocking the next job allocation until a condition is met.
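+
+For illustration, a skeleton implementation might look like this (a sketch
+only; the `conn` field and `newCustomWorker` constructor are hypothetical
+stand-ins for whatever per-goroutine state you need):
+
+``` go
+type customWorker struct {
+	conn net.Conn // per-goroutine state, e.g. a dedicated connection
+}
+
+func newCustomWorker() tunny.Worker {
+	// Dialling and error handling elided for brevity.
+	return &customWorker{}
+}
+
+func (w *customWorker) Process(payload interface{}) interface{} {
+	// TODO: perform the job using w.conn and return the result.
+	return payload
+}
+
+func (w *customWorker) BlockUntilReady() {} // e.g. wait until w.conn is healthy
+func (w *customWorker) Interrupt()       {} // e.g. abort the in-flight call
+
+func (w *customWorker) Terminate() {
+	if w.conn != nil {
+		w.conn.Close() // release held resources
+	}
+}
+```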
+
+When creating a pool using `Worker` types you will need to provide a constructor
+function for spawning your custom implementation:
+
+``` go
+pool := tunny.New(poolSize, func() tunny.Worker {
+	// TODO: Any per-goroutine state allocation here.
+	return newCustomWorker()
+})
+```
+
+This allows Tunny to create and destroy `Worker` types cleanly when the pool
+size is changed.
+
+## Ordering
+
+Backlogged jobs are not guaranteed to be processed in order. Due to the current
+implementation of channels and select blocks a stack of backlogged jobs will be
+processed as a FIFO queue. However, this behaviour is not part of the spec and
+should not be relied upon.
+
+[1]: https://godoc.org/github.com/Jeffail/tunny?status.svg
+[2]: http://godoc.org/github.com/Jeffail/tunny
+[3]: https://goreportcard.com/badge/github.com/Jeffail/tunny
+[4]: https://goreportcard.com/report/Jeffail/tunny
+[tunny-worker]: https://godoc.org/github.com/Jeffail/tunny#Worker
diff --git a/vendor/github.com/Jeffail/tunny/go.mod b/vendor/github.com/Jeffail/tunny/go.mod
new file mode 100644
index 00000000000..7448dabd9f2
--- /dev/null
+++ b/vendor/github.com/Jeffail/tunny/go.mod
@@ -0,0 +1,3 @@
+module github.com/Jeffail/tunny
+
+go 1.13
diff --git a/vendor/github.com/Jeffail/tunny/tunny.go b/vendor/github.com/Jeffail/tunny/tunny.go
new file mode 100644
index 00000000000..5957983a178
--- /dev/null
+++ b/vendor/github.com/Jeffail/tunny/tunny.go
@@ -0,0 +1,309 @@
+// Copyright (c) 2014 Ashley Jeffs
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package tunny
+
+import (
+	"context"
+	"errors"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+//------------------------------------------------------------------------------
+
+// Errors that are used throughout the Tunny API.
+var (
+	ErrPoolNotRunning = errors.New("the pool is not running")
+	ErrJobNotFunc     = errors.New("generic worker not given a func()")
+	ErrWorkerClosed   = errors.New("worker was closed")
+	ErrJobTimedOut    = errors.New("job request timed out")
+)
+
+// Worker is an interface representing a Tunny working agent. It will be used to
+// block a calling goroutine until ready to process a job, process that job
+// synchronously, interrupt its own process call when jobs are abandoned, and
+// clean up its resources when being removed from the pool.
+//
+// Each of these duties is implemented as a single method and can be skipped
+// when not needed by simply implementing an empty func.
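+//
+// The closureWorker and callbackWorker types below are minimal examples of
+// this interface.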
+type Worker interface {
+	// Process will synchronously perform a job and return the result.
+	Process(interface{}) interface{}
+
+	// BlockUntilReady is called before each job is processed and must block the
+	// calling goroutine until the Worker is ready to process the next job.
+	BlockUntilReady()
+
+	// Interrupt is called when a job is cancelled. The worker is responsible
+	// for unblocking the Process implementation.
+	Interrupt()
+
+	// Terminate is called when a Worker is removed from the processing pool
+	// and is responsible for cleaning up any held resources.
+	Terminate()
+}
+
+//------------------------------------------------------------------------------
+
+// closureWorker is a minimal Worker implementation that simply wraps a
+// func(interface{}) interface{}.
+type closureWorker struct {
+	processor func(interface{}) interface{}
+}
+
+func (w *closureWorker) Process(payload interface{}) interface{} {
+	return w.processor(payload)
+}
+
+func (w *closureWorker) BlockUntilReady() {}
+func (w *closureWorker) Interrupt()       {}
+func (w *closureWorker) Terminate()       {}
+
+//------------------------------------------------------------------------------
+
+// callbackWorker is a minimal Worker implementation that attempts to cast
+// each job into func() and either calls it if successful or returns
+// ErrJobNotFunc.
+type callbackWorker struct{}
+
+func (w *callbackWorker) Process(payload interface{}) interface{} {
+	f, ok := payload.(func())
+	if !ok {
+		return ErrJobNotFunc
+	}
+	f()
+	return nil
+}
+
+func (w *callbackWorker) BlockUntilReady() {}
+func (w *callbackWorker) Interrupt()       {}
+func (w *callbackWorker) Terminate()       {}
+
+//------------------------------------------------------------------------------
+
+// Pool is a struct that manages a collection of workers, each with their own
+// goroutine. The Pool can initialize, expand, compress and close the workers,
+// as well as processing jobs with the workers synchronously.
+type Pool struct {
+	queuedJobs int64
+
+	ctor    func() Worker
+	workers []*workerWrapper
+	reqChan chan workRequest
+
+	workerMut sync.Mutex
+}
+
+// New creates a new Pool of workers that starts with n workers. You must
+// provide a constructor function that creates new Worker types and when you
+// change the size of the pool the constructor will be called to create each new
+// Worker.
+func New(n int, ctor func() Worker) *Pool {
+	p := &Pool{
+		ctor:    ctor,
+		reqChan: make(chan workRequest),
+	}
+	p.SetSize(n)
+
+	return p
+}
+
+// NewFunc creates a new Pool of workers where each worker will process using
+// the provided func.
+func NewFunc(n int, f func(interface{}) interface{}) *Pool {
+	return New(n, func() Worker {
+		return &closureWorker{
+			processor: f,
+		}
+	})
+}
+
+// NewCallback creates a new Pool of workers where workers cast the job payload
+// into a func() and run it, or return ErrJobNotFunc if the cast fails.
+func NewCallback(n int) *Pool {
+	return New(n, func() Worker {
+		return &callbackWorker{}
+	})
+}
+
+//------------------------------------------------------------------------------
+
+// Process will use the Pool to process a payload and synchronously return the
+// result. Process can be called safely from any goroutine, but will panic if
+// the Pool has been stopped.
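+//
+// A minimal usage sketch (illustrative only):
+//
+//	pool := tunny.NewFunc(4, func(in interface{}) interface{} { return in })
+//	defer pool.Close()
+//	result := pool.Process("job") // blocks until a worker returns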
+func (p *Pool) Process(payload interface{}) interface{} {
+	atomic.AddInt64(&p.queuedJobs, 1)
+
+	request, open := <-p.reqChan
+	if !open {
+		panic(ErrPoolNotRunning)
+	}
+
+	request.jobChan <- payload
+
+	payload, open = <-request.retChan
+	if !open {
+		panic(ErrWorkerClosed)
+	}
+
+	atomic.AddInt64(&p.queuedJobs, -1)
+	return payload
+}
+
+// ProcessTimed will use the Pool to process a payload and synchronously return
+// the result. If the timeout occurs before the job has finished the worker will
+// be interrupted and ErrJobTimedOut will be returned. ProcessTimed can be
+// called safely from any goroutine.
+func (p *Pool) ProcessTimed(
+	payload interface{},
+	timeout time.Duration,
+) (interface{}, error) {
+	atomic.AddInt64(&p.queuedJobs, 1)
+	defer atomic.AddInt64(&p.queuedJobs, -1)
+
+	tout := time.NewTimer(timeout)
+
+	var request workRequest
+	var open bool
+
+	select {
+	case request, open = <-p.reqChan:
+		if !open {
+			return nil, ErrPoolNotRunning
+		}
+	case <-tout.C:
+		return nil, ErrJobTimedOut
+	}
+
+	select {
+	case request.jobChan <- payload:
+	case <-tout.C:
+		request.interruptFunc()
+		return nil, ErrJobTimedOut
+	}
+
+	select {
+	case payload, open = <-request.retChan:
+		if !open {
+			return nil, ErrWorkerClosed
+		}
+	case <-tout.C:
+		request.interruptFunc()
+		return nil, ErrJobTimedOut
+	}
+
+	tout.Stop()
+	return payload, nil
+}
+
+// ProcessCtx will use the Pool to process a payload and synchronously return
+// the result. If the context cancels before the job has finished the worker
+// will be interrupted and the context's error will be returned. ProcessCtx can
+// be called safely from any goroutine.
+func (p *Pool) ProcessCtx(ctx context.Context, payload interface{}) (interface{}, error) {
+	atomic.AddInt64(&p.queuedJobs, 1)
+	defer atomic.AddInt64(&p.queuedJobs, -1)
+
+	var request workRequest
+	var open bool
+
+	select {
+	case request, open = <-p.reqChan:
+		if !open {
+			return nil, ErrPoolNotRunning
+		}
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	}
+
+	select {
+	case request.jobChan <- payload:
+	case <-ctx.Done():
+		request.interruptFunc()
+		return nil, ctx.Err()
+	}
+
+	select {
+	case payload, open = <-request.retChan:
+		if !open {
+			return nil, ErrWorkerClosed
+		}
+	case <-ctx.Done():
+		request.interruptFunc()
+		return nil, ctx.Err()
+	}
+
+	return payload, nil
+}
+
+// QueueLength returns the current count of pending queued jobs.
+func (p *Pool) QueueLength() int64 {
+	return atomic.LoadInt64(&p.queuedJobs)
+}
+
+// SetSize changes the total number of workers in the Pool. This can be called
+// by any goroutine at any time unless the Pool has been stopped, in which case
+// a panic will occur.
+func (p *Pool) SetSize(n int) {
+	p.workerMut.Lock()
+	defer p.workerMut.Unlock()
+
+	lWorkers := len(p.workers)
+	if lWorkers == n {
+		return
+	}
+
+	// Add extra workers if N > len(workers)
+	for i := lWorkers; i < n; i++ {
+		p.workers = append(p.workers, newWorkerWrapper(p.reqChan, p.ctor()))
+	}
+
+	// Asynchronously stop all workers > N
+	for i := n; i < lWorkers; i++ {
+		p.workers[i].stop()
+	}
+
+	// Synchronously wait for all workers > N to stop
+	for i := n; i < lWorkers; i++ {
+		p.workers[i].join()
+		p.workers[i] = nil
+	}
+
+	// Remove stopped workers from slice
+	p.workers = p.workers[:n]
+}
+
+// GetSize returns the current size of the pool.
+func (p *Pool) GetSize() int {
+	p.workerMut.Lock()
+	defer p.workerMut.Unlock()
+
+	return len(p.workers)
+}
+
+// Close will terminate all workers and close the job channel of this Pool.
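+// Callers typically defer Close immediately after constructing the pool.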
+func (p *Pool) Close() {
+	p.SetSize(0)
+	close(p.reqChan)
+}
+
+//------------------------------------------------------------------------------
diff --git a/vendor/github.com/Jeffail/tunny/tunny_logo.png b/vendor/github.com/Jeffail/tunny/tunny_logo.png
new file mode 100644
index 00000000000..16b7b73b5d9
Binary files /dev/null and b/vendor/github.com/Jeffail/tunny/tunny_logo.png differ
diff --git a/vendor/github.com/Jeffail/tunny/worker.go b/vendor/github.com/Jeffail/tunny/worker.go
new file mode 100644
index 00000000000..5d9c522a67d
--- /dev/null
+++ b/vendor/github.com/Jeffail/tunny/worker.go
@@ -0,0 +1,126 @@
+// Copyright (c) 2014 Ashley Jeffs
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package tunny
+
+//------------------------------------------------------------------------------
+
+// workRequest is a struct containing context representing a worker's intention
+// to receive a work payload.
+type workRequest struct {
+	// jobChan is used to send the payload to this worker.
+	jobChan chan<- interface{}
+
+	// retChan is used to read the result from this worker.
+	retChan <-chan interface{}
+
+	// interruptFunc can be called to cancel a running job. When called it is no
+	// longer necessary to read from retChan.
+	interruptFunc func()
+}
+
+//------------------------------------------------------------------------------
+
+// workerWrapper takes a Worker implementation and wraps it within a goroutine
+// and channel arrangement. The workerWrapper is responsible for managing the
+// lifetime of both the Worker and the goroutine.
+type workerWrapper struct {
+	worker        Worker
+	interruptChan chan struct{}
+
+	// reqChan is NOT owned by this type, it is used to send requests for work.
+	reqChan chan<- workRequest
+
+	// closeChan can be closed in order to cleanly shutdown this worker.
+	closeChan chan struct{}
+
+	// closedChan is closed by the run() goroutine when it exits.
+	closedChan chan struct{}
+}
+
+func newWorkerWrapper(
+	reqChan chan<- workRequest,
+	worker Worker,
+) *workerWrapper {
+	w := workerWrapper{
+		worker:        worker,
+		interruptChan: make(chan struct{}),
+		reqChan:       reqChan,
+		closeChan:     make(chan struct{}),
+		closedChan:    make(chan struct{}),
+	}
+
+	go w.run()
+
+	return &w
+}
+
+//------------------------------------------------------------------------------
+
+func (w *workerWrapper) interrupt() {
+	close(w.interruptChan)
+	w.worker.Interrupt()
+}
+
+func (w *workerWrapper) run() {
+	jobChan, retChan := make(chan interface{}), make(chan interface{})
+	defer func() {
+		w.worker.Terminate()
+		close(retChan)
+		close(w.closedChan)
+	}()
+
+	for {
+		// NOTE: Blocking here will prevent the worker from closing down.
+		w.worker.BlockUntilReady()
+		select {
+		case w.reqChan <- workRequest{
+			jobChan:       jobChan,
+			retChan:       retChan,
+			interruptFunc: w.interrupt,
+		}:
+			select {
+			case payload := <-jobChan:
+				result := w.worker.Process(payload)
+				select {
+				case retChan <- result:
+				case <-w.interruptChan:
+					w.interruptChan = make(chan struct{})
+				}
+			case <-w.interruptChan:
+				w.interruptChan = make(chan struct{})
+			}
+		case <-w.closeChan:
+			return
+		}
+	}
+}
+
+//------------------------------------------------------------------------------
+
+func (w *workerWrapper) stop() {
+	close(w.closeChan)
+}
+
+func (w *workerWrapper) join() {
+	<-w.closedChan
+}
+
+//------------------------------------------------------------------------------
diff --git a/vendor/github.com/Masterminds/goutils/.travis.yml b/vendor/github.com/Masterminds/goutils/.travis.yml
new file mode 100644
index 00000000000..4025e01ec4a
--- /dev/null
+++ b/vendor/github.com/Masterminds/goutils/.travis.yml
@@ -0,0 +1,18 @@
+language: go
+
+go:
+  - 1.6
+  - 1.7
+  - 1.8
+  - tip
+
+script:
+  - go test -v
+
+notifications:
+  webhooks:
+    urls:
+      - https://webhooks.gitter.im/e/06e3328629952dabe3e0
+    on_success: change  # options: [always|never|change] default: always
+    on_failure: always  # options: [always|never|change] default: always
+    on_start: never     # options: [always|never|change] default: always
diff --git a/vendor/github.com/Masterminds/goutils/CHANGELOG.md b/vendor/github.com/Masterminds/goutils/CHANGELOG.md
new file mode 100644
index 00000000000..d700ec47f2b
--- /dev/null
+++ b/vendor/github.com/Masterminds/goutils/CHANGELOG.md
@@ -0,0 +1,8 @@
+# 1.0.1 (2017-05-31)
+
+## Fixed
+- #21: Fix generation of alphanumeric strings (thanks @dbarranco)
+
+# 1.0.0 (2014-04-30)
+
+- Initial release.
diff --git a/vendor/github.com/Masterminds/goutils/LICENSE.txt b/vendor/github.com/Masterminds/goutils/LICENSE.txt
new file mode 100644
index 00000000000..d6456956733
--- /dev/null
+++ b/vendor/github.com/Masterminds/goutils/LICENSE.txt
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity.
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/Masterminds/goutils/README.md b/vendor/github.com/Masterminds/goutils/README.md
new file mode 100644
index 00000000000..163ffe72a82
--- /dev/null
+++ b/vendor/github.com/Masterminds/goutils/README.md
@@ -0,0 +1,70 @@
+GoUtils
+===========
+[![Stability: Maintenance](https://masterminds.github.io/stability/maintenance.svg)](https://masterminds.github.io/stability/maintenance.html)
+[![GoDoc](https://godoc.org/github.com/Masterminds/goutils?status.png)](https://godoc.org/github.com/Masterminds/goutils) [![Build Status](https://travis-ci.org/Masterminds/goutils.svg?branch=master)](https://travis-ci.org/Masterminds/goutils) [![Build status](https://ci.appveyor.com/api/projects/status/sc2b1ew0m7f0aiju?svg=true)](https://ci.appveyor.com/project/mattfarina/goutils)
+
+
+GoUtils provides users with utility functions to manipulate strings in various ways. It is a Go implementation of some
+string manipulation libraries from Java Apache Commons. GoUtils includes the following Java Apache Commons classes:
+* WordUtils
+* RandomStringUtils
+* StringUtils (partial implementation)
+
+## Installation
+If you have Go set up on your system, from the GOPATH directory within the command line/terminal, enter this:
+
+    go get github.com/Masterminds/goutils
+
+If you do not have Go set up on your system, please follow the [Go installation directions from the documentation](http://golang.org/doc/install), and then follow the instructions above to install GoUtils.
+
+
+## Documentation
+GoUtils doc is available here: [![GoDoc](https://godoc.org/github.com/Masterminds/goutils?status.png)](https://godoc.org/github.com/Masterminds/goutils)
+
+
+## Usage
+The code snippets below show examples of how to use GoUtils. Some functions return errors while others do not. The first example below, which does not return an error, is the `Initials` function (located within the `wordutils.go` file).
+
+    package main
+
+    import (
+        "fmt"
+        "github.com/Masterminds/goutils"
+    )
+
+    func main() {
+
+        // EXAMPLE 1: A goutils function which returns no errors
+        fmt.Println(goutils.Initials("John Doe Foo")) // Prints out "JDF"
+
+    }
+
+Some functions return errors, mainly due to illegal arguments used as parameters. The code example below illustrates how to deal with a function that returns an error. In this instance, the function is the `Random` function (located within the `randomstringutils.go` file).
+
+    package main
+
+    import (
+        "fmt"
+        "github.com/Masterminds/goutils"
+    )
+
+    func main() {
+
+        // EXAMPLE 2: A goutils function which returns an error
+        rand1, err1 := goutils.Random(-1, 0, 0, true, true)
+
+        if err1 != nil {
+            fmt.Println(err1) // Prints out error message because -1 was entered as the first parameter in goutils.Random(...)
+        } else {
+            fmt.Println(rand1)
+        }
+
+    }
+
+## License
+GoUtils is licensed under the Apache License, Version 2.0. Please check the LICENSE.txt file or visit http://www.apache.org/licenses/LICENSE-2.0 for a copy of the license.
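+
+## Another Example
+The snippet below is an additional illustration (it is not part of the upstream README); it shows the variadic delimiter parameter of `Initials`, as implemented in `wordutils.go`.
+
+    package main
+
+    import (
+        "fmt"
+        "github.com/Masterminds/goutils"
+    )
+
+    func main() {
+
+        // EXAMPLE 3: custom delimiters ('.' and '-') mark word boundaries instead of whitespace
+        fmt.Println(goutils.Initials("john.doe-foo", '.', '-')) // Prints out "jdf"
+
+    }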
+
+## Issue Reporting
+Make suggestions or report issues using the GitHub issue tracker: https://github.com/Masterminds/goutils/issues
+
+## Website
+* [GoUtils webpage](http://Masterminds.github.io/goutils/)
diff --git a/vendor/github.com/Masterminds/goutils/appveyor.yml b/vendor/github.com/Masterminds/goutils/appveyor.yml
new file mode 100644
index 00000000000..657564a8474
--- /dev/null
+++ b/vendor/github.com/Masterminds/goutils/appveyor.yml
@@ -0,0 +1,21 @@
+version: build-{build}.{branch}
+
+clone_folder: C:\gopath\src\github.com\Masterminds\goutils
+shallow_clone: true
+
+environment:
+  GOPATH: C:\gopath
+
+platform:
+  - x64
+
+build: off
+
+install:
+  - go version
+  - go env
+
+test_script:
+  - go test -v
+
+deploy: off
diff --git a/vendor/github.com/Masterminds/goutils/cryptorandomstringutils.go b/vendor/github.com/Masterminds/goutils/cryptorandomstringutils.go
new file mode 100644
index 00000000000..8dbd9248583
--- /dev/null
+++ b/vendor/github.com/Masterminds/goutils/cryptorandomstringutils.go
@@ -0,0 +1,230 @@
+/*
+Copyright 2014 Alexander Okoli
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package goutils
+
+import (
+	"crypto/rand"
+	"fmt"
+	"math"
+	"math/big"
+	"unicode"
+)
+
+/*
+CryptoRandomNonAlphaNumeric creates a random string whose length is the number of characters specified.
+Characters will be chosen from the set of all characters (ASCII/Unicode values between 0 and 2,147,483,647 (math.MaxInt32)).
+
+Parameter:
+	count - the length of random string to create
+
+Returns:
+	string - the random string
+	error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...)
+*/
+func CryptoRandomNonAlphaNumeric(count int) (string, error) {
+	return CryptoRandomAlphaNumericCustom(count, false, false)
+}
+
+/*
+CryptoRandomAscii creates a random string whose length is the number of characters specified.
+Characters will be chosen from the set of characters whose ASCII value is between 32 and 126 (inclusive).
+
+Parameter:
+	count - the length of random string to create
+
+Returns:
+	string - the random string
+	error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...)
+*/
+func CryptoRandomAscii(count int) (string, error) {
+	return CryptoRandom(count, 32, 127, false, false)
+}
+
+/*
+CryptoRandomNumeric creates a random string whose length is the number of characters specified.
+Characters will be chosen from the set of numeric characters.
+
+Parameter:
+	count - the length of random string to create
+
+Returns:
+	string - the random string
+	error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...)
+*/
+func CryptoRandomNumeric(count int) (string, error) {
+	return CryptoRandom(count, 0, 0, false, true)
+}
+
+/*
+CryptoRandomAlphabetic creates a random string whose length is the number of characters specified.
+Characters will be chosen from the set of alphabetic characters.
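+
+Example (illustrative; not from the upstream documentation):
+
+	s, err := CryptoRandomAlphabetic(10) // s holds 10 cryptographically random letters, e.g. "kwBherGZat"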
+
+Parameter:
+	count - the length of random string to create
+
+Returns:
+	string - the random string
+	error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...)
+*/
+func CryptoRandomAlphabetic(count int) (string, error) {
+	return CryptoRandom(count, 0, 0, true, false)
+}
+
+/*
+CryptoRandomAlphaNumeric creates a random string whose length is the number of characters specified.
+Characters will be chosen from the set of alpha-numeric characters.
+
+Parameter:
+	count - the length of random string to create
+
+Returns:
+	string - the random string
+	error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...)
+*/
+func CryptoRandomAlphaNumeric(count int) (string, error) {
+	return CryptoRandom(count, 0, 0, true, true)
+}
+
+/*
+CryptoRandomAlphaNumericCustom creates a random string whose length is the number of characters specified.
+Characters will be chosen from the set of alpha-numeric characters as indicated by the arguments.
+
+Parameters:
+	count - the length of random string to create
+	letters - if true, generated string may include alphabetic characters
+	numbers - if true, generated string may include numeric characters
+
+Returns:
+	string - the random string
+	error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...)
+*/
+func CryptoRandomAlphaNumericCustom(count int, letters bool, numbers bool) (string, error) {
+	return CryptoRandom(count, 0, 0, letters, numbers)
+}
+
+/*
+CryptoRandom creates a random string based on a variety of options, using golang's crypto/rand source of randomness.
+If the parameters start and end are both 0, they default to ' ' and 'z' respectively, so the ASCII printable characters will be used,
+unless letters and numbers are both false, in which case start and end are set to 0 and math.MaxInt32, respectively.
+If chars is not nil, characters stored in chars that are between start and end are chosen.
+
+Parameters:
+	count - the length of random string to create
+	start - the position in set of chars (ASCII/Unicode int) to start at
+	end - the position in set of chars (ASCII/Unicode int) to end before
+	letters - if true, generated string may include alphabetic characters
+	numbers - if true, generated string may include numeric characters
+	chars - the set of chars to choose randomly from. If nil, then it will use the set of all chars.
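+
+Example (illustrative; not from the upstream documentation):
+
+	pin, err := CryptoRandom(6, 0, 0, false, true) // six cryptographically random decimal digits, e.g. "493027"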
+ +Returns: + string - the random string + error - an error stemming from invalid parameters: if count < 0; or the provided chars array is empty; or end <= start; or end > len(chars) +*/ +func CryptoRandom(count int, start int, end int, letters bool, numbers bool, chars ...rune) (string, error) { + if count == 0 { + return "", nil + } else if count < 0 { + err := fmt.Errorf("randomstringutils illegal argument: Requested random string length %v is less than 0.", count) // equiv to err := errors.New("...") + return "", err + } + if chars != nil && len(chars) == 0 { + err := fmt.Errorf("randomstringutils illegal argument: The chars array must not be empty") + return "", err + } + + if start == 0 && end == 0 { + if chars != nil { + end = len(chars) + } else { + if !letters && !numbers { + end = math.MaxInt32 + } else { + end = 'z' + 1 + start = ' ' + } + } + } else { + if end <= start { + err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) must be greater than start (%v)", end, start) + return "", err + } + + if chars != nil && end > len(chars) { + err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) cannot be greater than len(chars) (%v)", end, len(chars)) + return "", err + } + } + + buffer := make([]rune, count) + gap := end - start + + // high-surrogates range, (\uD800-\uDBFF) = 55296 - 56319 + // low-surrogates range, (\uDC00-\uDFFF) = 56320 - 57343 + + for count != 0 { + count-- + var ch rune + if chars == nil { + ch = rune(getCryptoRandomInt(gap) + int64(start)) + } else { + ch = chars[getCryptoRandomInt(gap)+int64(start)] + } + + if letters && unicode.IsLetter(ch) || numbers && unicode.IsDigit(ch) || !letters && !numbers { + if ch >= 56320 && ch <= 57343 { // low surrogate range + if count == 0 { + count++ + } else { + // Insert low surrogate + buffer[count] = ch + count-- + // Insert high surrogate + buffer[count] = rune(55296 + getCryptoRandomInt(128)) + } + } else if ch >= 55296 && ch <= 56191 { // High surrogates range (Partial) + if count == 0 { + count++ + } else { + // Insert low surrogate + buffer[count] = rune(56320 + getCryptoRandomInt(128)) + count-- + // Insert high surrogate + buffer[count] = ch + } + } else if ch >= 56192 && ch <= 56319 { + // private high surrogate, skip it + count++ + } else { + // not one of the surrogates* + buffer[count] = ch + } + } else { + count++ + } + } + return string(buffer), nil +} + +func getCryptoRandomInt(count int) int64 { + nBig, err := rand.Int(rand.Reader, big.NewInt(int64(count))) + if err != nil { + panic(err) + } + return nBig.Int64() +} diff --git a/vendor/github.com/Masterminds/goutils/randomstringutils.go b/vendor/github.com/Masterminds/goutils/randomstringutils.go new file mode 100644 index 00000000000..272670231ab --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/randomstringutils.go @@ -0,0 +1,248 @@ +/* +Copyright 2014 Alexander Okoli + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package goutils + +import ( + "fmt" + "math" + "math/rand" + "time" + "unicode" +) + +// RANDOM provides the time-based seed used to generate random numbers +var RANDOM = rand.New(rand.NewSource(time.Now().UnixNano())) + +/* +RandomNonAlphaNumeric creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of all characters (ASCII/Unicode values between 0 to 2,147,483,647 (math.MaxInt32)). + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) +*/ +func RandomNonAlphaNumeric(count int) (string, error) { + return RandomAlphaNumericCustom(count, false, false) +} + +/* +RandomAscii creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of characters whose ASCII value is between 32 and 126 (inclusive). + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) +*/ +func RandomAscii(count int) (string, error) { + return Random(count, 32, 127, false, false) +} + +/* +RandomNumeric creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of numeric characters. + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) +*/ +func RandomNumeric(count int) (string, error) { + return Random(count, 0, 0, false, true) +} + +/* +RandomAlphabetic creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of alphabetic characters. + +Parameters: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) +*/ +func RandomAlphabetic(count int) (string, error) { + return Random(count, 0, 0, true, false) +} + +/* +RandomAlphaNumeric creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of alpha-numeric characters. + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) +*/ +func RandomAlphaNumeric(count int) (string, error) { + return Random(count, 0, 0, true, true) +} + +/* +RandomAlphaNumericCustom creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of alpha-numeric characters as indicated by the arguments. + +Parameters: + count - the length of random string to create + letters - if true, generated string may include alphabetic characters + numbers - if true, generated string may include numeric characters + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) +*/ +func RandomAlphaNumericCustom(count int, letters bool, numbers bool) (string, error) { + return Random(count, 0, 0, letters, numbers) +} + +/* +Random creates a random string based on a variety of options, using default source of randomness. 
+This method has exactly the same semantics as RandomSeed(int, int, int, bool, bool, []char, *rand.Rand), but +instead of using an externally supplied source of randomness, it uses the internal *rand.Rand instance. + +Parameters: + count - the length of random string to create + start - the position in set of chars (ASCII/Unicode int) to start at + end - the position in set of chars (ASCII/Unicode int) to end before + letters - if true, generated string may include alphabetic characters + numbers - if true, generated string may include numeric characters + chars - the set of chars to choose randoms from. If nil, then it will use the set of all chars. + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) +*/ +func Random(count int, start int, end int, letters bool, numbers bool, chars ...rune) (string, error) { + return RandomSeed(count, start, end, letters, numbers, chars, RANDOM) +} + +/* +RandomSeed creates a random string based on a variety of options, using supplied source of randomness. +If the parameters start and end are both 0, start and end are set to ' ' and 'z', the ASCII printable characters, will be used, +unless letters and numbers are both false, in which case, start and end are set to 0 and math.MaxInt32, respectively. +If chars is not nil, characters stored in chars that are between start and end are chosen. +This method accepts a user-supplied *rand.Rand instance to use as a source of randomness. By seeding a single *rand.Rand instance +with a fixed seed and using it for each call, the same random sequence of strings can be generated repeatedly and predictably. + +Parameters: + count - the length of random string to create + start - the position in set of chars (ASCII/Unicode decimals) to start at + end - the position in set of chars (ASCII/Unicode decimals) to end before + letters - if true, generated string may include alphabetic characters + numbers - if true, generated string may include numeric characters + chars - the set of chars to choose randoms from. If nil, then it will use the set of all chars. + random - a source of randomness. 
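+
+Example (illustrative; not from the upstream documentation). Seeding with a constant makes the output reproducible, which is handy in tests:
+
+	seeded := rand.New(rand.NewSource(42))
+	s, _ := RandomSeed(8, 0, 0, true, true, nil, seeded) // same 8-character alphanumeric string on every run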
+ +Returns: + string - the random string + error - an error stemming from invalid parameters: if count < 0; or the provided chars array is empty; or end <= start; or end > len(chars) +*/ +func RandomSeed(count int, start int, end int, letters bool, numbers bool, chars []rune, random *rand.Rand) (string, error) { + + if count == 0 { + return "", nil + } else if count < 0 { + err := fmt.Errorf("randomstringutils illegal argument: Requested random string length %v is less than 0.", count) // equiv to err := errors.New("...") + return "", err + } + if chars != nil && len(chars) == 0 { + err := fmt.Errorf("randomstringutils illegal argument: The chars array must not be empty") + return "", err + } + + if start == 0 && end == 0 { + if chars != nil { + end = len(chars) + } else { + if !letters && !numbers { + end = math.MaxInt32 + } else { + end = 'z' + 1 + start = ' ' + } + } + } else { + if end <= start { + err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) must be greater than start (%v)", end, start) + return "", err + } + + if chars != nil && end > len(chars) { + err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) cannot be greater than len(chars) (%v)", end, len(chars)) + return "", err + } + } + + buffer := make([]rune, count) + gap := end - start + + // high-surrogates range, (\uD800-\uDBFF) = 55296 - 56319 + // low-surrogates range, (\uDC00-\uDFFF) = 56320 - 57343 + + for count != 0 { + count-- + var ch rune + if chars == nil { + ch = rune(random.Intn(gap) + start) + } else { + ch = chars[random.Intn(gap)+start] + } + + if letters && unicode.IsLetter(ch) || numbers && unicode.IsDigit(ch) || !letters && !numbers { + if ch >= 56320 && ch <= 57343 { // low surrogate range + if count == 0 { + count++ + } else { + // Insert low surrogate + buffer[count] = ch + count-- + // Insert high surrogate + buffer[count] = rune(55296 + random.Intn(128)) + } + } else if ch >= 55296 && ch <= 56191 { // High surrogates range (Partial) + if count == 0 { + count++ + } else { + // Insert low surrogate + buffer[count] = rune(56320 + random.Intn(128)) + count-- + // Insert high surrogate + buffer[count] = ch + } + } else if ch >= 56192 && ch <= 56319 { + // private high surrogate, skip it + count++ + } else { + // not one of the surrogates* + buffer[count] = ch + } + } else { + count++ + } + } + return string(buffer), nil +} diff --git a/vendor/github.com/Masterminds/goutils/stringutils.go b/vendor/github.com/Masterminds/goutils/stringutils.go new file mode 100644 index 00000000000..741bb530e8a --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/stringutils.go @@ -0,0 +1,240 @@ +/* +Copyright 2014 Alexander Okoli + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package goutils + +import ( + "bytes" + "fmt" + "strings" + "unicode" +) + +// Typically returned by functions where a searched item cannot be found +const INDEX_NOT_FOUND = -1 + +/* +Abbreviate abbreviates a string using ellipses. This will turn the string "Now is the time for all good men" into "Now is the time for..." 
+ +Specifically, the algorithm is as follows: + + - If str is less than maxWidth characters long, return it. + - Else abbreviate it to (str[0:maxWidth - 3] + "..."). + - If maxWidth is less than 4, return an illegal argument error. + - In no case will it return a string of length greater than maxWidth. + +Parameters: + str - the string to check + maxWidth - maximum length of result string, must be at least 4 + +Returns: + string - abbreviated string + error - if the width is too small +*/ +func Abbreviate(str string, maxWidth int) (string, error) { + return AbbreviateFull(str, 0, maxWidth) +} + +/* +AbbreviateFull abbreviates a string using ellipses. This will turn the string "Now is the time for all good men" into "...is the time for..." +This function works like Abbreviate(string, int), but allows you to specify a "left edge" offset. Note that this left edge is not +necessarily going to be the leftmost character in the result, or the first character following the ellipses, but it will appear +somewhere in the result. +In no case will it return a string of length greater than maxWidth. + +Parameters: + str - the string to check + offset - left edge of source string + maxWidth - maximum length of result string, must be at least 4 + +Returns: + string - abbreviated string + error - if the width is too small +*/ +func AbbreviateFull(str string, offset int, maxWidth int) (string, error) { + if str == "" { + return "", nil + } + if maxWidth < 4 { + err := fmt.Errorf("stringutils illegal argument: Minimum abbreviation width is 4") + return "", err + } + if len(str) <= maxWidth { + return str, nil + } + if offset > len(str) { + offset = len(str) + } + if len(str)-offset < (maxWidth - 3) { // 15 - 5 < 10 - 3 = 10 < 7 + offset = len(str) - (maxWidth - 3) + } + abrevMarker := "..." + if offset <= 4 { + return str[0:maxWidth-3] + abrevMarker, nil // str.substring(0, maxWidth - 3) + abrevMarker; + } + if maxWidth < 7 { + err := fmt.Errorf("stringutils illegal argument: Minimum abbreviation width with offset is 7") + return "", err + } + if (offset + maxWidth - 3) < len(str) { // 5 + (10-3) < 15 = 12 < 15 + abrevStr, _ := Abbreviate(str[offset:len(str)], (maxWidth - 3)) + return abrevMarker + abrevStr, nil // abrevMarker + abbreviate(str.substring(offset), maxWidth - 3); + } + return abrevMarker + str[(len(str)-(maxWidth-3)):len(str)], nil // abrevMarker + str.substring(str.length() - (maxWidth - 3)); +} + +/* +DeleteWhiteSpace deletes all whitespaces from a string as defined by unicode.IsSpace(rune). +It returns the string without whitespaces. + +Parameter: + str - the string to delete whitespace from, may be nil + +Returns: + the string without whitespaces +*/ +func DeleteWhiteSpace(str string) string { + if str == "" { + return str + } + sz := len(str) + var chs bytes.Buffer + count := 0 + for i := 0; i < sz; i++ { + ch := rune(str[i]) + if !unicode.IsSpace(ch) { + chs.WriteRune(ch) + count++ + } + } + if count == sz { + return str + } + return chs.String() +} + +/* +IndexOfDifference compares two strings, and returns the index at which the strings begin to differ. 
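+
+Example (illustrative; the values follow from the implementation below):
+
+	IndexOfDifference("ab", "abxyz")  // returns 2
+	IndexOfDifference("abcde", "xyz") // returns 0
+	IndexOfDifference("abc", "abc")   // returns -1 (INDEX_NOT_FOUND)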
+ +Parameters: + str1 - the first string + str2 - the second string + +Returns: + the index where str1 and str2 begin to differ; -1 if they are equal +*/ +func IndexOfDifference(str1 string, str2 string) int { + if str1 == str2 { + return INDEX_NOT_FOUND + } + if IsEmpty(str1) || IsEmpty(str2) { + return 0 + } + var i int + for i = 0; i < len(str1) && i < len(str2); i++ { + if rune(str1[i]) != rune(str2[i]) { + break + } + } + if i < len(str2) || i < len(str1) { + return i + } + return INDEX_NOT_FOUND +} + +/* +IsBlank checks if a string is whitespace or empty (""). Observe the following behavior: + + goutils.IsBlank("") = true + goutils.IsBlank(" ") = true + goutils.IsBlank("bob") = false + goutils.IsBlank(" bob ") = false + +Parameter: + str - the string to check + +Returns: + true - if the string is whitespace or empty ("") +*/ +func IsBlank(str string) bool { + strLen := len(str) + if str == "" || strLen == 0 { + return true + } + for i := 0; i < strLen; i++ { + if unicode.IsSpace(rune(str[i])) == false { + return false + } + } + return true +} + +/* +IndexOf returns the index of the first instance of sub in str, with the search beginning from the +index start point specified. -1 is returned if sub is not present in str. + +An empty string ("") will return -1 (INDEX_NOT_FOUND). A negative start position is treated as zero. +A start position greater than the string length returns -1. + +Parameters: + str - the string to check + sub - the substring to find + start - the start position; negative treated as zero + +Returns: + the first index where the sub string was found (always >= start) +*/ +func IndexOf(str string, sub string, start int) int { + + if start < 0 { + start = 0 + } + + if len(str) < start { + return INDEX_NOT_FOUND + } + + if IsEmpty(str) || IsEmpty(sub) { + return INDEX_NOT_FOUND + } + + partialIndex := strings.Index(str[start:len(str)], sub) + if partialIndex == -1 { + return INDEX_NOT_FOUND + } + return partialIndex + start +} + +// IsEmpty checks if a string is empty (""). Returns true if empty, and false otherwise. +func IsEmpty(str string) bool { + return len(str) == 0 +} + +// Returns either the passed in string, or if the string is empty, the value of defaultStr. +func DefaultString(str string, defaultStr string) string { + if IsEmpty(str) { + return defaultStr + } + return str +} + +// Returns either the passed in string, or if the string is whitespace, empty (""), the value of defaultStr. +func DefaultIfBlank(str string, defaultStr string) string { + if IsBlank(str) { + return defaultStr + } + return str +} diff --git a/vendor/github.com/Masterminds/goutils/wordutils.go b/vendor/github.com/Masterminds/goutils/wordutils.go new file mode 100644 index 00000000000..034cad8e210 --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/wordutils.go @@ -0,0 +1,357 @@ +/* +Copyright 2014 Alexander Okoli + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package goutils provides utility functions to manipulate strings in various ways. 
+The code snippets below show examples of how to use goutils. Some functions return +errors while others do not, so usage would vary as a result. + +Example: + + package main + + import ( + "fmt" + "github.com/aokoli/goutils" + ) + + func main() { + + // EXAMPLE 1: A goutils function which returns no errors + fmt.Println (goutils.Initials("John Doe Foo")) // Prints out "JDF" + + + + // EXAMPLE 2: A goutils function which returns an error + rand1, err1 := goutils.Random (-1, 0, 0, true, true) + + if err1 != nil { + fmt.Println(err1) // Prints out error message because -1 was entered as the first parameter in goutils.Random(...) + } else { + fmt.Println(rand1) + } + } +*/ +package goutils + +import ( + "bytes" + "strings" + "unicode" +) + +// VERSION indicates the current version of goutils +const VERSION = "1.0.0" + +/* +Wrap wraps a single line of text, identifying words by ' '. +New lines will be separated by '\n'. Very long words, such as URLs will not be wrapped. +Leading spaces on a new line are stripped. Trailing spaces are not stripped. + +Parameters: + str - the string to be word wrapped + wrapLength - the column (a column can fit only one character) to wrap the words at, less than 1 is treated as 1 + +Returns: + a line with newlines inserted +*/ +func Wrap(str string, wrapLength int) string { + return WrapCustom(str, wrapLength, "", false) +} + +/* +WrapCustom wraps a single line of text, identifying words by ' '. +Leading spaces on a new line are stripped. Trailing spaces are not stripped. + +Parameters: + str - the string to be word wrapped + wrapLength - the column number (a column can fit only one character) to wrap the words at, less than 1 is treated as 1 + newLineStr - the string to insert for a new line, "" uses '\n' + wrapLongWords - true if long words (such as URLs) should be wrapped + +Returns: + a line with newlines inserted +*/ +func WrapCustom(str string, wrapLength int, newLineStr string, wrapLongWords bool) string { + + if str == "" { + return "" + } + if newLineStr == "" { + newLineStr = "\n" // TODO Assumes "\n" is seperator. Explore SystemUtils.LINE_SEPARATOR from Apache Commons + } + if wrapLength < 1 { + wrapLength = 1 + } + + inputLineLength := len(str) + offset := 0 + + var wrappedLine bytes.Buffer + + for inputLineLength-offset > wrapLength { + + if rune(str[offset]) == ' ' { + offset++ + continue + } + + end := wrapLength + offset + 1 + spaceToWrapAt := strings.LastIndex(str[offset:end], " ") + offset + + if spaceToWrapAt >= offset { + // normal word (not longer than wrapLength) + wrappedLine.WriteString(str[offset:spaceToWrapAt]) + wrappedLine.WriteString(newLineStr) + offset = spaceToWrapAt + 1 + + } else { + // long word or URL + if wrapLongWords { + end := wrapLength + offset + // long words are wrapped one line at a time + wrappedLine.WriteString(str[offset:end]) + wrappedLine.WriteString(newLineStr) + offset += wrapLength + } else { + // long words aren't wrapped, just extended beyond limit + end := wrapLength + offset + index := strings.IndexRune(str[end:len(str)], ' ') + if index == -1 { + wrappedLine.WriteString(str[offset:len(str)]) + offset = inputLineLength + } else { + spaceToWrapAt = index + end + wrappedLine.WriteString(str[offset:spaceToWrapAt]) + wrappedLine.WriteString(newLineStr) + offset = spaceToWrapAt + 1 + } + } + } + } + + wrappedLine.WriteString(str[offset:len(str)]) + + return wrappedLine.String() + +} + +/* +Capitalize capitalizes all the delimiter separated words in a string. Only the first letter of each word is changed. 
+To convert the rest of each word to lowercase at the same time, use CapitalizeFully(str string, delimiters ...rune). +The delimiters represent a set of characters understood to separate words. The first string character +and the first non-delimiter character after a delimiter will be capitalized. A "" input string returns "". +Capitalization uses the Unicode title case, normally equivalent to upper case. + +Parameters: + str - the string to capitalize + delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be delimeter + +Returns: + capitalized string +*/ +func Capitalize(str string, delimiters ...rune) string { + + var delimLen int + + if delimiters == nil { + delimLen = -1 + } else { + delimLen = len(delimiters) + } + + if str == "" || delimLen == 0 { + return str + } + + buffer := []rune(str) + capitalizeNext := true + for i := 0; i < len(buffer); i++ { + ch := buffer[i] + if isDelimiter(ch, delimiters...) { + capitalizeNext = true + } else if capitalizeNext { + buffer[i] = unicode.ToTitle(ch) + capitalizeNext = false + } + } + return string(buffer) + +} + +/* +CapitalizeFully converts all the delimiter separated words in a string into capitalized words, that is each word is made up of a +titlecase character and then a series of lowercase characters. The delimiters represent a set of characters understood +to separate words. The first string character and the first non-delimiter character after a delimiter will be capitalized. +Capitalization uses the Unicode title case, normally equivalent to upper case. + +Parameters: + str - the string to capitalize fully + delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be delimeter + +Returns: + capitalized string +*/ +func CapitalizeFully(str string, delimiters ...rune) string { + + var delimLen int + + if delimiters == nil { + delimLen = -1 + } else { + delimLen = len(delimiters) + } + + if str == "" || delimLen == 0 { + return str + } + str = strings.ToLower(str) + return Capitalize(str, delimiters...) +} + +/* +Uncapitalize uncapitalizes all the whitespace separated words in a string. Only the first letter of each word is changed. +The delimiters represent a set of characters understood to separate words. The first string character and the first non-delimiter +character after a delimiter will be uncapitalized. Whitespace is defined by unicode.IsSpace(char). + +Parameters: + str - the string to uncapitalize fully + delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be delimeter + +Returns: + uncapitalized string +*/ +func Uncapitalize(str string, delimiters ...rune) string { + + var delimLen int + + if delimiters == nil { + delimLen = -1 + } else { + delimLen = len(delimiters) + } + + if str == "" || delimLen == 0 { + return str + } + + buffer := []rune(str) + uncapitalizeNext := true // TODO Always makes capitalize/un apply to first char. + for i := 0; i < len(buffer); i++ { + ch := buffer[i] + if isDelimiter(ch, delimiters...) { + uncapitalizeNext = true + } else if uncapitalizeNext { + buffer[i] = unicode.ToLower(ch) + uncapitalizeNext = false + } + } + return string(buffer) +} + +/* +SwapCase swaps the case of a string using a word based algorithm. 
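+
+Example (illustrative; the result follows from the conversion algorithm described below):
+
+	SwapCase("The dog has a BONE") // returns "tHE DOG HAS A bone"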
+ +Conversion algorithm: + + Upper case character converts to Lower case + Title case character converts to Lower case + Lower case character after Whitespace or at start converts to Title case + Other Lower case character converts to Upper case + Whitespace is defined by unicode.IsSpace(char). + +Parameters: + str - the string to swap case + +Returns: + the changed string +*/ +func SwapCase(str string) string { + if str == "" { + return str + } + buffer := []rune(str) + + whitespace := true + + for i := 0; i < len(buffer); i++ { + ch := buffer[i] + if unicode.IsUpper(ch) { + buffer[i] = unicode.ToLower(ch) + whitespace = false + } else if unicode.IsTitle(ch) { + buffer[i] = unicode.ToLower(ch) + whitespace = false + } else if unicode.IsLower(ch) { + if whitespace { + buffer[i] = unicode.ToTitle(ch) + whitespace = false + } else { + buffer[i] = unicode.ToUpper(ch) + } + } else { + whitespace = unicode.IsSpace(ch) + } + } + return string(buffer) +} + +/* +Initials extracts the initial letters from each word in the string. The first letter of the string and all first +letters after the defined delimiters are returned as a new string. Their case is not changed. If the delimiters +parameter is excluded, then Whitespace is used. Whitespace is defined by unicode.IsSpacea(char). An empty delimiter array returns an empty string. + +Parameters: + str - the string to get initials from + delimiters - set of characters to determine words, exclusion of this parameter means whitespace would be delimeter +Returns: + string of initial letters +*/ +func Initials(str string, delimiters ...rune) string { + if str == "" { + return str + } + if delimiters != nil && len(delimiters) == 0 { + return "" + } + strLen := len(str) + var buf bytes.Buffer + lastWasGap := true + for i := 0; i < strLen; i++ { + ch := rune(str[i]) + + if isDelimiter(ch, delimiters...) { + lastWasGap = true + } else if lastWasGap { + buf.WriteRune(ch) + lastWasGap = false + } + } + return buf.String() +} + +// private function (lower case func name) +func isDelimiter(ch rune, delimiters ...rune) bool { + if delimiters == nil { + return unicode.IsSpace(ch) + } + for _, delimiter := range delimiters { + if ch == delimiter { + return true + } + } + return false +} diff --git a/vendor/github.com/Masterminds/semver/.travis.yml b/vendor/github.com/Masterminds/semver/.travis.yml new file mode 100644 index 00000000000..096369d44d9 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/.travis.yml @@ -0,0 +1,29 @@ +language: go + +go: + - 1.6.x + - 1.7.x + - 1.8.x + - 1.9.x + - 1.10.x + - 1.11.x + - 1.12.x + - tip + +# Setting sudo access to false will let Travis CI use containers rather than +# VMs to run the tests. 
For more details see: +# - http://docs.travis-ci.com/user/workers/container-based-infrastructure/ +# - http://docs.travis-ci.com/user/workers/standard-infrastructure/ +sudo: false + +script: + - make setup + - make test + +notifications: + webhooks: + urls: + - https://webhooks.gitter.im/e/06e3328629952dabe3e0 + on_success: change # options: [always|never|change] default: always + on_failure: always # options: [always|never|change] default: always + on_start: never # options: [always|never|change] default: always diff --git a/vendor/github.com/Masterminds/semver/CHANGELOG.md b/vendor/github.com/Masterminds/semver/CHANGELOG.md new file mode 100644 index 00000000000..e405c9a84d9 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/CHANGELOG.md @@ -0,0 +1,109 @@ +# 1.5.0 (2019-09-11) + +## Added + +- #103: Add basic fuzzing for `NewVersion()` (thanks @jesse-c) + +## Changed + +- #82: Clarify wildcard meaning in range constraints and update tests for it (thanks @greysteil) +- #83: Clarify caret operator range for pre-1.0.0 dependencies (thanks @greysteil) +- #72: Adding docs comment pointing to vert for a cli +- #71: Update the docs on pre-release comparator handling +- #89: Test with new go versions (thanks @thedevsaddam) +- #87: Added $ to ValidPrerelease for better validation (thanks @jeremycarroll) + +## Fixed + +- #78: Fix unchecked error in example code (thanks @ravron) +- #70: Fix the handling of pre-releases and the 0.0.0 release edge case +- #97: Fixed copyright file for proper display on GitHub +- #107: Fix handling prerelease when sorting alphanum and num +- #109: Fixed where Validate sometimes returns wrong message on error + +# 1.4.2 (2018-04-10) + +## Changed +- #72: Updated the docs to point to vert for a console appliaction +- #71: Update the docs on pre-release comparator handling + +## Fixed +- #70: Fix the handling of pre-releases and the 0.0.0 release edge case + +# 1.4.1 (2018-04-02) + +## Fixed +- Fixed #64: Fix pre-release precedence issue (thanks @uudashr) + +# 1.4.0 (2017-10-04) + +## Changed +- #61: Update NewVersion to parse ints with a 64bit int size (thanks @zknill) + +# 1.3.1 (2017-07-10) + +## Fixed +- Fixed #57: number comparisons in prerelease sometimes inaccurate + +# 1.3.0 (2017-05-02) + +## Added +- #45: Added json (un)marshaling support (thanks @mh-cbon) +- Stability marker. See https://masterminds.github.io/stability/ + +## Fixed +- #51: Fix handling of single digit tilde constraint (thanks @dgodd) + +## Changed +- #55: The godoc icon moved from png to svg + +# 1.2.3 (2017-04-03) + +## Fixed +- #46: Fixed 0.x.x and 0.0.x in constraints being treated as * + +# Release 1.2.2 (2016-12-13) + +## Fixed +- #34: Fixed issue where hyphen range was not working with pre-release parsing. + +# Release 1.2.1 (2016-11-28) + +## Fixed +- #24: Fixed edge case issue where constraint "> 0" does not handle "0.0.1-alpha" + properly. + +# Release 1.2.0 (2016-11-04) + +## Added +- #20: Added MustParse function for versions (thanks @adamreese) +- #15: Added increment methods on versions (thanks @mh-cbon) + +## Fixed +- Issue #21: Per the SemVer spec (section 9) a pre-release is unstable and + might not satisfy the intended compatibility. The change here ignores pre-releases + on constraint checks (e.g., ~ or ^) when a pre-release is not part of the + constraint. For example, `^1.2.3` will ignore pre-releases while + `^1.2.3-alpha` will include them. 
+ +# Release 1.1.1 (2016-06-30) + +## Changed +- Issue #9: Speed up version comparison performance (thanks @sdboyer) +- Issue #8: Added benchmarks (thanks @sdboyer) +- Updated Go Report Card URL to new location +- Updated Readme to add code snippet formatting (thanks @mh-cbon) +- Updating tagging to v[SemVer] structure for compatibility with other tools. + +# Release 1.1.0 (2016-03-11) + +- Issue #2: Implemented validation to provide reasons a versions failed a + constraint. + +# Release 1.0.1 (2015-12-31) + +- Fixed #1: * constraint failing on valid versions. + +# Release 1.0.0 (2015-10-20) + +- Initial release diff --git a/vendor/github.com/Masterminds/semver/LICENSE.txt b/vendor/github.com/Masterminds/semver/LICENSE.txt new file mode 100644 index 00000000000..9ff7da9c48b --- /dev/null +++ b/vendor/github.com/Masterminds/semver/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (C) 2014-2019, Matt Butcher and Matt Farina + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/Masterminds/semver/Makefile b/vendor/github.com/Masterminds/semver/Makefile new file mode 100644 index 00000000000..a7a1b4e36de --- /dev/null +++ b/vendor/github.com/Masterminds/semver/Makefile @@ -0,0 +1,36 @@ +.PHONY: setup +setup: + go get -u gopkg.in/alecthomas/gometalinter.v1 + gometalinter.v1 --install + +.PHONY: test +test: validate lint + @echo "==> Running tests" + go test -v + +.PHONY: validate +validate: + @echo "==> Running static validations" + @gometalinter.v1 \ + --disable-all \ + --enable deadcode \ + --severity deadcode:error \ + --enable gofmt \ + --enable gosimple \ + --enable ineffassign \ + --enable misspell \ + --enable vet \ + --tests \ + --vendor \ + --deadline 60s \ + ./... || exit_code=1 + +.PHONY: lint +lint: + @echo "==> Running linters" + @gometalinter.v1 \ + --disable-all \ + --enable golint \ + --vendor \ + --deadline 60s \ + ./... || : diff --git a/vendor/github.com/Masterminds/semver/README.md b/vendor/github.com/Masterminds/semver/README.md new file mode 100644 index 00000000000..1b52d2f4362 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/README.md @@ -0,0 +1,194 @@ +# SemVer + +The `semver` package provides the ability to work with [Semantic Versions](http://semver.org) in Go. 
Specifically it provides the ability to:
+
+* Parse semantic versions
+* Sort semantic versions
+* Check if a semantic version fits within a set of constraints
+* Optionally work with a `v` prefix
+
+[![Stability:
+Active](https://masterminds.github.io/stability/active.svg)](https://masterminds.github.io/stability/active.html)
+[![Build Status](https://travis-ci.org/Masterminds/semver.svg)](https://travis-ci.org/Masterminds/semver) [![Build status](https://ci.appveyor.com/api/projects/status/jfk66lib7hb985k8/branch/master?svg=true&passingText=windows%20build%20passing&failingText=windows%20build%20failing)](https://ci.appveyor.com/project/mattfarina/semver/branch/master) [![GoDoc](https://godoc.org/github.com/Masterminds/semver?status.svg)](https://godoc.org/github.com/Masterminds/semver) [![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/semver)](https://goreportcard.com/report/github.com/Masterminds/semver)
+
+If you are looking for a command line tool for version comparisons please see
+[vert](https://github.com/Masterminds/vert) which uses this library.
+
+## Parsing Semantic Versions
+
+To parse a semantic version use the `NewVersion` function. For example,
+
+```go
+    v, err := semver.NewVersion("1.2.3-beta.1+build345")
+```
+
+If there is an error the version wasn't parseable. The version object has methods
+to get the parts of the version, compare it to other versions, convert the
+version back into a string, and get the original string. For more details
+please see the [documentation](https://godoc.org/github.com/Masterminds/semver).
+
+## Sorting Semantic Versions
+
+A set of versions can be sorted using the [`sort`](https://golang.org/pkg/sort/)
+package from the standard library. For example,
+
+```go
+    raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",}
+    vs := make([]*semver.Version, len(raw))
+    for i, r := range raw {
+        v, err := semver.NewVersion(r)
+        if err != nil {
+            t.Errorf("Error parsing version: %s", err)
+        }
+
+        vs[i] = v
+    }
+
+    sort.Sort(semver.Collection(vs))
+```
+
+## Checking Version Constraints
+
+Checking a version against version constraints is one of the most featureful
+parts of the package.
+
+```go
+    c, err := semver.NewConstraint(">= 1.2.3")
+    if err != nil {
+        // Handle constraint not being parseable.
+    }
+
+    v, err := semver.NewVersion("1.3")
+    if err != nil {
+        // Handle version not being parseable.
+    }
+    // Check if the version meets the constraints. The variable a will be true.
+    a := c.Check(v)
+```
+
+## Basic Comparisons
+
+There are two elements to the comparisons. First, a comparison string is a list
+of comma-separated AND comparisons. These groups are then joined by `||` into
+OR comparisons. For example, `">= 1.2, < 3.0.0 || >= 4.2.3"` is looking for a
+comparison that's greater than or equal to 1.2 and less than 3.0.0 or is
+greater than or equal to 4.2.3.
+
+The basic comparisons are:
+
+* `=`: equal (aliased to no operator)
+* `!=`: not equal
+* `>`: greater than
+* `<`: less than
+* `>=`: greater than or equal to
+* `<=`: less than or equal to
+
+## Working With Pre-release Versions
+
+Pre-releases, for those not familiar with them, are used for software releases
+prior to stable or generally available releases. Examples of pre-releases include
+development, alpha, beta, and release candidate releases. A pre-release may be
+a version such as `1.2.3-beta.1` while the stable release would be `1.2.3`. In the
+order of precedence, pre-releases come before their associated releases.
+
+According to the Semantic Version specification pre-releases may not be
+API compliant with their release counterpart. It says,
+
+> A pre-release version indicates that the version is unstable and might not satisfy the intended compatibility requirements as denoted by its associated normal version.
+
+SemVer comparisons without a pre-release comparator will skip pre-release versions.
+For example, `>=1.2.3` will skip pre-releases when looking at a list of releases
+while `>=1.2.3-0` will evaluate and find pre-releases.
+
+The reason for the `0` as a pre-release version in the example comparison is
+because pre-releases can only contain ASCII alphanumerics and hyphens (along with
+`.` separators), per the spec. Sorting happens in ASCII sort order, again per the
+spec. The lowest character is a `0` in ASCII sort order (see an
+[ASCII Table](http://www.asciitable.com/)).
+
+Understanding ASCII sort ordering is important because A-Z comes before a-z. That
+means `>=1.2.3-BETA` will return `1.2.3-alpha`. What you might expect from case
+sensitivity doesn't apply here. This is due to ASCII sort ordering which is what
+the spec specifies.
+
+## Hyphen Range Comparisons
+
+There are multiple methods to handle ranges and the first is hyphen ranges.
+These look like:
+
+* `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5`
+* `2.3.4 - 4.5` which is equivalent to `>= 2.3.4, <= 4.5`
+
+## Wildcards In Comparisons
+
+The `x`, `X`, and `*` characters can be used as a wildcard character. This works
+for all comparison operators. When used on the `=` operator it falls
+back to the patch level comparison (see tilde below). For example,
+
+* `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
+* `>= 1.2.x` is equivalent to `>= 1.2.0`
+* `<= 2.x` is equivalent to `< 3`
+* `*` is equivalent to `>= 0.0.0`
+
+## Tilde Range Comparisons (Patch)
+
+The tilde (`~`) comparison operator is for patch level ranges when a minor
+version is specified and major level changes when the minor number is missing.
+For example,
+
+* `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0`
+* `~1` is equivalent to `>= 1, < 2`
+* `~2.3` is equivalent to `>= 2.3, < 2.4`
+* `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
+* `~1.x` is equivalent to `>= 1, < 2`
+
+## Caret Range Comparisons (Major)
+
+The caret (`^`) comparison operator is for major level changes. This is useful
+when comparing API versions, as a major change is API breaking. For example,
+
+* `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0`
+* `^0.0.1` is equivalent to `>= 0.0.1, < 1.0.0`
+* `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0`
+* `^2.3` is equivalent to `>= 2.3, < 3`
+* `^2.x` is equivalent to `>= 2.0.0, < 3`
+
+# Validation
+
+In addition to testing a version against a constraint, a version can be validated
+against a constraint. When validation fails a slice of errors containing why a
+version didn't meet the constraint is returned. For example,
+
+```go
+    c, err := semver.NewConstraint("<= 1.2.3, >= 1.4")
+    if err != nil {
+        // Handle constraint not being parseable.
+    }
+
+    v, err := semver.NewVersion("1.3")
+    if err != nil {
+        // Handle version not being parseable.
+    }
+
+    // Validate a version against a constraint.
+    a, msgs := c.Validate(v)
+    // a is false
+    for _, m := range msgs {
+        fmt.Println(m)
+
+        // Loops over the errors which would read
+        // "1.3 is greater than 1.2.3"
+        // "1.3 is less than 1.4"
+    }
+```
+
+# Fuzzing
+
+[dvyukov/go-fuzz](https://github.com/dvyukov/go-fuzz) is used for fuzzing.
+
+1.
`go-fuzz-build` +2. `go-fuzz -workdir=fuzz` + +# Contribute + +If you find an issue or want to contribute please file an [issue](https://github.com/Masterminds/semver/issues) +or [create a pull request](https://github.com/Masterminds/semver/pulls). diff --git a/vendor/github.com/Masterminds/semver/appveyor.yml b/vendor/github.com/Masterminds/semver/appveyor.yml new file mode 100644 index 00000000000..b2778df15a4 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/appveyor.yml @@ -0,0 +1,44 @@ +version: build-{build}.{branch} + +clone_folder: C:\gopath\src\github.com\Masterminds\semver +shallow_clone: true + +environment: + GOPATH: C:\gopath + +platform: + - x64 + +install: + - go version + - go env + - go get -u gopkg.in/alecthomas/gometalinter.v1 + - set PATH=%PATH%;%GOPATH%\bin + - gometalinter.v1.exe --install + +build_script: + - go install -v ./... + +test_script: + - "gometalinter.v1 \ + --disable-all \ + --enable deadcode \ + --severity deadcode:error \ + --enable gofmt \ + --enable gosimple \ + --enable ineffassign \ + --enable misspell \ + --enable vet \ + --tests \ + --vendor \ + --deadline 60s \ + ./... || exit_code=1" + - "gometalinter.v1 \ + --disable-all \ + --enable golint \ + --vendor \ + --deadline 60s \ + ./... || :" + - go test -v + +deploy: off diff --git a/vendor/github.com/Masterminds/semver/collection.go b/vendor/github.com/Masterminds/semver/collection.go new file mode 100644 index 00000000000..a78235895fd --- /dev/null +++ b/vendor/github.com/Masterminds/semver/collection.go @@ -0,0 +1,24 @@ +package semver + +// Collection is a collection of Version instances and implements the sort +// interface. See the sort package for more details. +// https://golang.org/pkg/sort/ +type Collection []*Version + +// Len returns the length of a collection. The number of Version instances +// on the slice. +func (c Collection) Len() int { + return len(c) +} + +// Less is needed for the sort interface to compare two Version objects on the +// slice. If checks if one is less than the other. +func (c Collection) Less(i, j int) bool { + return c[i].LessThan(c[j]) +} + +// Swap is needed for the sort interface to replace the Version objects +// at two different positions in the slice. +func (c Collection) Swap(i, j int) { + c[i], c[j] = c[j], c[i] +} diff --git a/vendor/github.com/Masterminds/semver/constraints.go b/vendor/github.com/Masterminds/semver/constraints.go new file mode 100644 index 00000000000..b94b93413f3 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/constraints.go @@ -0,0 +1,423 @@ +package semver + +import ( + "errors" + "fmt" + "regexp" + "strings" +) + +// Constraints is one or more constraint that a semantic version can be +// checked against. +type Constraints struct { + constraints [][]*constraint +} + +// NewConstraint returns a Constraints instance that a Version instance can +// be checked against. If there is a parse error it will be returned. +func NewConstraint(c string) (*Constraints, error) { + + // Rewrite - ranges into a comparison operation. + c = rewriteRange(c) + + ors := strings.Split(c, "||") + or := make([][]*constraint, len(ors)) + for k, v := range ors { + cs := strings.Split(v, ",") + result := make([]*constraint, len(cs)) + for i, s := range cs { + pc, err := parseConstraint(s) + if err != nil { + return nil, err + } + + result[i] = pc + } + or[k] = result + } + + o := &Constraints{constraints: or} + return o, nil +} + +// Check tests if a version satisfies the constraints. 
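+// Constraints are evaluated as OR'd groups of AND'd comparisons: the version
+// must satisfy every comparator in at least one comma-separated group.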
+func (cs Constraints) Check(v *Version) bool {
+    // loop over the ORs and check the inner ANDs
+    for _, o := range cs.constraints {
+        joy := true
+        for _, c := range o {
+            if !c.check(v) {
+                joy = false
+                break
+            }
+        }
+
+        if joy {
+            return true
+        }
+    }
+
+    return false
+}
+
+// Validate checks if a version satisfies a constraint. If not, a slice of
+// reasons for the failure is returned in addition to a bool.
+func (cs Constraints) Validate(v *Version) (bool, []error) {
+    // loop over the ORs and check the inner ANDs
+    var e []error
+
+    // Capture the prerelease message only once. When it happens the first time
+    // this var is marked
+    var prerelease bool
+    for _, o := range cs.constraints {
+        joy := true
+        for _, c := range o {
+            // Before running the check handle the case where the version is
+            // a prerelease and the check is not searching for prereleases.
+            if c.con.pre == "" && v.pre != "" {
+                if !prerelease {
+                    em := fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+                    e = append(e, em)
+                    prerelease = true
+                }
+                joy = false
+
+            } else {
+
+                if !c.check(v) {
+                    em := fmt.Errorf(c.msg, v, c.orig)
+                    e = append(e, em)
+                    joy = false
+                }
+            }
+        }
+
+        if joy {
+            return true, []error{}
+        }
+    }
+
+    return false, e
+}
+
+var constraintOps map[string]cfunc
+var constraintMsg map[string]string
+var constraintRegex *regexp.Regexp
+
+func init() {
+    constraintOps = map[string]cfunc{
+        "":   constraintTildeOrEqual,
+        "=":  constraintTildeOrEqual,
+        "!=": constraintNotEqual,
+        ">":  constraintGreaterThan,
+        "<":  constraintLessThan,
+        ">=": constraintGreaterThanEqual,
+        "=>": constraintGreaterThanEqual,
+        "<=": constraintLessThanEqual,
+        "=<": constraintLessThanEqual,
+        "~":  constraintTilde,
+        "~>": constraintTilde,
+        "^":  constraintCaret,
+    }
+
+    constraintMsg = map[string]string{
+        "":   "%s is not equal to %s",
+        "=":  "%s is not equal to %s",
+        "!=": "%s is equal to %s",
+        ">":  "%s is less than or equal to %s",
+        "<":  "%s is greater than or equal to %s",
+        ">=": "%s is less than %s",
+        "=>": "%s is less than %s",
+        "<=": "%s is greater than %s",
+        "=<": "%s is greater than %s",
+        "~":  "%s does not have same major and minor version as %s",
+        "~>": "%s does not have same major and minor version as %s",
+        "^":  "%s does not have same major version as %s",
+    }
+
+    ops := make([]string, 0, len(constraintOps))
+    for k := range constraintOps {
+        ops = append(ops, regexp.QuoteMeta(k))
+    }
+
+    constraintRegex = regexp.MustCompile(fmt.Sprintf(
+        `^\s*(%s)\s*(%s)\s*$`,
+        strings.Join(ops, "|"),
+        cvRegex))
+
+    constraintRangeRegex = regexp.MustCompile(fmt.Sprintf(
+        `\s*(%s)\s+-\s+(%s)\s*`,
+        cvRegex, cvRegex))
+}
+
+// An individual constraint
+type constraint struct {
+    // The callback function for the constraint. It performs the logic for
+    // the constraint.
+    function cfunc
+
+    msg string
+
+    // The version used in the constraint check. For example, if a constraint
+    // is '<= 2.0.0', con holds a Version instance representing 2.0.0.
+ con *Version + + // The original parsed version (e.g., 4.x from != 4.x) + orig string + + // When an x is used as part of the version (e.g., 1.x) + minorDirty bool + dirty bool + patchDirty bool +} + +// Check if a version meets the constraint +func (c *constraint) check(v *Version) bool { + return c.function(v, c) +} + +type cfunc func(v *Version, c *constraint) bool + +func parseConstraint(c string) (*constraint, error) { + m := constraintRegex.FindStringSubmatch(c) + if m == nil { + return nil, fmt.Errorf("improper constraint: %s", c) + } + + ver := m[2] + orig := ver + minorDirty := false + patchDirty := false + dirty := false + if isX(m[3]) { + ver = "0.0.0" + dirty = true + } else if isX(strings.TrimPrefix(m[4], ".")) || m[4] == "" { + minorDirty = true + dirty = true + ver = fmt.Sprintf("%s.0.0%s", m[3], m[6]) + } else if isX(strings.TrimPrefix(m[5], ".")) { + dirty = true + patchDirty = true + ver = fmt.Sprintf("%s%s.0%s", m[3], m[4], m[6]) + } + + con, err := NewVersion(ver) + if err != nil { + + // The constraintRegex should catch any regex parsing errors. So, + // we should never get here. + return nil, errors.New("constraint Parser Error") + } + + cs := &constraint{ + function: constraintOps[m[1]], + msg: constraintMsg[m[1]], + con: con, + orig: orig, + minorDirty: minorDirty, + patchDirty: patchDirty, + dirty: dirty, + } + return cs, nil +} + +// Constraint functions +func constraintNotEqual(v *Version, c *constraint) bool { + if c.dirty { + + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + if c.con.Major() != v.Major() { + return true + } + if c.con.Minor() != v.Minor() && !c.minorDirty { + return true + } else if c.minorDirty { + return false + } + + return false + } + + return !v.Equal(c.con) +} + +func constraintGreaterThan(v *Version, c *constraint) bool { + + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + return v.Compare(c.con) == 1 +} + +func constraintLessThan(v *Version, c *constraint) bool { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + if !c.dirty { + return v.Compare(c.con) < 0 + } + + if v.Major() > c.con.Major() { + return false + } else if v.Minor() > c.con.Minor() && !c.minorDirty { + return false + } + + return true +} + +func constraintGreaterThanEqual(v *Version, c *constraint) bool { + + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + return v.Compare(c.con) >= 0 +} + +func constraintLessThanEqual(v *Version, c *constraint) bool { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. 
+ if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + if !c.dirty { + return v.Compare(c.con) <= 0 + } + + if v.Major() > c.con.Major() { + return false + } else if v.Minor() > c.con.Minor() && !c.minorDirty { + return false + } + + return true +} + +// ~*, ~>* --> >= 0.0.0 (any) +// ~2, ~2.x, ~2.x.x, ~>2, ~>2.x ~>2.x.x --> >=2.0.0, <3.0.0 +// ~2.0, ~2.0.x, ~>2.0, ~>2.0.x --> >=2.0.0, <2.1.0 +// ~1.2, ~1.2.x, ~>1.2, ~>1.2.x --> >=1.2.0, <1.3.0 +// ~1.2.3, ~>1.2.3 --> >=1.2.3, <1.3.0 +// ~1.2.0, ~>1.2.0 --> >=1.2.0, <1.3.0 +func constraintTilde(v *Version, c *constraint) bool { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + if v.LessThan(c.con) { + return false + } + + // ~0.0.0 is a special case where all constraints are accepted. It's + // equivalent to >= 0.0.0. + if c.con.Major() == 0 && c.con.Minor() == 0 && c.con.Patch() == 0 && + !c.minorDirty && !c.patchDirty { + return true + } + + if v.Major() != c.con.Major() { + return false + } + + if v.Minor() != c.con.Minor() && !c.minorDirty { + return false + } + + return true +} + +// When there is a .x (dirty) status it automatically opts in to ~. Otherwise +// it's a straight = +func constraintTildeOrEqual(v *Version, c *constraint) bool { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + if c.dirty { + c.msg = constraintMsg["~"] + return constraintTilde(v, c) + } + + return v.Equal(c.con) +} + +// ^* --> (any) +// ^2, ^2.x, ^2.x.x --> >=2.0.0, <3.0.0 +// ^2.0, ^2.0.x --> >=2.0.0, <3.0.0 +// ^1.2, ^1.2.x --> >=1.2.0, <2.0.0 +// ^1.2.3 --> >=1.2.3, <2.0.0 +// ^1.2.0 --> >=1.2.0, <2.0.0 +func constraintCaret(v *Version, c *constraint) bool { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + if v.LessThan(c.con) { + return false + } + + if v.Major() != c.con.Major() { + return false + } + + return true +} + +var constraintRangeRegex *regexp.Regexp + +const cvRegex string = `v?([0-9|x|X|\*]+)(\.[0-9|x|X|\*]+)?(\.[0-9|x|X|\*]+)?` + + `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + + `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + +func isX(x string) bool { + switch x { + case "x", "*", "X": + return true + default: + return false + } +} + +func rewriteRange(i string) string { + m := constraintRangeRegex.FindAllStringSubmatch(i, -1) + if m == nil { + return i + } + o := i + for _, v := range m { + t := fmt.Sprintf(">= %s, <= %s", v[1], v[11]) + o = strings.Replace(o, v[0], t, 1) + } + + return o +} diff --git a/vendor/github.com/Masterminds/semver/doc.go b/vendor/github.com/Masterminds/semver/doc.go new file mode 100644 index 00000000000..6a6c24c6d6e --- /dev/null +++ b/vendor/github.com/Masterminds/semver/doc.go @@ -0,0 +1,115 @@ +/* +Package semver provides the ability to work with Semantic Versions (http://semver.org) in Go. 
+
+Specifically it provides the ability to:
+
+    * Parse semantic versions
+    * Sort semantic versions
+    * Check if a semantic version fits within a set of constraints
+    * Optionally work with a `v` prefix
+
+Parsing Semantic Versions
+
+To parse a semantic version use the `NewVersion` function. For example,
+
+    v, err := semver.NewVersion("1.2.3-beta.1+build345")
+
+If there is an error, the version wasn't parseable. The version object has methods
+to get the parts of the version, compare it to other versions, convert the
+version back into a string, and get the original string. For more details
+please see the documentation at https://godoc.org/github.com/Masterminds/semver.
+
+Sorting Semantic Versions
+
+A set of versions can be sorted using the `sort` package from the standard library.
+For example,
+
+    raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2"}
+    vs := make([]*semver.Version, len(raw))
+    for i, r := range raw {
+        v, err := semver.NewVersion(r)
+        if err != nil {
+            fmt.Printf("Error parsing version: %s\n", err)
+        }
+
+        vs[i] = v
+    }
+
+    sort.Sort(semver.Collection(vs))
+
+Checking Version Constraints
+
+Checking a version against version constraints is one of the most featureful
+parts of the package.
+
+    c, err := semver.NewConstraint(">= 1.2.3")
+    if err != nil {
+        // Handle constraint not being parseable.
+    }
+
+    v, err := semver.NewVersion("1.3")
+    if err != nil {
+        // Handle version not being parseable.
+    }
+    // Check if the version meets the constraints. The a variable will be true.
+    a := c.Check(v)
+
+Basic Comparisons
+
+There are two elements to the comparisons. First, a comparison string is a list
+of comma-separated AND comparisons. These are then separated by || (OR)
+comparisons. For example, `">= 1.2, < 3.0.0 || >= 4.2.3"` is looking for a
+comparison that's greater than or equal to 1.2 and less than 3.0.0 or is
+greater than or equal to 4.2.3.
+
+The basic comparisons are:
+
+    * `=`: equal (aliased to no operator)
+    * `!=`: not equal
+    * `>`: greater than
+    * `<`: less than
+    * `>=`: greater than or equal to
+    * `<=`: less than or equal to
+
+Hyphen Range Comparisons
+
+There are multiple methods to handle ranges and the first is hyphen ranges.
+These look like:
+
+    * `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5`
+    * `2.3.4 - 4.5` which is equivalent to `>= 2.3.4, <= 4.5`
+
+Wildcards In Comparisons
+
+The `x`, `X`, and `*` characters can be used as a wildcard character. This works
+for all comparison operators. When used on the `=` operator it falls
+back to the patch level comparison (see tilde below). For example,
+
+    * `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
+    * `>= 1.2.x` is equivalent to `>= 1.2.0`
+    * `<= 2.x` is equivalent to `< 3`
+    * `*` is equivalent to `>= 0.0.0`
+
+Tilde Range Comparisons (Patch)
+
+The tilde (`~`) comparison operator is for patch level ranges when a minor
+version is specified and major level changes when the minor number is missing.
+For example,
+
+    * `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0`
+    * `~1` is equivalent to `>= 1, < 2`
+    * `~2.3` is equivalent to `>= 2.3, < 2.4`
+    * `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
+    * `~1.x` is equivalent to `>= 1, < 2`
+
+Caret Range Comparisons (Major)
+
+The caret (`^`) comparison operator is for major level changes. This is useful
+when comparing API versions, as a major change is API breaking.
For example, + + * `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0` + * `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0` + * `^2.3` is equivalent to `>= 2.3, < 3` + * `^2.x` is equivalent to `>= 2.0.0, < 3` +*/ +package semver diff --git a/vendor/github.com/Masterminds/semver/v3/.gitignore b/vendor/github.com/Masterminds/semver/v3/.gitignore new file mode 100644 index 00000000000..6b061e6174b --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/.gitignore @@ -0,0 +1 @@ +_fuzz/ \ No newline at end of file diff --git a/vendor/github.com/Masterminds/semver/v3/.golangci.yml b/vendor/github.com/Masterminds/semver/v3/.golangci.yml new file mode 100644 index 00000000000..fdbdf1448c3 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/.golangci.yml @@ -0,0 +1,26 @@ +run: + deadline: 2m + +linters: + disable-all: true + enable: + - deadcode + - dupl + - errcheck + - gofmt + - goimports + - golint + - gosimple + - govet + - ineffassign + - misspell + - nakedret + - structcheck + - unused + - varcheck + +linters-settings: + gofmt: + simplify: true + dupl: + threshold: 400 diff --git a/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md b/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md new file mode 100644 index 00000000000..1f90c38d260 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md @@ -0,0 +1,194 @@ +# Changelog + +## 3.1.1 (2020-11-23) + +### Fixed + +- #158: Fixed issue with generated regex operation order that could cause problem + +## 3.1.0 (2020-04-15) + +### Added + +- #131: Add support for serializing/deserializing SQL (thanks @ryancurrah) + +### Changed + +- #148: More accurate validation messages on constraints + +## 3.0.3 (2019-12-13) + +### Fixed + +- #141: Fixed issue with <= comparison + +## 3.0.2 (2019-11-14) + +### Fixed + +- #134: Fixed broken constraint checking with ^0.0 (thanks @krmichelos) + +## 3.0.1 (2019-09-13) + +### Fixed + +- #125: Fixes issue with module path for v3 + +## 3.0.0 (2019-09-12) + +This is a major release of the semver package which includes API changes. The Go +API is compatible with ^1. The Go API was not changed because many people are using +`go get` without Go modules for their applications and API breaking changes cause +errors which we have or would need to support. + +The changes in this release are the handling based on the data passed into the +functions. These are described in the added and changed sections below. + +### Added + +- StrictNewVersion function. This is similar to NewVersion but will return an + error if the version passed in is not a strict semantic version. For example, + 1.2.3 would pass but v1.2.3 or 1.2 would fail because they are not strictly + speaking semantic versions. This function is faster, performs fewer operations, + and uses fewer allocations than NewVersion. +- Fuzzing has been performed on NewVersion, StrictNewVersion, and NewConstraint. + The Makefile contains the operations used. For more information on you can start + on Wikipedia at https://en.wikipedia.org/wiki/Fuzzing +- Now using Go modules + +### Changed + +- NewVersion has proper prerelease and metadata validation with error messages + to signal an issue with either of them +- ^ now operates using a similar set of rules to npm/js and Rust/Cargo. If the + version is >=1 the ^ ranges works the same as v1. For major versions of 0 the + rules have changed. The minor version is treated as the stable version unless + a patch is specified and then it is equivalent to =. 
One difference from npm/js + is that prereleases there are only to a specific version (e.g. 1.2.3). + Prereleases here look over multiple versions and follow semantic version + ordering rules. This pattern now follows along with the expected and requested + handling of this packaged by numerous users. + +## 1.5.0 (2019-09-11) + +### Added + +- #103: Add basic fuzzing for `NewVersion()` (thanks @jesse-c) + +### Changed + +- #82: Clarify wildcard meaning in range constraints and update tests for it (thanks @greysteil) +- #83: Clarify caret operator range for pre-1.0.0 dependencies (thanks @greysteil) +- #72: Adding docs comment pointing to vert for a cli +- #71: Update the docs on pre-release comparator handling +- #89: Test with new go versions (thanks @thedevsaddam) +- #87: Added $ to ValidPrerelease for better validation (thanks @jeremycarroll) + +### Fixed + +- #78: Fix unchecked error in example code (thanks @ravron) +- #70: Fix the handling of pre-releases and the 0.0.0 release edge case +- #97: Fixed copyright file for proper display on GitHub +- #107: Fix handling prerelease when sorting alphanum and num +- #109: Fixed where Validate sometimes returns wrong message on error + +## 1.4.2 (2018-04-10) + +### Changed + +- #72: Updated the docs to point to vert for a console appliaction +- #71: Update the docs on pre-release comparator handling + +### Fixed + +- #70: Fix the handling of pre-releases and the 0.0.0 release edge case + +## 1.4.1 (2018-04-02) + +### Fixed + +- Fixed #64: Fix pre-release precedence issue (thanks @uudashr) + +## 1.4.0 (2017-10-04) + +### Changed + +- #61: Update NewVersion to parse ints with a 64bit int size (thanks @zknill) + +## 1.3.1 (2017-07-10) + +### Fixed + +- Fixed #57: number comparisons in prerelease sometimes inaccurate + +## 1.3.0 (2017-05-02) + +### Added + +- #45: Added json (un)marshaling support (thanks @mh-cbon) +- Stability marker. See https://masterminds.github.io/stability/ + +### Fixed + +- #51: Fix handling of single digit tilde constraint (thanks @dgodd) + +### Changed + +- #55: The godoc icon moved from png to svg + +## 1.2.3 (2017-04-03) + +### Fixed + +- #46: Fixed 0.x.x and 0.0.x in constraints being treated as * + +## Release 1.2.2 (2016-12-13) + +### Fixed + +- #34: Fixed issue where hyphen range was not working with pre-release parsing. + +## Release 1.2.1 (2016-11-28) + +### Fixed + +- #24: Fixed edge case issue where constraint "> 0" does not handle "0.0.1-alpha" + properly. + +## Release 1.2.0 (2016-11-04) + +### Added + +- #20: Added MustParse function for versions (thanks @adamreese) +- #15: Added increment methods on versions (thanks @mh-cbon) + +### Fixed + +- Issue #21: Per the SemVer spec (section 9) a pre-release is unstable and + might not satisfy the intended compatibility. The change here ignores pre-releases + on constraint checks (e.g., ~ or ^) when a pre-release is not part of the + constraint. For example, `^1.2.3` will ignore pre-releases while + `^1.2.3-alpha` will include them. + +## Release 1.1.1 (2016-06-30) + +### Changed + +- Issue #9: Speed up version comparison performance (thanks @sdboyer) +- Issue #8: Added benchmarks (thanks @sdboyer) +- Updated Go Report Card URL to new location +- Updated Readme to add code snippet formatting (thanks @mh-cbon) +- Updating tagging to v[SemVer] structure for compatibility with other tools. + +## Release 1.1.0 (2016-03-11) + +- Issue #2: Implemented validation to provide reasons a versions failed a + constraint. 
+ +## Release 1.0.1 (2015-12-31) + +- Fixed #1: * constraint failing on valid versions. + +## Release 1.0.0 (2015-10-20) + +- Initial release diff --git a/vendor/github.com/Masterminds/semver/v3/LICENSE.txt b/vendor/github.com/Masterminds/semver/v3/LICENSE.txt new file mode 100644 index 00000000000..9ff7da9c48b --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (C) 2014-2019, Matt Butcher and Matt Farina + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/Masterminds/semver/v3/Makefile b/vendor/github.com/Masterminds/semver/v3/Makefile new file mode 100644 index 00000000000..eac19178fbd --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/Makefile @@ -0,0 +1,37 @@ +GOPATH=$(shell go env GOPATH) +GOLANGCI_LINT=$(GOPATH)/bin/golangci-lint +GOFUZZBUILD = $(GOPATH)/bin/go-fuzz-build +GOFUZZ = $(GOPATH)/bin/go-fuzz + +.PHONY: lint +lint: $(GOLANGCI_LINT) + @echo "==> Linting codebase" + @$(GOLANGCI_LINT) run + +.PHONY: test +test: + @echo "==> Running tests" + GO111MODULE=on go test -v + +.PHONY: test-cover +test-cover: + @echo "==> Running Tests with coverage" + GO111MODULE=on go test -cover . + +.PHONY: fuzz +fuzz: $(GOFUZZBUILD) $(GOFUZZ) + @echo "==> Fuzz testing" + $(GOFUZZBUILD) + $(GOFUZZ) -workdir=_fuzz + +$(GOLANGCI_LINT): + # Install golangci-lint. The configuration for it is in the .golangci.yml + # file in the root of the repository + echo ${GOPATH} + curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin v1.17.1 + +$(GOFUZZBUILD): + cd / && go get -u github.com/dvyukov/go-fuzz/go-fuzz-build + +$(GOFUZZ): + cd / && go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-dep \ No newline at end of file diff --git a/vendor/github.com/Masterminds/semver/v3/README.md b/vendor/github.com/Masterminds/semver/v3/README.md new file mode 100644 index 00000000000..d8f54dcbd3c --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/README.md @@ -0,0 +1,244 @@ +# SemVer + +The `semver` package provides the ability to work with [Semantic Versions](http://semver.org) in Go. 
Specifically it provides the ability to:
+
+* Parse semantic versions
+* Sort semantic versions
+* Check if a semantic version fits within a set of constraints
+* Optionally work with a `v` prefix
+
+[![Stability:
+Active](https://masterminds.github.io/stability/active.svg)](https://masterminds.github.io/stability/active.html)
+[![](https://github.com/Masterminds/semver/workflows/Tests/badge.svg)](https://github.com/Masterminds/semver/actions)
+[![GoDoc](https://img.shields.io/static/v1?label=godoc&message=reference&color=blue)](https://pkg.go.dev/github.com/Masterminds/semver/v3)
+[![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/semver)](https://goreportcard.com/report/github.com/Masterminds/semver)
+
+If you are looking for a command line tool for version comparisons please see
+[vert](https://github.com/Masterminds/vert) which uses this library.
+
+## Package Versions
+
+There are three major versions of the `semver` package.
+
+* 3.x.x is the new stable and active version. This version is focused on constraint
+  compatibility for range handling in other tools from other languages. It has
+  a similar API to the v1 releases. The development of this version is on the master
+  branch. The documentation for this version is below.
+* 2.x was developed primarily for [dep](https://github.com/golang/dep). There are
+  no tagged releases and the development was performed by [@sdboyer](https://github.com/sdboyer).
+  There are API breaking changes from v1. This version lives on the [2.x branch](https://github.com/Masterminds/semver/tree/2.x).
+* 1.x.x is the most widely used version with numerous tagged releases. This is the
+  previous stable and is still maintained for bug fixes. The development, to fix
+  bugs, occurs on the release-1 branch. You can read the documentation [here](https://github.com/Masterminds/semver/blob/release-1/README.md).
+
+## Parsing Semantic Versions
+
+There are two functions that can parse semantic versions. The `StrictNewVersion`
+function only parses valid version 2 semantic versions as outlined in the
+specification. The `NewVersion` function attempts to coerce a version into a
+semantic version and parse it. For example, if there is a leading v or a version
+listed without all 3 parts (e.g. `v1.2`) it will attempt to coerce it into a valid
+semantic version (e.g., 1.2.0). In both cases a `Version` object is returned
+that can be sorted, compared, and used in constraints.
+
+When parsing a version an error is returned if there is an issue parsing the
+version. For example,
+
+    v, err := semver.NewVersion("1.2.3-beta.1+build345")
+
+The version object has methods to get the parts of the version, compare it to
+other versions, convert the version back into a string, and get the original
+string. Getting the original string is useful if the semantic version was coerced
+into a valid form.
+
+## Sorting Semantic Versions
+
+A set of versions can be sorted using the `sort` package from the standard library.
+For example,
+
+```go
+raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2"}
+vs := make([]*semver.Version, len(raw))
+for i, r := range raw {
+    v, err := semver.NewVersion(r)
+    if err != nil {
+        fmt.Printf("Error parsing version: %s\n", err)
+    }
+
+    vs[i] = v
+}
+
+sort.Sort(semver.Collection(vs))
+```
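+
+Putting parsing and sorting together, here is a small self-contained sketch
+(the module path is the v3 path used in this repository; `NewVersion` coerces
+loose inputs such as `1.0` to `1.0.0`, so the printed ordering reflects the
+coerced forms):
+
+```go
+package main
+
+import (
+    "fmt"
+    "sort"
+
+    "github.com/Masterminds/semver/v3"
+)
+
+func main() {
+    // StrictNewVersion rejects inputs that NewVersion would coerce.
+    if _, err := semver.StrictNewVersion("v1.2"); err != nil {
+        fmt.Println("strict parse failed:", err)
+    }
+
+    raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2"}
+    vs := make([]*semver.Version, 0, len(raw))
+    for _, r := range raw {
+        v, err := semver.NewVersion(r) // "1.0" becomes 1.0.0, "2" becomes 2.0.0
+        if err != nil {
+            continue
+        }
+        vs = append(vs, v)
+    }
+
+    sort.Sort(semver.Collection(vs))
+    fmt.Println(vs) // [0.4.2 1.0.0 1.2.3 1.3.0 2.0.0]
+}
+```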
+
+## Checking Version Constraints
+
+There are two methods for comparing versions. One uses comparison methods on
+`Version` instances and the other uses `Constraints`. There are some important
+differences to note between these two methods of comparison.
+
+1. When two versions are compared using functions such as `Compare`, `LessThan`,
+   and others it will follow the specification and always include prereleases
+   within the comparison. It will provide an answer that is valid with the
+   comparison section of the spec at https://semver.org/#spec-item-11
+2. When constraint checking is used for checks or validation it will follow a
+   different set of rules that are common for ranges with tools like npm/js
+   and Rust/Cargo. This includes considering prereleases to be invalid if the
+   range does not include one. If you want to have it include pre-releases a
+   simple solution is to include `-0` in your range.
+3. Constraint ranges can have some complex rules including the shorthand use of
+   ~ and ^. For more details on those see the options below.
+
+There are differences between the two methods of checking versions because the
+comparison methods on `Version` follow the specification while comparison ranges
+are not part of the specification. Different packages and tools have taken it
+upon themselves to come up with range rules. This has resulted in differences.
+For example, npm/js and Cargo/Rust follow similar patterns while PHP has a
+different pattern for ^. The comparison features in this package follow the
+npm/js and Cargo/Rust lead because applications using it have followed similar
+patterns with their versions.
+
+Checking a version against version constraints is one of the most featureful
+parts of the package.
+
+```go
+c, err := semver.NewConstraint(">= 1.2.3")
+if err != nil {
+    // Handle constraint not being parsable.
+}
+
+v, err := semver.NewVersion("1.3")
+if err != nil {
+    // Handle version not being parsable.
+}
+// Check if the version meets the constraints. The a variable will be true.
+a := c.Check(v)
+```
+
+### Basic Comparisons
+
+There are two elements to the comparisons. First, a comparison string is a list
+of space or comma separated AND comparisons. These are then separated by || (OR)
+comparisons. For example, `">= 1.2 < 3.0.0 || >= 4.2.3"` is looking for a
+comparison that's greater than or equal to 1.2 and less than 3.0.0 or is
+greater than or equal to 4.2.3.
+
+The basic comparisons are:
+
+* `=`: equal (aliased to no operator)
+* `!=`: not equal
+* `>`: greater than
+* `<`: less than
+* `>=`: greater than or equal to
+* `<=`: less than or equal to
+
+### Working With Prerelease Versions
+
+Pre-releases, for those not familiar with them, are used for software releases
+prior to stable or generally available releases. Examples of prereleases include
+development, alpha, beta, and release candidate releases. A prerelease may be
+a version such as `1.2.3-beta.1` while the stable release would be `1.2.3`. In the
+order of precedence, prereleases come before their associated releases. In this
+example `1.2.3-beta.1 < 1.2.3`.
+
+According to the Semantic Version specification prereleases may not be
+API compliant with their release counterpart. It says,
+
+> A pre-release version indicates that the version is unstable and might not satisfy the intended compatibility requirements as denoted by its associated normal version.
+
+SemVer comparisons using constraints without a prerelease comparator will skip
+prerelease versions. For example, `>=1.2.3` will skip prereleases when looking
+at a list of releases while `>=1.2.3-0` will evaluate and find prereleases.
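+
+A minimal sketch of the behaviour just described (again assuming the v3 module
+path):
+
+```go
+package main
+
+import (
+    "fmt"
+
+    "github.com/Masterminds/semver/v3"
+)
+
+func main() {
+    beta, _ := semver.NewVersion("1.3.0-beta.1")
+
+    released, _ := semver.NewConstraint(">=1.2.3")
+    withPre, _ := semver.NewConstraint(">=1.2.3-0")
+
+    fmt.Println(released.Check(beta)) // false: prereleases are skipped
+    fmt.Println(withPre.Check(beta))  // true: the -0 comparator opts in
+}
+```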
+
+The reason for the `0` as a pre-release version in the example comparison is
+because pre-releases can only contain ASCII alphanumerics and hyphens (along with
+`.` separators), per the spec. Sorting happens in ASCII sort order, again per the
+spec. The lowest character is a `0` in ASCII sort order
+(see an [ASCII Table](http://www.asciitable.com/)).
+
+Understanding ASCII sort ordering is important because A-Z comes before a-z. That
+means `>=1.2.3-BETA` will return `1.2.3-alpha`. What you might expect from case
+sensitivity doesn't apply here. This is due to ASCII sort ordering which is what
+the spec specifies.
+
+### Hyphen Range Comparisons
+
+There are multiple methods to handle ranges and the first is hyphen ranges.
+These look like:
+
+* `1.2 - 1.4.5` which is equivalent to `>= 1.2 <= 1.4.5`
+* `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5`
+
+### Wildcards In Comparisons
+
+The `x`, `X`, and `*` characters can be used as a wildcard character. This works
+for all comparison operators. When used on the `=` operator it falls
+back to the patch level comparison (see tilde below). For example,
+
+* `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
+* `>= 1.2.x` is equivalent to `>= 1.2.0`
+* `<= 2.x` is equivalent to `< 3`
+* `*` is equivalent to `>= 0.0.0`
+
+### Tilde Range Comparisons (Patch)
+
+The tilde (`~`) comparison operator is for patch level ranges when a minor
+version is specified and major level changes when the minor number is missing.
+For example,
+
+* `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0`
+* `~1` is equivalent to `>= 1, < 2`
+* `~2.3` is equivalent to `>= 2.3, < 2.4`
+* `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
+* `~1.x` is equivalent to `>= 1, < 2`
+
+### Caret Range Comparisons (Major)
+
+The caret (`^`) comparison operator is for major level changes once a stable
+(1.0.0) release has occurred. Prior to a 1.0.0 release the minor version acts
+as the API stability level. This is useful when comparing API versions, as a
+major change is API breaking. For example,
+
+* `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0`
+* `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0`
+* `^2.3` is equivalent to `>= 2.3, < 3`
+* `^2.x` is equivalent to `>= 2.0.0, < 3`
+* `^0.2.3` is equivalent to `>=0.2.3 <0.3.0`
+* `^0.2` is equivalent to `>=0.2.0 <0.3.0`
+* `^0.0.3` is equivalent to `>=0.0.3 <0.0.4`
+* `^0.0` is equivalent to `>=0.0.0 <0.1.0`
+* `^0` is equivalent to `>=0.0.0 <1.0.0`
+
+## Validation
+
+In addition to testing a version against a constraint, a version can be validated
+against a constraint. When validation fails a slice of errors containing why a
+version didn't meet the constraint is returned. For example,
+
+```go
+c, err := semver.NewConstraint("<= 1.2.3, >= 1.4")
+if err != nil {
+    // Handle constraint not being parseable.
+}
+
+v, err := semver.NewVersion("1.3")
+if err != nil {
+    // Handle version not being parseable.
+}
+
+// Validate a version against a constraint.
+a, msgs := c.Validate(v)
+// a is false
+for _, m := range msgs {
+    fmt.Println(m)
+
+    // Loops over the errors which would read
+    // "1.3 is greater than 1.2.3"
+    // "1.3 is less than 1.4"
+}
+```
+
+## Contribute
+
+If you find an issue or want to contribute please file an [issue](https://github.com/Masterminds/semver/issues)
+or [create a pull request](https://github.com/Masterminds/semver/pulls).
diff --git a/vendor/github.com/Masterminds/semver/v3/collection.go b/vendor/github.com/Masterminds/semver/v3/collection.go new file mode 100644 index 00000000000..a78235895fd --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/collection.go @@ -0,0 +1,24 @@ +package semver + +// Collection is a collection of Version instances and implements the sort +// interface. See the sort package for more details. +// https://golang.org/pkg/sort/ +type Collection []*Version + +// Len returns the length of a collection. The number of Version instances +// on the slice. +func (c Collection) Len() int { + return len(c) +} + +// Less is needed for the sort interface to compare two Version objects on the +// slice. If checks if one is less than the other. +func (c Collection) Less(i, j int) bool { + return c[i].LessThan(c[j]) +} + +// Swap is needed for the sort interface to replace the Version objects +// at two different positions in the slice. +func (c Collection) Swap(i, j int) { + c[i], c[j] = c[j], c[i] +} diff --git a/vendor/github.com/Masterminds/semver/v3/constraints.go b/vendor/github.com/Masterminds/semver/v3/constraints.go new file mode 100644 index 00000000000..547613f044f --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/constraints.go @@ -0,0 +1,568 @@ +package semver + +import ( + "bytes" + "errors" + "fmt" + "regexp" + "strings" +) + +// Constraints is one or more constraint that a semantic version can be +// checked against. +type Constraints struct { + constraints [][]*constraint +} + +// NewConstraint returns a Constraints instance that a Version instance can +// be checked against. If there is a parse error it will be returned. +func NewConstraint(c string) (*Constraints, error) { + + // Rewrite - ranges into a comparison operation. + c = rewriteRange(c) + + ors := strings.Split(c, "||") + or := make([][]*constraint, len(ors)) + for k, v := range ors { + + // TODO: Find a way to validate and fetch all the constraints in a simpler form + + // Validate the segment + if !validConstraintRegex.MatchString(v) { + return nil, fmt.Errorf("improper constraint: %s", v) + } + + cs := findConstraintRegex.FindAllString(v, -1) + if cs == nil { + cs = append(cs, v) + } + result := make([]*constraint, len(cs)) + for i, s := range cs { + pc, err := parseConstraint(s) + if err != nil { + return nil, err + } + + result[i] = pc + } + or[k] = result + } + + o := &Constraints{constraints: or} + return o, nil +} + +// Check tests if a version satisfies the constraints. +func (cs Constraints) Check(v *Version) bool { + // TODO(mattfarina): For v4 of this library consolidate the Check and Validate + // functions as the underlying functions make that possible now. + // loop over the ORs and check the inner ANDs + for _, o := range cs.constraints { + joy := true + for _, c := range o { + if check, _ := c.check(v); !check { + joy = false + break + } + } + + if joy { + return true + } + } + + return false +} + +// Validate checks if a version satisfies a constraint. If not a slice of +// reasons for the failure are returned in addition to a bool. +func (cs Constraints) Validate(v *Version) (bool, []error) { + // loop over the ORs and check the inner ANDs + var e []error + + // Capture the prerelease message only once. 
When it happens the first time
+    // this var is marked
+    var prerelease bool
+    for _, o := range cs.constraints {
+        joy := true
+        for _, c := range o {
+            // Before running the check handle the case where the version is
+            // a prerelease and the check is not searching for prereleases.
+            if c.con.pre == "" && v.pre != "" {
+                if !prerelease {
+                    em := fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+                    e = append(e, em)
+                    prerelease = true
+                }
+                joy = false
+
+            } else {
+
+                if _, err := c.check(v); err != nil {
+                    e = append(e, err)
+                    joy = false
+                }
+            }
+        }
+
+        if joy {
+            return true, []error{}
+        }
+    }
+
+    return false, e
+}
+
+func (cs Constraints) String() string {
+    buf := make([]string, len(cs.constraints))
+    var tmp bytes.Buffer
+
+    for k, v := range cs.constraints {
+        tmp.Reset()
+        vlen := len(v)
+        for kk, c := range v {
+            tmp.WriteString(c.string())
+
+            // Space separate the AND conditions
+            if vlen > 1 && kk < vlen-1 {
+                tmp.WriteString(" ")
+            }
+        }
+        buf[k] = tmp.String()
+    }
+
+    return strings.Join(buf, " || ")
+}
+
+var constraintOps map[string]cfunc
+var constraintRegex *regexp.Regexp
+var constraintRangeRegex *regexp.Regexp
+
+// Used to find individual constraints within a multi-constraint string
+var findConstraintRegex *regexp.Regexp
+
+// Used to validate that a segment of ANDs is valid
+var validConstraintRegex *regexp.Regexp
+
+const cvRegex string = `v?([0-9|x|X|\*]+)(\.[0-9|x|X|\*]+)?(\.[0-9|x|X|\*]+)?` +
+    `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
+    `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?`
+
+func init() {
+    constraintOps = map[string]cfunc{
+        "":   constraintTildeOrEqual,
+        "=":  constraintTildeOrEqual,
+        "!=": constraintNotEqual,
+        ">":  constraintGreaterThan,
+        "<":  constraintLessThan,
+        ">=": constraintGreaterThanEqual,
+        "=>": constraintGreaterThanEqual,
+        "<=": constraintLessThanEqual,
+        "=<": constraintLessThanEqual,
+        "~":  constraintTilde,
+        "~>": constraintTilde,
+        "^":  constraintCaret,
+    }
+
+    ops := `=||!=|>|<|>=|=>|<=|=<|~|~>|\^`
+
+    constraintRegex = regexp.MustCompile(fmt.Sprintf(
+        `^\s*(%s)\s*(%s)\s*$`,
+        ops,
+        cvRegex))
+
+    constraintRangeRegex = regexp.MustCompile(fmt.Sprintf(
+        `\s*(%s)\s+-\s+(%s)\s*`,
+        cvRegex, cvRegex))
+
+    findConstraintRegex = regexp.MustCompile(fmt.Sprintf(
+        `(%s)\s*(%s)`,
+        ops,
+        cvRegex))
+
+    validConstraintRegex = regexp.MustCompile(fmt.Sprintf(
+        `^(\s*(%s)\s*(%s)\s*\,?)+$`,
+        ops,
+        cvRegex))
+}
+
+// An individual constraint
+type constraint struct {
+    // The version used in the constraint check. For example, if a constraint
+    // is '<= 2.0.0', con holds a Version instance representing 2.0.0.
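+    // parseConstraint always populates con; an empty constraint string is
+    // treated as * (>=0.0.0) and stored here as 0.0.0 with dirty set.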
+ con *Version + + // The original parsed version (e.g., 4.x from != 4.x) + orig string + + // The original operator for the constraint + origfunc string + + // When an x is used as part of the version (e.g., 1.x) + minorDirty bool + dirty bool + patchDirty bool +} + +// Check if a version meets the constraint +func (c *constraint) check(v *Version) (bool, error) { + return constraintOps[c.origfunc](v, c) +} + +// String prints an individual constraint into a string +func (c *constraint) string() string { + return c.origfunc + c.orig +} + +type cfunc func(v *Version, c *constraint) (bool, error) + +func parseConstraint(c string) (*constraint, error) { + if len(c) > 0 { + m := constraintRegex.FindStringSubmatch(c) + if m == nil { + return nil, fmt.Errorf("improper constraint: %s", c) + } + + cs := &constraint{ + orig: m[2], + origfunc: m[1], + } + + ver := m[2] + minorDirty := false + patchDirty := false + dirty := false + if isX(m[3]) || m[3] == "" { + ver = "0.0.0" + dirty = true + } else if isX(strings.TrimPrefix(m[4], ".")) || m[4] == "" { + minorDirty = true + dirty = true + ver = fmt.Sprintf("%s.0.0%s", m[3], m[6]) + } else if isX(strings.TrimPrefix(m[5], ".")) || m[5] == "" { + dirty = true + patchDirty = true + ver = fmt.Sprintf("%s%s.0%s", m[3], m[4], m[6]) + } + + con, err := NewVersion(ver) + if err != nil { + + // The constraintRegex should catch any regex parsing errors. So, + // we should never get here. + return nil, errors.New("constraint Parser Error") + } + + cs.con = con + cs.minorDirty = minorDirty + cs.patchDirty = patchDirty + cs.dirty = dirty + + return cs, nil + } + + // The rest is the special case where an empty string was passed in which + // is equivalent to * or >=0.0.0 + con, err := StrictNewVersion("0.0.0") + if err != nil { + + // The constraintRegex should catch any regex parsing errors. So, + // we should never get here. + return nil, errors.New("constraint Parser Error") + } + + cs := &constraint{ + con: con, + orig: c, + origfunc: "", + minorDirty: false, + patchDirty: false, + dirty: true, + } + return cs, nil +} + +// Constraint functions +func constraintNotEqual(v *Version, c *constraint) (bool, error) { + if c.dirty { + + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + if c.con.Major() != v.Major() { + return true, nil + } + if c.con.Minor() != v.Minor() && !c.minorDirty { + return true, nil + } else if c.minorDirty { + return false, fmt.Errorf("%s is equal to %s", v, c.orig) + } else if c.con.Patch() != v.Patch() && !c.patchDirty { + return true, nil + } else if c.patchDirty { + // Need to handle prereleases if present + if v.Prerelease() != "" || c.con.Prerelease() != "" { + eq := comparePrerelease(v.Prerelease(), c.con.Prerelease()) != 0 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is equal to %s", v, c.orig) + } + return false, fmt.Errorf("%s is equal to %s", v, c.orig) + } + } + + eq := v.Equal(c.con) + if eq { + return false, fmt.Errorf("%s is equal to %s", v, c.orig) + } + + return true, nil +} + +func constraintGreaterThan(v *Version, c *constraint) (bool, error) { + + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. 
See issue 21 for
+    // more details.
+    if v.Prerelease() != "" && c.con.Prerelease() == "" {
+        return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+    }
+
+    var eq bool
+
+    if !c.dirty {
+        eq = v.Compare(c.con) == 1
+        if eq {
+            return true, nil
+        }
+        return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
+    }
+
+    if v.Major() > c.con.Major() {
+        return true, nil
+    } else if v.Major() < c.con.Major() {
+        return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
+    } else if c.minorDirty {
+        // This is a range case such as >11. When the version is something like
+        // 11.1.0 it is not > 11. For that we would need 12 or higher
+        return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
+    } else if c.patchDirty {
+        // This is for ranges such as >11.1. A version of 11.1.1 is not greater,
+        // while one of 11.2.1 is
+        eq = v.Minor() > c.con.Minor()
+        if eq {
+            return true, nil
+        }
+        return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
+    }
+
+    // If we have gotten here we are not comparing pre-releases and can use the
+    // Compare function to accomplish that.
+    eq = v.Compare(c.con) == 1
+    if eq {
+        return true, nil
+    }
+    return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
+}
+
+func constraintLessThan(v *Version, c *constraint) (bool, error) {
+    // If there is a pre-release on the version but the constraint isn't looking
+    // for them assume that pre-releases are not compatible. See issue 21 for
+    // more details.
+    if v.Prerelease() != "" && c.con.Prerelease() == "" {
+        return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+    }
+
+    eq := v.Compare(c.con) < 0
+    if eq {
+        return true, nil
+    }
+    return false, fmt.Errorf("%s is greater than or equal to %s", v, c.orig)
+}
+
+func constraintGreaterThanEqual(v *Version, c *constraint) (bool, error) {
+
+    // If there is a pre-release on the version but the constraint isn't looking
+    // for them assume that pre-releases are not compatible. See issue 21 for
+    // more details.
+    if v.Prerelease() != "" && c.con.Prerelease() == "" {
+        return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+    }
+
+    eq := v.Compare(c.con) >= 0
+    if eq {
+        return true, nil
+    }
+    return false, fmt.Errorf("%s is less than %s", v, c.orig)
+}
+
+func constraintLessThanEqual(v *Version, c *constraint) (bool, error) {
+    // If there is a pre-release on the version but the constraint isn't looking
+    // for them assume that pre-releases are not compatible. See issue 21 for
+    // more details.
+ if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + var eq bool + + if !c.dirty { + eq = v.Compare(c.con) <= 0 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is greater than %s", v, c.orig) + } + + if v.Major() > c.con.Major() { + return false, fmt.Errorf("%s is greater than %s", v, c.orig) + } else if v.Major() == c.con.Major() && v.Minor() > c.con.Minor() && !c.minorDirty { + return false, fmt.Errorf("%s is greater than %s", v, c.orig) + } + + return true, nil +} + +// ~*, ~>* --> >= 0.0.0 (any) +// ~2, ~2.x, ~2.x.x, ~>2, ~>2.x ~>2.x.x --> >=2.0.0, <3.0.0 +// ~2.0, ~2.0.x, ~>2.0, ~>2.0.x --> >=2.0.0, <2.1.0 +// ~1.2, ~1.2.x, ~>1.2, ~>1.2.x --> >=1.2.0, <1.3.0 +// ~1.2.3, ~>1.2.3 --> >=1.2.3, <1.3.0 +// ~1.2.0, ~>1.2.0 --> >=1.2.0, <1.3.0 +func constraintTilde(v *Version, c *constraint) (bool, error) { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + if v.LessThan(c.con) { + return false, fmt.Errorf("%s is less than %s", v, c.orig) + } + + // ~0.0.0 is a special case where all constraints are accepted. It's + // equivalent to >= 0.0.0. + if c.con.Major() == 0 && c.con.Minor() == 0 && c.con.Patch() == 0 && + !c.minorDirty && !c.patchDirty { + return true, nil + } + + if v.Major() != c.con.Major() { + return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig) + } + + if v.Minor() != c.con.Minor() && !c.minorDirty { + return false, fmt.Errorf("%s does not have same major and minor version as %s", v, c.orig) + } + + return true, nil +} + +// When there is a .x (dirty) status it automatically opts in to ~. Otherwise +// it's a straight = +func constraintTildeOrEqual(v *Version, c *constraint) (bool, error) { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + if c.dirty { + return constraintTilde(v, c) + } + + eq := v.Equal(c.con) + if eq { + return true, nil + } + + return false, fmt.Errorf("%s is not equal to %s", v, c.orig) +} + +// ^* --> (any) +// ^1.2.3 --> >=1.2.3 <2.0.0 +// ^1.2 --> >=1.2.0 <2.0.0 +// ^1 --> >=1.0.0 <2.0.0 +// ^0.2.3 --> >=0.2.3 <0.3.0 +// ^0.2 --> >=0.2.0 <0.3.0 +// ^0.0.3 --> >=0.0.3 <0.0.4 +// ^0.0 --> >=0.0.0 <0.1.0 +// ^0 --> >=0.0.0 <1.0.0 +func constraintCaret(v *Version, c *constraint) (bool, error) { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. 
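+    // (This prerelease guard is repeated in each comparator: a prerelease
+    // version can only satisfy a constraint that itself includes a prerelease.)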
+    if v.Prerelease() != "" && c.con.Prerelease() == "" {
+        return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+    }
+
+    // This less than handles prereleases
+    if v.LessThan(c.con) {
+        return false, fmt.Errorf("%s is less than %s", v, c.orig)
+    }
+
+    var eq bool
+
+    // ^ when the major > 0 is >=x.y.z < x+1
+    if c.con.Major() > 0 || c.minorDirty {
+
+        // ^ has to be within a major range for > 0. Everything less than was
+        // filtered out with the LessThan call above. This filters out those
+        // that are greater but not within the same major range.
+        eq = v.Major() == c.con.Major()
+        if eq {
+            return true, nil
+        }
+        return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig)
+    }
+
+    // ^ when the major is 0 and minor > 0 is >=0.y.z < 0.y+1
+    if c.con.Major() == 0 && v.Major() > 0 {
+        return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig)
+    }
+    // If the con Minor is > 0 it is not dirty
+    if c.con.Minor() > 0 || c.patchDirty {
+        eq = v.Minor() == c.con.Minor()
+        if eq {
+            return true, nil
+        }
+        return false, fmt.Errorf("%s does not have same minor version as %s. Expected minor versions to match when constraint major version is 0", v, c.orig)
+    }
+
+    // At this point the major is 0 and the minor is 0 and not dirty. The patch
+    // is not dirty so we need to check if they are equal. If they are not equal,
+    // the constraint is not satisfied and an error is returned.
+    eq = c.con.Patch() == v.Patch()
+    if eq {
+        return true, nil
+    }
+    return false, fmt.Errorf("%s does not equal %s. Expect version and constraint to equal when major and minor versions are 0", v, c.orig)
+}
+
+func isX(x string) bool {
+    switch x {
+    case "x", "*", "X":
+        return true
+    default:
+        return false
+    }
+}
+
+func rewriteRange(i string) string {
+    m := constraintRangeRegex.FindAllStringSubmatch(i, -1)
+    if m == nil {
+        return i
+    }
+    o := i
+    for _, v := range m {
+        t := fmt.Sprintf(">= %s, <= %s", v[1], v[11])
+        o = strings.Replace(o, v[0], t, 1)
+    }
+
+    return o
+}
diff --git a/vendor/github.com/Masterminds/semver/v3/doc.go b/vendor/github.com/Masterminds/semver/v3/doc.go
new file mode 100644
index 00000000000..391aa46b76d
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/doc.go
@@ -0,0 +1,184 @@
+/*
+Package semver provides the ability to work with Semantic Versions (http://semver.org) in Go.
+
+Specifically it provides the ability to:
+
+    * Parse semantic versions
+    * Sort semantic versions
+    * Check if a semantic version fits within a set of constraints
+    * Optionally work with a `v` prefix
+
+Parsing Semantic Versions
+
+There are two functions that can parse semantic versions. The `StrictNewVersion`
+function only parses valid version 2 semantic versions as outlined in the
+specification. The `NewVersion` function attempts to coerce a version into a
+semantic version and parse it. For example, if there is a leading v or a version
+listed without all 3 parts (e.g. 1.2) it will attempt to coerce it into a valid
+semantic version (e.g., 1.2.0). In both cases a `Version` object is returned
+that can be sorted, compared, and used in constraints.
+
+When parsing a version an optional error can be returned if there is an issue
+parsing the version. For example,
+
+    v, err := semver.NewVersion("1.2.3-beta.1+b345")
+
+The version object has methods to get the parts of the version, compare it to
+other versions, convert the version back into a string, and get the original
+string. For more details please see the documentation
+
+For more details please see the documentation
+at https://godoc.org/github.com/Masterminds/semver.
+
+Sorting Semantic Versions
+
+A set of versions can be sorted using the `sort` package from the standard library.
+For example,
+
+    raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",}
+    vs := make([]*semver.Version, len(raw))
+    for i, r := range raw {
+        v, err := semver.NewVersion(r)
+        if err != nil {
+            t.Errorf("Error parsing version: %s", err)
+        }
+
+        vs[i] = v
+    }
+
+    sort.Sort(semver.Collection(vs))
+
+Checking Version Constraints and Comparing Versions
+
+There are two methods for comparing versions. One uses comparison methods on
+`Version` instances and the other is using Constraints. There are some important
+differences to note between these two methods of comparison.
+
+1. When two versions are compared using functions such as `Compare`, `LessThan`,
+   and others it will follow the specification and always include prereleases
+   within the comparison. It will provide an answer that is valid with the
+   comparison spec section at https://semver.org/#spec-item-11
+2. When constraint checking is used for checks or validation it will follow a
+   different set of rules that are common for ranges with tools like npm/js
+   and Rust/Cargo. This includes considering prereleases to be invalid if the
+   range does not include one. If you want to have it include pre-releases a
+   simple solution is to include `-0` in your range.
+3. Constraint ranges can have some complex rules including the shorthand use of
+   ~ and ^. For more details on those see the options below.
+
+There are differences between the two methods of checking versions because the
+comparison methods on `Version` follow the specification while comparison ranges
+are not part of the specification. Different packages and tools have taken it
+upon themselves to come up with range rules. This has resulted in differences.
+For example, npm/js and Cargo/Rust follow similar patterns while PHP has a
+different pattern for ^. The comparison features in this package follow the
+npm/js and Cargo/Rust lead because applications using it have followed similar
+patterns with their versions.
+
+Checking a version against version constraints is one of the most featureful
+parts of the package.
+
+    c, err := semver.NewConstraint(">= 1.2.3")
+    if err != nil {
+        // Handle constraint not being parsable.
+    }
+
+    v, err := semver.NewVersion("1.3")
+    if err != nil {
+        // Handle version not being parsable.
+    }
+    // Check if the version meets the constraints. The `a` variable will be true.
+    a := c.Check(v)
+
+Basic Comparisons
+
+There are two elements to the comparisons. First, a comparison string is a list
+of comma or space separated AND comparisons. These are then separated by || (OR)
+comparisons. For example, `">= 1.2 < 3.0.0 || >= 4.2.3"` is looking for a
+comparison that's greater than or equal to 1.2 and less than 3.0.0 or is
+greater than or equal to 4.2.3. This can also be written as
+`">= 1.2, < 3.0.0 || >= 4.2.3"`
+
+The basic comparisons are:
+
+  * `=`: equal (aliased to no operator)
+  * `!=`: not equal
+  * `>`: greater than
+  * `<`: less than
+  * `>=`: greater than or equal to
+  * `<=`: less than or equal to
+
+Hyphen Range Comparisons
+
+There are multiple methods to handle ranges and the first is hyphen ranges.
+These look like:
+
+  * `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5`
+  * `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5`
+
+Wildcards In Comparisons
+
+The `x`, `X`, and `*` characters can be used as a wildcard character.
+This works
+for all comparison operators. When used on the `=` operator it falls
+back to the tilde operation. For example,
+
+  * `1.2.x` is equivalent to `>= 1.2.0 < 1.3.0`
+  * `>= 1.2.x` is equivalent to `>= 1.2.0`
+  * `<= 2.x` is equivalent to `< 3`
+  * `*` is equivalent to `>= 0.0.0`
+
+Tilde Range Comparisons (Patch)
+
+The tilde (`~`) comparison operator is for patch level ranges when a minor
+version is specified and major level changes when the minor number is missing.
+For example,
+
+  * `~1.2.3` is equivalent to `>= 1.2.3 < 1.3.0`
+  * `~1` is equivalent to `>= 1, < 2`
+  * `~2.3` is equivalent to `>= 2.3 < 2.4`
+  * `~1.2.x` is equivalent to `>= 1.2.0 < 1.3.0`
+  * `~1.x` is equivalent to `>= 1 < 2`
+
+Caret Range Comparisons (Major)
+
+The caret (`^`) comparison operator is for major level changes once a stable
+(1.0.0) release has occurred. Prior to a 1.0.0 release the minor version acts
+as the API stability level. This is useful when comparing API versions, as a
+major change is API breaking. For example,
+
+  * `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0`
+  * `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0`
+  * `^2.3` is equivalent to `>= 2.3, < 3`
+  * `^2.x` is equivalent to `>= 2.0.0, < 3`
+  * `^0.2.3` is equivalent to `>=0.2.3 <0.3.0`
+  * `^0.2` is equivalent to `>=0.2.0 <0.3.0`
+  * `^0.0.3` is equivalent to `>=0.0.3 <0.0.4`
+  * `^0.0` is equivalent to `>=0.0.0 <0.1.0`
+  * `^0` is equivalent to `>=0.0.0 <1.0.0`
+
+Validation
+
+In addition to testing a version against a constraint, a version can be validated
+against a constraint. When validation fails, a slice of errors is returned
+explaining why the version didn't meet the constraint. For example,
+
+    c, err := semver.NewConstraint("<= 1.2.3, >= 1.4")
+    if err != nil {
+        // Handle constraint not being parseable.
+    }
+
+    v, err := semver.NewVersion("1.3")
+    if err != nil {
+        // Handle version not being parseable.
+    }
+
+    // Validate a version against a constraint.
+    a, msgs := c.Validate(v)
+    // a is false
+    for _, m := range msgs {
+        fmt.Println(m)
+
+        // Loops over the errors which would read
+        // "1.3 is greater than 1.2.3"
+        // "1.3 is less than 1.4"
+    }
+*/
+package semver
diff --git a/vendor/github.com/Masterminds/semver/v3/fuzz.go b/vendor/github.com/Masterminds/semver/v3/fuzz.go
new file mode 100644
index 00000000000..a242ad70587
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/fuzz.go
@@ -0,0 +1,22 @@
+// +build gofuzz
+
+package semver
+
+func Fuzz(data []byte) int {
+	d := string(data)
+
+	// Test NewVersion
+	_, _ = NewVersion(d)
+
+	// Test StrictNewVersion
+	_, _ = StrictNewVersion(d)
+
+	// Test NewConstraint
+	_, _ = NewConstraint(d)
+
+	// The return value should be 0 normally, 1 if the priority in future tests
+	// should be increased, and -1 if future tests should skip passing in that
+	// data. We do not have a reason to change priority so 0 is always returned.
+	// There are example tests that do this.
+	return 0
+}
diff --git a/vendor/github.com/Masterminds/semver/v3/go.mod b/vendor/github.com/Masterminds/semver/v3/go.mod
new file mode 100644
index 00000000000..658233c8f01
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/go.mod
@@ -0,0 +1,3 @@
+module github.com/Masterminds/semver/v3
+
+go 1.12
diff --git a/vendor/github.com/Masterminds/semver/v3/version.go b/vendor/github.com/Masterminds/semver/v3/version.go
new file mode 100644
index 00000000000..d6b9cda3eeb
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/version.go
@@ -0,0 +1,606 @@
+package semver
+
+import (
+	"bytes"
+	"database/sql/driver"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+// The compiled version of the regex created at init() is cached here so it
+// only needs to be created once.
+var versionRegex *regexp.Regexp
+
+var (
+	// ErrInvalidSemVer is returned when a version is found to be invalid
+	// while being parsed.
+	ErrInvalidSemVer = errors.New("Invalid Semantic Version")
+
+	// ErrEmptyString is returned when an empty string is passed in for parsing.
+	ErrEmptyString = errors.New("Version string empty")
+
+	// ErrInvalidCharacters is returned when invalid characters are found as
+	// part of a version
+	ErrInvalidCharacters = errors.New("Invalid characters in version")
+
+	// ErrSegmentStartsZero is returned when a version segment starts with 0.
+	// This is invalid in SemVer.
+	ErrSegmentStartsZero = errors.New("Version segment starts with 0")
+
+	// ErrInvalidMetadata is returned when the metadata is an invalid format
+	ErrInvalidMetadata = errors.New("Invalid Metadata string")
+
+	// ErrInvalidPrerelease is returned when the pre-release is an invalid format
+	ErrInvalidPrerelease = errors.New("Invalid Prerelease string")
+)
+
+// semVerRegex is the regular expression used to parse a semantic version.
+const semVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` +
+	`(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
+	`(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?`
+
+// Version represents a single semantic version.
+type Version struct {
+	major, minor, patch uint64
+	pre                 string
+	metadata            string
+	original            string
+}
+
+func init() {
+	versionRegex = regexp.MustCompile("^" + semVerRegex + "$")
+}
+
+const num string = "0123456789"
+const allowed string = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-" + num
+
+// StrictNewVersion parses a given version and returns an instance of Version or
+// an error if unable to parse the version. Only parses valid semantic versions.
+// Performs checking that can find errors within the version.
+// If you want to coerce a version, such as 1 or 1.2, and parse it as the 1.x
+// releases of semver did, use the NewVersion() function.
+func StrictNewVersion(v string) (*Version, error) {
+	// Parsing here does not use RegEx in order to increase performance and reduce
+	// allocations.
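+	//
+	// The approach below: split on ".", peel the build metadata (after "+")
+	// and prerelease (after "-") off the final part, then validate and parse
+	// the three numeric segments.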
+ + if len(v) == 0 { + return nil, ErrEmptyString + } + + // Split the parts into [0]major, [1]minor, and [2]patch,prerelease,build + parts := strings.SplitN(v, ".", 3) + if len(parts) != 3 { + return nil, ErrInvalidSemVer + } + + sv := &Version{ + original: v, + } + + // check for prerelease or build metadata + var extra []string + if strings.ContainsAny(parts[2], "-+") { + // Start with the build metadata first as it needs to be on the right + extra = strings.SplitN(parts[2], "+", 2) + if len(extra) > 1 { + // build metadata found + sv.metadata = extra[1] + parts[2] = extra[0] + } + + extra = strings.SplitN(parts[2], "-", 2) + if len(extra) > 1 { + // prerelease found + sv.pre = extra[1] + parts[2] = extra[0] + } + } + + // Validate the number segments are valid. This includes only having positive + // numbers and no leading 0's. + for _, p := range parts { + if !containsOnly(p, num) { + return nil, ErrInvalidCharacters + } + + if len(p) > 1 && p[0] == '0' { + return nil, ErrSegmentStartsZero + } + } + + // Extract the major, minor, and patch elements onto the returned Version + var err error + sv.major, err = strconv.ParseUint(parts[0], 10, 64) + if err != nil { + return nil, err + } + + sv.minor, err = strconv.ParseUint(parts[1], 10, 64) + if err != nil { + return nil, err + } + + sv.patch, err = strconv.ParseUint(parts[2], 10, 64) + if err != nil { + return nil, err + } + + // No prerelease or build metadata found so returning now as a fastpath. + if sv.pre == "" && sv.metadata == "" { + return sv, nil + } + + if sv.pre != "" { + if err = validatePrerelease(sv.pre); err != nil { + return nil, err + } + } + + if sv.metadata != "" { + if err = validateMetadata(sv.metadata); err != nil { + return nil, err + } + } + + return sv, nil +} + +// NewVersion parses a given version and returns an instance of Version or +// an error if unable to parse the version. If the version is SemVer-ish it +// attempts to convert it to SemVer. If you want to validate it was a strict +// semantic version at parse time see StrictNewVersion(). +func NewVersion(v string) (*Version, error) { + m := versionRegex.FindStringSubmatch(v) + if m == nil { + return nil, ErrInvalidSemVer + } + + sv := &Version{ + metadata: m[8], + pre: m[5], + original: v, + } + + var err error + sv.major, err = strconv.ParseUint(m[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + + if m[2] != "" { + sv.minor, err = strconv.ParseUint(strings.TrimPrefix(m[2], "."), 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + } else { + sv.minor = 0 + } + + if m[3] != "" { + sv.patch, err = strconv.ParseUint(strings.TrimPrefix(m[3], "."), 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + } else { + sv.patch = 0 + } + + // Perform some basic due diligence on the extra parts to ensure they are + // valid. + + if sv.pre != "" { + if err = validatePrerelease(sv.pre); err != nil { + return nil, err + } + } + + if sv.metadata != "" { + if err = validateMetadata(sv.metadata); err != nil { + return nil, err + } + } + + return sv, nil +} + +// MustParse parses a given version and panics on error. +func MustParse(v string) *Version { + sv, err := NewVersion(v) + if err != nil { + panic(err) + } + return sv +} + +// String converts a Version object to a string. +// Note, if the original version contained a leading v this version will not. +// See the Original() method to retrieve the original value. 
Semantic Versions +// don't contain a leading v per the spec. Instead it's optional on +// implementation. +func (v Version) String() string { + var buf bytes.Buffer + + fmt.Fprintf(&buf, "%d.%d.%d", v.major, v.minor, v.patch) + if v.pre != "" { + fmt.Fprintf(&buf, "-%s", v.pre) + } + if v.metadata != "" { + fmt.Fprintf(&buf, "+%s", v.metadata) + } + + return buf.String() +} + +// Original returns the original value passed in to be parsed. +func (v *Version) Original() string { + return v.original +} + +// Major returns the major version. +func (v Version) Major() uint64 { + return v.major +} + +// Minor returns the minor version. +func (v Version) Minor() uint64 { + return v.minor +} + +// Patch returns the patch version. +func (v Version) Patch() uint64 { + return v.patch +} + +// Prerelease returns the pre-release version. +func (v Version) Prerelease() string { + return v.pre +} + +// Metadata returns the metadata on the version. +func (v Version) Metadata() string { + return v.metadata +} + +// originalVPrefix returns the original 'v' prefix if any. +func (v Version) originalVPrefix() string { + + // Note, only lowercase v is supported as a prefix by the parser. + if v.original != "" && v.original[:1] == "v" { + return v.original[:1] + } + return "" +} + +// IncPatch produces the next patch version. +// If the current version does not have prerelease/metadata information, +// it unsets metadata and prerelease values, increments patch number. +// If the current version has any of prerelease or metadata information, +// it unsets both values and keeps current patch value +func (v Version) IncPatch() Version { + vNext := v + // according to http://semver.org/#spec-item-9 + // Pre-release versions have a lower precedence than the associated normal version. + // according to http://semver.org/#spec-item-10 + // Build metadata SHOULD be ignored when determining version precedence. + if v.pre != "" { + vNext.metadata = "" + vNext.pre = "" + } else { + vNext.metadata = "" + vNext.pre = "" + vNext.patch = v.patch + 1 + } + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// IncMinor produces the next minor version. +// Sets patch to 0. +// Increments minor number. +// Unsets metadata. +// Unsets prerelease status. +func (v Version) IncMinor() Version { + vNext := v + vNext.metadata = "" + vNext.pre = "" + vNext.patch = 0 + vNext.minor = v.minor + 1 + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// IncMajor produces the next major version. +// Sets patch to 0. +// Sets minor to 0. +// Increments major number. +// Unsets metadata. +// Unsets prerelease status. +func (v Version) IncMajor() Version { + vNext := v + vNext.metadata = "" + vNext.pre = "" + vNext.patch = 0 + vNext.minor = 0 + vNext.major = v.major + 1 + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// SetPrerelease defines the prerelease value. +// Value must not include the required 'hyphen' prefix. +func (v Version) SetPrerelease(prerelease string) (Version, error) { + vNext := v + if len(prerelease) > 0 { + if err := validatePrerelease(prerelease); err != nil { + return vNext, err + } + } + vNext.pre = prerelease + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext, nil +} + +// SetMetadata defines metadata value. +// Value must not include the required 'plus' prefix. 
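+// For example, setting metadata "b345" on 1.2.3 produces a version that
+// String() renders as "1.2.3+b345".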
+func (v Version) SetMetadata(metadata string) (Version, error) { + vNext := v + if len(metadata) > 0 { + if err := validateMetadata(metadata); err != nil { + return vNext, err + } + } + vNext.metadata = metadata + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext, nil +} + +// LessThan tests if one version is less than another one. +func (v *Version) LessThan(o *Version) bool { + return v.Compare(o) < 0 +} + +// GreaterThan tests if one version is greater than another one. +func (v *Version) GreaterThan(o *Version) bool { + return v.Compare(o) > 0 +} + +// Equal tests if two versions are equal to each other. +// Note, versions can be equal with different metadata since metadata +// is not considered part of the comparable version. +func (v *Version) Equal(o *Version) bool { + return v.Compare(o) == 0 +} + +// Compare compares this version to another one. It returns -1, 0, or 1 if +// the version smaller, equal, or larger than the other version. +// +// Versions are compared by X.Y.Z. Build metadata is ignored. Prerelease is +// lower than the version without a prerelease. Compare always takes into account +// prereleases. If you want to work with ranges using typical range syntaxes that +// skip prereleases if the range is not looking for them use constraints. +func (v *Version) Compare(o *Version) int { + // Compare the major, minor, and patch version for differences. If a + // difference is found return the comparison. + if d := compareSegment(v.Major(), o.Major()); d != 0 { + return d + } + if d := compareSegment(v.Minor(), o.Minor()); d != 0 { + return d + } + if d := compareSegment(v.Patch(), o.Patch()); d != 0 { + return d + } + + // At this point the major, minor, and patch versions are the same. + ps := v.pre + po := o.Prerelease() + + if ps == "" && po == "" { + return 0 + } + if ps == "" { + return 1 + } + if po == "" { + return -1 + } + + return comparePrerelease(ps, po) +} + +// UnmarshalJSON implements JSON.Unmarshaler interface. +func (v *Version) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + temp, err := NewVersion(s) + if err != nil { + return err + } + v.major = temp.major + v.minor = temp.minor + v.patch = temp.patch + v.pre = temp.pre + v.metadata = temp.metadata + v.original = temp.original + return nil +} + +// MarshalJSON implements JSON.Marshaler interface. +func (v Version) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +// Scan implements the SQL.Scanner interface. +func (v *Version) Scan(value interface{}) error { + var s string + s, _ = value.(string) + temp, err := NewVersion(s) + if err != nil { + return err + } + v.major = temp.major + v.minor = temp.minor + v.patch = temp.patch + v.pre = temp.pre + v.metadata = temp.metadata + v.original = temp.original + return nil +} + +// Value implements the Driver.Valuer interface. +func (v Version) Value() (driver.Value, error) { + return v.String(), nil +} + +func compareSegment(v, o uint64) int { + if v < o { + return -1 + } + if v > o { + return 1 + } + + return 0 +} + +func comparePrerelease(v, o string) int { + + // split the prelease versions by their part. The separator, per the spec, + // is a . + sparts := strings.Split(v, ".") + oparts := strings.Split(o, ".") + + // Find the longer length of the parts to know how many loop iterations to + // go through. 
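+	// Per spec item 11, when all shared identifiers are equal, the prerelease
+	// with more identifiers has the higher precedence (e.g. "alpha.1" > "alpha").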
+ slen := len(sparts) + olen := len(oparts) + + l := slen + if olen > slen { + l = olen + } + + // Iterate over each part of the prereleases to compare the differences. + for i := 0; i < l; i++ { + // Since the lentgh of the parts can be different we need to create + // a placeholder. This is to avoid out of bounds issues. + stemp := "" + if i < slen { + stemp = sparts[i] + } + + otemp := "" + if i < olen { + otemp = oparts[i] + } + + d := comparePrePart(stemp, otemp) + if d != 0 { + return d + } + } + + // Reaching here means two versions are of equal value but have different + // metadata (the part following a +). They are not identical in string form + // but the version comparison finds them to be equal. + return 0 +} + +func comparePrePart(s, o string) int { + // Fastpath if they are equal + if s == o { + return 0 + } + + // When s or o are empty we can use the other in an attempt to determine + // the response. + if s == "" { + if o != "" { + return -1 + } + return 1 + } + + if o == "" { + if s != "" { + return 1 + } + return -1 + } + + // When comparing strings "99" is greater than "103". To handle + // cases like this we need to detect numbers and compare them. According + // to the semver spec, numbers are always positive. If there is a - at the + // start like -99 this is to be evaluated as an alphanum. numbers always + // have precedence over alphanum. Parsing as Uints because negative numbers + // are ignored. + + oi, n1 := strconv.ParseUint(o, 10, 64) + si, n2 := strconv.ParseUint(s, 10, 64) + + // The case where both are strings compare the strings + if n1 != nil && n2 != nil { + if s > o { + return 1 + } + return -1 + } else if n1 != nil { + // o is a string and s is a number + return -1 + } else if n2 != nil { + // s is a string and o is a number + return 1 + } + // Both are numbers + if si > oi { + return 1 + } + return -1 + +} + +// Like strings.ContainsAny but does an only instead of any. +func containsOnly(s string, comp string) bool { + return strings.IndexFunc(s, func(r rune) bool { + return !strings.ContainsRune(comp, r) + }) == -1 +} + +// From the spec, "Identifiers MUST comprise only +// ASCII alphanumerics and hyphen [0-9A-Za-z-]. Identifiers MUST NOT be empty. +// Numeric identifiers MUST NOT include leading zeroes.". These segments can +// be dot separated. +func validatePrerelease(p string) error { + eparts := strings.Split(p, ".") + for _, p := range eparts { + if containsOnly(p, num) { + if len(p) > 1 && p[0] == '0' { + return ErrSegmentStartsZero + } + } else if !containsOnly(p, allowed) { + return ErrInvalidPrerelease + } + } + + return nil +} + +// From the spec, "Build metadata MAY be denoted by +// appending a plus sign and a series of dot separated identifiers immediately +// following the patch or pre-release version. Identifiers MUST comprise only +// ASCII alphanumerics and hyphen [0-9A-Za-z-]. Identifiers MUST NOT be empty." 
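+// Unlike numeric prerelease identifiers, leading zeroes are permitted here,
+// so "001" is valid build metadata.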
+func validateMetadata(m string) error { + eparts := strings.Split(m, ".") + for _, p := range eparts { + if !containsOnly(p, allowed) { + return ErrInvalidMetadata + } + } + return nil +} diff --git a/vendor/github.com/Masterminds/semver/version.go b/vendor/github.com/Masterminds/semver/version.go new file mode 100644 index 00000000000..400d4f93412 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/version.go @@ -0,0 +1,425 @@ +package semver + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "regexp" + "strconv" + "strings" +) + +// The compiled version of the regex created at init() is cached here so it +// only needs to be created once. +var versionRegex *regexp.Regexp +var validPrereleaseRegex *regexp.Regexp + +var ( + // ErrInvalidSemVer is returned a version is found to be invalid when + // being parsed. + ErrInvalidSemVer = errors.New("Invalid Semantic Version") + + // ErrInvalidMetadata is returned when the metadata is an invalid format + ErrInvalidMetadata = errors.New("Invalid Metadata string") + + // ErrInvalidPrerelease is returned when the pre-release is an invalid format + ErrInvalidPrerelease = errors.New("Invalid Prerelease string") +) + +// SemVerRegex is the regular expression used to parse a semantic version. +const SemVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` + + `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + + `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + +// ValidPrerelease is the regular expression which validates +// both prerelease and metadata values. +const ValidPrerelease string = `^([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*)$` + +// Version represents a single semantic version. +type Version struct { + major, minor, patch int64 + pre string + metadata string + original string +} + +func init() { + versionRegex = regexp.MustCompile("^" + SemVerRegex + "$") + validPrereleaseRegex = regexp.MustCompile(ValidPrerelease) +} + +// NewVersion parses a given version and returns an instance of Version or +// an error if unable to parse the version. +func NewVersion(v string) (*Version, error) { + m := versionRegex.FindStringSubmatch(v) + if m == nil { + return nil, ErrInvalidSemVer + } + + sv := &Version{ + metadata: m[8], + pre: m[5], + original: v, + } + + var temp int64 + temp, err := strconv.ParseInt(m[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + sv.major = temp + + if m[2] != "" { + temp, err = strconv.ParseInt(strings.TrimPrefix(m[2], "."), 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + sv.minor = temp + } else { + sv.minor = 0 + } + + if m[3] != "" { + temp, err = strconv.ParseInt(strings.TrimPrefix(m[3], "."), 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + sv.patch = temp + } else { + sv.patch = 0 + } + + return sv, nil +} + +// MustParse parses a given version and panics on error. +func MustParse(v string) *Version { + sv, err := NewVersion(v) + if err != nil { + panic(err) + } + return sv +} + +// String converts a Version object to a string. +// Note, if the original version contained a leading v this version will not. +// See the Original() method to retrieve the original value. Semantic Versions +// don't contain a leading v per the spec. Instead it's optional on +// implementation. 
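+// For example, a version parsed from "v1.2.3" is rendered as "1.2.3".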
+func (v *Version) String() string { + var buf bytes.Buffer + + fmt.Fprintf(&buf, "%d.%d.%d", v.major, v.minor, v.patch) + if v.pre != "" { + fmt.Fprintf(&buf, "-%s", v.pre) + } + if v.metadata != "" { + fmt.Fprintf(&buf, "+%s", v.metadata) + } + + return buf.String() +} + +// Original returns the original value passed in to be parsed. +func (v *Version) Original() string { + return v.original +} + +// Major returns the major version. +func (v *Version) Major() int64 { + return v.major +} + +// Minor returns the minor version. +func (v *Version) Minor() int64 { + return v.minor +} + +// Patch returns the patch version. +func (v *Version) Patch() int64 { + return v.patch +} + +// Prerelease returns the pre-release version. +func (v *Version) Prerelease() string { + return v.pre +} + +// Metadata returns the metadata on the version. +func (v *Version) Metadata() string { + return v.metadata +} + +// originalVPrefix returns the original 'v' prefix if any. +func (v *Version) originalVPrefix() string { + + // Note, only lowercase v is supported as a prefix by the parser. + if v.original != "" && v.original[:1] == "v" { + return v.original[:1] + } + return "" +} + +// IncPatch produces the next patch version. +// If the current version does not have prerelease/metadata information, +// it unsets metadata and prerelease values, increments patch number. +// If the current version has any of prerelease or metadata information, +// it unsets both values and keeps curent patch value +func (v Version) IncPatch() Version { + vNext := v + // according to http://semver.org/#spec-item-9 + // Pre-release versions have a lower precedence than the associated normal version. + // according to http://semver.org/#spec-item-10 + // Build metadata SHOULD be ignored when determining version precedence. + if v.pre != "" { + vNext.metadata = "" + vNext.pre = "" + } else { + vNext.metadata = "" + vNext.pre = "" + vNext.patch = v.patch + 1 + } + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// IncMinor produces the next minor version. +// Sets patch to 0. +// Increments minor number. +// Unsets metadata. +// Unsets prerelease status. +func (v Version) IncMinor() Version { + vNext := v + vNext.metadata = "" + vNext.pre = "" + vNext.patch = 0 + vNext.minor = v.minor + 1 + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// IncMajor produces the next major version. +// Sets patch to 0. +// Sets minor to 0. +// Increments major number. +// Unsets metadata. +// Unsets prerelease status. +func (v Version) IncMajor() Version { + vNext := v + vNext.metadata = "" + vNext.pre = "" + vNext.patch = 0 + vNext.minor = 0 + vNext.major = v.major + 1 + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// SetPrerelease defines the prerelease value. +// Value must not include the required 'hypen' prefix. +func (v Version) SetPrerelease(prerelease string) (Version, error) { + vNext := v + if len(prerelease) > 0 && !validPrereleaseRegex.MatchString(prerelease) { + return vNext, ErrInvalidPrerelease + } + vNext.pre = prerelease + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext, nil +} + +// SetMetadata defines metadata value. +// Value must not include the required 'plus' prefix. 
+func (v Version) SetMetadata(metadata string) (Version, error) { + vNext := v + if len(metadata) > 0 && !validPrereleaseRegex.MatchString(metadata) { + return vNext, ErrInvalidMetadata + } + vNext.metadata = metadata + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext, nil +} + +// LessThan tests if one version is less than another one. +func (v *Version) LessThan(o *Version) bool { + return v.Compare(o) < 0 +} + +// GreaterThan tests if one version is greater than another one. +func (v *Version) GreaterThan(o *Version) bool { + return v.Compare(o) > 0 +} + +// Equal tests if two versions are equal to each other. +// Note, versions can be equal with different metadata since metadata +// is not considered part of the comparable version. +func (v *Version) Equal(o *Version) bool { + return v.Compare(o) == 0 +} + +// Compare compares this version to another one. It returns -1, 0, or 1 if +// the version smaller, equal, or larger than the other version. +// +// Versions are compared by X.Y.Z. Build metadata is ignored. Prerelease is +// lower than the version without a prerelease. +func (v *Version) Compare(o *Version) int { + // Compare the major, minor, and patch version for differences. If a + // difference is found return the comparison. + if d := compareSegment(v.Major(), o.Major()); d != 0 { + return d + } + if d := compareSegment(v.Minor(), o.Minor()); d != 0 { + return d + } + if d := compareSegment(v.Patch(), o.Patch()); d != 0 { + return d + } + + // At this point the major, minor, and patch versions are the same. + ps := v.pre + po := o.Prerelease() + + if ps == "" && po == "" { + return 0 + } + if ps == "" { + return 1 + } + if po == "" { + return -1 + } + + return comparePrerelease(ps, po) +} + +// UnmarshalJSON implements JSON.Unmarshaler interface. +func (v *Version) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + temp, err := NewVersion(s) + if err != nil { + return err + } + v.major = temp.major + v.minor = temp.minor + v.patch = temp.patch + v.pre = temp.pre + v.metadata = temp.metadata + v.original = temp.original + temp = nil + return nil +} + +// MarshalJSON implements JSON.Marshaler interface. +func (v *Version) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +func compareSegment(v, o int64) int { + if v < o { + return -1 + } + if v > o { + return 1 + } + + return 0 +} + +func comparePrerelease(v, o string) int { + + // split the prelease versions by their part. The separator, per the spec, + // is a . + sparts := strings.Split(v, ".") + oparts := strings.Split(o, ".") + + // Find the longer length of the parts to know how many loop iterations to + // go through. + slen := len(sparts) + olen := len(oparts) + + l := slen + if olen > slen { + l = olen + } + + // Iterate over each part of the prereleases to compare the differences. + for i := 0; i < l; i++ { + // Since the lentgh of the parts can be different we need to create + // a placeholder. This is to avoid out of bounds issues. + stemp := "" + if i < slen { + stemp = sparts[i] + } + + otemp := "" + if i < olen { + otemp = oparts[i] + } + + d := comparePrePart(stemp, otemp) + if d != 0 { + return d + } + } + + // Reaching here means two versions are of equal value but have different + // metadata (the part following a +). They are not identical in string form + // but the version comparison finds them to be equal. 
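+	// For example, 1.0.0-alpha+001 and 1.0.0-alpha+002 compare as equal.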
+ return 0 +} + +func comparePrePart(s, o string) int { + // Fastpath if they are equal + if s == o { + return 0 + } + + // When s or o are empty we can use the other in an attempt to determine + // the response. + if s == "" { + if o != "" { + return -1 + } + return 1 + } + + if o == "" { + if s != "" { + return 1 + } + return -1 + } + + // When comparing strings "99" is greater than "103". To handle + // cases like this we need to detect numbers and compare them. According + // to the semver spec, numbers are always positive. If there is a - at the + // start like -99 this is to be evaluated as an alphanum. numbers always + // have precedence over alphanum. Parsing as Uints because negative numbers + // are ignored. + + oi, n1 := strconv.ParseUint(o, 10, 64) + si, n2 := strconv.ParseUint(s, 10, 64) + + // The case where both are strings compare the strings + if n1 != nil && n2 != nil { + if s > o { + return 1 + } + return -1 + } else if n1 != nil { + // o is a string and s is a number + return -1 + } else if n2 != nil { + // s is a string and o is a number + return 1 + } + // Both are numbers + if si > oi { + return 1 + } + return -1 + +} diff --git a/vendor/github.com/Masterminds/semver/version_fuzz.go b/vendor/github.com/Masterminds/semver/version_fuzz.go new file mode 100644 index 00000000000..b42bcd62b95 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/version_fuzz.go @@ -0,0 +1,10 @@ +// +build gofuzz + +package semver + +func Fuzz(data []byte) int { + if _, err := NewVersion(string(data)); err != nil { + return 0 + } + return 1 +} diff --git a/vendor/github.com/Masterminds/sprig/.gitignore b/vendor/github.com/Masterminds/sprig/.gitignore new file mode 100644 index 00000000000..5e3002f88f5 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/.gitignore @@ -0,0 +1,2 @@ +vendor/ +/.glide diff --git a/vendor/github.com/Masterminds/sprig/.travis.yml b/vendor/github.com/Masterminds/sprig/.travis.yml new file mode 100644 index 00000000000..b9da8b825bb --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/.travis.yml @@ -0,0 +1,26 @@ +language: go + +go: + - 1.9.x + - 1.10.x + - 1.11.x + - 1.12.x + - 1.13.x + - tip + +# Setting sudo access to false will let Travis CI use containers rather than +# VMs to run the tests. 
For more details see: +# - http://docs.travis-ci.com/user/workers/container-based-infrastructure/ +# - http://docs.travis-ci.com/user/workers/standard-infrastructure/ +sudo: false + +script: + - make setup test + +notifications: + webhooks: + urls: + - https://webhooks.gitter.im/e/06e3328629952dabe3e0 + on_success: change # options: [always|never|change] default: always + on_failure: always # options: [always|never|change] default: always + on_start: never # options: [always|never|change] default: always diff --git a/vendor/github.com/Masterminds/sprig/CHANGELOG.md b/vendor/github.com/Masterminds/sprig/CHANGELOG.md new file mode 100644 index 00000000000..6a79fbde469 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/CHANGELOG.md @@ -0,0 +1,282 @@ +# Changelog + +## Release 2.22.0 (2019-10-02) + +### Added + +- #173: Added getHostByName function to resolve dns names to ips (thanks @fcgravalos) +- #195: Added deepCopy function for use with dicts + +### Changed + +- Updated merge and mergeOverwrite documentation to explain copying and how to + use deepCopy with it + +## Release 2.21.0 (2019-09-18) + +### Added + +- #122: Added encryptAES/decryptAES functions (thanks @n0madic) +- #128: Added toDecimal support (thanks @Dean-Coakley) +- #169: Added list contcat (thanks @astorath) +- #174: Added deepEqual function (thanks @bonifaido) +- #170: Added url parse and join functions (thanks @astorath) + +### Changed + +- #171: Updated glide config for Google UUID to v1 and to add ranges to semver and testify + +### Fixed + +- #172: Fix semver wildcard example (thanks @piepmatz) +- #175: Fix dateInZone doc example (thanks @s3than) + +## Release 2.20.0 (2019-06-18) + +### Added + +- #164: Adding function to get unix epoch for a time (@mattfarina) +- #166: Adding tests for date_in_zone (@mattfarina) + +### Changed + +- #144: Fix function comments based on best practices from Effective Go (@CodeLingoTeam) +- #150: Handles pointer type for time.Time in "htmlDate" (@mapreal19) +- #161, #157, #160, #153, #158, #156, #155, #159, #152 documentation updates (@badeadan) + +### Fixed + +## Release 2.19.0 (2019-03-02) + +IMPORTANT: This release reverts a change from 2.18.0 + +In the previous release (2.18), we prematurely merged a partial change to the crypto functions that led to creating two sets of crypto functions (I blame @technosophos -- since that's me). This release rolls back that change, and does what was originally intended: It alters the existing crypto functions to use secure random. + +We debated whether this classifies as a change worthy of major revision, but given the proximity to the last release, we have decided that treating 2.18 as a faulty release is the correct course of action. We apologize for any inconvenience. + +### Changed + +- Fix substr panic 35fb796 (Alexey igrychev) +- Remove extra period 1eb7729 (Matthew Lorimor) +- Make random string functions use crypto by default 6ceff26 (Matthew Lorimor) +- README edits/fixes/suggestions 08fe136 (Lauri Apple) + + +## Release 2.18.0 (2019-02-12) + +### Added + +- Added mergeOverwrite function +- cryptographic functions that use secure random (see fe1de12) + +### Changed + +- Improve documentation of regexMatch function, resolves #139 90b89ce (Jan Tagscherer) +- Handle has for nil list 9c10885 (Daniel Cohen) +- Document behaviour of mergeOverwrite fe0dbe9 (Lukas Rieder) +- doc: adds missing documentation. 
4b871e6 (Fernandez Ludovic) +- Replace outdated goutils imports 01893d2 (Matthew Lorimor) +- Surface crypto secure random strings from goutils fe1de12 (Matthew Lorimor) +- Handle untyped nil values as paramters to string functions 2b2ec8f (Morten Torkildsen) + +### Fixed + +- Fix dict merge issue and provide mergeOverwrite .dst .src1 to overwrite from src -> dst 4c59c12 (Lukas Rieder) +- Fix substr var names and comments d581f80 (Dean Coakley) +- Fix substr documentation 2737203 (Dean Coakley) + +## Release 2.17.1 (2019-01-03) + +### Fixed + +The 2.17.0 release did not have a version pinned for xstrings, which caused compilation failures when xstrings < 1.2 was used. This adds the correct version string to glide.yaml. + +## Release 2.17.0 (2019-01-03) + +### Added + +- adds alder32sum function and test 6908fc2 (marshallford) +- Added kebabcase function ca331a1 (Ilyes512) + +### Changed + +- Update goutils to 1.1.0 4e1125d (Matt Butcher) + +### Fixed + +- Fix 'has' documentation e3f2a85 (dean-coakley) +- docs(dict): fix typo in pick example dc424f9 (Dustin Specker) +- fixes spelling errors... not sure how that happened 4cf188a (marshallford) + +## Release 2.16.0 (2018-08-13) + +### Added + +- add splitn function fccb0b0 (Helgi Þorbjörnsson) +- Add slice func df28ca7 (gongdo) +- Generate serial number a3bdffd (Cody Coons) +- Extract values of dict with values function df39312 (Lawrence Jones) + +### Changed + +- Modify panic message for list.slice ae38335 (gongdo) +- Minor improvement in code quality - Removed an unreachable piece of code at defaults.go#L26:6 - Resolve formatting issues. 5834241 (Abhishek Kashyap) +- Remove duplicated documentation 1d97af1 (Matthew Fisher) +- Test on go 1.11 49df809 (Helgi Þormar Þorbjörnsson) + +### Fixed + +- Fix file permissions c5f40b5 (gongdo) +- Fix example for buildCustomCert 7779e0d (Tin Lam) + +## Release 2.15.0 (2018-04-02) + +### Added + +- #68 and #69: Add json helpers to docs (thanks @arunvelsriram) +- #66: Add ternary function (thanks @binoculars) +- #67: Allow keys function to take multiple dicts (thanks @binoculars) +- #89: Added sha1sum to crypto function (thanks @benkeil) +- #81: Allow customizing Root CA that used by genSignedCert (thanks @chenzhiwei) +- #92: Add travis testing for go 1.10 +- #93: Adding appveyor config for windows testing + +### Changed + +- #90: Updating to more recent dependencies +- #73: replace satori/go.uuid with google/uuid (thanks @petterw) + +### Fixed + +- #76: Fixed documentation typos (thanks @Thiht) +- Fixed rounding issue on the `ago` function. Note, the removes support for Go 1.8 and older + +## Release 2.14.1 (2017-12-01) + +### Fixed + +- #60: Fix typo in function name documentation (thanks @neil-ca-moore) +- #61: Removing line with {{ due to blocking github pages genertion +- #64: Update the list functions to handle int, string, and other slices for compatibility + +## Release 2.14.0 (2017-10-06) + +This new version of Sprig adds a set of functions for generating and working with SSL certificates. 
+ +- `genCA` generates an SSL Certificate Authority +- `genSelfSignedCert` generates an SSL self-signed certificate +- `genSignedCert` generates an SSL certificate and key based on a given CA + +## Release 2.13.0 (2017-09-18) + +This release adds new functions, including: + +- `regexMatch`, `regexFindAll`, `regexFind`, `regexReplaceAll`, `regexReplaceAllLiteral`, and `regexSplit` to work with regular expressions +- `floor`, `ceil`, and `round` math functions +- `toDate` converts a string to a date +- `nindent` is just like `indent` but also prepends a new line +- `ago` returns the time from `time.Now` + +### Added + +- #40: Added basic regex functionality (thanks @alanquillin) +- #41: Added ceil floor and round functions (thanks @alanquillin) +- #48: Added toDate function (thanks @andreynering) +- #50: Added nindent function (thanks @binoculars) +- #46: Added ago function (thanks @slayer) + +### Changed + +- #51: Updated godocs to include new string functions (thanks @curtisallen) +- #49: Added ability to merge multiple dicts (thanks @binoculars) + +## Release 2.12.0 (2017-05-17) + +- `snakecase`, `camelcase`, and `shuffle` are three new string functions +- `fail` allows you to bail out of a template render when conditions are not met + +## Release 2.11.0 (2017-05-02) + +- Added `toJson` and `toPrettyJson` +- Added `merge` +- Refactored documentation + +## Release 2.10.0 (2017-03-15) + +- Added `semver` and `semverCompare` for Semantic Versions +- `list` replaces `tuple` +- Fixed issue with `join` +- Added `first`, `last`, `intial`, `rest`, `prepend`, `append`, `toString`, `toStrings`, `sortAlpha`, `reverse`, `coalesce`, `pluck`, `pick`, `compact`, `keys`, `omit`, `uniq`, `has`, `without` + +## Release 2.9.0 (2017-02-23) + +- Added `splitList` to split a list +- Added crypto functions of `genPrivateKey` and `derivePassword` + +## Release 2.8.0 (2016-12-21) + +- Added access to several path functions (`base`, `dir`, `clean`, `ext`, and `abs`) +- Added functions for _mutating_ dictionaries (`set`, `unset`, `hasKey`) + +## Release 2.7.0 (2016-12-01) + +- Added `sha256sum` to generate a hash of an input +- Added functions to convert a numeric or string to `int`, `int64`, `float64` + +## Release 2.6.0 (2016-10-03) + +- Added a `uuidv4` template function for generating UUIDs inside of a template. + +## Release 2.5.0 (2016-08-19) + +- New `trimSuffix`, `trimPrefix`, `hasSuffix`, and `hasPrefix` functions +- New aliases have been added for a few functions that didn't follow the naming conventions (`trimAll` and `abbrevBoth`) +- `trimall` and `abbrevboth` (notice the case) are deprecated and will be removed in 3.0.0 + +## Release 2.4.0 (2016-08-16) + +- Adds two functions: `until` and `untilStep` + +## Release 2.3.0 (2016-06-21) + +- cat: Concatenate strings with whitespace separators. +- replace: Replace parts of a string: `replace " " "-" "Me First"` renders "Me-First" +- plural: Format plurals: `len "foo" | plural "one foo" "many foos"` renders "many foos" +- indent: Indent blocks of text in a way that is sensitive to "\n" characters. + +## Release 2.2.0 (2016-04-21) + +- Added a `genPrivateKey` function (Thanks @bacongobbler) + +## Release 2.1.0 (2016-03-30) + +- `default` now prints the default value when it does not receive a value down the pipeline. It is much safer now to do `{{.Foo | default "bar"}}`. +- Added accessors for "hermetic" functions. These return only functions that, when given the same input, produce the same output. 
+ +## Release 2.0.0 (2016-03-29) + +Because we switched from `int` to `int64` as the return value for all integer math functions, the library's major version number has been incremented. + +- `min` complements `max` (formerly `biggest`) +- `empty` indicates that a value is the empty value for its type +- `tuple` creates a tuple inside of a template: `{{$t := tuple "a", "b" "c"}}` +- `dict` creates a dictionary inside of a template `{{$d := dict "key1" "val1" "key2" "val2"}}` +- Date formatters have been added for HTML dates (as used in `date` input fields) +- Integer math functions can convert from a number of types, including `string` (via `strconv.ParseInt`). + +## Release 1.2.0 (2016-02-01) + +- Added quote and squote +- Added b32enc and b32dec +- add now takes varargs +- biggest now takes varargs + +## Release 1.1.0 (2015-12-29) + +- Added #4: Added contains function. strings.Contains, but with the arguments + switched to simplify common pipelines. (thanks krancour) +- Added Travis-CI testing support + +## Release 1.0.0 (2015-12-23) + +- Initial release diff --git a/vendor/github.com/Masterminds/sprig/LICENSE.txt b/vendor/github.com/Masterminds/sprig/LICENSE.txt new file mode 100644 index 00000000000..5c95accc2e2 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/LICENSE.txt @@ -0,0 +1,20 @@ +Sprig +Copyright (C) 2013 Masterminds + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/Masterminds/sprig/Makefile b/vendor/github.com/Masterminds/sprig/Makefile new file mode 100644 index 00000000000..63a93fdf798 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/Makefile @@ -0,0 +1,13 @@ + +HAS_GLIDE := $(shell command -v glide;) + +.PHONY: test +test: + go test -v . + +.PHONY: setup +setup: +ifndef HAS_GLIDE + go get -u github.com/Masterminds/glide +endif + glide install diff --git a/vendor/github.com/Masterminds/sprig/README.md b/vendor/github.com/Masterminds/sprig/README.md new file mode 100644 index 00000000000..b70569585f8 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/README.md @@ -0,0 +1,78 @@ +# Sprig: Template functions for Go templates +[![Stability: Sustained](https://masterminds.github.io/stability/sustained.svg)](https://masterminds.github.io/stability/sustained.html) +[![Build Status](https://travis-ci.org/Masterminds/sprig.svg?branch=master)](https://travis-ci.org/Masterminds/sprig) + +The Go language comes with a [built-in template +language](http://golang.org/pkg/text/template/), but not +very many template functions. 
Sprig is a library that provides more than 100 commonly +used template functions. + +It is inspired by the template functions found in +[Twig](http://twig.sensiolabs.org/documentation) and in various +JavaScript libraries, such as [underscore.js](http://underscorejs.org/). + +## Usage + +**Template developers**: Please use Sprig's [function documentation](http://masterminds.github.io/sprig/) for +detailed instructions and code snippets for the >100 template functions available. + +**Go developers**: If you'd like to include Sprig as a library in your program, +our API documentation is available [at GoDoc.org](http://godoc.org/github.com/Masterminds/sprig). + +For standard usage, read on. + +### Load the Sprig library + +To load the Sprig `FuncMap`: + +```go + +import ( + "github.com/Masterminds/sprig" + "html/template" +) + +// This example illustrates that the FuncMap *must* be set before the +// templates themselves are loaded. +tpl := template.Must( + template.New("base").Funcs(sprig.FuncMap()).ParseGlob("*.html") +) + + +``` + +### Calling the functions inside of templates + +By convention, all functions are lowercase. This seems to follow the Go +idiom for template functions (as opposed to template methods, which are +TitleCase). For example, this: + +``` +{{ "hello!" | upper | repeat 5 }} +``` + +produces this: + +``` +HELLO!HELLO!HELLO!HELLO!HELLO! +``` + +## Principles Driving Our Function Selection + +We followed these principles to decide which functions to add and how to implement them: + +- Use template functions to build layout. The following + types of operations are within the domain of template functions: + - Formatting + - Layout + - Simple type conversions + - Utilities that assist in handling common formatting and layout needs (e.g. arithmetic) +- Template functions should not return errors unless there is no way to print + a sensible value. For example, converting a string to an integer should not + produce an error if conversion fails. Instead, it should display a default + value. +- Simple math is necessary for grid layouts, pagers, and so on. Complex math + (anything other than arithmetic) should be done outside of templates. +- Template functions only deal with the data passed into them. They never retrieve + data from a source. +- Finally, do not override core Go template functions. diff --git a/vendor/github.com/Masterminds/sprig/appveyor.yml b/vendor/github.com/Masterminds/sprig/appveyor.yml new file mode 100644 index 00000000000..d545a987a3b --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/appveyor.yml @@ -0,0 +1,26 @@ + +version: build-{build}.{branch} + +clone_folder: C:\gopath\src\github.com\Masterminds\sprig +shallow_clone: true + +environment: + GOPATH: C:\gopath + +platform: + - x64 + +install: + - go get -u github.com/Masterminds/glide + - set PATH=%GOPATH%\bin;%PATH% + - go version + - go env + +build_script: + - glide install + - go install ./... 
+ +test_script: + - go test -v + +deploy: off diff --git a/vendor/github.com/Masterminds/sprig/crypto.go b/vendor/github.com/Masterminds/sprig/crypto.go new file mode 100644 index 00000000000..7a418ba88d1 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/crypto.go @@ -0,0 +1,502 @@ +package sprig + +import ( + "bytes" + "crypto/aes" + "crypto/cipher" + "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/hmac" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/base64" + "encoding/binary" + "encoding/hex" + "encoding/pem" + "errors" + "fmt" + "io" + "hash/adler32" + "math/big" + "net" + "time" + + "github.com/google/uuid" + "golang.org/x/crypto/scrypt" +) + +func sha256sum(input string) string { + hash := sha256.Sum256([]byte(input)) + return hex.EncodeToString(hash[:]) +} + +func sha1sum(input string) string { + hash := sha1.Sum([]byte(input)) + return hex.EncodeToString(hash[:]) +} + +func adler32sum(input string) string { + hash := adler32.Checksum([]byte(input)) + return fmt.Sprintf("%d", hash) +} + +// uuidv4 provides a safe and secure UUID v4 implementation +func uuidv4() string { + return fmt.Sprintf("%s", uuid.New()) +} + +var master_password_seed = "com.lyndir.masterpassword" + +var password_type_templates = map[string][][]byte{ + "maximum": {[]byte("anoxxxxxxxxxxxxxxxxx"), []byte("axxxxxxxxxxxxxxxxxno")}, + "long": {[]byte("CvcvnoCvcvCvcv"), []byte("CvcvCvcvnoCvcv"), []byte("CvcvCvcvCvcvno"), []byte("CvccnoCvcvCvcv"), []byte("CvccCvcvnoCvcv"), + []byte("CvccCvcvCvcvno"), []byte("CvcvnoCvccCvcv"), []byte("CvcvCvccnoCvcv"), []byte("CvcvCvccCvcvno"), []byte("CvcvnoCvcvCvcc"), + []byte("CvcvCvcvnoCvcc"), []byte("CvcvCvcvCvccno"), []byte("CvccnoCvccCvcv"), []byte("CvccCvccnoCvcv"), []byte("CvccCvccCvcvno"), + []byte("CvcvnoCvccCvcc"), []byte("CvcvCvccnoCvcc"), []byte("CvcvCvccCvccno"), []byte("CvccnoCvcvCvcc"), []byte("CvccCvcvnoCvcc"), + []byte("CvccCvcvCvccno")}, + "medium": {[]byte("CvcnoCvc"), []byte("CvcCvcno")}, + "short": {[]byte("Cvcn")}, + "basic": {[]byte("aaanaaan"), []byte("aannaaan"), []byte("aaannaaa")}, + "pin": {[]byte("nnnn")}, +} + +var template_characters = map[byte]string{ + 'V': "AEIOU", + 'C': "BCDFGHJKLMNPQRSTVWXYZ", + 'v': "aeiou", + 'c': "bcdfghjklmnpqrstvwxyz", + 'A': "AEIOUBCDFGHJKLMNPQRSTVWXYZ", + 'a': "AEIOUaeiouBCDFGHJKLMNPQRSTVWXYZbcdfghjklmnpqrstvwxyz", + 'n': "0123456789", + 'o': "@&%?,=[]_:-+*$#!'^~;()/.", + 'x': "AEIOUaeiouBCDFGHJKLMNPQRSTVWXYZbcdfghjklmnpqrstvwxyz0123456789!@#$%^&*()", +} + +func derivePassword(counter uint32, password_type, password, user, site string) string { + var templates = password_type_templates[password_type] + if templates == nil { + return fmt.Sprintf("cannot find password template %s", password_type) + } + + var buffer bytes.Buffer + buffer.WriteString(master_password_seed) + binary.Write(&buffer, binary.BigEndian, uint32(len(user))) + buffer.WriteString(user) + + salt := buffer.Bytes() + key, err := scrypt.Key([]byte(password), salt, 32768, 8, 2, 64) + if err != nil { + return fmt.Sprintf("failed to derive password: %s", err) + } + + buffer.Truncate(len(master_password_seed)) + binary.Write(&buffer, binary.BigEndian, uint32(len(site))) + buffer.WriteString(site) + binary.Write(&buffer, binary.BigEndian, counter) + + var hmacv = hmac.New(sha256.New, key) + hmacv.Write(buffer.Bytes()) + var seed = hmacv.Sum(nil) + var temp = templates[int(seed[0])%len(templates)] + + buffer.Truncate(0) + for i, element := range temp { + pass_chars 
:= template_characters[element] + pass_char := pass_chars[int(seed[i+1])%len(pass_chars)] + buffer.WriteByte(pass_char) + } + + return buffer.String() +} + +func generatePrivateKey(typ string) string { + var priv interface{} + var err error + switch typ { + case "", "rsa": + // good enough for government work + priv, err = rsa.GenerateKey(rand.Reader, 4096) + case "dsa": + key := new(dsa.PrivateKey) + // again, good enough for government work + if err = dsa.GenerateParameters(&key.Parameters, rand.Reader, dsa.L2048N256); err != nil { + return fmt.Sprintf("failed to generate dsa params: %s", err) + } + err = dsa.GenerateKey(key, rand.Reader) + priv = key + case "ecdsa": + // again, good enough for government work + priv, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + default: + return "Unknown type " + typ + } + if err != nil { + return fmt.Sprintf("failed to generate private key: %s", err) + } + + return string(pem.EncodeToMemory(pemBlockForKey(priv))) +} + +type DSAKeyFormat struct { + Version int + P, Q, G, Y, X *big.Int +} + +func pemBlockForKey(priv interface{}) *pem.Block { + switch k := priv.(type) { + case *rsa.PrivateKey: + return &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(k)} + case *dsa.PrivateKey: + val := DSAKeyFormat{ + P: k.P, Q: k.Q, G: k.G, + Y: k.Y, X: k.X, + } + bytes, _ := asn1.Marshal(val) + return &pem.Block{Type: "DSA PRIVATE KEY", Bytes: bytes} + case *ecdsa.PrivateKey: + b, _ := x509.MarshalECPrivateKey(k) + return &pem.Block{Type: "EC PRIVATE KEY", Bytes: b} + default: + return nil + } +} + +type certificate struct { + Cert string + Key string +} + +func buildCustomCertificate(b64cert string, b64key string) (certificate, error) { + crt := certificate{} + + cert, err := base64.StdEncoding.DecodeString(b64cert) + if err != nil { + return crt, errors.New("unable to decode base64 certificate") + } + + key, err := base64.StdEncoding.DecodeString(b64key) + if err != nil { + return crt, errors.New("unable to decode base64 private key") + } + + decodedCert, _ := pem.Decode(cert) + if decodedCert == nil { + return crt, errors.New("unable to decode certificate") + } + _, err = x509.ParseCertificate(decodedCert.Bytes) + if err != nil { + return crt, fmt.Errorf( + "error parsing certificate: decodedCert.Bytes: %s", + err, + ) + } + + decodedKey, _ := pem.Decode(key) + if decodedKey == nil { + return crt, errors.New("unable to decode key") + } + _, err = x509.ParsePKCS1PrivateKey(decodedKey.Bytes) + if err != nil { + return crt, fmt.Errorf( + "error parsing prive key: decodedKey.Bytes: %s", + err, + ) + } + + crt.Cert = string(cert) + crt.Key = string(key) + + return crt, nil +} + +func generateCertificateAuthority( + cn string, + daysValid int, +) (certificate, error) { + ca := certificate{} + + template, err := getBaseCertTemplate(cn, nil, nil, daysValid) + if err != nil { + return ca, err + } + // Override KeyUsage and IsCA + template.KeyUsage = x509.KeyUsageKeyEncipherment | + x509.KeyUsageDigitalSignature | + x509.KeyUsageCertSign + template.IsCA = true + + priv, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return ca, fmt.Errorf("error generating rsa key: %s", err) + } + + ca.Cert, ca.Key, err = getCertAndKey(template, priv, template, priv) + if err != nil { + return ca, err + } + + return ca, nil +} + +func generateSelfSignedCertificate( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, +) (certificate, error) { + cert := certificate{} + + template, err := getBaseCertTemplate(cn, ips, 
alternateDNS, daysValid) + if err != nil { + return cert, err + } + + priv, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return cert, fmt.Errorf("error generating rsa key: %s", err) + } + + cert.Cert, cert.Key, err = getCertAndKey(template, priv, template, priv) + if err != nil { + return cert, err + } + + return cert, nil +} + +func generateSignedCertificate( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, + ca certificate, +) (certificate, error) { + cert := certificate{} + + decodedSignerCert, _ := pem.Decode([]byte(ca.Cert)) + if decodedSignerCert == nil { + return cert, errors.New("unable to decode certificate") + } + signerCert, err := x509.ParseCertificate(decodedSignerCert.Bytes) + if err != nil { + return cert, fmt.Errorf( + "error parsing certificate: decodedSignerCert.Bytes: %s", + err, + ) + } + decodedSignerKey, _ := pem.Decode([]byte(ca.Key)) + if decodedSignerKey == nil { + return cert, errors.New("unable to decode key") + } + signerKey, err := x509.ParsePKCS1PrivateKey(decodedSignerKey.Bytes) + if err != nil { + return cert, fmt.Errorf( + "error parsing prive key: decodedSignerKey.Bytes: %s", + err, + ) + } + + template, err := getBaseCertTemplate(cn, ips, alternateDNS, daysValid) + if err != nil { + return cert, err + } + + priv, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return cert, fmt.Errorf("error generating rsa key: %s", err) + } + + cert.Cert, cert.Key, err = getCertAndKey( + template, + priv, + signerCert, + signerKey, + ) + if err != nil { + return cert, err + } + + return cert, nil +} + +func getCertAndKey( + template *x509.Certificate, + signeeKey *rsa.PrivateKey, + parent *x509.Certificate, + signingKey *rsa.PrivateKey, +) (string, string, error) { + derBytes, err := x509.CreateCertificate( + rand.Reader, + template, + parent, + &signeeKey.PublicKey, + signingKey, + ) + if err != nil { + return "", "", fmt.Errorf("error creating certificate: %s", err) + } + + certBuffer := bytes.Buffer{} + if err := pem.Encode( + &certBuffer, + &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}, + ); err != nil { + return "", "", fmt.Errorf("error pem-encoding certificate: %s", err) + } + + keyBuffer := bytes.Buffer{} + if err := pem.Encode( + &keyBuffer, + &pem.Block{ + Type: "RSA PRIVATE KEY", + Bytes: x509.MarshalPKCS1PrivateKey(signeeKey), + }, + ); err != nil { + return "", "", fmt.Errorf("error pem-encoding key: %s", err) + } + + return string(certBuffer.Bytes()), string(keyBuffer.Bytes()), nil +} + +func getBaseCertTemplate( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, +) (*x509.Certificate, error) { + ipAddresses, err := getNetIPs(ips) + if err != nil { + return nil, err + } + dnsNames, err := getAlternateDNSStrs(alternateDNS) + if err != nil { + return nil, err + } + serialNumberUpperBound := new(big.Int).Lsh(big.NewInt(1), 128) + serialNumber, err := rand.Int(rand.Reader, serialNumberUpperBound) + if err != nil { + return nil, err + } + return &x509.Certificate{ + SerialNumber: serialNumber, + Subject: pkix.Name{ + CommonName: cn, + }, + IPAddresses: ipAddresses, + DNSNames: dnsNames, + NotBefore: time.Now(), + NotAfter: time.Now().Add(time.Hour * 24 * time.Duration(daysValid)), + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{ + x509.ExtKeyUsageServerAuth, + x509.ExtKeyUsageClientAuth, + }, + BasicConstraintsValid: true, + }, nil +} + +func getNetIPs(ips []interface{}) ([]net.IP, error) { + if ips == nil { + 
return []net.IP{}, nil + } + var ipStr string + var ok bool + var netIP net.IP + netIPs := make([]net.IP, len(ips)) + for i, ip := range ips { + ipStr, ok = ip.(string) + if !ok { + return nil, fmt.Errorf("error parsing ip: %v is not a string", ip) + } + netIP = net.ParseIP(ipStr) + if netIP == nil { + return nil, fmt.Errorf("error parsing ip: %s", ipStr) + } + netIPs[i] = netIP + } + return netIPs, nil +} + +func getAlternateDNSStrs(alternateDNS []interface{}) ([]string, error) { + if alternateDNS == nil { + return []string{}, nil + } + var dnsStr string + var ok bool + alternateDNSStrs := make([]string, len(alternateDNS)) + for i, dns := range alternateDNS { + dnsStr, ok = dns.(string) + if !ok { + return nil, fmt.Errorf( + "error processing alternate dns name: %v is not a string", + dns, + ) + } + alternateDNSStrs[i] = dnsStr + } + return alternateDNSStrs, nil +} + +func encryptAES(password string, plaintext string) (string, error) { + if plaintext == "" { + return "", nil + } + + key := make([]byte, 32) + copy(key, []byte(password)) + block, err := aes.NewCipher(key) + if err != nil { + return "", err + } + + content := []byte(plaintext) + blockSize := block.BlockSize() + padding := blockSize - len(content)%blockSize + padtext := bytes.Repeat([]byte{byte(padding)}, padding) + content = append(content, padtext...) + + ciphertext := make([]byte, aes.BlockSize+len(content)) + + iv := ciphertext[:aes.BlockSize] + if _, err := io.ReadFull(rand.Reader, iv); err != nil { + return "", err + } + + mode := cipher.NewCBCEncrypter(block, iv) + mode.CryptBlocks(ciphertext[aes.BlockSize:], content) + + return base64.StdEncoding.EncodeToString(ciphertext), nil +} + +func decryptAES(password string, crypt64 string) (string, error) { + if crypt64 == "" { + return "", nil + } + + key := make([]byte, 32) + copy(key, []byte(password)) + + crypt, err := base64.StdEncoding.DecodeString(crypt64) + if err != nil { + return "", err + } + + block, err := aes.NewCipher(key) + if err != nil { + return "", err + } + + iv := crypt[:aes.BlockSize] + crypt = crypt[aes.BlockSize:] + decrypted := make([]byte, len(crypt)) + mode := cipher.NewCBCDecrypter(block, iv) + mode.CryptBlocks(decrypted, crypt) + + return string(decrypted[:len(decrypted)-int(decrypted[len(decrypted)-1])]), nil +} diff --git a/vendor/github.com/Masterminds/sprig/date.go b/vendor/github.com/Masterminds/sprig/date.go new file mode 100644 index 00000000000..d1d6155d72c --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/date.go @@ -0,0 +1,83 @@ +package sprig + +import ( + "strconv" + "time" +) + +// Given a format and a date, format the date string. +// +// Date can be a `time.Time` or an `int, int32, int64`. +// In the later case, it is treated as seconds since UNIX +// epoch. 
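+//
+// For example, {{ now | date "2006-01-02" }} renders the current date and
+// {{ date "02 Jan 06" 1610000000 }} formats a UNIX timestamp (an illustrative
+// sketch; layouts follow Go's reference time from the time package).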
+func date(fmt string, date interface{}) string { + return dateInZone(fmt, date, "Local") +} + +func htmlDate(date interface{}) string { + return dateInZone("2006-01-02", date, "Local") +} + +func htmlDateInZone(date interface{}, zone string) string { + return dateInZone("2006-01-02", date, zone) +} + +func dateInZone(fmt string, date interface{}, zone string) string { + var t time.Time + switch date := date.(type) { + default: + t = time.Now() + case time.Time: + t = date + case *time.Time: + t = *date + case int64: + t = time.Unix(date, 0) + case int: + t = time.Unix(int64(date), 0) + case int32: + t = time.Unix(int64(date), 0) + } + + loc, err := time.LoadLocation(zone) + if err != nil { + loc, _ = time.LoadLocation("UTC") + } + + return t.In(loc).Format(fmt) +} + +func dateModify(fmt string, date time.Time) time.Time { + d, err := time.ParseDuration(fmt) + if err != nil { + return date + } + return date.Add(d) +} + +func dateAgo(date interface{}) string { + var t time.Time + + switch date := date.(type) { + default: + t = time.Now() + case time.Time: + t = date + case int64: + t = time.Unix(date, 0) + case int: + t = time.Unix(int64(date), 0) + } + // Drop resolution to seconds + duration := time.Since(t).Round(time.Second) + return duration.String() +} + +func toDate(fmt, str string) time.Time { + t, _ := time.ParseInLocation(fmt, str, time.Local) + return t +} + +func unixEpoch(date time.Time) string { + return strconv.FormatInt(date.Unix(), 10) +} diff --git a/vendor/github.com/Masterminds/sprig/defaults.go b/vendor/github.com/Masterminds/sprig/defaults.go new file mode 100644 index 00000000000..ed6a8ab291c --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/defaults.go @@ -0,0 +1,83 @@ +package sprig + +import ( + "encoding/json" + "reflect" +) + +// dfault checks whether `given` is set, and returns default if not set. +// +// This returns `d` if `given` appears not to be set, and `given` otherwise. +// +// For numeric types 0 is unset. +// For strings, maps, arrays, and slices, len() = 0 is considered unset. +// For bool, false is unset. +// Structs are never considered unset. +// +// For everything else, including pointers, a nil value is unset. +func dfault(d interface{}, given ...interface{}) interface{} { + + if empty(given) || empty(given[0]) { + return d + } + return given[0] +} + +// empty returns true if the given value has the zero value for its type. +func empty(given interface{}) bool { + g := reflect.ValueOf(given) + if !g.IsValid() { + return true + } + + // Basically adapted from text/template.isTrue + switch g.Kind() { + default: + return g.IsNil() + case reflect.Array, reflect.Slice, reflect.Map, reflect.String: + return g.Len() == 0 + case reflect.Bool: + return g.Bool() == false + case reflect.Complex64, reflect.Complex128: + return g.Complex() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return g.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return g.Uint() == 0 + case reflect.Float32, reflect.Float64: + return g.Float() == 0 + case reflect.Struct: + return false + } +} + +// coalesce returns the first non-empty value. 
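+//
+// For example, coalesce "" 0 "first" returns "first", while coalesce "" nil 0
+// returns nil because every argument is empty by the definition above.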
+func coalesce(v ...interface{}) interface{} { + for _, val := range v { + if !empty(val) { + return val + } + } + return nil +} + +// toJson encodes an item into a JSON string +func toJson(v interface{}) string { + output, _ := json.Marshal(v) + return string(output) +} + +// toPrettyJson encodes an item into a pretty (indented) JSON string +func toPrettyJson(v interface{}) string { + output, _ := json.MarshalIndent(v, "", " ") + return string(output) +} + +// ternary returns the first value if the last value is true, otherwise returns the second value. +func ternary(vt interface{}, vf interface{}, v bool) interface{} { + if v { + return vt + } + + return vf +} diff --git a/vendor/github.com/Masterminds/sprig/dict.go b/vendor/github.com/Masterminds/sprig/dict.go new file mode 100644 index 00000000000..738405b4332 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/dict.go @@ -0,0 +1,119 @@ +package sprig + +import ( + "github.com/imdario/mergo" + "github.com/mitchellh/copystructure" +) + +func set(d map[string]interface{}, key string, value interface{}) map[string]interface{} { + d[key] = value + return d +} + +func unset(d map[string]interface{}, key string) map[string]interface{} { + delete(d, key) + return d +} + +func hasKey(d map[string]interface{}, key string) bool { + _, ok := d[key] + return ok +} + +func pluck(key string, d ...map[string]interface{}) []interface{} { + res := []interface{}{} + for _, dict := range d { + if val, ok := dict[key]; ok { + res = append(res, val) + } + } + return res +} + +func keys(dicts ...map[string]interface{}) []string { + k := []string{} + for _, dict := range dicts { + for key := range dict { + k = append(k, key) + } + } + return k +} + +func pick(dict map[string]interface{}, keys ...string) map[string]interface{} { + res := map[string]interface{}{} + for _, k := range keys { + if v, ok := dict[k]; ok { + res[k] = v + } + } + return res +} + +func omit(dict map[string]interface{}, keys ...string) map[string]interface{} { + res := map[string]interface{}{} + + omit := make(map[string]bool, len(keys)) + for _, k := range keys { + omit[k] = true + } + + for k, v := range dict { + if _, ok := omit[k]; !ok { + res[k] = v + } + } + return res +} + +func dict(v ...interface{}) map[string]interface{} { + dict := map[string]interface{}{} + lenv := len(v) + for i := 0; i < lenv; i += 2 { + key := strval(v[i]) + if i+1 >= lenv { + dict[key] = "" + continue + } + dict[key] = v[i+1] + } + return dict +} + +func merge(dst map[string]interface{}, srcs ...map[string]interface{}) interface{} { + for _, src := range srcs { + if err := mergo.Merge(&dst, src); err != nil { + // Swallow errors inside of a template. + return "" + } + } + return dst +} + +func mergeOverwrite(dst map[string]interface{}, srcs ...map[string]interface{}) interface{} { + for _, src := range srcs { + if err := mergo.MergeWithOverwrite(&dst, src); err != nil { + // Swallow errors inside of a template. 
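+ // (Unlike merge above, mergo.MergeWithOverwrite lets each src replace
+ // values already present in dst, rather than only filling in empty ones.)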
+ return "" + } + } + return dst +} + +func values(dict map[string]interface{}) []interface{} { + values := []interface{}{} + for _, value := range dict { + values = append(values, value) + } + + return values +} + +func deepCopy(i interface{}) interface{} { + c, err := copystructure.Copy(i) + if err != nil { + panic("deepCopy error: " + err.Error()) + } + + return c +} diff --git a/vendor/github.com/Masterminds/sprig/doc.go b/vendor/github.com/Masterminds/sprig/doc.go new file mode 100644 index 00000000000..8f8f1d73703 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/doc.go @@ -0,0 +1,19 @@ +/* +Sprig: Template functions for Go. + +This package contains a number of utility functions for working with data +inside of Go `html/template` and `text/template` files. + +To add these functions, use the `template.Funcs()` method: + + t := template.New("foo").Funcs(sprig.FuncMap()) + +Note that you should add the function map before you parse any template files. + + In several cases, Sprig reverses the order of arguments from the way they + appear in the standard library. This is to make it easier to pipe + arguments into functions. + +See http://masterminds.github.io/sprig/ for more detailed documentation on each of the available functions. +*/ +package sprig diff --git a/vendor/github.com/Masterminds/sprig/functions.go b/vendor/github.com/Masterminds/sprig/functions.go new file mode 100644 index 00000000000..7b5b0af86c0 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/functions.go @@ -0,0 +1,306 @@ +package sprig + +import ( + "errors" + "html/template" + "os" + "path" + "reflect" + "strconv" + "strings" + ttemplate "text/template" + "time" + + util "github.com/Masterminds/goutils" + "github.com/huandu/xstrings" +) + +// FuncMap produces the function map. +// +// Use this to pass the functions into the template engine: +// +// tpl := template.New("foo").Funcs(sprig.FuncMap()) +// +func FuncMap() template.FuncMap { + return HtmlFuncMap() +} + +// HermeticTxtFuncMap returns a 'text/template'.FuncMap with only repeatable functions. +func HermeticTxtFuncMap() ttemplate.FuncMap { + r := TxtFuncMap() + for _, name := range nonhermeticFunctions { + delete(r, name) + } + return r +} + +// HermeticHtmlFuncMap returns an 'html/template'.FuncMap with only repeatable functions. +func HermeticHtmlFuncMap() template.FuncMap { + r := HtmlFuncMap() + for _, name := range nonhermeticFunctions { + delete(r, name) + } + return r +} + +// TxtFuncMap returns a 'text/template'.FuncMap +func TxtFuncMap() ttemplate.FuncMap { + return ttemplate.FuncMap(GenericFuncMap()) +} + +// HtmlFuncMap returns an 'html/template'.FuncMap +func HtmlFuncMap() template.FuncMap { + return template.FuncMap(GenericFuncMap()) +} + +// GenericFuncMap returns a copy of the basic function map as a map[string]interface{}. +func GenericFuncMap() map[string]interface{} { + gfm := make(map[string]interface{}, len(genericMap)) + for k, v := range genericMap { + gfm[k] = v + } + return gfm +} + +// These functions are not guaranteed to evaluate to the same result for a given input, because they +// refer to the environment or global state.
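+// For example, a map from HermeticTxtFuncMap keeps upper and trim but drops
+// env, now, and uuidv4, so rendering the same template twice over the same
+// data always yields identical output.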
+var nonhermeticFunctions = []string{ + // Date functions + "date", + "date_in_zone", + "date_modify", + "now", + "htmlDate", + "htmlDateInZone", + "dateInZone", + "dateModify", + + // Strings + "randAlphaNum", + "randAlpha", + "randAscii", + "randNumeric", + "uuidv4", + + // OS + "env", + "expandenv", + + // Network + "getHostByName", +} + +var genericMap = map[string]interface{}{ + "hello": func() string { return "Hello!" }, + + // Date functions + "date": date, + "date_in_zone": dateInZone, + "date_modify": dateModify, + "now": func() time.Time { return time.Now() }, + "htmlDate": htmlDate, + "htmlDateInZone": htmlDateInZone, + "dateInZone": dateInZone, + "dateModify": dateModify, + "ago": dateAgo, + "toDate": toDate, + "unixEpoch": unixEpoch, + + // Strings + "abbrev": abbrev, + "abbrevboth": abbrevboth, + "trunc": trunc, + "trim": strings.TrimSpace, + "upper": strings.ToUpper, + "lower": strings.ToLower, + "title": strings.Title, + "untitle": untitle, + "substr": substring, + // Switch order so that "foo" | repeat 5 + "repeat": func(count int, str string) string { return strings.Repeat(str, count) }, + // Deprecated: Use trimAll. + "trimall": func(a, b string) string { return strings.Trim(b, a) }, + // Switch order so that "$foo" | trimall "$" + "trimAll": func(a, b string) string { return strings.Trim(b, a) }, + "trimSuffix": func(a, b string) string { return strings.TrimSuffix(b, a) }, + "trimPrefix": func(a, b string) string { return strings.TrimPrefix(b, a) }, + "nospace": util.DeleteWhiteSpace, + "initials": initials, + "randAlphaNum": randAlphaNumeric, + "randAlpha": randAlpha, + "randAscii": randAscii, + "randNumeric": randNumeric, + "swapcase": util.SwapCase, + "shuffle": xstrings.Shuffle, + "snakecase": xstrings.ToSnakeCase, + "camelcase": xstrings.ToCamelCase, + "kebabcase": xstrings.ToKebabCase, + "wrap": func(l int, s string) string { return util.Wrap(s, l) }, + "wrapWith": func(l int, sep, str string) string { return util.WrapCustom(str, l, sep, true) }, + // Switch order so that "foobar" | contains "foo" + "contains": func(substr string, str string) bool { return strings.Contains(str, substr) }, + "hasPrefix": func(substr string, str string) bool { return strings.HasPrefix(str, substr) }, + "hasSuffix": func(substr string, str string) bool { return strings.HasSuffix(str, substr) }, + "quote": quote, + "squote": squote, + "cat": cat, + "indent": indent, + "nindent": nindent, + "replace": replace, + "plural": plural, + "sha1sum": sha1sum, + "sha256sum": sha256sum, + "adler32sum": adler32sum, + "toString": strval, + + // Wrap Atoi to stop errors. + "atoi": func(a string) int { i, _ := strconv.Atoi(a); return i }, + "int64": toInt64, + "int": toInt, + "float64": toFloat64, + "toDecimal": toDecimal, + + //"gt": func(a, b int) bool {return a > b}, + //"gte": func(a, b int) bool {return a >= b}, + //"lt": func(a, b int) bool {return a < b}, + //"lte": func(a, b int) bool {return a <= b}, + + // split "/" foo/bar returns map[int]string{0: foo, 1: bar} + "split": split, + "splitList": func(sep, orig string) []string { return strings.Split(orig, sep) }, + // splitn "/" foo/bar/fuu returns map[int]string{0: foo, 1: bar/fuu} + "splitn": splitn, + "toStrings": strslice, + + "until": until, + "untilStep": untilStep, + + // VERY basic arithmetic. 
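+ // For example, {{ add 1 2 3 }} yields 6 and {{ div 10 3 }} yields 3; every
+ // operand passes through toInt64, so floats truncate and bad strings become 0.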
+ "add1": func(i interface{}) int64 { return toInt64(i) + 1 }, + "add": func(i ...interface{}) int64 { + var a int64 = 0 + for _, b := range i { + a += toInt64(b) + } + return a + }, + "sub": func(a, b interface{}) int64 { return toInt64(a) - toInt64(b) }, + "div": func(a, b interface{}) int64 { return toInt64(a) / toInt64(b) }, + "mod": func(a, b interface{}) int64 { return toInt64(a) % toInt64(b) }, + "mul": func(a interface{}, v ...interface{}) int64 { + val := toInt64(a) + for _, b := range v { + val = val * toInt64(b) + } + return val + }, + "biggest": max, + "max": max, + "min": min, + "ceil": ceil, + "floor": floor, + "round": round, + + // string slices. Note that we reverse the order b/c that's better + // for template processing. + "join": join, + "sortAlpha": sortAlpha, + + // Defaults + "default": dfault, + "empty": empty, + "coalesce": coalesce, + "compact": compact, + "deepCopy": deepCopy, + "toJson": toJson, + "toPrettyJson": toPrettyJson, + "ternary": ternary, + + // Reflection + "typeOf": typeOf, + "typeIs": typeIs, + "typeIsLike": typeIsLike, + "kindOf": kindOf, + "kindIs": kindIs, + "deepEqual": reflect.DeepEqual, + + // OS: + "env": func(s string) string { return os.Getenv(s) }, + "expandenv": func(s string) string { return os.ExpandEnv(s) }, + + // Network: + "getHostByName": getHostByName, + + // File Paths: + "base": path.Base, + "dir": path.Dir, + "clean": path.Clean, + "ext": path.Ext, + "isAbs": path.IsAbs, + + // Encoding: + "b64enc": base64encode, + "b64dec": base64decode, + "b32enc": base32encode, + "b32dec": base32decode, + + // Data Structures: + "tuple": list, // FIXME: with the addition of append/prepend these are no longer immutable. + "list": list, + "dict": dict, + "set": set, + "unset": unset, + "hasKey": hasKey, + "pluck": pluck, + "keys": keys, + "pick": pick, + "omit": omit, + "merge": merge, + "mergeOverwrite": mergeOverwrite, + "values": values, + + "append": push, "push": push, + "prepend": prepend, + "first": first, + "rest": rest, + "last": last, + "initial": initial, + "reverse": reverse, + "uniq": uniq, + "without": without, + "has": has, + "slice": slice, + "concat": concat, + + // Crypto: + "genPrivateKey": generatePrivateKey, + "derivePassword": derivePassword, + "buildCustomCert": buildCustomCertificate, + "genCA": generateCertificateAuthority, + "genSelfSignedCert": generateSelfSignedCertificate, + "genSignedCert": generateSignedCertificate, + "encryptAES": encryptAES, + "decryptAES": decryptAES, + + // UUIDs: + "uuidv4": uuidv4, + + // SemVer: + "semver": semver, + "semverCompare": semverCompare, + + // Flow Control: + "fail": func(msg string) (string, error) { return "", errors.New(msg) }, + + // Regex + "regexMatch": regexMatch, + "regexFindAll": regexFindAll, + "regexFind": regexFind, + "regexReplaceAll": regexReplaceAll, + "regexReplaceAllLiteral": regexReplaceAllLiteral, + "regexSplit": regexSplit, + + // URLs: + "urlParse": urlParse, + "urlJoin": urlJoin, +} diff --git a/vendor/github.com/Masterminds/sprig/glide.yaml b/vendor/github.com/Masterminds/sprig/glide.yaml new file mode 100644 index 00000000000..f317d2b2b16 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/glide.yaml @@ -0,0 +1,19 @@ +package: github.com/Masterminds/sprig +import: +- package: github.com/Masterminds/goutils + version: ^1.0.0 +- package: github.com/google/uuid + version: ^1.0.0 +- package: golang.org/x/crypto + subpackages: + - scrypt +- package: github.com/Masterminds/semver + version: ^v1.2.2 +- package: github.com/stretchr/testify + version: ^v1.2.2 
+- package: github.com/imdario/mergo + version: ~0.3.7 +- package: github.com/huandu/xstrings + version: ^1.2 +- package: github.com/mitchellh/copystructure + version: ^1.0.0 diff --git a/vendor/github.com/Masterminds/sprig/list.go b/vendor/github.com/Masterminds/sprig/list.go new file mode 100644 index 00000000000..c0381bbb650 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/list.go @@ -0,0 +1,311 @@ +package sprig + +import ( + "fmt" + "reflect" + "sort" +) + +// Reflection is used in these functions so that slices and arrays of strings, +// ints, and other types not implementing []interface{} can be worked with. +// For example, this is useful if you need to work on the output of regexs. + +func list(v ...interface{}) []interface{} { + return v +} + +func push(list interface{}, v interface{}) []interface{} { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[i] = l2.Index(i).Interface() + } + + return append(nl, v) + + default: + panic(fmt.Sprintf("Cannot push on type %s", tp)) + } +} + +func prepend(list interface{}, v interface{}) []interface{} { + //return append([]interface{}{v}, list...) + + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[i] = l2.Index(i).Interface() + } + + return append([]interface{}{v}, nl...) + + default: + panic(fmt.Sprintf("Cannot prepend on type %s", tp)) + } +} + +func last(list interface{}) interface{} { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil + } + + return l2.Index(l - 1).Interface() + default: + panic(fmt.Sprintf("Cannot find last on type %s", tp)) + } +} + +func first(list interface{}) interface{} { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil + } + + return l2.Index(0).Interface() + default: + panic(fmt.Sprintf("Cannot find first on type %s", tp)) + } +} + +func rest(list interface{}) []interface{} { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil + } + + nl := make([]interface{}, l-1) + for i := 1; i < l; i++ { + nl[i-1] = l2.Index(i).Interface() + } + + return nl + default: + panic(fmt.Sprintf("Cannot find rest on type %s", tp)) + } +} + +func initial(list interface{}) []interface{} { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil + } + + nl := make([]interface{}, l-1) + for i := 0; i < l-1; i++ { + nl[i] = l2.Index(i).Interface() + } + + return nl + default: + panic(fmt.Sprintf("Cannot find initial on type %s", tp)) + } +} + +func sortAlpha(list interface{}) []string { + k := reflect.Indirect(reflect.ValueOf(list)).Kind() + switch k { + case reflect.Slice, reflect.Array: + a := strslice(list) + s := sort.StringSlice(a) + s.Sort() + return s + } + return []string{strval(list)} +} + +func reverse(v interface{}) []interface{} { + tp := reflect.TypeOf(v).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(v) + + l := l2.Len() + // We do not sort in place because the 
incoming array should not be altered. + nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[l-i-1] = l2.Index(i).Interface() + } + + return nl + default: + panic(fmt.Sprintf("Cannot find reverse on type %s", tp)) + } +} + +func compact(list interface{}) []interface{} { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := []interface{}{} + var item interface{} + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if !empty(item) { + nl = append(nl, item) + } + } + + return nl + default: + panic(fmt.Sprintf("Cannot compact on type %s", tp)) + } +} + +func uniq(list interface{}) []interface{} { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + dest := []interface{}{} + var item interface{} + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if !inList(dest, item) { + dest = append(dest, item) + } + } + + return dest + default: + panic(fmt.Sprintf("Cannot find uniq on type %s", tp)) + } +} + +func inList(haystack []interface{}, needle interface{}) bool { + for _, h := range haystack { + if reflect.DeepEqual(needle, h) { + return true + } + } + return false +} + +func without(list interface{}, omit ...interface{}) []interface{} { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + res := []interface{}{} + var item interface{} + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if !inList(omit, item) { + res = append(res, item) + } + } + + return res + default: + panic(fmt.Sprintf("Cannot find without on type %s", tp)) + } +} + +func has(needle interface{}, haystack interface{}) bool { + if haystack == nil { + return false + } + tp := reflect.TypeOf(haystack).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(haystack) + var item interface{} + l := l2.Len() + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if reflect.DeepEqual(needle, item) { + return true + } + } + + return false + default: + panic(fmt.Sprintf("Cannot find has on type %s", tp)) + } +} + +// $list := [1, 2, 3, 4, 5] +// slice $list -> list[0:5] = list[:] +// slice $list 0 3 -> list[0:3] = list[:3] +// slice $list 3 5 -> list[3:5] +// slice $list 3 -> list[3:5] = list[3:] +func slice(list interface{}, indices ...interface{}) interface{} { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil + } + + var start, end int + if len(indices) > 0 { + start = toInt(indices[0]) + } + if len(indices) < 2 { + end = l + } else { + end = toInt(indices[1]) + } + + return l2.Slice(start, end).Interface() + default: + panic(fmt.Sprintf("list should be type of slice or array but %s", tp)) + } +} + +func concat(lists ...interface{}) interface{} { + var res []interface{} + for _, list := range lists { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + for i := 0; i < l2.Len(); i++ { + res = append(res, l2.Index(i).Interface()) + } + default: + panic(fmt.Sprintf("Cannot concat type %s as list", tp)) + } + } + return res +} diff --git a/vendor/github.com/Masterminds/sprig/network.go b/vendor/github.com/Masterminds/sprig/network.go new file mode 100644 index 00000000000..d786cc7363b --- /dev/null +++ 
b/vendor/github.com/Masterminds/sprig/network.go @@ -0,0 +1,12 @@ +package sprig + +import ( + "math/rand" + "net" +) + +func getHostByName(name string) string { + addrs, _ := net.LookupHost(name) + //TODO: add error handling when the v3 release comes out + return addrs[rand.Intn(len(addrs))] +} diff --git a/vendor/github.com/Masterminds/sprig/numeric.go b/vendor/github.com/Masterminds/sprig/numeric.go new file mode 100644 index 00000000000..f4af4af2a7f --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/numeric.go @@ -0,0 +1,169 @@ +package sprig + +import ( + "fmt" + "math" + "reflect" + "strconv" +) + +// toFloat64 converts numeric types and numeric strings to a float64 +func toFloat64(v interface{}) float64 { + if str, ok := v.(string); ok { + iv, err := strconv.ParseFloat(str, 64) + if err != nil { + return 0 + } + return iv + } + + val := reflect.Indirect(reflect.ValueOf(v)) + switch val.Kind() { + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return float64(val.Int()) + case reflect.Uint8, reflect.Uint16, reflect.Uint32: + return float64(val.Uint()) + case reflect.Uint, reflect.Uint64: + return float64(val.Uint()) + case reflect.Float32, reflect.Float64: + return val.Float() + case reflect.Bool: + if val.Bool() == true { + return 1 + } + return 0 + default: + return 0 + } +} + +func toInt(v interface{}) int { + //It's not optimal, but I don't want to duplicate the toInt64 code. + return int(toInt64(v)) +} + +// toInt64 converts integer types to 64-bit integers +func toInt64(v interface{}) int64 { + if str, ok := v.(string); ok { + iv, err := strconv.ParseInt(str, 10, 64) + if err != nil { + return 0 + } + return iv + } + + val := reflect.Indirect(reflect.ValueOf(v)) + switch val.Kind() { + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return val.Int() + case reflect.Uint8, reflect.Uint16, reflect.Uint32: + return int64(val.Uint()) + case reflect.Uint, reflect.Uint64: + tv := val.Uint() + if tv <= math.MaxInt64 { + return int64(tv) + } + // TODO: What is the sensible thing to do here?
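+ // Saturate rather than wrap: a uint64 above MaxInt64 has no exact
+ // int64 representation, so clamp to the maximum.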
+ return math.MaxInt64 + case reflect.Float32, reflect.Float64: + return int64(val.Float()) + case reflect.Bool: + if val.Bool() == true { + return 1 + } + return 0 + default: + return 0 + } +} + +func max(a interface{}, i ...interface{}) int64 { + aa := toInt64(a) + for _, b := range i { + bb := toInt64(b) + if bb > aa { + aa = bb + } + } + return aa +} + +func min(a interface{}, i ...interface{}) int64 { + aa := toInt64(a) + for _, b := range i { + bb := toInt64(b) + if bb < aa { + aa = bb + } + } + return aa +} + +func until(count int) []int { + step := 1 + if count < 0 { + step = -1 + } + return untilStep(0, count, step) +} + +func untilStep(start, stop, step int) []int { + v := []int{} + + if stop < start { + if step >= 0 { + return v + } + for i := start; i > stop; i += step { + v = append(v, i) + } + return v + } + + if step <= 0 { + return v + } + for i := start; i < stop; i += step { + v = append(v, i) + } + return v +} + +func floor(a interface{}) float64 { + aa := toFloat64(a) + return math.Floor(aa) +} + +func ceil(a interface{}) float64 { + aa := toFloat64(a) + return math.Ceil(aa) +} + +func round(a interface{}, p int, r_opt ...float64) float64 { + roundOn := .5 + if len(r_opt) > 0 { + roundOn = r_opt[0] + } + val := toFloat64(a) + places := toFloat64(p) + + var round float64 + pow := math.Pow(10, places) + digit := pow * val + _, div := math.Modf(digit) + if div >= roundOn { + round = math.Ceil(digit) + } else { + round = math.Floor(digit) + } + return round / pow +} + +// converts unix octal to decimal +func toDecimal(v interface{}) int64 { + result, err := strconv.ParseInt(fmt.Sprint(v), 8, 64) + if err != nil { + return 0 + } + return result +} diff --git a/vendor/github.com/Masterminds/sprig/reflect.go b/vendor/github.com/Masterminds/sprig/reflect.go new file mode 100644 index 00000000000..8a65c132f08 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/reflect.go @@ -0,0 +1,28 @@ +package sprig + +import ( + "fmt" + "reflect" +) + +// typeIs returns true if the src is the type named in target. 
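+//
+// For example, typeIs "string" "hello" is true; typeIsLike additionally
+// matches a pointer to the named type, so a *int satisfies target "int".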
+func typeIs(target string, src interface{}) bool { + return target == typeOf(src) +} + +func typeIsLike(target string, src interface{}) bool { + t := typeOf(src) + return target == t || "*"+target == t +} + +func typeOf(src interface{}) string { + return fmt.Sprintf("%T", src) +} + +func kindIs(target string, src interface{}) bool { + return target == kindOf(src) +} + +func kindOf(src interface{}) string { + return reflect.ValueOf(src).Kind().String() +} diff --git a/vendor/github.com/Masterminds/sprig/regex.go b/vendor/github.com/Masterminds/sprig/regex.go new file mode 100644 index 00000000000..2016f66336f --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/regex.go @@ -0,0 +1,35 @@ +package sprig + +import ( + "regexp" +) + +func regexMatch(regex string, s string) bool { + match, _ := regexp.MatchString(regex, s) + return match +} + +func regexFindAll(regex string, s string, n int) []string { + r := regexp.MustCompile(regex) + return r.FindAllString(s, n) +} + +func regexFind(regex string, s string) string { + r := regexp.MustCompile(regex) + return r.FindString(s) +} + +func regexReplaceAll(regex string, s string, repl string) string { + r := regexp.MustCompile(regex) + return r.ReplaceAllString(s, repl) +} + +func regexReplaceAllLiteral(regex string, s string, repl string) string { + r := regexp.MustCompile(regex) + return r.ReplaceAllLiteralString(s, repl) +} + +func regexSplit(regex string, s string, n int) []string { + r := regexp.MustCompile(regex) + return r.Split(s, n) +} diff --git a/vendor/github.com/Masterminds/sprig/semver.go b/vendor/github.com/Masterminds/sprig/semver.go new file mode 100644 index 00000000000..c2bf8a1fdf3 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/semver.go @@ -0,0 +1,23 @@ +package sprig + +import ( + sv2 "github.com/Masterminds/semver" +) + +func semverCompare(constraint, version string) (bool, error) { + c, err := sv2.NewConstraint(constraint) + if err != nil { + return false, err + } + + v, err := sv2.NewVersion(version) + if err != nil { + return false, err + } + + return c.Check(v), nil +} + +func semver(version string) (*sv2.Version, error) { + return sv2.NewVersion(version) +} diff --git a/vendor/github.com/Masterminds/sprig/strings.go b/vendor/github.com/Masterminds/sprig/strings.go new file mode 100644 index 00000000000..943fa3e8ad5 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/strings.go @@ -0,0 +1,233 @@ +package sprig + +import ( + "encoding/base32" + "encoding/base64" + "fmt" + "reflect" + "strconv" + "strings" + + util "github.com/Masterminds/goutils" +) + +func base64encode(v string) string { + return base64.StdEncoding.EncodeToString([]byte(v)) +} + +func base64decode(v string) string { + data, err := base64.StdEncoding.DecodeString(v) + if err != nil { + return err.Error() + } + return string(data) +} + +func base32encode(v string) string { + return base32.StdEncoding.EncodeToString([]byte(v)) +} + +func base32decode(v string) string { + data, err := base32.StdEncoding.DecodeString(v) + if err != nil { + return err.Error() + } + return string(data) +} + +func abbrev(width int, s string) string { + if width < 4 { + return s + } + r, _ := util.Abbreviate(s, width) + return r +} + +func abbrevboth(left, right int, s string) string { + if right < 4 || left > 0 && right < 7 { + return s + } + r, _ := util.AbbreviateFull(s, left, right) + return r +} +func initials(s string) string { + // Wrap this just to eliminate the var args, which templates don't do well. 
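+ // e.g. initials "first try" yields "ft".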
+ return util.Initials(s) +} + +func randAlphaNumeric(count int) string { + // It is not possible, it appears, to actually generate an error here. + r, _ := util.CryptoRandomAlphaNumeric(count) + return r +} + +func randAlpha(count int) string { + r, _ := util.CryptoRandomAlphabetic(count) + return r +} + +func randAscii(count int) string { + r, _ := util.CryptoRandomAscii(count) + return r +} + +func randNumeric(count int) string { + r, _ := util.CryptoRandomNumeric(count) + return r +} + +func untitle(str string) string { + return util.Uncapitalize(str) +} + +func quote(str ...interface{}) string { + out := make([]string, 0, len(str)) + for _, s := range str { + if s != nil { + out = append(out, fmt.Sprintf("%q", strval(s))) + } + } + return strings.Join(out, " ") +} + +func squote(str ...interface{}) string { + out := make([]string, 0, len(str)) + for _, s := range str { + if s != nil { + out = append(out, fmt.Sprintf("'%v'", s)) + } + } + return strings.Join(out, " ") +} + +func cat(v ...interface{}) string { + v = removeNilElements(v) + r := strings.TrimSpace(strings.Repeat("%v ", len(v))) + return fmt.Sprintf(r, v...) +} + +func indent(spaces int, v string) string { + pad := strings.Repeat(" ", spaces) + return pad + strings.Replace(v, "\n", "\n"+pad, -1) +} + +func nindent(spaces int, v string) string { + return "\n" + indent(spaces, v) +} + +func replace(old, new, src string) string { + return strings.Replace(src, old, new, -1) +} + +func plural(one, many string, count int) string { + if count == 1 { + return one + } + return many +} + +func strslice(v interface{}) []string { + switch v := v.(type) { + case []string: + return v + case []interface{}: + b := make([]string, 0, len(v)) + for _, s := range v { + if s != nil { + b = append(b, strval(s)) + } + } + return b + default: + val := reflect.ValueOf(v) + switch val.Kind() { + case reflect.Array, reflect.Slice: + l := val.Len() + b := make([]string, 0, l) + for i := 0; i < l; i++ { + value := val.Index(i).Interface() + if value != nil { + b = append(b, strval(value)) + } + } + return b + default: + if v == nil { + return []string{} + } else { + return []string{strval(v)} + } + } + } +} + +func removeNilElements(v []interface{}) []interface{} { + newSlice := make([]interface{}, 0, len(v)) + for _, i := range v { + if i != nil { + newSlice = append(newSlice, i) + } + } + return newSlice +} + +func strval(v interface{}) string { + switch v := v.(type) { + case string: + return v + case []byte: + return string(v) + case error: + return v.Error() + case fmt.Stringer: + return v.String() + default: + return fmt.Sprintf("%v", v) + } +} + +func trunc(c int, s string) string { + if len(s) <= c { + return s + } + return s[0:c] +} + +func join(sep string, v interface{}) string { + return strings.Join(strslice(v), sep) +} + +func split(sep, orig string) map[string]string { + parts := strings.Split(orig, sep) + res := make(map[string]string, len(parts)) + for i, v := range parts { + res["_"+strconv.Itoa(i)] = v + } + return res +} + +func splitn(sep string, n int, orig string) map[string]string { + parts := strings.SplitN(orig, sep, n) + res := make(map[string]string, len(parts)) + for i, v := range parts { + res["_"+strconv.Itoa(i)] = v + } + return res +} + +// substring creates a substring of the given string. +// +// If start is < 0, this calls string[:end]. +// +// If start is >= 0 and end < 0 or end bigger than s length, this calls string[start:] +// +// Otherwise, this calls string[start, end]. 
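+//
+// For example, substr 0 5 "hello world" renders "hello" in a template; the
+// offsets are byte indices, so multi-byte runes can be split mid-character.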
+func substring(start, end int, s string) string { + if start < 0 { + return s[:end] + } + if end < 0 || end > len(s) { + return s[start:] + } + return s[start:end] +} diff --git a/vendor/github.com/Masterminds/sprig/url.go b/vendor/github.com/Masterminds/sprig/url.go new file mode 100644 index 00000000000..5f22d801f92 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/url.go @@ -0,0 +1,66 @@ +package sprig + +import ( + "fmt" + "net/url" + "reflect" +) + +func dictGetOrEmpty(dict map[string]interface{}, key string) string { + value, ok := dict[key]; if !ok { + return "" + } + tp := reflect.TypeOf(value).Kind() + if tp != reflect.String { + panic(fmt.Sprintf("unable to parse %s key, must be of type string, but %s found", key, tp.String())) + } + return reflect.ValueOf(value).String() +} + +// parses given URL to return dict object +func urlParse(v string) map[string]interface{} { + dict := map[string]interface{}{} + parsedUrl, err := url.Parse(v) + if err != nil { + panic(fmt.Sprintf("unable to parse url: %s", err)) + } + dict["scheme"] = parsedUrl.Scheme + dict["host"] = parsedUrl.Host + dict["hostname"] = parsedUrl.Hostname() + dict["path"] = parsedUrl.Path + dict["query"] = parsedUrl.RawQuery + dict["opaque"] = parsedUrl.Opaque + dict["fragment"] = parsedUrl.Fragment + if parsedUrl.User != nil { + dict["userinfo"] = parsedUrl.User.String() + } else { + dict["userinfo"] = "" + } + + return dict +} + +// join given dict to URL string +func urlJoin(d map[string]interface{}) string { + resUrl := url.URL{ + Scheme: dictGetOrEmpty(d, "scheme"), + Host: dictGetOrEmpty(d, "host"), + Path: dictGetOrEmpty(d, "path"), + RawQuery: dictGetOrEmpty(d, "query"), + Opaque: dictGetOrEmpty(d, "opaque"), + Fragment: dictGetOrEmpty(d, "fragment"), + + } + userinfo := dictGetOrEmpty(d, "userinfo") + var user *url.Userinfo = nil + if userinfo != "" { + tempUrl, err := url.Parse(fmt.Sprintf("proto://%s@host", userinfo)) + if err != nil { + panic(fmt.Sprintf("unable to parse userinfo in dict: %s", err)) + } + user = tempUrl.User + } + + resUrl.User = user + return resUrl.String() +} diff --git a/vendor/github.com/Masterminds/sprig/v3/.gitignore b/vendor/github.com/Masterminds/sprig/v3/.gitignore new file mode 100644 index 00000000000..5e3002f88f5 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/.gitignore @@ -0,0 +1,2 @@ +vendor/ +/.glide diff --git a/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md b/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md new file mode 100644 index 00000000000..fcdd4e88aed --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md @@ -0,0 +1,370 @@ +# Changelog + +## Release 3.2.1 (2021-02-04) + +### Changed + +- Upgraded `Masterminds/goutils` to `v1.1.1`. 
see the [Security Advisory](https://github.com/Masterminds/goutils/security/advisories/GHSA-xg2h-wx96-xgxr) + +## Release 3.2.0 (2020-12-14) + +### Added + +- #211: Added randInt function (thanks @kochurovro) +- #223: Added fromJson and mustFromJson functions (thanks @mholt) +- #242: Added a bcrypt function (thanks @robbiet480) +- #253: Added randBytes function (thanks @MikaelSmith) +- #254: Added dig function for dicts (thanks @nyarly) +- #257: Added regexQuoteMeta for quoting regex metadata (thanks @rheaton) +- #261: Added filepath functions osBase, osDir, osExt, osClean, osIsAbs (thanks @zugl) +- #268: Added and and all functions for testing conditions (thanks @phuslu) +- #181: Added float64 arithmetic addf, add1f, subf, divf, mulf, maxf, and minf + (thanks @andrewmostello) +- #265: Added chunk function to split array into smaller arrays (thanks @karelbilek) +- #270: Extend certificate functions to handle non-RSA keys + add support for + ed25519 keys (thanks @misberner) + +### Changed + +- Removed testing and support for Go 1.12. ed25519 support requires Go 1.13 or newer +- Using semver 3.1.1 and mergo 0.3.11 + +### Fixed + +- #249: Fix htmlDateInZone example (thanks @spawnia) + +NOTE: The dependency github.com/imdario/mergo reverted the breaking change in +0.3.9 via 0.3.10 release. + +## Release 3.1.0 (2020-04-16) + +NOTE: The dependency github.com/imdario/mergo made a behavior change in 0.3.9 +that impacts sprig functionality. Do not use sprig with a version newer than 0.3.8. + +### Added + +- #225: Added support for generating htpasswd hash (thanks @rustycl0ck) +- #224: Added duration filter (thanks @frebib) +- #205: Added `seq` function (thanks @thadc23) + +### Changed + +- #203: Unlambda functions with correct signature (thanks @muesli) +- #236: Updated the license formatting for GitHub display purposes +- #238: Updated package dependency versions. Note, mergo not updated to 0.3.9 + as it causes a breaking change for sprig. That issue is tracked at + https://github.com/imdario/mergo/issues/139 + +### Fixed + +- #229: Fix `seq` example in docs (thanks @kalmant) + +## Release 3.0.2 (2019-12-13) + +### Fixed + +- #220: Updating to semver v3.0.3 to fix issue with <= ranges +- #218: fix typo elyptical->elliptic in ecdsa key description (thanks @laverya) + +## Release 3.0.1 (2019-12-08) + +### Fixed + +- #212: Updated semver fixing broken constraint checking with ^0.0 + +## Release 3.0.0 (2019-10-02) + +### Added + +- #187: Added durationRound function (thanks @yjp20) +- #189: Added numerous template functions that return errors rather than panic (thanks @nrvnrvn) +- #193: Added toRawJson support (thanks @Dean-Coakley) +- #197: Added get support to dicts (thanks @Dean-Coakley) + +### Changed + +- #186: Moving dependency management to Go modules +- #186: Updated semver to v3. This has changes in the way ^ is handled +- #194: Updated documentation on merging and how it copies. 
Added example using deepCopy +- #196: trunc now supports negative values (thanks @Dean-Coakley) + +## Release 2.22.0 (2019-10-02) + +### Added + +- #173: Added getHostByName function to resolve dns names to ips (thanks @fcgravalos) +- #195: Added deepCopy function for use with dicts + +### Changed + +- Updated merge and mergeOverwrite documentation to explain copying and how to + use deepCopy with it + +## Release 2.21.0 (2019-09-18) + +### Added + +- #122: Added encryptAES/decryptAES functions (thanks @n0madic) +- #128: Added toDecimal support (thanks @Dean-Coakley) +- #169: Added list contcat (thanks @astorath) +- #174: Added deepEqual function (thanks @bonifaido) +- #170: Added url parse and join functions (thanks @astorath) + +### Changed + +- #171: Updated glide config for Google UUID to v1 and to add ranges to semver and testify + +### Fixed + +- #172: Fix semver wildcard example (thanks @piepmatz) +- #175: Fix dateInZone doc example (thanks @s3than) + +## Release 2.20.0 (2019-06-18) + +### Added + +- #164: Adding function to get unix epoch for a time (@mattfarina) +- #166: Adding tests for date_in_zone (@mattfarina) + +### Changed + +- #144: Fix function comments based on best practices from Effective Go (@CodeLingoTeam) +- #150: Handles pointer type for time.Time in "htmlDate" (@mapreal19) +- #161, #157, #160, #153, #158, #156, #155, #159, #152 documentation updates (@badeadan) + +### Fixed + +## Release 2.19.0 (2019-03-02) + +IMPORTANT: This release reverts a change from 2.18.0 + +In the previous release (2.18), we prematurely merged a partial change to the crypto functions that led to creating two sets of crypto functions (I blame @technosophos -- since that's me). This release rolls back that change, and does what was originally intended: It alters the existing crypto functions to use secure random. + +We debated whether this classifies as a change worthy of major revision, but given the proximity to the last release, we have decided that treating 2.18 as a faulty release is the correct course of action. We apologize for any inconvenience. + +### Changed + +- Fix substr panic 35fb796 (Alexey igrychev) +- Remove extra period 1eb7729 (Matthew Lorimor) +- Make random string functions use crypto by default 6ceff26 (Matthew Lorimor) +- README edits/fixes/suggestions 08fe136 (Lauri Apple) + + +## Release 2.18.0 (2019-02-12) + +### Added + +- Added mergeOverwrite function +- cryptographic functions that use secure random (see fe1de12) + +### Changed + +- Improve documentation of regexMatch function, resolves #139 90b89ce (Jan Tagscherer) +- Handle has for nil list 9c10885 (Daniel Cohen) +- Document behaviour of mergeOverwrite fe0dbe9 (Lukas Rieder) +- doc: adds missing documentation. 4b871e6 (Fernandez Ludovic) +- Replace outdated goutils imports 01893d2 (Matthew Lorimor) +- Surface crypto secure random strings from goutils fe1de12 (Matthew Lorimor) +- Handle untyped nil values as paramters to string functions 2b2ec8f (Morten Torkildsen) + +### Fixed + +- Fix dict merge issue and provide mergeOverwrite .dst .src1 to overwrite from src -> dst 4c59c12 (Lukas Rieder) +- Fix substr var names and comments d581f80 (Dean Coakley) +- Fix substr documentation 2737203 (Dean Coakley) + +## Release 2.17.1 (2019-01-03) + +### Fixed + +The 2.17.0 release did not have a version pinned for xstrings, which caused compilation failures when xstrings < 1.2 was used. This adds the correct version string to glide.yaml. 
+ +## Release 2.17.0 (2019-01-03) + +### Added + +- adds alder32sum function and test 6908fc2 (marshallford) +- Added kebabcase function ca331a1 (Ilyes512) + +### Changed + +- Update goutils to 1.1.0 4e1125d (Matt Butcher) + +### Fixed + +- Fix 'has' documentation e3f2a85 (dean-coakley) +- docs(dict): fix typo in pick example dc424f9 (Dustin Specker) +- fixes spelling errors... not sure how that happened 4cf188a (marshallford) + +## Release 2.16.0 (2018-08-13) + +### Added + +- add splitn function fccb0b0 (Helgi Þorbjörnsson) +- Add slice func df28ca7 (gongdo) +- Generate serial number a3bdffd (Cody Coons) +- Extract values of dict with values function df39312 (Lawrence Jones) + +### Changed + +- Modify panic message for list.slice ae38335 (gongdo) +- Minor improvement in code quality - Removed an unreachable piece of code at defaults.go#L26:6 - Resolve formatting issues. 5834241 (Abhishek Kashyap) +- Remove duplicated documentation 1d97af1 (Matthew Fisher) +- Test on go 1.11 49df809 (Helgi Þormar Þorbjörnsson) + +### Fixed + +- Fix file permissions c5f40b5 (gongdo) +- Fix example for buildCustomCert 7779e0d (Tin Lam) + +## Release 2.15.0 (2018-04-02) + +### Added + +- #68 and #69: Add json helpers to docs (thanks @arunvelsriram) +- #66: Add ternary function (thanks @binoculars) +- #67: Allow keys function to take multiple dicts (thanks @binoculars) +- #89: Added sha1sum to crypto function (thanks @benkeil) +- #81: Allow customizing Root CA that used by genSignedCert (thanks @chenzhiwei) +- #92: Add travis testing for go 1.10 +- #93: Adding appveyor config for windows testing + +### Changed + +- #90: Updating to more recent dependencies +- #73: replace satori/go.uuid with google/uuid (thanks @petterw) + +### Fixed + +- #76: Fixed documentation typos (thanks @Thiht) +- Fixed rounding issue on the `ago` function. Note, the removes support for Go 1.8 and older + +## Release 2.14.1 (2017-12-01) + +### Fixed + +- #60: Fix typo in function name documentation (thanks @neil-ca-moore) +- #61: Removing line with {{ due to blocking github pages genertion +- #64: Update the list functions to handle int, string, and other slices for compatibility + +## Release 2.14.0 (2017-10-06) + +This new version of Sprig adds a set of functions for generating and working with SSL certificates. 
+ +- `genCA` generates an SSL Certificate Authority +- `genSelfSignedCert` generates an SSL self-signed certificate +- `genSignedCert` generates an SSL certificate and key based on a given CA + +## Release 2.13.0 (2017-09-18) + +This release adds new functions, including: + +- `regexMatch`, `regexFindAll`, `regexFind`, `regexReplaceAll`, `regexReplaceAllLiteral`, and `regexSplit` to work with regular expressions +- `floor`, `ceil`, and `round` math functions +- `toDate` converts a string to a date +- `nindent` is just like `indent` but also prepends a new line +- `ago` returns the time from `time.Now` + +### Added + +- #40: Added basic regex functionality (thanks @alanquillin) +- #41: Added ceil floor and round functions (thanks @alanquillin) +- #48: Added toDate function (thanks @andreynering) +- #50: Added nindent function (thanks @binoculars) +- #46: Added ago function (thanks @slayer) + +### Changed + +- #51: Updated godocs to include new string functions (thanks @curtisallen) +- #49: Added ability to merge multiple dicts (thanks @binoculars) + +## Release 2.12.0 (2017-05-17) + +- `snakecase`, `camelcase`, and `shuffle` are three new string functions +- `fail` allows you to bail out of a template render when conditions are not met + +## Release 2.11.0 (2017-05-02) + +- Added `toJson` and `toPrettyJson` +- Added `merge` +- Refactored documentation + +## Release 2.10.0 (2017-03-15) + +- Added `semver` and `semverCompare` for Semantic Versions +- `list` replaces `tuple` +- Fixed issue with `join` +- Added `first`, `last`, `intial`, `rest`, `prepend`, `append`, `toString`, `toStrings`, `sortAlpha`, `reverse`, `coalesce`, `pluck`, `pick`, `compact`, `keys`, `omit`, `uniq`, `has`, `without` + +## Release 2.9.0 (2017-02-23) + +- Added `splitList` to split a list +- Added crypto functions of `genPrivateKey` and `derivePassword` + +## Release 2.8.0 (2016-12-21) + +- Added access to several path functions (`base`, `dir`, `clean`, `ext`, and `abs`) +- Added functions for _mutating_ dictionaries (`set`, `unset`, `hasKey`) + +## Release 2.7.0 (2016-12-01) + +- Added `sha256sum` to generate a hash of an input +- Added functions to convert a numeric or string to `int`, `int64`, `float64` + +## Release 2.6.0 (2016-10-03) + +- Added a `uuidv4` template function for generating UUIDs inside of a template. + +## Release 2.5.0 (2016-08-19) + +- New `trimSuffix`, `trimPrefix`, `hasSuffix`, and `hasPrefix` functions +- New aliases have been added for a few functions that didn't follow the naming conventions (`trimAll` and `abbrevBoth`) +- `trimall` and `abbrevboth` (notice the case) are deprecated and will be removed in 3.0.0 + +## Release 2.4.0 (2016-08-16) + +- Adds two functions: `until` and `untilStep` + +## Release 2.3.0 (2016-06-21) + +- cat: Concatenate strings with whitespace separators. +- replace: Replace parts of a string: `replace " " "-" "Me First"` renders "Me-First" +- plural: Format plurals: `len "foo" | plural "one foo" "many foos"` renders "many foos" +- indent: Indent blocks of text in a way that is sensitive to "\n" characters. + +## Release 2.2.0 (2016-04-21) + +- Added a `genPrivateKey` function (Thanks @bacongobbler) + +## Release 2.1.0 (2016-03-30) + +- `default` now prints the default value when it does not receive a value down the pipeline. It is much safer now to do `{{.Foo | default "bar"}}`. +- Added accessors for "hermetic" functions. These return only functions that, when given the same input, produce the same output. 
+ +## Release 2.0.0 (2016-03-29) + +Because we switched from `int` to `int64` as the return value for all integer math functions, the library's major version number has been incremented. + +- `min` complements `max` (formerly `biggest`) +- `empty` indicates that a value is the empty value for its type +- `tuple` creates a tuple inside of a template: `{{$t := tuple "a", "b" "c"}}` +- `dict` creates a dictionary inside of a template `{{$d := dict "key1" "val1" "key2" "val2"}}` +- Date formatters have been added for HTML dates (as used in `date` input fields) +- Integer math functions can convert from a number of types, including `string` (via `strconv.ParseInt`). + +## Release 1.2.0 (2016-02-01) + +- Added quote and squote +- Added b32enc and b32dec +- add now takes varargs +- biggest now takes varargs + +## Release 1.1.0 (2015-12-29) + +- Added #4: Added contains function. strings.Contains, but with the arguments + switched to simplify common pipelines. (thanks krancour) +- Added Travis-CI testing support + +## Release 1.0.0 (2015-12-23) + +- Initial release diff --git a/vendor/github.com/Masterminds/sprig/v3/LICENSE.txt b/vendor/github.com/Masterminds/sprig/v3/LICENSE.txt new file mode 100644 index 00000000000..f311b1eaaaa --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (C) 2013-2020 Masterminds + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/Masterminds/sprig/v3/Makefile b/vendor/github.com/Masterminds/sprig/v3/Makefile new file mode 100644 index 00000000000..78d409cde2c --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/Makefile @@ -0,0 +1,9 @@ +.PHONY: test +test: + @echo "==> Running tests" + GO111MODULE=on go test -v + +.PHONY: test-cover +test-cover: + @echo "==> Running Tests with coverage" + GO111MODULE=on go test -cover . 
diff --git a/vendor/github.com/Masterminds/sprig/v3/README.md b/vendor/github.com/Masterminds/sprig/v3/README.md
new file mode 100644
index 00000000000..c37ba01c216
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/v3/README.md
@@ -0,0 +1,101 @@
+# Sprig: Template functions for Go templates
+
+[![GoDoc](https://img.shields.io/static/v1?label=godoc&message=reference&color=blue)](https://pkg.go.dev/github.com/Masterminds/sprig/v3)
+[![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/sprig)](https://goreportcard.com/report/github.com/Masterminds/sprig)
+[![Stability: Sustained](https://masterminds.github.io/stability/sustained.svg)](https://masterminds.github.io/stability/sustained.html)
+[![](https://github.com/Masterminds/sprig/workflows/Tests/badge.svg)](https://github.com/Masterminds/sprig/actions)
+
+The Go language comes with a [built-in template
+language](http://golang.org/pkg/text/template/), but relatively
+few template functions. Sprig is a library that provides more than 100 commonly
+used template functions.
+
+It is inspired by the template functions found in
+[Twig](http://twig.sensiolabs.org/documentation) and in various
+JavaScript libraries, such as [underscore.js](http://underscorejs.org/).
+
+## IMPORTANT NOTES
+
+Sprig leverages [mergo](https://github.com/imdario/mergo) to handle merges. In
+its v0.3.9 release there was a behavior change that impacts merging template
+functions in sprig. It is currently recommended to use v0.3.8 of that package.
+Using v0.3.9 will cause sprig tests to fail. The issue in mergo is tracked at
+https://github.com/imdario/mergo/issues/139.
+
+## Package Versions
+
+There are two active major versions of the `sprig` package.
+
+* v3 is the current stable release series, developed on the `master` branch. The
+  Go API should remain compatible with v2, the previous stable series. Behavior
+  changes in some functions are the reason for the new major version.
+* v2 is the previous stable release series. It has been more than three years since
+  the initial release of v2. You can read the documentation and see the code
+  on the [release-2](https://github.com/Masterminds/sprig/tree/release-2) branch.
+  Bug fixes to this major version will continue for some time.
+
+## Usage
+
+**Template developers**: Please use Sprig's [function documentation](http://masterminds.github.io/sprig/) for
+detailed instructions and code snippets for the >100 template functions available.
+
+**Go developers**: If you'd like to include Sprig as a library in your program,
+our API documentation is available [at GoDoc.org](http://godoc.org/github.com/Masterminds/sprig).
+
+For standard usage, read on.
+
+### Load the Sprig library
+
+To load the Sprig `FuncMap`:
+
+```go
+import (
+	"html/template"
+
+	"github.com/Masterminds/sprig/v3"
+)
+
+// This example illustrates that the FuncMap *must* be set before the
+// templates themselves are loaded.
+tpl := template.Must(
+	template.New("base").Funcs(sprig.FuncMap()).ParseGlob("*.html"),
+)
+```
+
+### Calling the functions inside of templates
+
+By convention, all functions are lowercase. This seems to follow the Go
+idiom for template functions (as opposed to template methods, which are
+TitleCase). For example, this:
+
+```
+{{ "hello!" | upper | repeat 5 }}
+```
+
+produces this:
+
+```
+HELLO!HELLO!HELLO!HELLO!HELLO!
+```
+
+## Principles Driving Our Function Selection
+
+We followed these principles to decide which functions to add and how to implement them:
+
+- Use template functions to build layout.
The following + types of operations are within the domain of template functions: + - Formatting + - Layout + - Simple type conversions + - Utilities that assist in handling common formatting and layout needs (e.g. arithmetic) +- Template functions should not return errors unless there is no way to print + a sensible value. For example, converting a string to an integer should not + produce an error if conversion fails. Instead, it should display a default + value. +- Simple math is necessary for grid layouts, pagers, and so on. Complex math + (anything other than arithmetic) should be done outside of templates. +- Template functions only deal with the data passed into them. They never retrieve + data from a source. +- Finally, do not override core Go template functions. diff --git a/vendor/github.com/Masterminds/sprig/v3/crypto.go b/vendor/github.com/Masterminds/sprig/v3/crypto.go new file mode 100644 index 00000000000..13a5cd55934 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/crypto.go @@ -0,0 +1,653 @@ +package sprig + +import ( + "bytes" + "crypto" + "crypto/aes" + "crypto/cipher" + "crypto/dsa" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/elliptic" + "crypto/hmac" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/base64" + "encoding/binary" + "encoding/hex" + "encoding/pem" + "errors" + "fmt" + "hash/adler32" + "io" + "math/big" + "net" + "time" + + "strings" + + "github.com/google/uuid" + bcrypt_lib "golang.org/x/crypto/bcrypt" + "golang.org/x/crypto/scrypt" +) + +func sha256sum(input string) string { + hash := sha256.Sum256([]byte(input)) + return hex.EncodeToString(hash[:]) +} + +func sha1sum(input string) string { + hash := sha1.Sum([]byte(input)) + return hex.EncodeToString(hash[:]) +} + +func adler32sum(input string) string { + hash := adler32.Checksum([]byte(input)) + return fmt.Sprintf("%d", hash) +} + +func bcrypt(input string) string { + hash, err := bcrypt_lib.GenerateFromPassword([]byte(input), bcrypt_lib.DefaultCost) + if err != nil { + return fmt.Sprintf("failed to encrypt string with bcrypt: %s", err) + } + + return string(hash) +} + +func htpasswd(username string, password string) string { + if strings.Contains(username, ":") { + return fmt.Sprintf("invalid username: %s", username) + } + return fmt.Sprintf("%s:%s", username, bcrypt(password)) +} + +func randBytes(count int) (string, error) { + buf := make([]byte, count) + if _, err := rand.Read(buf); err != nil { + return "", err + } + return base64.StdEncoding.EncodeToString(buf), nil +} + +// uuidv4 provides a safe and secure UUID v4 implementation +func uuidv4() string { + return uuid.New().String() +} + +var masterPasswordSeed = "com.lyndir.masterpassword" + +var passwordTypeTemplates = map[string][][]byte{ + "maximum": {[]byte("anoxxxxxxxxxxxxxxxxx"), []byte("axxxxxxxxxxxxxxxxxno")}, + "long": {[]byte("CvcvnoCvcvCvcv"), []byte("CvcvCvcvnoCvcv"), []byte("CvcvCvcvCvcvno"), []byte("CvccnoCvcvCvcv"), []byte("CvccCvcvnoCvcv"), + []byte("CvccCvcvCvcvno"), []byte("CvcvnoCvccCvcv"), []byte("CvcvCvccnoCvcv"), []byte("CvcvCvccCvcvno"), []byte("CvcvnoCvcvCvcc"), + []byte("CvcvCvcvnoCvcc"), []byte("CvcvCvcvCvccno"), []byte("CvccnoCvccCvcv"), []byte("CvccCvccnoCvcv"), []byte("CvccCvccCvcvno"), + []byte("CvcvnoCvccCvcc"), []byte("CvcvCvccnoCvcc"), []byte("CvcvCvccCvccno"), []byte("CvccnoCvcvCvcc"), []byte("CvccCvcvnoCvcc"), + []byte("CvccCvcvCvccno")}, + "medium": {[]byte("CvcnoCvc"), []byte("CvcCvcno")}, + "short": {[]byte("Cvcn")}, 
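+	// Each template byte selects a character class from templateCharacters below
+	// (e.g. C = upper consonant, v = lower vowel, n = digit, o = symbol).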
+ "basic": {[]byte("aaanaaan"), []byte("aannaaan"), []byte("aaannaaa")}, + "pin": {[]byte("nnnn")}, +} + +var templateCharacters = map[byte]string{ + 'V': "AEIOU", + 'C': "BCDFGHJKLMNPQRSTVWXYZ", + 'v': "aeiou", + 'c': "bcdfghjklmnpqrstvwxyz", + 'A': "AEIOUBCDFGHJKLMNPQRSTVWXYZ", + 'a': "AEIOUaeiouBCDFGHJKLMNPQRSTVWXYZbcdfghjklmnpqrstvwxyz", + 'n': "0123456789", + 'o': "@&%?,=[]_:-+*$#!'^~;()/.", + 'x': "AEIOUaeiouBCDFGHJKLMNPQRSTVWXYZbcdfghjklmnpqrstvwxyz0123456789!@#$%^&*()", +} + +func derivePassword(counter uint32, passwordType, password, user, site string) string { + var templates = passwordTypeTemplates[passwordType] + if templates == nil { + return fmt.Sprintf("cannot find password template %s", passwordType) + } + + var buffer bytes.Buffer + buffer.WriteString(masterPasswordSeed) + binary.Write(&buffer, binary.BigEndian, uint32(len(user))) + buffer.WriteString(user) + + salt := buffer.Bytes() + key, err := scrypt.Key([]byte(password), salt, 32768, 8, 2, 64) + if err != nil { + return fmt.Sprintf("failed to derive password: %s", err) + } + + buffer.Truncate(len(masterPasswordSeed)) + binary.Write(&buffer, binary.BigEndian, uint32(len(site))) + buffer.WriteString(site) + binary.Write(&buffer, binary.BigEndian, counter) + + var hmacv = hmac.New(sha256.New, key) + hmacv.Write(buffer.Bytes()) + var seed = hmacv.Sum(nil) + var temp = templates[int(seed[0])%len(templates)] + + buffer.Truncate(0) + for i, element := range temp { + passChars := templateCharacters[element] + passChar := passChars[int(seed[i+1])%len(passChars)] + buffer.WriteByte(passChar) + } + + return buffer.String() +} + +func generatePrivateKey(typ string) string { + var priv interface{} + var err error + switch typ { + case "", "rsa": + // good enough for government work + priv, err = rsa.GenerateKey(rand.Reader, 4096) + case "dsa": + key := new(dsa.PrivateKey) + // again, good enough for government work + if err = dsa.GenerateParameters(&key.Parameters, rand.Reader, dsa.L2048N256); err != nil { + return fmt.Sprintf("failed to generate dsa params: %s", err) + } + err = dsa.GenerateKey(key, rand.Reader) + priv = key + case "ecdsa": + // again, good enough for government work + priv, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + case "ed25519": + _, priv, err = ed25519.GenerateKey(rand.Reader) + default: + return "Unknown type " + typ + } + if err != nil { + return fmt.Sprintf("failed to generate private key: %s", err) + } + + return string(pem.EncodeToMemory(pemBlockForKey(priv))) +} + +// DSAKeyFormat stores the format for DSA keys. 
+// Used by pemBlockForKey +type DSAKeyFormat struct { + Version int + P, Q, G, Y, X *big.Int +} + +func pemBlockForKey(priv interface{}) *pem.Block { + switch k := priv.(type) { + case *rsa.PrivateKey: + return &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(k)} + case *dsa.PrivateKey: + val := DSAKeyFormat{ + P: k.P, Q: k.Q, G: k.G, + Y: k.Y, X: k.X, + } + bytes, _ := asn1.Marshal(val) + return &pem.Block{Type: "DSA PRIVATE KEY", Bytes: bytes} + case *ecdsa.PrivateKey: + b, _ := x509.MarshalECPrivateKey(k) + return &pem.Block{Type: "EC PRIVATE KEY", Bytes: b} + default: + // attempt PKCS#8 format for all other keys + b, err := x509.MarshalPKCS8PrivateKey(k) + if err != nil { + return nil + } + return &pem.Block{Type: "PRIVATE KEY", Bytes: b} + } +} + +func parsePrivateKeyPEM(pemBlock string) (crypto.PrivateKey, error) { + block, _ := pem.Decode([]byte(pemBlock)) + if block == nil { + return nil, errors.New("no PEM data in input") + } + + if block.Type == "PRIVATE KEY" { + priv, err := x509.ParsePKCS8PrivateKey(block.Bytes) + if err != nil { + return nil, fmt.Errorf("decoding PEM as PKCS#8: %s", err) + } + return priv, nil + } else if !strings.HasSuffix(block.Type, " PRIVATE KEY") { + return nil, fmt.Errorf("no private key data in PEM block of type %s", block.Type) + } + + switch block.Type[:len(block.Type)-12] { // strip " PRIVATE KEY" + case "RSA": + priv, err := x509.ParsePKCS1PrivateKey(block.Bytes) + if err != nil { + return nil, fmt.Errorf("parsing RSA private key from PEM: %s", err) + } + return priv, nil + case "EC": + priv, err := x509.ParseECPrivateKey(block.Bytes) + if err != nil { + return nil, fmt.Errorf("parsing EC private key from PEM: %s", err) + } + return priv, nil + case "DSA": + var k DSAKeyFormat + _, err := asn1.Unmarshal(block.Bytes, &k) + if err != nil { + return nil, fmt.Errorf("parsing DSA private key from PEM: %s", err) + } + priv := &dsa.PrivateKey{ + PublicKey: dsa.PublicKey{ + Parameters: dsa.Parameters{ + P: k.P, Q: k.Q, G: k.G, + }, + Y: k.Y, + }, + X: k.X, + } + return priv, nil + default: + return nil, fmt.Errorf("invalid private key type %s", block.Type) + } +} + +func getPublicKey(priv crypto.PrivateKey) (crypto.PublicKey, error) { + switch k := priv.(type) { + case interface{ Public() crypto.PublicKey }: + return k.Public(), nil + case *dsa.PrivateKey: + return &k.PublicKey, nil + default: + return nil, fmt.Errorf("unable to get public key for type %T", priv) + } +} + +type certificate struct { + Cert string + Key string +} + +func buildCustomCertificate(b64cert string, b64key string) (certificate, error) { + crt := certificate{} + + cert, err := base64.StdEncoding.DecodeString(b64cert) + if err != nil { + return crt, errors.New("unable to decode base64 certificate") + } + + key, err := base64.StdEncoding.DecodeString(b64key) + if err != nil { + return crt, errors.New("unable to decode base64 private key") + } + + decodedCert, _ := pem.Decode(cert) + if decodedCert == nil { + return crt, errors.New("unable to decode certificate") + } + _, err = x509.ParseCertificate(decodedCert.Bytes) + if err != nil { + return crt, fmt.Errorf( + "error parsing certificate: decodedCert.Bytes: %s", + err, + ) + } + + _, err = parsePrivateKeyPEM(string(key)) + if err != nil { + return crt, fmt.Errorf( + "error parsing private key: %s", + err, + ) + } + + crt.Cert = string(cert) + crt.Key = string(key) + + return crt, nil +} + +func generateCertificateAuthority( + cn string, + daysValid int, +) (certificate, error) { + priv, err := 
rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return certificate{}, fmt.Errorf("error generating rsa key: %s", err) + } + + return generateCertificateAuthorityWithKeyInternal(cn, daysValid, priv) +} + +func generateCertificateAuthorityWithPEMKey( + cn string, + daysValid int, + privPEM string, +) (certificate, error) { + priv, err := parsePrivateKeyPEM(privPEM) + if err != nil { + return certificate{}, fmt.Errorf("parsing private key: %s", err) + } + return generateCertificateAuthorityWithKeyInternal(cn, daysValid, priv) +} + +func generateCertificateAuthorityWithKeyInternal( + cn string, + daysValid int, + priv crypto.PrivateKey, +) (certificate, error) { + ca := certificate{} + + template, err := getBaseCertTemplate(cn, nil, nil, daysValid) + if err != nil { + return ca, err + } + // Override KeyUsage and IsCA + template.KeyUsage = x509.KeyUsageKeyEncipherment | + x509.KeyUsageDigitalSignature | + x509.KeyUsageCertSign + template.IsCA = true + + ca.Cert, ca.Key, err = getCertAndKey(template, priv, template, priv) + + return ca, err +} + +func generateSelfSignedCertificate( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, +) (certificate, error) { + priv, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return certificate{}, fmt.Errorf("error generating rsa key: %s", err) + } + return generateSelfSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, priv) +} + +func generateSelfSignedCertificateWithPEMKey( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, + privPEM string, +) (certificate, error) { + priv, err := parsePrivateKeyPEM(privPEM) + if err != nil { + return certificate{}, fmt.Errorf("parsing private key: %s", err) + } + return generateSelfSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, priv) +} + +func generateSelfSignedCertificateWithKeyInternal( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, + priv crypto.PrivateKey, +) (certificate, error) { + cert := certificate{} + + template, err := getBaseCertTemplate(cn, ips, alternateDNS, daysValid) + if err != nil { + return cert, err + } + + cert.Cert, cert.Key, err = getCertAndKey(template, priv, template, priv) + + return cert, err +} + +func generateSignedCertificate( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, + ca certificate, +) (certificate, error) { + priv, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return certificate{}, fmt.Errorf("error generating rsa key: %s", err) + } + return generateSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, ca, priv) +} + +func generateSignedCertificateWithPEMKey( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, + ca certificate, + privPEM string, +) (certificate, error) { + priv, err := parsePrivateKeyPEM(privPEM) + if err != nil { + return certificate{}, fmt.Errorf("parsing private key: %s", err) + } + return generateSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, ca, priv) +} + +func generateSignedCertificateWithKeyInternal( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, + ca certificate, + priv crypto.PrivateKey, +) (certificate, error) { + cert := certificate{} + + decodedSignerCert, _ := pem.Decode([]byte(ca.Cert)) + if decodedSignerCert == nil { + return cert, errors.New("unable to decode certificate") + } + signerCert, err := x509.ParseCertificate(decodedSignerCert.Bytes) + if err != 
nil { + return cert, fmt.Errorf( + "error parsing certificate: decodedSignerCert.Bytes: %s", + err, + ) + } + signerKey, err := parsePrivateKeyPEM(ca.Key) + if err != nil { + return cert, fmt.Errorf( + "error parsing private key: %s", + err, + ) + } + + template, err := getBaseCertTemplate(cn, ips, alternateDNS, daysValid) + if err != nil { + return cert, err + } + + cert.Cert, cert.Key, err = getCertAndKey( + template, + priv, + signerCert, + signerKey, + ) + + return cert, err +} + +func getCertAndKey( + template *x509.Certificate, + signeeKey crypto.PrivateKey, + parent *x509.Certificate, + signingKey crypto.PrivateKey, +) (string, string, error) { + signeePubKey, err := getPublicKey(signeeKey) + if err != nil { + return "", "", fmt.Errorf("error retrieving public key from signee key: %s", err) + } + derBytes, err := x509.CreateCertificate( + rand.Reader, + template, + parent, + signeePubKey, + signingKey, + ) + if err != nil { + return "", "", fmt.Errorf("error creating certificate: %s", err) + } + + certBuffer := bytes.Buffer{} + if err := pem.Encode( + &certBuffer, + &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}, + ); err != nil { + return "", "", fmt.Errorf("error pem-encoding certificate: %s", err) + } + + keyBuffer := bytes.Buffer{} + if err := pem.Encode( + &keyBuffer, + pemBlockForKey(signeeKey), + ); err != nil { + return "", "", fmt.Errorf("error pem-encoding key: %s", err) + } + + return certBuffer.String(), keyBuffer.String(), nil +} + +func getBaseCertTemplate( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, +) (*x509.Certificate, error) { + ipAddresses, err := getNetIPs(ips) + if err != nil { + return nil, err + } + dnsNames, err := getAlternateDNSStrs(alternateDNS) + if err != nil { + return nil, err + } + serialNumberUpperBound := new(big.Int).Lsh(big.NewInt(1), 128) + serialNumber, err := rand.Int(rand.Reader, serialNumberUpperBound) + if err != nil { + return nil, err + } + return &x509.Certificate{ + SerialNumber: serialNumber, + Subject: pkix.Name{ + CommonName: cn, + }, + IPAddresses: ipAddresses, + DNSNames: dnsNames, + NotBefore: time.Now(), + NotAfter: time.Now().Add(time.Hour * 24 * time.Duration(daysValid)), + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{ + x509.ExtKeyUsageServerAuth, + x509.ExtKeyUsageClientAuth, + }, + BasicConstraintsValid: true, + }, nil +} + +func getNetIPs(ips []interface{}) ([]net.IP, error) { + if ips == nil { + return []net.IP{}, nil + } + var ipStr string + var ok bool + var netIP net.IP + netIPs := make([]net.IP, len(ips)) + for i, ip := range ips { + ipStr, ok = ip.(string) + if !ok { + return nil, fmt.Errorf("error parsing ip: %v is not a string", ip) + } + netIP = net.ParseIP(ipStr) + if netIP == nil { + return nil, fmt.Errorf("error parsing ip: %s", ipStr) + } + netIPs[i] = netIP + } + return netIPs, nil +} + +func getAlternateDNSStrs(alternateDNS []interface{}) ([]string, error) { + if alternateDNS == nil { + return []string{}, nil + } + var dnsStr string + var ok bool + alternateDNSStrs := make([]string, len(alternateDNS)) + for i, dns := range alternateDNS { + dnsStr, ok = dns.(string) + if !ok { + return nil, fmt.Errorf( + "error processing alternate dns name: %v is not a string", + dns, + ) + } + alternateDNSStrs[i] = dnsStr + } + return alternateDNSStrs, nil +} + +func encryptAES(password string, plaintext string) (string, error) { + if plaintext == "" { + return "", nil + } + + key := make([]byte, 32) + copy(key, 
[]byte(password))
+	block, err := aes.NewCipher(key)
+	if err != nil {
+		return "", err
+	}
+
+	content := []byte(plaintext)
+	blockSize := block.BlockSize()
+	padding := blockSize - len(content)%blockSize
+	padtext := bytes.Repeat([]byte{byte(padding)}, padding)
+	content = append(content, padtext...)
+
+	ciphertext := make([]byte, aes.BlockSize+len(content))
+
+	iv := ciphertext[:aes.BlockSize]
+	if _, err := io.ReadFull(rand.Reader, iv); err != nil {
+		return "", err
+	}
+
+	mode := cipher.NewCBCEncrypter(block, iv)
+	mode.CryptBlocks(ciphertext[aes.BlockSize:], content)
+
+	return base64.StdEncoding.EncodeToString(ciphertext), nil
+}
+
+func decryptAES(password string, crypt64 string) (string, error) {
+	if crypt64 == "" {
+		return "", nil
+	}
+
+	key := make([]byte, 32)
+	copy(key, []byte(password))
+
+	crypt, err := base64.StdEncoding.DecodeString(crypt64)
+	if err != nil {
+		return "", err
+	}
+
+	block, err := aes.NewCipher(key)
+	if err != nil {
+		return "", err
+	}
+
+	iv := crypt[:aes.BlockSize]
+	crypt = crypt[aes.BlockSize:]
+	decrypted := make([]byte, len(crypt))
+	mode := cipher.NewCBCDecrypter(block, iv)
+	mode.CryptBlocks(decrypted, crypt)
+
+	return string(decrypted[:len(decrypted)-int(decrypted[len(decrypted)-1])]), nil
+}
diff --git a/vendor/github.com/Masterminds/sprig/v3/date.go b/vendor/github.com/Masterminds/sprig/v3/date.go
new file mode 100644
index 00000000000..ed022ddacac
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/v3/date.go
@@ -0,0 +1,152 @@
+package sprig
+
+import (
+	"strconv"
+	"time"
+)
+
+// Given a format and a date, format the date string.
+//
+// Date can be a `time.Time` or an `int, int32, int64`.
+// In the latter case, it is treated as seconds since UNIX
+// epoch.
+func date(fmt string, date interface{}) string {
+	return dateInZone(fmt, date, "Local")
+}
+
+func htmlDate(date interface{}) string {
+	return dateInZone("2006-01-02", date, "Local")
+}
+
+func htmlDateInZone(date interface{}, zone string) string {
+	return dateInZone("2006-01-02", date, zone)
+}
+
+func dateInZone(fmt string, date interface{}, zone string) string {
+	var t time.Time
+	switch date := date.(type) {
+	default:
+		t = time.Now()
+	case time.Time:
+		t = date
+	case *time.Time:
+		t = *date
+	case int64:
+		t = time.Unix(date, 0)
+	case int:
+		t = time.Unix(int64(date), 0)
+	case int32:
+		t = time.Unix(int64(date), 0)
+	}
+
+	loc, err := time.LoadLocation(zone)
+	if err != nil {
+		loc, _ = time.LoadLocation("UTC")
+	}
+
+	return t.In(loc).Format(fmt)
+}
+
+func dateModify(fmt string, date time.Time) time.Time {
+	d, err := time.ParseDuration(fmt)
+	if err != nil {
+		return date
+	}
+	return date.Add(d)
+}
+
+func mustDateModify(fmt string, date time.Time) (time.Time, error) {
+	d, err := time.ParseDuration(fmt)
+	if err != nil {
+		return time.Time{}, err
+	}
+	return date.Add(d), nil
+}
+
+func dateAgo(date interface{}) string {
+	var t time.Time
+
+	switch date := date.(type) {
+	default:
+		t = time.Now()
+	case time.Time:
+		t = date
+	case int64:
+		t = time.Unix(date, 0)
+	case int:
+		t = time.Unix(int64(date), 0)
+	}
+	// Drop resolution to seconds
+	duration := time.Since(t).Round(time.Second)
+	return duration.String()
+}
+
+func duration(sec interface{}) string {
+	var n int64
+	switch value := sec.(type) {
+	default:
+		n = 0
+	case string:
+		n, _ = strconv.ParseInt(value, 10, 64)
+	case int64:
+		n = value
+	}
+	return (time.Duration(n) * time.Second).String()
+}
+
+func durationRound(duration interface{}) string {
+	var d time.Duration
+	switch duration :=
duration.(type) { + default: + d = 0 + case string: + d, _ = time.ParseDuration(duration) + case int64: + d = time.Duration(duration) + case time.Time: + d = time.Since(duration) + } + + u := uint64(d) + neg := d < 0 + if neg { + u = -u + } + + var ( + year = uint64(time.Hour) * 24 * 365 + month = uint64(time.Hour) * 24 * 30 + day = uint64(time.Hour) * 24 + hour = uint64(time.Hour) + minute = uint64(time.Minute) + second = uint64(time.Second) + ) + switch { + case u > year: + return strconv.FormatUint(u/year, 10) + "y" + case u > month: + return strconv.FormatUint(u/month, 10) + "mo" + case u > day: + return strconv.FormatUint(u/day, 10) + "d" + case u > hour: + return strconv.FormatUint(u/hour, 10) + "h" + case u > minute: + return strconv.FormatUint(u/minute, 10) + "m" + case u > second: + return strconv.FormatUint(u/second, 10) + "s" + } + return "0s" +} + +func toDate(fmt, str string) time.Time { + t, _ := time.ParseInLocation(fmt, str, time.Local) + return t +} + +func mustToDate(fmt, str string) (time.Time, error) { + return time.ParseInLocation(fmt, str, time.Local) +} + +func unixEpoch(date time.Time) string { + return strconv.FormatInt(date.Unix(), 10) +} diff --git a/vendor/github.com/Masterminds/sprig/v3/defaults.go b/vendor/github.com/Masterminds/sprig/v3/defaults.go new file mode 100644 index 00000000000..b9f979666dd --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/defaults.go @@ -0,0 +1,163 @@ +package sprig + +import ( + "bytes" + "encoding/json" + "math/rand" + "reflect" + "strings" + "time" +) + +func init() { + rand.Seed(time.Now().UnixNano()) +} + +// dfault checks whether `given` is set, and returns default if not set. +// +// This returns `d` if `given` appears not to be set, and `given` otherwise. +// +// For numeric types 0 is unset. +// For strings, maps, arrays, and slices, len() = 0 is considered unset. +// For bool, false is unset. +// Structs are never considered unset. +// +// For everything else, including pointers, a nil value is unset. +func dfault(d interface{}, given ...interface{}) interface{} { + + if empty(given) || empty(given[0]) { + return d + } + return given[0] +} + +// empty returns true if the given value has the zero value for its type. +func empty(given interface{}) bool { + g := reflect.ValueOf(given) + if !g.IsValid() { + return true + } + + // Basically adapted from text/template.isTrue + switch g.Kind() { + default: + return g.IsNil() + case reflect.Array, reflect.Slice, reflect.Map, reflect.String: + return g.Len() == 0 + case reflect.Bool: + return !g.Bool() + case reflect.Complex64, reflect.Complex128: + return g.Complex() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return g.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return g.Uint() == 0 + case reflect.Float32, reflect.Float64: + return g.Float() == 0 + case reflect.Struct: + return false + } +} + +// coalesce returns the first non-empty value. +func coalesce(v ...interface{}) interface{} { + for _, val := range v { + if !empty(val) { + return val + } + } + return nil +} + +// all returns true if empty(x) is false for all values x in the list. +// If the list is empty, return true. +func all(v ...interface{}) bool { + for _, val := range v { + if empty(val) { + return false + } + } + return true +} + +// any returns true if empty(x) is false for any x in the list. +// If the list is empty, return false. 
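+// Example (in a template): any "" 0 "x" evaluates to true.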
+func any(v ...interface{}) bool { + for _, val := range v { + if !empty(val) { + return true + } + } + return false +} + +// fromJson decodes JSON into a structured value, ignoring errors. +func fromJson(v string) interface{} { + output, _ := mustFromJson(v) + return output +} + +// mustFromJson decodes JSON into a structured value, returning errors. +func mustFromJson(v string) (interface{}, error) { + var output interface{} + err := json.Unmarshal([]byte(v), &output) + return output, err +} + +// toJson encodes an item into a JSON string +func toJson(v interface{}) string { + output, _ := json.Marshal(v) + return string(output) +} + +func mustToJson(v interface{}) (string, error) { + output, err := json.Marshal(v) + if err != nil { + return "", err + } + return string(output), nil +} + +// toPrettyJson encodes an item into a pretty (indented) JSON string +func toPrettyJson(v interface{}) string { + output, _ := json.MarshalIndent(v, "", " ") + return string(output) +} + +func mustToPrettyJson(v interface{}) (string, error) { + output, err := json.MarshalIndent(v, "", " ") + if err != nil { + return "", err + } + return string(output), nil +} + +// toRawJson encodes an item into a JSON string with no escaping of HTML characters. +func toRawJson(v interface{}) string { + output, err := mustToRawJson(v) + if err != nil { + panic(err) + } + return string(output) +} + +// mustToRawJson encodes an item into a JSON string with no escaping of HTML characters. +func mustToRawJson(v interface{}) (string, error) { + buf := new(bytes.Buffer) + enc := json.NewEncoder(buf) + enc.SetEscapeHTML(false) + err := enc.Encode(&v) + if err != nil { + return "", err + } + return strings.TrimSuffix(buf.String(), "\n"), nil +} + +// ternary returns the first value if the last value is true, otherwise returns the second value. 
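+// The boolean comes last so it can be piped: {{ true | ternary "yes" "no" }} renders "yes".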
+func ternary(vt interface{}, vf interface{}, v bool) interface{} { + if v { + return vt + } + + return vf +} diff --git a/vendor/github.com/Masterminds/sprig/v3/dict.go b/vendor/github.com/Masterminds/sprig/v3/dict.go new file mode 100644 index 00000000000..ade88969840 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/dict.go @@ -0,0 +1,174 @@ +package sprig + +import ( + "github.com/imdario/mergo" + "github.com/mitchellh/copystructure" +) + +func get(d map[string]interface{}, key string) interface{} { + if val, ok := d[key]; ok { + return val + } + return "" +} + +func set(d map[string]interface{}, key string, value interface{}) map[string]interface{} { + d[key] = value + return d +} + +func unset(d map[string]interface{}, key string) map[string]interface{} { + delete(d, key) + return d +} + +func hasKey(d map[string]interface{}, key string) bool { + _, ok := d[key] + return ok +} + +func pluck(key string, d ...map[string]interface{}) []interface{} { + res := []interface{}{} + for _, dict := range d { + if val, ok := dict[key]; ok { + res = append(res, val) + } + } + return res +} + +func keys(dicts ...map[string]interface{}) []string { + k := []string{} + for _, dict := range dicts { + for key := range dict { + k = append(k, key) + } + } + return k +} + +func pick(dict map[string]interface{}, keys ...string) map[string]interface{} { + res := map[string]interface{}{} + for _, k := range keys { + if v, ok := dict[k]; ok { + res[k] = v + } + } + return res +} + +func omit(dict map[string]interface{}, keys ...string) map[string]interface{} { + res := map[string]interface{}{} + + omit := make(map[string]bool, len(keys)) + for _, k := range keys { + omit[k] = true + } + + for k, v := range dict { + if _, ok := omit[k]; !ok { + res[k] = v + } + } + return res +} + +func dict(v ...interface{}) map[string]interface{} { + dict := map[string]interface{}{} + lenv := len(v) + for i := 0; i < lenv; i += 2 { + key := strval(v[i]) + if i+1 >= lenv { + dict[key] = "" + continue + } + dict[key] = v[i+1] + } + return dict +} + +func merge(dst map[string]interface{}, srcs ...map[string]interface{}) interface{} { + for _, src := range srcs { + if err := mergo.Merge(&dst, src); err != nil { + // Swallow errors inside of a template. + return "" + } + } + return dst +} + +func mustMerge(dst map[string]interface{}, srcs ...map[string]interface{}) (interface{}, error) { + for _, src := range srcs { + if err := mergo.Merge(&dst, src); err != nil { + return nil, err + } + } + return dst, nil +} + +func mergeOverwrite(dst map[string]interface{}, srcs ...map[string]interface{}) interface{} { + for _, src := range srcs { + if err := mergo.MergeWithOverwrite(&dst, src); err != nil { + // Swallow errors inside of a template. 
+			return ""
+		}
+	}
+	return dst
+}
+
+func mustMergeOverwrite(dst map[string]interface{}, srcs ...map[string]interface{}) (interface{}, error) {
+	for _, src := range srcs {
+		if err := mergo.MergeWithOverwrite(&dst, src); err != nil {
+			return nil, err
+		}
+	}
+	return dst, nil
+}
+
+func values(dict map[string]interface{}) []interface{} {
+	values := []interface{}{}
+	for _, value := range dict {
+		values = append(values, value)
+	}
+
+	return values
+}
+
+func deepCopy(i interface{}) interface{} {
+	c, err := mustDeepCopy(i)
+	if err != nil {
+		panic("deepCopy error: " + err.Error())
+	}
+
+	return c
+}
+
+func mustDeepCopy(i interface{}) (interface{}, error) {
+	return copystructure.Copy(i)
+}
+
+func dig(ps ...interface{}) (interface{}, error) {
+	if len(ps) < 3 {
+		panic("dig needs at least three arguments")
+	}
+	dict := ps[len(ps)-1].(map[string]interface{})
+	def := ps[len(ps)-2]
+	ks := make([]string, len(ps)-2)
+	for i := 0; i < len(ks); i++ {
+		ks[i] = ps[i].(string)
+	}
+
+	return digFromDict(dict, def, ks)
+}
+
+func digFromDict(dict map[string]interface{}, d interface{}, ks []string) (interface{}, error) {
+	k, ns := ks[0], ks[1:len(ks)]
+	step, has := dict[k]
+	if !has {
+		return d, nil
+	}
+	if len(ns) == 0 {
+		return step, nil
+	}
+	return digFromDict(step.(map[string]interface{}), d, ns)
+}
diff --git a/vendor/github.com/Masterminds/sprig/v3/doc.go b/vendor/github.com/Masterminds/sprig/v3/doc.go
new file mode 100644
index 00000000000..aabb9d4489f
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/v3/doc.go
@@ -0,0 +1,19 @@
+/*
+Package sprig provides template functions for Go.
+
+This package contains a number of utility functions for working with data
+inside of Go `html/template` and `text/template` files.
+
+To add these functions, use the `template.Funcs()` method:
+
+	t := template.New("foo").Funcs(sprig.FuncMap())
+
+Note that you should add the function map before you parse any template files.
+
+In several cases, Sprig reverses the order of arguments from the way they
+appear in the standard library. This is to make it easier to pipe
+arguments into functions.
+
+See http://masterminds.github.io/sprig/ for more detailed documentation on each of the available functions.
+*/
+package sprig
diff --git a/vendor/github.com/Masterminds/sprig/v3/functions.go b/vendor/github.com/Masterminds/sprig/v3/functions.go
new file mode 100644
index 00000000000..57fcec1d9ea
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/v3/functions.go
@@ -0,0 +1,382 @@
+package sprig
+
+import (
+	"errors"
+	"html/template"
+	"math/rand"
+	"os"
+	"path"
+	"path/filepath"
+	"reflect"
+	"strconv"
+	"strings"
+	ttemplate "text/template"
+	"time"
+
+	util "github.com/Masterminds/goutils"
+	"github.com/huandu/xstrings"
+	"github.com/shopspring/decimal"
+)
+
+// FuncMap produces the function map.
+//
+// Use this to pass the functions into the template engine:
+//
+//	tpl := template.New("foo").Funcs(sprig.FuncMap())
+//
+func FuncMap() template.FuncMap {
+	return HtmlFuncMap()
+}
+
+// HermeticTxtFuncMap returns a 'text/template'.FuncMap with only repeatable functions.
+func HermeticTxtFuncMap() ttemplate.FuncMap {
+	r := TxtFuncMap()
+	for _, name := range nonhermeticFunctions {
+		delete(r, name)
+	}
+	return r
+}
+
+// HermeticHtmlFuncMap returns an 'html/template'.FuncMap with only repeatable functions.
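+// Non-hermetic entries such as "now", "env", "uuidv4", and the rand* helpers
+// (listed in nonhermeticFunctions below) are deleted from the returned map.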
+func HermeticHtmlFuncMap() template.FuncMap { + r := HtmlFuncMap() + for _, name := range nonhermeticFunctions { + delete(r, name) + } + return r +} + +// TxtFuncMap returns a 'text/template'.FuncMap +func TxtFuncMap() ttemplate.FuncMap { + return ttemplate.FuncMap(GenericFuncMap()) +} + +// HtmlFuncMap returns an 'html/template'.Funcmap +func HtmlFuncMap() template.FuncMap { + return template.FuncMap(GenericFuncMap()) +} + +// GenericFuncMap returns a copy of the basic function map as a map[string]interface{}. +func GenericFuncMap() map[string]interface{} { + gfm := make(map[string]interface{}, len(genericMap)) + for k, v := range genericMap { + gfm[k] = v + } + return gfm +} + +// These functions are not guaranteed to evaluate to the same result for given input, because they +// refer to the environment or global state. +var nonhermeticFunctions = []string{ + // Date functions + "date", + "date_in_zone", + "date_modify", + "now", + "htmlDate", + "htmlDateInZone", + "dateInZone", + "dateModify", + + // Strings + "randAlphaNum", + "randAlpha", + "randAscii", + "randNumeric", + "randBytes", + "uuidv4", + + // OS + "env", + "expandenv", + + // Network + "getHostByName", +} + +var genericMap = map[string]interface{}{ + "hello": func() string { return "Hello!" }, + + // Date functions + "ago": dateAgo, + "date": date, + "date_in_zone": dateInZone, + "date_modify": dateModify, + "dateInZone": dateInZone, + "dateModify": dateModify, + "duration": duration, + "durationRound": durationRound, + "htmlDate": htmlDate, + "htmlDateInZone": htmlDateInZone, + "must_date_modify": mustDateModify, + "mustDateModify": mustDateModify, + "mustToDate": mustToDate, + "now": time.Now, + "toDate": toDate, + "unixEpoch": unixEpoch, + + // Strings + "abbrev": abbrev, + "abbrevboth": abbrevboth, + "trunc": trunc, + "trim": strings.TrimSpace, + "upper": strings.ToUpper, + "lower": strings.ToLower, + "title": strings.Title, + "untitle": untitle, + "substr": substring, + // Switch order so that "foo" | repeat 5 + "repeat": func(count int, str string) string { return strings.Repeat(str, count) }, + // Deprecated: Use trimAll. 
+ "trimall": func(a, b string) string { return strings.Trim(b, a) }, + // Switch order so that "$foo" | trimall "$" + "trimAll": func(a, b string) string { return strings.Trim(b, a) }, + "trimSuffix": func(a, b string) string { return strings.TrimSuffix(b, a) }, + "trimPrefix": func(a, b string) string { return strings.TrimPrefix(b, a) }, + "nospace": util.DeleteWhiteSpace, + "initials": initials, + "randAlphaNum": randAlphaNumeric, + "randAlpha": randAlpha, + "randAscii": randAscii, + "randNumeric": randNumeric, + "swapcase": util.SwapCase, + "shuffle": xstrings.Shuffle, + "snakecase": xstrings.ToSnakeCase, + "camelcase": xstrings.ToCamelCase, + "kebabcase": xstrings.ToKebabCase, + "wrap": func(l int, s string) string { return util.Wrap(s, l) }, + "wrapWith": func(l int, sep, str string) string { return util.WrapCustom(str, l, sep, true) }, + // Switch order so that "foobar" | contains "foo" + "contains": func(substr string, str string) bool { return strings.Contains(str, substr) }, + "hasPrefix": func(substr string, str string) bool { return strings.HasPrefix(str, substr) }, + "hasSuffix": func(substr string, str string) bool { return strings.HasSuffix(str, substr) }, + "quote": quote, + "squote": squote, + "cat": cat, + "indent": indent, + "nindent": nindent, + "replace": replace, + "plural": plural, + "sha1sum": sha1sum, + "sha256sum": sha256sum, + "adler32sum": adler32sum, + "toString": strval, + + // Wrap Atoi to stop errors. + "atoi": func(a string) int { i, _ := strconv.Atoi(a); return i }, + "int64": toInt64, + "int": toInt, + "float64": toFloat64, + "seq": seq, + "toDecimal": toDecimal, + + //"gt": func(a, b int) bool {return a > b}, + //"gte": func(a, b int) bool {return a >= b}, + //"lt": func(a, b int) bool {return a < b}, + //"lte": func(a, b int) bool {return a <= b}, + + // split "/" foo/bar returns map[int]string{0: foo, 1: bar} + "split": split, + "splitList": func(sep, orig string) []string { return strings.Split(orig, sep) }, + // splitn "/" foo/bar/fuu returns map[int]string{0: foo, 1: bar/fuu} + "splitn": splitn, + "toStrings": strslice, + + "until": until, + "untilStep": untilStep, + + // VERY basic arithmetic. 
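+	// Integer helpers coerce operands through toInt64 and truncate; the *f
+	// variants route float math through shopspring/decimal.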
+ "add1": func(i interface{}) int64 { return toInt64(i) + 1 }, + "add": func(i ...interface{}) int64 { + var a int64 = 0 + for _, b := range i { + a += toInt64(b) + } + return a + }, + "sub": func(a, b interface{}) int64 { return toInt64(a) - toInt64(b) }, + "div": func(a, b interface{}) int64 { return toInt64(a) / toInt64(b) }, + "mod": func(a, b interface{}) int64 { return toInt64(a) % toInt64(b) }, + "mul": func(a interface{}, v ...interface{}) int64 { + val := toInt64(a) + for _, b := range v { + val = val * toInt64(b) + } + return val + }, + "randInt": func(min, max int) int { return rand.Intn(max-min) + min }, + "add1f": func(i interface{}) float64 { + return execDecimalOp(i, []interface{}{1}, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Add(d2) }) + }, + "addf": func(i ...interface{}) float64 { + a := interface{}(float64(0)) + return execDecimalOp(a, i, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Add(d2) }) + }, + "subf": func(a interface{}, v ...interface{}) float64 { + return execDecimalOp(a, v, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Sub(d2) }) + }, + "divf": func(a interface{}, v ...interface{}) float64 { + return execDecimalOp(a, v, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Div(d2) }) + }, + "mulf": func(a interface{}, v ...interface{}) float64 { + return execDecimalOp(a, v, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Mul(d2) }) + }, + "biggest": max, + "max": max, + "min": min, + "maxf": maxf, + "minf": minf, + "ceil": ceil, + "floor": floor, + "round": round, + + // string slices. Note that we reverse the order b/c that's better + // for template processing. + "join": join, + "sortAlpha": sortAlpha, + + // Defaults + "default": dfault, + "empty": empty, + "coalesce": coalesce, + "all": all, + "any": any, + "compact": compact, + "mustCompact": mustCompact, + "fromJson": fromJson, + "toJson": toJson, + "toPrettyJson": toPrettyJson, + "toRawJson": toRawJson, + "mustFromJson": mustFromJson, + "mustToJson": mustToJson, + "mustToPrettyJson": mustToPrettyJson, + "mustToRawJson": mustToRawJson, + "ternary": ternary, + "deepCopy": deepCopy, + "mustDeepCopy": mustDeepCopy, + + // Reflection + "typeOf": typeOf, + "typeIs": typeIs, + "typeIsLike": typeIsLike, + "kindOf": kindOf, + "kindIs": kindIs, + "deepEqual": reflect.DeepEqual, + + // OS: + "env": os.Getenv, + "expandenv": os.ExpandEnv, + + // Network: + "getHostByName": getHostByName, + + // Paths: + "base": path.Base, + "dir": path.Dir, + "clean": path.Clean, + "ext": path.Ext, + "isAbs": path.IsAbs, + + // Filepaths: + "osBase": filepath.Base, + "osClean": filepath.Clean, + "osDir": filepath.Dir, + "osExt": filepath.Ext, + "osIsAbs": filepath.IsAbs, + + // Encoding: + "b64enc": base64encode, + "b64dec": base64decode, + "b32enc": base32encode, + "b32dec": base32decode, + + // Data Structures: + "tuple": list, // FIXME: with the addition of append/prepend these are no longer immutable. 
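+	// "tuple" is kept as a backward-compatible alias of "list" ("list" replaced "tuple" in 2.10.0).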
+ "list": list, + "dict": dict, + "get": get, + "set": set, + "unset": unset, + "hasKey": hasKey, + "pluck": pluck, + "keys": keys, + "pick": pick, + "omit": omit, + "merge": merge, + "mergeOverwrite": mergeOverwrite, + "mustMerge": mustMerge, + "mustMergeOverwrite": mustMergeOverwrite, + "values": values, + + "append": push, "push": push, + "mustAppend": mustPush, "mustPush": mustPush, + "prepend": prepend, + "mustPrepend": mustPrepend, + "first": first, + "mustFirst": mustFirst, + "rest": rest, + "mustRest": mustRest, + "last": last, + "mustLast": mustLast, + "initial": initial, + "mustInitial": mustInitial, + "reverse": reverse, + "mustReverse": mustReverse, + "uniq": uniq, + "mustUniq": mustUniq, + "without": without, + "mustWithout": mustWithout, + "has": has, + "mustHas": mustHas, + "slice": slice, + "mustSlice": mustSlice, + "concat": concat, + "dig": dig, + "chunk": chunk, + "mustChunk": mustChunk, + + // Crypto: + "bcrypt": bcrypt, + "htpasswd": htpasswd, + "genPrivateKey": generatePrivateKey, + "derivePassword": derivePassword, + "buildCustomCert": buildCustomCertificate, + "genCA": generateCertificateAuthority, + "genCAWithKey": generateCertificateAuthorityWithPEMKey, + "genSelfSignedCert": generateSelfSignedCertificate, + "genSelfSignedCertWithKey": generateSelfSignedCertificateWithPEMKey, + "genSignedCert": generateSignedCertificate, + "genSignedCertWithKey": generateSignedCertificateWithPEMKey, + "encryptAES": encryptAES, + "decryptAES": decryptAES, + "randBytes": randBytes, + + // UUIDs: + "uuidv4": uuidv4, + + // SemVer: + "semver": semver, + "semverCompare": semverCompare, + + // Flow Control: + "fail": func(msg string) (string, error) { return "", errors.New(msg) }, + + // Regex + "regexMatch": regexMatch, + "mustRegexMatch": mustRegexMatch, + "regexFindAll": regexFindAll, + "mustRegexFindAll": mustRegexFindAll, + "regexFind": regexFind, + "mustRegexFind": mustRegexFind, + "regexReplaceAll": regexReplaceAll, + "mustRegexReplaceAll": mustRegexReplaceAll, + "regexReplaceAllLiteral": regexReplaceAllLiteral, + "mustRegexReplaceAllLiteral": mustRegexReplaceAllLiteral, + "regexSplit": regexSplit, + "mustRegexSplit": mustRegexSplit, + "regexQuoteMeta": regexQuoteMeta, + + // URLs: + "urlParse": urlParse, + "urlJoin": urlJoin, +} diff --git a/vendor/github.com/Masterminds/sprig/v3/go.mod b/vendor/github.com/Masterminds/sprig/v3/go.mod new file mode 100644 index 00000000000..c2597092ac0 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/go.mod @@ -0,0 +1,16 @@ +module github.com/Masterminds/sprig/v3 + +go 1.13 + +require ( + github.com/Masterminds/goutils v1.1.1 + github.com/Masterminds/semver/v3 v3.1.1 + github.com/google/uuid v1.1.1 + github.com/huandu/xstrings v1.3.1 + github.com/imdario/mergo v0.3.11 + github.com/mitchellh/copystructure v1.0.0 + github.com/shopspring/decimal v1.2.0 + github.com/spf13/cast v1.3.1 + github.com/stretchr/testify v1.5.1 + golang.org/x/crypto v0.0.0-20200414173820-0848c9571904 +) diff --git a/vendor/github.com/Masterminds/sprig/v3/go.sum b/vendor/github.com/Masterminds/sprig/v3/go.sum new file mode 100644 index 00000000000..b0e7f018b03 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/go.sum @@ -0,0 +1,52 @@ +github.com/Masterminds/goutils v1.1.0 h1:zukEsf/1JZwCMgHiK3GZftabmxiCw4apj3a28RPBiVg= +github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= +github.com/Masterminds/goutils v1.1.1/go.mod 
h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/semver/v3 v3.1.0 h1:Y2lUDsFKVRSYGojLJ1yLxSXdMmMYTYls0rCvoqmMUQk= +github.com/Masterminds/semver/v3 v3.1.0/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= +github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= +github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/huandu/xstrings v1.3.1 h1:4jgBlKK6tLKFvO8u5pmYjG91cqytmDCDvGh7ECVFfFs= +github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/imdario/mergo v0.3.8 h1:CGgOkSJeqMRmt0D9XLWExdT4m4F1vd3FV3VPt+0VxkQ= +github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.9 h1:UauaLniWCFHWd+Jp9oCEkTBj8VO/9DKg3PV3VCNMDIg= +github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.10 h1:6q5mVkdH/vYmqngx7kZQTjJ5HRsx+ImorDIEQ+beJgc= +github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20200414173820-0848c9571904 h1:bXoxMPcSLOq08zI3/c5dEBT6lE4eh+jOh886GHrn6V8= +golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/Masterminds/sprig/v3/list.go b/vendor/github.com/Masterminds/sprig/v3/list.go new file mode 100644 index 00000000000..ca0fbb78932 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/list.go @@ -0,0 +1,464 @@ +package sprig + +import ( + "fmt" + "math" + "reflect" + "sort" +) + +// Reflection is used in these functions so that slices and arrays of strings, +// ints, and other types not implementing []interface{} can be worked with. +// For example, this is useful if you need to work on the output of regexs. + +func list(v ...interface{}) []interface{} { + return v +} + +func push(list interface{}, v interface{}) []interface{} { + l, err := mustPush(list, v) + if err != nil { + panic(err) + } + + return l +} + +func mustPush(list interface{}, v interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[i] = l2.Index(i).Interface() + } + + return append(nl, v), nil + + default: + return nil, fmt.Errorf("Cannot push on type %s", tp) + } +} + +func prepend(list interface{}, v interface{}) []interface{} { + l, err := mustPrepend(list, v) + if err != nil { + panic(err) + } + + return l +} + +func mustPrepend(list interface{}, v interface{}) ([]interface{}, error) { + //return append([]interface{}{v}, list...) 
+ + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[i] = l2.Index(i).Interface() + } + + return append([]interface{}{v}, nl...), nil + + default: + return nil, fmt.Errorf("Cannot prepend on type %s", tp) + } +} + +func chunk(size int, list interface{}) [][]interface{} { + l, err := mustChunk(size, list) + if err != nil { + panic(err) + } + + return l +} + +func mustChunk(size int, list interface{}) ([][]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + + cs := int(math.Floor(float64(l-1)/float64(size)) + 1) + nl := make([][]interface{}, cs) + + for i := 0; i < cs; i++ { + clen := size + if i == cs-1 { + clen = int(math.Floor(math.Mod(float64(l), float64(size)))) + if clen == 0 { + clen = size + } + } + + nl[i] = make([]interface{}, clen) + + for j := 0; j < clen; j++ { + ix := i*size + j + nl[i][j] = l2.Index(ix).Interface() + } + } + + return nl, nil + + default: + return nil, fmt.Errorf("Cannot chunk type %s", tp) + } +} + +func last(list interface{}) interface{} { + l, err := mustLast(list) + if err != nil { + panic(err) + } + + return l +} + +func mustLast(list interface{}) (interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + return l2.Index(l - 1).Interface(), nil + default: + return nil, fmt.Errorf("Cannot find last on type %s", tp) + } +} + +func first(list interface{}) interface{} { + l, err := mustFirst(list) + if err != nil { + panic(err) + } + + return l +} + +func mustFirst(list interface{}) (interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + return l2.Index(0).Interface(), nil + default: + return nil, fmt.Errorf("Cannot find first on type %s", tp) + } +} + +func rest(list interface{}) []interface{} { + l, err := mustRest(list) + if err != nil { + panic(err) + } + + return l +} + +func mustRest(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + nl := make([]interface{}, l-1) + for i := 1; i < l; i++ { + nl[i-1] = l2.Index(i).Interface() + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot find rest on type %s", tp) + } +} + +func initial(list interface{}) []interface{} { + l, err := mustInitial(list) + if err != nil { + panic(err) + } + + return l +} + +func mustInitial(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + nl := make([]interface{}, l-1) + for i := 0; i < l-1; i++ { + nl[i] = l2.Index(i).Interface() + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot find initial on type %s", tp) + } +} + +func sortAlpha(list interface{}) []string { + k := reflect.Indirect(reflect.ValueOf(list)).Kind() + switch k { + case reflect.Slice, reflect.Array: + a := strslice(list) + s := sort.StringSlice(a) + s.Sort() + return s + } + return []string{strval(list)} +} + +func reverse(v 
interface{}) []interface{} { + l, err := mustReverse(v) + if err != nil { + panic(err) + } + + return l +} + +func mustReverse(v interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(v).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(v) + + l := l2.Len() + // We do not sort in place because the incoming array should not be altered. + nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[l-i-1] = l2.Index(i).Interface() + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot find reverse on type %s", tp) + } +} + +func compact(list interface{}) []interface{} { + l, err := mustCompact(list) + if err != nil { + panic(err) + } + + return l +} + +func mustCompact(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := []interface{}{} + var item interface{} + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if !empty(item) { + nl = append(nl, item) + } + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot compact on type %s", tp) + } +} + +func uniq(list interface{}) []interface{} { + l, err := mustUniq(list) + if err != nil { + panic(err) + } + + return l +} + +func mustUniq(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + dest := []interface{}{} + var item interface{} + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if !inList(dest, item) { + dest = append(dest, item) + } + } + + return dest, nil + default: + return nil, fmt.Errorf("Cannot find uniq on type %s", tp) + } +} + +func inList(haystack []interface{}, needle interface{}) bool { + for _, h := range haystack { + if reflect.DeepEqual(needle, h) { + return true + } + } + return false +} + +func without(list interface{}, omit ...interface{}) []interface{} { + l, err := mustWithout(list, omit...) + if err != nil { + panic(err) + } + + return l +} + +func mustWithout(list interface{}, omit ...interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + res := []interface{}{} + var item interface{} + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if !inList(omit, item) { + res = append(res, item) + } + } + + return res, nil + default: + return nil, fmt.Errorf("Cannot find without on type %s", tp) + } +} + +func has(needle interface{}, haystack interface{}) bool { + l, err := mustHas(needle, haystack) + if err != nil { + panic(err) + } + + return l +} + +func mustHas(needle interface{}, haystack interface{}) (bool, error) { + if haystack == nil { + return false, nil + } + tp := reflect.TypeOf(haystack).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(haystack) + var item interface{} + l := l2.Len() + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if reflect.DeepEqual(needle, item) { + return true, nil + } + } + + return false, nil + default: + return false, fmt.Errorf("Cannot find has on type %s", tp) + } +} + +// $list := [1, 2, 3, 4, 5] +// slice $list -> list[0:5] = list[:] +// slice $list 0 3 -> list[0:3] = list[:3] +// slice $list 3 5 -> list[3:5] +// slice $list 3 -> list[3:5] = list[3:] +func slice(list interface{}, indices ...interface{}) interface{} { + l, err := mustSlice(list, indices...) 
+	if err != nil {
+		panic(err)
+	}
+
+	return l
+}
+
+func mustSlice(list interface{}, indices ...interface{}) (interface{}, error) {
+	tp := reflect.TypeOf(list).Kind()
+	switch tp {
+	case reflect.Slice, reflect.Array:
+		l2 := reflect.ValueOf(list)
+
+		l := l2.Len()
+		if l == 0 {
+			return nil, nil
+		}
+
+		var start, end int
+		if len(indices) > 0 {
+			start = toInt(indices[0])
+		}
+		if len(indices) < 2 {
+			end = l
+		} else {
+			end = toInt(indices[1])
+		}
+
+		return l2.Slice(start, end).Interface(), nil
+	default:
+		return nil, fmt.Errorf("list should be type of slice or array but %s", tp)
+	}
+}
+
+func concat(lists ...interface{}) interface{} {
+	var res []interface{}
+	for _, list := range lists {
+		tp := reflect.TypeOf(list).Kind()
+		switch tp {
+		case reflect.Slice, reflect.Array:
+			l2 := reflect.ValueOf(list)
+			for i := 0; i < l2.Len(); i++ {
+				res = append(res, l2.Index(i).Interface())
+			}
+		default:
+			panic(fmt.Sprintf("Cannot concat type %s as list", tp))
+		}
+	}
+	return res
+}
diff --git a/vendor/github.com/Masterminds/sprig/v3/network.go b/vendor/github.com/Masterminds/sprig/v3/network.go
new file mode 100644
index 00000000000..108d78a9462
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/v3/network.go
@@ -0,0 +1,12 @@
+package sprig
+
+import (
+	"math/rand"
+	"net"
+)
+
+func getHostByName(name string) string {
+	addrs, _ := net.LookupHost(name)
+	// TODO: add error handling when release v3 comes out.
+	// NOTE: if the lookup fails, addrs is empty and rand.Intn panics.
+	return addrs[rand.Intn(len(addrs))]
+}
diff --git a/vendor/github.com/Masterminds/sprig/v3/numeric.go b/vendor/github.com/Masterminds/sprig/v3/numeric.go
new file mode 100644
index 00000000000..f68e4182ee6
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/v3/numeric.go
@@ -0,0 +1,186 @@
+package sprig
+
+import (
+	"fmt"
+	"math"
+	"strconv"
+	"strings"
+
+	"github.com/shopspring/decimal"
+	"github.com/spf13/cast"
+)
+
+// toFloat64 converts the given value to a 64-bit float
+func toFloat64(v interface{}) float64 {
+	return cast.ToFloat64(v)
+}
+
+func toInt(v interface{}) int {
+	return cast.ToInt(v)
+}
+
+// toInt64 converts integer types to 64-bit integers
+func toInt64(v interface{}) int64 {
+	return cast.ToInt64(v)
+}
+
+func max(a interface{}, i ...interface{}) int64 {
+	aa := toInt64(a)
+	for _, b := range i {
+		bb := toInt64(b)
+		if bb > aa {
+			aa = bb
+		}
+	}
+	return aa
+}
+
+func maxf(a interface{}, i ...interface{}) float64 {
+	aa := toFloat64(a)
+	for _, b := range i {
+		bb := toFloat64(b)
+		aa = math.Max(aa, bb)
+	}
+	return aa
+}
+
+func min(a interface{}, i ...interface{}) int64 {
+	aa := toInt64(a)
+	for _, b := range i {
+		bb := toInt64(b)
+		if bb < aa {
+			aa = bb
+		}
+	}
+	return aa
+}
+
+func minf(a interface{}, i ...interface{}) float64 {
+	aa := toFloat64(a)
+	for _, b := range i {
+		bb := toFloat64(b)
+		aa = math.Min(aa, bb)
+	}
+	return aa
+}
+
+func until(count int) []int {
+	step := 1
+	if count < 0 {
+		step = -1
+	}
+	return untilStep(0, count, step)
+}
+
+func untilStep(start, stop, step int) []int {
+	v := []int{}
+
+	if stop < start {
+		if step >= 0 {
+			return v
+		}
+		for i := start; i > stop; i += step {
+			v = append(v, i)
+		}
+		return v
+	}
+
+	if step <= 0 {
+		return v
+	}
+	for i := start; i < stop; i += step {
+		v = append(v, i)
+	}
+	return v
+}
+
+func floor(a interface{}) float64 {
+	aa := toFloat64(a)
+	return math.Floor(aa)
+}
+
+func ceil(a interface{}) float64 {
+	aa := toFloat64(a)
+	return math.Ceil(aa)
+}
+
+func round(a interface{}, p int, rOpt ...float64) float64 {
+	roundOn := .5
+	if len(rOpt) > 0 {
+		roundOn = rOpt[0]
+	}
+	val := toFloat64(a)
+	places := toFloat64(p)
+
+	var round float64
+	pow := math.Pow(10, places)
+	digit := pow * val
+	_, div := math.Modf(digit)
+	if div >= roundOn {
+		round = math.Ceil(digit)
+	} else {
+		round = math.Floor(digit)
+	}
+	return round / pow
+}
+
+// toDecimal converts a unix octal string (e.g. a file mode) to a decimal int64
+func toDecimal(v interface{}) int64 {
+	result, err := strconv.ParseInt(fmt.Sprint(v), 8, 64)
+	if err != nil {
+		return 0
+	}
+	return result
+}
+
+func seq(params ...int) string {
+	increment := 1
+	switch len(params) {
+	case 0:
+		return ""
+	case 1:
+		start := 1
+		end := params[0]
+		if end < start {
+			increment = -1
+		}
+		return intArrayToString(untilStep(start, end+increment, increment), " ")
+	case 3:
+		start := params[0]
+		end := params[2]
+		step := params[1]
+		if end < start {
+			increment = -1
+			if step > 0 {
+				return ""
+			}
+		}
+		return intArrayToString(untilStep(start, end+increment, step), " ")
+	case 2:
+		start := params[0]
+		end := params[1]
+		step := 1
+		if end < start {
+			step = -1
+		}
+		return intArrayToString(untilStep(start, end+step, step), " ")
+	default:
+		return ""
+	}
+}
+
+func intArrayToString(slice []int, delimiter string) string {
+	return strings.Trim(strings.Join(strings.Fields(fmt.Sprint(slice)), delimiter), "[]")
+}
+
+// execDecimalOp performs a float and subsequent decimal.Decimal conversion on inputs,
+// and iterates through a and b executing the mathematical operation f
+func execDecimalOp(a interface{}, b []interface{}, f func(d1, d2 decimal.Decimal) decimal.Decimal) float64 {
+	prt := decimal.NewFromFloat(toFloat64(a))
+	for _, x := range b {
+		dx := decimal.NewFromFloat(toFloat64(x))
+		prt = f(prt, dx)
+	}
+	rslt, _ := prt.Float64()
+	return rslt
+}
diff --git a/vendor/github.com/Masterminds/sprig/v3/reflect.go b/vendor/github.com/Masterminds/sprig/v3/reflect.go
new file mode 100644
index 00000000000..8a65c132f08
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/v3/reflect.go
@@ -0,0 +1,28 @@
+package sprig
+
+import (
+	"fmt"
+	"reflect"
+)
+
+// typeIs returns true if the src is the type named in target.
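+//
+// For example, in a template (the .Value pipeline here is assumed for illustration):
+//
+//	{{ if typeIs "string" .Value }}it is a string{{ end }}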
+func typeIs(target string, src interface{}) bool { + return target == typeOf(src) +} + +func typeIsLike(target string, src interface{}) bool { + t := typeOf(src) + return target == t || "*"+target == t +} + +func typeOf(src interface{}) string { + return fmt.Sprintf("%T", src) +} + +func kindIs(target string, src interface{}) bool { + return target == kindOf(src) +} + +func kindOf(src interface{}) string { + return reflect.ValueOf(src).Kind().String() +} diff --git a/vendor/github.com/Masterminds/sprig/v3/regex.go b/vendor/github.com/Masterminds/sprig/v3/regex.go new file mode 100644 index 00000000000..fab55101897 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/regex.go @@ -0,0 +1,83 @@ +package sprig + +import ( + "regexp" +) + +func regexMatch(regex string, s string) bool { + match, _ := regexp.MatchString(regex, s) + return match +} + +func mustRegexMatch(regex string, s string) (bool, error) { + return regexp.MatchString(regex, s) +} + +func regexFindAll(regex string, s string, n int) []string { + r := regexp.MustCompile(regex) + return r.FindAllString(s, n) +} + +func mustRegexFindAll(regex string, s string, n int) ([]string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return []string{}, err + } + return r.FindAllString(s, n), nil +} + +func regexFind(regex string, s string) string { + r := regexp.MustCompile(regex) + return r.FindString(s) +} + +func mustRegexFind(regex string, s string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.FindString(s), nil +} + +func regexReplaceAll(regex string, s string, repl string) string { + r := regexp.MustCompile(regex) + return r.ReplaceAllString(s, repl) +} + +func mustRegexReplaceAll(regex string, s string, repl string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.ReplaceAllString(s, repl), nil +} + +func regexReplaceAllLiteral(regex string, s string, repl string) string { + r := regexp.MustCompile(regex) + return r.ReplaceAllLiteralString(s, repl) +} + +func mustRegexReplaceAllLiteral(regex string, s string, repl string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.ReplaceAllLiteralString(s, repl), nil +} + +func regexSplit(regex string, s string, n int) []string { + r := regexp.MustCompile(regex) + return r.Split(s, n) +} + +func mustRegexSplit(regex string, s string, n int) ([]string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return []string{}, err + } + return r.Split(s, n), nil +} + +func regexQuoteMeta(s string) string { + return regexp.QuoteMeta(s) +} diff --git a/vendor/github.com/Masterminds/sprig/v3/semver.go b/vendor/github.com/Masterminds/sprig/v3/semver.go new file mode 100644 index 00000000000..3fbe08aa637 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/semver.go @@ -0,0 +1,23 @@ +package sprig + +import ( + sv2 "github.com/Masterminds/semver/v3" +) + +func semverCompare(constraint, version string) (bool, error) { + c, err := sv2.NewConstraint(constraint) + if err != nil { + return false, err + } + + v, err := sv2.NewVersion(version) + if err != nil { + return false, err + } + + return c.Check(v), nil +} + +func semver(version string) (*sv2.Version, error) { + return sv2.NewVersion(version) +} diff --git a/vendor/github.com/Masterminds/sprig/v3/strings.go b/vendor/github.com/Masterminds/sprig/v3/strings.go new file mode 100644 index 00000000000..e0ae628c841 --- /dev/null +++ 
b/vendor/github.com/Masterminds/sprig/v3/strings.go @@ -0,0 +1,236 @@ +package sprig + +import ( + "encoding/base32" + "encoding/base64" + "fmt" + "reflect" + "strconv" + "strings" + + util "github.com/Masterminds/goutils" +) + +func base64encode(v string) string { + return base64.StdEncoding.EncodeToString([]byte(v)) +} + +func base64decode(v string) string { + data, err := base64.StdEncoding.DecodeString(v) + if err != nil { + return err.Error() + } + return string(data) +} + +func base32encode(v string) string { + return base32.StdEncoding.EncodeToString([]byte(v)) +} + +func base32decode(v string) string { + data, err := base32.StdEncoding.DecodeString(v) + if err != nil { + return err.Error() + } + return string(data) +} + +func abbrev(width int, s string) string { + if width < 4 { + return s + } + r, _ := util.Abbreviate(s, width) + return r +} + +func abbrevboth(left, right int, s string) string { + if right < 4 || left > 0 && right < 7 { + return s + } + r, _ := util.AbbreviateFull(s, left, right) + return r +} +func initials(s string) string { + // Wrap this just to eliminate the var args, which templates don't do well. + return util.Initials(s) +} + +func randAlphaNumeric(count int) string { + // It is not possible, it appears, to actually generate an error here. + r, _ := util.CryptoRandomAlphaNumeric(count) + return r +} + +func randAlpha(count int) string { + r, _ := util.CryptoRandomAlphabetic(count) + return r +} + +func randAscii(count int) string { + r, _ := util.CryptoRandomAscii(count) + return r +} + +func randNumeric(count int) string { + r, _ := util.CryptoRandomNumeric(count) + return r +} + +func untitle(str string) string { + return util.Uncapitalize(str) +} + +func quote(str ...interface{}) string { + out := make([]string, 0, len(str)) + for _, s := range str { + if s != nil { + out = append(out, fmt.Sprintf("%q", strval(s))) + } + } + return strings.Join(out, " ") +} + +func squote(str ...interface{}) string { + out := make([]string, 0, len(str)) + for _, s := range str { + if s != nil { + out = append(out, fmt.Sprintf("'%v'", s)) + } + } + return strings.Join(out, " ") +} + +func cat(v ...interface{}) string { + v = removeNilElements(v) + r := strings.TrimSpace(strings.Repeat("%v ", len(v))) + return fmt.Sprintf(r, v...) 
+}
+
+func indent(spaces int, v string) string {
+	pad := strings.Repeat(" ", spaces)
+	return pad + strings.Replace(v, "\n", "\n"+pad, -1)
+}
+
+func nindent(spaces int, v string) string {
+	return "\n" + indent(spaces, v)
+}
+
+func replace(old, new, src string) string {
+	return strings.Replace(src, old, new, -1)
+}
+
+func plural(one, many string, count int) string {
+	if count == 1 {
+		return one
+	}
+	return many
+}
+
+func strslice(v interface{}) []string {
+	switch v := v.(type) {
+	case []string:
+		return v
+	case []interface{}:
+		b := make([]string, 0, len(v))
+		for _, s := range v {
+			if s != nil {
+				b = append(b, strval(s))
+			}
+		}
+		return b
+	default:
+		val := reflect.ValueOf(v)
+		switch val.Kind() {
+		case reflect.Array, reflect.Slice:
+			l := val.Len()
+			b := make([]string, 0, l)
+			for i := 0; i < l; i++ {
+				value := val.Index(i).Interface()
+				if value != nil {
+					b = append(b, strval(value))
+				}
+			}
+			return b
+		default:
+			if v == nil {
+				return []string{}
+			}
+
+			return []string{strval(v)}
+		}
+	}
+}
+
+func removeNilElements(v []interface{}) []interface{} {
+	newSlice := make([]interface{}, 0, len(v))
+	for _, i := range v {
+		if i != nil {
+			newSlice = append(newSlice, i)
+		}
+	}
+	return newSlice
+}
+
+func strval(v interface{}) string {
+	switch v := v.(type) {
+	case string:
+		return v
+	case []byte:
+		return string(v)
+	case error:
+		return v.Error()
+	case fmt.Stringer:
+		return v.String()
+	default:
+		return fmt.Sprintf("%v", v)
+	}
+}
+
+func trunc(c int, s string) string {
+	if c < 0 && len(s)+c > 0 {
+		return s[len(s)+c:]
+	}
+	if c >= 0 && len(s) > c {
+		return s[:c]
+	}
+	return s
+}
+
+func join(sep string, v interface{}) string {
+	return strings.Join(strslice(v), sep)
+}
+
+func split(sep, orig string) map[string]string {
+	parts := strings.Split(orig, sep)
+	res := make(map[string]string, len(parts))
+	for i, v := range parts {
+		res["_"+strconv.Itoa(i)] = v
+	}
+	return res
+}
+
+func splitn(sep string, n int, orig string) map[string]string {
+	parts := strings.SplitN(orig, sep, n)
+	res := make(map[string]string, len(parts))
+	for i, v := range parts {
+		res["_"+strconv.Itoa(i)] = v
+	}
+	return res
+}
+
+// substring creates a substring of the given string.
+//
+// If start is < 0, this calls string[:end].
+//
+// If start is >= 0 and end is < 0 or greater than the length of s, this calls string[start:].
+//
+// Otherwise, this calls string[start:end].
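+//
+// For example (behaviour follows directly from the rules above):
+//
+//	substring(0, 3, "hello")  // "hel"
+//	substring(3, -1, "hello") // "lo"  (negative end falls back to s[start:])
+//	substring(-1, 3, "hello") // "hel" (negative start falls back to s[:end])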
+func substring(start, end int, s string) string { + if start < 0 { + return s[:end] + } + if end < 0 || end > len(s) { + return s[start:] + } + return s[start:end] +} diff --git a/vendor/github.com/Masterminds/sprig/v3/url.go b/vendor/github.com/Masterminds/sprig/v3/url.go new file mode 100644 index 00000000000..b8e120e19ba --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/url.go @@ -0,0 +1,66 @@ +package sprig + +import ( + "fmt" + "net/url" + "reflect" +) + +func dictGetOrEmpty(dict map[string]interface{}, key string) string { + value, ok := dict[key] + if !ok { + return "" + } + tp := reflect.TypeOf(value).Kind() + if tp != reflect.String { + panic(fmt.Sprintf("unable to parse %s key, must be of type string, but %s found", key, tp.String())) + } + return reflect.ValueOf(value).String() +} + +// parses given URL to return dict object +func urlParse(v string) map[string]interface{} { + dict := map[string]interface{}{} + parsedURL, err := url.Parse(v) + if err != nil { + panic(fmt.Sprintf("unable to parse url: %s", err)) + } + dict["scheme"] = parsedURL.Scheme + dict["host"] = parsedURL.Host + dict["hostname"] = parsedURL.Hostname() + dict["path"] = parsedURL.Path + dict["query"] = parsedURL.RawQuery + dict["opaque"] = parsedURL.Opaque + dict["fragment"] = parsedURL.Fragment + if parsedURL.User != nil { + dict["userinfo"] = parsedURL.User.String() + } else { + dict["userinfo"] = "" + } + + return dict +} + +// join given dict to URL string +func urlJoin(d map[string]interface{}) string { + resURL := url.URL{ + Scheme: dictGetOrEmpty(d, "scheme"), + Host: dictGetOrEmpty(d, "host"), + Path: dictGetOrEmpty(d, "path"), + RawQuery: dictGetOrEmpty(d, "query"), + Opaque: dictGetOrEmpty(d, "opaque"), + Fragment: dictGetOrEmpty(d, "fragment"), + } + userinfo := dictGetOrEmpty(d, "userinfo") + var user *url.Userinfo + if userinfo != "" { + tempURL, err := url.Parse(fmt.Sprintf("proto://%s@host", userinfo)) + if err != nil { + panic(fmt.Sprintf("unable to parse userinfo in dict: %s", err)) + } + user = tempURL.User + } + + resURL.User = user + return resURL.String() +} diff --git a/vendor/github.com/Shopify/sarama/.gitignore b/vendor/github.com/Shopify/sarama/.gitignore new file mode 100644 index 00000000000..2c9adc20b31 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/.gitignore @@ -0,0 +1,29 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so +*.test + +# Folders +_obj +_test +.vagrant + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +coverage.txt +profile.out + +simplest-uncommitted-msg-0.1-jar-with-dependencies.jar diff --git a/vendor/github.com/Shopify/sarama/.golangci.yml b/vendor/github.com/Shopify/sarama/.golangci.yml new file mode 100644 index 00000000000..09e5c468cb8 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/.golangci.yml @@ -0,0 +1,74 @@ +run: + timeout: 5m + deadline: 10m + +linters-settings: + govet: + check-shadowing: false + golint: + min-confidence: 0 + gocyclo: + min-complexity: 99 + maligned: + suggest-new: true + dupl: + threshold: 100 + goconst: + min-len: 2 + min-occurrences: 3 + misspell: + locale: US + goimports: + local-prefixes: github.com/Shopify/sarama + gocritic: + enabled-tags: + - diagnostic + - experimental + - opinionated + - performance + - style + disabled-checks: + - wrapperFunc + - ifElseChain + funlen: + lines: 300 + statements: 300 + +linters: + disable-all: true + enable: + - bodyclose + 
    - deadcode
+    - depguard
+    - dogsled
+    # - dupl
+    - errcheck
+    - funlen
+    - gochecknoinits
+    # - goconst
+    # - gocritic
+    - gocyclo
+    - gofmt
+    - goimports
+    # - golint
+    - gosec
+    # - gosimple
+    - govet
+    # - ineffassign
+    # - misspell
+    # - nakedret
+    # - scopelint
+    - staticcheck
+    - structcheck
+    # - stylecheck
+    - typecheck
+    - unconvert
+    - unused
+    - varcheck
+    - whitespace
+
+issues:
+  exclude:
+    - "G404: Use of weak random number generator"
+  # Maximum count of issues with the same text. Set to 0 for unlimited; the default is 3.
+  max-same-issues: 0
diff --git a/vendor/github.com/Shopify/sarama/CHANGELOG.md b/vendor/github.com/Shopify/sarama/CHANGELOG.md
new file mode 100644
index 00000000000..59ccd1de581
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/CHANGELOG.md
@@ -0,0 +1,1019 @@
+# Changelog
+
+#### Unreleased
+
+#### Version 1.28.0 (2021-02-15)
+
+**Note that with this release we change `RoundRobinBalancer` strategy to match Java client behavior. See #1788 for details.**
+
+- #1870 - @kvch - Update Kerberos library to latest major
+- #1876 - @bai - Update docs, reference pkg.go.dev
+- #1846 - @wclaeys - Do not ignore Consumer.Offsets.AutoCommit.Enable config on Close
+- #1747 - @XSAM - fix: mock sync producer does not handle the offset while sending messages
+- #1863 - @bai - Add support for Kafka 2.7.0 + update lz4 and klauspost/compress dependencies
+- #1788 - @kzinglzy - feat[balance_strategy]: announcing a new round robin balance strategy
+- #1862 - @bai - Fix CI setenv permissions issues
+- #1832 - @ilyakaznacheev - Update Godoc link to pkg.go.dev
+- #1822 - @danp - KIP-392: Allow consumers to fetch from closest replica
+
+#### Version 1.27.2 (2020-10-21)
+
+Improvements:
+- #1750 - @krantideep95 - Adds missing mock responses for mocking consumer group
+
+Fixes:
+- #1817 - reverts #1785 - Add private method to Client interface to prevent implementation
+
+#### Version 1.27.1 (2020-10-07)
+
+Improvements:
+- #1775 - @d1egoaz - Adds a Producer Interceptor example
+- #1781 - @justin-chen - Refresh brokers given list of seed brokers
+- #1784 - @justin-chen - Add randomize seed broker method
+- #1790 - @d1egoaz - remove example binary
+- #1798 - @bai - Test against Go 1.15
+- #1785 - @justin-chen - Add private method to Client interface to prevent implementation
+- #1802 - @uvw - Support Go 1.13 error unwrapping
+
+Fixes:
+- #1791 - @stanislavkozlovski - bump default version to 1.0.0
+
+#### Version 1.27.0 (2020-08-11)
+
+Improvements:
+- #1466 - @rubenvp8510 - Expose kerberos fast negotiation configuration
+- #1695 - @KJTsanaktsidis - Use docker-compose to run the functional tests
+- #1699 - @wclaeys - Consumer group support for manually committing offsets
+- #1714 - @bai - Bump Go to version 1.14.3, golangci-lint to 1.27.0
+- #1726 - @d1egoaz - Include zstd on the functional tests
+- #1730 - @d1egoaz - KIP-42 Add producer and consumer interceptors
+- #1738 - @varun06 - fixed variable names that are named same as some std lib package names
+- #1741 - @varun06 - updated zstd dependency to latest v1.10.10
+- #1743 - @varun06 - Fixed declaration dependencies and other lint issues in code base
+- #1763 - @alrs - remove deprecated tls options from test
+- #1769 - @bai - Add support for Kafka 2.6.0
+
+Fixes:
+- #1697 - @kvch - Use gofork for encoding/asn1 to fix ASN errors during Kerberos authentication
+- #1744 - @alrs - Fix isBalanced Function Signature
+
+#### Version 1.26.4 (2020-05-19)
+
+Fixes:
+- #1701 - @d1egoaz - Set server name only for the current broker
+- #1694 - @dnwe - testfix: set KAFKA_HEAP_OPTS for zk and kafka
+
+#### Version 1.26.3 (2020-05-07)
+
+Fixes:
+- #1692 - @d1egoaz - Set tls ServerName to fix issue: either ServerName or InsecureSkipVerify must be specified in the tls.Config
+
+#### Version 1.26.2 (2020-05-06)
+
+⚠️ Known Issues:
+
+This release has been marked as not ready for production and may be unstable, please use v1.26.4.
+
+Improvements:
+- #1560 - @iyacontrol - add sync pool for gzip 1-9
+- #1605 - @dnwe - feat: protocol support for V11 fetch w/ rackID
+- #1617 - @sladkoff / @dwi-di / @random-dwi - Add support for alter/list partition reassignments APIs
+- #1632 - @bai - Add support for Go 1.14
+- #1640 - @random-dwi - Feature/fix list partition reassignments
+- #1646 - @mimaison - Add DescribeLogDirs to admin client
+- #1667 - @bai - Add support for kafka 2.5.0
+
+Fixes:
+- #1594 - @sladkoff - Sets ConfigEntry.Default flag in addition to the ConfigEntry.Source for Kafka versions > V1_1_0_0
+- #1601 - @alrs - fix: remove use of testing.T.FailNow() inside goroutine
+- #1602 - @d1egoaz - adds a note about consumer groups Consume method
+- #1607 - @darklore - Fix memory leak when Broker.Open and Broker.Close called repeatedly
+- #1613 - @wblakecaldwell - Updated "retrying" log message when BackoffFunc implemented
+- #1614 - @alrs - produce_response.go: Remove Unused Functions
+- #1619 - @alrs - tools/kafka-producer-performance: prune unused flag variables
+- #1639 - @agriffaut - Handle errors with no message but error code
+- #1643 - @kzinglzy - fix `config.net.keepalive`
+- #1644 - @KJTsanaktsidis - Fix brokers continually allocating new Session IDs
+- #1645 - @Stephan14 - Remove broker(s) which no longer exist in metadata
+- #1650 - @lavoiesl - Return the response error in heartbeatLoop
+- #1661 - @KJTsanaktsidis - Fix "broker received out of order sequence" when brokers die
+- #1666 - @KevinJCross - Bugfix: Allow TLS connections to work over socks proxy.
+
+#### Version 1.26.1 (2020-02-04)
+
+Improvements:
+- Add requests-in-flight metric ([1539](https://github.com/Shopify/sarama/pull/1539))
+- Fix misleading example for cluster admin ([1595](https://github.com/Shopify/sarama/pull/1595))
+- Replace Travis with GitHub Actions, linters housekeeping ([1573](https://github.com/Shopify/sarama/pull/1573))
+- Allow BalanceStrategy to provide custom assignment data ([1592](https://github.com/Shopify/sarama/pull/1592))
+
+Bug Fixes:
+- Adds back Consumer.Offsets.CommitInterval to fix API ([1590](https://github.com/Shopify/sarama/pull/1590))
+- Fix error message s/CommitInterval/AutoCommit.Interval ([1589](https://github.com/Shopify/sarama/pull/1589))
+
+#### Version 1.26.0 (2020-01-24)
+
+New Features:
+- Enable zstd compression
+  ([1574](https://github.com/Shopify/sarama/pull/1574),
+  [1582](https://github.com/Shopify/sarama/pull/1582))
+- Support headers in tools kafka-console-producer
+  ([1549](https://github.com/Shopify/sarama/pull/1549))
+
+Improvements:
+- Add SASL AuthIdentity to SASL frames (authzid)
+  ([1585](https://github.com/Shopify/sarama/pull/1585)).
+
+Bug Fixes:
+- Sending messages with ZStd compression enabled fails in multiple ways
+  ([1252](https://github.com/Shopify/sarama/issues/1252)).
+- Use the broker for any admin on BrokerConfig
+  ([1571](https://github.com/Shopify/sarama/pull/1571)).
+- Set DescribeConfigRequest Version field
+  ([1576](https://github.com/Shopify/sarama/pull/1576)).
+- ConsumerGroup flooding logs with client/metadata update req
+  ([1578](https://github.com/Shopify/sarama/pull/1578)).
+- MetadataRequest version in DescribeCluster + ([1580](https://github.com/Shopify/sarama/pull/1580)). +- Fix deadlock in consumer group handleError + ([1581](https://github.com/Shopify/sarama/pull/1581)) +- Fill in the Fetch{Request,Response} protocol + ([1582](https://github.com/Shopify/sarama/pull/1582)). +- Retry topic request on ControllerNotAvailable + ([1586](https://github.com/Shopify/sarama/pull/1586)). + +#### Version 1.25.0 (2020-01-13) + +New Features: +- Support TLS protocol in kafka-producer-performance + ([1538](https://github.com/Shopify/sarama/pull/1538)). +- Add support for kafka 2.4.0 + ([1552](https://github.com/Shopify/sarama/pull/1552)). + +Improvements: +- Allow the Consumer to disable auto-commit offsets + ([1164](https://github.com/Shopify/sarama/pull/1164)). +- Produce records with consistent timestamps + ([1455](https://github.com/Shopify/sarama/pull/1455)). + +Bug Fixes: +- Fix incorrect SetTopicMetadata name mentions + ([1534](https://github.com/Shopify/sarama/pull/1534)). +- Fix client.tryRefreshMetadata Println + ([1535](https://github.com/Shopify/sarama/pull/1535)). +- Fix panic on calling updateMetadata on closed client + ([1531](https://github.com/Shopify/sarama/pull/1531)). +- Fix possible faulty metrics in TestFuncProducing + ([1545](https://github.com/Shopify/sarama/pull/1545)). + +#### Version 1.24.1 (2019-10-31) + +New Features: +- Add DescribeLogDirs Request/Response pair + ([1520](https://github.com/Shopify/sarama/pull/1520)). + +Bug Fixes: +- Fix ClusterAdmin returning invalid controller ID on DescribeCluster + ([1518](https://github.com/Shopify/sarama/pull/1518)). +- Fix issue with consumergroup not rebalancing when new partition is added + ([1525](https://github.com/Shopify/sarama/pull/1525)). +- Ensure consistent use of read/write deadlines + ([1529](https://github.com/Shopify/sarama/pull/1529)). + +#### Version 1.24.0 (2019-10-09) + +New Features: +- Add sticky partition assignor + ([1416](https://github.com/Shopify/sarama/pull/1416)). +- Switch from cgo zstd package to pure Go implementation + ([1477](https://github.com/Shopify/sarama/pull/1477)). + +Improvements: +- Allow creating ClusterAdmin from client + ([1415](https://github.com/Shopify/sarama/pull/1415)). +- Set KafkaVersion in ListAcls method + ([1452](https://github.com/Shopify/sarama/pull/1452)). +- Set request version in CreateACL ClusterAdmin method + ([1458](https://github.com/Shopify/sarama/pull/1458)). +- Set request version in DeleteACL ClusterAdmin method + ([1461](https://github.com/Shopify/sarama/pull/1461)). +- Handle missed error codes on TopicMetaDataRequest and GroupCoordinatorRequest + ([1464](https://github.com/Shopify/sarama/pull/1464)). +- Remove direct usage of gofork + ([1465](https://github.com/Shopify/sarama/pull/1465)). +- Add support for Go 1.13 + ([1478](https://github.com/Shopify/sarama/pull/1478)). +- Improve behavior of NewMockListAclsResponse + ([1481](https://github.com/Shopify/sarama/pull/1481)). + +Bug Fixes: +- Fix race condition in consumergroup example + ([1434](https://github.com/Shopify/sarama/pull/1434)). +- Fix brokerProducer goroutine leak + ([1442](https://github.com/Shopify/sarama/pull/1442)). +- Use released version of lz4 library + ([1469](https://github.com/Shopify/sarama/pull/1469)). +- Set correct version in MockDeleteTopicsResponse + ([1484](https://github.com/Shopify/sarama/pull/1484)). +- Fix CLI help message typo + ([1494](https://github.com/Shopify/sarama/pull/1494)). 
+
+Known Issues:
+- Please **don't** use Zstd, as it doesn't work right now.
+  See https://github.com/Shopify/sarama/issues/1252
+
+#### Version 1.23.1 (2019-07-22)
+
+Bug Fixes:
+- Fix fetch delete record bug
+  ([1425](https://github.com/Shopify/sarama/pull/1425)).
+- Handle SASL/OAUTHBEARER token rejection
+  ([1428](https://github.com/Shopify/sarama/pull/1428)).
+
+#### Version 1.23.0 (2019-07-02)
+
+New Features:
+- Add support for Kafka 2.3.0
+  ([1418](https://github.com/Shopify/sarama/pull/1418)).
+- Add support for ListConsumerGroupOffsets v2
+  ([1374](https://github.com/Shopify/sarama/pull/1374)).
+- Add support for DeleteConsumerGroup
+  ([1417](https://github.com/Shopify/sarama/pull/1417)).
+- Add support for SASLVersion configuration
+  ([1410](https://github.com/Shopify/sarama/pull/1410)).
+- Add kerberos support
+  ([1366](https://github.com/Shopify/sarama/pull/1366)).
+
+Improvements:
+- Improve sasl_scram_client example
+  ([1406](https://github.com/Shopify/sarama/pull/1406)).
+- Fix shutdown and race-condition in consumer-group example
+  ([1404](https://github.com/Shopify/sarama/pull/1404)).
+- Add support for error codes 77-81
+  ([1397](https://github.com/Shopify/sarama/pull/1397)).
+- Pool internal objects allocated per message
+  ([1385](https://github.com/Shopify/sarama/pull/1385)).
+- Reduce packet decoder allocations
+  ([1373](https://github.com/Shopify/sarama/pull/1373)).
+- Support timeout when fetching metadata
+  ([1359](https://github.com/Shopify/sarama/pull/1359)).
+
+Bug Fixes:
+- Fix fetch size integer overflow
+  ([1376](https://github.com/Shopify/sarama/pull/1376)).
+- Handle and log throttled FetchResponses
+  ([1383](https://github.com/Shopify/sarama/pull/1383)).
+- Refactor misspelled word Resouce to Resource
+  ([1368](https://github.com/Shopify/sarama/pull/1368)).
+
+#### Version 1.22.1 (2019-04-29)
+
+Improvements:
+- Use zstd 1.3.8
+  ([1350](https://github.com/Shopify/sarama/pull/1350)).
+- Add support for SaslHandshakeRequest v1
+  ([1354](https://github.com/Shopify/sarama/pull/1354)).
+
+Bug Fixes:
+- Fix V5 MetadataRequest nullable topics array
+  ([1353](https://github.com/Shopify/sarama/pull/1353)).
+- Use a different SCRAM client for each broker connection
+  ([1349](https://github.com/Shopify/sarama/pull/1349)).
+- Fix AllowAutoTopicCreation for MetadataRequest greater than v3
+  ([1344](https://github.com/Shopify/sarama/pull/1344)).
+
+#### Version 1.22.0 (2019-04-09)
+
+New Features:
+- Add Offline Replicas Operation to Client
+  ([1318](https://github.com/Shopify/sarama/pull/1318)).
+- Allow using proxy when connecting to broker
+  ([1326](https://github.com/Shopify/sarama/pull/1326)).
+- Implement ReadCommitted
+  ([1307](https://github.com/Shopify/sarama/pull/1307)).
+- Add support for Kafka 2.2.0
+  ([1331](https://github.com/Shopify/sarama/pull/1331)).
+- Add SASL SCRAM-SHA-512 and SCRAM-SHA-256 mechanisms
+  ([1295](https://github.com/Shopify/sarama/pull/1295)).
+
+Improvements:
+- Unregister all broker metrics on broker stop
+  ([1232](https://github.com/Shopify/sarama/pull/1232)).
+- Add SCRAM authentication example
+  ([1303](https://github.com/Shopify/sarama/pull/1303)).
+- Add consumergroup examples
+  ([1304](https://github.com/Shopify/sarama/pull/1304)).
+- Expose consumer batch size metric
+  ([1296](https://github.com/Shopify/sarama/pull/1296)).
+- Add TLS options to console producer and consumer
+  ([1300](https://github.com/Shopify/sarama/pull/1300)).
+- Reduce client close bookkeeping
+  ([1297](https://github.com/Shopify/sarama/pull/1297)).
+- Satisfy error interface in create responses
+  ([1154](https://github.com/Shopify/sarama/pull/1154)).
+- Please lint gods
+  ([1346](https://github.com/Shopify/sarama/pull/1346)).
+
+Bug Fixes:
+- Fix multi consumer group instance crash
+  ([1338](https://github.com/Shopify/sarama/pull/1338)).
+- Update lz4 to latest version
+  ([1347](https://github.com/Shopify/sarama/pull/1347)).
+- Retry ErrNotCoordinatorForConsumer in new consumergroup session
+  ([1231](https://github.com/Shopify/sarama/pull/1231)).
+- Fix cleanup error handler
+  ([1332](https://github.com/Shopify/sarama/pull/1332)).
+- Fix race condition in PartitionConsumer
+  ([1156](https://github.com/Shopify/sarama/pull/1156)).
+
+#### Version 1.21.0 (2019-02-24)
+
+New Features:
+- Add CreateAclRequest, DescribeAclRequest, DeleteAclRequest
+  ([1236](https://github.com/Shopify/sarama/pull/1236)).
+- Add DescribeTopic, DescribeConsumerGroup, ListConsumerGroups, ListConsumerGroupOffsets admin requests
+  ([1178](https://github.com/Shopify/sarama/pull/1178)).
+- Implement SASL/OAUTHBEARER
+  ([1240](https://github.com/Shopify/sarama/pull/1240)).
+
+Improvements:
+- Add Go mod support
+  ([1282](https://github.com/Shopify/sarama/pull/1282)).
+- Add error codes 73-76
+  ([1239](https://github.com/Shopify/sarama/pull/1239)).
+- Add retry backoff function
+  ([1160](https://github.com/Shopify/sarama/pull/1160)).
+- Maintain metadata in the producer even when retries are disabled
+  ([1189](https://github.com/Shopify/sarama/pull/1189)).
+- Include ReplicaAssignment in ListTopics
+  ([1274](https://github.com/Shopify/sarama/pull/1274)).
+- Add producer performance tool
+  ([1222](https://github.com/Shopify/sarama/pull/1222)).
+- Add support for LogAppend timestamps
+  ([1258](https://github.com/Shopify/sarama/pull/1258)).
+
+Bug Fixes:
+- Fix potential deadlock when a heartbeat request fails
+  ([1286](https://github.com/Shopify/sarama/pull/1286)).
+- Fix consuming compacted topic
+  ([1227](https://github.com/Shopify/sarama/pull/1227)).
+- Set correct Kafka version for DescribeConfigsRequest v1
+  ([1277](https://github.com/Shopify/sarama/pull/1277)).
+- Update kafka test version
+  ([1273](https://github.com/Shopify/sarama/pull/1273)).
+
+#### Version 1.20.1 (2019-01-10)
+
+New Features:
+- Add optional replica id in offset request
+  ([1100](https://github.com/Shopify/sarama/pull/1100)).
+
+Improvements:
+- Implement DescribeConfigs Request + Response v1 & v2
+  ([1230](https://github.com/Shopify/sarama/pull/1230)).
+- Reuse compression objects
+  ([1185](https://github.com/Shopify/sarama/pull/1185)).
+- Switch from png to svg for GoDoc link in README
+  ([1243](https://github.com/Shopify/sarama/pull/1243)).
+- Fix typo in deprecation notice for FetchResponseBlock.Records
+  ([1242](https://github.com/Shopify/sarama/pull/1242)).
+- Fix typos in consumer metadata response file
+  ([1244](https://github.com/Shopify/sarama/pull/1244)).
+
+Bug Fixes:
+- Revert to individual msg retries for non-idempotent
+  ([1203](https://github.com/Shopify/sarama/pull/1203)).
+- Respect MaxMessageBytes limit for uncompressed messages
+  ([1141](https://github.com/Shopify/sarama/pull/1141)).
+
+#### Version 1.20.0 (2018-12-10)
+
+New Features:
+ - Add support for zstd compression
+   ([#1170](https://github.com/Shopify/sarama/pull/1170)).
+ - Add support for Idempotent Producer
+   ([#1152](https://github.com/Shopify/sarama/pull/1152)).
+ - Add support for Kafka 2.1.0
+   ([#1229](https://github.com/Shopify/sarama/pull/1229)).
+ - Add support for OffsetCommit request/response pairs versions v1 to v5
+   ([#1201](https://github.com/Shopify/sarama/pull/1201)).
+ - Add support for OffsetFetch request/response pair up to version v5
+   ([#1198](https://github.com/Shopify/sarama/pull/1198)).
+
+Improvements:
+ - Export broker's Rack setting
+   ([#1173](https://github.com/Shopify/sarama/pull/1173)).
+ - Always use latest patch version of Go on CI
+   ([#1202](https://github.com/Shopify/sarama/pull/1202)).
+ - Add error codes 61 to 72
+   ([#1195](https://github.com/Shopify/sarama/pull/1195)).
+
+Bug Fixes:
+ - Fix build without cgo
+   ([#1182](https://github.com/Shopify/sarama/pull/1182)).
+ - Fix go vet suggestion in consumer group file
+   ([#1209](https://github.com/Shopify/sarama/pull/1209)).
+ - Fix typos in code and comments
+   ([#1228](https://github.com/Shopify/sarama/pull/1228)).
+
+#### Version 1.19.0 (2018-09-27)
+
+New Features:
+ - Implement a higher-level consumer group
+   ([#1099](https://github.com/Shopify/sarama/pull/1099)).
+
+Improvements:
+ - Add support for Go 1.11
+   ([#1176](https://github.com/Shopify/sarama/pull/1176)).
+
+Bug Fixes:
+ - Fix encoding of `MetadataResponse` with version 2 and higher
+   ([#1174](https://github.com/Shopify/sarama/pull/1174)).
+ - Fix race condition in mock async producer
+   ([#1174](https://github.com/Shopify/sarama/pull/1174)).
+
+#### Version 1.18.0 (2018-09-07)
+
+New Features:
+ - Make `Partitioner.RequiresConsistency` vary per-message
+   ([#1112](https://github.com/Shopify/sarama/pull/1112)).
+ - Add customizable partitioner
+   ([#1118](https://github.com/Shopify/sarama/pull/1118)).
+ - Add `ClusterAdmin` support for `CreateTopic`, `DeleteTopic`, `CreatePartitions`,
+   `DeleteRecords`, `DescribeConfig`, `AlterConfig`, `CreateACL`, `ListAcls`, `DeleteACL`
+   ([#1055](https://github.com/Shopify/sarama/pull/1055)).
+
+Improvements:
+ - Add support for Kafka 2.0.0
+   ([#1149](https://github.com/Shopify/sarama/pull/1149)).
+ - Allow setting `LocalAddr` when dialing an address to support multi-homed hosts
+   ([#1123](https://github.com/Shopify/sarama/pull/1123)).
+ - Simpler offset management
+   ([#1127](https://github.com/Shopify/sarama/pull/1127)).
+
+Bug Fixes:
+ - Fix mutation of `ProducerMessage.MetaData` when producing to Kafka
+   ([#1110](https://github.com/Shopify/sarama/pull/1110)).
+ - Fix consumer block when response did not contain all the
+   expected topic/partition blocks
+   ([#1086](https://github.com/Shopify/sarama/pull/1086)).
+ - Fix consumer block when response contains only control messages
+   ([#1115](https://github.com/Shopify/sarama/pull/1115)).
+ - Add timeout config for ClusterAdmin requests
+   ([#1142](https://github.com/Shopify/sarama/pull/1142)).
+ - Add version check when producing message with headers
+   ([#1117](https://github.com/Shopify/sarama/pull/1117)).
+ - Fix `MetadataRequest` for empty list of topics
+   ([#1132](https://github.com/Shopify/sarama/pull/1132)).
+ - Fix producer topic metadata on-demand fetch when topic error happens in metadata response
+   ([#1125](https://github.com/Shopify/sarama/pull/1125)).
+
+#### Version 1.17.0 (2018-05-30)
+
+New Features:
+ - Add support for gzip compression levels
+   ([#1044](https://github.com/Shopify/sarama/pull/1044)).
+ - Add support for Metadata request/response pairs versions v1 to v5
+   ([#1047](https://github.com/Shopify/sarama/pull/1047),
+   [#1069](https://github.com/Shopify/sarama/pull/1069)).
+ - Add versioning to JoinGroup request/response pairs + ([#1098](https://github.com/Shopify/sarama/pull/1098)) + - Add support for CreatePartitions, DeleteGroups, DeleteRecords request/response pairs + ([#1065](https://github.com/Shopify/sarama/pull/1065), + [#1096](https://github.com/Shopify/sarama/pull/1096), + [#1027](https://github.com/Shopify/sarama/pull/1027)). + - Add `Controller()` method to Client interface + ([#1063](https://github.com/Shopify/sarama/pull/1063)). + +Improvements: + - ConsumerMetadataReq/Resp has been migrated to FindCoordinatorReq/Resp + ([#1010](https://github.com/Shopify/sarama/pull/1010)). + - Expose missing protocol parts: `msgSet` and `recordBatch` + ([#1049](https://github.com/Shopify/sarama/pull/1049)). + - Add support for v1 DeleteTopics Request + ([#1052](https://github.com/Shopify/sarama/pull/1052)). + - Add support for Go 1.10 + ([#1064](https://github.com/Shopify/sarama/pull/1064)). + - Claim support for Kafka 1.1.0 + ([#1073](https://github.com/Shopify/sarama/pull/1073)). + +Bug Fixes: + - Fix FindCoordinatorResponse.encode to allow nil Coordinator + ([#1050](https://github.com/Shopify/sarama/pull/1050), + [#1051](https://github.com/Shopify/sarama/pull/1051)). + - Clear all metadata when we have the latest topic info + ([#1033](https://github.com/Shopify/sarama/pull/1033)). + - Make `PartitionConsumer.Close` idempotent + ([#1092](https://github.com/Shopify/sarama/pull/1092)). + +#### Version 1.16.0 (2018-02-12) + +New Features: + - Add support for the Create/Delete Topics request/response pairs + ([#1007](https://github.com/Shopify/sarama/pull/1007), + [#1008](https://github.com/Shopify/sarama/pull/1008)). + - Add support for the Describe/Create/Delete ACL request/response pairs + ([#1009](https://github.com/Shopify/sarama/pull/1009)). + - Add support for the five transaction-related request/response pairs + ([#1016](https://github.com/Shopify/sarama/pull/1016)). + +Improvements: + - Permit setting version on mock producer responses + ([#999](https://github.com/Shopify/sarama/pull/999)). + - Add `NewMockBrokerListener` helper for testing TLS connections + ([#1019](https://github.com/Shopify/sarama/pull/1019)). + - Changed the default value for `Consumer.Fetch.Default` from 32KiB to 1MiB + which results in much higher throughput in most cases + ([#1024](https://github.com/Shopify/sarama/pull/1024)). + - Reuse the `time.Ticker` across fetch requests in the PartitionConsumer to + reduce CPU and memory usage when processing many partitions + ([#1028](https://github.com/Shopify/sarama/pull/1028)). + - Assign relative offsets to messages in the producer to save the brokers a + recompression pass + ([#1002](https://github.com/Shopify/sarama/pull/1002), + [#1015](https://github.com/Shopify/sarama/pull/1015)). + +Bug Fixes: + - Fix producing uncompressed batches with the new protocol format + ([#1032](https://github.com/Shopify/sarama/issues/1032)). + - Fix consuming compacted topics with the new protocol format + ([#1005](https://github.com/Shopify/sarama/issues/1005)). + - Fix consuming topics with a mix of protocol formats + ([#1021](https://github.com/Shopify/sarama/issues/1021)). + - Fix consuming when the broker includes multiple batches in a single response + ([#1022](https://github.com/Shopify/sarama/issues/1022)). + - Fix detection of `PartialTrailingMessage` when the partial message was + truncated before the magic value indicating its version + ([#1030](https://github.com/Shopify/sarama/pull/1030)). 
+ - Fix expectation-checking in the mock of `SyncProducer.SendMessages` + ([#1035](https://github.com/Shopify/sarama/pull/1035)). + +#### Version 1.15.0 (2017-12-08) + +New Features: + - Claim official support for Kafka 1.0, though it did already work + ([#984](https://github.com/Shopify/sarama/pull/984)). + - Helper methods for Kafka version numbers to/from strings + ([#989](https://github.com/Shopify/sarama/pull/989)). + - Implement CreatePartitions request/response + ([#985](https://github.com/Shopify/sarama/pull/985)). + +Improvements: + - Add error codes 45-60 + ([#986](https://github.com/Shopify/sarama/issues/986)). + +Bug Fixes: + - Fix slow consuming for certain Kafka 0.11/1.0 configurations + ([#982](https://github.com/Shopify/sarama/pull/982)). + - Correctly determine when a FetchResponse contains the new message format + ([#990](https://github.com/Shopify/sarama/pull/990)). + - Fix producing with multiple headers + ([#996](https://github.com/Shopify/sarama/pull/996)). + - Fix handling of truncated record batches + ([#998](https://github.com/Shopify/sarama/pull/998)). + - Fix leaking metrics when closing brokers + ([#991](https://github.com/Shopify/sarama/pull/991)). + +#### Version 1.14.0 (2017-11-13) + +New Features: + - Add support for the new Kafka 0.11 record-batch format, including the wire + protocol and the necessary behavioural changes in the producer and consumer. + Transactions and idempotency are not yet supported, but producing and + consuming should work with all the existing bells and whistles (batching, + compression, etc) as well as the new custom headers. Thanks to Vlad Hanciuta + of Arista Networks for this work. Part of + ([#901](https://github.com/Shopify/sarama/issues/901)). + +Bug Fixes: + - Fix encoding of ProduceResponse versions in test + ([#970](https://github.com/Shopify/sarama/pull/970)). + - Return partial replicas list when we have it + ([#975](https://github.com/Shopify/sarama/pull/975)). + +#### Version 1.13.0 (2017-10-04) + +New Features: + - Support for FetchRequest version 3 + ([#905](https://github.com/Shopify/sarama/pull/905)). + - Permit setting version on mock FetchResponses + ([#939](https://github.com/Shopify/sarama/pull/939)). + - Add a configuration option to support storing only minimal metadata for + extremely large clusters + ([#937](https://github.com/Shopify/sarama/pull/937)). + - Add `PartitionOffsetManager.ResetOffset` for backtracking tracked offsets + ([#932](https://github.com/Shopify/sarama/pull/932)). + +Improvements: + - Provide the block-level timestamp when consuming compressed messages + ([#885](https://github.com/Shopify/sarama/issues/885)). + - `Client.Replicas` and `Client.InSyncReplicas` now respect the order returned + by the broker, which can be meaningful + ([#930](https://github.com/Shopify/sarama/pull/930)). + - Use a `Ticker` to reduce consumer timer overhead at the cost of higher + variance in the actual timeout + ([#933](https://github.com/Shopify/sarama/pull/933)). + +Bug Fixes: + - Gracefully handle messages with negative timestamps + ([#907](https://github.com/Shopify/sarama/pull/907)). + - Raise a proper error when encountering an unknown message version + ([#940](https://github.com/Shopify/sarama/pull/940)). + +#### Version 1.12.0 (2017-05-08) + +New Features: + - Added support for the `ApiVersions` request and response pair, and Kafka + version 0.10.2 ([#867](https://github.com/Shopify/sarama/pull/867)). 
Note + that you still need to specify the Kafka version in the Sarama configuration + for the time being. + - Added a `Brokers` method to the Client which returns the complete set of + active brokers ([#813](https://github.com/Shopify/sarama/pull/813)). + - Added an `InSyncReplicas` method to the Client which returns the set of all + in-sync broker IDs for the given partition, now that the Kafka versions for + which this was misleading are no longer in our supported set + ([#872](https://github.com/Shopify/sarama/pull/872)). + - Added a `NewCustomHashPartitioner` method which allows constructing a hash + partitioner with a custom hash method in case the default (FNV-1a) is not + suitable + ([#837](https://github.com/Shopify/sarama/pull/837), + [#841](https://github.com/Shopify/sarama/pull/841)). + +Improvements: + - Recognize more Kafka error codes + ([#859](https://github.com/Shopify/sarama/pull/859)). + +Bug Fixes: + - Fix an issue where decoding a malformed FetchRequest would not return the + correct error ([#818](https://github.com/Shopify/sarama/pull/818)). + - Respect ordering of group protocols in JoinGroupRequests. This fix is + transparent if you're using the `AddGroupProtocol` or + `AddGroupProtocolMetadata` helpers; otherwise you will need to switch from + the `GroupProtocols` field (now deprecated) to use `OrderedGroupProtocols` + ([#812](https://github.com/Shopify/sarama/issues/812)). + - Fix an alignment-related issue with atomics on 32-bit architectures + ([#859](https://github.com/Shopify/sarama/pull/859)). + +#### Version 1.11.0 (2016-12-20) + +_Important:_ As of Sarama 1.11 it is necessary to set the config value of +`Producer.Return.Successes` to true in order to use the SyncProducer. Previous +versions would silently override this value when instantiating a SyncProducer +which led to unexpected values and data races. + +New Features: + - Metrics! Thanks to Sébastien Launay for all his work on this feature + ([#701](https://github.com/Shopify/sarama/pull/701), + [#746](https://github.com/Shopify/sarama/pull/746), + [#766](https://github.com/Shopify/sarama/pull/766)). + - Add support for LZ4 compression + ([#786](https://github.com/Shopify/sarama/pull/786)). + - Add support for ListOffsetRequest v1 and Kafka 0.10.1 + ([#775](https://github.com/Shopify/sarama/pull/775)). + - Added a `HighWaterMarks` method to the Consumer which aggregates the + `HighWaterMarkOffset` values of its child topic/partitions + ([#769](https://github.com/Shopify/sarama/pull/769)). + +Bug Fixes: + - Fixed producing when using timestamps, compression and Kafka 0.10 + ([#759](https://github.com/Shopify/sarama/pull/759)). + - Added missing decoder methods to DescribeGroups response + ([#756](https://github.com/Shopify/sarama/pull/756)). + - Fix producer shutdown when `Return.Errors` is disabled + ([#787](https://github.com/Shopify/sarama/pull/787)). + - Don't mutate configuration in SyncProducer + ([#790](https://github.com/Shopify/sarama/pull/790)). + - Fix crash on SASL initialization failure + ([#795](https://github.com/Shopify/sarama/pull/795)). + +#### Version 1.10.1 (2016-08-30) + +Bug Fixes: + - Fix the documentation for `HashPartitioner` which was incorrect + ([#717](https://github.com/Shopify/sarama/pull/717)). + - Permit client creation even when it is limited by ACLs + ([#722](https://github.com/Shopify/sarama/pull/722)). + - Several fixes to the consumer timer optimization code, regressions introduced + in v1.10.0. 
Go's timers are finicky + ([#730](https://github.com/Shopify/sarama/pull/730), + [#733](https://github.com/Shopify/sarama/pull/733), + [#734](https://github.com/Shopify/sarama/pull/734)). + - Handle consuming compressed relative offsets with Kafka 0.10 + ([#735](https://github.com/Shopify/sarama/pull/735)). + +#### Version 1.10.0 (2016-08-02) + +_Important:_ As of Sarama 1.10 it is necessary to tell Sarama the version of +Kafka you are running against (via the `config.Version` value) in order to use +features that may not be compatible with old Kafka versions. If you don't +specify this value it will default to 0.8.2 (the minimum supported), and trying +to use more recent features (like the offset manager) will fail with an error. + +_Also:_ The offset-manager's behaviour has been changed to match the upstream +java consumer (see [#705](https://github.com/Shopify/sarama/pull/705) and +[#713](https://github.com/Shopify/sarama/pull/713)). If you use the +offset-manager, please ensure that you are committing one *greater* than the +last consumed message offset or else you may end up consuming duplicate +messages. + +New Features: + - Support for Kafka 0.10 + ([#672](https://github.com/Shopify/sarama/pull/672), + [#678](https://github.com/Shopify/sarama/pull/678), + [#681](https://github.com/Shopify/sarama/pull/681), and others). + - Support for configuring the target Kafka version + ([#676](https://github.com/Shopify/sarama/pull/676)). + - Batch producing support in the SyncProducer + ([#677](https://github.com/Shopify/sarama/pull/677)). + - Extend producer mock to allow setting expectations on message contents + ([#667](https://github.com/Shopify/sarama/pull/667)). + +Improvements: + - Support `nil` compressed messages for deleting in compacted topics + ([#634](https://github.com/Shopify/sarama/pull/634)). + - Pre-allocate decoding errors, greatly reducing heap usage and GC time against + misbehaving brokers ([#690](https://github.com/Shopify/sarama/pull/690)). + - Re-use consumer expiry timers, removing one allocation per consumed message + ([#707](https://github.com/Shopify/sarama/pull/707)). + +Bug Fixes: + - Actually default the client ID to "sarama" like we say we do + ([#664](https://github.com/Shopify/sarama/pull/664)). + - Fix a rare issue where `Client.Leader` could return the wrong error + ([#685](https://github.com/Shopify/sarama/pull/685)). + - Fix a possible tight loop in the consumer + ([#693](https://github.com/Shopify/sarama/pull/693)). + - Match upstream's offset-tracking behaviour + ([#705](https://github.com/Shopify/sarama/pull/705)). + - Report UnknownTopicOrPartition errors from the offset manager + ([#706](https://github.com/Shopify/sarama/pull/706)). + - Fix possible negative partition value from the HashPartitioner + ([#709](https://github.com/Shopify/sarama/pull/709)). + +#### Version 1.9.0 (2016-05-16) + +New Features: + - Add support for custom offset manager retention durations + ([#602](https://github.com/Shopify/sarama/pull/602)). + - Publish low-level mocks to enable testing of third-party producer/consumer + implementations ([#570](https://github.com/Shopify/sarama/pull/570)). + - Declare support for Golang 1.6 + ([#611](https://github.com/Shopify/sarama/pull/611)). + - Support for SASL plain-text auth + ([#648](https://github.com/Shopify/sarama/pull/648)). + +Improvements: + - Simplified broker locking scheme slightly + ([#604](https://github.com/Shopify/sarama/pull/604)). 
+ - Documentation cleanup
+   ([#605](https://github.com/Shopify/sarama/pull/605),
+   [#621](https://github.com/Shopify/sarama/pull/621),
+   [#654](https://github.com/Shopify/sarama/pull/654)).
+
+Bug Fixes:
+ - Fix race condition shutting down the OffsetManager
+   ([#658](https://github.com/Shopify/sarama/pull/658)).
+
+#### Version 1.8.0 (2016-02-01)
+
+New Features:
+ - Full support for Kafka 0.9:
+   - All protocol messages and fields
+     ([#586](https://github.com/Shopify/sarama/pull/586),
+     [#588](https://github.com/Shopify/sarama/pull/588),
+     [#590](https://github.com/Shopify/sarama/pull/590)).
+   - Verified that TLS support works
+     ([#581](https://github.com/Shopify/sarama/pull/581)).
+   - Fixed the OffsetManager compatibility
+     ([#585](https://github.com/Shopify/sarama/pull/585)).
+
+Improvements:
+ - Optimize for fewer system calls when reading from the network
+   ([#584](https://github.com/Shopify/sarama/pull/584)).
+ - Automatically retry `InvalidMessage` errors to match upstream behaviour
+   ([#589](https://github.com/Shopify/sarama/pull/589)).
+
+#### Version 1.7.0 (2015-12-11)
+
+New Features:
+ - Preliminary support for Kafka 0.9
+   ([#572](https://github.com/Shopify/sarama/pull/572)). This comes with several
+   caveats:
+   - Protocol-layer support is mostly in place
+     ([#577](https://github.com/Shopify/sarama/pull/577)), however Kafka 0.9
+     renamed some messages and fields, which we did not rename in order to
+     preserve API compatibility.
+   - The producer and consumer work against 0.9, but the offset manager does
+     not ([#573](https://github.com/Shopify/sarama/pull/573)).
+   - TLS support may or may not work
+     ([#581](https://github.com/Shopify/sarama/pull/581)).
+
+Improvements:
+ - Don't wait for request timeouts on dead brokers, greatly speeding recovery
+   when the TCP connection is left hanging
+   ([#548](https://github.com/Shopify/sarama/pull/548)).
+ - Refactored part of the producer. The new version provides a much more elegant
+   solution to [#449](https://github.com/Shopify/sarama/pull/449). It is also
+   slightly more efficient, and much more precise in calculating batch sizes
+   when compression is used
+   ([#549](https://github.com/Shopify/sarama/pull/549),
+   [#550](https://github.com/Shopify/sarama/pull/550),
+   [#551](https://github.com/Shopify/sarama/pull/551)).
+
+Bug Fixes:
+ - Fix race condition in consumer test mock
+   ([#553](https://github.com/Shopify/sarama/pull/553)).
+
+#### Version 1.6.1 (2015-09-25)
+
+Bug Fixes:
+ - Fix panic that could occur if a user-supplied message value failed to encode
+   ([#449](https://github.com/Shopify/sarama/pull/449)).
+
+#### Version 1.6.0 (2015-09-04)
+
+New Features:
+ - Implementation of a consumer offset manager using the APIs introduced in
+   Kafka 0.8.2. The API is designed mainly for integration into a future
+   high-level consumer, not for direct use, although it is *possible* to use it
+   directly.
+   ([#461](https://github.com/Shopify/sarama/pull/461)).
+
+Improvements:
+ - CRC32 calculation is much faster on machines with SSE4.2 instructions,
+   removing a major hotspot from most profiles
+   ([#255](https://github.com/Shopify/sarama/pull/255)).
+
+Bug Fixes:
+ - Make protocol decoding more robust against some malformed packets generated
+   by go-fuzz ([#523](https://github.com/Shopify/sarama/pull/523),
+   [#525](https://github.com/Shopify/sarama/pull/525)) or found in other ways
+   ([#528](https://github.com/Shopify/sarama/pull/528)).
+ - Fix a potential race condition panic in the consumer on shutdown + ([#529](https://github.com/Shopify/sarama/pull/529)). + +#### Version 1.5.0 (2015-08-17) + +New Features: + - TLS-encrypted network connections are now supported. This feature is subject + to change when Kafka releases built-in TLS support, but for now this is + enough to work with TLS-terminating proxies + ([#154](https://github.com/Shopify/sarama/pull/154)). + +Improvements: + - The consumer will not block if a single partition is not drained by the user; + all other partitions will continue to consume normally + ([#485](https://github.com/Shopify/sarama/pull/485)). + - Formatting of error strings has been much improved + ([#495](https://github.com/Shopify/sarama/pull/495)). + - Internal refactoring of the producer for code cleanliness and to enable + future work ([#300](https://github.com/Shopify/sarama/pull/300)). + +Bug Fixes: + - Fix a potential deadlock in the consumer on shutdown + ([#475](https://github.com/Shopify/sarama/pull/475)). + +#### Version 1.4.3 (2015-07-21) + +Bug Fixes: + - Don't include the partitioner in the producer's "fetch partitions" + circuit-breaker ([#466](https://github.com/Shopify/sarama/pull/466)). + - Don't retry messages until the broker is closed when abandoning a broker in + the producer ([#468](https://github.com/Shopify/sarama/pull/468)). + - Update the import path for snappy-go, it has moved again and the API has + changed slightly ([#486](https://github.com/Shopify/sarama/pull/486)). + +#### Version 1.4.2 (2015-05-27) + +Bug Fixes: + - Update the import path for snappy-go, it has moved from google code to github + ([#456](https://github.com/Shopify/sarama/pull/456)). + +#### Version 1.4.1 (2015-05-25) + +Improvements: + - Optimizations when decoding snappy messages, thanks to John Potocny + ([#446](https://github.com/Shopify/sarama/pull/446)). + +Bug Fixes: + - Fix hypothetical race conditions on producer shutdown + ([#450](https://github.com/Shopify/sarama/pull/450), + [#451](https://github.com/Shopify/sarama/pull/451)). + +#### Version 1.4.0 (2015-05-01) + +New Features: + - The consumer now implements `Topics()` and `Partitions()` methods to enable + users to dynamically choose what topics/partitions to consume without + instantiating a full client + ([#431](https://github.com/Shopify/sarama/pull/431)). + - The partition-consumer now exposes the high water mark offset value returned + by the broker via the `HighWaterMarkOffset()` method ([#339](https://github.com/Shopify/sarama/pull/339)). + - Added a `kafka-console-consumer` tool capable of handling multiple + partitions, and deprecated the now-obsolete `kafka-console-partitionConsumer` + ([#439](https://github.com/Shopify/sarama/pull/439), + [#442](https://github.com/Shopify/sarama/pull/442)). + +Improvements: + - The producer's logging during retry scenarios is more consistent, more + useful, and slightly less verbose + ([#429](https://github.com/Shopify/sarama/pull/429)). + - The client now shuffles its initial list of seed brokers in order to prevent + thundering herd on the first broker in the list + ([#441](https://github.com/Shopify/sarama/pull/441)). + +Bug Fixes: + - The producer now correctly manages its state if retries occur when it is + shutting down, fixing several instances of confusing behaviour and at least + one potential deadlock ([#419](https://github.com/Shopify/sarama/pull/419)). 
+  - The consumer now handles messages for different partitions asynchronously,
+    making it much more resilient to specific user code ordering
+    ([#325](https://github.com/Shopify/sarama/pull/325)).
+
+#### Version 1.3.0 (2015-04-16)
+
+New Features:
+  - The client now tracks consumer group coordinators using
+    ConsumerMetadataRequests similar to how it tracks partition leadership using
+    regular MetadataRequests ([#411](https://github.com/Shopify/sarama/pull/411)).
+    This adds two methods to the client API:
+    - `Coordinator(consumerGroup string) (*Broker, error)`
+    - `RefreshCoordinator(consumerGroup string) error`
+
+Improvements:
+  - ConsumerMetadataResponses now automatically create a Broker object out of the
+    ID/address/port combination for the Coordinator; accessing the fields
+    individually has been deprecated
+    ([#413](https://github.com/Shopify/sarama/pull/413)).
+  - Much improved handling of `OffsetOutOfRange` errors in the consumer.
+    Consumers will fail to start if the provided offset is out of range
+    ([#418](https://github.com/Shopify/sarama/pull/418))
+    and they will automatically shut down if the offset falls out of range
+    ([#424](https://github.com/Shopify/sarama/pull/424)).
+  - Small performance improvement in encoding and decoding protocol messages
+    ([#427](https://github.com/Shopify/sarama/pull/427)).
+
+Bug Fixes:
+  - Fix a rare race condition in the client's background metadata refresher if
+    it happens to be activated while the client is being closed
+    ([#422](https://github.com/Shopify/sarama/pull/422)).
+
+#### Version 1.2.0 (2015-04-07)
+
+Improvements:
+  - The producer's behaviour when `Flush.Frequency` is set is now more intuitive
+    ([#389](https://github.com/Shopify/sarama/pull/389)).
+  - The producer is now somewhat more memory-efficient during and after retrying
+    messages due to an improved queue implementation
+    ([#396](https://github.com/Shopify/sarama/pull/396)).
+  - The consumer produces much more useful logging output when leadership
+    changes ([#385](https://github.com/Shopify/sarama/pull/385)).
+  - The client's `GetOffset` method will now automatically refresh metadata and
+    retry once in the event of stale information or similar
+    ([#394](https://github.com/Shopify/sarama/pull/394)).
+  - Broker connections now have support for using TCP keepalives
+    ([#407](https://github.com/Shopify/sarama/issues/407)).
+
+Bug Fixes:
+  - The OffsetCommitRequest message now correctly implements all three possible
+    API versions ([#390](https://github.com/Shopify/sarama/pull/390),
+    [#400](https://github.com/Shopify/sarama/pull/400)).
+
+#### Version 1.1.0 (2015-03-20)
+
+Improvements:
+  - Wrap the producer's partitioner call in a circuit-breaker so that repeatedly
+    broken topics don't choke throughput
+    ([#373](https://github.com/Shopify/sarama/pull/373)).
+
+Bug Fixes:
+  - Fix the producer's internal reference counting in certain unusual scenarios
+    ([#367](https://github.com/Shopify/sarama/pull/367)).
+  - Fix the consumer's internal reference counting in certain unusual scenarios
+    ([#369](https://github.com/Shopify/sarama/pull/369)).
+  - Fix a condition where the producer's internal control messages could have
+    gotten stuck ([#368](https://github.com/Shopify/sarama/pull/368)).
+  - Fix an issue where invalid partition lists would be cached when asking for
+    metadata for a non-existent topic ([#372](https://github.com/Shopify/sarama/pull/372)).
+
+#### Version 1.0.0 (2015-03-17)
+
+Version 1.0.0 is the first tagged version, and is almost a complete rewrite.
The primary differences with previous untagged versions are:
+
+- The producer has been rewritten; there is now a `SyncProducer` with a blocking API, and an `AsyncProducer` that is non-blocking (see the sketch below).
+- The consumer has been rewritten to only open one connection per broker instead of one connection per partition.
+- The main types of Sarama are now interfaces to make dependency injection easy; mock implementations for `Consumer`, `SyncProducer` and `AsyncProducer` are provided in the `github.com/Shopify/sarama/mocks` package.
+- For most use cases, it is no longer necessary to open a `Client`; this will be done for you.
+- All the configuration values have been unified in the `Config` struct.
+- Much improved test suite.
diff --git a/vendor/github.com/Shopify/sarama/LICENSE b/vendor/github.com/Shopify/sarama/LICENSE
new file mode 100644
index 00000000000..d2bf4352f4c
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/LICENSE
@@ -0,0 +1,20 @@
+Copyright (c) 2013 Shopify
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/Shopify/sarama/Makefile b/vendor/github.com/Shopify/sarama/Makefile
new file mode 100644
index 00000000000..4714d7798fe
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/Makefile
@@ -0,0 +1,31 @@
+default: fmt get update test lint
+
+GO      := go
+GOBUILD := CGO_ENABLED=0 $(GO) build $(BUILD_FLAG)
+GOTEST  := $(GO) test -gcflags='-l' -p 3 -v -race -timeout 6m -coverprofile=profile.out -covermode=atomic
+
+FILES := $(shell find . -name '*.go' -type f -not -name '*.pb.go' -not -name '*_generated.go' -not -name '*_test.go')
+TESTS := $(shell find . -name '*.go' -type f -not -name '*.pb.go' -not -name '*_generated.go' -name '*_test.go')
+
+get:
+	$(GO) get ./...
+	$(GO) mod verify
+	$(GO) mod tidy
+
+update:
+	$(GO) get -u -v ./...
+	$(GO) mod verify
+	$(GO) mod tidy
+
+fmt:
+	gofmt -s -l -w $(FILES) $(TESTS)
+
+lint:
+	GOFLAGS="-tags=functional" golangci-lint run
+
+test:
+	$(GOTEST) ./...
+
+.PHONY: test_functional
+test_functional:
+	$(GOTEST) -tags=functional ./...
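To make the 1.0.0 producer split described in the changelog above concrete, here is a minimal, illustrative `SyncProducer` sketch. The broker address and topic are placeholders, and error handling is abbreviated; the non-blocking `AsyncProducer` works analogously via its `Input()`, `Successes()` and `Errors()` channels. Note that `Producer.Return.Successes` must be enabled for the `SyncProducer`:

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Producer.Return.Successes = true // required by SyncProducer

	// Placeholder broker address; a real deployment lists its own seed brokers.
	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatalln(err)
	}
	defer producer.Close()

	// SendMessage blocks until the broker acknowledges the message.
	partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: "my-topic", // placeholder topic
		Value: sarama.StringEncoder("hello"),
	})
	if err != nil {
		log.Fatalln(err)
	}
	log.Printf("stored at partition=%d offset=%d", partition, offset)
}
```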
diff --git a/vendor/github.com/Shopify/sarama/README.md b/vendor/github.com/Shopify/sarama/README.md new file mode 100644 index 00000000000..f2beb73931a --- /dev/null +++ b/vendor/github.com/Shopify/sarama/README.md @@ -0,0 +1,36 @@ +# sarama + +[![Go Reference](https://pkg.go.dev/badge/github.com/Shopify/sarama.svg)](https://pkg.go.dev/github.com/Shopify/sarama) +[![Build Status](https://travis-ci.org/Shopify/sarama.svg?branch=master)](https://travis-ci.org/Shopify/sarama) +[![Coverage](https://codecov.io/gh/Shopify/sarama/branch/master/graph/badge.svg)](https://codecov.io/gh/Shopify/sarama) + +Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apache.org/) version 0.8 (and later). + +## Getting started + +- API documentation and examples are available via [pkg.go.dev](https://pkg.go.dev/github.com/Shopify/sarama). +- Mocks for testing are available in the [mocks](./mocks) subpackage. +- The [examples](./examples) directory contains more elaborate example applications. +- The [tools](./tools) directory contains command line tools that can be useful for testing, diagnostics, and instrumentation. + +You might also want to look at the [Frequently Asked Questions](https://github.com/Shopify/sarama/wiki/Frequently-Asked-Questions). + +## Compatibility and API stability + +Sarama provides a "2 releases + 2 months" compatibility guarantee: we support +the two latest stable releases of Kafka and Go, and we provide a two month +grace period for older releases. This means we currently officially support +Go 1.15 through 1.16, and Kafka 2.6 through 2.8, although older releases are +still likely to work. + +Sarama follows semantic versioning and provides API stability via the gopkg.in service. +You can import a version with a guaranteed stable API via http://gopkg.in/Shopify/sarama.v1. +A changelog is available [here](CHANGELOG.md). + +## Contributing + +- Get started by checking our [contribution guidelines](https://github.com/Shopify/sarama/blob/master/.github/CONTRIBUTING.md). +- Read the [Sarama wiki](https://github.com/Shopify/sarama/wiki) for more technical and design details. +- The [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol) contains a wealth of useful information. +- For more general issues, there is [a google group](https://groups.google.com/forum/#!forum/kafka-clients) for Kafka client developers. +- If you have any questions, just ask! 
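+
+## A minimal consumer example
+
+As a quick orientation, here is a hedged sketch of consuming a single partition;
+the broker address and topic are placeholders, and production code should use a
+consumer group or handle shutdown signals rather than ranging forever:
+
+```go
+package main
+
+import (
+	"log"
+
+	"github.com/Shopify/sarama"
+)
+
+func main() {
+	consumer, err := sarama.NewConsumer([]string{"localhost:9092"}, sarama.NewConfig()) // placeholder address
+	if err != nil {
+		log.Fatalln(err)
+	}
+	defer consumer.Close()
+
+	// Consume partition 0 starting from the newest offset.
+	pc, err := consumer.ConsumePartition("my-topic", 0, sarama.OffsetNewest) // placeholder topic
+	if err != nil {
+		log.Fatalln(err)
+	}
+	defer pc.Close()
+
+	for msg := range pc.Messages() {
+		log.Printf("partition=%d offset=%d value=%s", msg.Partition, msg.Offset, msg.Value)
+	}
+}
+```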
diff --git a/vendor/github.com/Shopify/sarama/Vagrantfile b/vendor/github.com/Shopify/sarama/Vagrantfile
new file mode 100644
index 00000000000..07d7ffb8ff4
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/Vagrantfile
@@ -0,0 +1,14 @@
+# We have 5 * 192MB ZK processes and 5 * 320MB Kafka processes => 2560MB
+MEMORY = 3072
+
+Vagrant.configure("2") do |config|
+  config.vm.box = "ubuntu/bionic64"
+
+  config.vm.provision :shell, path: "vagrant/provision.sh"
+
+  config.vm.network "private_network", ip: "192.168.100.67"
+
+  config.vm.provider "virtualbox" do |v|
+    v.memory = MEMORY
+  end
+end
diff --git a/vendor/github.com/Shopify/sarama/acl_bindings.go b/vendor/github.com/Shopify/sarama/acl_bindings.go
new file mode 100644
index 00000000000..13440be677c
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/acl_bindings.go
@@ -0,0 +1,138 @@
+package sarama
+
+// Resource holds information about an ACL resource: its type, name, and pattern type
+type Resource struct {
+	ResourceType        AclResourceType
+	ResourceName        string
+	ResourcePatternType AclResourcePatternType
+}
+
+func (r *Resource) encode(pe packetEncoder, version int16) error {
+	pe.putInt8(int8(r.ResourceType))
+
+	if err := pe.putString(r.ResourceName); err != nil {
+		return err
+	}
+
+	if version == 1 {
+		if r.ResourcePatternType == AclPatternUnknown {
+			Logger.Print("Cannot encode an unknown resource pattern type, using Literal instead")
+			r.ResourcePatternType = AclPatternLiteral
+		}
+		pe.putInt8(int8(r.ResourcePatternType))
+	}
+
+	return nil
+}
+
+func (r *Resource) decode(pd packetDecoder, version int16) (err error) {
+	resourceType, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	r.ResourceType = AclResourceType(resourceType)
+
+	if r.ResourceName, err = pd.getString(); err != nil {
+		return err
+	}
+	if version == 1 {
+		pattern, err := pd.getInt8()
+		if err != nil {
+			return err
+		}
+		r.ResourcePatternType = AclResourcePatternType(pattern)
+	}
+
+	return nil
+}
+
+// Acl holds a single ACL entry: principal, host, operation, and permission type
+type Acl struct {
+	Principal      string
+	Host           string
+	Operation      AclOperation
+	PermissionType AclPermissionType
+}
+
+func (a *Acl) encode(pe packetEncoder) error {
+	if err := pe.putString(a.Principal); err != nil {
+		return err
+	}
+
+	if err := pe.putString(a.Host); err != nil {
+		return err
+	}
+
+	pe.putInt8(int8(a.Operation))
+	pe.putInt8(int8(a.PermissionType))
+
+	return nil
+}
+
+func (a *Acl) decode(pd packetDecoder, version int16) (err error) {
+	if a.Principal, err = pd.getString(); err != nil {
+		return err
+	}
+
+	if a.Host, err = pd.getString(); err != nil {
+		return err
+	}
+
+	operation, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	a.Operation = AclOperation(operation)
+
+	permissionType, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	a.PermissionType = AclPermissionType(permissionType)
+
+	return nil
+}
+
+// ResourceAcls bundles a Resource with the Acls that apply to it
+type ResourceAcls struct {
+	Resource
+	Acls []*Acl
+}
+
+func (r *ResourceAcls) encode(pe packetEncoder, version int16) error {
+	if err := r.Resource.encode(pe, version); err != nil {
+		return err
+	}
+
+	if err := pe.putArrayLength(len(r.Acls)); err != nil {
+		return err
+	}
+	for _, acl := range r.Acls {
+		if err := acl.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *ResourceAcls) decode(pd packetDecoder, version int16) error {
+	if err := r.Resource.decode(pd, version); err != nil {
+		return err
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Acls = make([]*Acl, n)
+	for i := 0; i < n; i++ {
+		r.Acls[i] 
= new(Acl)
+		if err := r.Acls[i].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/acl_create_request.go b/vendor/github.com/Shopify/sarama/acl_create_request.go
new file mode 100644
index 00000000000..449102f74a7
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/acl_create_request.go
@@ -0,0 +1,89 @@
+package sarama
+
+// CreateAclsRequest is an acl creation request
+type CreateAclsRequest struct {
+	Version      int16
+	AclCreations []*AclCreation
+}
+
+func (c *CreateAclsRequest) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(c.AclCreations)); err != nil {
+		return err
+	}
+
+	for _, aclCreation := range c.AclCreations {
+		if err := aclCreation.encode(pe, c.Version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (c *CreateAclsRequest) decode(pd packetDecoder, version int16) (err error) {
+	c.Version = version
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	c.AclCreations = make([]*AclCreation, n)
+
+	for i := 0; i < n; i++ {
+		c.AclCreations[i] = new(AclCreation)
+		if err := c.AclCreations[i].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (c *CreateAclsRequest) key() int16 {
+	return 30
+}
+
+func (c *CreateAclsRequest) version() int16 {
+	return c.Version
+}
+
+func (c *CreateAclsRequest) headerVersion() int16 {
+	return 1
+}
+
+func (c *CreateAclsRequest) requiredVersion() KafkaVersion {
+	switch c.Version {
+	case 1:
+		return V2_0_0_0
+	default:
+		return V0_11_0_0
+	}
+}
+
+// AclCreation is a wrapper around Resource and Acl type
+type AclCreation struct {
+	Resource
+	Acl
+}
+
+func (a *AclCreation) encode(pe packetEncoder, version int16) error {
+	if err := a.Resource.encode(pe, version); err != nil {
+		return err
+	}
+	if err := a.Acl.encode(pe); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (a *AclCreation) decode(pd packetDecoder, version int16) (err error) {
+	if err := a.Resource.decode(pd, version); err != nil {
+		return err
+	}
+	if err := a.Acl.decode(pd, version); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/acl_create_response.go b/vendor/github.com/Shopify/sarama/acl_create_response.go
new file mode 100644
index 00000000000..21d6c340cc5
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/acl_create_response.go
@@ -0,0 +1,94 @@
+package sarama
+
+import "time"
+
+// CreateAclsResponse is an acl creation response type
+type CreateAclsResponse struct {
+	ThrottleTime         time.Duration
+	AclCreationResponses []*AclCreationResponse
+}
+
+func (c *CreateAclsResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(c.ThrottleTime / time.Millisecond))
+
+	if err := pe.putArrayLength(len(c.AclCreationResponses)); err != nil {
+		return err
+	}
+
+	for _, aclCreationResponse := range c.AclCreationResponses {
+		if err := aclCreationResponse.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (c *CreateAclsResponse) decode(pd packetDecoder, version int16) (err error) {
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	c.AclCreationResponses = make([]*AclCreationResponse, n)
+	for i := 0; i < n; i++ {
+		c.AclCreationResponses[i] = new(AclCreationResponse)
+		if err := c.AclCreationResponses[i].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (c 
*CreateAclsResponse) key() int16 { + return 30 +} + +func (c *CreateAclsResponse) version() int16 { + return 0 +} + +func (c *CreateAclsResponse) headerVersion() int16 { + return 0 +} + +func (c *CreateAclsResponse) requiredVersion() KafkaVersion { + return V0_11_0_0 +} + +// AclCreationResponse is an acl creation response type +type AclCreationResponse struct { + Err KError + ErrMsg *string +} + +func (a *AclCreationResponse) encode(pe packetEncoder) error { + pe.putInt16(int16(a.Err)) + + if err := pe.putNullableString(a.ErrMsg); err != nil { + return err + } + + return nil +} + +func (a *AclCreationResponse) decode(pd packetDecoder, version int16) (err error) { + kerr, err := pd.getInt16() + if err != nil { + return err + } + a.Err = KError(kerr) + + if a.ErrMsg, err = pd.getNullableString(); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/acl_delete_request.go b/vendor/github.com/Shopify/sarama/acl_delete_request.go new file mode 100644 index 00000000000..5e5c03bc2da --- /dev/null +++ b/vendor/github.com/Shopify/sarama/acl_delete_request.go @@ -0,0 +1,62 @@ +package sarama + +// DeleteAclsRequest is a delete acl request +type DeleteAclsRequest struct { + Version int + Filters []*AclFilter +} + +func (d *DeleteAclsRequest) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(d.Filters)); err != nil { + return err + } + + for _, filter := range d.Filters { + filter.Version = d.Version + if err := filter.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (d *DeleteAclsRequest) decode(pd packetDecoder, version int16) (err error) { + d.Version = int(version) + n, err := pd.getArrayLength() + if err != nil { + return err + } + + d.Filters = make([]*AclFilter, n) + for i := 0; i < n; i++ { + d.Filters[i] = new(AclFilter) + d.Filters[i].Version = int(version) + if err := d.Filters[i].decode(pd, version); err != nil { + return err + } + } + + return nil +} + +func (d *DeleteAclsRequest) key() int16 { + return 31 +} + +func (d *DeleteAclsRequest) version() int16 { + return int16(d.Version) +} + +func (d *DeleteAclsRequest) headerVersion() int16 { + return 1 +} + +func (d *DeleteAclsRequest) requiredVersion() KafkaVersion { + switch d.Version { + case 1: + return V2_0_0_0 + default: + return V0_11_0_0 + } +} diff --git a/vendor/github.com/Shopify/sarama/acl_delete_response.go b/vendor/github.com/Shopify/sarama/acl_delete_response.go new file mode 100644 index 00000000000..cd33749d5e5 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/acl_delete_response.go @@ -0,0 +1,163 @@ +package sarama + +import "time" + +// DeleteAclsResponse is a delete acl response +type DeleteAclsResponse struct { + Version int16 + ThrottleTime time.Duration + FilterResponses []*FilterResponse +} + +func (d *DeleteAclsResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(d.ThrottleTime / time.Millisecond)) + + if err := pe.putArrayLength(len(d.FilterResponses)); err != nil { + return err + } + + for _, filterResponse := range d.FilterResponses { + if err := filterResponse.encode(pe, d.Version); err != nil { + return err + } + } + + return nil +} + +func (d *DeleteAclsResponse) decode(pd packetDecoder, version int16) (err error) { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + n, err := pd.getArrayLength() + if err != nil { + return err + } + d.FilterResponses = make([]*FilterResponse, n) + + for i := 0; i < n; i++ { + d.FilterResponses[i] 
= new(FilterResponse)
+		if err := d.FilterResponses[i].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *DeleteAclsResponse) key() int16 {
+	return 31
+}
+
+func (d *DeleteAclsResponse) version() int16 {
+	return d.Version
+}
+
+func (d *DeleteAclsResponse) headerVersion() int16 {
+	return 0
+}
+
+func (d *DeleteAclsResponse) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
+
+// FilterResponse holds the acls matched by a single deletion filter, plus any error
+type FilterResponse struct {
+	Err          KError
+	ErrMsg       *string
+	MatchingAcls []*MatchingAcl
+}
+
+func (f *FilterResponse) encode(pe packetEncoder, version int16) error {
+	pe.putInt16(int16(f.Err))
+	if err := pe.putNullableString(f.ErrMsg); err != nil {
+		return err
+	}
+
+	if err := pe.putArrayLength(len(f.MatchingAcls)); err != nil {
+		return err
+	}
+	for _, matchingAcl := range f.MatchingAcls {
+		if err := matchingAcl.encode(pe, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (f *FilterResponse) decode(pd packetDecoder, version int16) (err error) {
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	f.Err = KError(kerr)
+
+	if f.ErrMsg, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	f.MatchingAcls = make([]*MatchingAcl, n)
+	for i := 0; i < n; i++ {
+		f.MatchingAcls[i] = new(MatchingAcl)
+		if err := f.MatchingAcls[i].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// MatchingAcl is an acl that matched a deletion filter
+type MatchingAcl struct {
+	Err    KError
+	ErrMsg *string
+	Resource
+	Acl
+}
+
+func (m *MatchingAcl) encode(pe packetEncoder, version int16) error {
+	pe.putInt16(int16(m.Err))
+	if err := pe.putNullableString(m.ErrMsg); err != nil {
+		return err
+	}
+
+	if err := m.Resource.encode(pe, version); err != nil {
+		return err
+	}
+
+	if err := m.Acl.encode(pe); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (m *MatchingAcl) decode(pd packetDecoder, version int16) (err error) {
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	m.Err = KError(kerr)
+
+	if m.ErrMsg, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	if err := m.Resource.decode(pd, version); err != nil {
+		return err
+	}
+
+	if err := m.Acl.decode(pd, version); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/acl_describe_request.go b/vendor/github.com/Shopify/sarama/acl_describe_request.go
new file mode 100644
index 00000000000..e0fe9023a28
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/acl_describe_request.go
@@ -0,0 +1,39 @@
+package sarama
+
+// DescribeAclsRequest is a describe acl request type
+type DescribeAclsRequest struct {
+	Version int
+	AclFilter
+}
+
+func (d *DescribeAclsRequest) encode(pe packetEncoder) error {
+	d.AclFilter.Version = d.Version
+	return d.AclFilter.encode(pe)
+}
+
+func (d *DescribeAclsRequest) decode(pd packetDecoder, version int16) (err error) {
+	d.Version = int(version)
+	d.AclFilter.Version = int(version)
+	return d.AclFilter.decode(pd, version)
+}
+
+func (d *DescribeAclsRequest) key() int16 {
+	return 29
+}
+
+func (d *DescribeAclsRequest) version() int16 {
+	return int16(d.Version)
+}
+
+func (d *DescribeAclsRequest) headerVersion() int16 {
+	return 1
+}
+
+func (d *DescribeAclsRequest) requiredVersion() KafkaVersion {
+	switch d.Version {
+	case 1:
+		return V2_0_0_0
+	default:
+		return V0_11_0_0
+	}
+}
diff --git a/vendor/github.com/Shopify/sarama/acl_describe_response.go 
b/vendor/github.com/Shopify/sarama/acl_describe_response.go new file mode 100644 index 00000000000..3255fd48571 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/acl_describe_response.go @@ -0,0 +1,91 @@ +package sarama + +import "time" + +// DescribeAclsResponse is a describe acl response type +type DescribeAclsResponse struct { + Version int16 + ThrottleTime time.Duration + Err KError + ErrMsg *string + ResourceAcls []*ResourceAcls +} + +func (d *DescribeAclsResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(d.ThrottleTime / time.Millisecond)) + pe.putInt16(int16(d.Err)) + + if err := pe.putNullableString(d.ErrMsg); err != nil { + return err + } + + if err := pe.putArrayLength(len(d.ResourceAcls)); err != nil { + return err + } + + for _, resourceAcl := range d.ResourceAcls { + if err := resourceAcl.encode(pe, d.Version); err != nil { + return err + } + } + + return nil +} + +func (d *DescribeAclsResponse) decode(pd packetDecoder, version int16) (err error) { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + kerr, err := pd.getInt16() + if err != nil { + return err + } + d.Err = KError(kerr) + + errmsg, err := pd.getString() + if err != nil { + return err + } + if errmsg != "" { + d.ErrMsg = &errmsg + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + d.ResourceAcls = make([]*ResourceAcls, n) + + for i := 0; i < n; i++ { + d.ResourceAcls[i] = new(ResourceAcls) + if err := d.ResourceAcls[i].decode(pd, version); err != nil { + return err + } + } + + return nil +} + +func (d *DescribeAclsResponse) key() int16 { + return 29 +} + +func (d *DescribeAclsResponse) version() int16 { + return d.Version +} + +func (d *DescribeAclsResponse) headerVersion() int16 { + return 0 +} + +func (d *DescribeAclsResponse) requiredVersion() KafkaVersion { + switch d.Version { + case 1: + return V2_0_0_0 + default: + return V0_11_0_0 + } +} diff --git a/vendor/github.com/Shopify/sarama/acl_filter.go b/vendor/github.com/Shopify/sarama/acl_filter.go new file mode 100644 index 00000000000..b380161aa45 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/acl_filter.go @@ -0,0 +1,77 @@ +package sarama + +type AclFilter struct { + Version int + ResourceType AclResourceType + ResourceName *string + ResourcePatternTypeFilter AclResourcePatternType + Principal *string + Host *string + Operation AclOperation + PermissionType AclPermissionType +} + +func (a *AclFilter) encode(pe packetEncoder) error { + pe.putInt8(int8(a.ResourceType)) + if err := pe.putNullableString(a.ResourceName); err != nil { + return err + } + + if a.Version == 1 { + pe.putInt8(int8(a.ResourcePatternTypeFilter)) + } + + if err := pe.putNullableString(a.Principal); err != nil { + return err + } + if err := pe.putNullableString(a.Host); err != nil { + return err + } + pe.putInt8(int8(a.Operation)) + pe.putInt8(int8(a.PermissionType)) + + return nil +} + +func (a *AclFilter) decode(pd packetDecoder, version int16) (err error) { + resourceType, err := pd.getInt8() + if err != nil { + return err + } + a.ResourceType = AclResourceType(resourceType) + + if a.ResourceName, err = pd.getNullableString(); err != nil { + return err + } + + if a.Version == 1 { + pattern, err := pd.getInt8() + if err != nil { + return err + } + + a.ResourcePatternTypeFilter = AclResourcePatternType(pattern) + } + + if a.Principal, err = pd.getNullableString(); err != nil { + return err + } + + if a.Host, err = pd.getNullableString(); err != nil { + 
return err
+	}
+
+	operation, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	a.Operation = AclOperation(operation)
+
+	permissionType, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	a.PermissionType = AclPermissionType(permissionType)
+
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/acl_types.go b/vendor/github.com/Shopify/sarama/acl_types.go
new file mode 100644
index 00000000000..c3ba8ddcf64
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/acl_types.go
@@ -0,0 +1,238 @@
+package sarama
+
+import (
+	"fmt"
+	"strings"
+)
+
+type (
+	AclOperation int
+
+	AclPermissionType int
+
+	AclResourceType int
+
+	AclResourcePatternType int
+)
+
+// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclOperation.java
+const (
+	AclOperationUnknown AclOperation = iota
+	AclOperationAny
+	AclOperationAll
+	AclOperationRead
+	AclOperationWrite
+	AclOperationCreate
+	AclOperationDelete
+	AclOperationAlter
+	AclOperationDescribe
+	AclOperationClusterAction
+	AclOperationDescribeConfigs
+	AclOperationAlterConfigs
+	AclOperationIdempotentWrite
+)
+
+func (a *AclOperation) String() string {
+	mapping := map[AclOperation]string{
+		AclOperationUnknown:         "Unknown",
+		AclOperationAny:             "Any",
+		AclOperationAll:             "All",
+		AclOperationRead:            "Read",
+		AclOperationWrite:           "Write",
+		AclOperationCreate:          "Create",
+		AclOperationDelete:          "Delete",
+		AclOperationAlter:           "Alter",
+		AclOperationDescribe:        "Describe",
+		AclOperationClusterAction:   "ClusterAction",
+		AclOperationDescribeConfigs: "DescribeConfigs",
+		AclOperationAlterConfigs:    "AlterConfigs",
+		AclOperationIdempotentWrite: "IdempotentWrite",
+	}
+	s, ok := mapping[*a]
+	if !ok {
+		s = mapping[AclOperationUnknown]
+	}
+	return s
+}
+
+// MarshalText returns the text form of the AclOperation (name without prefix)
+func (a *AclOperation) MarshalText() ([]byte, error) {
+	return []byte(a.String()), nil
+}
+
+// UnmarshalText takes a text representation of the operation and converts it to an AclOperation
+func (a *AclOperation) UnmarshalText(text []byte) error {
+	normalized := strings.ToLower(string(text))
+	mapping := map[string]AclOperation{
+		"unknown":         AclOperationUnknown,
+		"any":             AclOperationAny,
+		"all":             AclOperationAll,
+		"read":            AclOperationRead,
+		"write":           AclOperationWrite,
+		"create":          AclOperationCreate,
+		"delete":          AclOperationDelete,
+		"alter":           AclOperationAlter,
+		"describe":        AclOperationDescribe,
+		"clusteraction":   AclOperationClusterAction,
+		"describeconfigs": AclOperationDescribeConfigs,
+		"alterconfigs":    AclOperationAlterConfigs,
+		"idempotentwrite": AclOperationIdempotentWrite,
+	}
+	ao, ok := mapping[normalized]
+	if !ok {
+		*a = AclOperationUnknown
+		return fmt.Errorf("no acl operation with name %s", normalized)
+	}
+	*a = ao
+	return nil
+}
+
+// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclPermissionType.java
+const (
+	AclPermissionUnknown AclPermissionType = iota
+	AclPermissionAny
+	AclPermissionDeny
+	AclPermissionAllow
+)
+
+func (a *AclPermissionType) String() string {
+	mapping := map[AclPermissionType]string{
+		AclPermissionUnknown: "Unknown",
+		AclPermissionAny:     "Any",
+		AclPermissionDeny:    "Deny",
+		AclPermissionAllow:   "Allow",
+	}
+	s, ok := mapping[*a]
+	if !ok {
+		s = mapping[AclPermissionUnknown]
+	}
+	return s
+}
+
+// MarshalText returns the text form of the AclPermissionType (name without prefix)
+func (a *AclPermissionType) MarshalText() ([]byte, error) {
+	return []byte(a.String()), nil
+}
+
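+// aclOperationFromText is an illustrative sketch, not part of the upstream
+// sarama API: it shows how the TextMarshaler/TextUnmarshaler implementations
+// in this file are typically used, e.g. when reading ACL settings from a
+// JSON or YAML config. The helper name is hypothetical.
+func aclOperationFromText(s string) (AclOperation, error) {
+	var op AclOperation
+	// UnmarshalText matches on the lower-cased name, e.g. "read" or "alterconfigs".
+	if err := op.UnmarshalText([]byte(s)); err != nil {
+		return AclOperationUnknown, err
+	}
+	return op, nil
+}
+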
+// UnmarshalText takes a text representation of the permission type and converts it to an AclPermissionType
+func (a *AclPermissionType) UnmarshalText(text []byte) error {
+	normalized := strings.ToLower(string(text))
+	mapping := map[string]AclPermissionType{
+		"unknown": AclPermissionUnknown,
+		"any":     AclPermissionAny,
+		"deny":    AclPermissionDeny,
+		"allow":   AclPermissionAllow,
+	}
+
+	apt, ok := mapping[normalized]
+	if !ok {
+		*a = AclPermissionUnknown
+		return fmt.Errorf("no acl permission with name %s", normalized)
+	}
+	*a = apt
+	return nil
+}
+
+// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/ResourceType.java
+const (
+	AclResourceUnknown AclResourceType = iota
+	AclResourceAny
+	AclResourceTopic
+	AclResourceGroup
+	AclResourceCluster
+	AclResourceTransactionalID
+	AclResourceDelegationToken
+)
+
+func (a *AclResourceType) String() string {
+	mapping := map[AclResourceType]string{
+		AclResourceUnknown:         "Unknown",
+		AclResourceAny:             "Any",
+		AclResourceTopic:           "Topic",
+		AclResourceGroup:           "Group",
+		AclResourceCluster:         "Cluster",
+		AclResourceTransactionalID: "TransactionalID",
+		AclResourceDelegationToken: "DelegationToken",
+	}
+	s, ok := mapping[*a]
+	if !ok {
+		s = mapping[AclResourceUnknown]
+	}
+	return s
+}
+
+// MarshalText returns the text form of the AclResourceType (name without prefix)
+func (a *AclResourceType) MarshalText() ([]byte, error) {
+	return []byte(a.String()), nil
+}
+
+// UnmarshalText takes a text representation of the resource type and converts it to an AclResourceType
+func (a *AclResourceType) UnmarshalText(text []byte) error {
+	normalized := strings.ToLower(string(text))
+	mapping := map[string]AclResourceType{
+		"unknown":         AclResourceUnknown,
+		"any":             AclResourceAny,
+		"topic":           AclResourceTopic,
+		"group":           AclResourceGroup,
+		"cluster":         AclResourceCluster,
+		"transactionalid": AclResourceTransactionalID,
+		"delegationtoken": AclResourceDelegationToken,
+	}
+
+	art, ok := mapping[normalized]
+	if !ok {
+		*a = AclResourceUnknown
+		return fmt.Errorf("no acl resource with name %s", normalized)
+	}
+	*a = art
+	return nil
+}
+
+// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/PatternType.java
+const (
+	AclPatternUnknown AclResourcePatternType = iota
+	AclPatternAny
+	AclPatternMatch
+	AclPatternLiteral
+	AclPatternPrefixed
+)
+
+func (a *AclResourcePatternType) String() string {
+	mapping := map[AclResourcePatternType]string{
+		AclPatternUnknown:  "Unknown",
+		AclPatternAny:      "Any",
+		AclPatternMatch:    "Match",
+		AclPatternLiteral:  "Literal",
+		AclPatternPrefixed: "Prefixed",
+	}
+	s, ok := mapping[*a]
+	if !ok {
+		s = mapping[AclPatternUnknown]
+	}
+	return s
+}
+
+// MarshalText returns the text form of the AclResourcePatternType (name without prefix)
+func (a *AclResourcePatternType) MarshalText() ([]byte, error) {
+	return []byte(a.String()), nil
+}
+
+// UnmarshalText takes a text representation of the resource pattern type and converts it to an AclResourcePatternType
+func (a *AclResourcePatternType) UnmarshalText(text []byte) error {
+	normalized := strings.ToLower(string(text))
+	mapping := map[string]AclResourcePatternType{
+		"unknown":  AclPatternUnknown,
+		"any":      AclPatternAny,
+		"match":    AclPatternMatch,
+		"literal":  AclPatternLiteral,
+		"prefixed": AclPatternPrefixed,
+	}
+
+	arpt, ok := mapping[normalized]
+	if !ok {
+		*a = AclPatternUnknown
+		return fmt.Errorf("no acl resource pattern with name %s", normalized)
+	}
+	*a = arpt
+	return 
nil
+}
diff --git a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go
new file mode 100644
index 00000000000..a96af934178
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go
@@ -0,0 +1,57 @@
+package sarama
+
+// AddOffsetsToTxnRequest is a request to add offsets to a transaction
+type AddOffsetsToTxnRequest struct {
+	TransactionalID string
+	ProducerID      int64
+	ProducerEpoch   int16
+	GroupID         string
+}
+
+func (a *AddOffsetsToTxnRequest) encode(pe packetEncoder) error {
+	if err := pe.putString(a.TransactionalID); err != nil {
+		return err
+	}
+
+	pe.putInt64(a.ProducerID)
+
+	pe.putInt16(a.ProducerEpoch)
+
+	if err := pe.putString(a.GroupID); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (a *AddOffsetsToTxnRequest) decode(pd packetDecoder, version int16) (err error) {
+	if a.TransactionalID, err = pd.getString(); err != nil {
+		return err
+	}
+	if a.ProducerID, err = pd.getInt64(); err != nil {
+		return err
+	}
+	if a.ProducerEpoch, err = pd.getInt16(); err != nil {
+		return err
+	}
+	if a.GroupID, err = pd.getString(); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (a *AddOffsetsToTxnRequest) key() int16 {
+	return 25
+}
+
+func (a *AddOffsetsToTxnRequest) version() int16 {
+	return 0
+}
+
+func (a *AddOffsetsToTxnRequest) headerVersion() int16 {
+	return 1
+}
+
+func (a *AddOffsetsToTxnRequest) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go
new file mode 100644
index 00000000000..bb61973d16b
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go
@@ -0,0 +1,49 @@
+package sarama
+
+import (
+	"time"
+)
+
+// AddOffsetsToTxnResponse is a response type for adding offsets to txns
+type AddOffsetsToTxnResponse struct {
+	ThrottleTime time.Duration
+	Err          KError
+}
+
+func (a *AddOffsetsToTxnResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(a.ThrottleTime / time.Millisecond))
+	pe.putInt16(int16(a.Err))
+	return nil
+}
+
+func (a *AddOffsetsToTxnResponse) decode(pd packetDecoder, version int16) (err error) {
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	a.Err = KError(kerr)
+
+	return nil
+}
+
+func (a *AddOffsetsToTxnResponse) key() int16 {
+	return 25
+}
+
+func (a *AddOffsetsToTxnResponse) version() int16 {
+	return 0
+}
+
+func (a *AddOffsetsToTxnResponse) headerVersion() int16 {
+	return 0
+}
+
+func (a *AddOffsetsToTxnResponse) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go
new file mode 100644
index 00000000000..57ecf64884d
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go
@@ -0,0 +1,81 @@
+package sarama
+
+// AddPartitionsToTxnRequest is a request to add partitions to a transaction
+type AddPartitionsToTxnRequest struct {
+	TransactionalID string
+	ProducerID      int64
+	ProducerEpoch   int16
+	TopicPartitions map[string][]int32
+}
+
+func (a *AddPartitionsToTxnRequest) encode(pe packetEncoder) error {
+	if err := pe.putString(a.TransactionalID); err != nil {
+		return err
+	}
+	pe.putInt64(a.ProducerID)
+	pe.putInt16(a.ProducerEpoch)
+
+	if err := 
pe.putArrayLength(len(a.TopicPartitions)); err != nil {
+		return err
+	}
+	for topic, partitions := range a.TopicPartitions {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putInt32Array(partitions); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (a *AddPartitionsToTxnRequest) decode(pd packetDecoder, version int16) (err error) {
+	if a.TransactionalID, err = pd.getString(); err != nil {
+		return err
+	}
+	if a.ProducerID, err = pd.getInt64(); err != nil {
+		return err
+	}
+	if a.ProducerEpoch, err = pd.getInt16(); err != nil {
+		return err
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	a.TopicPartitions = make(map[string][]int32)
+	for i := 0; i < n; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		partitions, err := pd.getInt32Array()
+		if err != nil {
+			return err
+		}
+
+		a.TopicPartitions[topic] = partitions
+	}
+
+	return nil
+}
+
+func (a *AddPartitionsToTxnRequest) key() int16 {
+	return 24
+}
+
+func (a *AddPartitionsToTxnRequest) version() int16 {
+	return 0
+}
+
+func (a *AddPartitionsToTxnRequest) headerVersion() int16 {
+	return 1
+}
+
+func (a *AddPartitionsToTxnRequest) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go
new file mode 100644
index 00000000000..09895650767
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go
@@ -0,0 +1,114 @@
+package sarama
+
+import (
+	"time"
+)
+
+// AddPartitionsToTxnResponse maps topics to per-partition errors for an AddPartitionsToTxnRequest
+type AddPartitionsToTxnResponse struct {
+	ThrottleTime time.Duration
+	Errors       map[string][]*PartitionError
+}
+
+func (a *AddPartitionsToTxnResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(a.ThrottleTime / time.Millisecond))
+	if err := pe.putArrayLength(len(a.Errors)); err != nil {
+		return err
+	}
+
+	for topic, e := range a.Errors {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putArrayLength(len(e)); err != nil {
+			return err
+		}
+		for _, partitionError := range e {
+			if err := partitionError.encode(pe); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func (a *AddPartitionsToTxnResponse) decode(pd packetDecoder, version int16) (err error) {
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	a.Errors = make(map[string][]*PartitionError)
+
+	for i := 0; i < n; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		m, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+
+		a.Errors[topic] = make([]*PartitionError, m)
+
+		for j := 0; j < m; j++ {
+			a.Errors[topic][j] = new(PartitionError)
+			if err := a.Errors[topic][j].decode(pd, version); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func (a *AddPartitionsToTxnResponse) key() int16 {
+	return 24
+}
+
+func (a *AddPartitionsToTxnResponse) version() int16 {
+	return 0
+}
+
+func (a *AddPartitionsToTxnResponse) headerVersion() int16 {
+	return 0
+}
+
+func (a *AddPartitionsToTxnResponse) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
+
+// PartitionError is a partition error type
+type PartitionError struct {
+	Partition int32
+	Err       KError
+}
+
+func (p *PartitionError) encode(pe packetEncoder) error {
+	pe.putInt32(p.Partition)
+	
pe.putInt16(int16(p.Err))
+	return nil
+}
+
+func (p *PartitionError) decode(pd packetDecoder, version int16) (err error) {
+	if p.Partition, err = pd.getInt32(); err != nil {
+		return err
+	}
+
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	p.Err = KError(kerr)
+
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/admin.go b/vendor/github.com/Shopify/sarama/admin.go
new file mode 100644
index 00000000000..abe18b19f04
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/admin.go
@@ -0,0 +1,1005 @@
+package sarama
+
+import (
+	"errors"
+	"fmt"
+	"math/rand"
+	"strconv"
+	"sync"
+	"time"
+)
+
+// ClusterAdmin is the administrative client for Kafka, which supports managing and inspecting topics,
+// brokers, configurations and ACLs. The minimum broker version required is 0.10.0.0.
+// Methods with stricter requirements will specify the minimum broker version required.
+// You MUST call Close() on a client to avoid leaks
+type ClusterAdmin interface {
+	// Creates a new topic. This operation is supported by brokers with version 0.10.1.0 or higher.
+	// It may take several seconds after CreateTopic returns success for all the brokers
+	// to become aware that the topic has been created. During this time, listTopics
+	// may not return information about the new topic. The validateOnly option is supported from version 0.10.2.0.
+	CreateTopic(topic string, detail *TopicDetail, validateOnly bool) error
+
+	// List the topics available in the cluster with the default options.
+	ListTopics() (map[string]TopicDetail, error)
+
+	// Describe some topics in the cluster.
+	DescribeTopics(topics []string) (metadata []*TopicMetadata, err error)
+
+	// Delete a topic. It may take several seconds after DeleteTopic returns success
+	// for all the brokers to become aware that the topic is gone.
+	// During this time, listTopics may continue to return information about the deleted topic.
+	// If delete.topic.enable is false on the brokers, deleteTopic will mark
+	// the topic for deletion, but not actually delete it.
+	// This operation is supported by brokers with version 0.10.1.0 or higher.
+	DeleteTopic(topic string) error
+
+	// Increase the number of partitions of the topic to the given count.
+	// If partitions are increased for a topic that has a key, the partition logic or ordering of
+	// the messages will be affected. It may take several seconds after this method returns
+	// success for all the brokers to become aware that the partitions have been created.
+	// During this time, ClusterAdmin#describeTopics may not return information about the
+	// new partitions. This operation is supported by brokers with version 1.0.0 or higher.
+	CreatePartitions(topic string, count int32, assignment [][]int32, validateOnly bool) error
+
+	// Alter the replica assignment for partitions.
+	// This operation is supported by brokers with version 2.4.0.0 or higher.
+	AlterPartitionReassignments(topic string, assignment [][]int32) error
+
+	// Provides info on ongoing partition replica reassignments.
+	// This operation is supported by brokers with version 2.4.0.0 or higher.
+	ListPartitionReassignments(topic string, partitions []int32) (topicStatus map[string]map[int32]*PartitionReplicaReassignmentsStatus, err error)
+
+	// Delete records whose offset is smaller than the given offset of the corresponding partition.
+	// This operation is supported by brokers with version 0.11.0.0 or higher.
+	DeleteRecords(topic string, partitionOffsets map[int32]int64) error
+
+	// Get the configuration for the specified resources.
+	// The returned configuration includes default values; entries where Default is true
+	// can be used to distinguish them from user-supplied values.
+	// Config entries where ReadOnly is true cannot be updated.
+	// The value of config entries where Sensitive is true is always nil so
+	// sensitive information is not disclosed.
+	// This operation is supported by brokers with version 0.11.0.0 or higher.
+	DescribeConfig(resource ConfigResource) ([]ConfigEntry, error)
+
+	// Update the configuration for the specified resources with the default options.
+	// This operation is supported by brokers with version 0.11.0.0 or higher.
+	// Topic is currently the only resource type whose configs can be updated.
+	// Updates are not transactional, so they may succeed for some resources while
+	// failing for others. The configs for a particular resource are updated atomically.
+	AlterConfig(resourceType ConfigResourceType, name string, entries map[string]*string, validateOnly bool) error
+
+	// Creates access control lists (ACLs) which are bound to specific resources.
+	// This operation is not transactional, so it may succeed for some ACLs while failing for others.
+	// If you attempt to add an ACL that duplicates an existing ACL, no error will be raised, but
+	// no changes will be made. This operation is supported by brokers with version 0.11.0.0 or higher.
+	CreateACL(resource Resource, acl Acl) error
+
+	// Lists access control lists (ACLs) according to the supplied filter.
+	// It may take some time for changes made by createAcls or deleteAcls to be reflected in the output of ListAcls.
+	// This operation is supported by brokers with version 0.11.0.0 or higher.
+	ListAcls(filter AclFilter) ([]ResourceAcls, error)
+
+	// Deletes access control lists (ACLs) according to the supplied filters.
+	// This operation is not transactional, so it may succeed for some ACLs while failing for others.
+	// This operation is supported by brokers with version 0.11.0.0 or higher.
+	DeleteACL(filter AclFilter, validateOnly bool) ([]MatchingAcl, error)
+
+	// List the consumer groups available in the cluster.
+	ListConsumerGroups() (map[string]string, error)
+
+	// Describe the given consumer groups.
+	DescribeConsumerGroups(groups []string) ([]*GroupDescription, error)
+
+	// List the consumer group offsets available in the cluster.
+	ListConsumerGroupOffsets(group string, topicPartitions map[string][]int32) (*OffsetFetchResponse, error)
+
+	// Delete a consumer group.
+	DeleteConsumerGroup(group string) error
+
+	// Get information about the nodes in the cluster
+	DescribeCluster() (brokers []*Broker, controllerID int32, err error)
+
+	// Get information about all log directories on the given set of brokers
+	DescribeLogDirs(brokers []int32) (map[int32][]DescribeLogDirsResponseDirMetadata, error)
+
+	// Get information about SCRAM users
+	DescribeUserScramCredentials(users []string) ([]*DescribeUserScramCredentialsResult, error)
+
+	// Delete SCRAM users
+	DeleteUserScramCredentials(delete []AlterUserScramCredentialsDelete) ([]*AlterUserScramCredentialsResult, error)
+
+	// Upsert SCRAM users
+	UpsertUserScramCredentials(upsert []AlterUserScramCredentialsUpsert) ([]*AlterUserScramCredentialsResult, error)
+
+	// Close shuts down the admin and closes the underlying client.
+ Close() error +} + +type clusterAdmin struct { + client Client + conf *Config +} + +// NewClusterAdmin creates a new ClusterAdmin using the given broker addresses and configuration. +func NewClusterAdmin(addrs []string, conf *Config) (ClusterAdmin, error) { + client, err := NewClient(addrs, conf) + if err != nil { + return nil, err + } + return NewClusterAdminFromClient(client) +} + +// NewClusterAdminFromClient creates a new ClusterAdmin using the given client. +// Note that underlying client will also be closed on admin's Close() call. +func NewClusterAdminFromClient(client Client) (ClusterAdmin, error) { + // make sure we can retrieve the controller + _, err := client.Controller() + if err != nil { + return nil, err + } + + ca := &clusterAdmin{ + client: client, + conf: client.Config(), + } + return ca, nil +} + +func (ca *clusterAdmin) Close() error { + return ca.client.Close() +} + +func (ca *clusterAdmin) Controller() (*Broker, error) { + return ca.client.Controller() +} + +func (ca *clusterAdmin) refreshController() (*Broker, error) { + return ca.client.RefreshController() +} + +// isErrNoController returns `true` if the given error type unwraps to an +// `ErrNotController` response from Kafka +func isErrNoController(err error) bool { + switch e := err.(type) { + case *TopicError: + return e.Err == ErrNotController + case *TopicPartitionError: + return e.Err == ErrNotController + case KError: + return e == ErrNotController + } + return false +} + +// retryOnError will repeatedly call the given (error-returning) func in the +// case that its response is non-nil and retryable (as determined by the +// provided retryable func) up to the maximum number of tries permitted by +// the admin client configuration +func (ca *clusterAdmin) retryOnError(retryable func(error) bool, fn func() error) error { + var err error + for attempt := 0; attempt < ca.conf.Admin.Retry.Max; attempt++ { + err = fn() + if err == nil || !retryable(err) { + return err + } + Logger.Printf( + "admin/request retrying after %dms... 
(%d attempts remaining)\n", + ca.conf.Admin.Retry.Backoff/time.Millisecond, ca.conf.Admin.Retry.Max-attempt) + time.Sleep(ca.conf.Admin.Retry.Backoff) + continue + } + return err +} + +func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateOnly bool) error { + if topic == "" { + return ErrInvalidTopic + } + + if detail == nil { + return errors.New("you must specify topic details") + } + + topicDetails := make(map[string]*TopicDetail) + topicDetails[topic] = detail + + request := &CreateTopicsRequest{ + TopicDetails: topicDetails, + ValidateOnly: validateOnly, + Timeout: ca.conf.Admin.Timeout, + } + + if ca.conf.Version.IsAtLeast(V0_11_0_0) { + request.Version = 1 + } + if ca.conf.Version.IsAtLeast(V1_0_0_0) { + request.Version = 2 + } + + return ca.retryOnError(isErrNoController, func() error { + b, err := ca.Controller() + if err != nil { + return err + } + + rsp, err := b.CreateTopics(request) + if err != nil { + return err + } + + topicErr, ok := rsp.TopicErrors[topic] + if !ok { + return ErrIncompleteResponse + } + + if topicErr.Err != ErrNoError { + if topicErr.Err == ErrNotController { + _, _ = ca.refreshController() + } + return topicErr + } + + return nil + }) +} + +func (ca *clusterAdmin) DescribeTopics(topics []string) (metadata []*TopicMetadata, err error) { + controller, err := ca.Controller() + if err != nil { + return nil, err + } + + request := &MetadataRequest{ + Topics: topics, + AllowAutoTopicCreation: false, + } + + if ca.conf.Version.IsAtLeast(V1_0_0_0) { + request.Version = 5 + } else if ca.conf.Version.IsAtLeast(V0_11_0_0) { + request.Version = 4 + } + + response, err := controller.GetMetadata(request) + if err != nil { + return nil, err + } + return response.Topics, nil +} + +func (ca *clusterAdmin) DescribeCluster() (brokers []*Broker, controllerID int32, err error) { + controller, err := ca.Controller() + if err != nil { + return nil, int32(0), err + } + + request := &MetadataRequest{ + Topics: []string{}, + } + + if ca.conf.Version.IsAtLeast(V0_10_0_0) { + request.Version = 1 + } + + response, err := controller.GetMetadata(request) + if err != nil { + return nil, int32(0), err + } + + return response.Brokers, response.ControllerID, nil +} + +func (ca *clusterAdmin) findBroker(id int32) (*Broker, error) { + brokers := ca.client.Brokers() + for _, b := range brokers { + if b.ID() == id { + return b, nil + } + } + return nil, fmt.Errorf("could not find broker id %d", id) +} + +func (ca *clusterAdmin) findAnyBroker() (*Broker, error) { + brokers := ca.client.Brokers() + if len(brokers) > 0 { + index := rand.Intn(len(brokers)) + return brokers[index], nil + } + return nil, errors.New("no available broker") +} + +func (ca *clusterAdmin) ListTopics() (map[string]TopicDetail, error) { + // In order to build TopicDetails we need to first get the list of all + // topics using a MetadataRequest and then get their configs using a + // DescribeConfigsRequest request. To avoid sending many requests to the + // broker, we use a single DescribeConfigsRequest. 
+ + // Send the all-topic MetadataRequest + b, err := ca.findAnyBroker() + if err != nil { + return nil, err + } + _ = b.Open(ca.client.Config()) + + metadataReq := &MetadataRequest{} + metadataResp, err := b.GetMetadata(metadataReq) + if err != nil { + return nil, err + } + + topicsDetailsMap := make(map[string]TopicDetail) + + var describeConfigsResources []*ConfigResource + + for _, topic := range metadataResp.Topics { + topicDetails := TopicDetail{ + NumPartitions: int32(len(topic.Partitions)), + } + if len(topic.Partitions) > 0 { + topicDetails.ReplicaAssignment = map[int32][]int32{} + for _, partition := range topic.Partitions { + topicDetails.ReplicaAssignment[partition.ID] = partition.Replicas + } + topicDetails.ReplicationFactor = int16(len(topic.Partitions[0].Replicas)) + } + topicsDetailsMap[topic.Name] = topicDetails + + // we populate the resources we want to describe from the MetadataResponse + topicResource := ConfigResource{ + Type: TopicResource, + Name: topic.Name, + } + describeConfigsResources = append(describeConfigsResources, &topicResource) + } + + // Send the DescribeConfigsRequest + describeConfigsReq := &DescribeConfigsRequest{ + Resources: describeConfigsResources, + } + + if ca.conf.Version.IsAtLeast(V1_1_0_0) { + describeConfigsReq.Version = 1 + } + + if ca.conf.Version.IsAtLeast(V2_0_0_0) { + describeConfigsReq.Version = 2 + } + + describeConfigsResp, err := b.DescribeConfigs(describeConfigsReq) + if err != nil { + return nil, err + } + + for _, resource := range describeConfigsResp.Resources { + topicDetails := topicsDetailsMap[resource.Name] + topicDetails.ConfigEntries = make(map[string]*string) + + for _, entry := range resource.Configs { + // only include non-default non-sensitive config + // (don't actually think topic config will ever be sensitive) + if entry.Default || entry.Sensitive { + continue + } + topicDetails.ConfigEntries[entry.Name] = &entry.Value + } + + topicsDetailsMap[resource.Name] = topicDetails + } + + return topicsDetailsMap, nil +} + +func (ca *clusterAdmin) DeleteTopic(topic string) error { + if topic == "" { + return ErrInvalidTopic + } + + request := &DeleteTopicsRequest{ + Topics: []string{topic}, + Timeout: ca.conf.Admin.Timeout, + } + + if ca.conf.Version.IsAtLeast(V0_11_0_0) { + request.Version = 1 + } + + return ca.retryOnError(isErrNoController, func() error { + b, err := ca.Controller() + if err != nil { + return err + } + + rsp, err := b.DeleteTopics(request) + if err != nil { + return err + } + + topicErr, ok := rsp.TopicErrorCodes[topic] + if !ok { + return ErrIncompleteResponse + } + + if topicErr != ErrNoError { + if topicErr == ErrNotController { + _, _ = ca.refreshController() + } + return topicErr + } + + return nil + }) +} + +func (ca *clusterAdmin) CreatePartitions(topic string, count int32, assignment [][]int32, validateOnly bool) error { + if topic == "" { + return ErrInvalidTopic + } + + topicPartitions := make(map[string]*TopicPartition) + topicPartitions[topic] = &TopicPartition{Count: count, Assignment: assignment} + + request := &CreatePartitionsRequest{ + TopicPartitions: topicPartitions, + Timeout: ca.conf.Admin.Timeout, + } + + return ca.retryOnError(isErrNoController, func() error { + b, err := ca.Controller() + if err != nil { + return err + } + + rsp, err := b.CreatePartitions(request) + if err != nil { + return err + } + + topicErr, ok := rsp.TopicPartitionErrors[topic] + if !ok { + return ErrIncompleteResponse + } + + if topicErr.Err != ErrNoError { + if topicErr.Err == ErrNotController { + _, _ = 
ca.refreshController() + } + return topicErr + } + + return nil + }) +} + +func (ca *clusterAdmin) AlterPartitionReassignments(topic string, assignment [][]int32) error { + if topic == "" { + return ErrInvalidTopic + } + + request := &AlterPartitionReassignmentsRequest{ + TimeoutMs: int32(60000), + Version: int16(0), + } + + for i := 0; i < len(assignment); i++ { + request.AddBlock(topic, int32(i), assignment[i]) + } + + return ca.retryOnError(isErrNoController, func() error { + b, err := ca.Controller() + if err != nil { + return err + } + + errs := make([]error, 0) + + rsp, err := b.AlterPartitionReassignments(request) + + if err != nil { + errs = append(errs, err) + } else { + if rsp.ErrorCode > 0 { + errs = append(errs, errors.New(rsp.ErrorCode.Error())) + } + + for topic, topicErrors := range rsp.Errors { + for partition, partitionError := range topicErrors { + if partitionError.errorCode != ErrNoError { + errStr := fmt.Sprintf("[%s-%d]: %s", topic, partition, partitionError.errorCode.Error()) + errs = append(errs, errors.New(errStr)) + } + } + } + } + + if len(errs) > 0 { + return ErrReassignPartitions{MultiError{&errs}} + } + + return nil + }) +} + +func (ca *clusterAdmin) ListPartitionReassignments(topic string, partitions []int32) (topicStatus map[string]map[int32]*PartitionReplicaReassignmentsStatus, err error) { + if topic == "" { + return nil, ErrInvalidTopic + } + + request := &ListPartitionReassignmentsRequest{ + TimeoutMs: int32(60000), + Version: int16(0), + } + + request.AddBlock(topic, partitions) + + b, err := ca.Controller() + if err != nil { + return nil, err + } + _ = b.Open(ca.client.Config()) + + rsp, err := b.ListPartitionReassignments(request) + + if err == nil && rsp != nil { + return rsp.TopicStatus, nil + } else { + return nil, err + } +} + +func (ca *clusterAdmin) DeleteRecords(topic string, partitionOffsets map[int32]int64) error { + if topic == "" { + return ErrInvalidTopic + } + partitionPerBroker := make(map[*Broker][]int32) + for partition := range partitionOffsets { + broker, err := ca.client.Leader(topic, partition) + if err != nil { + return err + } + partitionPerBroker[broker] = append(partitionPerBroker[broker], partition) + } + errs := make([]error, 0) + for broker, partitions := range partitionPerBroker { + topics := make(map[string]*DeleteRecordsRequestTopic) + recordsToDelete := make(map[int32]int64) + for _, p := range partitions { + recordsToDelete[p] = partitionOffsets[p] + } + topics[topic] = &DeleteRecordsRequestTopic{PartitionOffsets: recordsToDelete} + request := &DeleteRecordsRequest{ + Topics: topics, + Timeout: ca.conf.Admin.Timeout, + } + + rsp, err := broker.DeleteRecords(request) + if err != nil { + errs = append(errs, err) + } else { + deleteRecordsResponseTopic, ok := rsp.Topics[topic] + if !ok { + errs = append(errs, ErrIncompleteResponse) + } else { + for _, deleteRecordsResponsePartition := range deleteRecordsResponseTopic.Partitions { + if deleteRecordsResponsePartition.Err != ErrNoError { + errs = append(errs, errors.New(deleteRecordsResponsePartition.Err.Error())) + } + } + } + } + } + if len(errs) > 0 { + return ErrDeleteRecords{MultiError{&errs}} + } + // todo since we are dealing with couple of partitions it would be good if we return slice of errors + // for each partition instead of one error + return nil +} + +// Returns a bool indicating whether the resource request needs to go to a +// specific broker +func dependsOnSpecificNode(resource ConfigResource) bool { + return (resource.Type == BrokerResource && resource.Name 
!= "") || + resource.Type == BrokerLoggerResource +} + +func (ca *clusterAdmin) DescribeConfig(resource ConfigResource) ([]ConfigEntry, error) { + var entries []ConfigEntry + var resources []*ConfigResource + resources = append(resources, &resource) + + request := &DescribeConfigsRequest{ + Resources: resources, + } + + if ca.conf.Version.IsAtLeast(V1_1_0_0) { + request.Version = 1 + } + + if ca.conf.Version.IsAtLeast(V2_0_0_0) { + request.Version = 2 + } + + var ( + b *Broker + err error + ) + + // DescribeConfig of broker/broker logger must be sent to the broker in question + if dependsOnSpecificNode(resource) { + var id int64 + id, err = strconv.ParseInt(resource.Name, 10, 32) + if err != nil { + return nil, err + } + b, err = ca.findBroker(int32(id)) + } else { + b, err = ca.findAnyBroker() + } + if err != nil { + return nil, err + } + + _ = b.Open(ca.client.Config()) + rsp, err := b.DescribeConfigs(request) + if err != nil { + return nil, err + } + + for _, rspResource := range rsp.Resources { + if rspResource.Name == resource.Name { + if rspResource.ErrorMsg != "" { + return nil, errors.New(rspResource.ErrorMsg) + } + if rspResource.ErrorCode != 0 { + return nil, KError(rspResource.ErrorCode) + } + for _, cfgEntry := range rspResource.Configs { + entries = append(entries, *cfgEntry) + } + } + } + return entries, nil +} + +func (ca *clusterAdmin) AlterConfig(resourceType ConfigResourceType, name string, entries map[string]*string, validateOnly bool) error { + var resources []*AlterConfigsResource + resources = append(resources, &AlterConfigsResource{ + Type: resourceType, + Name: name, + ConfigEntries: entries, + }) + + request := &AlterConfigsRequest{ + Resources: resources, + ValidateOnly: validateOnly, + } + + var ( + b *Broker + err error + ) + + // AlterConfig of broker/broker logger must be sent to the broker in question + if dependsOnSpecificNode(ConfigResource{Name: name, Type: resourceType}) { + var id int64 + id, err = strconv.ParseInt(name, 10, 32) + if err != nil { + return err + } + b, err = ca.findBroker(int32(id)) + } else { + b, err = ca.findAnyBroker() + } + if err != nil { + return err + } + + _ = b.Open(ca.client.Config()) + rsp, err := b.AlterConfigs(request) + if err != nil { + return err + } + + for _, rspResource := range rsp.Resources { + if rspResource.Name == name { + if rspResource.ErrorMsg != "" { + return errors.New(rspResource.ErrorMsg) + } + if rspResource.ErrorCode != 0 { + return KError(rspResource.ErrorCode) + } + } + } + return nil +} + +func (ca *clusterAdmin) CreateACL(resource Resource, acl Acl) error { + var acls []*AclCreation + acls = append(acls, &AclCreation{resource, acl}) + request := &CreateAclsRequest{AclCreations: acls} + + if ca.conf.Version.IsAtLeast(V2_0_0_0) { + request.Version = 1 + } + + b, err := ca.Controller() + if err != nil { + return err + } + + _, err = b.CreateAcls(request) + return err +} + +func (ca *clusterAdmin) ListAcls(filter AclFilter) ([]ResourceAcls, error) { + request := &DescribeAclsRequest{AclFilter: filter} + + if ca.conf.Version.IsAtLeast(V2_0_0_0) { + request.Version = 1 + } + + b, err := ca.Controller() + if err != nil { + return nil, err + } + + rsp, err := b.DescribeAcls(request) + if err != nil { + return nil, err + } + + var lAcls []ResourceAcls + for _, rAcl := range rsp.ResourceAcls { + lAcls = append(lAcls, *rAcl) + } + return lAcls, nil +} + +func (ca *clusterAdmin) DeleteACL(filter AclFilter, validateOnly bool) ([]MatchingAcl, error) { + var filters []*AclFilter + filters = append(filters, &filter) 
+ request := &DeleteAclsRequest{Filters: filters} + + if ca.conf.Version.IsAtLeast(V2_0_0_0) { + request.Version = 1 + } + + b, err := ca.Controller() + if err != nil { + return nil, err + } + + rsp, err := b.DeleteAcls(request) + if err != nil { + return nil, err + } + + var mAcls []MatchingAcl + for _, fr := range rsp.FilterResponses { + for _, mACL := range fr.MatchingAcls { + mAcls = append(mAcls, *mACL) + } + } + return mAcls, nil +} + +func (ca *clusterAdmin) DescribeConsumerGroups(groups []string) (result []*GroupDescription, err error) { + groupsPerBroker := make(map[*Broker][]string) + + for _, group := range groups { + controller, err := ca.client.Coordinator(group) + if err != nil { + return nil, err + } + groupsPerBroker[controller] = append(groupsPerBroker[controller], group) + } + + for broker, brokerGroups := range groupsPerBroker { + response, err := broker.DescribeGroups(&DescribeGroupsRequest{ + Groups: brokerGroups, + }) + if err != nil { + return nil, err + } + + result = append(result, response.Groups...) + } + return result, nil +} + +func (ca *clusterAdmin) ListConsumerGroups() (allGroups map[string]string, err error) { + allGroups = make(map[string]string) + + // Query brokers in parallel, since we have to query *all* brokers + brokers := ca.client.Brokers() + groupMaps := make(chan map[string]string, len(brokers)) + errChan := make(chan error, len(brokers)) + wg := sync.WaitGroup{} + + for _, b := range brokers { + wg.Add(1) + go func(b *Broker, conf *Config) { + defer wg.Done() + _ = b.Open(conf) // Ensure that broker is opened + + response, err := b.ListGroups(&ListGroupsRequest{}) + if err != nil { + errChan <- err + return + } + + groups := make(map[string]string) + for group, typ := range response.Groups { + groups[group] = typ + } + + groupMaps <- groups + }(b, ca.conf) + } + + wg.Wait() + close(groupMaps) + close(errChan) + + for groupMap := range groupMaps { + for group, protocolType := range groupMap { + allGroups[group] = protocolType + } + } + + // Intentionally return only the first error for simplicity + err = <-errChan + return +} + +func (ca *clusterAdmin) ListConsumerGroupOffsets(group string, topicPartitions map[string][]int32) (*OffsetFetchResponse, error) { + coordinator, err := ca.client.Coordinator(group) + if err != nil { + return nil, err + } + + request := &OffsetFetchRequest{ + ConsumerGroup: group, + partitions: topicPartitions, + } + + if ca.conf.Version.IsAtLeast(V0_10_2_0) { + request.Version = 2 + } else if ca.conf.Version.IsAtLeast(V0_8_2_2) { + request.Version = 1 + } + + return coordinator.FetchOffset(request) +} + +func (ca *clusterAdmin) DeleteConsumerGroup(group string) error { + coordinator, err := ca.client.Coordinator(group) + if err != nil { + return err + } + + request := &DeleteGroupsRequest{ + Groups: []string{group}, + } + + resp, err := coordinator.DeleteGroups(request) + if err != nil { + return err + } + + groupErr, ok := resp.GroupErrorCodes[group] + if !ok { + return ErrIncompleteResponse + } + + if groupErr != ErrNoError { + return groupErr + } + + return nil +} + +func (ca *clusterAdmin) DescribeLogDirs(brokerIds []int32) (allLogDirs map[int32][]DescribeLogDirsResponseDirMetadata, err error) { + allLogDirs = make(map[int32][]DescribeLogDirsResponseDirMetadata) + + // Query brokers in parallel, since we may have to query multiple brokers + logDirsMaps := make(chan map[int32][]DescribeLogDirsResponseDirMetadata, len(brokerIds)) + errChan := make(chan error, len(brokerIds)) + wg := sync.WaitGroup{} + + for _, b := 
range brokerIds {
+		broker, err := ca.findBroker(b)
+		if err != nil {
+			Logger.Printf("Unable to find broker with ID = %v\n", b)
+			continue
+		}
+		// only add to the wait group once the broker lookup has succeeded,
+		// otherwise wg.Wait() below would block forever on the missing wg.Done()
+		wg.Add(1)
+		go func(b *Broker, conf *Config) {
+			defer wg.Done()
+			_ = b.Open(conf) // Ensure that broker is opened
+
+			response, err := b.DescribeLogDirs(&DescribeLogDirsRequest{})
+			if err != nil {
+				errChan <- err
+				return
+			}
+			logDirs := make(map[int32][]DescribeLogDirsResponseDirMetadata)
+			logDirs[b.ID()] = response.LogDirs
+			logDirsMaps <- logDirs
+		}(broker, ca.conf)
+	}
+
+	wg.Wait()
+	close(logDirsMaps)
+	close(errChan)
+
+	for logDirsMap := range logDirsMaps {
+		for id, logDirs := range logDirsMap {
+			allLogDirs[id] = logDirs
+		}
+	}
+
+	// Intentionally return only the first error for simplicity
+	err = <-errChan
+	return
+}
+
+func (ca *clusterAdmin) DescribeUserScramCredentials(users []string) ([]*DescribeUserScramCredentialsResult, error) {
+	req := &DescribeUserScramCredentialsRequest{}
+	for _, u := range users {
+		req.DescribeUsers = append(req.DescribeUsers, DescribeUserScramCredentialsRequestUser{
+			Name: u,
+		})
+	}
+
+	b, err := ca.Controller()
+	if err != nil {
+		return nil, err
+	}
+
+	rsp, err := b.DescribeUserScramCredentials(req)
+	if err != nil {
+		return nil, err
+	}
+
+	return rsp.Results, nil
+}
+
+func (ca *clusterAdmin) UpsertUserScramCredentials(upsert []AlterUserScramCredentialsUpsert) ([]*AlterUserScramCredentialsResult, error) {
+	res, err := ca.AlterUserScramCredentials(upsert, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	return res, nil
+}
+
+func (ca *clusterAdmin) DeleteUserScramCredentials(delete []AlterUserScramCredentialsDelete) ([]*AlterUserScramCredentialsResult, error) {
+	res, err := ca.AlterUserScramCredentials(nil, delete)
+	if err != nil {
+		return nil, err
+	}
+
+	return res, nil
+}
+
+func (ca *clusterAdmin) AlterUserScramCredentials(u []AlterUserScramCredentialsUpsert, d []AlterUserScramCredentialsDelete) ([]*AlterUserScramCredentialsResult, error) {
+	req := &AlterUserScramCredentialsRequest{
+		Deletions:  d,
+		Upsertions: u,
+	}
+
+	b, err := ca.Controller()
+	if err != nil {
+		return nil, err
+	}
+
+	rsp, err := b.AlterUserScramCredentials(req)
+	if err != nil {
+		return nil, err
+	}
+
+	return rsp.Results, nil
+}
diff --git a/vendor/github.com/Shopify/sarama/alter_configs_request.go b/vendor/github.com/Shopify/sarama/alter_configs_request.go
new file mode 100644
index 00000000000..8b94b1f3fe4
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/alter_configs_request.go
@@ -0,0 +1,126 @@
+package sarama
+
+// AlterConfigsRequest is an alter config request type
+type AlterConfigsRequest struct {
+	Resources    []*AlterConfigsResource
+	ValidateOnly bool
+}
+
+// AlterConfigsResource is an alter config resource type
+type AlterConfigsResource struct {
+	Type          ConfigResourceType
+	Name          string
+	ConfigEntries map[string]*string
+}
+
+func (a *AlterConfigsRequest) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(a.Resources)); err != nil {
+		return err
+	}
+
+	for _, r := range a.Resources {
+		if err := r.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	pe.putBool(a.ValidateOnly)
+	return nil
+}
+
+func (a *AlterConfigsRequest) decode(pd packetDecoder, version int16) error {
+	resourceCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	a.Resources = make([]*AlterConfigsResource, resourceCount)
+	for i := range a.Resources {
+		r := &AlterConfigsResource{}
+		err = r.decode(pd, version)
+		if err != nil {
+			return err
+		}
+		a.Resources[i] = r
+	}
+
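+	// a single validate_only flag trails the resources array on the wire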
+	validateOnly, err := pd.getBool()
+	if err != nil {
+		return err
+	}
+
+	a.ValidateOnly = validateOnly
+
+	return nil
+}
+
+func (a *AlterConfigsResource) encode(pe packetEncoder) error {
+	pe.putInt8(int8(a.Type))
+
+	if err := pe.putString(a.Name); err != nil {
+		return err
+	}
+
+	if err := pe.putArrayLength(len(a.ConfigEntries)); err != nil {
+		return err
+	}
+	for configKey, configValue := range a.ConfigEntries {
+		if err := pe.putString(configKey); err != nil {
+			return err
+		}
+		if err := pe.putNullableString(configValue); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (a *AlterConfigsResource) decode(pd packetDecoder, version int16) error {
+	t, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	a.Type = ConfigResourceType(t)
+
+	name, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	a.Name = name
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if n > 0 {
+		a.ConfigEntries = make(map[string]*string, n)
+		for i := 0; i < n; i++ {
+			configKey, err := pd.getString()
+			if err != nil {
+				return err
+			}
+			if a.ConfigEntries[configKey], err = pd.getNullableString(); err != nil {
+				return err
+			}
+		}
+	}
+	return err
+}
+
+func (a *AlterConfigsRequest) key() int16 {
+	return 33
+}
+
+func (a *AlterConfigsRequest) version() int16 {
+	return 0
+}
+
+func (a *AlterConfigsRequest) headerVersion() int16 {
+	return 1
+}
+
+func (a *AlterConfigsRequest) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/alter_configs_response.go b/vendor/github.com/Shopify/sarama/alter_configs_response.go
new file mode 100644
index 00000000000..cfb6369ac1d
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/alter_configs_response.go
@@ -0,0 +1,116 @@
+package sarama
+
+import "time"
+
+// AlterConfigsResponse is a response type for alter config
+type AlterConfigsResponse struct {
+	ThrottleTime time.Duration
+	Resources    []*AlterConfigsResourceResponse
+}
+
+// AlterConfigsResourceResponse is a response type for alter config resource
+type AlterConfigsResourceResponse struct {
+	ErrorCode int16
+	ErrorMsg  string
+	Type      ConfigResourceType
+	Name      string
+}
+
+func (a *AlterConfigsResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(a.ThrottleTime / time.Millisecond))
+
+	if err := pe.putArrayLength(len(a.Resources)); err != nil {
+		return err
+	}
+
+	for _, v := range a.Resources {
+		if err := v.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (a *AlterConfigsResponse) decode(pd packetDecoder, version int16) error {
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	responseCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	a.Resources = make([]*AlterConfigsResourceResponse, responseCount)
+
+	for i := range a.Resources {
+		a.Resources[i] = new(AlterConfigsResourceResponse)
+
+		if err := a.Resources[i].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (a *AlterConfigsResourceResponse) encode(pe packetEncoder) error {
+	pe.putInt16(a.ErrorCode)
+	err := pe.putString(a.ErrorMsg)
+	if err != nil {
+		return err // propagate the encode error instead of swallowing it
+	}
+	pe.putInt8(int8(a.Type))
+	err = pe.putString(a.Name)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func (a *AlterConfigsResourceResponse) decode(pd packetDecoder, version int16) error {
+	errCode, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	a.ErrorCode = errCode
+
+	e, err := pd.getString()
+	if err != nil {
+ return err + } + a.ErrorMsg = e + + t, err := pd.getInt8() + if err != nil { + return err + } + a.Type = ConfigResourceType(t) + + name, err := pd.getString() + if err != nil { + return err + } + a.Name = name + + return nil +} + +func (a *AlterConfigsResponse) key() int16 { + return 32 +} + +func (a *AlterConfigsResponse) version() int16 { + return 0 +} + +func (a *AlterConfigsResponse) headerVersion() int16 { + return 0 +} + +func (a *AlterConfigsResponse) requiredVersion() KafkaVersion { + return V0_11_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/alter_partition_reassignments_request.go b/vendor/github.com/Shopify/sarama/alter_partition_reassignments_request.go new file mode 100644 index 00000000000..f0a2f9dd59b --- /dev/null +++ b/vendor/github.com/Shopify/sarama/alter_partition_reassignments_request.go @@ -0,0 +1,130 @@ +package sarama + +type alterPartitionReassignmentsBlock struct { + replicas []int32 +} + +func (b *alterPartitionReassignmentsBlock) encode(pe packetEncoder) error { + if err := pe.putNullableCompactInt32Array(b.replicas); err != nil { + return err + } + + pe.putEmptyTaggedFieldArray() + return nil +} + +func (b *alterPartitionReassignmentsBlock) decode(pd packetDecoder) (err error) { + if b.replicas, err = pd.getCompactInt32Array(); err != nil { + return err + } + return nil +} + +type AlterPartitionReassignmentsRequest struct { + TimeoutMs int32 + blocks map[string]map[int32]*alterPartitionReassignmentsBlock + Version int16 +} + +func (r *AlterPartitionReassignmentsRequest) encode(pe packetEncoder) error { + pe.putInt32(r.TimeoutMs) + + pe.putCompactArrayLength(len(r.blocks)) + + for topic, partitions := range r.blocks { + if err := pe.putCompactString(topic); err != nil { + return err + } + pe.putCompactArrayLength(len(partitions)) + for partition, block := range partitions { + pe.putInt32(partition) + if err := block.encode(pe); err != nil { + return err + } + } + pe.putEmptyTaggedFieldArray() + } + + pe.putEmptyTaggedFieldArray() + + return nil +} + +func (r *AlterPartitionReassignmentsRequest) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + if r.TimeoutMs, err = pd.getInt32(); err != nil { + return err + } + + topicCount, err := pd.getCompactArrayLength() + if err != nil { + return err + } + if topicCount > 0 { + r.blocks = make(map[string]map[int32]*alterPartitionReassignmentsBlock) + for i := 0; i < topicCount; i++ { + topic, err := pd.getCompactString() + if err != nil { + return err + } + partitionCount, err := pd.getCompactArrayLength() + if err != nil { + return err + } + r.blocks[topic] = make(map[int32]*alterPartitionReassignmentsBlock) + for j := 0; j < partitionCount; j++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + block := &alterPartitionReassignmentsBlock{} + if err := block.decode(pd); err != nil { + return err + } + r.blocks[topic][partition] = block + + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + } + + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + + return +} + +func (r *AlterPartitionReassignmentsRequest) key() int16 { + return 45 +} + +func (r *AlterPartitionReassignmentsRequest) version() int16 { + return r.Version +} + +func (r *AlterPartitionReassignmentsRequest) headerVersion() int16 { + return 2 +} + +func (r *AlterPartitionReassignmentsRequest) requiredVersion() KafkaVersion { + return V2_4_0_0 +} + +func (r 
*AlterPartitionReassignmentsRequest) AddBlock(topic string, partitionID int32, replicas []int32) { + if r.blocks == nil { + r.blocks = make(map[string]map[int32]*alterPartitionReassignmentsBlock) + } + + if r.blocks[topic] == nil { + r.blocks[topic] = make(map[int32]*alterPartitionReassignmentsBlock) + } + + r.blocks[topic][partitionID] = &alterPartitionReassignmentsBlock{replicas} +} diff --git a/vendor/github.com/Shopify/sarama/alter_partition_reassignments_response.go b/vendor/github.com/Shopify/sarama/alter_partition_reassignments_response.go new file mode 100644 index 00000000000..b3f9a15fe7f --- /dev/null +++ b/vendor/github.com/Shopify/sarama/alter_partition_reassignments_response.go @@ -0,0 +1,157 @@ +package sarama + +type alterPartitionReassignmentsErrorBlock struct { + errorCode KError + errorMessage *string +} + +func (b *alterPartitionReassignmentsErrorBlock) encode(pe packetEncoder) error { + pe.putInt16(int16(b.errorCode)) + if err := pe.putNullableCompactString(b.errorMessage); err != nil { + return err + } + pe.putEmptyTaggedFieldArray() + + return nil +} + +func (b *alterPartitionReassignmentsErrorBlock) decode(pd packetDecoder) (err error) { + errorCode, err := pd.getInt16() + if err != nil { + return err + } + b.errorCode = KError(errorCode) + b.errorMessage, err = pd.getCompactNullableString() + + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + return err +} + +type AlterPartitionReassignmentsResponse struct { + Version int16 + ThrottleTimeMs int32 + ErrorCode KError + ErrorMessage *string + Errors map[string]map[int32]*alterPartitionReassignmentsErrorBlock +} + +func (r *AlterPartitionReassignmentsResponse) AddError(topic string, partition int32, kerror KError, message *string) { + if r.Errors == nil { + r.Errors = make(map[string]map[int32]*alterPartitionReassignmentsErrorBlock) + } + partitions := r.Errors[topic] + if partitions == nil { + partitions = make(map[int32]*alterPartitionReassignmentsErrorBlock) + r.Errors[topic] = partitions + } + + partitions[partition] = &alterPartitionReassignmentsErrorBlock{errorCode: kerror, errorMessage: message} +} + +func (r *AlterPartitionReassignmentsResponse) encode(pe packetEncoder) error { + pe.putInt32(r.ThrottleTimeMs) + pe.putInt16(int16(r.ErrorCode)) + if err := pe.putNullableCompactString(r.ErrorMessage); err != nil { + return err + } + + pe.putCompactArrayLength(len(r.Errors)) + for topic, partitions := range r.Errors { + if err := pe.putCompactString(topic); err != nil { + return err + } + pe.putCompactArrayLength(len(partitions)) + for partition, block := range partitions { + pe.putInt32(partition) + + if err := block.encode(pe); err != nil { + return err + } + } + pe.putEmptyTaggedFieldArray() + } + + pe.putEmptyTaggedFieldArray() + return nil +} + +func (r *AlterPartitionReassignmentsResponse) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + if r.ThrottleTimeMs, err = pd.getInt32(); err != nil { + return err + } + + kerr, err := pd.getInt16() + if err != nil { + return err + } + + r.ErrorCode = KError(kerr) + + if r.ErrorMessage, err = pd.getCompactNullableString(); err != nil { + return err + } + + numTopics, err := pd.getCompactArrayLength() + if err != nil { + return err + } + + if numTopics > 0 { + r.Errors = make(map[string]map[int32]*alterPartitionReassignmentsErrorBlock, numTopics) + for i := 0; i < numTopics; i++ { + topic, err := pd.getCompactString() + if err != nil { + return err + } + + ongoingPartitionReassignments, err := 
pd.getCompactArrayLength() + if err != nil { + return err + } + + r.Errors[topic] = make(map[int32]*alterPartitionReassignmentsErrorBlock, ongoingPartitionReassignments) + + for j := 0; j < ongoingPartitionReassignments; j++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + block := &alterPartitionReassignmentsErrorBlock{} + if err := block.decode(pd); err != nil { + return err + } + + r.Errors[topic][partition] = block + } + if _, err = pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + } + + if _, err = pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + + return nil +} + +func (r *AlterPartitionReassignmentsResponse) key() int16 { + return 45 +} + +func (r *AlterPartitionReassignmentsResponse) version() int16 { + return r.Version +} + +func (r *AlterPartitionReassignmentsResponse) headerVersion() int16 { + return 1 +} + +func (r *AlterPartitionReassignmentsResponse) requiredVersion() KafkaVersion { + return V2_4_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/alter_user_scram_credentials_request.go b/vendor/github.com/Shopify/sarama/alter_user_scram_credentials_request.go new file mode 100644 index 00000000000..0530d8946a8 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/alter_user_scram_credentials_request.go @@ -0,0 +1,142 @@ +package sarama + +type AlterUserScramCredentialsRequest struct { + Version int16 + + // Deletions represent list of SCRAM credentials to remove + Deletions []AlterUserScramCredentialsDelete + + // Upsertions represent list of SCRAM credentials to update/insert + Upsertions []AlterUserScramCredentialsUpsert +} + +type AlterUserScramCredentialsDelete struct { + Name string + Mechanism ScramMechanismType +} + +type AlterUserScramCredentialsUpsert struct { + Name string + Mechanism ScramMechanismType + Iterations int32 + Salt []byte + saltedPassword []byte + + // This field is never transmitted over the wire + // @see: https://tools.ietf.org/html/rfc5802 + Password []byte +} + +func (r *AlterUserScramCredentialsRequest) encode(pe packetEncoder) error { + pe.putCompactArrayLength(len(r.Deletions)) + for _, d := range r.Deletions { + if err := pe.putCompactString(d.Name); err != nil { + return err + } + pe.putInt8(int8(d.Mechanism)) + pe.putEmptyTaggedFieldArray() + } + + pe.putCompactArrayLength(len(r.Upsertions)) + for _, u := range r.Upsertions { + if err := pe.putCompactString(u.Name); err != nil { + return err + } + pe.putInt8(int8(u.Mechanism)) + pe.putInt32(u.Iterations) + + if err := pe.putCompactBytes(u.Salt); err != nil { + return err + } + + // do not transmit the password over the wire + formatter := scramFormatter{mechanism: u.Mechanism} + salted, err := formatter.saltedPassword(u.Password, u.Salt, int(u.Iterations)) + if err != nil { + return err + } + + if err := pe.putCompactBytes(salted); err != nil { + return err + } + pe.putEmptyTaggedFieldArray() + } + + pe.putEmptyTaggedFieldArray() + return nil +} + +func (r *AlterUserScramCredentialsRequest) decode(pd packetDecoder, version int16) error { + numDeletions, err := pd.getCompactArrayLength() + if err != nil { + return err + } + + r.Deletions = make([]AlterUserScramCredentialsDelete, numDeletions) + for i := 0; i < numDeletions; i++ { + r.Deletions[i] = AlterUserScramCredentialsDelete{} + if r.Deletions[i].Name, err = pd.getCompactString(); err != nil { + return err + } + mechanism, err := pd.getInt8() + if err != nil { + return err + } + r.Deletions[i].Mechanism = ScramMechanismType(mechanism) + if _, err = pd.getEmptyTaggedFieldArray(); err 
!= nil { + return err + } + } + + numUpsertions, err := pd.getCompactArrayLength() + if err != nil { + return err + } + + r.Upsertions = make([]AlterUserScramCredentialsUpsert, numUpsertions) + for i := 0; i < numUpsertions; i++ { + r.Upsertions[i] = AlterUserScramCredentialsUpsert{} + if r.Upsertions[i].Name, err = pd.getCompactString(); err != nil { + return err + } + mechanism, err := pd.getInt8() + if err != nil { + return err + } + + r.Upsertions[i].Mechanism = ScramMechanismType(mechanism) + if r.Upsertions[i].Iterations, err = pd.getInt32(); err != nil { + return err + } + if r.Upsertions[i].Salt, err = pd.getCompactBytes(); err != nil { + return err + } + if r.Upsertions[i].saltedPassword, err = pd.getCompactBytes(); err != nil { + return err + } + if _, err = pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + + if _, err = pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + return nil +} + +func (r *AlterUserScramCredentialsRequest) key() int16 { + return 51 +} + +func (r *AlterUserScramCredentialsRequest) version() int16 { + return r.Version +} + +func (r *AlterUserScramCredentialsRequest) headerVersion() int16 { + return 2 +} + +func (r *AlterUserScramCredentialsRequest) requiredVersion() KafkaVersion { + return V2_7_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/alter_user_scram_credentials_response.go b/vendor/github.com/Shopify/sarama/alter_user_scram_credentials_response.go new file mode 100644 index 00000000000..31e167b5eb7 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/alter_user_scram_credentials_response.go @@ -0,0 +1,94 @@ +package sarama + +import "time" + +type AlterUserScramCredentialsResponse struct { + Version int16 + + ThrottleTime time.Duration + + Results []*AlterUserScramCredentialsResult +} + +type AlterUserScramCredentialsResult struct { + User string + + ErrorCode KError + ErrorMessage *string +} + +func (r *AlterUserScramCredentialsResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(r.ThrottleTime / time.Millisecond)) + pe.putCompactArrayLength(len(r.Results)) + + for _, u := range r.Results { + if err := pe.putCompactString(u.User); err != nil { + return err + } + pe.putInt16(int16(u.ErrorCode)) + if err := pe.putNullableCompactString(u.ErrorMessage); err != nil { + return err + } + pe.putEmptyTaggedFieldArray() + } + + pe.putEmptyTaggedFieldArray() + return nil +} + +func (r *AlterUserScramCredentialsResponse) decode(pd packetDecoder, version int16) error { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + numResults, err := pd.getCompactArrayLength() + if err != nil { + return err + } + + if numResults > 0 { + r.Results = make([]*AlterUserScramCredentialsResult, numResults) + for i := 0; i < numResults; i++ { + r.Results[i] = &AlterUserScramCredentialsResult{} + if r.Results[i].User, err = pd.getCompactString(); err != nil { + return err + } + + kerr, err := pd.getInt16() + if err != nil { + return err + } + + r.Results[i].ErrorCode = KError(kerr) + if r.Results[i].ErrorMessage, err = pd.getCompactNullableString(); err != nil { + return err + } + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + } + + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + return nil +} + +func (r *AlterUserScramCredentialsResponse) key() int16 { + return 51 +} + +func (r *AlterUserScramCredentialsResponse) version() int16 { + return r.Version +} + +func (r 
*AlterUserScramCredentialsResponse) headerVersion() int16 { + return 2 +} + +func (r *AlterUserScramCredentialsResponse) requiredVersion() KafkaVersion { + return V2_7_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/api_versions_request.go b/vendor/github.com/Shopify/sarama/api_versions_request.go new file mode 100644 index 00000000000..bee92c0e7f5 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/api_versions_request.go @@ -0,0 +1,28 @@ +package sarama + +// ApiVersionsRequest ... +type ApiVersionsRequest struct{} + +func (a *ApiVersionsRequest) encode(pe packetEncoder) error { + return nil +} + +func (a *ApiVersionsRequest) decode(pd packetDecoder, version int16) (err error) { + return nil +} + +func (a *ApiVersionsRequest) key() int16 { + return 18 +} + +func (a *ApiVersionsRequest) version() int16 { + return 0 +} + +func (a *ApiVersionsRequest) headerVersion() int16 { + return 1 +} + +func (a *ApiVersionsRequest) requiredVersion() KafkaVersion { + return V0_10_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/api_versions_response.go b/vendor/github.com/Shopify/sarama/api_versions_response.go new file mode 100644 index 00000000000..0e72e3926a9 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/api_versions_response.go @@ -0,0 +1,93 @@ +package sarama + +// ApiVersionsResponseBlock is an api version response block type +type ApiVersionsResponseBlock struct { + ApiKey int16 + MinVersion int16 + MaxVersion int16 +} + +func (b *ApiVersionsResponseBlock) encode(pe packetEncoder) error { + pe.putInt16(b.ApiKey) + pe.putInt16(b.MinVersion) + pe.putInt16(b.MaxVersion) + return nil +} + +func (b *ApiVersionsResponseBlock) decode(pd packetDecoder) error { + var err error + + if b.ApiKey, err = pd.getInt16(); err != nil { + return err + } + + if b.MinVersion, err = pd.getInt16(); err != nil { + return err + } + + if b.MaxVersion, err = pd.getInt16(); err != nil { + return err + } + + return nil +} + +// ApiVersionsResponse is an api version response type +type ApiVersionsResponse struct { + Err KError + ApiVersions []*ApiVersionsResponseBlock +} + +func (r *ApiVersionsResponse) encode(pe packetEncoder) error { + pe.putInt16(int16(r.Err)) + if err := pe.putArrayLength(len(r.ApiVersions)); err != nil { + return err + } + for _, apiVersion := range r.ApiVersions { + if err := apiVersion.encode(pe); err != nil { + return err + } + } + return nil +} + +func (r *ApiVersionsResponse) decode(pd packetDecoder, version int16) error { + kerr, err := pd.getInt16() + if err != nil { + return err + } + + r.Err = KError(kerr) + + numBlocks, err := pd.getArrayLength() + if err != nil { + return err + } + + r.ApiVersions = make([]*ApiVersionsResponseBlock, numBlocks) + for i := 0; i < numBlocks; i++ { + block := new(ApiVersionsResponseBlock) + if err := block.decode(pd); err != nil { + return err + } + r.ApiVersions[i] = block + } + + return nil +} + +func (r *ApiVersionsResponse) key() int16 { + return 18 +} + +func (r *ApiVersionsResponse) version() int16 { + return 0 +} + +func (a *ApiVersionsResponse) headerVersion() int16 { + return 0 +} + +func (r *ApiVersionsResponse) requiredVersion() KafkaVersion { + return V0_10_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/async_producer.go b/vendor/github.com/Shopify/sarama/async_producer.go new file mode 100644 index 00000000000..5911f7b58cf --- /dev/null +++ b/vendor/github.com/Shopify/sarama/async_producer.go @@ -0,0 +1,1161 @@ +package sarama + +import ( + "encoding/binary" + "fmt" + "sync" + "time" + + 
"github.com/eapache/go-resiliency/breaker" + "github.com/eapache/queue" +) + +// AsyncProducer publishes Kafka messages using a non-blocking API. It routes messages +// to the correct broker for the provided topic-partition, refreshing metadata as appropriate, +// and parses responses for errors. You must read from the Errors() channel or the +// producer will deadlock. You must call Close() or AsyncClose() on a producer to avoid +// leaks: it will not be garbage-collected automatically when it passes out of +// scope. +type AsyncProducer interface { + + // AsyncClose triggers a shutdown of the producer. The shutdown has completed + // when both the Errors and Successes channels have been closed. When calling + // AsyncClose, you *must* continue to read from those channels in order to + // drain the results of any messages in flight. + AsyncClose() + + // Close shuts down the producer and waits for any buffered messages to be + // flushed. You must call this function before a producer object passes out of + // scope, as it may otherwise leak memory. You must call this before calling + // Close on the underlying client. + Close() error + + // Input is the input channel for the user to write messages to that they + // wish to send. + Input() chan<- *ProducerMessage + + // Successes is the success output channel back to the user when Return.Successes is + // enabled. If Return.Successes is true, you MUST read from this channel or the + // Producer will deadlock. It is suggested that you send and read messages + // together in a single select statement. + Successes() <-chan *ProducerMessage + + // Errors is the error output channel back to the user. You MUST read from this + // channel or the Producer will deadlock when the channel is full. Alternatively, + // you can set Producer.Return.Errors in your config to false, which prevents + // errors to be returned. 
+	Errors() <-chan *ProducerError
+}
+
+// transactionManager keeps the state necessary to ensure idempotent production
+type transactionManager struct {
+	producerID      int64
+	producerEpoch   int16
+	sequenceNumbers map[string]int32
+	mutex           sync.Mutex
+}
+
+const (
+	noProducerID    = -1
+	noProducerEpoch = -1
+)
+
+func (t *transactionManager) getAndIncrementSequenceNumber(topic string, partition int32) (int32, int16) {
+	key := fmt.Sprintf("%s-%d", topic, partition)
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+	sequence := t.sequenceNumbers[key]
+	t.sequenceNumbers[key] = sequence + 1
+	return sequence, t.producerEpoch
+}
+
+func (t *transactionManager) bumpEpoch() {
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+	t.producerEpoch++
+	for k := range t.sequenceNumbers {
+		t.sequenceNumbers[k] = 0
+	}
+}
+
+func (t *transactionManager) getProducerID() (int64, int16) {
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+	return t.producerID, t.producerEpoch
+}
+
+func newTransactionManager(conf *Config, client Client) (*transactionManager, error) {
+	txnmgr := &transactionManager{
+		producerID:    noProducerID,
+		producerEpoch: noProducerEpoch,
+	}
+
+	if conf.Producer.Idempotent {
+		initProducerIDResponse, err := client.InitProducerID()
+		if err != nil {
+			return nil, err
+		}
+		txnmgr.producerID = initProducerIDResponse.ProducerID
+		txnmgr.producerEpoch = initProducerIDResponse.ProducerEpoch
+		txnmgr.sequenceNumbers = make(map[string]int32)
+		txnmgr.mutex = sync.Mutex{}
+
+		Logger.Printf("Obtained a ProducerId: %d and ProducerEpoch: %d\n", txnmgr.producerID, txnmgr.producerEpoch)
+	}
+
+	return txnmgr, nil
+}
+
+type asyncProducer struct {
+	client Client
+	conf   *Config
+
+	errors                    chan *ProducerError
+	input, successes, retries chan *ProducerMessage
+	inFlight                  sync.WaitGroup
+
+	brokers    map[*Broker]*brokerProducer
+	brokerRefs map[*brokerProducer]int
+	brokerLock sync.Mutex
+
+	txnmgr *transactionManager
+}
+
+// NewAsyncProducer creates a new AsyncProducer using the given broker addresses and configuration.
+func NewAsyncProducer(addrs []string, conf *Config) (AsyncProducer, error) {
+	client, err := NewClient(addrs, conf)
+	if err != nil {
+		return nil, err
+	}
+	return newAsyncProducer(client)
+}
+
+// NewAsyncProducerFromClient creates a new Producer using the given client. It is still
+// necessary to call Close() on the underlying client when shutting down this producer.
+func NewAsyncProducerFromClient(client Client) (AsyncProducer, error) {
+	// For a client passed in by the caller, make sure we don't
+	// call Close() on it.
+ cli := &nopCloserClient{client} + return newAsyncProducer(cli) +} + +func newAsyncProducer(client Client) (AsyncProducer, error) { + // Check that we are not dealing with a closed Client before processing any other arguments + if client.Closed() { + return nil, ErrClosedClient + } + + txnmgr, err := newTransactionManager(client.Config(), client) + if err != nil { + return nil, err + } + + p := &asyncProducer{ + client: client, + conf: client.Config(), + errors: make(chan *ProducerError), + input: make(chan *ProducerMessage), + successes: make(chan *ProducerMessage), + retries: make(chan *ProducerMessage), + brokers: make(map[*Broker]*brokerProducer), + brokerRefs: make(map[*brokerProducer]int), + txnmgr: txnmgr, + } + + // launch our singleton dispatchers + go withRecover(p.dispatcher) + go withRecover(p.retryHandler) + + return p, nil +} + +type flagSet int8 + +const ( + syn flagSet = 1 << iota // first message from partitionProducer to brokerProducer + fin // final message from partitionProducer to brokerProducer and back + shutdown // start the shutdown process +) + +// ProducerMessage is the collection of elements passed to the Producer in order to send a message. +type ProducerMessage struct { + Topic string // The Kafka topic for this message. + // The partitioning key for this message. Pre-existing Encoders include + // StringEncoder and ByteEncoder. + Key Encoder + // The actual message to store in Kafka. Pre-existing Encoders include + // StringEncoder and ByteEncoder. + Value Encoder + + // The headers are key-value pairs that are transparently passed + // by Kafka between producers and consumers. + Headers []RecordHeader + + // This field is used to hold arbitrary data you wish to include so it + // will be available when receiving on the Successes and Errors channels. + // Sarama completely ignores this field and is only to be used for + // pass-through data. + Metadata interface{} + + // Below this point are filled in by the producer as the message is processed + + // Offset is the offset of the message stored on the broker. This is only + // guaranteed to be defined if the message was successfully delivered and + // RequiredAcks is not NoResponse. + Offset int64 + // Partition is the partition that the message was sent to. This is only + // guaranteed to be defined if the message was successfully delivered. + Partition int32 + // Timestamp can vary in behaviour depending on broker configuration, being + // in either one of the CreateTime or LogAppendTime modes (default CreateTime), + // and requiring version at least 0.10.0. + // + // When configured to CreateTime, the timestamp is specified by the producer + // either by explicitly setting this field, or when the message is added + // to a produce set. + // + // When configured to LogAppendTime, the timestamp assigned to the message + // by the broker. This is only guaranteed to be defined if the message was + // successfully delivered and RequiredAcks is not NoResponse. + Timestamp time.Time + + retries int + flags flagSet + expectation chan *ProducerError + sequenceNumber int32 + producerEpoch int16 + hasSequence bool +} + +const producerMessageOverhead = 26 // the metadata overhead of CRC, flags, etc. 
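+
+// A minimal end-to-end sketch of the types above (illustrative only; the broker
+// address "localhost:9092" and the topic "my_topic" are placeholder assumptions):
+//
+//	config := NewConfig()
+//	config.Producer.Return.Successes = true
+//	producer, err := NewAsyncProducer([]string{"localhost:9092"}, config)
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer producer.AsyncClose()
+//
+//	producer.Input() <- &ProducerMessage{
+//		Topic:    "my_topic",
+//		Value:    StringEncoder("hello"),
+//		Metadata: "request-42", // opaque to sarama; returned on Successes/Errors
+//	}
+//
+//	select {
+//	case msg := <-producer.Successes():
+//		fmt.Printf("stored at partition=%d offset=%d\n", msg.Partition, msg.Offset)
+//	case perr := <-producer.Errors():
+//		fmt.Println("delivery failed:", perr.Err)
+//	}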
+ +func (m *ProducerMessage) byteSize(version int) int { + var size int + if version >= 2 { + size = maximumRecordOverhead + for _, h := range m.Headers { + size += len(h.Key) + len(h.Value) + 2*binary.MaxVarintLen32 + } + } else { + size = producerMessageOverhead + } + if m.Key != nil { + size += m.Key.Length() + } + if m.Value != nil { + size += m.Value.Length() + } + return size +} + +func (m *ProducerMessage) clear() { + m.flags = 0 + m.retries = 0 + m.sequenceNumber = 0 + m.producerEpoch = 0 + m.hasSequence = false +} + +// ProducerError is the type of error generated when the producer fails to deliver a message. +// It contains the original ProducerMessage as well as the actual error value. +type ProducerError struct { + Msg *ProducerMessage + Err error +} + +func (pe ProducerError) Error() string { + return fmt.Sprintf("kafka: Failed to produce message to topic %s: %s", pe.Msg.Topic, pe.Err) +} + +func (pe ProducerError) Unwrap() error { + return pe.Err +} + +// ProducerErrors is a type that wraps a batch of "ProducerError"s and implements the Error interface. +// It can be returned from the Producer's Close method to avoid the need to manually drain the Errors channel +// when closing a producer. +type ProducerErrors []*ProducerError + +func (pe ProducerErrors) Error() string { + return fmt.Sprintf("kafka: Failed to deliver %d messages.", len(pe)) +} + +func (p *asyncProducer) Errors() <-chan *ProducerError { + return p.errors +} + +func (p *asyncProducer) Successes() <-chan *ProducerMessage { + return p.successes +} + +func (p *asyncProducer) Input() chan<- *ProducerMessage { + return p.input +} + +func (p *asyncProducer) Close() error { + p.AsyncClose() + + if p.conf.Producer.Return.Successes { + go withRecover(func() { + for range p.successes { + } + }) + } + + var errors ProducerErrors + if p.conf.Producer.Return.Errors { + for event := range p.errors { + errors = append(errors, event) + } + } else { + <-p.errors + } + + if len(errors) > 0 { + return errors + } + return nil +} + +func (p *asyncProducer) AsyncClose() { + go withRecover(p.shutdown) +} + +// singleton +// dispatches messages by topic +func (p *asyncProducer) dispatcher() { + handlers := make(map[string]chan<- *ProducerMessage) + shuttingDown := false + + for msg := range p.input { + if msg == nil { + Logger.Println("Something tried to send a nil message, it was ignored.") + continue + } + + if msg.flags&shutdown != 0 { + shuttingDown = true + p.inFlight.Done() + continue + } else if msg.retries == 0 { + if shuttingDown { + // we can't just call returnError here because that decrements the wait group, + // which hasn't been incremented yet for this message, and shouldn't be + pErr := &ProducerError{Msg: msg, Err: ErrShuttingDown} + if p.conf.Producer.Return.Errors { + p.errors <- pErr + } else { + Logger.Println(pErr) + } + continue + } + p.inFlight.Add(1) + } + + for _, interceptor := range p.conf.Producer.Interceptors { + msg.safelyApplyInterceptor(interceptor) + } + + version := 1 + if p.conf.Version.IsAtLeast(V0_11_0_0) { + version = 2 + } else if msg.Headers != nil { + p.returnError(msg, ConfigurationError("Producing headers requires Kafka at least v0.11")) + continue + } + if msg.byteSize(version) > p.conf.Producer.MaxMessageBytes { + p.returnError(msg, ErrMessageSizeTooLarge) + continue + } + + handler := handlers[msg.Topic] + if handler == nil { + handler = p.newTopicProducer(msg.Topic) + handlers[msg.Topic] = handler + } + + handler <- msg + } + + for _, handler := range handlers { + close(handler) + } +} 
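+
+// Taken together, the producer forms a pipeline of goroutines: the singleton
+// dispatcher above fans messages out to one topicProducer per topic, each
+// topicProducer picks a partition and hands off to one partitionProducer per
+// partition, and every partitionProducer feeds the brokerProducer that batches
+// requests for that partition's current leader.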
+ +// one per topic +// partitions messages, then dispatches them by partition +type topicProducer struct { + parent *asyncProducer + topic string + input <-chan *ProducerMessage + + breaker *breaker.Breaker + handlers map[int32]chan<- *ProducerMessage + partitioner Partitioner +} + +func (p *asyncProducer) newTopicProducer(topic string) chan<- *ProducerMessage { + input := make(chan *ProducerMessage, p.conf.ChannelBufferSize) + tp := &topicProducer{ + parent: p, + topic: topic, + input: input, + breaker: breaker.New(3, 1, 10*time.Second), + handlers: make(map[int32]chan<- *ProducerMessage), + partitioner: p.conf.Producer.Partitioner(topic), + } + go withRecover(tp.dispatch) + return input +} + +func (tp *topicProducer) dispatch() { + for msg := range tp.input { + if msg.retries == 0 { + if err := tp.partitionMessage(msg); err != nil { + tp.parent.returnError(msg, err) + continue + } + } + + handler := tp.handlers[msg.Partition] + if handler == nil { + handler = tp.parent.newPartitionProducer(msg.Topic, msg.Partition) + tp.handlers[msg.Partition] = handler + } + + handler <- msg + } + + for _, handler := range tp.handlers { + close(handler) + } +} + +func (tp *topicProducer) partitionMessage(msg *ProducerMessage) error { + var partitions []int32 + + err := tp.breaker.Run(func() (err error) { + requiresConsistency := false + if ep, ok := tp.partitioner.(DynamicConsistencyPartitioner); ok { + requiresConsistency = ep.MessageRequiresConsistency(msg) + } else { + requiresConsistency = tp.partitioner.RequiresConsistency() + } + + if requiresConsistency { + partitions, err = tp.parent.client.Partitions(msg.Topic) + } else { + partitions, err = tp.parent.client.WritablePartitions(msg.Topic) + } + return + }) + if err != nil { + return err + } + + numPartitions := int32(len(partitions)) + + if numPartitions == 0 { + return ErrLeaderNotAvailable + } + + choice, err := tp.partitioner.Partition(msg, numPartitions) + + if err != nil { + return err + } else if choice < 0 || choice >= numPartitions { + return ErrInvalidPartition + } + + msg.Partition = partitions[choice] + + return nil +} + +// one per partition per topic +// dispatches messages to the appropriate broker +// also responsible for maintaining message order during retries +type partitionProducer struct { + parent *asyncProducer + topic string + partition int32 + input <-chan *ProducerMessage + + leader *Broker + breaker *breaker.Breaker + brokerProducer *brokerProducer + + // highWatermark tracks the "current" retry level, which is the only one where we actually let messages through, + // all other messages get buffered in retryState[msg.retries].buf to preserve ordering + // retryState[msg.retries].expectChaser simply tracks whether we've seen a fin message for a given level (and + // therefore whether our buffer is complete and safe to flush) + highWatermark int + retryState []partitionRetryState +} + +type partitionRetryState struct { + buf []*ProducerMessage + expectChaser bool +} + +func (p *asyncProducer) newPartitionProducer(topic string, partition int32) chan<- *ProducerMessage { + input := make(chan *ProducerMessage, p.conf.ChannelBufferSize) + pp := &partitionProducer{ + parent: p, + topic: topic, + partition: partition, + input: input, + + breaker: breaker.New(3, 1, 10*time.Second), + retryState: make([]partitionRetryState, p.conf.Producer.Retry.Max+1), + } + go withRecover(pp.dispatch) + return input +} + +func (pp *partitionProducer) backoff(retries int) { + var backoff time.Duration + if 
pp.parent.conf.Producer.Retry.BackoffFunc != nil { + maxRetries := pp.parent.conf.Producer.Retry.Max + backoff = pp.parent.conf.Producer.Retry.BackoffFunc(retries, maxRetries) + } else { + backoff = pp.parent.conf.Producer.Retry.Backoff + } + if backoff > 0 { + time.Sleep(backoff) + } +} + +func (pp *partitionProducer) dispatch() { + // try to prefetch the leader; if this doesn't work, we'll do a proper call to `updateLeader` + // on the first message + pp.leader, _ = pp.parent.client.Leader(pp.topic, pp.partition) + if pp.leader != nil { + pp.brokerProducer = pp.parent.getBrokerProducer(pp.leader) + pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight + pp.brokerProducer.input <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn} + } + + defer func() { + if pp.brokerProducer != nil { + pp.parent.unrefBrokerProducer(pp.leader, pp.brokerProducer) + } + }() + + for msg := range pp.input { + if pp.brokerProducer != nil && pp.brokerProducer.abandoned != nil { + select { + case <-pp.brokerProducer.abandoned: + // a message on the abandoned channel means that our current broker selection is out of date + Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID()) + pp.parent.unrefBrokerProducer(pp.leader, pp.brokerProducer) + pp.brokerProducer = nil + time.Sleep(pp.parent.conf.Producer.Retry.Backoff) + default: + // producer connection is still open. + } + } + + if msg.retries > pp.highWatermark { + // a new, higher, retry level; handle it and then back off + pp.newHighWatermark(msg.retries) + pp.backoff(msg.retries) + } else if pp.highWatermark > 0 { + // we are retrying something (else highWatermark would be 0) but this message is not a *new* retry level + if msg.retries < pp.highWatermark { + // in fact this message is not even the current retry level, so buffer it for now (unless it's a just a fin) + if msg.flags&fin == fin { + pp.retryState[msg.retries].expectChaser = false + pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected + } else { + pp.retryState[msg.retries].buf = append(pp.retryState[msg.retries].buf, msg) + } + continue + } else if msg.flags&fin == fin { + // this message is of the current retry level (msg.retries == highWatermark) and the fin flag is set, + // meaning this retry level is done and we can go down (at least) one level and flush that + pp.retryState[pp.highWatermark].expectChaser = false + pp.flushRetryBuffers() + pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected + continue + } + } + + // if we made it this far then the current msg contains real data, and can be sent to the next goroutine + // without breaking any of our ordering guarantees + + if pp.brokerProducer == nil { + if err := pp.updateLeader(); err != nil { + pp.parent.returnError(msg, err) + pp.backoff(msg.retries) + continue + } + Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID()) + } + + // Now that we know we have a broker to actually try and send this message to, generate the sequence + // number for it. + // All messages being retried (sent or not) have already had their retry count updated + // Also, ignore "special" syn/fin messages used to sync the brokerProducer and the topicProducer. 
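+		// hasSequence is set exactly once here; retried messages (retries > 0)
+		// keep the sequence number they were originally assigned.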
+ if pp.parent.conf.Producer.Idempotent && msg.retries == 0 && msg.flags == 0 { + msg.sequenceNumber, msg.producerEpoch = pp.parent.txnmgr.getAndIncrementSequenceNumber(msg.Topic, msg.Partition) + msg.hasSequence = true + } + + pp.brokerProducer.input <- msg + } +} + +func (pp *partitionProducer) newHighWatermark(hwm int) { + Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, hwm) + pp.highWatermark = hwm + + // send off a fin so that we know when everything "in between" has made it + // back to us and we can safely flush the backlog (otherwise we risk re-ordering messages) + pp.retryState[pp.highWatermark].expectChaser = true + pp.parent.inFlight.Add(1) // we're generating a fin message; track it so we don't shut down while it's still inflight + pp.brokerProducer.input <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: fin, retries: pp.highWatermark - 1} + + // a new HWM means that our current broker selection is out of date + Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID()) + pp.parent.unrefBrokerProducer(pp.leader, pp.brokerProducer) + pp.brokerProducer = nil +} + +func (pp *partitionProducer) flushRetryBuffers() { + Logger.Printf("producer/leader/%s/%d state change to [flushing-%d]\n", pp.topic, pp.partition, pp.highWatermark) + for { + pp.highWatermark-- + + if pp.brokerProducer == nil { + if err := pp.updateLeader(); err != nil { + pp.parent.returnErrors(pp.retryState[pp.highWatermark].buf, err) + goto flushDone + } + Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID()) + } + + for _, msg := range pp.retryState[pp.highWatermark].buf { + pp.brokerProducer.input <- msg + } + + flushDone: + pp.retryState[pp.highWatermark].buf = nil + if pp.retryState[pp.highWatermark].expectChaser { + Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, pp.highWatermark) + break + } else if pp.highWatermark == 0 { + Logger.Printf("producer/leader/%s/%d state change to [normal]\n", pp.topic, pp.partition) + break + } + } +} + +func (pp *partitionProducer) updateLeader() error { + return pp.breaker.Run(func() (err error) { + if err = pp.parent.client.RefreshMetadata(pp.topic); err != nil { + return err + } + + if pp.leader, err = pp.parent.client.Leader(pp.topic, pp.partition); err != nil { + return err + } + + pp.brokerProducer = pp.parent.getBrokerProducer(pp.leader) + pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight + pp.brokerProducer.input <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn} + + return nil + }) +} + +// one per broker; also constructs an associated flusher +func (p *asyncProducer) newBrokerProducer(broker *Broker) *brokerProducer { + var ( + input = make(chan *ProducerMessage) + bridge = make(chan *produceSet) + responses = make(chan *brokerProducerResponse) + ) + + bp := &brokerProducer{ + parent: p, + broker: broker, + input: input, + output: bridge, + responses: responses, + stopchan: make(chan struct{}), + buffer: newProduceSet(p), + currentRetries: make(map[string]map[int32]error), + } + go withRecover(bp.run) + + // minimal bridge to make the network response `select`able + go withRecover(func() { + for set := range bridge { + request := set.buildRequest() + + response, err := broker.Produce(request) + + responses <- &brokerProducerResponse{ + set: set, + err: err, + res: response, + } + } 
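+		// the bridge loop above ends only after bp.output is closed during
+		// shutdown, so no further responses can arrive once we close the channel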
+ close(responses) + }) + + if p.conf.Producer.Retry.Max <= 0 { + bp.abandoned = make(chan struct{}) + } + + return bp +} + +type brokerProducerResponse struct { + set *produceSet + err error + res *ProduceResponse +} + +// groups messages together into appropriately-sized batches for sending to the broker +// handles state related to retries etc +type brokerProducer struct { + parent *asyncProducer + broker *Broker + + input chan *ProducerMessage + output chan<- *produceSet + responses <-chan *brokerProducerResponse + abandoned chan struct{} + stopchan chan struct{} + + buffer *produceSet + timer <-chan time.Time + timerFired bool + + closing error + currentRetries map[string]map[int32]error +} + +func (bp *brokerProducer) run() { + var output chan<- *produceSet + Logger.Printf("producer/broker/%d starting up\n", bp.broker.ID()) + + for { + select { + case msg, ok := <-bp.input: + if !ok { + Logger.Printf("producer/broker/%d input chan closed\n", bp.broker.ID()) + bp.shutdown() + return + } + + if msg == nil { + continue + } + + if msg.flags&syn == syn { + Logger.Printf("producer/broker/%d state change to [open] on %s/%d\n", + bp.broker.ID(), msg.Topic, msg.Partition) + if bp.currentRetries[msg.Topic] == nil { + bp.currentRetries[msg.Topic] = make(map[int32]error) + } + bp.currentRetries[msg.Topic][msg.Partition] = nil + bp.parent.inFlight.Done() + continue + } + + if reason := bp.needsRetry(msg); reason != nil { + bp.parent.retryMessage(msg, reason) + + if bp.closing == nil && msg.flags&fin == fin { + // we were retrying this partition but we can start processing again + delete(bp.currentRetries[msg.Topic], msg.Partition) + Logger.Printf("producer/broker/%d state change to [closed] on %s/%d\n", + bp.broker.ID(), msg.Topic, msg.Partition) + } + + continue + } + + if bp.buffer.wouldOverflow(msg) { + Logger.Printf("producer/broker/%d maximum request accumulated, waiting for space\n", bp.broker.ID()) + if err := bp.waitForSpace(msg, false); err != nil { + bp.parent.retryMessage(msg, err) + continue + } + } + + if bp.parent.txnmgr.producerID != noProducerID && bp.buffer.producerEpoch != msg.producerEpoch { + // The epoch was reset, need to roll the buffer over + Logger.Printf("producer/broker/%d detected epoch rollover, waiting for new buffer\n", bp.broker.ID()) + if err := bp.waitForSpace(msg, true); err != nil { + bp.parent.retryMessage(msg, err) + continue + } + } + if err := bp.buffer.add(msg); err != nil { + bp.parent.returnError(msg, err) + continue + } + + if bp.parent.conf.Producer.Flush.Frequency > 0 && bp.timer == nil { + bp.timer = time.After(bp.parent.conf.Producer.Flush.Frequency) + } + case <-bp.timer: + bp.timerFired = true + case output <- bp.buffer: + bp.rollOver() + case response, ok := <-bp.responses: + if ok { + bp.handleResponse(response) + } + case <-bp.stopchan: + Logger.Printf( + "producer/broker/%d run loop asked to stop\n", bp.broker.ID()) + return + } + + if bp.timerFired || bp.buffer.readyToFlush() { + output = bp.output + } else { + output = nil + } + } +} + +func (bp *brokerProducer) shutdown() { + for !bp.buffer.empty() { + select { + case response := <-bp.responses: + bp.handleResponse(response) + case bp.output <- bp.buffer: + bp.rollOver() + } + } + close(bp.output) + for response := range bp.responses { + bp.handleResponse(response) + } + close(bp.stopchan) + Logger.Printf("producer/broker/%d shut down\n", bp.broker.ID()) +} + +func (bp *brokerProducer) needsRetry(msg *ProducerMessage) error { + if bp.closing != nil { + return bp.closing + } + + return 
bp.currentRetries[msg.Topic][msg.Partition] +} + +func (bp *brokerProducer) waitForSpace(msg *ProducerMessage, forceRollover bool) error { + for { + select { + case response := <-bp.responses: + bp.handleResponse(response) + // handling a response can change our state, so re-check some things + if reason := bp.needsRetry(msg); reason != nil { + return reason + } else if !bp.buffer.wouldOverflow(msg) && !forceRollover { + return nil + } + case bp.output <- bp.buffer: + bp.rollOver() + return nil + } + } +} + +func (bp *brokerProducer) rollOver() { + bp.timer = nil + bp.timerFired = false + bp.buffer = newProduceSet(bp.parent) +} + +func (bp *brokerProducer) handleResponse(response *brokerProducerResponse) { + if response.err != nil { + bp.handleError(response.set, response.err) + } else { + bp.handleSuccess(response.set, response.res) + } + + if bp.buffer.empty() { + bp.rollOver() // this can happen if the response invalidated our buffer + } +} + +func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceResponse) { + // we iterate through the blocks in the request set, not the response, so that we notice + // if the response is missing a block completely + var retryTopics []string + sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) { + if response == nil { + // this only happens when RequiredAcks is NoResponse, so we have to assume success + bp.parent.returnSuccesses(pSet.msgs) + return + } + + block := response.GetBlock(topic, partition) + if block == nil { + bp.parent.returnErrors(pSet.msgs, ErrIncompleteResponse) + return + } + + switch block.Err { + // Success + case ErrNoError: + if bp.parent.conf.Version.IsAtLeast(V0_10_0_0) && !block.Timestamp.IsZero() { + for _, msg := range pSet.msgs { + msg.Timestamp = block.Timestamp + } + } + for i, msg := range pSet.msgs { + msg.Offset = block.Offset + int64(i) + } + bp.parent.returnSuccesses(pSet.msgs) + // Duplicate + case ErrDuplicateSequenceNumber: + bp.parent.returnSuccesses(pSet.msgs) + // Retriable errors + case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition, + ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend: + if bp.parent.conf.Producer.Retry.Max <= 0 { + bp.parent.abandonBrokerConnection(bp.broker) + bp.parent.returnErrors(pSet.msgs, block.Err) + } else { + retryTopics = append(retryTopics, topic) + } + // Other non-retriable errors + default: + if bp.parent.conf.Producer.Retry.Max <= 0 { + bp.parent.abandonBrokerConnection(bp.broker) + } + bp.parent.returnErrors(pSet.msgs, block.Err) + } + }) + + if len(retryTopics) > 0 { + if bp.parent.conf.Producer.Idempotent { + err := bp.parent.client.RefreshMetadata(retryTopics...) 
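+			// A failed refresh here is only logged; the retryBatch path below does
+			// its own leader lookup against the client and surfaces any error itself.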
+			if err != nil {
+				Logger.Printf("Failed refreshing metadata because of %v\n", err)
+			}
+		}
+
+		sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
+			block := response.GetBlock(topic, partition)
+			if block == nil {
+				// handled in the previous "eachPartition" loop
+				return
+			}
+
+			switch block.Err {
+			case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition,
+				ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend:
+				Logger.Printf("producer/broker/%d state change to [retrying] on %s/%d because %v\n",
+					bp.broker.ID(), topic, partition, block.Err)
+				if bp.currentRetries[topic] == nil {
+					bp.currentRetries[topic] = make(map[int32]error)
+				}
+				bp.currentRetries[topic][partition] = block.Err
+				if bp.parent.conf.Producer.Idempotent {
+					go bp.parent.retryBatch(topic, partition, pSet, block.Err)
+				} else {
+					bp.parent.retryMessages(pSet.msgs, block.Err)
+				}
+				// dropping the following messages has the side effect of incrementing their retry count
+				bp.parent.retryMessages(bp.buffer.dropPartition(topic, partition), block.Err)
+			}
+		})
+	}
+}
+
+func (p *asyncProducer) retryBatch(topic string, partition int32, pSet *partitionSet, kerr KError) {
+	Logger.Printf("Retrying batch for %v-%d because of %s\n", topic, partition, kerr)
+	produceSet := newProduceSet(p)
+	produceSet.msgs[topic] = make(map[int32]*partitionSet)
+	produceSet.msgs[topic][partition] = pSet
+	produceSet.bufferBytes += pSet.bufferBytes
+	produceSet.bufferCount += len(pSet.msgs)
+	for _, msg := range pSet.msgs {
+		if msg.retries >= p.conf.Producer.Retry.Max {
+			p.returnError(msg, kerr)
+			return
+		}
+		msg.retries++
+	}
+
+	// it's expected that a metadata refresh has been requested prior to calling retryBatch
+	leader, err := p.client.Leader(topic, partition)
+	if err != nil {
+		Logger.Printf("Failed retrying batch for %v-%d because of %v while looking up the new leader\n", topic, partition, err)
+		for _, msg := range pSet.msgs {
+			p.returnError(msg, kerr)
+		}
+		return
+	}
+	bp := p.getBrokerProducer(leader)
+	bp.output <- produceSet
+}
+
+func (bp *brokerProducer) handleError(sent *produceSet, err error) {
+	switch err.(type) {
+	case PacketEncodingError:
+		sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
+			bp.parent.returnErrors(pSet.msgs, err)
+		})
+	default:
+		Logger.Printf("producer/broker/%d state change to [closing] because %s\n", bp.broker.ID(), err)
+		bp.parent.abandonBrokerConnection(bp.broker)
+		_ = bp.broker.Close()
+		bp.closing = err
+		sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
+			bp.parent.retryMessages(pSet.msgs, err)
+		})
+		bp.buffer.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
+			bp.parent.retryMessages(pSet.msgs, err)
+		})
+		bp.rollOver()
+	}
+}
+
+// singleton
+// effectively a "bridge" between the flushers and the dispatcher in order to avoid deadlock
+// based on https://godoc.org/github.com/eapache/channels#InfiniteChannel
+func (p *asyncProducer) retryHandler() {
+	var msg *ProducerMessage
+	buf := queue.New()
+
+	for {
+		if buf.Length() == 0 {
+			msg = <-p.retries
+		} else {
+			select {
+			case msg = <-p.retries:
+			case p.input <- buf.Peek().(*ProducerMessage):
+				buf.Remove()
+				continue
+			}
+		}
+
+		if msg == nil {
+			return
+		}
+
+		buf.Add(msg)
+	}
+}
+
+// utility functions
+
+func (p *asyncProducer) shutdown() {
+	Logger.Println("Producer shutting down.")
+	p.inFlight.Add(1)
+	p.input <- &ProducerMessage{flags: shutdown}
+
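+	// Every message queued on the input channel holds a reference on p.inFlight,
+	// including the shutdown marker sent above, so the Wait below returns only
+	// once everything still in flight has been resolved as a success, error, or
+	// retry.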
+	p.inFlight.Wait()
+
+	err := p.client.Close()
+	if err != nil {
+		Logger.Println("producer/shutdown failed to close the embedded client:", err)
+	}
+
+	close(p.input)
+	close(p.retries)
+	close(p.errors)
+	close(p.successes)
+}
+
+func (p *asyncProducer) returnError(msg *ProducerMessage, err error) {
+	// We need to reset the producer ID epoch if we set a sequence number on it, because the broker
+	// will never see a message with this number, so we can never continue the sequence.
+	if msg.hasSequence {
+		Logger.Printf("producer/txnmanager rolling over epoch due to publish failure on %s/%d", msg.Topic, msg.Partition)
+		p.txnmgr.bumpEpoch()
+	}
+	msg.clear()
+	pErr := &ProducerError{Msg: msg, Err: err}
+	if p.conf.Producer.Return.Errors {
+		p.errors <- pErr
+	} else {
+		Logger.Println(pErr)
+	}
+	p.inFlight.Done()
+}
+
+func (p *asyncProducer) returnErrors(batch []*ProducerMessage, err error) {
+	for _, msg := range batch {
+		p.returnError(msg, err)
+	}
+}
+
+func (p *asyncProducer) returnSuccesses(batch []*ProducerMessage) {
+	for _, msg := range batch {
+		if p.conf.Producer.Return.Successes {
+			msg.clear()
+			p.successes <- msg
+		}
+		p.inFlight.Done()
+	}
+}
+
+func (p *asyncProducer) retryMessage(msg *ProducerMessage, err error) {
+	if msg.retries >= p.conf.Producer.Retry.Max {
+		p.returnError(msg, err)
+	} else {
+		msg.retries++
+		p.retries <- msg
+	}
+}
+
+func (p *asyncProducer) retryMessages(batch []*ProducerMessage, err error) {
+	for _, msg := range batch {
+		p.retryMessage(msg, err)
+	}
+}
+
+func (p *asyncProducer) getBrokerProducer(broker *Broker) *brokerProducer {
+	p.brokerLock.Lock()
+	defer p.brokerLock.Unlock()
+
+	bp := p.brokers[broker]
+
+	if bp == nil {
+		bp = p.newBrokerProducer(broker)
+		p.brokers[broker] = bp
+		p.brokerRefs[bp] = 0
+	}
+
+	p.brokerRefs[bp]++
+
+	return bp
+}
+
+func (p *asyncProducer) unrefBrokerProducer(broker *Broker, bp *brokerProducer) {
+	p.brokerLock.Lock()
+	defer p.brokerLock.Unlock()
+
+	p.brokerRefs[bp]--
+	if p.brokerRefs[bp] == 0 {
+		close(bp.input)
+		delete(p.brokerRefs, bp)
+
+		if p.brokers[broker] == bp {
+			delete(p.brokers, broker)
+		}
+	}
+}
+
+func (p *asyncProducer) abandonBrokerConnection(broker *Broker) {
+	p.brokerLock.Lock()
+	defer p.brokerLock.Unlock()
+
+	bc, ok := p.brokers[broker]
+	if ok && bc.abandoned != nil {
+		close(bc.abandoned)
+	}
+
+	delete(p.brokers, broker)
+}
diff --git a/vendor/github.com/Shopify/sarama/balance_strategy.go b/vendor/github.com/Shopify/sarama/balance_strategy.go
new file mode 100644
index 00000000000..9855bf44398
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/balance_strategy.go
@@ -0,0 +1,1136 @@
+package sarama
+
+import (
+	"container/heap"
+	"errors"
+	"fmt"
+	"math"
+	"sort"
+	"strings"
+)
+
+const (
+	// RangeBalanceStrategyName identifies strategies that use the range partition assignment strategy
+	RangeBalanceStrategyName = "range"
+
+	// RoundRobinBalanceStrategyName identifies strategies that use the round-robin partition assignment strategy
+	RoundRobinBalanceStrategyName = "roundrobin"
+
+	// StickyBalanceStrategyName identifies strategies that use the sticky-partition assignment strategy
+	StickyBalanceStrategyName = "sticky"
+
+	defaultGeneration = -1
+)
+
+// BalanceStrategyPlan is the result of any BalanceStrategy.Plan attempt.
+// It contains an allocation of topic/partitions by memberID in the form of
+// a `memberID -> topic -> partitions` map.
+type BalanceStrategyPlan map[string]map[string][]int32
+
+// Add assigns a topic with a number of partitions to a member.
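+// For example, plan.Add("member-1", "events", 0, 1, 2) records partitions 0, 1
+// and 2 of topic "events" against "member-1" (the names here are illustrative);
+// calling Add with no partitions is a no-op.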
+func (p BalanceStrategyPlan) Add(memberID, topic string, partitions ...int32) {
+	if len(partitions) == 0 {
+		return
+	}
+	if _, ok := p[memberID]; !ok {
+		p[memberID] = make(map[string][]int32, 1)
+	}
+	p[memberID][topic] = append(p[memberID][topic], partitions...)
+}
+
+// --------------------------------------------------------------------
+
+// BalanceStrategy is used to balance topics and partitions
+// across members of a consumer group
+type BalanceStrategy interface {
+	// Name uniquely identifies the strategy.
+	Name() string
+
+	// Plan accepts a map of `memberID -> metadata` and a map of `topic -> partitions`
+	// and returns a distribution plan.
+	Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error)
+
+	// AssignmentData returns the serialized assignment data for the specified
+	// memberID
+	AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error)
+}
+
+// --------------------------------------------------------------------
+
+// BalanceStrategyRange is the default and assigns partitions as ranges to consumer group members.
+// Example with one topic T with six partitions (0..5) and two members (M1, M2):
+//   M1: {T: [0, 1, 2]}
+//   M2: {T: [3, 4, 5]}
+var BalanceStrategyRange = &balanceStrategy{
+	name: RangeBalanceStrategyName,
+	coreFn: func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32) {
+		step := float64(len(partitions)) / float64(len(memberIDs))
+
+		for i, memberID := range memberIDs {
+			pos := float64(i)
+			min := int(math.Floor(pos*step + 0.5))
+			max := int(math.Floor((pos+1)*step + 0.5))
+			plan.Add(memberID, topic, partitions[min:max]...)
+		}
+	},
+}
+
+// BalanceStrategySticky assigns partitions to members with an attempt to preserve earlier assignments
+// while maintaining a balanced partition distribution.
+// Example with one topic T with six partitions (0..5) and two members (M1, M2):
+//   M1: {T: [0, 2, 4]}
+//   M2: {T: [1, 3, 5]}
+//
+// On reassignment with an additional consumer, you might get an assignment plan like:
+//   M1: {T: [0, 2]}
+//   M2: {T: [1, 3]}
+//   M3: {T: [4, 5]}
+//
+var BalanceStrategySticky = &stickyBalanceStrategy{}
+
+// --------------------------------------------------------------------
+
+type balanceStrategy struct {
+	name   string
+	coreFn func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32)
+}
+
+// Name implements BalanceStrategy.
+func (s *balanceStrategy) Name() string { return s.name }
+
+// Plan implements BalanceStrategy.
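+// It groups members by subscribed topic, orders each topic's members by a
+// deterministic per-topic hash so the resulting plan is stable across calls,
+// and then delegates the per-topic split to the strategy's coreFn.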
+func (s *balanceStrategy) Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error) {
+	// Build members by topic map
+	mbt := make(map[string][]string)
+	for memberID, meta := range members {
+		for _, topic := range meta.Topics {
+			mbt[topic] = append(mbt[topic], memberID)
+		}
+	}
+
+	// Sort members for each topic
+	for topic, memberIDs := range mbt {
+		sort.Sort(&balanceStrategySortable{
+			topic:     topic,
+			memberIDs: memberIDs,
+		})
+	}
+
+	// Assemble plan
+	plan := make(BalanceStrategyPlan, len(members))
+	for topic, memberIDs := range mbt {
+		s.coreFn(plan, memberIDs, topic, topics[topic])
+	}
+	return plan, nil
+}
+
+// AssignmentData returns nil; simple strategies do not require any shared assignment data.
+func (s *balanceStrategy) AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error) {
+	return nil, nil
+}
+
+type balanceStrategySortable struct {
+	topic     string
+	memberIDs []string
+}
+
+func (p balanceStrategySortable) Len() int { return len(p.memberIDs) }
+func (p balanceStrategySortable) Swap(i, j int) {
+	p.memberIDs[i], p.memberIDs[j] = p.memberIDs[j], p.memberIDs[i]
+}
+
+func (p balanceStrategySortable) Less(i, j int) bool {
+	return balanceStrategyHashValue(p.topic, p.memberIDs[i]) < balanceStrategyHashValue(p.topic, p.memberIDs[j])
+}
+
+func balanceStrategyHashValue(vv ...string) uint32 {
+	h := uint32(2166136261)
+	for _, s := range vv {
+		for _, c := range s {
+			h ^= uint32(c)
+			h *= 16777619
+		}
+	}
+	return h
+}
+
+type stickyBalanceStrategy struct {
+	movements partitionMovements
+}
+
+// Name implements BalanceStrategy.
+func (s *stickyBalanceStrategy) Name() string { return StickyBalanceStrategyName }
+
+// Plan implements BalanceStrategy.
+func (s *stickyBalanceStrategy) Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error) {
+	// track partition movements during generation of the partition assignment plan
+	s.movements = partitionMovements{
+		Movements:                 make(map[topicPartitionAssignment]consumerPair),
+		PartitionMovementsByTopic: make(map[string]map[consumerPair]map[topicPartitionAssignment]bool),
+	}
+
+	// prepopulate the current assignment state from userdata on the consumer group members
+	currentAssignment, prevAssignment, err := prepopulateCurrentAssignments(members)
+	if err != nil {
+		return nil, err
+	}
+
+	// determine if we're dealing with a completely fresh assignment, or if there's existing assignment state
+	isFreshAssignment := len(currentAssignment) == 0
+
+	// create a mapping of all current topic partitions and the consumers that can be assigned to them
+	partition2AllPotentialConsumers := make(map[topicPartitionAssignment][]string)
+	for topic, partitions := range topics {
+		for _, partition := range partitions {
+			partition2AllPotentialConsumers[topicPartitionAssignment{Topic: topic, Partition: partition}] = []string{}
+		}
+	}
+
+	// create a mapping of all consumers to all potential topic partitions that can be assigned to them
+	// also, populate the mapping of partitions to potential consumers
+	consumer2AllPotentialPartitions := make(map[string][]topicPartitionAssignment, len(members))
+	for memberID, meta := range members {
+		consumer2AllPotentialPartitions[memberID] = make([]topicPartitionAssignment, 0)
+		for _, topicSubscription := range meta.Topics {
+			// only evaluate topic subscriptions that are present in the supplied topics map
+			if _, found := topics[topicSubscription]; found {
+				for _, partition := range topics[topicSubscription] {
+					topicPartition := topicPartitionAssignment{Topic: topicSubscription, Partition: partition}
+					consumer2AllPotentialPartitions[memberID] = append(consumer2AllPotentialPartitions[memberID], topicPartition)
+					partition2AllPotentialConsumers[topicPartition] = append(partition2AllPotentialConsumers[topicPartition], memberID)
+				}
+			}
+		}
+
+		// add this consumer to currentAssignment (with an empty topic partition assignment) if it does not already exist
+		if _, exists := currentAssignment[memberID]; !exists {
+			currentAssignment[memberID] = make([]topicPartitionAssignment, 0)
+		}
+	}
+
+	// create a mapping of each partition to its current consumer, where possible
+	currentPartitionConsumers := make(map[topicPartitionAssignment]string, len(currentAssignment))
+	unvisitedPartitions := make(map[topicPartitionAssignment]bool, len(partition2AllPotentialConsumers))
+	for partition := range partition2AllPotentialConsumers {
+		unvisitedPartitions[partition] = true
+	}
+	var unassignedPartitions []topicPartitionAssignment
+	for memberID, partitions := range currentAssignment {
+		var keepPartitions []topicPartitionAssignment
+		for _, partition := range partitions {
+			// If this partition no longer exists at all, likely due to the
+			// topic being deleted, we remove the partition from the member.
+			if _, exists := partition2AllPotentialConsumers[partition]; !exists {
+				continue
+			}
+			delete(unvisitedPartitions, partition)
+			currentPartitionConsumers[partition] = memberID
+
+			if !strsContains(members[memberID].Topics, partition.Topic) {
+				unassignedPartitions = append(unassignedPartitions, partition)
+				continue
+			}
+			keepPartitions = append(keepPartitions, partition)
+		}
+		currentAssignment[memberID] = keepPartitions
+	}
+	for unvisited := range unvisitedPartitions {
+		unassignedPartitions = append(unassignedPartitions, unvisited)
+	}
+
+	// sort the topic partitions in order of priority for reassignment
+	sortedPartitions := sortPartitions(currentAssignment, prevAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions)
+
+	// at this point we have preserved all valid topic partition to consumer assignments and removed
+	// all invalid topic partitions and invalid consumers. Now we need to assign unassignedPartitions
+	// to consumers so that the topic partition assignments are as balanced as possible.
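+	// For example, if ten partitions end up spread over three surviving consumers,
+	// the goal below is a 4/3/3 split that moves as few partitions away from
+	// their previous owners as possible.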
+ + // an ascending sorted set of consumers based on how many topic partitions are already assigned to them + sortedCurrentSubscriptions := sortMemberIDsByPartitionAssignments(currentAssignment) + s.balance(currentAssignment, prevAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumers) + + // Assemble plan + plan := make(BalanceStrategyPlan, len(currentAssignment)) + for memberID, assignments := range currentAssignment { + if len(assignments) == 0 { + plan[memberID] = make(map[string][]int32) + } else { + for _, assignment := range assignments { + plan.Add(memberID, assignment.Topic, assignment.Partition) + } + } + } + return plan, nil +} + +// AssignmentData serializes the set of topics currently assigned to the +// specified member as part of the supplied balance plan +func (s *stickyBalanceStrategy) AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error) { + return encode(&StickyAssignorUserDataV1{ + Topics: topics, + Generation: generationID, + }, nil) +} + +func strsContains(s []string, value string) bool { + for _, entry := range s { + if entry == value { + return true + } + } + return false +} + +// Balance assignments across consumers for maximum fairness and stickiness. +func (s *stickyBalanceStrategy) balance(currentAssignment map[string][]topicPartitionAssignment, prevAssignment map[topicPartitionAssignment]consumerGenerationPair, sortedPartitions []topicPartitionAssignment, unassignedPartitions []topicPartitionAssignment, sortedCurrentSubscriptions []string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string, currentPartitionConsumer map[topicPartitionAssignment]string) { + initializing := false + if len(sortedCurrentSubscriptions) == 0 || len(currentAssignment[sortedCurrentSubscriptions[0]]) == 0 { + initializing = true + } + + // assign all unassigned partitions + for _, partition := range unassignedPartitions { + // skip if there is no potential consumer for the partition + if len(partition2AllPotentialConsumers[partition]) == 0 { + continue + } + sortedCurrentSubscriptions = assignPartition(partition, sortedCurrentSubscriptions, currentAssignment, consumer2AllPotentialPartitions, currentPartitionConsumer) + } + + // narrow down the reassignment scope to only those partitions that can actually be reassigned + for partition := range partition2AllPotentialConsumers { + if !canTopicPartitionParticipateInReassignment(partition, partition2AllPotentialConsumers) { + sortedPartitions = removeTopicPartitionFromMemberAssignments(sortedPartitions, partition) + } + } + + // narrow down the reassignment scope to only those consumers that are subject to reassignment + fixedAssignments := make(map[string][]topicPartitionAssignment) + for memberID := range consumer2AllPotentialPartitions { + if !canConsumerParticipateInReassignment(memberID, currentAssignment, consumer2AllPotentialPartitions, partition2AllPotentialConsumers) { + fixedAssignments[memberID] = currentAssignment[memberID] + delete(currentAssignment, memberID) + sortedCurrentSubscriptions = sortMemberIDsByPartitionAssignments(currentAssignment) + } + } + + // create a deep copy of the current assignment so we can revert to it if we do not get a more balanced assignment later + preBalanceAssignment := deepCopyAssignment(currentAssignment) + preBalancePartitionConsumers := 
make(map[topicPartitionAssignment]string, len(currentPartitionConsumer)) + for k, v := range currentPartitionConsumer { + preBalancePartitionConsumers[k] = v + } + + reassignmentPerformed := s.performReassignments(sortedPartitions, currentAssignment, prevAssignment, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer) + + // if we are not preserving existing assignments and we have made changes to the current assignment + // make sure we are getting a more balanced assignment; otherwise, revert to previous assignment + if !initializing && reassignmentPerformed && getBalanceScore(currentAssignment) >= getBalanceScore(preBalanceAssignment) { + currentAssignment = deepCopyAssignment(preBalanceAssignment) + currentPartitionConsumer = make(map[topicPartitionAssignment]string, len(preBalancePartitionConsumers)) + for k, v := range preBalancePartitionConsumers { + currentPartitionConsumer[k] = v + } + } + + // add the fixed assignments (those that could not change) back + for consumer, assignments := range fixedAssignments { + currentAssignment[consumer] = assignments + } +} + +// BalanceStrategyRoundRobin assigns partitions to members in alternating order. +// For example, there are two topics (t0, t1) and two consumer (m0, m1), and each topic has three partitions (p0, p1, p2): +// M0: [t0p0, t0p2, t1p1] +// M1: [t0p1, t1p0, t1p2] +var BalanceStrategyRoundRobin = new(roundRobinBalancer) + +type roundRobinBalancer struct{} + +func (b *roundRobinBalancer) Name() string { + return RoundRobinBalanceStrategyName +} + +func (b *roundRobinBalancer) Plan(memberAndMetadata map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error) { + if len(memberAndMetadata) == 0 || len(topics) == 0 { + return nil, errors.New("members and topics are not provided") + } + // sort partitions + var topicPartitions []topicAndPartition + for topic, partitions := range topics { + for _, partition := range partitions { + topicPartitions = append(topicPartitions, topicAndPartition{topic: topic, partition: partition}) + } + } + sort.SliceStable(topicPartitions, func(i, j int) bool { + pi := topicPartitions[i] + pj := topicPartitions[j] + return pi.comparedValue() < pj.comparedValue() + }) + + // sort members + var members []memberAndTopic + for memberID, meta := range memberAndMetadata { + m := memberAndTopic{ + memberID: memberID, + topics: make(map[string]struct{}), + } + for _, t := range meta.Topics { + m.topics[t] = struct{}{} + } + members = append(members, m) + } + sort.SliceStable(members, func(i, j int) bool { + mi := members[i] + mj := members[j] + return mi.memberID < mj.memberID + }) + + // assign partitions + plan := make(BalanceStrategyPlan, len(members)) + i := 0 + n := len(members) + for _, tp := range topicPartitions { + m := members[i%n] + for !m.hasTopic(tp.topic) { + i++ + m = members[i%n] + } + plan.Add(m.memberID, tp.topic, tp.partition) + i++ + } + return plan, nil +} + +func (b *roundRobinBalancer) AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error) { + return nil, nil // do nothing for now +} + +type topicAndPartition struct { + topic string + partition int32 +} + +func (tp *topicAndPartition) comparedValue() string { + return fmt.Sprintf("%s-%d", tp.topic, tp.partition) +} + +type memberAndTopic struct { + memberID string + topics map[string]struct{} +} + +func (m *memberAndTopic) hasTopic(topic string) bool { + _, isExist := m.topics[topic] + return isExist 
+}
+
+// Calculate the balance score of the given assignment, as the sum of the differences in the
+// number of partitions assigned to each pair of consumers.
+// A perfectly balanced assignment (with all consumers getting the same number of partitions) has a balance score of 0.
+// A lower balance score indicates a more balanced assignment.
+func getBalanceScore(assignment map[string][]topicPartitionAssignment) int {
+	consumer2AssignmentSize := make(map[string]int, len(assignment))
+	for memberID, partitions := range assignment {
+		consumer2AssignmentSize[memberID] = len(partitions)
+	}
+
+	var score float64
+	for memberID, consumerAssignmentSize := range consumer2AssignmentSize {
+		delete(consumer2AssignmentSize, memberID)
+		for _, otherConsumerAssignmentSize := range consumer2AssignmentSize {
+			score += math.Abs(float64(consumerAssignmentSize - otherConsumerAssignmentSize))
+		}
+	}
+	return int(score)
+}
+
+// Determine whether the current assignment plan is balanced.
+func isBalanced(currentAssignment map[string][]topicPartitionAssignment, allSubscriptions map[string][]topicPartitionAssignment) bool {
+	sortedCurrentSubscriptions := sortMemberIDsByPartitionAssignments(currentAssignment)
+	min := len(currentAssignment[sortedCurrentSubscriptions[0]])
+	max := len(currentAssignment[sortedCurrentSubscriptions[len(sortedCurrentSubscriptions)-1]])
+	if min >= max-1 {
+		// if the minimum and maximum numbers of partitions assigned to consumers differ by at most one, return true
+		return true
+	}
+
+	// create a mapping from partitions to the consumer assigned to them
+	allPartitions := make(map[topicPartitionAssignment]string)
+	for memberID, partitions := range currentAssignment {
+		for _, partition := range partitions {
+			if _, exists := allPartitions[partition]; exists {
+				Logger.Printf("Topic %s Partition %d is assigned to more than one consumer", partition.Topic, partition.Partition)
+			}
+			allPartitions[partition] = memberID
+		}
+	}
+
+	// for each consumer that does not yet have all the topic partitions it could get, make sure that
+	// none of the topic partitions it could have (but does not) can be moved to it without breaking the balance
+	for _, memberID := range sortedCurrentSubscriptions {
+		consumerPartitions := currentAssignment[memberID]
+		consumerPartitionCount := len(consumerPartitions)
+
+		// skip if this consumer already has all the topic partitions it can get
+		if consumerPartitionCount == len(allSubscriptions[memberID]) {
+			continue
+		}
+
+		// otherwise make sure it cannot get any more
+		potentialTopicPartitions := allSubscriptions[memberID]
+		for _, partition := range potentialTopicPartitions {
+			if !memberAssignmentsIncludeTopicPartition(currentAssignment[memberID], partition) {
+				otherConsumer := allPartitions[partition]
+				otherConsumerPartitionCount := len(currentAssignment[otherConsumer])
+				if consumerPartitionCount < otherConsumerPartitionCount {
+					return false
+				}
+			}
+		}
+	}
+	return true
+}
+
+// Reassign all topic partitions that need reassignment until balanced.
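+// A partition is only moved when its current owner holds at least two more
+// partitions than some other eligible consumer, and the loop repeats until a
+// full pass makes no further moves.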
+func (s *stickyBalanceStrategy) performReassignments(reassignablePartitions []topicPartitionAssignment, currentAssignment map[string][]topicPartitionAssignment, prevAssignment map[topicPartitionAssignment]consumerGenerationPair, sortedCurrentSubscriptions []string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string, currentPartitionConsumer map[topicPartitionAssignment]string) bool {
+	reassignmentPerformed := false
+	modified := false
+
+	// repeat reassignment until no partition can be moved to improve the balance
+	for {
+		modified = false
+		// reassign all reassignable partitions (starting from the partition with the fewest potential consumers)
+		// until the full list is processed or a balance is achieved
+		for _, partition := range reassignablePartitions {
+			if isBalanced(currentAssignment, consumer2AllPotentialPartitions) {
+				break
+			}
+
+			// the partition must have at least two consumers
+			if len(partition2AllPotentialConsumers[partition]) <= 1 {
+				Logger.Printf("Expected more than one potential consumer for topic %s partition %d", partition.Topic, partition.Partition)
+			}
+
+			// the partition must have a consumer
+			consumer := currentPartitionConsumer[partition]
+			if consumer == "" {
+				Logger.Printf("Expected topic %s partition %d to be assigned to a consumer", partition.Topic, partition.Partition)
+			}
+
+			if _, exists := prevAssignment[partition]; exists {
+				if len(currentAssignment[consumer]) > (len(currentAssignment[prevAssignment[partition].MemberID]) + 1) {
+					sortedCurrentSubscriptions = s.reassignPartition(partition, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer, prevAssignment[partition].MemberID)
+					reassignmentPerformed = true
+					modified = true
+					continue
+				}
+			}
+
+			// check if a better-suited consumer exists for the partition; if so, reassign it
+			for _, otherConsumer := range partition2AllPotentialConsumers[partition] {
+				if len(currentAssignment[consumer]) > (len(currentAssignment[otherConsumer]) + 1) {
+					sortedCurrentSubscriptions = s.reassignPartitionToNewConsumer(partition, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer, consumer2AllPotentialPartitions)
+					reassignmentPerformed = true
+					modified = true
+					break
+				}
+			}
+		}
+		if !modified {
+			return reassignmentPerformed
+		}
+	}
+}
+
+// Identify a new consumer for a topic partition and reassign it.
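+// Candidates are scanned in ascending order of current load, so the first
+// consumer allowed to own the partition is also the least loaded one.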
+func (s *stickyBalanceStrategy) reassignPartitionToNewConsumer(partition topicPartitionAssignment, currentAssignment map[string][]topicPartitionAssignment, sortedCurrentSubscriptions []string, currentPartitionConsumer map[topicPartitionAssignment]string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment) []string { + for _, anotherConsumer := range sortedCurrentSubscriptions { + if memberAssignmentsIncludeTopicPartition(consumer2AllPotentialPartitions[anotherConsumer], partition) { + return s.reassignPartition(partition, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer, anotherConsumer) + } + } + return sortedCurrentSubscriptions +} + +// Reassign a specific partition to a new consumer +func (s *stickyBalanceStrategy) reassignPartition(partition topicPartitionAssignment, currentAssignment map[string][]topicPartitionAssignment, sortedCurrentSubscriptions []string, currentPartitionConsumer map[topicPartitionAssignment]string, newConsumer string) []string { + consumer := currentPartitionConsumer[partition] + // find the correct partition movement considering the stickiness requirement + partitionToBeMoved := s.movements.getTheActualPartitionToBeMoved(partition, consumer, newConsumer) + return s.processPartitionMovement(partitionToBeMoved, newConsumer, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer) +} + +// Track the movement of a topic partition after assignment +func (s *stickyBalanceStrategy) processPartitionMovement(partition topicPartitionAssignment, newConsumer string, currentAssignment map[string][]topicPartitionAssignment, sortedCurrentSubscriptions []string, currentPartitionConsumer map[topicPartitionAssignment]string) []string { + oldConsumer := currentPartitionConsumer[partition] + s.movements.movePartition(partition, oldConsumer, newConsumer) + + currentAssignment[oldConsumer] = removeTopicPartitionFromMemberAssignments(currentAssignment[oldConsumer], partition) + currentAssignment[newConsumer] = append(currentAssignment[newConsumer], partition) + currentPartitionConsumer[partition] = newConsumer + return sortMemberIDsByPartitionAssignments(currentAssignment) +} + +// Determine whether a specific consumer should be considered for topic partition assignment. +func canConsumerParticipateInReassignment(memberID string, currentAssignment map[string][]topicPartitionAssignment, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string) bool { + currentPartitions := currentAssignment[memberID] + currentAssignmentSize := len(currentPartitions) + maxAssignmentSize := len(consumer2AllPotentialPartitions[memberID]) + if currentAssignmentSize > maxAssignmentSize { + Logger.Printf("The consumer %s is assigned more partitions than the maximum possible", memberID) + } + if currentAssignmentSize < maxAssignmentSize { + // if a consumer is not assigned all its potential partitions it is subject to reassignment + return true + } + for _, partition := range currentPartitions { + if canTopicPartitionParticipateInReassignment(partition, partition2AllPotentialConsumers) { + return true + } + } + return false +} + +// Only consider reassigning those topic partitions that have two or more potential consumers. 
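+// A partition with zero or one potential consumers has nowhere to move, so
+// excluding it keeps the reassignment scope as small as possible.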
+func canTopicPartitionParticipateInReassignment(partition topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string) bool { + return len(partition2AllPotentialConsumers[partition]) >= 2 +} + +// The assignment should improve the overall balance of the partition assignments to consumers. +func assignPartition(partition topicPartitionAssignment, sortedCurrentSubscriptions []string, currentAssignment map[string][]topicPartitionAssignment, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, currentPartitionConsumer map[topicPartitionAssignment]string) []string { + for _, memberID := range sortedCurrentSubscriptions { + if memberAssignmentsIncludeTopicPartition(consumer2AllPotentialPartitions[memberID], partition) { + currentAssignment[memberID] = append(currentAssignment[memberID], partition) + currentPartitionConsumer[partition] = memberID + break + } + } + return sortMemberIDsByPartitionAssignments(currentAssignment) +} + +// Deserialize topic partition assignment data to aid with creation of a sticky assignment. +func deserializeTopicPartitionAssignment(userDataBytes []byte) (StickyAssignorUserData, error) { + userDataV1 := &StickyAssignorUserDataV1{} + if err := decode(userDataBytes, userDataV1); err != nil { + userDataV0 := &StickyAssignorUserDataV0{} + if err := decode(userDataBytes, userDataV0); err != nil { + return nil, err + } + return userDataV0, nil + } + return userDataV1, nil +} + +// filterAssignedPartitions returns a map of consumer group members to their list of previously-assigned topic partitions, limited +// to those topic partitions currently reported by the Kafka cluster. +func filterAssignedPartitions(currentAssignment map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string) map[string][]topicPartitionAssignment { + assignments := deepCopyAssignment(currentAssignment) + for memberID, partitions := range assignments { + // perform in-place filtering + i := 0 + for _, partition := range partitions { + if _, exists := partition2AllPotentialConsumers[partition]; exists { + partitions[i] = partition + i++ + } + } + assignments[memberID] = partitions[:i] + } + return assignments +} + +func removeTopicPartitionFromMemberAssignments(assignments []topicPartitionAssignment, topic topicPartitionAssignment) []topicPartitionAssignment { + for i, assignment := range assignments { + if assignment == topic { + return append(assignments[:i], assignments[i+1:]...) 
+		}
+	}
+	return assignments
+}
+
+func memberAssignmentsIncludeTopicPartition(assignments []topicPartitionAssignment, topic topicPartitionAssignment) bool {
+	for _, assignment := range assignments {
+		if assignment == topic {
+			return true
+		}
+	}
+	return false
+}
+
+func sortPartitions(currentAssignment map[string][]topicPartitionAssignment, partitionsWithADifferentPreviousAssignment map[topicPartitionAssignment]consumerGenerationPair, isFreshAssignment bool, partition2AllPotentialConsumers map[topicPartitionAssignment][]string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment) []topicPartitionAssignment {
+	unassignedPartitions := make(map[topicPartitionAssignment]bool, len(partition2AllPotentialConsumers))
+	for partition := range partition2AllPotentialConsumers {
+		unassignedPartitions[partition] = true
+	}
+
+	sortedPartitions := make([]topicPartitionAssignment, 0)
+	if !isFreshAssignment && areSubscriptionsIdentical(partition2AllPotentialConsumers, consumer2AllPotentialPartitions) {
+		// if this is a reassignment and the subscriptions are identical (all consumers can consume from all topics),
+		// then we simply list partitions in a round-robin fashion (from the consumers with
+		// the most assigned partitions to those with the least)
+		assignments := filterAssignedPartitions(currentAssignment, partition2AllPotentialConsumers)
+
+		// use a priority queue to evaluate consumer group members in descending order based on
+		// the number of topic partition assignments (i.e. consumers with the most assignments first)
+		pq := make(assignmentPriorityQueue, len(assignments))
+		i := 0
+		for consumerID, consumerAssignments := range assignments {
+			pq[i] = &consumerGroupMember{
+				id:          consumerID,
+				assignments: consumerAssignments,
+			}
+			i++
+		}
+		heap.Init(&pq)
+
+		for {
+			// loop until no consumer-group members remain
+			if pq.Len() == 0 {
+				break
+			}
+			member := pq[0]
+
+			// partitions that were assigned to a different consumer last time
+			var prevPartitionIndex int
+			for i, partition := range member.assignments {
+				if _, exists := partitionsWithADifferentPreviousAssignment[partition]; exists {
+					prevPartitionIndex = i
+					break
+				}
+			}
+
+			if len(member.assignments) > 0 {
+				partition := member.assignments[prevPartitionIndex]
+				sortedPartitions = append(sortedPartitions, partition)
+				delete(unassignedPartitions, partition)
+				if prevPartitionIndex == 0 {
+					member.assignments = member.assignments[1:]
+				} else {
+					member.assignments = append(member.assignments[:prevPartitionIndex], member.assignments[prevPartitionIndex+1:]...)
+				}
+				heap.Fix(&pq, 0)
+			} else {
+				heap.Pop(&pq)
+			}
+		}
+
+		for partition := range unassignedPartitions {
+			sortedPartitions = append(sortedPartitions, partition)
+		}
+	} else {
+		// an ascending sorted set of topic partitions based on how many consumers can potentially use them
+		sortedPartitions = sortPartitionsByPotentialConsumerAssignments(partition2AllPotentialConsumers)
+	}
+	return sortedPartitions
+}
+
+func sortMemberIDsByPartitionAssignments(assignments map[string][]topicPartitionAssignment) []string {
+	// sort the members by the number of partition assignments in ascending order
+	sortedMemberIDs := make([]string, 0, len(assignments))
+	for memberID := range assignments {
+		sortedMemberIDs = append(sortedMemberIDs, memberID)
+	}
+	sort.SliceStable(sortedMemberIDs, func(i, j int) bool {
+		ret := len(assignments[sortedMemberIDs[i]]) - len(assignments[sortedMemberIDs[j]])
+		if ret == 0 {
+			return sortedMemberIDs[i] < sortedMemberIDs[j]
+		}
+		return len(assignments[sortedMemberIDs[i]]) < len(assignments[sortedMemberIDs[j]])
+	})
+	return sortedMemberIDs
+}
+
+func sortPartitionsByPotentialConsumerAssignments(partition2AllPotentialConsumers map[topicPartitionAssignment][]string) []topicPartitionAssignment {
+	// sort the partitions in ascending order by the number of consumers that can potentially use them
+	sortedPartitionIDs := make([]topicPartitionAssignment, len(partition2AllPotentialConsumers))
+	i := 0
+	for partition := range partition2AllPotentialConsumers {
+		sortedPartitionIDs[i] = partition
+		i++
+	}
+	sort.Slice(sortedPartitionIDs, func(i, j int) bool {
+		if len(partition2AllPotentialConsumers[sortedPartitionIDs[i]]) == len(partition2AllPotentialConsumers[sortedPartitionIDs[j]]) {
+			ret := strings.Compare(sortedPartitionIDs[i].Topic, sortedPartitionIDs[j].Topic)
+			if ret == 0 {
+				return sortedPartitionIDs[i].Partition < sortedPartitionIDs[j].Partition
+			}
+			return ret < 0
+		}
+		return len(partition2AllPotentialConsumers[sortedPartitionIDs[i]]) < len(partition2AllPotentialConsumers[sortedPartitionIDs[j]])
+	})
+	return sortedPartitionIDs
+}
+
+func deepCopyAssignment(assignment map[string][]topicPartitionAssignment) map[string][]topicPartitionAssignment {
+	m := make(map[string][]topicPartitionAssignment, len(assignment))
+	for memberID, subscriptions := range assignment {
+		m[memberID] = append(subscriptions[:0:0], subscriptions...)
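+		// subscriptions[:0:0] is a zero-length, zero-capacity view, so append is
+		// forced to allocate a fresh backing array: an idiomatic slice deep copy.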
+ } + return m +} + +func areSubscriptionsIdentical(partition2AllPotentialConsumers map[topicPartitionAssignment][]string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment) bool { + curMembers := make(map[string]int) + for _, cur := range partition2AllPotentialConsumers { + if len(curMembers) == 0 { + for _, curMembersElem := range cur { + curMembers[curMembersElem]++ + } + continue + } + + if len(curMembers) != len(cur) { + return false + } + + yMap := make(map[string]int) + for _, yElem := range cur { + yMap[yElem]++ + } + + for curMembersMapKey, curMembersMapVal := range curMembers { + if yMap[curMembersMapKey] != curMembersMapVal { + return false + } + } + } + + curPartitions := make(map[topicPartitionAssignment]int) + for _, cur := range consumer2AllPotentialPartitions { + if len(curPartitions) == 0 { + for _, curPartitionElem := range cur { + curPartitions[curPartitionElem]++ + } + continue + } + + if len(curPartitions) != len(cur) { + return false + } + + yMap := make(map[topicPartitionAssignment]int) + for _, yElem := range cur { + yMap[yElem]++ + } + + for curMembersMapKey, curMembersMapVal := range curPartitions { + if yMap[curMembersMapKey] != curMembersMapVal { + return false + } + } + } + return true +} + +// We need to process subscriptions' user data with each consumer's reported generation in mind +// higher generations overwrite lower generations in case of a conflict +// note that a conflict could exist only if user data is for different generations +func prepopulateCurrentAssignments(members map[string]ConsumerGroupMemberMetadata) (map[string][]topicPartitionAssignment, map[topicPartitionAssignment]consumerGenerationPair, error) { + currentAssignment := make(map[string][]topicPartitionAssignment) + prevAssignment := make(map[topicPartitionAssignment]consumerGenerationPair) + + // for each partition we create a sorted map of its consumers by generation + sortedPartitionConsumersByGeneration := make(map[topicPartitionAssignment]map[int]string) + for memberID, meta := range members { + consumerUserData, err := deserializeTopicPartitionAssignment(meta.UserData) + if err != nil { + return nil, nil, err + } + for _, partition := range consumerUserData.partitions() { + if consumers, exists := sortedPartitionConsumersByGeneration[partition]; exists { + if consumerUserData.hasGeneration() { + if _, generationExists := consumers[consumerUserData.generation()]; generationExists { + // same partition is assigned to two consumers during the same rebalance. 
+ // log a warning and skip this record + Logger.Printf("Topic %s Partition %d is assigned to multiple consumers following sticky assignment generation %d", partition.Topic, partition.Partition, consumerUserData.generation()) + continue + } else { + consumers[consumerUserData.generation()] = memberID + } + } else { + consumers[defaultGeneration] = memberID + } + } else { + generation := defaultGeneration + if consumerUserData.hasGeneration() { + generation = consumerUserData.generation() + } + sortedPartitionConsumersByGeneration[partition] = map[int]string{generation: memberID} + } + } + } + + // prevAssignment holds the prior ConsumerGenerationPair (before current) of each partition + // current and previous consumers are the last two consumers of each partition in the above sorted map + for partition, consumers := range sortedPartitionConsumersByGeneration { + // sort consumers by generation in decreasing order + var generations []int + for generation := range consumers { + generations = append(generations, generation) + } + sort.Sort(sort.Reverse(sort.IntSlice(generations))) + + consumer := consumers[generations[0]] + if _, exists := currentAssignment[consumer]; !exists { + currentAssignment[consumer] = []topicPartitionAssignment{partition} + } else { + currentAssignment[consumer] = append(currentAssignment[consumer], partition) + } + + // check for previous assignment, if any + if len(generations) > 1 { + prevAssignment[partition] = consumerGenerationPair{ + MemberID: consumers[generations[1]], + Generation: generations[1], + } + } + } + return currentAssignment, prevAssignment, nil +} + +type consumerGenerationPair struct { + MemberID string + Generation int +} + +// consumerPair represents a pair of Kafka consumer ids involved in a partition reassignment. +type consumerPair struct { + SrcMemberID string + DstMemberID string +} + +// partitionMovements maintains some data structures to simplify lookup of partition movements among consumers. 
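+// Movements records, per partition, the (source, destination) consumer pair of
+// its most recent move, while PartitionMovementsByTopic indexes the same moves
+// per topic so reverse movements and cycles can be detected cheaply.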
+type partitionMovements struct { + PartitionMovementsByTopic map[string]map[consumerPair]map[topicPartitionAssignment]bool + Movements map[topicPartitionAssignment]consumerPair +} + +func (p *partitionMovements) removeMovementRecordOfPartition(partition topicPartitionAssignment) consumerPair { + pair := p.Movements[partition] + delete(p.Movements, partition) + + partitionMovementsForThisTopic := p.PartitionMovementsByTopic[partition.Topic] + delete(partitionMovementsForThisTopic[pair], partition) + if len(partitionMovementsForThisTopic[pair]) == 0 { + delete(partitionMovementsForThisTopic, pair) + } + if len(p.PartitionMovementsByTopic[partition.Topic]) == 0 { + delete(p.PartitionMovementsByTopic, partition.Topic) + } + return pair +} + +func (p *partitionMovements) addPartitionMovementRecord(partition topicPartitionAssignment, pair consumerPair) { + p.Movements[partition] = pair + if _, exists := p.PartitionMovementsByTopic[partition.Topic]; !exists { + p.PartitionMovementsByTopic[partition.Topic] = make(map[consumerPair]map[topicPartitionAssignment]bool) + } + partitionMovementsForThisTopic := p.PartitionMovementsByTopic[partition.Topic] + if _, exists := partitionMovementsForThisTopic[pair]; !exists { + partitionMovementsForThisTopic[pair] = make(map[topicPartitionAssignment]bool) + } + partitionMovementsForThisTopic[pair][partition] = true +} + +func (p *partitionMovements) movePartition(partition topicPartitionAssignment, oldConsumer, newConsumer string) { + pair := consumerPair{ + SrcMemberID: oldConsumer, + DstMemberID: newConsumer, + } + if _, exists := p.Movements[partition]; exists { + // this partition has previously moved + existingPair := p.removeMovementRecordOfPartition(partition) + if existingPair.DstMemberID != oldConsumer { + Logger.Printf("Existing pair DstMemberID %s was not equal to the oldConsumer ID %s", existingPair.DstMemberID, oldConsumer) + } + if existingPair.SrcMemberID != newConsumer { + // the partition is not moving back to its previous consumer + p.addPartitionMovementRecord(partition, consumerPair{ + SrcMemberID: existingPair.SrcMemberID, + DstMemberID: newConsumer, + }) + } + } else { + p.addPartitionMovementRecord(partition, pair) + } +} + +func (p *partitionMovements) getTheActualPartitionToBeMoved(partition topicPartitionAssignment, oldConsumer, newConsumer string) topicPartitionAssignment { + if _, exists := p.PartitionMovementsByTopic[partition.Topic]; !exists { + return partition + } + if _, exists := p.Movements[partition]; exists { + // this partition has previously moved + if oldConsumer != p.Movements[partition].DstMemberID { + Logger.Printf("Partition movement DstMemberID %s was not equal to the oldConsumer ID %s", p.Movements[partition].DstMemberID, oldConsumer) + } + oldConsumer = p.Movements[partition].SrcMemberID + } + + partitionMovementsForThisTopic := p.PartitionMovementsByTopic[partition.Topic] + reversePair := consumerPair{ + SrcMemberID: newConsumer, + DstMemberID: oldConsumer, + } + if _, exists := partitionMovementsForThisTopic[reversePair]; !exists { + return partition + } + var reversePairPartition topicPartitionAssignment + for otherPartition := range partitionMovementsForThisTopic[reversePair] { + reversePairPartition = otherPartition + } + return reversePairPartition +} + +func (p *partitionMovements) isLinked(src, dst string, pairs []consumerPair, currentPath []string) ([]string, bool) { + if src == dst { + return currentPath, false + } + if len(pairs) == 0 { + return currentPath, false + } + for _, pair := range pairs { + if 
src == pair.SrcMemberID && dst == pair.DstMemberID {
+			currentPath = append(currentPath, src, dst)
+			return currentPath, true
+		}
+	}
+
+	for _, pair := range pairs {
+		if pair.SrcMemberID == src {
+			// create a deep copy of the pairs, excluding the current pair
+			reducedSet := make([]consumerPair, len(pairs)-1)
+			i := 0
+			for _, p := range pairs {
+				if p != pair {
+					reducedSet[i] = p
+					i++
+				}
+			}
+
+			currentPath = append(currentPath, pair.SrcMemberID)
+			return p.isLinked(pair.DstMemberID, dst, reducedSet, currentPath)
+		}
+	}
+	return currentPath, false
+}
+
+func (p *partitionMovements) in(cycle []string, cycles [][]string) bool {
+	superCycle := make([]string, len(cycle)-1)
+	for i := 0; i < len(cycle)-1; i++ {
+		superCycle[i] = cycle[i]
+	}
+	superCycle = append(superCycle, cycle...)
+	for _, foundCycle := range cycles {
+		if len(foundCycle) == len(cycle) && indexOfSubList(superCycle, foundCycle) != -1 {
+			return true
+		}
+	}
+	return false
+}
+
+func (p *partitionMovements) hasCycles(pairs []consumerPair) bool {
+	cycles := make([][]string, 0)
+	for _, pair := range pairs {
+		// create a deep copy of the pairs, excluding the current pair
+		reducedPairs := make([]consumerPair, len(pairs)-1)
+		i := 0
+		for _, p := range pairs {
+			if p != pair {
+				reducedPairs[i] = p
+				i++
+			}
+		}
+		if path, linked := p.isLinked(pair.DstMemberID, pair.SrcMemberID, reducedPairs, []string{pair.SrcMemberID}); linked {
+			if !p.in(path, cycles) {
+				cycles = append(cycles, path)
+				Logger.Printf("A cycle of length %d was found: %v", len(path)-1, path)
+			}
+		}
+	}
+
+	// for now we want to make sure there are no partition movements of the same topic between a pair of consumers.
+	// the odds of finding a cycle among more than two consumers seem to be low enough (according to various randomized
+	// tests with the given sticky algorithm) that it is not worth the added complexity of handling those cases.
+	for _, cycle := range cycles {
+		if len(cycle) == 3 {
+			return true
+		}
+	}
+	return false
+}
+
+func (p *partitionMovements) isSticky() bool {
+	for topic, movements := range p.PartitionMovementsByTopic {
+		movementPairs := make([]consumerPair, len(movements))
+		i := 0
+		for pair := range movements {
+			movementPairs[i] = pair
+			i++
+		}
+		if p.hasCycles(movementPairs) {
+			Logger.Printf("Stickiness is violated for topic %s", topic)
+			Logger.Printf("Partition movements for this topic occurred among the following consumer pairs: %v", movements)
+			return false
+		}
+	}
+	return true
+}
+
+func indexOfSubList(source []string, target []string) int {
+	targetSize := len(target)
+	maxCandidate := len(source) - targetSize
+nextCand:
+	for candidate := 0; candidate <= maxCandidate; candidate++ {
+		j := candidate
+		for i := 0; i < targetSize; i++ {
+			if target[i] != source[j] {
+				// Element mismatch, try the next candidate
+				continue nextCand
+			}
+			j++
+		}
+		// All elements of the candidate matched the target
+		return candidate
+	}
+	return -1
+}
+
+type consumerGroupMember struct {
+	id          string
+	assignments []topicPartitionAssignment
+}
+
+// assignmentPriorityQueue is a priority-queue of consumer group members that is sorted
+// in descending order (most assignments to least assignments).
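+// It implements heap.Interface, breaking ties on member ID, and is drained by
+// sortPartitions from the most-loaded member to the least-loaded one.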
+type assignmentPriorityQueue []*consumerGroupMember + +func (pq assignmentPriorityQueue) Len() int { return len(pq) } + +func (pq assignmentPriorityQueue) Less(i, j int) bool { + // order asssignment priority queue in descending order using assignment-count/member-id + if len(pq[i].assignments) == len(pq[j].assignments) { + return strings.Compare(pq[i].id, pq[j].id) > 0 + } + return len(pq[i].assignments) > len(pq[j].assignments) +} + +func (pq assignmentPriorityQueue) Swap(i, j int) { + pq[i], pq[j] = pq[j], pq[i] +} + +func (pq *assignmentPriorityQueue) Push(x interface{}) { + member := x.(*consumerGroupMember) + *pq = append(*pq, member) +} + +func (pq *assignmentPriorityQueue) Pop() interface{} { + old := *pq + n := len(old) + member := old[n-1] + *pq = old[0 : n-1] + return member +} diff --git a/vendor/github.com/Shopify/sarama/broker.go b/vendor/github.com/Shopify/sarama/broker.go new file mode 100644 index 00000000000..dd01e4ef1fb --- /dev/null +++ b/vendor/github.com/Shopify/sarama/broker.go @@ -0,0 +1,1475 @@ +package sarama + +import ( + "crypto/tls" + "encoding/binary" + "fmt" + "io" + "net" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/rcrowley/go-metrics" +) + +// Broker represents a single Kafka broker connection. All operations on this object are entirely concurrency-safe. +type Broker struct { + conf *Config + rack *string + + id int32 + addr string + correlationID int32 + conn net.Conn + connErr error + lock sync.Mutex + opened int32 + responses chan responsePromise + done chan bool + + registeredMetrics []string + + incomingByteRate metrics.Meter + requestRate metrics.Meter + requestSize metrics.Histogram + requestLatency metrics.Histogram + outgoingByteRate metrics.Meter + responseRate metrics.Meter + responseSize metrics.Histogram + requestsInFlight metrics.Counter + brokerIncomingByteRate metrics.Meter + brokerRequestRate metrics.Meter + brokerRequestSize metrics.Histogram + brokerRequestLatency metrics.Histogram + brokerOutgoingByteRate metrics.Meter + brokerResponseRate metrics.Meter + brokerResponseSize metrics.Histogram + brokerRequestsInFlight metrics.Counter + + kerberosAuthenticator GSSAPIKerberosAuth +} + +// SASLMechanism specifies the SASL mechanism the client uses to authenticate with the broker +type SASLMechanism string + +const ( + // SASLTypeOAuth represents the SASL/OAUTHBEARER mechanism (Kafka 2.0.0+) + SASLTypeOAuth = "OAUTHBEARER" + // SASLTypePlaintext represents the SASL/PLAIN mechanism + SASLTypePlaintext = "PLAIN" + // SASLTypeSCRAMSHA256 represents the SCRAM-SHA-256 mechanism. + SASLTypeSCRAMSHA256 = "SCRAM-SHA-256" + // SASLTypeSCRAMSHA512 represents the SCRAM-SHA-512 mechanism. + SASLTypeSCRAMSHA512 = "SCRAM-SHA-512" + SASLTypeGSSAPI = "GSSAPI" + // SASLHandshakeV0 is v0 of the Kafka SASL handshake protocol. Client and + // server negotiate SASL auth using opaque packets. + SASLHandshakeV0 = int16(0) + // SASLHandshakeV1 is v1 of the Kafka SASL handshake protocol. Client and + // server negotiate SASL by wrapping tokens with Kafka protocol headers. + SASLHandshakeV1 = int16(1) + // SASLExtKeyAuth is the reserved extension key name sent as part of the + // SASL/OAUTHBEARER initial client response + SASLExtKeyAuth = "auth" +) + +// AccessToken contains an access token used to authenticate a +// SASL/OAUTHBEARER client along with associated metadata. +type AccessToken struct { + // Token is the access token payload. 
+	Token string
+	// Extensions is an optional map of arbitrary key-value pairs that can be
+	// sent with the SASL/OAUTHBEARER initial client response. These values are
+	// ignored by the SASL server if they are unexpected. This feature is only
+	// supported by Kafka >= 2.1.0.
+	Extensions map[string]string
+}
+
+// AccessTokenProvider is the interface that encapsulates how implementors
+// can generate access tokens for Kafka broker authentication.
+type AccessTokenProvider interface {
+	// Token returns an access token. The implementation should ensure token
+	// reuse so that multiple calls at connect time do not create multiple
+	// tokens. The implementation should also periodically refresh the token in
+	// order to guarantee that each call returns an unexpired token. This
+	// method should not block indefinitely--a timeout error should be returned
+	// after a short period of inactivity so that the broker connection logic
+	// can log debugging information and retry.
+	Token() (*AccessToken, error)
+}
+
+// SCRAMClient is an interface to a SCRAM
+// client implementation.
+type SCRAMClient interface {
+	// Begin prepares the client for the SCRAM exchange
+	// with the server with a user name and a password
+	Begin(userName, password, authzID string) error
+	// Step steps client through the SCRAM exchange. It is
+	// called repeatedly until it errors or `Done` returns true.
+	Step(challenge string) (response string, err error)
+	// Done should return true when the SCRAM conversation
+	// is over.
+	Done() bool
+}
+
+type responsePromise struct {
+	requestTime   time.Time
+	correlationID int32
+	headerVersion int16
+	packets       chan []byte
+	errors        chan error
+}
+
+// NewBroker creates and returns a Broker targeting the given host:port address.
+// This does not attempt to actually connect, you have to call Open() for that.
+func NewBroker(addr string) *Broker {
+	return &Broker{id: -1, addr: addr}
+}
+
+// Open tries to connect to the Broker if it is not already connected or connecting, but does not block
+// waiting for the connection to complete. This means that any subsequent operations on the broker will
+// block waiting for the connection to succeed or fail. To get the effect of a fully synchronous Open call,
+// follow it by a call to Connected(). The only errors Open will return directly are ConfigurationError or
+// AlreadyConnected. If conf is nil, the result of NewConfig() is used.
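+//
+// A minimal sketch of the dial-and-check pattern (the address is a placeholder,
+// not a known listener):
+//
+//	b := NewBroker("localhost:9092")
+//	if err := b.Open(NewConfig()); err != nil {
+//		// configuration error, or the broker was already connected
+//	}
+//	if connected, err := b.Connected(); !connected {
+//		// err carries the outcome of the asynchronous connection attempt
+//	}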
+func (b *Broker) Open(conf *Config) error { + if !atomic.CompareAndSwapInt32(&b.opened, 0, 1) { + return ErrAlreadyConnected + } + + if conf == nil { + conf = NewConfig() + } + + err := conf.Validate() + if err != nil { + return err + } + + b.lock.Lock() + + go withRecover(func() { + defer b.lock.Unlock() + + dialer := conf.getDialer() + b.conn, b.connErr = dialer.Dial("tcp", b.addr) + if b.connErr != nil { + Logger.Printf("Failed to connect to broker %s: %s\n", b.addr, b.connErr) + b.conn = nil + atomic.StoreInt32(&b.opened, 0) + return + } + if conf.Net.TLS.Enable { + b.conn = tls.Client(b.conn, validServerNameTLS(b.addr, conf.Net.TLS.Config)) + } + + b.conn = newBufConn(b.conn) + b.conf = conf + + // Create or reuse the global metrics shared between brokers + b.incomingByteRate = metrics.GetOrRegisterMeter("incoming-byte-rate", conf.MetricRegistry) + b.requestRate = metrics.GetOrRegisterMeter("request-rate", conf.MetricRegistry) + b.requestSize = getOrRegisterHistogram("request-size", conf.MetricRegistry) + b.requestLatency = getOrRegisterHistogram("request-latency-in-ms", conf.MetricRegistry) + b.outgoingByteRate = metrics.GetOrRegisterMeter("outgoing-byte-rate", conf.MetricRegistry) + b.responseRate = metrics.GetOrRegisterMeter("response-rate", conf.MetricRegistry) + b.responseSize = getOrRegisterHistogram("response-size", conf.MetricRegistry) + b.requestsInFlight = metrics.GetOrRegisterCounter("requests-in-flight", conf.MetricRegistry) + // Do not gather metrics for seeded broker (only used during bootstrap) because they share + // the same id (-1) and are already exposed through the global metrics above + if b.id >= 0 { + b.registerMetrics() + } + + if conf.Net.SASL.Enable { + b.connErr = b.authenticateViaSASL() + + if b.connErr != nil { + err = b.conn.Close() + if err == nil { + Logger.Printf("Closed connection to broker %s\n", b.addr) + } else { + Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err) + } + b.conn = nil + atomic.StoreInt32(&b.opened, 0) + return + } + } + + b.done = make(chan bool) + b.responses = make(chan responsePromise, b.conf.Net.MaxOpenRequests-1) + + if b.id >= 0 { + Logger.Printf("Connected to broker at %s (registered as #%d)\n", b.addr, b.id) + } else { + Logger.Printf("Connected to broker at %s (unregistered)\n", b.addr) + } + go withRecover(b.responseReceiver) + }) + + return nil +} + +// Connected returns true if the broker is connected and false otherwise. If the broker is not +// connected but it had tried to connect, the error from that connection attempt is also returned. +func (b *Broker) Connected() (bool, error) { + b.lock.Lock() + defer b.lock.Unlock() + + return b.conn != nil, b.connErr +} + +// Close closes the broker resources +func (b *Broker) Close() error { + b.lock.Lock() + defer b.lock.Unlock() + + if b.conn == nil { + return ErrNotConnected + } + + close(b.responses) + <-b.done + + err := b.conn.Close() + + b.conn = nil + b.connErr = nil + b.done = nil + b.responses = nil + + b.unregisterMetrics() + + if err == nil { + Logger.Printf("Closed connection to broker %s\n", b.addr) + } else { + Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err) + } + + atomic.StoreInt32(&b.opened, 0) + + return err +} + +// ID returns the broker ID retrieved from Kafka's metadata, or -1 if that is not known. +func (b *Broker) ID() int32 { + return b.id +} + +// Addr returns the broker address as either retrieved from Kafka's metadata or passed to NewBroker. 
+func (b *Broker) Addr() string {
+	return b.addr
+}
+
+// Rack returns the broker's rack as retrieved from Kafka's metadata or the
+// empty string if it is not known. The returned value corresponds to the
+// broker's broker.rack configuration setting. Requires protocol version to be
+// at least v0.10.0.0.
+func (b *Broker) Rack() string {
+	if b.rack == nil {
+		return ""
+	}
+	return *b.rack
+}
+
+// GetMetadata sends a metadata request and returns a metadata response or error
+func (b *Broker) GetMetadata(request *MetadataRequest) (*MetadataResponse, error) {
+	response := new(MetadataResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// GetConsumerMetadata sends a consumer metadata request and returns a consumer metadata response or error
+func (b *Broker) GetConsumerMetadata(request *ConsumerMetadataRequest) (*ConsumerMetadataResponse, error) {
+	response := new(ConsumerMetadataResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// FindCoordinator sends a find coordinator request and returns a response or error
+func (b *Broker) FindCoordinator(request *FindCoordinatorRequest) (*FindCoordinatorResponse, error) {
+	response := new(FindCoordinatorResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// GetAvailableOffsets returns an offset response or error
+func (b *Broker) GetAvailableOffsets(request *OffsetRequest) (*OffsetResponse, error) {
+	response := new(OffsetResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// Produce returns a produce response or error
+func (b *Broker) Produce(request *ProduceRequest) (*ProduceResponse, error) {
+	var (
+		response *ProduceResponse
+		err      error
+	)
+
+	if request.RequiredAcks == NoResponse {
+		err = b.sendAndReceive(request, nil)
+	} else {
+		response = new(ProduceResponse)
+		err = b.sendAndReceive(request, response)
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// Fetch returns a FetchResponse or error
+func (b *Broker) Fetch(request *FetchRequest) (*FetchResponse, error) {
+	response := new(FetchResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// CommitOffset returns an offset commit response or error
+func (b *Broker) CommitOffset(request *OffsetCommitRequest) (*OffsetCommitResponse, error) {
+	response := new(OffsetCommitResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// FetchOffset returns an offset fetch response or error
+func (b *Broker) FetchOffset(request *OffsetFetchRequest) (*OffsetFetchResponse, error) {
+	response := new(OffsetFetchResponse)
+	response.Version = request.Version // needed to handle the two header versions
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// JoinGroup returns a join group response or error
+func (b *Broker) JoinGroup(request *JoinGroupRequest) (*JoinGroupResponse, error) {
+	response := new(JoinGroupResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// SyncGroup returns a sync group response or error
+func (b *Broker) SyncGroup(request *SyncGroupRequest) (*SyncGroupResponse, error) {
+	response := new(SyncGroupResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// LeaveGroup returns a leave group response or error
+func (b *Broker) LeaveGroup(request *LeaveGroupRequest) (*LeaveGroupResponse, error) {
+	response := new(LeaveGroupResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// Heartbeat returns a heartbeat response or error
+func (b *Broker) Heartbeat(request *HeartbeatRequest) (*HeartbeatResponse, error) {
+	response := new(HeartbeatResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// ListGroups returns a list groups response or error
+func (b *Broker) ListGroups(request *ListGroupsRequest) (*ListGroupsResponse, error) {
+	response := new(ListGroupsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// DescribeGroups returns a describe groups response or error
+func (b *Broker) DescribeGroups(request *DescribeGroupsRequest) (*DescribeGroupsResponse, error) {
+	response := new(DescribeGroupsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// ApiVersions returns an api versions response or error
+func (b *Broker) ApiVersions(request *ApiVersionsRequest) (*ApiVersionsResponse, error) {
+	response := new(ApiVersionsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// CreateTopics sends a create topic request and returns a create topic response
+func (b *Broker) CreateTopics(request *CreateTopicsRequest) (*CreateTopicsResponse, error) {
+	response := new(CreateTopicsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// DeleteTopics sends a delete topic request and returns a delete topic response
+func (b *Broker) DeleteTopics(request *DeleteTopicsRequest) (*DeleteTopicsResponse, error) {
+	response := new(DeleteTopicsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// CreatePartitions sends a create partition request and returns a create
+// partitions response or error
+func (b *Broker) CreatePartitions(request *CreatePartitionsRequest) (*CreatePartitionsResponse, error) {
+	response := new(CreatePartitionsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// AlterPartitionReassignments sends an alter partition reassignments request and
+// returns an alter partition reassignments response
+func (b *Broker) AlterPartitionReassignments(request *AlterPartitionReassignmentsRequest) (*AlterPartitionReassignmentsResponse, error) {
+	response := new(AlterPartitionReassignmentsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// ListPartitionReassignments sends a list partition reassignments request and
+// returns a list partition reassignments response
+func (b *Broker) ListPartitionReassignments(request *ListPartitionReassignmentsRequest) (*ListPartitionReassignmentsResponse, error) {
+	response := new(ListPartitionReassignmentsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// DeleteRecords sends a request to delete records and returns a delete records
+// response or error
+func (b *Broker) DeleteRecords(request *DeleteRecordsRequest) (*DeleteRecordsResponse, error) {
+	response := new(DeleteRecordsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// DescribeAcls sends a describe acl request and returns a response or error
+func (b *Broker) DescribeAcls(request *DescribeAclsRequest) (*DescribeAclsResponse, error) {
+	response := new(DescribeAclsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// CreateAcls sends a create acl request and returns a response or error
+func (b *Broker) CreateAcls(request *CreateAclsRequest) (*CreateAclsResponse, error) {
+	response := new(CreateAclsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// DeleteAcls sends a delete acl request and returns a response or error
+func (b *Broker) DeleteAcls(request *DeleteAclsRequest) (*DeleteAclsResponse, error) {
+	response := new(DeleteAclsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// InitProducerID sends an init producer request and returns a response or error
+func (b *Broker) InitProducerID(request *InitProducerIDRequest) (*InitProducerIDResponse, error) {
+	response := new(InitProducerIDResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// AddPartitionsToTxn sends a request to add partitions to txn and returns
+// a response or error
+func (b *Broker) AddPartitionsToTxn(request *AddPartitionsToTxnRequest) (*AddPartitionsToTxnResponse, error) {
+	response := new(AddPartitionsToTxnResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// AddOffsetsToTxn sends a request to add offsets to txn and returns a response
+// or error
+func (b *Broker) AddOffsetsToTxn(request *AddOffsetsToTxnRequest) (*AddOffsetsToTxnResponse, error) {
+	response := new(AddOffsetsToTxnResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// EndTxn sends a request to end txn and returns a response or error
+func (b *Broker) EndTxn(request *EndTxnRequest) (*EndTxnResponse, error) {
+	response := new(EndTxnResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// TxnOffsetCommit sends a request to commit transaction offsets and returns
+// a response or error
+func (b *Broker) TxnOffsetCommit(request *TxnOffsetCommitRequest) (*TxnOffsetCommitResponse, error) {
+	response := new(TxnOffsetCommitResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// DescribeConfigs sends a request to describe config and returns a response or
+// error
+func (b *Broker) DescribeConfigs(request *DescribeConfigsRequest) (*DescribeConfigsResponse, error) {
+	response := new(DescribeConfigsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// AlterConfigs sends a request to alter config and returns a response or error
+func (b *Broker) AlterConfigs(request *AlterConfigsRequest) (*AlterConfigsResponse, error) {
+	response := new(AlterConfigsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// IncrementalAlterConfigs sends a request to incrementally alter config and returns a response or error
+func (b *Broker) IncrementalAlterConfigs(request *IncrementalAlterConfigsRequest) (*IncrementalAlterConfigsResponse, error) {
+	response := new(IncrementalAlterConfigsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// DeleteGroups sends a request to delete groups and returns a response or error
+func (b *Broker) DeleteGroups(request *DeleteGroupsRequest) (*DeleteGroupsResponse, error) {
+	response := new(DeleteGroupsResponse)
+
+	if err := b.sendAndReceive(request, response); err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// DescribeLogDirs sends a request to get the broker's log dir paths and sizes
+func (b *Broker) DescribeLogDirs(request *DescribeLogDirsRequest) (*DescribeLogDirsResponse, error) {
+	response := new(DescribeLogDirsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// DescribeUserScramCredentials sends a request to get SCRAM users
+func (b *Broker) DescribeUserScramCredentials(req *DescribeUserScramCredentialsRequest) (*DescribeUserScramCredentialsResponse, error) {
+	res := new(DescribeUserScramCredentialsResponse)
+
+	err := b.sendAndReceive(req, res)
+	if err != nil {
+		return nil, err
+	}
+
+	return res, err
+}
+
+func (b *Broker) AlterUserScramCredentials(req *AlterUserScramCredentialsRequest) (*AlterUserScramCredentialsResponse, error) {
+	res := new(AlterUserScramCredentialsResponse)
+
+	err := b.sendAndReceive(req, res)
+	if err != nil {
+		return nil, err
+	}
+
+	return res, nil
+}
+
+// readFull ensures the conn ReadDeadline has been set up before making a
+// call to io.ReadFull
+func (b *Broker) readFull(buf []byte) (n int, err error) {
+	if err := b.conn.SetReadDeadline(time.Now().Add(b.conf.Net.ReadTimeout)); err != nil {
+		return 0, err
+	}
+
+	return io.ReadFull(b.conn, buf)
+}
+
+// write ensures the conn WriteDeadline has been set up before making a
+// call to conn.Write
+func (b *Broker) write(buf []byte) (n int, err error) {
+	if err := b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout)); err != nil {
+		return 0, err
+	}
+
+	return b.conn.Write(buf)
+}
+
+func (b *Broker) send(rb protocolBody, promiseResponse bool, responseHeaderVersion int16) (*responsePromise, error) {
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	if b.conn == nil {
+		if b.connErr != nil {
+			return nil, b.connErr
+		}
+		return nil, ErrNotConnected
+	}
+
+	if !b.conf.Version.IsAtLeast(rb.requiredVersion()) {
+		return nil, ErrUnsupportedVersion
+	}
+
+	req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb}
+	buf, err := encode(req, b.conf.MetricRegistry)
+	if err != nil {
+		return nil, err
+	}
+
+	requestTime := time.Now()
+	// Will be decremented in responseReceiver (except error or request with NoResponse)
+	b.addRequestInFlightMetrics(1)
+	bytes, err := b.write(buf)
+	b.updateOutgoingCommunicationMetrics(bytes)
+	if err != nil {
+		b.addRequestInFlightMetrics(-1)
+		return nil, err
+	}
+	b.correlationID++
+
+	if !promiseResponse {
+		// Record request latency without the response
+
b.updateRequestLatencyAndInFlightMetrics(time.Since(requestTime)) + return nil, nil + } + + promise := responsePromise{requestTime, req.correlationID, responseHeaderVersion, make(chan []byte), make(chan error)} + b.responses <- promise + + return &promise, nil +} + +func (b *Broker) sendAndReceive(req protocolBody, res protocolBody) error { + responseHeaderVersion := int16(-1) + if res != nil { + responseHeaderVersion = res.headerVersion() + } + + promise, err := b.send(req, res != nil, responseHeaderVersion) + if err != nil { + return err + } + + if promise == nil { + return nil + } + + select { + case buf := <-promise.packets: + return versionedDecode(buf, res, req.version()) + case err = <-promise.errors: + return err + } +} + +func (b *Broker) decode(pd packetDecoder, version int16) (err error) { + b.id, err = pd.getInt32() + if err != nil { + return err + } + + host, err := pd.getString() + if err != nil { + return err + } + + port, err := pd.getInt32() + if err != nil { + return err + } + + if version >= 1 { + b.rack, err = pd.getNullableString() + if err != nil { + return err + } + } + + b.addr = net.JoinHostPort(host, fmt.Sprint(port)) + if _, _, err := net.SplitHostPort(b.addr); err != nil { + return err + } + + return nil +} + +func (b *Broker) encode(pe packetEncoder, version int16) (err error) { + host, portstr, err := net.SplitHostPort(b.addr) + if err != nil { + return err + } + + port, err := strconv.ParseInt(portstr, 10, 32) + if err != nil { + return err + } + + pe.putInt32(b.id) + + err = pe.putString(host) + if err != nil { + return err + } + + pe.putInt32(int32(port)) + + if version >= 1 { + err = pe.putNullableString(b.rack) + if err != nil { + return err + } + } + + return nil +} + +func (b *Broker) responseReceiver() { + var dead error + + for response := range b.responses { + if dead != nil { + // This was previously incremented in send() and + // we are not calling updateIncomingCommunicationMetrics() + b.addRequestInFlightMetrics(-1) + response.errors <- dead + continue + } + + headerLength := getHeaderLength(response.headerVersion) + header := make([]byte, headerLength) + + bytesReadHeader, err := b.readFull(header) + requestLatency := time.Since(response.requestTime) + if err != nil { + b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency) + dead = err + response.errors <- err + continue + } + + decodedHeader := responseHeader{} + err = versionedDecode(header, &decodedHeader, response.headerVersion) + if err != nil { + b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency) + dead = err + response.errors <- err + continue + } + if decodedHeader.correlationID != response.correlationID { + b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency) + // TODO if decoded ID < cur ID, discard until we catch up + // TODO if decoded ID > cur ID, save it so when cur ID catches up we have a response + dead = PacketDecodingError{fmt.Sprintf("correlation ID didn't match, wanted %d, got %d", response.correlationID, decodedHeader.correlationID)} + response.errors <- dead + continue + } + + buf := make([]byte, decodedHeader.length-int32(headerLength)+4) + bytesReadBody, err := b.readFull(buf) + b.updateIncomingCommunicationMetrics(bytesReadHeader+bytesReadBody, requestLatency) + if err != nil { + dead = err + response.errors <- err + continue + } + + response.packets <- buf + } + close(b.done) +} + +func getHeaderLength(headerVersion int16) int8 { + if headerVersion < 1 { + return 8 + } else { + // header contains additional tagged 
field length (0), we don't support actual tags yet. + return 9 + } +} + +func (b *Broker) authenticateViaSASL() error { + switch b.conf.Net.SASL.Mechanism { + case SASLTypeOAuth: + return b.sendAndReceiveSASLOAuth(b.conf.Net.SASL.TokenProvider) + case SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512: + return b.sendAndReceiveSASLSCRAMv1() + case SASLTypeGSSAPI: + return b.sendAndReceiveKerberos() + default: + return b.sendAndReceiveSASLPlainAuth() + } +} + +func (b *Broker) sendAndReceiveKerberos() error { + b.kerberosAuthenticator.Config = &b.conf.Net.SASL.GSSAPI + if b.kerberosAuthenticator.NewKerberosClientFunc == nil { + b.kerberosAuthenticator.NewKerberosClientFunc = NewKerberosClient + } + return b.kerberosAuthenticator.Authorize(b) +} + +func (b *Broker) sendAndReceiveSASLHandshake(saslType SASLMechanism, version int16) error { + rb := &SaslHandshakeRequest{Mechanism: string(saslType), Version: version} + + req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb} + buf, err := encode(req, b.conf.MetricRegistry) + if err != nil { + return err + } + + requestTime := time.Now() + // Will be decremented in updateIncomingCommunicationMetrics (except error) + b.addRequestInFlightMetrics(1) + bytes, err := b.write(buf) + b.updateOutgoingCommunicationMetrics(bytes) + if err != nil { + b.addRequestInFlightMetrics(-1) + Logger.Printf("Failed to send SASL handshake %s: %s\n", b.addr, err.Error()) + return err + } + b.correlationID++ + + header := make([]byte, 8) // response header + _, err = b.readFull(header) + if err != nil { + b.addRequestInFlightMetrics(-1) + Logger.Printf("Failed to read SASL handshake header : %s\n", err.Error()) + return err + } + + length := binary.BigEndian.Uint32(header[:4]) + payload := make([]byte, length-4) + n, err := b.readFull(payload) + if err != nil { + b.addRequestInFlightMetrics(-1) + Logger.Printf("Failed to read SASL handshake payload : %s\n", err.Error()) + return err + } + + b.updateIncomingCommunicationMetrics(n+8, time.Since(requestTime)) + res := &SaslHandshakeResponse{} + + err = versionedDecode(payload, res, 0) + if err != nil { + Logger.Printf("Failed to parse SASL handshake : %s\n", err.Error()) + return err + } + + if res.Err != ErrNoError { + Logger.Printf("Invalid SASL Mechanism : %s\n", res.Err.Error()) + return res.Err + } + + Logger.Print("Successful SASL handshake. Available mechanisms: ", res.EnabledMechanisms) + return nil +} + +// Kafka 0.10.x supported SASL PLAIN/Kerberos via KAFKA-3149 (KIP-43). +// Kafka 1.x.x onward added a SaslAuthenticate request/response message which +// wraps the SASL flow in the Kafka protocol, which allows for returning +// meaningful errors on authentication failure. +// +// In SASL Plain, Kafka expects the auth header to be in the following format +// Message format (from https://tools.ietf.org/html/rfc4616): +// +// message = [authzid] UTF8NUL authcid UTF8NUL passwd +// authcid = 1*SAFE ; MUST accept up to 255 octets +// authzid = 1*SAFE ; MUST accept up to 255 octets +// passwd = 1*SAFE ; MUST accept up to 255 octets +// UTF8NUL = %x00 ; UTF-8 encoded NUL character +// +// SAFE = UTF1 / UTF2 / UTF3 / UTF4 +// ;; any UTF-8 encoded Unicode character except NUL +// +// With SASL v0 handshake and auth then: +// When credentials are valid, Kafka returns a 4 byte array of null characters. +// When credentials are invalid, Kafka closes the connection. 
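+//
+// For example (values assumed for illustration), user "alice" with password
+// "secret" and no authzid is sent as the 13 bytes "\x00alice\x00secret",
+// preceded in the v0 flow by a 4 byte big-endian length prefix.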
+// +// With SASL v1 handshake and auth then: +// When credentials are invalid, Kafka replies with a SaslAuthenticate response +// containing an error code and message detailing the authentication failure. +func (b *Broker) sendAndReceiveSASLPlainAuth() error { + // default to V0 to allow for backward compatibility when SASL is enabled + // but not the handshake + if b.conf.Net.SASL.Handshake { + handshakeErr := b.sendAndReceiveSASLHandshake(SASLTypePlaintext, b.conf.Net.SASL.Version) + if handshakeErr != nil { + Logger.Printf("Error while performing SASL handshake %s\n", b.addr) + return handshakeErr + } + } + + if b.conf.Net.SASL.Version == SASLHandshakeV1 { + return b.sendAndReceiveV1SASLPlainAuth() + } + return b.sendAndReceiveV0SASLPlainAuth() +} + +// sendAndReceiveV0SASLPlainAuth flows the v0 sasl auth NOT wrapped in the kafka protocol +func (b *Broker) sendAndReceiveV0SASLPlainAuth() error { + length := len(b.conf.Net.SASL.AuthIdentity) + 1 + len(b.conf.Net.SASL.User) + 1 + len(b.conf.Net.SASL.Password) + authBytes := make([]byte, length+4) // 4 byte length header + auth data + binary.BigEndian.PutUint32(authBytes, uint32(length)) + copy(authBytes[4:], b.conf.Net.SASL.AuthIdentity+"\x00"+b.conf.Net.SASL.User+"\x00"+b.conf.Net.SASL.Password) + + requestTime := time.Now() + // Will be decremented in updateIncomingCommunicationMetrics (except error) + b.addRequestInFlightMetrics(1) + bytesWritten, err := b.write(authBytes) + b.updateOutgoingCommunicationMetrics(bytesWritten) + if err != nil { + b.addRequestInFlightMetrics(-1) + Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error()) + return err + } + + header := make([]byte, 4) + n, err := b.readFull(header) + b.updateIncomingCommunicationMetrics(n, time.Since(requestTime)) + // If the credentials are valid, we would get a 4 byte response filled with null characters. 
+ // Otherwise, the broker closes the connection and we get an EOF + if err != nil { + Logger.Printf("Failed to read response while authenticating with SASL to broker %s: %s\n", b.addr, err.Error()) + return err + } + + Logger.Printf("SASL authentication successful with broker %s:%v - %v\n", b.addr, n, header) + return nil +} + +// sendAndReceiveV1SASLPlainAuth flows the v1 sasl authentication using the kafka protocol +func (b *Broker) sendAndReceiveV1SASLPlainAuth() error { + correlationID := b.correlationID + + requestTime := time.Now() + + // Will be decremented in updateIncomingCommunicationMetrics (except error) + b.addRequestInFlightMetrics(1) + bytesWritten, err := b.sendSASLPlainAuthClientResponse(correlationID) + b.updateOutgoingCommunicationMetrics(bytesWritten) + + if err != nil { + b.addRequestInFlightMetrics(-1) + Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error()) + return err + } + + b.correlationID++ + + bytesRead, err := b.receiveSASLServerResponse(&SaslAuthenticateResponse{}, correlationID) + b.updateIncomingCommunicationMetrics(bytesRead, time.Since(requestTime)) + + // With v1 sasl we get an error message set in the response we can return + if err != nil { + Logger.Printf("Error returned from broker during SASL flow %s: %s\n", b.addr, err.Error()) + return err + } + + return nil +} + +// sendAndReceiveSASLOAuth performs the authentication flow as described by KIP-255 +// https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=75968876 +func (b *Broker) sendAndReceiveSASLOAuth(provider AccessTokenProvider) error { + if err := b.sendAndReceiveSASLHandshake(SASLTypeOAuth, SASLHandshakeV1); err != nil { + return err + } + + token, err := provider.Token() + if err != nil { + return err + } + + message, err := buildClientFirstMessage(token) + if err != nil { + return err + } + + challenged, err := b.sendClientMessage(message) + if err != nil { + return err + } + + if challenged { + // Abort the token exchange. The broker returns the failure code. + _, err = b.sendClientMessage([]byte(`\x01`)) + } + + return err +} + +// sendClientMessage sends a SASL/OAUTHBEARER client message and returns true +// if the broker responds with a challenge, in which case the token is +// rejected. 
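+// Per RFC 7628 the caller is then expected to abort the exchange by sending
+// a single 0x01 byte (see sendAndReceiveSASLOAuth above).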
+func (b *Broker) sendClientMessage(message []byte) (bool, error) { + requestTime := time.Now() + // Will be decremented in updateIncomingCommunicationMetrics (except error) + b.addRequestInFlightMetrics(1) + correlationID := b.correlationID + + bytesWritten, err := b.sendSASLOAuthBearerClientMessage(message, correlationID) + b.updateOutgoingCommunicationMetrics(bytesWritten) + if err != nil { + b.addRequestInFlightMetrics(-1) + return false, err + } + + b.correlationID++ + + res := &SaslAuthenticateResponse{} + bytesRead, err := b.receiveSASLServerResponse(res, correlationID) + + requestLatency := time.Since(requestTime) + b.updateIncomingCommunicationMetrics(bytesRead, requestLatency) + + isChallenge := len(res.SaslAuthBytes) > 0 + + if isChallenge && err != nil { + Logger.Printf("Broker rejected authentication token: %s", res.SaslAuthBytes) + } + + return isChallenge, err +} + +func (b *Broker) sendAndReceiveSASLSCRAMv1() error { + if err := b.sendAndReceiveSASLHandshake(b.conf.Net.SASL.Mechanism, SASLHandshakeV1); err != nil { + return err + } + + scramClient := b.conf.Net.SASL.SCRAMClientGeneratorFunc() + if err := scramClient.Begin(b.conf.Net.SASL.User, b.conf.Net.SASL.Password, b.conf.Net.SASL.SCRAMAuthzID); err != nil { + return fmt.Errorf("failed to start SCRAM exchange with the server: %s", err.Error()) + } + + msg, err := scramClient.Step("") + if err != nil { + return fmt.Errorf("failed to advance the SCRAM exchange: %s", err.Error()) + } + + for !scramClient.Done() { + requestTime := time.Now() + // Will be decremented in updateIncomingCommunicationMetrics (except error) + b.addRequestInFlightMetrics(1) + correlationID := b.correlationID + bytesWritten, err := b.sendSaslAuthenticateRequest(correlationID, []byte(msg)) + b.updateOutgoingCommunicationMetrics(bytesWritten) + if err != nil { + b.addRequestInFlightMetrics(-1) + Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error()) + return err + } + + b.correlationID++ + challenge, err := b.receiveSaslAuthenticateResponse(correlationID) + if err != nil { + b.addRequestInFlightMetrics(-1) + Logger.Printf("Failed to read response while authenticating with SASL to broker %s: %s\n", b.addr, err.Error()) + return err + } + + b.updateIncomingCommunicationMetrics(len(challenge), time.Since(requestTime)) + msg, err = scramClient.Step(string(challenge)) + if err != nil { + Logger.Println("SASL authentication failed", err) + return err + } + } + + Logger.Println("SASL authentication succeeded") + return nil +} + +func (b *Broker) sendSaslAuthenticateRequest(correlationID int32, msg []byte) (int, error) { + rb := &SaslAuthenticateRequest{msg} + req := &request{correlationID: correlationID, clientID: b.conf.ClientID, body: rb} + buf, err := encode(req, b.conf.MetricRegistry) + if err != nil { + return 0, err + } + + return b.write(buf) +} + +func (b *Broker) receiveSaslAuthenticateResponse(correlationID int32) ([]byte, error) { + buf := make([]byte, responseLengthSize+correlationIDSize) + _, err := b.readFull(buf) + if err != nil { + return nil, err + } + + header := responseHeader{} + err = versionedDecode(buf, &header, 0) + if err != nil { + return nil, err + } + + if header.correlationID != correlationID { + return nil, fmt.Errorf("correlation ID didn't match, wanted %d, got %d", b.correlationID, header.correlationID) + } + + buf = make([]byte, header.length-correlationIDSize) + _, err = b.readFull(buf) + if err != nil { + return nil, err + } + + res := &SaslAuthenticateResponse{} + if err := 
versionedDecode(buf, res, 0); err != nil { + return nil, err + } + if res.Err != ErrNoError { + return nil, res.Err + } + return res.SaslAuthBytes, nil +} + +// Build SASL/OAUTHBEARER initial client response as described by RFC-7628 +// https://tools.ietf.org/html/rfc7628 +func buildClientFirstMessage(token *AccessToken) ([]byte, error) { + var ext string + + if token.Extensions != nil && len(token.Extensions) > 0 { + if _, ok := token.Extensions[SASLExtKeyAuth]; ok { + return []byte{}, fmt.Errorf("the extension `%s` is invalid", SASLExtKeyAuth) + } + ext = "\x01" + mapToString(token.Extensions, "=", "\x01") + } + + resp := []byte(fmt.Sprintf("n,,\x01auth=Bearer %s%s\x01\x01", token.Token, ext)) + + return resp, nil +} + +// mapToString returns a list of key-value pairs ordered by key. +// keyValSep separates the key from the value. elemSep separates each pair. +func mapToString(extensions map[string]string, keyValSep string, elemSep string) string { + buf := make([]string, 0, len(extensions)) + + for k, v := range extensions { + buf = append(buf, k+keyValSep+v) + } + + sort.Strings(buf) + + return strings.Join(buf, elemSep) +} + +func (b *Broker) sendSASLPlainAuthClientResponse(correlationID int32) (int, error) { + authBytes := []byte(b.conf.Net.SASL.AuthIdentity + "\x00" + b.conf.Net.SASL.User + "\x00" + b.conf.Net.SASL.Password) + rb := &SaslAuthenticateRequest{authBytes} + req := &request{correlationID: correlationID, clientID: b.conf.ClientID, body: rb} + buf, err := encode(req, b.conf.MetricRegistry) + if err != nil { + return 0, err + } + + return b.write(buf) +} + +func (b *Broker) sendSASLOAuthBearerClientMessage(initialResp []byte, correlationID int32) (int, error) { + rb := &SaslAuthenticateRequest{initialResp} + + req := &request{correlationID: correlationID, clientID: b.conf.ClientID, body: rb} + + buf, err := encode(req, b.conf.MetricRegistry) + if err != nil { + return 0, err + } + + return b.write(buf) +} + +func (b *Broker) receiveSASLServerResponse(res *SaslAuthenticateResponse, correlationID int32) (int, error) { + buf := make([]byte, responseLengthSize+correlationIDSize) + bytesRead, err := b.readFull(buf) + if err != nil { + return bytesRead, err + } + + header := responseHeader{} + err = versionedDecode(buf, &header, 0) + if err != nil { + return bytesRead, err + } + + if header.correlationID != correlationID { + return bytesRead, fmt.Errorf("correlation ID didn't match, wanted %d, got %d", b.correlationID, header.correlationID) + } + + buf = make([]byte, header.length-correlationIDSize) + c, err := b.readFull(buf) + bytesRead += c + if err != nil { + return bytesRead, err + } + + if err := versionedDecode(buf, res, 0); err != nil { + return bytesRead, err + } + + if res.Err != ErrNoError { + return bytesRead, res.Err + } + + return bytesRead, nil +} + +func (b *Broker) updateIncomingCommunicationMetrics(bytes int, requestLatency time.Duration) { + b.updateRequestLatencyAndInFlightMetrics(requestLatency) + b.responseRate.Mark(1) + + if b.brokerResponseRate != nil { + b.brokerResponseRate.Mark(1) + } + + responseSize := int64(bytes) + b.incomingByteRate.Mark(responseSize) + if b.brokerIncomingByteRate != nil { + b.brokerIncomingByteRate.Mark(responseSize) + } + + b.responseSize.Update(responseSize) + if b.brokerResponseSize != nil { + b.brokerResponseSize.Update(responseSize) + } +} + +func (b *Broker) updateRequestLatencyAndInFlightMetrics(requestLatency time.Duration) { + requestLatencyInMs := int64(requestLatency / time.Millisecond) + 
b.requestLatency.Update(requestLatencyInMs) + + if b.brokerRequestLatency != nil { + b.brokerRequestLatency.Update(requestLatencyInMs) + } + + b.addRequestInFlightMetrics(-1) +} + +func (b *Broker) addRequestInFlightMetrics(i int64) { + b.requestsInFlight.Inc(i) + if b.brokerRequestsInFlight != nil { + b.brokerRequestsInFlight.Inc(i) + } +} + +func (b *Broker) updateOutgoingCommunicationMetrics(bytes int) { + b.requestRate.Mark(1) + if b.brokerRequestRate != nil { + b.brokerRequestRate.Mark(1) + } + + requestSize := int64(bytes) + b.outgoingByteRate.Mark(requestSize) + if b.brokerOutgoingByteRate != nil { + b.brokerOutgoingByteRate.Mark(requestSize) + } + + b.requestSize.Update(requestSize) + if b.brokerRequestSize != nil { + b.brokerRequestSize.Update(requestSize) + } +} + +func (b *Broker) registerMetrics() { + b.brokerIncomingByteRate = b.registerMeter("incoming-byte-rate") + b.brokerRequestRate = b.registerMeter("request-rate") + b.brokerRequestSize = b.registerHistogram("request-size") + b.brokerRequestLatency = b.registerHistogram("request-latency-in-ms") + b.brokerOutgoingByteRate = b.registerMeter("outgoing-byte-rate") + b.brokerResponseRate = b.registerMeter("response-rate") + b.brokerResponseSize = b.registerHistogram("response-size") + b.brokerRequestsInFlight = b.registerCounter("requests-in-flight") +} + +func (b *Broker) unregisterMetrics() { + for _, name := range b.registeredMetrics { + b.conf.MetricRegistry.Unregister(name) + } + b.registeredMetrics = nil +} + +func (b *Broker) registerMeter(name string) metrics.Meter { + nameForBroker := getMetricNameForBroker(name, b) + b.registeredMetrics = append(b.registeredMetrics, nameForBroker) + return metrics.GetOrRegisterMeter(nameForBroker, b.conf.MetricRegistry) +} + +func (b *Broker) registerHistogram(name string) metrics.Histogram { + nameForBroker := getMetricNameForBroker(name, b) + b.registeredMetrics = append(b.registeredMetrics, nameForBroker) + return getOrRegisterHistogram(nameForBroker, b.conf.MetricRegistry) +} + +func (b *Broker) registerCounter(name string) metrics.Counter { + nameForBroker := getMetricNameForBroker(name, b) + b.registeredMetrics = append(b.registeredMetrics, nameForBroker) + return metrics.GetOrRegisterCounter(nameForBroker, b.conf.MetricRegistry) +} + +func validServerNameTLS(addr string, cfg *tls.Config) *tls.Config { + if cfg == nil { + cfg = &tls.Config{ + MinVersion: tls.VersionTLS12, + } + } + if cfg.ServerName != "" { + return cfg + } + + c := cfg.Clone() + sn, _, err := net.SplitHostPort(addr) + if err != nil { + Logger.Println(fmt.Errorf("failed to get ServerName from addr %w", err)) + } + c.ServerName = sn + return c +} diff --git a/vendor/github.com/Shopify/sarama/client.go b/vendor/github.com/Shopify/sarama/client.go new file mode 100644 index 00000000000..c0918ba355a --- /dev/null +++ b/vendor/github.com/Shopify/sarama/client.go @@ -0,0 +1,1112 @@ +package sarama + +import ( + "math/rand" + "sort" + "sync" + "time" +) + +// Client is a generic Kafka client. It manages connections to one or more Kafka brokers. +// You MUST call Close() on a client to avoid leaks, it will not be garbage-collected +// automatically when it passes out of scope. It is safe to share a client amongst many +// users, however Kafka will process requests from a single client strictly in serial, +// so it is generally more efficient to use the default one client per producer/consumer. +type Client interface { + // Config returns the Config struct of the client. 
This struct should not be + // altered after it has been created. + Config() *Config + + // Controller returns the cluster controller broker. It will return a + // locally cached value if it's available. You can call RefreshController + // to update the cached value. Requires Kafka 0.10 or higher. + Controller() (*Broker, error) + + // RefreshController retrieves the cluster controller from fresh metadata + // and stores it in the local cache. Requires Kafka 0.10 or higher. + RefreshController() (*Broker, error) + + // Brokers returns the current set of active brokers as retrieved from cluster metadata. + Brokers() []*Broker + + // Broker returns the active Broker if available for the broker ID. + Broker(brokerID int32) (*Broker, error) + + // Topics returns the set of available topics as retrieved from cluster metadata. + Topics() ([]string, error) + + // Partitions returns the sorted list of all partition IDs for the given topic. + Partitions(topic string) ([]int32, error) + + // WritablePartitions returns the sorted list of all writable partition IDs for + // the given topic, where "writable" means "having a valid leader accepting + // writes". + WritablePartitions(topic string) ([]int32, error) + + // Leader returns the broker object that is the leader of the current + // topic/partition, as determined by querying the cluster metadata. + Leader(topic string, partitionID int32) (*Broker, error) + + // Replicas returns the set of all replica IDs for the given partition. + Replicas(topic string, partitionID int32) ([]int32, error) + + // InSyncReplicas returns the set of all in-sync replica IDs for the given + // partition. In-sync replicas are replicas which are fully caught up with + // the partition leader. + InSyncReplicas(topic string, partitionID int32) ([]int32, error) + + // OfflineReplicas returns the set of all offline replica IDs for the given + // partition. Offline replicas are replicas which are offline + OfflineReplicas(topic string, partitionID int32) ([]int32, error) + + // RefreshBrokers takes a list of addresses to be used as seed brokers. + // Existing broker connections are closed and the updated list of seed brokers + // will be used for the next metadata fetch. + RefreshBrokers(addrs []string) error + + // RefreshMetadata takes a list of topics and queries the cluster to refresh the + // available metadata for those topics. If no topics are provided, it will refresh + // metadata for all topics. + RefreshMetadata(topics ...string) error + + // GetOffset queries the cluster to get the most recent available offset at the + // given time (in milliseconds) on the topic/partition combination. + // Time should be OffsetOldest for the earliest available offset, + // OffsetNewest for the offset of the message that will be produced next, or a time. + GetOffset(topic string, partitionID int32, time int64) (int64, error) + + // Coordinator returns the coordinating broker for a consumer group. It will + // return a locally cached value if it's available. You can call + // RefreshCoordinator to update the cached value. This function only works on + // Kafka 0.8.2 and higher. + Coordinator(consumerGroup string) (*Broker, error) + + // RefreshCoordinator retrieves the coordinator for a consumer group and stores it + // in local cache. This function only works on Kafka 0.8.2 and higher. 
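+	//
+	// A hypothetical sketch (group name assumed):
+	//
+	//	if err := client.RefreshCoordinator("my-group"); err == nil {
+	//		coordinator, _ := client.Coordinator("my-group")
+	//		_ = coordinator
+	//	}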
+ RefreshCoordinator(consumerGroup string) error + + // InitProducerID retrieves information required for Idempotent Producer + InitProducerID() (*InitProducerIDResponse, error) + + // Close shuts down all broker connections managed by this client. It is required + // to call this function before a client object passes out of scope, as it will + // otherwise leak memory. You must close any Producers or Consumers using a client + // before you close the client. + Close() error + + // Closed returns true if the client has already had Close called on it + Closed() bool +} + +const ( + // OffsetNewest stands for the log head offset, i.e. the offset that will be + // assigned to the next message that will be produced to the partition. You + // can send this to a client's GetOffset method to get this offset, or when + // calling ConsumePartition to start consuming new messages. + OffsetNewest int64 = -1 + // OffsetOldest stands for the oldest offset available on the broker for a + // partition. You can send this to a client's GetOffset method to get this + // offset, or when calling ConsumePartition to start consuming from the + // oldest offset that is still available on the broker. + OffsetOldest int64 = -2 +) + +type client struct { + conf *Config + closer, closed chan none // for shutting down background metadata updater + + // the broker addresses given to us through the constructor are not guaranteed to be returned in + // the cluster metadata (I *think* it only returns brokers who are currently leading partitions?) + // so we store them separately + seedBrokers []*Broker + deadSeeds []*Broker + + controllerID int32 // cluster controller broker id + brokers map[int32]*Broker // maps broker ids to brokers + metadata map[string]map[int32]*PartitionMetadata // maps topics to partition ids to metadata + metadataTopics map[string]none // topics that need to collect metadata + coordinators map[string]int32 // Maps consumer group names to coordinating broker IDs + + // If the number of partitions is large, we can get some churn calling cachedPartitions, + // so the result is cached. It is important to update this value whenever metadata is changed + cachedPartitionsResults map[string][maxPartitionIndex][]int32 + + lock sync.RWMutex // protects access to the maps that hold cluster state. +} + +// NewClient creates a new Client. It connects to one of the given broker addresses +// and uses that broker to automatically fetch metadata on the rest of the kafka cluster. If metadata cannot +// be retrieved from any of the given broker addresses, the client is not created. 
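+//
+// A minimal usage sketch (broker addresses assumed):
+//
+//	client, err := NewClient([]string{"kafka-1:9092", "kafka-2:9092"}, nil)
+//	if err != nil {
+//		// handle the error; the client was not created
+//	}
+//	defer client.Close()
+//	topics, _ := client.Topics()
+//	_ = topics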
+func NewClient(addrs []string, conf *Config) (Client, error) { + Logger.Println("Initializing new client") + + if conf == nil { + conf = NewConfig() + } + + if err := conf.Validate(); err != nil { + return nil, err + } + + if len(addrs) < 1 { + return nil, ConfigurationError("You must provide at least one broker address") + } + + client := &client{ + conf: conf, + closer: make(chan none), + closed: make(chan none), + brokers: make(map[int32]*Broker), + metadata: make(map[string]map[int32]*PartitionMetadata), + metadataTopics: make(map[string]none), + cachedPartitionsResults: make(map[string][maxPartitionIndex][]int32), + coordinators: make(map[string]int32), + } + + client.randomizeSeedBrokers(addrs) + + if conf.Metadata.Full { + // do an initial fetch of all cluster metadata by specifying an empty list of topics + err := client.RefreshMetadata() + switch err { + case nil: + break + case ErrLeaderNotAvailable, ErrReplicaNotAvailable, ErrTopicAuthorizationFailed, ErrClusterAuthorizationFailed: + // indicates that maybe part of the cluster is down, but is not fatal to creating the client + Logger.Println(err) + default: + close(client.closed) // we haven't started the background updater yet, so we have to do this manually + _ = client.Close() + return nil, err + } + } + go withRecover(client.backgroundMetadataUpdater) + + Logger.Println("Successfully initialized new client") + + return client, nil +} + +func (client *client) Config() *Config { + return client.conf +} + +func (client *client) Brokers() []*Broker { + client.lock.RLock() + defer client.lock.RUnlock() + brokers := make([]*Broker, 0, len(client.brokers)) + for _, broker := range client.brokers { + brokers = append(brokers, broker) + } + return brokers +} + +func (client *client) Broker(brokerID int32) (*Broker, error) { + client.lock.RLock() + defer client.lock.RUnlock() + broker, ok := client.brokers[brokerID] + if !ok { + return nil, ErrBrokerNotFound + } + _ = broker.Open(client.conf) + return broker, nil +} + +func (client *client) InitProducerID() (*InitProducerIDResponse, error) { + var err error + for broker := client.any(); broker != nil; broker = client.any() { + req := &InitProducerIDRequest{} + + response, err := broker.InitProducerID(req) + switch err.(type) { + case nil: + return response, nil + default: + // some error, remove that broker and try again + Logger.Printf("Client got error from broker %d when issuing InitProducerID : %v\n", broker.ID(), err) + _ = broker.Close() + client.deregisterBroker(broker) + } + } + return nil, err +} + +func (client *client) Close() error { + if client.Closed() { + // Chances are this is being called from a defer() and the error will go unobserved + // so we go ahead and log the event in this case. 
+ Logger.Printf("Close() called on already closed client") + return ErrClosedClient + } + + // shutdown and wait for the background thread before we take the lock, to avoid races + close(client.closer) + <-client.closed + + client.lock.Lock() + defer client.lock.Unlock() + Logger.Println("Closing Client") + + for _, broker := range client.brokers { + safeAsyncClose(broker) + } + + for _, broker := range client.seedBrokers { + safeAsyncClose(broker) + } + + client.brokers = nil + client.metadata = nil + client.metadataTopics = nil + + return nil +} + +func (client *client) Closed() bool { + client.lock.RLock() + defer client.lock.RUnlock() + + return client.brokers == nil +} + +func (client *client) Topics() ([]string, error) { + if client.Closed() { + return nil, ErrClosedClient + } + + client.lock.RLock() + defer client.lock.RUnlock() + + ret := make([]string, 0, len(client.metadata)) + for topic := range client.metadata { + ret = append(ret, topic) + } + + return ret, nil +} + +func (client *client) MetadataTopics() ([]string, error) { + if client.Closed() { + return nil, ErrClosedClient + } + + client.lock.RLock() + defer client.lock.RUnlock() + + ret := make([]string, 0, len(client.metadataTopics)) + for topic := range client.metadataTopics { + ret = append(ret, topic) + } + + return ret, nil +} + +func (client *client) Partitions(topic string) ([]int32, error) { + if client.Closed() { + return nil, ErrClosedClient + } + + partitions := client.cachedPartitions(topic, allPartitions) + + if len(partitions) == 0 { + err := client.RefreshMetadata(topic) + if err != nil { + return nil, err + } + partitions = client.cachedPartitions(topic, allPartitions) + } + + // no partitions found after refresh metadata + if len(partitions) == 0 { + return nil, ErrUnknownTopicOrPartition + } + + return partitions, nil +} + +func (client *client) WritablePartitions(topic string) ([]int32, error) { + if client.Closed() { + return nil, ErrClosedClient + } + + partitions := client.cachedPartitions(topic, writablePartitions) + + // len==0 catches when it's nil (no such topic) and the odd case when every single + // partition is undergoing leader election simultaneously. Callers have to be able to handle + // this function returning an empty slice (which is a valid return value) but catching it + // here the first time (note we *don't* catch it below where we return ErrUnknownTopicOrPartition) triggers + // a metadata refresh as a nicety so callers can just try again and don't have to manually + // trigger a refresh (otherwise they'd just keep getting a stale cached copy). 
+ if len(partitions) == 0 { + err := client.RefreshMetadata(topic) + if err != nil { + return nil, err + } + partitions = client.cachedPartitions(topic, writablePartitions) + } + + if partitions == nil { + return nil, ErrUnknownTopicOrPartition + } + + return partitions, nil +} + +func (client *client) Replicas(topic string, partitionID int32) ([]int32, error) { + if client.Closed() { + return nil, ErrClosedClient + } + + metadata := client.cachedMetadata(topic, partitionID) + + if metadata == nil { + err := client.RefreshMetadata(topic) + if err != nil { + return nil, err + } + metadata = client.cachedMetadata(topic, partitionID) + } + + if metadata == nil { + return nil, ErrUnknownTopicOrPartition + } + + if metadata.Err == ErrReplicaNotAvailable { + return dupInt32Slice(metadata.Replicas), metadata.Err + } + return dupInt32Slice(metadata.Replicas), nil +} + +func (client *client) InSyncReplicas(topic string, partitionID int32) ([]int32, error) { + if client.Closed() { + return nil, ErrClosedClient + } + + metadata := client.cachedMetadata(topic, partitionID) + + if metadata == nil { + err := client.RefreshMetadata(topic) + if err != nil { + return nil, err + } + metadata = client.cachedMetadata(topic, partitionID) + } + + if metadata == nil { + return nil, ErrUnknownTopicOrPartition + } + + if metadata.Err == ErrReplicaNotAvailable { + return dupInt32Slice(metadata.Isr), metadata.Err + } + return dupInt32Slice(metadata.Isr), nil +} + +func (client *client) OfflineReplicas(topic string, partitionID int32) ([]int32, error) { + if client.Closed() { + return nil, ErrClosedClient + } + + metadata := client.cachedMetadata(topic, partitionID) + + if metadata == nil { + err := client.RefreshMetadata(topic) + if err != nil { + return nil, err + } + metadata = client.cachedMetadata(topic, partitionID) + } + + if metadata == nil { + return nil, ErrUnknownTopicOrPartition + } + + if metadata.Err == ErrReplicaNotAvailable { + return dupInt32Slice(metadata.OfflineReplicas), metadata.Err + } + return dupInt32Slice(metadata.OfflineReplicas), nil +} + +func (client *client) Leader(topic string, partitionID int32) (*Broker, error) { + if client.Closed() { + return nil, ErrClosedClient + } + + leader, err := client.cachedLeader(topic, partitionID) + + if leader == nil { + err = client.RefreshMetadata(topic) + if err != nil { + return nil, err + } + leader, err = client.cachedLeader(topic, partitionID) + } + + return leader, err +} + +func (client *client) RefreshBrokers(addrs []string) error { + if client.Closed() { + return ErrClosedClient + } + + client.lock.Lock() + defer client.lock.Unlock() + + for _, broker := range client.brokers { + _ = broker.Close() + delete(client.brokers, broker.ID()) + } + + client.seedBrokers = nil + client.deadSeeds = nil + + client.randomizeSeedBrokers(addrs) + + return nil +} + +func (client *client) RefreshMetadata(topics ...string) error { + if client.Closed() { + return ErrClosedClient + } + + // Prior to 0.8.2, Kafka will throw exceptions on an empty topic and not return a proper + // error. This handles the case by returning an error instead of sending it + // off to Kafka. 
See: https://github.com/Shopify/sarama/pull/38#issuecomment-26362310 + for _, topic := range topics { + if topic == "" { + return ErrInvalidTopic // this is the error that 0.8.2 and later correctly return + } + } + + deadline := time.Time{} + if client.conf.Metadata.Timeout > 0 { + deadline = time.Now().Add(client.conf.Metadata.Timeout) + } + return client.tryRefreshMetadata(topics, client.conf.Metadata.Retry.Max, deadline) +} + +func (client *client) GetOffset(topic string, partitionID int32, time int64) (int64, error) { + if client.Closed() { + return -1, ErrClosedClient + } + + offset, err := client.getOffset(topic, partitionID, time) + if err != nil { + if err := client.RefreshMetadata(topic); err != nil { + return -1, err + } + return client.getOffset(topic, partitionID, time) + } + + return offset, err +} + +func (client *client) Controller() (*Broker, error) { + if client.Closed() { + return nil, ErrClosedClient + } + + if !client.conf.Version.IsAtLeast(V0_10_0_0) { + return nil, ErrUnsupportedVersion + } + + controller := client.cachedController() + if controller == nil { + if err := client.refreshMetadata(); err != nil { + return nil, err + } + controller = client.cachedController() + } + + if controller == nil { + return nil, ErrControllerNotAvailable + } + + _ = controller.Open(client.conf) + return controller, nil +} + +// deregisterController removes the cached controllerID +func (client *client) deregisterController() { + client.lock.Lock() + defer client.lock.Unlock() + delete(client.brokers, client.controllerID) +} + +// RefreshController retrieves the cluster controller from fresh metadata +// and stores it in the local cache. Requires Kafka 0.10 or higher. +func (client *client) RefreshController() (*Broker, error) { + if client.Closed() { + return nil, ErrClosedClient + } + + client.deregisterController() + + if err := client.refreshMetadata(); err != nil { + return nil, err + } + + controller := client.cachedController() + if controller == nil { + return nil, ErrControllerNotAvailable + } + + _ = controller.Open(client.conf) + return controller, nil +} + +func (client *client) Coordinator(consumerGroup string) (*Broker, error) { + if client.Closed() { + return nil, ErrClosedClient + } + + coordinator := client.cachedCoordinator(consumerGroup) + + if coordinator == nil { + if err := client.RefreshCoordinator(consumerGroup); err != nil { + return nil, err + } + coordinator = client.cachedCoordinator(consumerGroup) + } + + if coordinator == nil { + return nil, ErrConsumerCoordinatorNotAvailable + } + + _ = coordinator.Open(client.conf) + return coordinator, nil +} + +func (client *client) RefreshCoordinator(consumerGroup string) error { + if client.Closed() { + return ErrClosedClient + } + + response, err := client.getConsumerMetadata(consumerGroup, client.conf.Metadata.Retry.Max) + if err != nil { + return err + } + + client.lock.Lock() + defer client.lock.Unlock() + client.registerBroker(response.Coordinator) + client.coordinators[consumerGroup] = response.Coordinator.ID() + return nil +} + +// private broker management helpers + +func (client *client) randomizeSeedBrokers(addrs []string) { + random := rand.New(rand.NewSource(time.Now().UnixNano())) + for _, index := range random.Perm(len(addrs)) { + client.seedBrokers = append(client.seedBrokers, NewBroker(addrs[index])) + } +} + +func (client *client) updateBroker(brokers []*Broker) { + currentBroker := make(map[int32]*Broker, len(brokers)) + + for _, broker := range brokers { + currentBroker[broker.ID()] = broker + if 
client.brokers[broker.ID()] == nil { // add new broker + client.brokers[broker.ID()] = broker + Logger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr()) + } else if broker.Addr() != client.brokers[broker.ID()].Addr() { // replace broker with new address + safeAsyncClose(client.brokers[broker.ID()]) + client.brokers[broker.ID()] = broker + Logger.Printf("client/brokers replaced registered broker #%d with %s", broker.ID(), broker.Addr()) + } + } + + for id, broker := range client.brokers { + if _, exist := currentBroker[id]; !exist { // remove old broker + safeAsyncClose(broker) + delete(client.brokers, id) + Logger.Printf("client/brokers removed stale broker #%d at %s", broker.ID(), broker.Addr()) + } + } +} + +// registerBroker makes sure a broker received by a Metadata or Coordinator request is registered +// in the brokers map. It returns the broker that is registered, which may be the provided broker, +// or a previously registered Broker instance. You must hold the write lock before calling this function. +func (client *client) registerBroker(broker *Broker) { + if client.brokers == nil { + Logger.Printf("cannot register broker #%d at %s, client already closed", broker.ID(), broker.Addr()) + return + } + + if client.brokers[broker.ID()] == nil { + client.brokers[broker.ID()] = broker + Logger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr()) + } else if broker.Addr() != client.brokers[broker.ID()].Addr() { + safeAsyncClose(client.brokers[broker.ID()]) + client.brokers[broker.ID()] = broker + Logger.Printf("client/brokers replaced registered broker #%d with %s", broker.ID(), broker.Addr()) + } +} + +// deregisterBroker removes a broker from the seedBrokers list, and if it's +// not the current seed broker, removes it from the brokers map completely. +func (client *client) deregisterBroker(broker *Broker) { + client.lock.Lock() + defer client.lock.Unlock() + + if len(client.seedBrokers) > 0 && broker == client.seedBrokers[0] { + client.deadSeeds = append(client.deadSeeds, broker) + client.seedBrokers = client.seedBrokers[1:] + } else { + // we do this so that our loop in `tryRefreshMetadata` doesn't go on forever, + // but we really shouldn't have to; once that loop is made better this case can be + // removed, and the function generally can be renamed from `deregisterBroker` to + // `nextSeedBroker` or something + Logger.Printf("client/brokers deregistered broker #%d at %s", broker.ID(), broker.Addr()) + delete(client.brokers, broker.ID()) + } +} + +func (client *client) resurrectDeadBrokers() { + client.lock.Lock() + defer client.lock.Unlock() + + Logger.Printf("client/brokers resurrecting %d dead seed brokers", len(client.deadSeeds)) + client.seedBrokers = append(client.seedBrokers, client.deadSeeds...)
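+ // the dead seeds are now back in the seed rotation; clearing the list below keeps a later resurrect from re-appending the same brokers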
+ client.deadSeeds = nil +} + +func (client *client) any() *Broker { + client.lock.RLock() + defer client.lock.RUnlock() + + if len(client.seedBrokers) > 0 { + _ = client.seedBrokers[0].Open(client.conf) + return client.seedBrokers[0] + } + + // not guaranteed to be random *or* deterministic + for _, broker := range client.brokers { + _ = broker.Open(client.conf) + return broker + } + + return nil +} + +// private caching/lazy metadata helpers + +type partitionType int + +const ( + allPartitions partitionType = iota + writablePartitions + // If you add any more types, update the partition cache in update() + + // Ensure this is the last partition type value + maxPartitionIndex +) + +func (client *client) cachedMetadata(topic string, partitionID int32) *PartitionMetadata { + client.lock.RLock() + defer client.lock.RUnlock() + + partitions := client.metadata[topic] + if partitions != nil { + return partitions[partitionID] + } + + return nil +} + +func (client *client) cachedPartitions(topic string, partitionSet partitionType) []int32 { + client.lock.RLock() + defer client.lock.RUnlock() + + partitions, exists := client.cachedPartitionsResults[topic] + + if !exists { + return nil + } + return partitions[partitionSet] +} + +func (client *client) setPartitionCache(topic string, partitionSet partitionType) []int32 { + partitions := client.metadata[topic] + + if partitions == nil { + return nil + } + + ret := make([]int32, 0, len(partitions)) + for _, partition := range partitions { + if partitionSet == writablePartitions && partition.Err == ErrLeaderNotAvailable { + continue + } + ret = append(ret, partition.ID) + } + + sort.Sort(int32Slice(ret)) + return ret +} + +func (client *client) cachedLeader(topic string, partitionID int32) (*Broker, error) { + client.lock.RLock() + defer client.lock.RUnlock() + + partitions := client.metadata[topic] + if partitions != nil { + metadata, ok := partitions[partitionID] + if ok { + if metadata.Err == ErrLeaderNotAvailable { + return nil, ErrLeaderNotAvailable + } + b := client.brokers[metadata.Leader] + if b == nil { + return nil, ErrLeaderNotAvailable + } + _ = b.Open(client.conf) + return b, nil + } + } + + return nil, ErrUnknownTopicOrPartition +} + +func (client *client) getOffset(topic string, partitionID int32, time int64) (int64, error) { + broker, err := client.Leader(topic, partitionID) + if err != nil { + return -1, err + } + + request := &OffsetRequest{} + if client.conf.Version.IsAtLeast(V0_10_1_0) { + request.Version = 1 + } + request.AddBlock(topic, partitionID, time, 1) + + response, err := broker.GetAvailableOffsets(request) + if err != nil { + _ = broker.Close() + return -1, err + } + + block := response.GetBlock(topic, partitionID) + if block == nil { + _ = broker.Close() + return -1, ErrIncompleteResponse + } + if block.Err != ErrNoError { + return -1, block.Err + } + if len(block.Offsets) != 1 { + return -1, ErrOffsetOutOfRange + } + + return block.Offsets[0], nil +} + +// core metadata update logic + +func (client *client) backgroundMetadataUpdater() { + defer close(client.closed) + + if client.conf.Metadata.RefreshFrequency == time.Duration(0) { + return + } + + ticker := time.NewTicker(client.conf.Metadata.RefreshFrequency) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + if err := client.refreshMetadata(); err != nil { + Logger.Println("Client background metadata update:", err) + } + case <-client.closer: + return + } + } +} + +func (client *client) refreshMetadata() error { + var topics []string + + if 
!client.conf.Metadata.Full { + if specificTopics, err := client.MetadataTopics(); err != nil { + return err + } else if len(specificTopics) == 0 { + return ErrNoTopicsToUpdateMetadata + } else { + topics = specificTopics + } + } + + if err := client.RefreshMetadata(topics...); err != nil { + return err + } + + return nil +} + +func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int, deadline time.Time) error { + pastDeadline := func(backoff time.Duration) bool { + if !deadline.IsZero() && time.Now().Add(backoff).After(deadline) { + // we are past the deadline + return true + } + return false + } + retry := func(err error) error { + if attemptsRemaining > 0 { + backoff := client.computeBackoff(attemptsRemaining) + if pastDeadline(backoff) { + Logger.Println("client/metadata skipping last retries as we would go past the metadata timeout") + return err + } + Logger.Printf("client/metadata retrying after %dms... (%d attempts remaining)\n", backoff/time.Millisecond, attemptsRemaining) + if backoff > 0 { + time.Sleep(backoff) + } + return client.tryRefreshMetadata(topics, attemptsRemaining-1, deadline) + } + return err + } + + broker := client.any() + for ; broker != nil && !pastDeadline(0); broker = client.any() { + allowAutoTopicCreation := true + if len(topics) > 0 { + Logger.Printf("client/metadata fetching metadata for %v from broker %s\n", topics, broker.addr) + } else { + allowAutoTopicCreation = false + Logger.Printf("client/metadata fetching metadata for all topics from broker %s\n", broker.addr) + } + + req := &MetadataRequest{Topics: topics, AllowAutoTopicCreation: allowAutoTopicCreation} + if client.conf.Version.IsAtLeast(V1_0_0_0) { + req.Version = 5 + } else if client.conf.Version.IsAtLeast(V0_10_0_0) { + req.Version = 1 + } + response, err := broker.GetMetadata(req) + switch err := err.(type) { + case nil: + allKnownMetaData := len(topics) == 0 + // valid response, use it + shouldRetry, err := client.updateMetadata(response, allKnownMetaData) + if shouldRetry { + Logger.Println("client/metadata found some partitions to be leaderless") + return retry(err) // note: err can be nil + } + return err + + case PacketEncodingError: + // didn't even send, return the error + return err + + case KError: + // if SASL auth error return as this _should_ be a non retryable err for all brokers + if err == ErrSASLAuthenticationFailed { + Logger.Println("client/metadata failed SASL authentication") + return err + } + + if err == ErrTopicAuthorizationFailed { + Logger.Println("client is not authorized to access this topic. 
The topics were: ", topics) + return err + } + // else remove that broker and try again + Logger.Printf("client/metadata got error from broker %d while fetching metadata: %v\n", broker.ID(), err) + _ = broker.Close() + client.deregisterBroker(broker) + + default: + // some other error, remove that broker and try again + Logger.Printf("client/metadata got error from broker %d while fetching metadata: %v\n", broker.ID(), err) + _ = broker.Close() + client.deregisterBroker(broker) + } + } + + if broker != nil { + Logger.Printf("client/metadata not fetching metadata from broker %s as we would go past the metadata timeout\n", broker.addr) + return retry(ErrOutOfBrokers) + } + + Logger.Println("client/metadata no available broker to send metadata request to") + client.resurrectDeadBrokers() + return retry(ErrOutOfBrokers) +} + +// if no fatal error occurred, reports whether a retry is needed because some partitions returned ErrLeaderNotAvailable +func (client *client) updateMetadata(data *MetadataResponse, allKnownMetaData bool) (retry bool, err error) { + if client.Closed() { + return + } + + client.lock.Lock() + defer client.lock.Unlock() + + // For all the brokers we received: + // - if it is a new ID, save it + // - if it is an existing ID, but the address we have is stale, discard the old one and save it + // - if a previously known ID is missing from the response, remove the old broker + // - otherwise ignore it, replacing our existing one would just bounce the connection + client.updateBroker(data.Brokers) + + client.controllerID = data.ControllerID + + if allKnownMetaData { + client.metadata = make(map[string]map[int32]*PartitionMetadata) + client.metadataTopics = make(map[string]none) + client.cachedPartitionsResults = make(map[string][maxPartitionIndex][]int32) + } + for _, topic := range data.Topics { + // topics must be added to `metadataTopics` first, to guarantee that all + // requested topics are recorded and stay trackable for the periodic + // metadata refresh.
+ if _, exists := client.metadataTopics[topic.Name]; !exists { + client.metadataTopics[topic.Name] = none{} + } + delete(client.metadata, topic.Name) + delete(client.cachedPartitionsResults, topic.Name) + + switch topic.Err { + case ErrNoError: + // no-op + case ErrInvalidTopic, ErrTopicAuthorizationFailed: // don't retry, don't store partial results + err = topic.Err + continue + case ErrUnknownTopicOrPartition: // retry, do not store partial partition results + err = topic.Err + retry = true + continue + case ErrLeaderNotAvailable: // retry, but store partial partition results + retry = true + default: // don't retry, don't store partial results + Logger.Printf("Unexpected topic-level metadata error: %s", topic.Err) + err = topic.Err + continue + } + + client.metadata[topic.Name] = make(map[int32]*PartitionMetadata, len(topic.Partitions)) + for _, partition := range topic.Partitions { + client.metadata[topic.Name][partition.ID] = partition + if partition.Err == ErrLeaderNotAvailable { + retry = true + } + } + + var partitionCache [maxPartitionIndex][]int32 + partitionCache[allPartitions] = client.setPartitionCache(topic.Name, allPartitions) + partitionCache[writablePartitions] = client.setPartitionCache(topic.Name, writablePartitions) + client.cachedPartitionsResults[topic.Name] = partitionCache + } + + return +} + +func (client *client) cachedCoordinator(consumerGroup string) *Broker { + client.lock.RLock() + defer client.lock.RUnlock() + if coordinatorID, ok := client.coordinators[consumerGroup]; ok { + return client.brokers[coordinatorID] + } + return nil +} + +func (client *client) cachedController() *Broker { + client.lock.RLock() + defer client.lock.RUnlock() + + return client.brokers[client.controllerID] +} + +func (client *client) computeBackoff(attemptsRemaining int) time.Duration { + if client.conf.Metadata.Retry.BackoffFunc != nil { + maxRetries := client.conf.Metadata.Retry.Max + retries := maxRetries - attemptsRemaining + return client.conf.Metadata.Retry.BackoffFunc(retries, maxRetries) + } + return client.conf.Metadata.Retry.Backoff +} + +func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemaining int) (*FindCoordinatorResponse, error) { + retry := func(err error) (*FindCoordinatorResponse, error) { + if attemptsRemaining > 0 { + backoff := client.computeBackoff(attemptsRemaining) + Logger.Printf("client/coordinator retrying after %dms... 
(%d attempts remaining)\n", backoff/time.Millisecond, attemptsRemaining) + time.Sleep(backoff) + return client.getConsumerMetadata(consumerGroup, attemptsRemaining-1) + } + return nil, err + } + + for broker := client.any(); broker != nil; broker = client.any() { + Logger.Printf("client/coordinator requesting coordinator for consumergroup %s from %s\n", consumerGroup, broker.Addr()) + + request := new(FindCoordinatorRequest) + request.CoordinatorKey = consumerGroup + request.CoordinatorType = CoordinatorGroup + + response, err := broker.FindCoordinator(request) + if err != nil { + Logger.Printf("client/coordinator request to broker %s failed: %s\n", broker.Addr(), err) + + switch err.(type) { + case PacketEncodingError: + return nil, err + default: + _ = broker.Close() + client.deregisterBroker(broker) + continue + } + } + + switch response.Err { + case ErrNoError: + Logger.Printf("client/coordinator coordinator for consumergroup %s is #%d (%s)\n", consumerGroup, response.Coordinator.ID(), response.Coordinator.Addr()) + return response, nil + + case ErrConsumerCoordinatorNotAvailable: + Logger.Printf("client/coordinator coordinator for consumer group %s is not available\n", consumerGroup) + + // This is very ugly, but this scenario will only happen once per cluster. + // The __consumer_offsets topic only has to be created one time. + // The number of partitions is not configurable, but partition 0 should always exist. + if _, err := client.Leader("__consumer_offsets", 0); err != nil { + Logger.Printf("client/coordinator the __consumer_offsets topic is not completely initialized yet. Waiting 2 seconds...\n") + time.Sleep(2 * time.Second) + } + + return retry(ErrConsumerCoordinatorNotAvailable) + case ErrGroupAuthorizationFailed: + Logger.Printf("client was not authorized to access group %s while attempting to find coordinator", consumerGroup) + return retry(ErrGroupAuthorizationFailed) + + default: + return nil, response.Err + } + } + + Logger.Println("client/coordinator no available broker to send consumer metadata request to") + client.resurrectDeadBrokers() + return retry(ErrOutOfBrokers) +} + +// nopCloserClient embeds an existing Client, but disables +// the Close method (yet all other methods pass +// through unchanged). This is for use in larger structs +// where it is undesirable to close the client that was +// passed in by the caller. +type nopCloserClient struct { + Client +} + +// Close intercepts and purposely does not call the underlying +// client's Close() method.
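+// NewConsumerFromClient (in consumer.go) relies on this wrapper, so closing
+// the derived consumer never closes a Client that the caller still owns.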
+func (ncc *nopCloserClient) Close() error { + return nil +} diff --git a/vendor/github.com/Shopify/sarama/compress.go b/vendor/github.com/Shopify/sarama/compress.go new file mode 100644 index 00000000000..12cd7c3d510 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/compress.go @@ -0,0 +1,194 @@ +package sarama + +import ( + "bytes" + "compress/gzip" + "fmt" + "sync" + + snappy "github.com/eapache/go-xerial-snappy" + "github.com/pierrec/lz4" +) + +var ( + lz4WriterPool = sync.Pool{ + New: func() interface{} { + return lz4.NewWriter(nil) + }, + } + + gzipWriterPool = sync.Pool{ + New: func() interface{} { + return gzip.NewWriter(nil) + }, + } + gzipWriterPoolForCompressionLevel1 = sync.Pool{ + New: func() interface{} { + gz, err := gzip.NewWriterLevel(nil, 1) + if err != nil { + panic(err) + } + return gz + }, + } + gzipWriterPoolForCompressionLevel2 = sync.Pool{ + New: func() interface{} { + gz, err := gzip.NewWriterLevel(nil, 2) + if err != nil { + panic(err) + } + return gz + }, + } + gzipWriterPoolForCompressionLevel3 = sync.Pool{ + New: func() interface{} { + gz, err := gzip.NewWriterLevel(nil, 3) + if err != nil { + panic(err) + } + return gz + }, + } + gzipWriterPoolForCompressionLevel4 = sync.Pool{ + New: func() interface{} { + gz, err := gzip.NewWriterLevel(nil, 4) + if err != nil { + panic(err) + } + return gz + }, + } + gzipWriterPoolForCompressionLevel5 = sync.Pool{ + New: func() interface{} { + gz, err := gzip.NewWriterLevel(nil, 5) + if err != nil { + panic(err) + } + return gz + }, + } + gzipWriterPoolForCompressionLevel6 = sync.Pool{ + New: func() interface{} { + gz, err := gzip.NewWriterLevel(nil, 6) + if err != nil { + panic(err) + } + return gz + }, + } + gzipWriterPoolForCompressionLevel7 = sync.Pool{ + New: func() interface{} { + gz, err := gzip.NewWriterLevel(nil, 7) + if err != nil { + panic(err) + } + return gz + }, + } + gzipWriterPoolForCompressionLevel8 = sync.Pool{ + New: func() interface{} { + gz, err := gzip.NewWriterLevel(nil, 8) + if err != nil { + panic(err) + } + return gz + }, + } + gzipWriterPoolForCompressionLevel9 = sync.Pool{ + New: func() interface{} { + gz, err := gzip.NewWriterLevel(nil, 9) + if err != nil { + panic(err) + } + return gz + }, + } +) + +func compress(cc CompressionCodec, level int, data []byte) ([]byte, error) { + switch cc { + case CompressionNone: + return data, nil + case CompressionGZIP: + var ( + err error + buf bytes.Buffer + writer *gzip.Writer + ) + + switch level { + case CompressionLevelDefault: + writer = gzipWriterPool.Get().(*gzip.Writer) + defer gzipWriterPool.Put(writer) + writer.Reset(&buf) + case 1: + writer = gzipWriterPoolForCompressionLevel1.Get().(*gzip.Writer) + defer gzipWriterPoolForCompressionLevel1.Put(writer) + writer.Reset(&buf) + case 2: + writer = gzipWriterPoolForCompressionLevel2.Get().(*gzip.Writer) + defer gzipWriterPoolForCompressionLevel2.Put(writer) + writer.Reset(&buf) + case 3: + writer = gzipWriterPoolForCompressionLevel3.Get().(*gzip.Writer) + defer gzipWriterPoolForCompressionLevel3.Put(writer) + writer.Reset(&buf) + case 4: + writer = gzipWriterPoolForCompressionLevel4.Get().(*gzip.Writer) + defer gzipWriterPoolForCompressionLevel4.Put(writer) + writer.Reset(&buf) + case 5: + writer = gzipWriterPoolForCompressionLevel5.Get().(*gzip.Writer) + defer gzipWriterPoolForCompressionLevel5.Put(writer) + writer.Reset(&buf) + case 6: + writer = gzipWriterPoolForCompressionLevel6.Get().(*gzip.Writer) + defer gzipWriterPoolForCompressionLevel6.Put(writer) + writer.Reset(&buf) + case 7: + writer = 
gzipWriterPoolForCompressionLevel7.Get().(*gzip.Writer) + defer gzipWriterPoolForCompressionLevel7.Put(writer) + writer.Reset(&buf) + case 8: + writer = gzipWriterPoolForCompressionLevel8.Get().(*gzip.Writer) + defer gzipWriterPoolForCompressionLevel8.Put(writer) + writer.Reset(&buf) + case 9: + writer = gzipWriterPoolForCompressionLevel9.Get().(*gzip.Writer) + defer gzipWriterPoolForCompressionLevel9.Put(writer) + writer.Reset(&buf) + default: + writer, err = gzip.NewWriterLevel(&buf, level) + if err != nil { + return nil, err + } + } + if _, err := writer.Write(data); err != nil { + return nil, err + } + if err := writer.Close(); err != nil { + return nil, err + } + return buf.Bytes(), nil + case CompressionSnappy: + return snappy.Encode(data), nil + case CompressionLZ4: + writer := lz4WriterPool.Get().(*lz4.Writer) + defer lz4WriterPool.Put(writer) + + var buf bytes.Buffer + writer.Reset(&buf) + + if _, err := writer.Write(data); err != nil { + return nil, err + } + if err := writer.Close(); err != nil { + return nil, err + } + return buf.Bytes(), nil + case CompressionZSTD: + return zstdCompress(nil, data) + default: + return nil, PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", cc)} + } +} diff --git a/vendor/github.com/Shopify/sarama/config.go b/vendor/github.com/Shopify/sarama/config.go new file mode 100644 index 00000000000..43e739cad95 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/config.go @@ -0,0 +1,765 @@ +package sarama + +import ( + "compress/gzip" + "crypto/tls" + "fmt" + "io/ioutil" + "net" + "regexp" + "time" + + "github.com/rcrowley/go-metrics" + "golang.org/x/net/proxy" +) + +const defaultClientID = "sarama" + +var validID = regexp.MustCompile(`\A[A-Za-z0-9._-]+\z`) + +// Config is used to pass multiple configuration options to Sarama's constructors. +type Config struct { + // Admin is the namespace for ClusterAdmin properties used by the administrative Kafka client. + Admin struct { + Retry struct { + // The total number of times to retry sending (retriable) admin requests (default 5). + // Similar to the `retries` setting of the JVM AdminClientConfig. + Max int + // Backoff time between retries of a failed request (default 100ms) + Backoff time.Duration + } + // The maximum duration the administrative Kafka client will wait for ClusterAdmin operations, + // including topics, brokers, configurations and ACLs (defaults to 3 seconds). + Timeout time.Duration + } + + // Net is the namespace for network-level properties used by the Broker, and + // shared by the Client/Producer/Consumer. + Net struct { + // How many outstanding requests a connection is allowed to have before + // sending on it blocks (default 5). + MaxOpenRequests int + + // All three of the below configurations are similar to the + // `socket.timeout.ms` setting in JVM kafka. All of them default + // to 30 seconds. + DialTimeout time.Duration // How long to wait for the initial connection. + ReadTimeout time.Duration // How long to wait for a response. + WriteTimeout time.Duration // How long to wait for a transmit. + + TLS struct { + // Whether or not to use TLS when connecting to the broker + // (defaults to false). + Enable bool + // The TLS configuration to use for secure connections if + // enabled (defaults to nil). + Config *tls.Config + } + + // SASL based authentication with broker. 
Supported mechanisms are + // PLAIN, SCRAM-SHA-256/512, OAUTHBEARER and GSSAPI; see Mechanism below. + SASL struct { + // Whether or not to use SASL authentication when connecting to the broker + // (defaults to false). + Enable bool + // SASLMechanism is the name of the enabled SASL mechanism. + // Possible values: PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, OAUTHBEARER, GSSAPI (defaults to PLAIN). + Mechanism SASLMechanism + // Version is the SASL protocol version to use. + // Kafka > 1.x should use V1, except on Azure EventHub, which uses V0. + Version int16 + // Whether or not to send the Kafka SASL handshake first if enabled + // (defaults to true). You should only set this to false if you're using + // a non-Kafka SASL proxy. + Handshake bool + // AuthIdentity is an (optional) authorization identity (authzid) to + // use for SASL/PLAIN authentication (if different from User) when + // an authenticated user is permitted to act as the presented + // alternative user. See RFC4616 for details. + AuthIdentity string + // User is the authentication identity (authcid) to present for + // SASL/PLAIN or SASL/SCRAM authentication + User string + // Password for SASL/PLAIN authentication + Password string + // authorization ID (authzid) used for SASL/SCRAM authentication + SCRAMAuthzID string + // SCRAMClientGeneratorFunc is a generator of a user provided implementation of a SCRAM + // client used to perform the SCRAM exchange with the server. + SCRAMClientGeneratorFunc func() SCRAMClient + // TokenProvider is a user-defined callback for generating + // access tokens for SASL/OAUTHBEARER auth. See the + // AccessTokenProvider interface docs for proper implementation + // guidelines. + TokenProvider AccessTokenProvider + + GSSAPI GSSAPIConfig + } + + // KeepAlive specifies the keep-alive period for an active network connection (defaults to 0). + // If zero or positive, keep-alives are enabled. + // If negative, keep-alives are disabled. + KeepAlive time.Duration + + // LocalAddr is the local address to use when dialing an + // address. The address must be of a compatible type for the + // network being dialed. + // If nil, a local address is automatically chosen. + LocalAddr net.Addr + + Proxy struct { + // Whether or not to use a proxy when connecting to the broker + // (defaults to false). + Enable bool + // The proxy dialer to use when enabled (defaults to nil). + Dialer proxy.Dialer + } + } + + // Metadata is the namespace for metadata management properties used by the + // Client, and shared by the Producer/Consumer. + Metadata struct { + Retry struct { + // The total number of times to retry a metadata request when the + // cluster is in the middle of a leader election (default 3). + Max int + // How long to wait for leader election to occur before retrying + // (default 250ms). Similar to the JVM's `retry.backoff.ms`. + Backoff time.Duration + // Called to compute backoff time dynamically. Useful for implementing + // more sophisticated backoff strategies. This takes precedence over + // `Backoff` if set. + BackoffFunc func(retries, maxRetries int) time.Duration + } + // How frequently to refresh the cluster metadata in the background. + // Defaults to 10 minutes. Set to 0 to disable. Similar to + // `topic.metadata.refresh.interval.ms` in the JVM version. + RefreshFrequency time.Duration + + // Whether to maintain a full set of metadata for all topics, or just + // the minimal set that has been necessary so far.
The full set is simpler + // and usually more convenient, but can take up a substantial amount of + // memory if you have many topics and partitions. Defaults to true. + Full bool + + // How long to wait for a successful metadata response. + // Disabled by default, which means a metadata request against an unreachable + // cluster (all brokers are unreachable or unresponsive) can take up to + // `Net.[Dial|Read]Timeout * BrokerCount * (Metadata.Retry.Max + 1) + Metadata.Retry.Backoff * Metadata.Retry.Max` + // to fail. + Timeout time.Duration + } + + // Producer is the namespace for configuration related to producing messages, + // used by the Producer. + Producer struct { + // The maximum permitted size of a message (defaults to 1000000). Should be + // set equal to or smaller than the broker's `message.max.bytes`. + MaxMessageBytes int + // The level of acknowledgement reliability needed from the broker (defaults + // to WaitForLocal). Equivalent to the `request.required.acks` setting of the + // JVM producer. + RequiredAcks RequiredAcks + // The maximum duration the broker will wait for the receipt of the number of + // RequiredAcks (defaults to 10 seconds). This is only relevant when + // RequiredAcks is set to WaitForAll or a number > 1. Only supports + // millisecond resolution; nanoseconds will be truncated. Equivalent to + // the JVM producer's `request.timeout.ms` setting. + Timeout time.Duration + // The type of compression to use on messages (defaults to no compression). + // Similar to `compression.codec` setting of the JVM producer. + Compression CompressionCodec + // The level of compression to use on messages. The meaning depends + // on the actual compression type used and defaults to the default compression + // level for the codec. + CompressionLevel int + // Generates partitioners for choosing the partition to send messages to + // (defaults to hashing the message key). Similar to the `partitioner.class` + // setting for the JVM producer. + Partitioner PartitionerConstructor + // If enabled, the producer will ensure that exactly one copy of each message is + // written. + Idempotent bool + + // Return specifies what channels will be populated. If they are set to true, + // you must read from the respective channels to prevent deadlock. If, + // however, this config is used to create a `SyncProducer`, both must be set + // to true and you shall not read from the channels since the producer does + // this internally. + Return struct { + // If enabled, successfully delivered messages will be returned on the + // Successes channel (default disabled). + Successes bool + + // If enabled, messages that failed to deliver will be returned on the + // Errors channel, including the error (default enabled). + Errors bool + } + + // The following config options control how often messages are batched up and + // sent to the broker. By default, messages are sent as fast as possible, and + // all messages received while the current batch is in-flight are placed + // into the subsequent batch. + Flush struct { + // The best-effort number of bytes needed to trigger a flush. Use the + // global sarama.MaxRequestSize to set a hard upper limit. + Bytes int + // The best-effort number of messages needed to trigger a flush. Use + // `MaxMessages` to set a hard upper limit. + Messages int + // The best-effort frequency of flushes. Equivalent to + // `queue.buffering.max.ms` setting of JVM producer.
+ Frequency time.Duration + // The maximum number of messages the producer will send in a single + // broker request. Defaults to 0 for unlimited. Similar to + // `queue.buffering.max.messages` in the JVM producer. + MaxMessages int + } + + Retry struct { + // The total number of times to retry sending a message (default 3). + // Similar to the `message.send.max.retries` setting of the JVM producer. + Max int + // How long to wait for the cluster to settle between retries + // (default 100ms). Similar to the `retry.backoff.ms` setting of the + // JVM producer. + Backoff time.Duration + // Called to compute backoff time dynamically. Useful for implementing + // more sophisticated backoff strategies. This takes precedence over + // `Backoff` if set. + BackoffFunc func(retries, maxRetries int) time.Duration + } + + // Interceptors to be called when the producer dispatcher reads a + // message for the first time. Interceptors allow you to intercept and + // possibly mutate the message before it is published to the Kafka + // cluster. The *ProducerMessage modified by the first interceptor's + // OnSend() is passed to the second interceptor's OnSend(), and so on in + // the interceptor chain. + Interceptors []ProducerInterceptor + } + + // Consumer is the namespace for configuration related to consuming messages, + // used by the Consumer. + Consumer struct { + + // Group is the namespace for configuring the consumer group. + Group struct { + Session struct { + // The timeout used to detect consumer failures when using Kafka's group management facility. + // The consumer sends periodic heartbeats to indicate its liveness to the broker. + // If no heartbeats are received by the broker before the expiration of this session timeout, + // then the broker will remove this consumer from the group and initiate a rebalance. + // Note that the value must be in the allowable range as configured in the broker configuration + // by `group.min.session.timeout.ms` and `group.max.session.timeout.ms` (default 10s) + Timeout time.Duration + } + Heartbeat struct { + // The expected time between heartbeats to the consumer coordinator when using Kafka's group + // management facilities. Heartbeats are used to ensure that the consumer's session stays active and + // to facilitate rebalancing when new consumers join or leave the group. + // The value must be set lower than Consumer.Group.Session.Timeout, but typically should be set no + // higher than 1/3 of that value. + // It can be adjusted even lower to control the expected time for normal rebalances (default 3s) + Interval time.Duration + } + Rebalance struct { + // Strategy for allocating topic partitions to members (default BalanceStrategyRange) + Strategy BalanceStrategy + // The maximum allowed time for each worker to join the group once a rebalance has begun. + // This is basically a limit on the amount of time needed for all tasks to flush any pending + // data and commit offsets. If the timeout is exceeded, then the worker will be removed from + // the group, which will cause offset commit failures (default 60s). + Timeout time.Duration + + Retry struct { + // When a new consumer joins a consumer group, the set of consumers attempt to "rebalance" + // the load to assign partitions to each consumer. If the set of consumers changes while + // this assignment is taking place, the rebalance will fail and retry. This setting controls + // the maximum number of attempts before giving up (default 4).
+ Max int + // Backoff time between retries during rebalance (default 2s) + Backoff time.Duration + } + } + Member struct { + // Custom metadata to include when joining the group. The user data for all joined members + // can be retrieved by sending a DescribeGroupRequest to the broker that is the + // coordinator for the group. + UserData []byte + } + } + + Retry struct { + // How long to wait after failing to read from a partition before + // trying again (default 2s). + Backoff time.Duration + // Called to compute backoff time dynamically. Useful for implementing + // more sophisticated backoff strategies. This takes precedence over + // `Backoff` if set. + BackoffFunc func(retries int) time.Duration + } + + // Fetch is the namespace for controlling how many bytes are retrieved by any + // given request. + Fetch struct { + // The minimum number of message bytes to fetch in a request - the broker + // will wait until at least this many are available. The default is 1, + // as 0 causes the consumer to spin when no messages are available. + // Equivalent to the JVM's `fetch.min.bytes`. + Min int32 + // The default number of message bytes to fetch from the broker in each + // request (default 1MB). This should be larger than the majority of + // your messages, or else the consumer will spend a lot of time + // negotiating sizes and not actually consuming. Similar to the JVM's + // `fetch.message.max.bytes`. + Default int32 + // The maximum number of message bytes to fetch from the broker in a + // single request. Messages larger than this will return + // ErrMessageTooLarge and will not be consumable, so you must be sure + // this is at least as large as your largest message. Defaults to 0 + // (no limit). Similar to the JVM's `fetch.message.max.bytes`. The + // global `sarama.MaxResponseSize` still applies. + Max int32 + } + // The maximum amount of time the broker will wait for Consumer.Fetch.Min + // bytes to become available before it returns fewer than that anyway. The + // default is 250ms, since 0 causes the consumer to spin when no events are + // available. 100-500ms is a reasonable range for most cases. Kafka only + // supports precision up to milliseconds; nanoseconds will be truncated. + // Equivalent to the JVM's `fetch.wait.max.ms`. + MaxWaitTime time.Duration + + // The maximum amount of time the consumer expects a message to take to + // process for the user. If writing to the Messages channel takes longer + // than this, that partition will stop fetching more messages until it + // can proceed again. + // Note that, since the Messages channel is buffered, the actual grace time is + // (MaxProcessingTime * ChannelBufferSize). Defaults to 100ms. + // If a message is not written to the Messages channel between two ticks + // of the expiryTicker then a timeout is detected. + // Using a ticker instead of a timer to detect timeouts should typically + // result in many fewer calls to Timer functions which may result in a + // significant performance improvement if many messages are being sent + // and timeouts are infrequent. + // The disadvantage of using a ticker instead of a timer is that + // timeouts will be less accurate. That is, the effective timeout could + // be between `MaxProcessingTime` and `2 * MaxProcessingTime`. For + // example, if `MaxProcessingTime` is 100ms then a delay of 180ms + // between two messages being sent may not be recognized as a timeout. + MaxProcessingTime time.Duration + + // Return specifies what channels will be populated.
If they are set to true, + // you must read from them to prevent deadlock. + Return struct { + // If enabled, any errors that occurred while consuming are returned on + // the Errors channel (default disabled). + Errors bool + } + + // Offsets specifies configuration for how and when to commit consumed + // offsets. This currently requires the manual use of an OffsetManager + // but will eventually be automated. + Offsets struct { + // Deprecated: CommitInterval exists for historical compatibility + // and should not be used. Please use Consumer.Offsets.AutoCommit + CommitInterval time.Duration + + // AutoCommit specifies configuration for committing updated offsets automatically. + AutoCommit struct { + // Whether or not to auto-commit updated offsets back to the broker. + // (default enabled). + Enable bool + + // How frequently to commit updated offsets. Ineffective unless + // auto-commit is enabled (default 1s) + Interval time.Duration + } + + // The initial offset to use if no offset was previously committed. + // Should be OffsetNewest or OffsetOldest. Defaults to OffsetNewest. + Initial int64 + + // The retention duration for committed offsets. If zero, disabled + // (in which case the `offsets.retention.minutes` option on the + // broker will be used). Kafka only supports precision up to + // milliseconds; nanoseconds will be truncated. Requires Kafka + // broker version 0.9.0 or later. + // (default is 0: disabled). + Retention time.Duration + + Retry struct { + // The total number of times to retry failing commit + // requests during OffsetManager shutdown (default 3). + Max int + } + } + + // IsolationLevel supports two modes: + // - use `ReadUncommitted` (default) to consume and return all messages on the message channel + // - use `ReadCommitted` to hide messages that are part of an aborted transaction + IsolationLevel IsolationLevel + + // Interceptors to be called just before the record is sent to the + // messages channel. Interceptors allow you to intercept and possibly + // mutate the message before it is returned to the client. + // The *ConsumerMessage modified by the first interceptor's OnConsume() is + // passed to the second interceptor's OnConsume(), and so on in the + // interceptor chain. + Interceptors []ConsumerInterceptor + } + + // A user-provided string sent with every request to the brokers for logging, + // debugging, and auditing purposes. Defaults to "sarama", but you should + // probably set it to something specific to your application. + ClientID string + // A rack identifier for this client. This can be any string value which + // indicates where this client is physically located. + // It corresponds with the broker config 'broker.rack' + RackID string + // The number of events to buffer in internal and external channels. This + // permits the producer and consumer to continue processing some messages + // in the background while user code is working, greatly improving throughput. + // Defaults to 256. + ChannelBufferSize int + // The version of Kafka that Sarama will assume it is running against. + // Defaults to the oldest supported stable version. Since Kafka provides + // backwards-compatibility, setting it to a version older than you have + // will not break anything, although it may prevent you from using the + // latest features. Setting it to a version greater than you are actually + // running may lead to random breakage. + Version KafkaVersion + // The registry to define metrics into. + // Defaults to a local registry.
+ // If you want to disable metrics gathering, set "metrics.UseNilMetrics" to "true" + // prior to starting Sarama. + // See Examples on how to use the metrics registry + MetricRegistry metrics.Registry +} + +// NewConfig returns a new configuration instance with sane defaults. +func NewConfig() *Config { + c := &Config{} + + c.Admin.Retry.Max = 5 + c.Admin.Retry.Backoff = 100 * time.Millisecond + c.Admin.Timeout = 3 * time.Second + + c.Net.MaxOpenRequests = 5 + c.Net.DialTimeout = 30 * time.Second + c.Net.ReadTimeout = 30 * time.Second + c.Net.WriteTimeout = 30 * time.Second + c.Net.SASL.Handshake = true + c.Net.SASL.Version = SASLHandshakeV0 + + c.Metadata.Retry.Max = 3 + c.Metadata.Retry.Backoff = 250 * time.Millisecond + c.Metadata.RefreshFrequency = 10 * time.Minute + c.Metadata.Full = true + + c.Producer.MaxMessageBytes = 1000000 + c.Producer.RequiredAcks = WaitForLocal + c.Producer.Timeout = 10 * time.Second + c.Producer.Partitioner = NewHashPartitioner + c.Producer.Retry.Max = 3 + c.Producer.Retry.Backoff = 100 * time.Millisecond + c.Producer.Return.Errors = true + c.Producer.CompressionLevel = CompressionLevelDefault + + c.Consumer.Fetch.Min = 1 + c.Consumer.Fetch.Default = 1024 * 1024 + c.Consumer.Retry.Backoff = 2 * time.Second + c.Consumer.MaxWaitTime = 250 * time.Millisecond + c.Consumer.MaxProcessingTime = 100 * time.Millisecond + c.Consumer.Return.Errors = false + c.Consumer.Offsets.AutoCommit.Enable = true + c.Consumer.Offsets.AutoCommit.Interval = 1 * time.Second + c.Consumer.Offsets.Initial = OffsetNewest + c.Consumer.Offsets.Retry.Max = 3 + + c.Consumer.Group.Session.Timeout = 10 * time.Second + c.Consumer.Group.Heartbeat.Interval = 3 * time.Second + c.Consumer.Group.Rebalance.Strategy = BalanceStrategyRange + c.Consumer.Group.Rebalance.Timeout = 60 * time.Second + c.Consumer.Group.Rebalance.Retry.Max = 4 + c.Consumer.Group.Rebalance.Retry.Backoff = 2 * time.Second + + c.ClientID = defaultClientID + c.ChannelBufferSize = 256 + c.Version = DefaultVersion + c.MetricRegistry = metrics.NewRegistry() + + return c +} + +// Validate checks a Config instance. It will return a +// ConfigurationError if the specified values don't make sense. 
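+// A minimal sketch of typical usage (the ClientID value here is only
+// illustrative, not an upstream default):
+//
+//	cfg := NewConfig()
+//	cfg.ClientID = "my-service"
+//	cfg.Producer.Return.Successes = true
+//	if err := cfg.Validate(); err != nil {
+//		// reject the configuration before constructing any client
+//	}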
+func (c *Config) Validate() error { + // some configuration values should be warned on but not fail completely, do those first + if !c.Net.TLS.Enable && c.Net.TLS.Config != nil { + Logger.Println("Net.TLS is disabled but a non-nil configuration was provided.") + } + if !c.Net.SASL.Enable { + if c.Net.SASL.User != "" { + Logger.Println("Net.SASL is disabled but a non-empty username was provided.") + } + if c.Net.SASL.Password != "" { + Logger.Println("Net.SASL is disabled but a non-empty password was provided.") + } + } + if c.Producer.RequiredAcks > 1 { + Logger.Println("Producer.RequiredAcks > 1 is deprecated and will raise an exception with kafka >= 0.8.2.0.") + } + if c.Producer.MaxMessageBytes >= int(MaxRequestSize) { + Logger.Println("Producer.MaxMessageBytes must be smaller than MaxRequestSize; it will be ignored.") + } + if c.Producer.Flush.Bytes >= int(MaxRequestSize) { + Logger.Println("Producer.Flush.Bytes must be smaller than MaxRequestSize; it will be ignored.") + } + if (c.Producer.Flush.Bytes > 0 || c.Producer.Flush.Messages > 0) && c.Producer.Flush.Frequency == 0 { + Logger.Println("Producer.Flush: Bytes or Messages are set, but Frequency is not; messages may not get flushed.") + } + if c.Producer.Timeout%time.Millisecond != 0 { + Logger.Println("Producer.Timeout only supports millisecond resolution; nanoseconds will be truncated.") + } + if c.Consumer.MaxWaitTime < 100*time.Millisecond { + Logger.Println("Consumer.MaxWaitTime is very low, which can cause high CPU and network usage. See documentation for details.") + } + if c.Consumer.MaxWaitTime%time.Millisecond != 0 { + Logger.Println("Consumer.MaxWaitTime only supports millisecond precision; nanoseconds will be truncated.") + } + if c.Consumer.Offsets.Retention%time.Millisecond != 0 { + Logger.Println("Consumer.Offsets.Retention only supports millisecond precision; nanoseconds will be truncated.") + } + if c.Consumer.Group.Session.Timeout%time.Millisecond != 0 { + Logger.Println("Consumer.Group.Session.Timeout only supports millisecond precision; nanoseconds will be truncated.") + } + if c.Consumer.Group.Heartbeat.Interval%time.Millisecond != 0 { + Logger.Println("Consumer.Group.Heartbeat.Interval only supports millisecond precision; nanoseconds will be truncated.") + } + if c.Consumer.Group.Rebalance.Timeout%time.Millisecond != 0 { + Logger.Println("Consumer.Group.Rebalance.Timeout only supports millisecond precision; nanoseconds will be truncated.") + } + if c.ClientID == defaultClientID { + Logger.Println("ClientID is the default of 'sarama', you should consider setting it to something application-specific.") + } + + // validate Net values + switch { + case c.Net.MaxOpenRequests <= 0: + return ConfigurationError("Net.MaxOpenRequests must be > 0") + case c.Net.DialTimeout <= 0: + return ConfigurationError("Net.DialTimeout must be > 0") + case c.Net.ReadTimeout <= 0: + return ConfigurationError("Net.ReadTimeout must be > 0") + case c.Net.WriteTimeout <= 0: + return ConfigurationError("Net.WriteTimeout must be > 0") + case c.Net.SASL.Enable: + if c.Net.SASL.Mechanism == "" { + c.Net.SASL.Mechanism = SASLTypePlaintext + } + + switch c.Net.SASL.Mechanism { + case SASLTypePlaintext: + if c.Net.SASL.User == "" { + return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled") + } + if c.Net.SASL.Password == "" { + return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled") + } + case SASLTypeOAuth: + if c.Net.SASL.TokenProvider == nil { + return ConfigurationError("An 
AccessTokenProvider instance must be provided to Net.SASL.TokenProvider") + } + case SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512: + if c.Net.SASL.User == "" { + return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled") + } + if c.Net.SASL.Password == "" { + return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled") + } + if c.Net.SASL.SCRAMClientGeneratorFunc == nil { + return ConfigurationError("A SCRAMClientGeneratorFunc function must be provided to Net.SASL.SCRAMClientGeneratorFunc") + } + case SASLTypeGSSAPI: + if c.Net.SASL.GSSAPI.ServiceName == "" { + return ConfigurationError("Net.SASL.GSSAPI.ServiceName must not be empty when GSS-API mechanism is used") + } + + if c.Net.SASL.GSSAPI.AuthType == KRB5_USER_AUTH { + if c.Net.SASL.GSSAPI.Password == "" { + return ConfigurationError("Net.SASL.GSSAPI.Password must not be empty when GSS-API " + + "mechanism is used and Net.SASL.GSSAPI.AuthType = KRB5_USER_AUTH") + } + } else if c.Net.SASL.GSSAPI.AuthType == KRB5_KEYTAB_AUTH { + if c.Net.SASL.GSSAPI.KeyTabPath == "" { + return ConfigurationError("Net.SASL.GSSAPI.KeyTabPath must not be empty when GSS-API mechanism is used" + + " and Net.SASL.GSSAPI.AuthType = KRB5_KEYTAB_AUTH") + } + } else { + return ConfigurationError("Net.SASL.GSSAPI.AuthType is invalid. Possible values are KRB5_USER_AUTH and KRB5_KEYTAB_AUTH") + } + if c.Net.SASL.GSSAPI.KerberosConfigPath == "" { + return ConfigurationError("Net.SASL.GSSAPI.KerberosConfigPath must not be empty when GSS-API mechanism is used") + } + if c.Net.SASL.GSSAPI.Username == "" { + return ConfigurationError("Net.SASL.GSSAPI.Username must not be empty when GSS-API mechanism is used") + } + if c.Net.SASL.GSSAPI.Realm == "" { + return ConfigurationError("Net.SASL.GSSAPI.Realm must not be empty when GSS-API mechanism is used") + } + default: + msg := fmt.Sprintf("The SASL mechanism configuration is invalid. 
Possible values are `%s`, `%s`, `%s`, `%s` and `%s`", + SASLTypeOAuth, SASLTypePlaintext, SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512, SASLTypeGSSAPI) + return ConfigurationError(msg) + } + } + + // validate the Admin values + switch { + case c.Admin.Timeout <= 0: + return ConfigurationError("Admin.Timeout must be > 0") + } + + // validate the Metadata values + switch { + case c.Metadata.Retry.Max < 0: + return ConfigurationError("Metadata.Retry.Max must be >= 0") + case c.Metadata.Retry.Backoff < 0: + return ConfigurationError("Metadata.Retry.Backoff must be >= 0") + case c.Metadata.RefreshFrequency < 0: + return ConfigurationError("Metadata.RefreshFrequency must be >= 0") + } + + // validate the Producer values + switch { + case c.Producer.MaxMessageBytes <= 0: + return ConfigurationError("Producer.MaxMessageBytes must be > 0") + case c.Producer.RequiredAcks < -1: + return ConfigurationError("Producer.RequiredAcks must be >= -1") + case c.Producer.Timeout <= 0: + return ConfigurationError("Producer.Timeout must be > 0") + case c.Producer.Partitioner == nil: + return ConfigurationError("Producer.Partitioner must not be nil") + case c.Producer.Flush.Bytes < 0: + return ConfigurationError("Producer.Flush.Bytes must be >= 0") + case c.Producer.Flush.Messages < 0: + return ConfigurationError("Producer.Flush.Messages must be >= 0") + case c.Producer.Flush.Frequency < 0: + return ConfigurationError("Producer.Flush.Frequency must be >= 0") + case c.Producer.Flush.MaxMessages < 0: + return ConfigurationError("Producer.Flush.MaxMessages must be >= 0") + case c.Producer.Flush.MaxMessages > 0 && c.Producer.Flush.MaxMessages < c.Producer.Flush.Messages: + return ConfigurationError("Producer.Flush.MaxMessages must be >= Producer.Flush.Messages when set") + case c.Producer.Retry.Max < 0: + return ConfigurationError("Producer.Retry.Max must be >= 0") + case c.Producer.Retry.Backoff < 0: + return ConfigurationError("Producer.Retry.Backoff must be >= 0") + } + + if c.Producer.Compression == CompressionLZ4 && !c.Version.IsAtLeast(V0_10_0_0) { + return ConfigurationError("lz4 compression requires Version >= V0_10_0_0") + } + + if c.Producer.Compression == CompressionGZIP { + if c.Producer.CompressionLevel != CompressionLevelDefault { + if _, err := gzip.NewWriterLevel(ioutil.Discard, c.Producer.CompressionLevel); err != nil { + return ConfigurationError(fmt.Sprintf("gzip compression does not work with level %d: %v", c.Producer.CompressionLevel, err)) + } + } + } + + if c.Producer.Compression == CompressionZSTD && !c.Version.IsAtLeast(V2_1_0_0) { + return ConfigurationError("zstd compression requires Version >= V2_1_0_0") + } + + if c.Producer.Idempotent { + if !c.Version.IsAtLeast(V0_11_0_0) { + return ConfigurationError("Idempotent producer requires Version >= V0_11_0_0") + } + if c.Producer.Retry.Max == 0 { + return ConfigurationError("Idempotent producer requires Producer.Retry.Max >= 1") + } + if c.Producer.RequiredAcks != WaitForAll { + return ConfigurationError("Idempotent producer requires Producer.RequiredAcks to be WaitForAll") + } + if c.Net.MaxOpenRequests > 1 { + return ConfigurationError("Idempotent producer requires Net.MaxOpenRequests to be 1") + } + } + + // validate the Consumer values + switch { + case c.Consumer.Fetch.Min <= 0: + return ConfigurationError("Consumer.Fetch.Min must be > 0") + case c.Consumer.Fetch.Default <= 0: + return ConfigurationError("Consumer.Fetch.Default must be > 0") + case c.Consumer.Fetch.Max < 0: + return ConfigurationError("Consumer.Fetch.Max must be >= 0") + case 
c.Consumer.MaxWaitTime < 1*time.Millisecond: + return ConfigurationError("Consumer.MaxWaitTime must be >= 1ms") + case c.Consumer.MaxProcessingTime <= 0: + return ConfigurationError("Consumer.MaxProcessingTime must be > 0") + case c.Consumer.Retry.Backoff < 0: + return ConfigurationError("Consumer.Retry.Backoff must be >= 0") + case c.Consumer.Offsets.AutoCommit.Interval <= 0: + return ConfigurationError("Consumer.Offsets.AutoCommit.Interval must be > 0") + case c.Consumer.Offsets.Initial != OffsetOldest && c.Consumer.Offsets.Initial != OffsetNewest: + return ConfigurationError("Consumer.Offsets.Initial must be OffsetOldest or OffsetNewest") + case c.Consumer.Offsets.Retry.Max < 0: + return ConfigurationError("Consumer.Offsets.Retry.Max must be >= 0") + case c.Consumer.IsolationLevel != ReadUncommitted && c.Consumer.IsolationLevel != ReadCommitted: + return ConfigurationError("Consumer.IsolationLevel must be ReadUncommitted or ReadCommitted") + } + + if c.Consumer.Offsets.CommitInterval != 0 { + Logger.Println("Deprecation warning: Consumer.Offsets.CommitInterval exists for historical compatibility" + + " and should not be used. Please use Consumer.Offsets.AutoCommit, the current value will be ignored") + } + + // validate IsolationLevel + if c.Consumer.IsolationLevel == ReadCommitted && !c.Version.IsAtLeast(V0_11_0_0) { + return ConfigurationError("ReadCommitted requires Version >= V0_11_0_0") + } + + // validate the Consumer Group values + switch { + case c.Consumer.Group.Session.Timeout <= 2*time.Millisecond: + return ConfigurationError("Consumer.Group.Session.Timeout must be >= 2ms") + case c.Consumer.Group.Heartbeat.Interval < 1*time.Millisecond: + return ConfigurationError("Consumer.Group.Heartbeat.Interval must be >= 1ms") + case c.Consumer.Group.Heartbeat.Interval >= c.Consumer.Group.Session.Timeout: + return ConfigurationError("Consumer.Group.Heartbeat.Interval must be < Consumer.Group.Session.Timeout") + case c.Consumer.Group.Rebalance.Strategy == nil: + return ConfigurationError("Consumer.Group.Rebalance.Strategy must not be empty") + case c.Consumer.Group.Rebalance.Timeout <= time.Millisecond: + return ConfigurationError("Consumer.Group.Rebalance.Timeout must be >= 1ms") + case c.Consumer.Group.Rebalance.Retry.Max < 0: + return ConfigurationError("Consumer.Group.Rebalance.Retry.Max must be >= 0") + case c.Consumer.Group.Rebalance.Retry.Backoff < 0: + return ConfigurationError("Consumer.Group.Rebalance.Retry.Backoff must be >= 0") + } + + // validate misc shared values + switch { + case c.ChannelBufferSize < 0: + return ConfigurationError("ChannelBufferSize must be >= 0") + case !validID.MatchString(c.ClientID): + return ConfigurationError("ClientID is invalid") + } + + return nil +} + +func (c *Config) getDialer() proxy.Dialer { + if c.Net.Proxy.Enable { + Logger.Printf("using proxy %s", c.Net.Proxy.Dialer) + return c.Net.Proxy.Dialer + } else { + return &net.Dialer{ + Timeout: c.Net.DialTimeout, + KeepAlive: c.Net.KeepAlive, + LocalAddr: c.Net.LocalAddr, + } + } +} diff --git a/vendor/github.com/Shopify/sarama/config_resource_type.go b/vendor/github.com/Shopify/sarama/config_resource_type.go new file mode 100644 index 00000000000..bef1053aaed --- /dev/null +++ b/vendor/github.com/Shopify/sarama/config_resource_type.go @@ -0,0 +1,18 @@ +package sarama + +// ConfigResourceType is a type for resources that have configs. 
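+// The numeric values distinguish whether a config request targets a topic,
+// a broker, or a broker's loggers, matching Kafka's own enum (linked below).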
+type ConfigResourceType int8 + +// Taken from: +// https://github.com/apache/kafka/blob/ed7c071e07f1f90e4c2895582f61ca090ced3c42/clients/src/main/java/org/apache/kafka/common/config/ConfigResource.java#L32-L55 + +const ( + // UnknownResource constant type + UnknownResource ConfigResourceType = 0 + // TopicResource constant type + TopicResource ConfigResourceType = 2 + // BrokerResource constant type + BrokerResource ConfigResourceType = 4 + // BrokerLoggerResource constant type + BrokerLoggerResource ConfigResourceType = 8 +) diff --git a/vendor/github.com/Shopify/sarama/consumer.go b/vendor/github.com/Shopify/sarama/consumer.go new file mode 100644 index 00000000000..f9cd172b473 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/consumer.go @@ -0,0 +1,949 @@ +package sarama + +import ( + "errors" + "fmt" + "math" + "sync" + "sync/atomic" + "time" + + "github.com/rcrowley/go-metrics" +) + +// ConsumerMessage encapsulates a Kafka message returned by the consumer. +type ConsumerMessage struct { + Headers []*RecordHeader // only set if kafka is version 0.11+ + Timestamp time.Time // only set if kafka is version 0.10+, inner message timestamp + BlockTimestamp time.Time // only set if kafka is version 0.10+, outer (compressed) block timestamp + + Key, Value []byte + Topic string + Partition int32 + Offset int64 +} + +// ConsumerError is what is provided to the user when an error occurs. +// It wraps an error and includes the topic and partition. +type ConsumerError struct { + Topic string + Partition int32 + Err error +} + +func (ce ConsumerError) Error() string { + return fmt.Sprintf("kafka: error while consuming %s/%d: %s", ce.Topic, ce.Partition, ce.Err) +} + +func (ce ConsumerError) Unwrap() error { + return ce.Err +} + +// ConsumerErrors is a type that wraps a batch of errors and implements the Error interface. +// It can be returned from the PartitionConsumer's Close methods to avoid the need to manually drain errors +// when stopping. +type ConsumerErrors []*ConsumerError + +func (ce ConsumerErrors) Error() string { + return fmt.Sprintf("kafka: %d errors while consuming", len(ce)) +} + +// Consumer manages PartitionConsumers which process Kafka messages from brokers. You MUST call Close() +// on a consumer to avoid leaks; it will not be garbage-collected automatically when it passes out of +// scope. +type Consumer interface { + // Topics returns the set of available topics as retrieved from the cluster + // metadata. This method is the same as Client.Topics(), and is provided for + // convenience. + Topics() ([]string, error) + + // Partitions returns the sorted list of all partition IDs for the given topic. + // This method is the same as Client.Partitions(), and is provided for convenience. + Partitions(topic string) ([]int32, error) + + // ConsumePartition creates a PartitionConsumer on the given topic/partition with + // the given offset. It will return an error if this Consumer is already consuming + // on the given topic/partition. Offset can be a literal offset, or OffsetNewest + // or OffsetOldest. + ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error) + + // HighWaterMarks returns the current high water marks for each topic and partition. + // Consistency between partitions is not guaranteed since high water marks are updated separately. + HighWaterMarks() map[string]map[int32]int64 + + // Close shuts down the consumer. It must be called after all child + // PartitionConsumers have already been closed.
+ Close() error +} + +type consumer struct { + conf *Config + children map[string]map[int32]*partitionConsumer + brokerConsumers map[*Broker]*brokerConsumer + client Client + lock sync.Mutex +} + +// NewConsumer creates a new consumer using the given broker addresses and configuration. +func NewConsumer(addrs []string, config *Config) (Consumer, error) { + client, err := NewClient(addrs, config) + if err != nil { + return nil, err + } + return newConsumer(client) +} + +// NewConsumerFromClient creates a new consumer using the given client. It is still +// necessary to call Close() on the underlying client when shutting down this consumer. +func NewConsumerFromClient(client Client) (Consumer, error) { + // For clients passed in by the client, ensure we don't + // call Close() on it. + cli := &nopCloserClient{client} + return newConsumer(cli) +} + +func newConsumer(client Client) (Consumer, error) { + // Check that we are not dealing with a closed Client before processing any other arguments + if client.Closed() { + return nil, ErrClosedClient + } + + c := &consumer{ + client: client, + conf: client.Config(), + children: make(map[string]map[int32]*partitionConsumer), + brokerConsumers: make(map[*Broker]*brokerConsumer), + } + + return c, nil +} + +func (c *consumer) Close() error { + return c.client.Close() +} + +func (c *consumer) Topics() ([]string, error) { + return c.client.Topics() +} + +func (c *consumer) Partitions(topic string) ([]int32, error) { + return c.client.Partitions(topic) +} + +func (c *consumer) ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error) { + child := &partitionConsumer{ + consumer: c, + conf: c.conf, + topic: topic, + partition: partition, + messages: make(chan *ConsumerMessage, c.conf.ChannelBufferSize), + errors: make(chan *ConsumerError, c.conf.ChannelBufferSize), + feeder: make(chan *FetchResponse, 1), + trigger: make(chan none, 1), + dying: make(chan none), + fetchSize: c.conf.Consumer.Fetch.Default, + } + + if err := child.chooseStartingOffset(offset); err != nil { + return nil, err + } + + var leader *Broker + var err error + if leader, err = c.client.Leader(child.topic, child.partition); err != nil { + return nil, err + } + + if err := c.addChild(child); err != nil { + return nil, err + } + + go withRecover(child.dispatcher) + go withRecover(child.responseFeeder) + + child.broker = c.refBrokerConsumer(leader) + child.broker.input <- child + + return child, nil +} + +func (c *consumer) HighWaterMarks() map[string]map[int32]int64 { + c.lock.Lock() + defer c.lock.Unlock() + + hwms := make(map[string]map[int32]int64) + for topic, p := range c.children { + hwm := make(map[int32]int64, len(p)) + for partition, pc := range p { + hwm[partition] = pc.HighWaterMarkOffset() + } + hwms[topic] = hwm + } + + return hwms +} + +func (c *consumer) addChild(child *partitionConsumer) error { + c.lock.Lock() + defer c.lock.Unlock() + + topicChildren := c.children[child.topic] + if topicChildren == nil { + topicChildren = make(map[int32]*partitionConsumer) + c.children[child.topic] = topicChildren + } + + if topicChildren[child.partition] != nil { + return ConfigurationError("That topic/partition is already being consumed") + } + + topicChildren[child.partition] = child + return nil +} + +func (c *consumer) removeChild(child *partitionConsumer) { + c.lock.Lock() + defer c.lock.Unlock() + + delete(c.children[child.topic], child.partition) +} + +func (c *consumer) refBrokerConsumer(broker *Broker) *brokerConsumer { + c.lock.Lock() + defer 
c.lock.Unlock() + + bc := c.brokerConsumers[broker] + if bc == nil { + bc = c.newBrokerConsumer(broker) + c.brokerConsumers[broker] = bc + } + + bc.refs++ + + return bc +} + +func (c *consumer) unrefBrokerConsumer(brokerWorker *brokerConsumer) { + c.lock.Lock() + defer c.lock.Unlock() + + brokerWorker.refs-- + + if brokerWorker.refs == 0 { + close(brokerWorker.input) + if c.brokerConsumers[brokerWorker.broker] == brokerWorker { + delete(c.brokerConsumers, brokerWorker.broker) + } + } +} + +func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) { + c.lock.Lock() + defer c.lock.Unlock() + + delete(c.brokerConsumers, brokerWorker.broker) +} + +// PartitionConsumer + +// PartitionConsumer processes Kafka messages from a given topic and partition. You MUST call one of Close() or +// AsyncClose() on a PartitionConsumer to avoid leaks; it will not be garbage-collected automatically when it passes out +// of scope. +// +// The simplest way of using a PartitionConsumer is to loop over its Messages channel using a for/range +// loop. The PartitionConsumer will only stop itself in one case: when the offset being consumed is reported +// as out of range by the brokers. In this case you should decide what you want to do (try a different offset, +// notify a human, etc) and handle it appropriately. For all other error cases, it will just keep retrying. +// By default, it logs these errors to sarama.Logger; if you want to be notified directly of all errors, set +// your config's Consumer.Return.Errors to true and read from the Errors channel, using a select statement +// or a separate goroutine. Check out the Consumer examples to see implementations of these different approaches. +// +// To terminate such a for/range loop while the loop is executing, call AsyncClose. This will kick off the process of +// consumer tear-down & return immediately. Continue to loop, servicing the Messages channel until the teardown process +// AsyncClose initiated closes it (thus terminating the for/range loop). If you've already ceased reading Messages, call +// Close; this will signal the PartitionConsumer's goroutines to begin shutting down (just like AsyncClose), but will +// also drain the Messages channel, harvest all errors & return them once cleanup has completed. +type PartitionConsumer interface { + // AsyncClose initiates a shutdown of the PartitionConsumer. This method will return immediately, after which you + // should continue to service the 'Messages' and 'Errors' channels until they are empty. It is required to call this + // function, or Close before a consumer object passes out of scope, as it will otherwise leak memory. You must call + // this before calling Close on the underlying client. + AsyncClose() + + // Close stops the PartitionConsumer from fetching messages. It will initiate a shutdown just like AsyncClose, drain + // the Messages channel, harvest any errors & return them to the caller. Note that if you are continuing to service + // the Messages channel when this function is called, you will be competing with Close for messages; consider + // calling AsyncClose, instead. It is required to call this function (or AsyncClose) before a consumer object passes + // out of scope, as it will otherwise leak memory. You must call this before calling Close on the underlying client. + Close() error + + // Messages returns the read channel for the messages that are returned by + // the broker. 
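+	// A minimal consumption loop looks like this (illustrative sketch,
+	// assuming pc was returned by ConsumePartition):
+	//
+	//	for msg := range pc.Messages() {
+	//		fmt.Printf("%s/%d@%d: %s\n", msg.Topic, msg.Partition, msg.Offset, msg.Value)
+	//	}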
+ Messages() <-chan *ConsumerMessage + + // Errors returns a read channel of errors that occurred during consuming, if + // enabled. By default, errors are logged and not returned over this channel. + // If you want to implement any custom error handling, set your config's + // Consumer.Return.Errors setting to true, and read from this channel. + Errors() <-chan *ConsumerError + + // HighWaterMarkOffset returns the high water mark offset of the partition, + // i.e. the offset that will be used for the next message that will be produced. + // You can use this to determine how far behind the processing is. + HighWaterMarkOffset() int64 +} + +type partitionConsumer struct { + highWaterMarkOffset int64 // must be at the top of the struct because https://golang.org/pkg/sync/atomic/#pkg-note-BUG + + consumer *consumer + conf *Config + broker *brokerConsumer + messages chan *ConsumerMessage + errors chan *ConsumerError + feeder chan *FetchResponse + + preferredReadReplica int32 + + trigger, dying chan none + closeOnce sync.Once + topic string + partition int32 + responseResult error + fetchSize int32 + offset int64 + retries int32 +} + +var errTimedOut = errors.New("timed out feeding messages to the user") // not user-facing + +func (child *partitionConsumer) sendError(err error) { + cErr := &ConsumerError{ + Topic: child.topic, + Partition: child.partition, + Err: err, + } + + if child.conf.Consumer.Return.Errors { + child.errors <- cErr + } else { + Logger.Println(cErr) + } +} + +func (child *partitionConsumer) computeBackoff() time.Duration { + if child.conf.Consumer.Retry.BackoffFunc != nil { + retries := atomic.AddInt32(&child.retries, 1) + return child.conf.Consumer.Retry.BackoffFunc(int(retries)) + } + return child.conf.Consumer.Retry.Backoff +} + +func (child *partitionConsumer) dispatcher() { + for range child.trigger { + select { + case <-child.dying: + close(child.trigger) + case <-time.After(child.computeBackoff()): + if child.broker != nil { + child.consumer.unrefBrokerConsumer(child.broker) + child.broker = nil + } + + Logger.Printf("consumer/%s/%d finding new broker\n", child.topic, child.partition) + if err := child.dispatch(); err != nil { + child.sendError(err) + child.trigger <- none{} + } + } + } + + if child.broker != nil { + child.consumer.unrefBrokerConsumer(child.broker) + } + child.consumer.removeChild(child) + close(child.feeder) +} + +func (child *partitionConsumer) preferredBroker() (*Broker, error) { + if child.preferredReadReplica >= 0 { + broker, err := child.consumer.client.Broker(child.preferredReadReplica) + if err == nil { + return broker, nil + } + } + + // if prefered replica cannot be found fallback to leader + return child.consumer.client.Leader(child.topic, child.partition) +} + +func (child *partitionConsumer) dispatch() error { + if err := child.consumer.client.RefreshMetadata(child.topic); err != nil { + return err + } + + broker, err := child.preferredBroker() + if err != nil { + return err + } + + child.broker = child.consumer.refBrokerConsumer(broker) + + child.broker.input <- child + + return nil +} + +func (child *partitionConsumer) chooseStartingOffset(offset int64) error { + newestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetNewest) + if err != nil { + return err + } + oldestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetOldest) + if err != nil { + return err + } + + switch { + case offset == OffsetNewest: + child.offset = newestOffset + case offset == OffsetOldest: + 
child.offset = oldestOffset + case offset >= oldestOffset && offset <= newestOffset: + child.offset = offset + default: + return ErrOffsetOutOfRange + } + + return nil +} + +func (child *partitionConsumer) Messages() <-chan *ConsumerMessage { + return child.messages +} + +func (child *partitionConsumer) Errors() <-chan *ConsumerError { + return child.errors +} + +func (child *partitionConsumer) AsyncClose() { + // this triggers whatever broker owns this child to abandon it and close its trigger channel, which causes + // the dispatcher to exit its loop, which removes it from the consumer then closes its 'messages' and + // 'errors' channel (alternatively, if the child is already at the dispatcher for some reason, that will + // also just close itself) + child.closeOnce.Do(func() { + close(child.dying) + }) +} + +func (child *partitionConsumer) Close() error { + child.AsyncClose() + + var consumerErrors ConsumerErrors + for err := range child.errors { + consumerErrors = append(consumerErrors, err) + } + + if len(consumerErrors) > 0 { + return consumerErrors + } + return nil +} + +func (child *partitionConsumer) HighWaterMarkOffset() int64 { + return atomic.LoadInt64(&child.highWaterMarkOffset) +} + +func (child *partitionConsumer) responseFeeder() { + var msgs []*ConsumerMessage + expiryTicker := time.NewTicker(child.conf.Consumer.MaxProcessingTime) + firstAttempt := true + +feederLoop: + for response := range child.feeder { + msgs, child.responseResult = child.parseResponse(response) + + if child.responseResult == nil { + atomic.StoreInt32(&child.retries, 0) + } + + for i, msg := range msgs { + child.interceptors(msg) + messageSelect: + select { + case <-child.dying: + child.broker.acks.Done() + continue feederLoop + case child.messages <- msg: + firstAttempt = true + case <-expiryTicker.C: + if !firstAttempt { + child.responseResult = errTimedOut + child.broker.acks.Done() + remainingLoop: + for _, msg = range msgs[i:] { + child.interceptors(msg) + select { + case child.messages <- msg: + case <-child.dying: + break remainingLoop + } + } + child.broker.input <- child + continue feederLoop + } else { + // current message has not been sent, return to select + // statement + firstAttempt = false + goto messageSelect + } + } + } + + child.broker.acks.Done() + } + + expiryTicker.Stop() + close(child.messages) + close(child.errors) +} + +func (child *partitionConsumer) parseMessages(msgSet *MessageSet) ([]*ConsumerMessage, error) { + var messages []*ConsumerMessage + for _, msgBlock := range msgSet.Messages { + for _, msg := range msgBlock.Messages() { + offset := msg.Offset + timestamp := msg.Msg.Timestamp + if msg.Msg.Version >= 1 { + baseOffset := msgBlock.Offset - msgBlock.Messages()[len(msgBlock.Messages())-1].Offset + offset += baseOffset + if msg.Msg.LogAppendTime { + timestamp = msgBlock.Msg.Timestamp + } + } + if offset < child.offset { + continue + } + messages = append(messages, &ConsumerMessage{ + Topic: child.topic, + Partition: child.partition, + Key: msg.Msg.Key, + Value: msg.Msg.Value, + Offset: offset, + Timestamp: timestamp, + BlockTimestamp: msgBlock.Msg.Timestamp, + }) + child.offset = offset + 1 + } + } + if len(messages) == 0 { + child.offset++ + } + return messages, nil +} + +func (child *partitionConsumer) parseRecords(batch *RecordBatch) ([]*ConsumerMessage, error) { + messages := make([]*ConsumerMessage, 0, len(batch.Records)) + + for _, rec := range batch.Records { + offset := batch.FirstOffset + rec.OffsetDelta + if offset < child.offset { + continue + } + timestamp 
:= batch.FirstTimestamp.Add(rec.TimestampDelta) + if batch.LogAppendTime { + timestamp = batch.MaxTimestamp + } + messages = append(messages, &ConsumerMessage{ + Topic: child.topic, + Partition: child.partition, + Key: rec.Key, + Value: rec.Value, + Offset: offset, + Timestamp: timestamp, + Headers: rec.Headers, + }) + child.offset = offset + 1 + } + if len(messages) == 0 { + child.offset++ + } + return messages, nil +} + +func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*ConsumerMessage, error) { + var ( + metricRegistry = child.conf.MetricRegistry + consumerBatchSizeMetric metrics.Histogram + ) + + if metricRegistry != nil { + consumerBatchSizeMetric = getOrRegisterHistogram("consumer-batch-size", metricRegistry) + } + + // If request was throttled and empty we log and return without error + if response.ThrottleTime != time.Duration(0) && len(response.Blocks) == 0 { + Logger.Printf( + "consumer/broker/%d FetchResponse throttled %v\n", + child.broker.broker.ID(), response.ThrottleTime) + return nil, nil + } + + block := response.GetBlock(child.topic, child.partition) + if block == nil { + return nil, ErrIncompleteResponse + } + + if block.Err != ErrNoError { + return nil, block.Err + } + + nRecs, err := block.numRecords() + if err != nil { + return nil, err + } + + consumerBatchSizeMetric.Update(int64(nRecs)) + + child.preferredReadReplica = block.PreferredReadReplica + + if nRecs == 0 { + partialTrailingMessage, err := block.isPartial() + if err != nil { + return nil, err + } + // We got no messages. If we got a trailing one then we need to ask for more data. + // Otherwise we just poll again and wait for one to be produced... + if partialTrailingMessage { + if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize == child.conf.Consumer.Fetch.Max { + // we can't ask for more data, we've hit the configured limit + child.sendError(ErrMessageTooLarge) + child.offset++ // skip this one so we can keep processing future messages + } else { + child.fetchSize *= 2 + // check int32 overflow + if child.fetchSize < 0 { + child.fetchSize = math.MaxInt32 + } + if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize > child.conf.Consumer.Fetch.Max { + child.fetchSize = child.conf.Consumer.Fetch.Max + } + } + } + + return nil, nil + } + + // we got messages, reset our fetch size in case it was increased for a previous request + child.fetchSize = child.conf.Consumer.Fetch.Default + atomic.StoreInt64(&child.highWaterMarkOffset, block.HighWaterMarkOffset) + + // abortedProducerIDs contains producerID which message should be ignored as uncommitted + // - producerID are added when the partitionConsumer iterate over the offset at which an aborted transaction begins (abortedTransaction.FirstOffset) + // - producerID are removed when partitionConsumer iterate over an aborted controlRecord, meaning the aborted transaction for this producer is over + abortedProducerIDs := make(map[int64]struct{}, len(block.AbortedTransactions)) + abortedTransactions := block.getAbortedTransactions() + + var messages []*ConsumerMessage + for _, records := range block.RecordsSet { + switch records.recordsType { + case legacyRecords: + messageSetMessages, err := child.parseMessages(records.MsgSet) + if err != nil { + return nil, err + } + + messages = append(messages, messageSetMessages...) 
+ case defaultRecords: + // Consume remaining abortedTransaction up to last offset of current batch + for _, txn := range abortedTransactions { + if txn.FirstOffset > records.RecordBatch.LastOffset() { + break + } + abortedProducerIDs[txn.ProducerID] = struct{}{} + // Pop abortedTransactions so that we never add it again + abortedTransactions = abortedTransactions[1:] + } + + recordBatchMessages, err := child.parseRecords(records.RecordBatch) + if err != nil { + return nil, err + } + + // Parse and commit offset but do not expose messages that are: + // - control records + // - part of an aborted transaction when set to `ReadCommitted` + + // control record + isControl, err := records.isControl() + if err != nil { + // I don't know why there is this continue in case of error to begin with + // Safe bet is to ignore control messages if ReadUncommitted + // and block on them in case of error and ReadCommitted + if child.conf.Consumer.IsolationLevel == ReadCommitted { + return nil, err + } + continue + } + if isControl { + controlRecord, err := records.getControlRecord() + if err != nil { + return nil, err + } + + if controlRecord.Type == ControlRecordAbort { + delete(abortedProducerIDs, records.RecordBatch.ProducerID) + } + continue + } + + // filter aborted transactions + if child.conf.Consumer.IsolationLevel == ReadCommitted { + _, isAborted := abortedProducerIDs[records.RecordBatch.ProducerID] + if records.RecordBatch.IsTransactional && isAborted { + continue + } + } + + messages = append(messages, recordBatchMessages...) + default: + return nil, fmt.Errorf("unknown records type: %v", records.recordsType) + } + } + + return messages, nil +} + +func (child *partitionConsumer) interceptors(msg *ConsumerMessage) { + for _, interceptor := range child.conf.Consumer.Interceptors { + msg.safelyApplyInterceptor(interceptor) + } +} + +type brokerConsumer struct { + consumer *consumer + broker *Broker + input chan *partitionConsumer + newSubscriptions chan []*partitionConsumer + subscriptions map[*partitionConsumer]none + wait chan none + acks sync.WaitGroup + refs int +} + +func (c *consumer) newBrokerConsumer(broker *Broker) *brokerConsumer { + bc := &brokerConsumer{ + consumer: c, + broker: broker, + input: make(chan *partitionConsumer), + newSubscriptions: make(chan []*partitionConsumer), + wait: make(chan none), + subscriptions: make(map[*partitionConsumer]none), + refs: 0, + } + + go withRecover(bc.subscriptionManager) + go withRecover(bc.subscriptionConsumer) + + return bc +} + +// The subscriptionManager constantly accepts new subscriptions on `input` (even when the main subscriptionConsumer +// goroutine is in the middle of a network request) and batches it up. The main worker goroutine picks +// up a batch of new subscriptions between every network request by reading from `newSubscriptions`, so we give +// it nil if no new subscriptions are available. We also write to `wait` only when new subscriptions is available, +// so the main goroutine can block waiting for work if it has none. 
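+// In short, `input` is never blocked behind an in-flight fetch: the local
+// buffer absorbs new subscriptions until the worker is ready to take them.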
+func (bc *brokerConsumer) subscriptionManager() { + var buffer []*partitionConsumer + + for { + if len(buffer) > 0 { + select { + case event, ok := <-bc.input: + if !ok { + goto done + } + buffer = append(buffer, event) + case bc.newSubscriptions <- buffer: + buffer = nil + case bc.wait <- none{}: + } + } else { + select { + case event, ok := <-bc.input: + if !ok { + goto done + } + buffer = append(buffer, event) + case bc.newSubscriptions <- nil: + } + } + } + +done: + close(bc.wait) + if len(buffer) > 0 { + bc.newSubscriptions <- buffer + } + close(bc.newSubscriptions) +} + +// subscriptionConsumer ensures we will get nil right away if no new subscriptions is available +func (bc *brokerConsumer) subscriptionConsumer() { + <-bc.wait // wait for our first piece of work + + for newSubscriptions := range bc.newSubscriptions { + bc.updateSubscriptions(newSubscriptions) + + if len(bc.subscriptions) == 0 { + // We're about to be shut down or we're about to receive more subscriptions. + // Either way, the signal just hasn't propagated to our goroutine yet. + <-bc.wait + continue + } + + response, err := bc.fetchNewMessages() + if err != nil { + Logger.Printf("consumer/broker/%d disconnecting due to error processing FetchRequest: %s\n", bc.broker.ID(), err) + bc.abort(err) + return + } + + bc.acks.Add(len(bc.subscriptions)) + for child := range bc.subscriptions { + child.feeder <- response + } + bc.acks.Wait() + bc.handleResponses() + } +} + +func (bc *brokerConsumer) updateSubscriptions(newSubscriptions []*partitionConsumer) { + for _, child := range newSubscriptions { + bc.subscriptions[child] = none{} + Logger.Printf("consumer/broker/%d added subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition) + } + + for child := range bc.subscriptions { + select { + case <-child.dying: + Logger.Printf("consumer/broker/%d closed dead subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition) + close(child.trigger) + delete(bc.subscriptions, child) + default: + // no-op + } + } +} + +// handleResponses handles the response codes left for us by our subscriptions, and abandons ones that have been closed +func (bc *brokerConsumer) handleResponses() { + for child := range bc.subscriptions { + result := child.responseResult + child.responseResult = nil + + if result == nil { + if preferredBroker, err := child.preferredBroker(); err == nil { + if bc.broker.ID() != preferredBroker.ID() { + // not an error but needs redispatching to consume from prefered replica + child.trigger <- none{} + delete(bc.subscriptions, child) + } + } + continue + } + + // Discard any replica preference. 
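+		// (-1 makes preferredBroker() fall back to the partition leader)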
+ child.preferredReadReplica = -1 + + switch result { + case errTimedOut: + Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because consuming was taking too long\n", + bc.broker.ID(), child.topic, child.partition) + delete(bc.subscriptions, child) + case ErrOffsetOutOfRange: + // there's no point in retrying this it will just fail the same way again + // shut it down and force the user to choose what to do + child.sendError(result) + Logger.Printf("consumer/%s/%d shutting down because %s\n", child.topic, child.partition, result) + close(child.trigger) + delete(bc.subscriptions, child) + case ErrUnknownTopicOrPartition, ErrNotLeaderForPartition, ErrLeaderNotAvailable, ErrReplicaNotAvailable: + // not an error, but does need redispatching + Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n", + bc.broker.ID(), child.topic, child.partition, result) + child.trigger <- none{} + delete(bc.subscriptions, child) + default: + // dunno, tell the user and try redispatching + child.sendError(result) + Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n", + bc.broker.ID(), child.topic, child.partition, result) + child.trigger <- none{} + delete(bc.subscriptions, child) + } + } +} + +func (bc *brokerConsumer) abort(err error) { + bc.consumer.abandonBrokerConsumer(bc) + _ = bc.broker.Close() // we don't care about the error this might return, we already have one + + for child := range bc.subscriptions { + child.sendError(err) + child.trigger <- none{} + } + + for newSubscriptions := range bc.newSubscriptions { + if len(newSubscriptions) == 0 { + <-bc.wait + continue + } + for _, child := range newSubscriptions { + child.sendError(err) + child.trigger <- none{} + } + } +} + +func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) { + request := &FetchRequest{ + MinBytes: bc.consumer.conf.Consumer.Fetch.Min, + MaxWaitTime: int32(bc.consumer.conf.Consumer.MaxWaitTime / time.Millisecond), + } + if bc.consumer.conf.Version.IsAtLeast(V0_9_0_0) { + request.Version = 1 + } + if bc.consumer.conf.Version.IsAtLeast(V0_10_0_0) { + request.Version = 2 + } + if bc.consumer.conf.Version.IsAtLeast(V0_10_1_0) { + request.Version = 3 + request.MaxBytes = MaxResponseSize + } + if bc.consumer.conf.Version.IsAtLeast(V0_11_0_0) { + request.Version = 4 + request.Isolation = bc.consumer.conf.Consumer.IsolationLevel + } + if bc.consumer.conf.Version.IsAtLeast(V1_1_0_0) { + request.Version = 7 + // We do not currently implement KIP-227 FetchSessions. Setting the id to 0 + // and the epoch to -1 tells the broker not to generate as session ID we're going + // to just ignore anyway. 
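+		// (a session ID of 0 together with epoch -1 is the protocol's
+		// "no session" sentinel)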
+		request.SessionID = 0
+		request.SessionEpoch = -1
+	}
+	if bc.consumer.conf.Version.IsAtLeast(V2_1_0_0) {
+		request.Version = 10
+	}
+	if bc.consumer.conf.Version.IsAtLeast(V2_3_0_0) {
+		request.Version = 11
+		request.RackID = bc.consumer.conf.RackID
+	}
+
+	for child := range bc.subscriptions {
+		request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize)
+	}
+
+	return bc.broker.Fetch(request)
+}
diff --git a/vendor/github.com/Shopify/sarama/consumer_group.go b/vendor/github.com/Shopify/sarama/consumer_group.go
new file mode 100644
index 00000000000..2bf236ae53c
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/consumer_group.go
@@ -0,0 +1,879 @@
+package sarama
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"sort"
+	"sync"
+	"time"
+)
+
+// ErrClosedConsumerGroup is the error returned when a method is called on a consumer group that has been closed.
+var ErrClosedConsumerGroup = errors.New("kafka: tried to use a consumer group that was closed")
+
+// ConsumerGroup is responsible for dividing up processing of topics and partitions
+// over a collection of processes (the members of the consumer group).
+type ConsumerGroup interface {
+	// Consume joins a cluster of consumers for a given list of topics and
+	// starts a blocking ConsumerGroupSession through the ConsumerGroupHandler.
+	//
+	// The life-cycle of a session is represented by the following steps:
+	//
+	// 1. The consumers join the group (as explained in https://kafka.apache.org/documentation/#intro_consumers)
+	//    and are assigned their "fair share" of partitions, aka 'claims'.
+	// 2. Before processing starts, the handler's Setup() hook is called to notify the user
+	//    of the claims and allow any necessary preparation or alteration of state.
+	// 3. For each of the assigned claims the handler's ConsumeClaim() function is then called
+	//    in a separate goroutine, which requires it to be thread-safe. Any state must be carefully protected
+	//    from concurrent reads/writes.
+	// 4. The session will persist until one of the ConsumeClaim() functions exits. This can be either when the
+	//    parent context is cancelled or when a server-side rebalance cycle is initiated.
+	// 5. Once all the ConsumeClaim() loops have exited, the handler's Cleanup() hook is called
+	//    to allow the user to perform any final tasks before a rebalance.
+	// 6. Finally, marked offsets are committed one last time before claims are released.
+	//
+	// Please note that once a rebalance is triggered, sessions must be completed within
+	// Config.Consumer.Group.Rebalance.Timeout. This means that ConsumeClaim() functions must exit
+	// as quickly as possible to allow time for Cleanup() and the final offset commit. If the timeout
+	// is exceeded, the consumer will be removed from the group by Kafka, which will cause offset
+	// commit failures.
+	// This method should be called inside an infinite loop; when a
+	// server-side rebalance happens, the consumer session needs to be
+	// recreated to get the new claims.
+	Consume(ctx context.Context, topics []string, handler ConsumerGroupHandler) error
+
+	// Errors returns a read channel of errors that occurred during the consumer life-cycle.
+	// By default, errors are logged and not returned over this channel.
+	// If you want to implement any custom error handling, set your config's
+	// Consumer.Return.Errors setting to true, and read from this channel.
+	Errors() <-chan error
+
+	// Close stops the ConsumerGroup and detaches any running sessions.
It is required to call
+	// this function before the object passes out of scope, as it will otherwise leak memory.
+	Close() error
+}
+
+type consumerGroup struct {
+	client Client
+
+	config   *Config
+	consumer Consumer
+	groupID  string
+	memberID string
+	errors   chan error
+
+	lock      sync.Mutex
+	closed    chan none
+	closeOnce sync.Once
+
+	userData []byte
+}
+
+// NewConsumerGroup creates a new consumer group with the given broker addresses and configuration.
+func NewConsumerGroup(addrs []string, groupID string, config *Config) (ConsumerGroup, error) {
+	client, err := NewClient(addrs, config)
+	if err != nil {
+		return nil, err
+	}
+
+	c, err := newConsumerGroup(groupID, client)
+	if err != nil {
+		_ = client.Close()
+	}
+	return c, err
+}
+
+// NewConsumerGroupFromClient creates a new consumer group using the given client. It is still
+// necessary to call Close() on the underlying client when shutting down this consumer.
+// PLEASE NOTE: consumer groups can only re-use but not share clients.
+func NewConsumerGroupFromClient(groupID string, client Client) (ConsumerGroup, error) {
+	// For clients passed in by the caller, ensure we don't
+	// call Close() on them.
+	cli := &nopCloserClient{client}
+	return newConsumerGroup(groupID, cli)
+}
+
+func newConsumerGroup(groupID string, client Client) (ConsumerGroup, error) {
+	config := client.Config()
+	if !config.Version.IsAtLeast(V0_10_2_0) {
+		return nil, ConfigurationError("consumer groups require Version to be >= V0_10_2_0")
+	}
+
+	consumer, err := NewConsumerFromClient(client)
+	if err != nil {
+		return nil, err
+	}
+
+	return &consumerGroup{
+		client:   client,
+		consumer: consumer,
+		config:   config,
+		groupID:  groupID,
+		errors:   make(chan error, config.ChannelBufferSize),
+		closed:   make(chan none),
+	}, nil
+}
+
+// Errors implements ConsumerGroup.
+func (c *consumerGroup) Errors() <-chan error { return c.errors }
+
+// Close implements ConsumerGroup.
+func (c *consumerGroup) Close() (err error) {
+	c.closeOnce.Do(func() {
+		close(c.closed)
+
+		// leave group
+		if e := c.leave(); e != nil {
+			err = e
+		}
+
+		// drain errors
+		go func() {
+			close(c.errors)
+		}()
+		for e := range c.errors {
+			err = e
+		}
+
+		if e := c.client.Close(); e != nil {
+			err = e
+		}
+	})
+	return
+}
+
+// Consume implements ConsumerGroup.
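+// As documented on the interface, Consume is normally wrapped in a loop so
+// that a fresh session is created after every server-side rebalance
+// (illustrative sketch, assuming group, ctx, topics and handler are in scope):
+//
+//	for {
+//		if err := group.Consume(ctx, topics, handler); err != nil {
+//			return err
+//		}
+//		if err := ctx.Err(); err != nil {
+//			return err
+//		}
+//	}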
+func (c *consumerGroup) Consume(ctx context.Context, topics []string, handler ConsumerGroupHandler) error {
+	// Ensure group is not closed
+	select {
+	case <-c.closed:
+		return ErrClosedConsumerGroup
+	default:
+	}
+
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	// Quick exit when no topics are provided
+	if len(topics) == 0 {
+		return fmt.Errorf("no topics provided")
+	}
+
+	// Refresh metadata for requested topics
+	if err := c.client.RefreshMetadata(topics...); err != nil {
+		return err
+	}
+
+	// Init session
+	sess, err := c.newSession(ctx, topics, handler, c.config.Consumer.Group.Rebalance.Retry.Max)
+	if err == ErrClosedClient {
+		return ErrClosedConsumerGroup
+	} else if err != nil {
+		return err
+	}
+
+	// Start a background goroutine that watches the consumed topics for a change
+	// in the number of partitions and cancels the session (triggering a rebalance)
+	// when one is detected. It exits together with the session, so calling Consume
+	// again does not pile up additional checker goroutines.
+	go c.loopCheckPartitionNumbers(topics, sess)
+
+	// Wait for session exit signal
+	<-sess.ctx.Done()
+
+	// Gracefully release session claims
+	return sess.release(true)
+}
+
+func (c *consumerGroup) retryNewSession(ctx context.Context, topics []string, handler ConsumerGroupHandler, retries int, refreshCoordinator bool) (*consumerGroupSession, error) {
+	select {
+	case <-c.closed:
+		return nil, ErrClosedConsumerGroup
+	case <-time.After(c.config.Consumer.Group.Rebalance.Retry.Backoff):
+	}
+
+	if refreshCoordinator {
+		err := c.client.RefreshCoordinator(c.groupID)
+		if err != nil {
+			return c.retryNewSession(ctx, topics, handler, retries, true)
+		}
+	}
+
+	return c.newSession(ctx, topics, handler, retries-1)
+}
+
+func (c *consumerGroup) newSession(ctx context.Context, topics []string, handler ConsumerGroupHandler, retries int) (*consumerGroupSession, error) {
+	coordinator, err := c.client.Coordinator(c.groupID)
+	if err != nil {
+		if retries <= 0 {
+			return nil, err
+		}
+
+		return c.retryNewSession(ctx, topics, handler, retries, true)
+	}
+
+	// Join consumer group
+	join, err := c.joinGroupRequest(coordinator, topics)
+	if err != nil {
+		_ = coordinator.Close()
+		return nil, err
+	}
+	switch join.Err {
+	case ErrNoError:
+		c.memberID = join.MemberId
+	case ErrUnknownMemberId, ErrIllegalGeneration: // reset member ID and retry immediately
+		c.memberID = ""
+		return c.newSession(ctx, topics, handler, retries)
+	case ErrNotCoordinatorForConsumer: // retry after backoff with coordinator refresh
+		if retries <= 0 {
+			return nil, join.Err
+		}
+
+		return c.retryNewSession(ctx, topics, handler, retries, true)
+	case ErrRebalanceInProgress: // retry after backoff
+		if retries <= 0 {
+			return nil, join.Err
+		}
+
+		return c.retryNewSession(ctx, topics, handler, retries, false)
+	default:
+		return nil, join.Err
+	}
+
+	// Prepare distribution plan if we joined as the leader
+	var plan BalanceStrategyPlan
+	if join.LeaderId == join.MemberId {
+		members, err := join.GetMembers()
+		if err != nil {
+			return nil, err
+		}
+
+		plan, err = c.balance(members)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// Sync consumer group
+	groupRequest, err := c.syncGroupRequest(coordinator, plan, join.GenerationId)
+	if err != nil {
+		_ = coordinator.Close()
+		return nil, err
+	}
+	switch groupRequest.Err {
+	case ErrNoError:
+	case ErrUnknownMemberId, ErrIllegalGeneration: // reset member ID and retry immediately
+		c.memberID = ""
+		return c.newSession(ctx, topics, handler, retries)
+	case ErrNotCoordinatorForConsumer: // retry after backoff with coordinator refresh
+		if retries <=
0 { + return nil, groupRequest.Err + } + + return c.retryNewSession(ctx, topics, handler, retries, true) + case ErrRebalanceInProgress: // retry after backoff + if retries <= 0 { + return nil, groupRequest.Err + } + + return c.retryNewSession(ctx, topics, handler, retries, false) + default: + return nil, groupRequest.Err + } + + // Retrieve and sort claims + var claims map[string][]int32 + if len(groupRequest.MemberAssignment) > 0 { + members, err := groupRequest.GetMemberAssignment() + if err != nil { + return nil, err + } + claims = members.Topics + c.userData = members.UserData + + for _, partitions := range claims { + sort.Sort(int32Slice(partitions)) + } + } + + return newConsumerGroupSession(ctx, c, claims, join.MemberId, join.GenerationId, handler) +} + +func (c *consumerGroup) joinGroupRequest(coordinator *Broker, topics []string) (*JoinGroupResponse, error) { + req := &JoinGroupRequest{ + GroupId: c.groupID, + MemberId: c.memberID, + SessionTimeout: int32(c.config.Consumer.Group.Session.Timeout / time.Millisecond), + ProtocolType: "consumer", + } + if c.config.Version.IsAtLeast(V0_10_1_0) { + req.Version = 1 + req.RebalanceTimeout = int32(c.config.Consumer.Group.Rebalance.Timeout / time.Millisecond) + } + + // use static user-data if configured, otherwise use consumer-group userdata from the last sync + userData := c.config.Consumer.Group.Member.UserData + if len(userData) == 0 { + userData = c.userData + } + meta := &ConsumerGroupMemberMetadata{ + Topics: topics, + UserData: userData, + } + strategy := c.config.Consumer.Group.Rebalance.Strategy + if err := req.AddGroupProtocolMetadata(strategy.Name(), meta); err != nil { + return nil, err + } + + return coordinator.JoinGroup(req) +} + +func (c *consumerGroup) syncGroupRequest(coordinator *Broker, plan BalanceStrategyPlan, generationID int32) (*SyncGroupResponse, error) { + req := &SyncGroupRequest{ + GroupId: c.groupID, + MemberId: c.memberID, + GenerationId: generationID, + } + strategy := c.config.Consumer.Group.Rebalance.Strategy + for memberID, topics := range plan { + assignment := &ConsumerGroupMemberAssignment{Topics: topics} + userDataBytes, err := strategy.AssignmentData(memberID, topics, generationID) + if err != nil { + return nil, err + } + assignment.UserData = userDataBytes + if err := req.AddGroupAssignmentMember(memberID, assignment); err != nil { + return nil, err + } + } + return coordinator.SyncGroup(req) +} + +func (c *consumerGroup) heartbeatRequest(coordinator *Broker, memberID string, generationID int32) (*HeartbeatResponse, error) { + req := &HeartbeatRequest{ + GroupId: c.groupID, + MemberId: memberID, + GenerationId: generationID, + } + + return coordinator.Heartbeat(req) +} + +func (c *consumerGroup) balance(members map[string]ConsumerGroupMemberMetadata) (BalanceStrategyPlan, error) { + topics := make(map[string][]int32) + for _, meta := range members { + for _, topic := range meta.Topics { + topics[topic] = nil + } + } + + for topic := range topics { + partitions, err := c.client.Partitions(topic) + if err != nil { + return nil, err + } + topics[topic] = partitions + } + + strategy := c.config.Consumer.Group.Rebalance.Strategy + return strategy.Plan(members, topics) +} + +// Leaves the cluster, called by Close. 
+func (c *consumerGroup) leave() error { + c.lock.Lock() + defer c.lock.Unlock() + if c.memberID == "" { + return nil + } + + coordinator, err := c.client.Coordinator(c.groupID) + if err != nil { + return err + } + + resp, err := coordinator.LeaveGroup(&LeaveGroupRequest{ + GroupId: c.groupID, + MemberId: c.memberID, + }) + if err != nil { + _ = coordinator.Close() + return err + } + + // Unset memberID + c.memberID = "" + + // Check response + switch resp.Err { + case ErrRebalanceInProgress, ErrUnknownMemberId, ErrNoError: + return nil + default: + return resp.Err + } +} + +func (c *consumerGroup) handleError(err error, topic string, partition int32) { + if _, ok := err.(*ConsumerError); !ok && topic != "" && partition > -1 { + err = &ConsumerError{ + Topic: topic, + Partition: partition, + Err: err, + } + } + + if !c.config.Consumer.Return.Errors { + Logger.Println(err) + return + } + + select { + case <-c.closed: + // consumer is closed + return + default: + } + + select { + case c.errors <- err: + default: + // no error listener + } +} + +func (c *consumerGroup) loopCheckPartitionNumbers(topics []string, session *consumerGroupSession) { + pause := time.NewTicker(c.config.Metadata.RefreshFrequency) + defer session.cancel() + defer pause.Stop() + var oldTopicToPartitionNum map[string]int + var err error + if oldTopicToPartitionNum, err = c.topicToPartitionNumbers(topics); err != nil { + return + } + for { + if newTopicToPartitionNum, err := c.topicToPartitionNumbers(topics); err != nil { + return + } else { + for topic, num := range oldTopicToPartitionNum { + if newTopicToPartitionNum[topic] != num { + return // trigger the end of the session on exit + } + } + } + select { + case <-pause.C: + case <-session.ctx.Done(): + Logger.Printf("loop check partition number coroutine will exit, topics %s", topics) + // if session closed by other, should be exited + return + case <-c.closed: + return + } + } +} + +func (c *consumerGroup) topicToPartitionNumbers(topics []string) (map[string]int, error) { + topicToPartitionNum := make(map[string]int, len(topics)) + for _, topic := range topics { + if partitionNum, err := c.client.Partitions(topic); err != nil { + Logger.Printf("Consumer Group topic %s get partition number failed %v", topic, err) + return nil, err + } else { + topicToPartitionNum[topic] = len(partitionNum) + } + } + return topicToPartitionNum, nil +} + +// -------------------------------------------------------------------- + +// ConsumerGroupSession represents a consumer group member session. +type ConsumerGroupSession interface { + // Claims returns information about the claimed partitions by topic. + Claims() map[string][]int32 + + // MemberID returns the cluster member ID. + MemberID() string + + // GenerationID returns the current generation ID. + GenerationID() int32 + + // MarkOffset marks the provided offset, alongside a metadata string + // that represents the state of the partition consumer at that point in time. The + // metadata string can be used by another consumer to restore that state, so it + // can resume consumption. + // + // To follow upstream conventions, you are expected to mark the offset of the + // next message to read, not the last message read. Thus, when calling `MarkOffset` + // you should typically add one to the offset of the last consumed message. + // + // Note: calling MarkOffset does not necessarily commit the offset to the backend + // store immediately for efficiency reasons, and it may never be committed if + // your application crashes. 
This means that you may end up processing the same
+	// message twice, and your processing should ideally be idempotent.
+	MarkOffset(topic string, partition int32, offset int64, metadata string)
+
+	// Commit the offset to the backend
+	//
+	// Note: calling Commit performs a blocking synchronous operation.
+	Commit()
+
+	// ResetOffset resets to the provided offset, alongside a metadata string that
+	// represents the state of the partition consumer at that point in time. Reset
+	// acts as a counterpart to MarkOffset, the difference being that it allows
+	// resetting an offset to an earlier or smaller value, where MarkOffset only
+	// allows incrementing the offset. cf. MarkOffset for more details.
+	ResetOffset(topic string, partition int32, offset int64, metadata string)
+
+	// MarkMessage marks a message as consumed.
+	MarkMessage(msg *ConsumerMessage, metadata string)
+
+	// Context returns the session context.
+	Context() context.Context
+}
+
+type consumerGroupSession struct {
+	parent       *consumerGroup
+	memberID     string
+	generationID int32
+	handler      ConsumerGroupHandler
+
+	claims  map[string][]int32
+	offsets *offsetManager
+	ctx     context.Context
+	cancel  func()
+
+	waitGroup       sync.WaitGroup
+	releaseOnce     sync.Once
+	hbDying, hbDead chan none
+}
+
+func newConsumerGroupSession(ctx context.Context, parent *consumerGroup, claims map[string][]int32, memberID string, generationID int32, handler ConsumerGroupHandler) (*consumerGroupSession, error) {
+	// init offset manager
+	offsets, err := newOffsetManagerFromClient(parent.groupID, memberID, generationID, parent.client)
+	if err != nil {
+		return nil, err
+	}
+
+	// init context
+	ctx, cancel := context.WithCancel(ctx)
+
+	// init session
+	sess := &consumerGroupSession{
+		parent:       parent,
+		memberID:     memberID,
+		generationID: generationID,
+		handler:      handler,
+		offsets:      offsets,
+		claims:       claims,
+		ctx:          ctx,
+		cancel:       cancel,
+		hbDying:      make(chan none),
+		hbDead:       make(chan none),
+	}
+
+	// start heartbeat loop
+	go sess.heartbeatLoop()
+
+	// create a POM for each claim
+	for topic, partitions := range claims {
+		for _, partition := range partitions {
+			pom, err := offsets.ManagePartition(topic, partition)
+			if err != nil {
+				_ = sess.release(false)
+				return nil, err
+			}
+
+			// handle POM errors
+			go func(topic string, partition int32) {
+				for err := range pom.Errors() {
+					sess.parent.handleError(err, topic, partition)
+				}
+			}(topic, partition)
+		}
+	}
+
+	// perform setup
+	if err := handler.Setup(sess); err != nil {
+		_ = sess.release(true)
+		return nil, err
+	}
+
+	// start consuming
+	for topic, partitions := range claims {
+		for _, partition := range partitions {
+			sess.waitGroup.Add(1)
+
+			go func(topic string, partition int32) {
+				defer sess.waitGroup.Done()
+
+				// cancel the session as soon as the first
+				// goroutine exits
+				defer sess.cancel()
+
+				// consume a single topic/partition, blocking
+				sess.consume(topic, partition)
+			}(topic, partition)
+		}
+	}
+	return sess, nil
+}
+
+func (s *consumerGroupSession) Claims() map[string][]int32 { return s.claims }
+func (s *consumerGroupSession) MemberID() string           { return s.memberID }
+func (s *consumerGroupSession) GenerationID() int32        { return s.generationID }
+
+func (s *consumerGroupSession) MarkOffset(topic string, partition int32, offset int64, metadata string) {
+	if pom := s.offsets.findPOM(topic, partition); pom != nil {
+		pom.MarkOffset(offset, metadata)
+	}
+}
+
+func (s *consumerGroupSession) Commit() {
+	s.offsets.Commit()
+}
+
+func (s *consumerGroupSession)
ResetOffset(topic string, partition int32, offset int64, metadata string) { + if pom := s.offsets.findPOM(topic, partition); pom != nil { + pom.ResetOffset(offset, metadata) + } +} + +func (s *consumerGroupSession) MarkMessage(msg *ConsumerMessage, metadata string) { + s.MarkOffset(msg.Topic, msg.Partition, msg.Offset+1, metadata) +} + +func (s *consumerGroupSession) Context() context.Context { + return s.ctx +} + +func (s *consumerGroupSession) consume(topic string, partition int32) { + // quick exit if rebalance is due + select { + case <-s.ctx.Done(): + return + case <-s.parent.closed: + return + default: + } + + // get next offset + offset := s.parent.config.Consumer.Offsets.Initial + if pom := s.offsets.findPOM(topic, partition); pom != nil { + offset, _ = pom.NextOffset() + } + + // create new claim + claim, err := newConsumerGroupClaim(s, topic, partition, offset) + if err != nil { + s.parent.handleError(err, topic, partition) + return + } + + // handle errors + go func() { + for err := range claim.Errors() { + s.parent.handleError(err, topic, partition) + } + }() + + // trigger close when session is done + go func() { + select { + case <-s.ctx.Done(): + case <-s.parent.closed: + } + claim.AsyncClose() + }() + + // start processing + if err := s.handler.ConsumeClaim(s, claim); err != nil { + s.parent.handleError(err, topic, partition) + } + + // ensure consumer is closed & drained + claim.AsyncClose() + for _, err := range claim.waitClosed() { + s.parent.handleError(err, topic, partition) + } +} + +func (s *consumerGroupSession) release(withCleanup bool) (err error) { + // signal release, stop heartbeat + s.cancel() + + // wait for consumers to exit + s.waitGroup.Wait() + + // perform release + s.releaseOnce.Do(func() { + if withCleanup { + if e := s.handler.Cleanup(s); e != nil { + s.parent.handleError(e, "", -1) + err = e + } + } + + if e := s.offsets.Close(); e != nil { + err = e + } + + close(s.hbDying) + <-s.hbDead + }) + + return +} + +func (s *consumerGroupSession) heartbeatLoop() { + defer close(s.hbDead) + defer s.cancel() // trigger the end of the session on exit + + pause := time.NewTicker(s.parent.config.Consumer.Group.Heartbeat.Interval) + defer pause.Stop() + + retryBackoff := time.NewTimer(s.parent.config.Metadata.Retry.Backoff) + defer retryBackoff.Stop() + + retries := s.parent.config.Metadata.Retry.Max + for { + coordinator, err := s.parent.client.Coordinator(s.parent.groupID) + if err != nil { + if retries <= 0 { + s.parent.handleError(err, "", -1) + return + } + retryBackoff.Reset(s.parent.config.Metadata.Retry.Backoff) + select { + case <-s.hbDying: + return + case <-retryBackoff.C: + retries-- + } + continue + } + + resp, err := s.parent.heartbeatRequest(coordinator, s.memberID, s.generationID) + if err != nil { + _ = coordinator.Close() + + if retries <= 0 { + s.parent.handleError(err, "", -1) + return + } + + retries-- + continue + } + + switch resp.Err { + case ErrNoError: + retries = s.parent.config.Metadata.Retry.Max + case ErrRebalanceInProgress, ErrUnknownMemberId, ErrIllegalGeneration: + return + default: + s.parent.handleError(resp.Err, "", -1) + return + } + + select { + case <-pause.C: + case <-s.hbDying: + return + } + } +} + +// -------------------------------------------------------------------- + +// ConsumerGroupHandler instances are used to handle individual topic/partition claims. +// It also provides hooks for your consumer group session life-cycle and allow you to +// trigger logic before or after the consume loop(s). 
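+//
+// A minimal handler might look like this (illustrative sketch; exampleHandler
+// is a placeholder, not part of the API):
+//
+//	type exampleHandler struct{}
+//
+//	func (exampleHandler) Setup(ConsumerGroupSession) error   { return nil }
+//	func (exampleHandler) Cleanup(ConsumerGroupSession) error { return nil }
+//	func (exampleHandler) ConsumeClaim(sess ConsumerGroupSession, claim ConsumerGroupClaim) error {
+//		for msg := range claim.Messages() {
+//			sess.MarkMessage(msg, "")
+//		}
+//		return nil
+//	}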
+//
+// PLEASE NOTE that handlers are likely to be called from several goroutines
+// concurrently; ensure that all state is safely protected against race conditions.
+type ConsumerGroupHandler interface {
+	// Setup is run at the beginning of a new session, before ConsumeClaim.
+	Setup(ConsumerGroupSession) error
+
+	// Cleanup is run at the end of a session, once all ConsumeClaim goroutines have exited
+	// but before the offsets are committed for the very last time.
+	Cleanup(ConsumerGroupSession) error
+
+	// ConsumeClaim must start a consumer loop of ConsumerGroupClaim's Messages().
+	// Once the Messages() channel is closed, the Handler must finish its processing
+	// loop and exit.
+	ConsumeClaim(ConsumerGroupSession, ConsumerGroupClaim) error
+}
+
+// ConsumerGroupClaim processes Kafka messages from a given topic and partition within a consumer group.
+type ConsumerGroupClaim interface {
+	// Topic returns the consumed topic name.
+	Topic() string
+
+	// Partition returns the consumed partition.
+	Partition() int32
+
+	// InitialOffset returns the initial offset that was used as a starting point for this claim.
+	InitialOffset() int64
+
+	// HighWaterMarkOffset returns the high water mark offset of the partition,
+	// i.e. the offset that will be used for the next message that will be produced.
+	// You can use this to determine how far behind the processing is.
+	HighWaterMarkOffset() int64
+
+	// Messages returns the read channel for the messages that are returned by
+	// the broker. The messages channel will be closed when a new rebalance cycle
+	// is due. You must finish processing and mark offsets within
+	// Config.Consumer.Group.Session.Timeout before the topic/partition is eventually
+	// re-assigned to another group member.
+	Messages() <-chan *ConsumerMessage
+}
+
+type consumerGroupClaim struct {
+	topic     string
+	partition int32
+	offset    int64
+	PartitionConsumer
+}
+
+func newConsumerGroupClaim(sess *consumerGroupSession, topic string, partition int32, offset int64) (*consumerGroupClaim, error) {
+	pcm, err := sess.parent.consumer.ConsumePartition(topic, partition, offset)
+	if err == ErrOffsetOutOfRange {
+		offset = sess.parent.config.Consumer.Offsets.Initial
+		pcm, err = sess.parent.consumer.ConsumePartition(topic, partition, offset)
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	go func() {
+		for err := range pcm.Errors() {
+			sess.parent.handleError(err, topic, partition)
+		}
+	}()
+
+	return &consumerGroupClaim{
+		topic:             topic,
+		partition:         partition,
+		offset:            offset,
+		PartitionConsumer: pcm,
+	}, nil
+}
+
+func (c *consumerGroupClaim) Topic() string        { return c.topic }
+func (c *consumerGroupClaim) Partition() int32     { return c.partition }
+func (c *consumerGroupClaim) InitialOffset() int64 { return c.offset }
+
+// waitClosed drains messages and errors and ensures the claim is fully closed.
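+// Messages are discarded in a separate goroutine so that draining Errors
+// cannot deadlock against a still-full Messages channel.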
+func (c *consumerGroupClaim) waitClosed() (errs ConsumerErrors) { + go func() { + for range c.Messages() { + } + }() + + for err := range c.Errors() { + errs = append(errs, err) + } + return +} diff --git a/vendor/github.com/Shopify/sarama/consumer_group_members.go b/vendor/github.com/Shopify/sarama/consumer_group_members.go new file mode 100644 index 00000000000..21b11e944fe --- /dev/null +++ b/vendor/github.com/Shopify/sarama/consumer_group_members.go @@ -0,0 +1,96 @@ +package sarama + +// ConsumerGroupMemberMetadata holds the metadata for consumer group +type ConsumerGroupMemberMetadata struct { + Version int16 + Topics []string + UserData []byte +} + +func (m *ConsumerGroupMemberMetadata) encode(pe packetEncoder) error { + pe.putInt16(m.Version) + + if err := pe.putStringArray(m.Topics); err != nil { + return err + } + + if err := pe.putBytes(m.UserData); err != nil { + return err + } + + return nil +} + +func (m *ConsumerGroupMemberMetadata) decode(pd packetDecoder) (err error) { + if m.Version, err = pd.getInt16(); err != nil { + return + } + + if m.Topics, err = pd.getStringArray(); err != nil { + return + } + + if m.UserData, err = pd.getBytes(); err != nil { + return + } + + return nil +} + +// ConsumerGroupMemberAssignment holds the member assignment for a consume group +type ConsumerGroupMemberAssignment struct { + Version int16 + Topics map[string][]int32 + UserData []byte +} + +func (m *ConsumerGroupMemberAssignment) encode(pe packetEncoder) error { + pe.putInt16(m.Version) + + if err := pe.putArrayLength(len(m.Topics)); err != nil { + return err + } + + for topic, partitions := range m.Topics { + if err := pe.putString(topic); err != nil { + return err + } + if err := pe.putInt32Array(partitions); err != nil { + return err + } + } + + if err := pe.putBytes(m.UserData); err != nil { + return err + } + + return nil +} + +func (m *ConsumerGroupMemberAssignment) decode(pd packetDecoder) (err error) { + if m.Version, err = pd.getInt16(); err != nil { + return + } + + var topicLen int + if topicLen, err = pd.getArrayLength(); err != nil { + return + } + + m.Topics = make(map[string][]int32, topicLen) + for i := 0; i < topicLen; i++ { + var topic string + if topic, err = pd.getString(); err != nil { + return + } + if m.Topics[topic], err = pd.getInt32Array(); err != nil { + return + } + } + + if m.UserData, err = pd.getBytes(); err != nil { + return + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_request.go b/vendor/github.com/Shopify/sarama/consumer_metadata_request.go new file mode 100644 index 00000000000..5c18e048a72 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/consumer_metadata_request.go @@ -0,0 +1,38 @@ +package sarama + +// ConsumerMetadataRequest is used for metadata requests +type ConsumerMetadataRequest struct { + ConsumerGroup string +} + +func (r *ConsumerMetadataRequest) encode(pe packetEncoder) error { + tmp := new(FindCoordinatorRequest) + tmp.CoordinatorKey = r.ConsumerGroup + tmp.CoordinatorType = CoordinatorGroup + return tmp.encode(pe) +} + +func (r *ConsumerMetadataRequest) decode(pd packetDecoder, version int16) (err error) { + tmp := new(FindCoordinatorRequest) + if err := tmp.decode(pd, version); err != nil { + return err + } + r.ConsumerGroup = tmp.CoordinatorKey + return nil +} + +func (r *ConsumerMetadataRequest) key() int16 { + return 10 +} + +func (r *ConsumerMetadataRequest) version() int16 { + return 0 +} + +func (r *ConsumerMetadataRequest) headerVersion() int16 { + return 1 +} + +func (r 
*ConsumerMetadataRequest) requiredVersion() KafkaVersion {
+	return V0_8_2_0
+}
diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_response.go b/vendor/github.com/Shopify/sarama/consumer_metadata_response.go
new file mode 100644
index 00000000000..7fe0cf9716d
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/consumer_metadata_response.go
@@ -0,0 +1,82 @@
+package sarama
+
+import (
+	"net"
+	"strconv"
+)
+
+// ConsumerMetadataResponse holds the response for a consumer group metadata request
+type ConsumerMetadataResponse struct {
+	Err             KError
+	Coordinator     *Broker
+	CoordinatorID   int32  // deprecated: use Coordinator.ID()
+	CoordinatorHost string // deprecated: use Coordinator.Addr()
+	CoordinatorPort int32  // deprecated: use Coordinator.Addr()
+}
+
+func (r *ConsumerMetadataResponse) decode(pd packetDecoder, version int16) (err error) {
+	tmp := new(FindCoordinatorResponse)
+
+	if err := tmp.decode(pd, version); err != nil {
+		return err
+	}
+
+	r.Err = tmp.Err
+
+	r.Coordinator = tmp.Coordinator
+	if tmp.Coordinator == nil {
+		return nil
+	}
+
+	// this can all go away in 2.0, but we have to fill in deprecated fields to maintain
+	// backwards compatibility
+	host, portstr, err := net.SplitHostPort(r.Coordinator.Addr())
+	if err != nil {
+		return err
+	}
+	port, err := strconv.ParseInt(portstr, 10, 32)
+	if err != nil {
+		return err
+	}
+	r.CoordinatorID = r.Coordinator.ID()
+	r.CoordinatorHost = host
+	r.CoordinatorPort = int32(port)
+
+	return nil
+}
+
+func (r *ConsumerMetadataResponse) encode(pe packetEncoder) error {
+	if r.Coordinator == nil {
+		r.Coordinator = new(Broker)
+		r.Coordinator.id = r.CoordinatorID
+		r.Coordinator.addr = net.JoinHostPort(r.CoordinatorHost, strconv.Itoa(int(r.CoordinatorPort)))
+	}
+
+	tmp := &FindCoordinatorResponse{
+		Version:     0,
+		Err:         r.Err,
+		Coordinator: r.Coordinator,
+	}
+
+	if err := tmp.encode(pe); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (r *ConsumerMetadataResponse) key() int16 {
+	return 10
+}
+
+func (r *ConsumerMetadataResponse) version() int16 {
+	return 0
+}
+
+func (r *ConsumerMetadataResponse) headerVersion() int16 {
+	return 0
+}
+
+func (r *ConsumerMetadataResponse) requiredVersion() KafkaVersion {
+	return V0_8_2_0
+}
diff --git a/vendor/github.com/Shopify/sarama/control_record.go b/vendor/github.com/Shopify/sarama/control_record.go
new file mode 100644
index 00000000000..244a821368d
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/control_record.go
@@ -0,0 +1,74 @@
+package sarama
+
+// ControlRecordType is the type of a Kafka control record.
+type ControlRecordType int
+
+const (
+	// ControlRecordAbort is a control record for abort
+	ControlRecordAbort ControlRecordType = iota
+	// ControlRecordCommit is a control record for commit
+	ControlRecordCommit
+	// ControlRecordUnknown is a control record of unknown type
+	ControlRecordUnknown
+)
+
+// Control records are returned as records by FetchRequest.
+// However, unlike "normal" records, they mean nothing application-wise;
+// they only serve internal logic for supporting transactions.
+type ControlRecord struct {
+	Version          int16
+	CoordinatorEpoch int32
+	Type             ControlRecordType
+}
+
+func (cr *ControlRecord) decode(key, value packetDecoder) error {
+	var err error
+	// There is a version for the value part AND the key part.
And I have no idea if they are supposed to match or not + // Either way, all these version can only be 0 for now + cr.Version, err = key.getInt16() + if err != nil { + return err + } + + recordType, err := key.getInt16() + if err != nil { + return err + } + + switch recordType { + case 0: + cr.Type = ControlRecordAbort + case 1: + cr.Type = ControlRecordCommit + default: + // from JAVA implementation: + // UNKNOWN is used to indicate a control type which the client is not aware of and should be ignored + cr.Type = ControlRecordUnknown + } + // we want to parse value only if we are decoding control record of known type + if cr.Type != ControlRecordUnknown { + cr.Version, err = value.getInt16() + if err != nil { + return err + } + + cr.CoordinatorEpoch, err = value.getInt32() + if err != nil { + return err + } + } + return nil +} + +func (cr *ControlRecord) encode(key, value packetEncoder) { + value.putInt16(cr.Version) + value.putInt32(cr.CoordinatorEpoch) + key.putInt16(cr.Version) + + switch cr.Type { + case ControlRecordAbort: + key.putInt16(0) + case ControlRecordCommit: + key.putInt16(1) + } +} diff --git a/vendor/github.com/Shopify/sarama/crc32_field.go b/vendor/github.com/Shopify/sarama/crc32_field.go new file mode 100644 index 00000000000..32236e50f03 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/crc32_field.go @@ -0,0 +1,87 @@ +package sarama + +import ( + "encoding/binary" + "fmt" + "hash/crc32" + "sync" +) + +type crcPolynomial int8 + +const ( + crcIEEE crcPolynomial = iota + crcCastagnoli +) + +var crc32FieldPool = sync.Pool{} + +func acquireCrc32Field(polynomial crcPolynomial) *crc32Field { + val := crc32FieldPool.Get() + if val != nil { + c := val.(*crc32Field) + c.polynomial = polynomial + return c + } + return newCRC32Field(polynomial) +} + +func releaseCrc32Field(c *crc32Field) { + crc32FieldPool.Put(c) +} + +var castagnoliTable = crc32.MakeTable(crc32.Castagnoli) + +// crc32Field implements the pushEncoder and pushDecoder interfaces for calculating CRC32s. 
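+//
+// The flow, as implemented below, is: reserveLength reserves 4 bytes for the
+// checksum and saveOffset records where they sit; once the payload has been
+// written, run computes the CRC over buf[startOffset+4:curOffset] (everything
+// after the reserved slot) and writes it back, big-endian, into those 4 bytes.
+// check recomputes the same CRC on the decode path and compares it against
+// the stored value.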
+type crc32Field struct { + startOffset int + polynomial crcPolynomial +} + +func (c *crc32Field) saveOffset(in int) { + c.startOffset = in +} + +func (c *crc32Field) reserveLength() int { + return 4 +} + +func newCRC32Field(polynomial crcPolynomial) *crc32Field { + return &crc32Field{polynomial: polynomial} +} + +func (c *crc32Field) run(curOffset int, buf []byte) error { + crc, err := c.crc(curOffset, buf) + if err != nil { + return err + } + binary.BigEndian.PutUint32(buf[c.startOffset:], crc) + return nil +} + +func (c *crc32Field) check(curOffset int, buf []byte) error { + crc, err := c.crc(curOffset, buf) + if err != nil { + return err + } + + expected := binary.BigEndian.Uint32(buf[c.startOffset:]) + if crc != expected { + return PacketDecodingError{fmt.Sprintf("CRC didn't match expected %#x got %#x", expected, crc)} + } + + return nil +} + +func (c *crc32Field) crc(curOffset int, buf []byte) (uint32, error) { + var tab *crc32.Table + switch c.polynomial { + case crcIEEE: + tab = crc32.IEEETable + case crcCastagnoli: + tab = castagnoliTable + default: + return 0, PacketDecodingError{"invalid CRC type"} + } + return crc32.Checksum(buf[c.startOffset+4:curOffset], tab), nil +} diff --git a/vendor/github.com/Shopify/sarama/create_partitions_request.go b/vendor/github.com/Shopify/sarama/create_partitions_request.go new file mode 100644 index 00000000000..46fb0440249 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/create_partitions_request.go @@ -0,0 +1,125 @@ +package sarama + +import "time" + +type CreatePartitionsRequest struct { + TopicPartitions map[string]*TopicPartition + Timeout time.Duration + ValidateOnly bool +} + +func (c *CreatePartitionsRequest) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(c.TopicPartitions)); err != nil { + return err + } + + for topic, partition := range c.TopicPartitions { + if err := pe.putString(topic); err != nil { + return err + } + if err := partition.encode(pe); err != nil { + return err + } + } + + pe.putInt32(int32(c.Timeout / time.Millisecond)) + + pe.putBool(c.ValidateOnly) + + return nil +} + +func (c *CreatePartitionsRequest) decode(pd packetDecoder, version int16) (err error) { + n, err := pd.getArrayLength() + if err != nil { + return err + } + c.TopicPartitions = make(map[string]*TopicPartition, n) + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + c.TopicPartitions[topic] = new(TopicPartition) + if err := c.TopicPartitions[topic].decode(pd, version); err != nil { + return err + } + } + + timeout, err := pd.getInt32() + if err != nil { + return err + } + c.Timeout = time.Duration(timeout) * time.Millisecond + + if c.ValidateOnly, err = pd.getBool(); err != nil { + return err + } + + return nil +} + +func (r *CreatePartitionsRequest) key() int16 { + return 37 +} + +func (r *CreatePartitionsRequest) version() int16 { + return 0 +} + +func (r *CreatePartitionsRequest) headerVersion() int16 { + return 1 +} + +func (r *CreatePartitionsRequest) requiredVersion() KafkaVersion { + return V1_0_0_0 +} + +type TopicPartition struct { + Count int32 + Assignment [][]int32 +} + +func (t *TopicPartition) encode(pe packetEncoder) error { + pe.putInt32(t.Count) + + if len(t.Assignment) == 0 { + pe.putInt32(-1) + return nil + } + + if err := pe.putArrayLength(len(t.Assignment)); err != nil { + return err + } + + for _, assign := range t.Assignment { + if err := pe.putInt32Array(assign); err != nil { + return err + } + } + + return nil +} + +func (t *TopicPartition) decode(pd 
packetDecoder, version int16) (err error) { + if t.Count, err = pd.getInt32(); err != nil { + return err + } + + n, err := pd.getInt32() + if err != nil { + return err + } + if n <= 0 { + return nil + } + t.Assignment = make([][]int32, n) + + for i := 0; i < int(n); i++ { + if t.Assignment[i], err = pd.getInt32Array(); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/create_partitions_response.go b/vendor/github.com/Shopify/sarama/create_partitions_response.go new file mode 100644 index 00000000000..12ce78857bc --- /dev/null +++ b/vendor/github.com/Shopify/sarama/create_partitions_response.go @@ -0,0 +1,109 @@ +package sarama + +import ( + "fmt" + "time" +) + +type CreatePartitionsResponse struct { + ThrottleTime time.Duration + TopicPartitionErrors map[string]*TopicPartitionError +} + +func (c *CreatePartitionsResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(c.ThrottleTime / time.Millisecond)) + if err := pe.putArrayLength(len(c.TopicPartitionErrors)); err != nil { + return err + } + + for topic, partitionError := range c.TopicPartitionErrors { + if err := pe.putString(topic); err != nil { + return err + } + if err := partitionError.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (c *CreatePartitionsResponse) decode(pd packetDecoder, version int16) (err error) { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + c.TopicPartitionErrors = make(map[string]*TopicPartitionError, n) + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + c.TopicPartitionErrors[topic] = new(TopicPartitionError) + if err := c.TopicPartitionErrors[topic].decode(pd, version); err != nil { + return err + } + } + + return nil +} + +func (r *CreatePartitionsResponse) key() int16 { + return 37 +} + +func (r *CreatePartitionsResponse) version() int16 { + return 0 +} + +func (r *CreatePartitionsResponse) headerVersion() int16 { + return 0 +} + +func (r *CreatePartitionsResponse) requiredVersion() KafkaVersion { + return V1_0_0_0 +} + +type TopicPartitionError struct { + Err KError + ErrMsg *string +} + +func (t *TopicPartitionError) Error() string { + text := t.Err.Error() + if t.ErrMsg != nil { + text = fmt.Sprintf("%s - %s", text, *t.ErrMsg) + } + return text +} + +func (t *TopicPartitionError) encode(pe packetEncoder) error { + pe.putInt16(int16(t.Err)) + + if err := pe.putNullableString(t.ErrMsg); err != nil { + return err + } + + return nil +} + +func (t *TopicPartitionError) decode(pd packetDecoder, version int16) (err error) { + kerr, err := pd.getInt16() + if err != nil { + return err + } + t.Err = KError(kerr) + + if t.ErrMsg, err = pd.getNullableString(); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/create_topics_request.go b/vendor/github.com/Shopify/sarama/create_topics_request.go new file mode 100644 index 00000000000..287acd069b6 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/create_topics_request.go @@ -0,0 +1,178 @@ +package sarama + +import ( + "time" +) + +type CreateTopicsRequest struct { + Version int16 + + TopicDetails map[string]*TopicDetail + Timeout time.Duration + ValidateOnly bool +} + +func (c *CreateTopicsRequest) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(c.TopicDetails)); err != nil { + return err + } + for topic, detail := range 
c.TopicDetails { + if err := pe.putString(topic); err != nil { + return err + } + if err := detail.encode(pe); err != nil { + return err + } + } + + pe.putInt32(int32(c.Timeout / time.Millisecond)) + + if c.Version >= 1 { + pe.putBool(c.ValidateOnly) + } + + return nil +} + +func (c *CreateTopicsRequest) decode(pd packetDecoder, version int16) (err error) { + n, err := pd.getArrayLength() + if err != nil { + return err + } + + c.TopicDetails = make(map[string]*TopicDetail, n) + + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + c.TopicDetails[topic] = new(TopicDetail) + if err = c.TopicDetails[topic].decode(pd, version); err != nil { + return err + } + } + + timeout, err := pd.getInt32() + if err != nil { + return err + } + c.Timeout = time.Duration(timeout) * time.Millisecond + + if version >= 1 { + c.ValidateOnly, err = pd.getBool() + if err != nil { + return err + } + + c.Version = version + } + + return nil +} + +func (c *CreateTopicsRequest) key() int16 { + return 19 +} + +func (c *CreateTopicsRequest) version() int16 { + return c.Version +} + +func (r *CreateTopicsRequest) headerVersion() int16 { + return 1 +} + +func (c *CreateTopicsRequest) requiredVersion() KafkaVersion { + switch c.Version { + case 2: + return V1_0_0_0 + case 1: + return V0_11_0_0 + default: + return V0_10_1_0 + } +} + +type TopicDetail struct { + NumPartitions int32 + ReplicationFactor int16 + ReplicaAssignment map[int32][]int32 + ConfigEntries map[string]*string +} + +func (t *TopicDetail) encode(pe packetEncoder) error { + pe.putInt32(t.NumPartitions) + pe.putInt16(t.ReplicationFactor) + + if err := pe.putArrayLength(len(t.ReplicaAssignment)); err != nil { + return err + } + for partition, assignment := range t.ReplicaAssignment { + pe.putInt32(partition) + if err := pe.putInt32Array(assignment); err != nil { + return err + } + } + + if err := pe.putArrayLength(len(t.ConfigEntries)); err != nil { + return err + } + for configKey, configValue := range t.ConfigEntries { + if err := pe.putString(configKey); err != nil { + return err + } + if err := pe.putNullableString(configValue); err != nil { + return err + } + } + + return nil +} + +func (t *TopicDetail) decode(pd packetDecoder, version int16) (err error) { + if t.NumPartitions, err = pd.getInt32(); err != nil { + return err + } + if t.ReplicationFactor, err = pd.getInt16(); err != nil { + return err + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + if n > 0 { + t.ReplicaAssignment = make(map[int32][]int32, n) + for i := 0; i < n; i++ { + replica, err := pd.getInt32() + if err != nil { + return err + } + if t.ReplicaAssignment[replica], err = pd.getInt32Array(); err != nil { + return err + } + } + } + + n, err = pd.getArrayLength() + if err != nil { + return err + } + + if n > 0 { + t.ConfigEntries = make(map[string]*string, n) + for i := 0; i < n; i++ { + configKey, err := pd.getString() + if err != nil { + return err + } + if t.ConfigEntries[configKey], err = pd.getNullableString(); err != nil { + return err + } + } + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/create_topics_response.go b/vendor/github.com/Shopify/sarama/create_topics_response.go new file mode 100644 index 00000000000..7e1448a6692 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/create_topics_response.go @@ -0,0 +1,127 @@ +package sarama + +import ( + "fmt" + "time" +) + +type CreateTopicsResponse struct { + Version int16 + ThrottleTime time.Duration + TopicErrors map[string]*TopicError +} + 
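+// A minimal usage sketch (illustrative only; not part of the vendored sarama
+// source). It assumes the Broker.CreateTopics helper defined elsewhere in this
+// package, and treats ErrTopicAlreadyExists as benign so the call is
+// idempotent:
+//
+//	resp, err := broker.CreateTopics(req)
+//	if err != nil {
+//		return err
+//	}
+//	for topic, tErr := range resp.TopicErrors {
+//		if tErr.Err != ErrNoError && tErr.Err != ErrTopicAlreadyExists {
+//			return fmt.Errorf("creating topic %q: %w", topic, tErr)
+//		}
+//	}
+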
+func (c *CreateTopicsResponse) encode(pe packetEncoder) error { + if c.Version >= 2 { + pe.putInt32(int32(c.ThrottleTime / time.Millisecond)) + } + + if err := pe.putArrayLength(len(c.TopicErrors)); err != nil { + return err + } + for topic, topicError := range c.TopicErrors { + if err := pe.putString(topic); err != nil { + return err + } + if err := topicError.encode(pe, c.Version); err != nil { + return err + } + } + + return nil +} + +func (c *CreateTopicsResponse) decode(pd packetDecoder, version int16) (err error) { + c.Version = version + + if version >= 2 { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + c.TopicErrors = make(map[string]*TopicError, n) + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + c.TopicErrors[topic] = new(TopicError) + if err := c.TopicErrors[topic].decode(pd, version); err != nil { + return err + } + } + + return nil +} + +func (c *CreateTopicsResponse) key() int16 { + return 19 +} + +func (c *CreateTopicsResponse) version() int16 { + return c.Version +} + +func (c *CreateTopicsResponse) headerVersion() int16 { + return 0 +} + +func (c *CreateTopicsResponse) requiredVersion() KafkaVersion { + switch c.Version { + case 2: + return V1_0_0_0 + case 1: + return V0_11_0_0 + default: + return V0_10_1_0 + } +} + +type TopicError struct { + Err KError + ErrMsg *string +} + +func (t *TopicError) Error() string { + text := t.Err.Error() + if t.ErrMsg != nil { + text = fmt.Sprintf("%s - %s", text, *t.ErrMsg) + } + return text +} + +func (t *TopicError) encode(pe packetEncoder, version int16) error { + pe.putInt16(int16(t.Err)) + + if version >= 1 { + if err := pe.putNullableString(t.ErrMsg); err != nil { + return err + } + } + + return nil +} + +func (t *TopicError) decode(pd packetDecoder, version int16) (err error) { + kErr, err := pd.getInt16() + if err != nil { + return err + } + t.Err = KError(kErr) + + if version >= 1 { + if t.ErrMsg, err = pd.getNullableString(); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/decompress.go b/vendor/github.com/Shopify/sarama/decompress.go new file mode 100644 index 00000000000..af45fdaf94e --- /dev/null +++ b/vendor/github.com/Shopify/sarama/decompress.go @@ -0,0 +1,61 @@ +package sarama + +import ( + "bytes" + "compress/gzip" + "fmt" + "io/ioutil" + "sync" + + snappy "github.com/eapache/go-xerial-snappy" + "github.com/pierrec/lz4" +) + +var ( + lz4ReaderPool = sync.Pool{ + New: func() interface{} { + return lz4.NewReader(nil) + }, + } + + gzipReaderPool sync.Pool +) + +func decompress(cc CompressionCodec, data []byte) ([]byte, error) { + switch cc { + case CompressionNone: + return data, nil + case CompressionGZIP: + var err error + reader, ok := gzipReaderPool.Get().(*gzip.Reader) + if !ok { + reader, err = gzip.NewReader(bytes.NewReader(data)) + } else { + err = reader.Reset(bytes.NewReader(data)) + } + + if err != nil { + return nil, err + } + + defer gzipReaderPool.Put(reader) + + return ioutil.ReadAll(reader) + case CompressionSnappy: + return snappy.Decode(data) + case CompressionLZ4: + reader, ok := lz4ReaderPool.Get().(*lz4.Reader) + if !ok { + reader = lz4.NewReader(bytes.NewReader(data)) + } else { + reader.Reset(bytes.NewReader(data)) + } + defer lz4ReaderPool.Put(reader) + + return ioutil.ReadAll(reader) + case CompressionZSTD: + return zstdDecompress(nil, 
data) + default: + return nil, PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", cc)} + } +} diff --git a/vendor/github.com/Shopify/sarama/delete_groups_request.go b/vendor/github.com/Shopify/sarama/delete_groups_request.go new file mode 100644 index 00000000000..4ac8bbee4cb --- /dev/null +++ b/vendor/github.com/Shopify/sarama/delete_groups_request.go @@ -0,0 +1,34 @@ +package sarama + +type DeleteGroupsRequest struct { + Groups []string +} + +func (r *DeleteGroupsRequest) encode(pe packetEncoder) error { + return pe.putStringArray(r.Groups) +} + +func (r *DeleteGroupsRequest) decode(pd packetDecoder, version int16) (err error) { + r.Groups, err = pd.getStringArray() + return +} + +func (r *DeleteGroupsRequest) key() int16 { + return 42 +} + +func (r *DeleteGroupsRequest) version() int16 { + return 0 +} + +func (r *DeleteGroupsRequest) headerVersion() int16 { + return 1 +} + +func (r *DeleteGroupsRequest) requiredVersion() KafkaVersion { + return V1_1_0_0 +} + +func (r *DeleteGroupsRequest) AddGroup(group string) { + r.Groups = append(r.Groups, group) +} diff --git a/vendor/github.com/Shopify/sarama/delete_groups_response.go b/vendor/github.com/Shopify/sarama/delete_groups_response.go new file mode 100644 index 00000000000..5e7b1ed3681 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/delete_groups_response.go @@ -0,0 +1,74 @@ +package sarama + +import ( + "time" +) + +type DeleteGroupsResponse struct { + ThrottleTime time.Duration + GroupErrorCodes map[string]KError +} + +func (r *DeleteGroupsResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(r.ThrottleTime / time.Millisecond)) + + if err := pe.putArrayLength(len(r.GroupErrorCodes)); err != nil { + return err + } + for groupID, errorCode := range r.GroupErrorCodes { + if err := pe.putString(groupID); err != nil { + return err + } + pe.putInt16(int16(errorCode)) + } + + return nil +} + +func (r *DeleteGroupsResponse) decode(pd packetDecoder, version int16) error { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + n, err := pd.getArrayLength() + if err != nil { + return err + } + if n == 0 { + return nil + } + + r.GroupErrorCodes = make(map[string]KError, n) + for i := 0; i < n; i++ { + groupID, err := pd.getString() + if err != nil { + return err + } + errorCode, err := pd.getInt16() + if err != nil { + return err + } + + r.GroupErrorCodes[groupID] = KError(errorCode) + } + + return nil +} + +func (r *DeleteGroupsResponse) key() int16 { + return 42 +} + +func (r *DeleteGroupsResponse) version() int16 { + return 0 +} + +func (r *DeleteGroupsResponse) headerVersion() int16 { + return 0 +} + +func (r *DeleteGroupsResponse) requiredVersion() KafkaVersion { + return V1_1_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/delete_records_request.go b/vendor/github.com/Shopify/sarama/delete_records_request.go new file mode 100644 index 00000000000..dc106b17d62 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/delete_records_request.go @@ -0,0 +1,130 @@ +package sarama + +import ( + "sort" + "time" +) + +// request message format is: +// [topic] timeout(int32) +// where topic is: +// name(string) [partition] +// where partition is: +// id(int32) offset(int64) + +type DeleteRecordsRequest struct { + Topics map[string]*DeleteRecordsRequestTopic + Timeout time.Duration +} + +func (d *DeleteRecordsRequest) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(d.Topics)); err != nil { + return err + } + keys := 
make([]string, 0, len(d.Topics)) + for topic := range d.Topics { + keys = append(keys, topic) + } + sort.Strings(keys) + for _, topic := range keys { + if err := pe.putString(topic); err != nil { + return err + } + if err := d.Topics[topic].encode(pe); err != nil { + return err + } + } + pe.putInt32(int32(d.Timeout / time.Millisecond)) + + return nil +} + +func (d *DeleteRecordsRequest) decode(pd packetDecoder, version int16) error { + n, err := pd.getArrayLength() + if err != nil { + return err + } + + if n > 0 { + d.Topics = make(map[string]*DeleteRecordsRequestTopic, n) + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + details := new(DeleteRecordsRequestTopic) + if err = details.decode(pd, version); err != nil { + return err + } + d.Topics[topic] = details + } + } + + timeout, err := pd.getInt32() + if err != nil { + return err + } + d.Timeout = time.Duration(timeout) * time.Millisecond + + return nil +} + +func (d *DeleteRecordsRequest) key() int16 { + return 21 +} + +func (d *DeleteRecordsRequest) version() int16 { + return 0 +} + +func (d *DeleteRecordsRequest) headerVersion() int16 { + return 1 +} + +func (d *DeleteRecordsRequest) requiredVersion() KafkaVersion { + return V0_11_0_0 +} + +type DeleteRecordsRequestTopic struct { + PartitionOffsets map[int32]int64 // partition => offset +} + +func (t *DeleteRecordsRequestTopic) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(t.PartitionOffsets)); err != nil { + return err + } + keys := make([]int32, 0, len(t.PartitionOffsets)) + for partition := range t.PartitionOffsets { + keys = append(keys, partition) + } + sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] }) + for _, partition := range keys { + pe.putInt32(partition) + pe.putInt64(t.PartitionOffsets[partition]) + } + return nil +} + +func (t *DeleteRecordsRequestTopic) decode(pd packetDecoder, version int16) error { + n, err := pd.getArrayLength() + if err != nil { + return err + } + + if n > 0 { + t.PartitionOffsets = make(map[int32]int64, n) + for i := 0; i < n; i++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + offset, err := pd.getInt64() + if err != nil { + return err + } + t.PartitionOffsets[partition] = offset + } + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/delete_records_response.go b/vendor/github.com/Shopify/sarama/delete_records_response.go new file mode 100644 index 00000000000..d530b4c7e91 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/delete_records_response.go @@ -0,0 +1,162 @@ +package sarama + +import ( + "sort" + "time" +) + +// response message format is: +// throttleMs(int32) [topic] +// where topic is: +// name(string) [partition] +// where partition is: +// id(int32) low_watermark(int64) error_code(int16) + +type DeleteRecordsResponse struct { + Version int16 + ThrottleTime time.Duration + Topics map[string]*DeleteRecordsResponseTopic +} + +func (d *DeleteRecordsResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(d.ThrottleTime / time.Millisecond)) + + if err := pe.putArrayLength(len(d.Topics)); err != nil { + return err + } + keys := make([]string, 0, len(d.Topics)) + for topic := range d.Topics { + keys = append(keys, topic) + } + sort.Strings(keys) + for _, topic := range keys { + if err := pe.putString(topic); err != nil { + return err + } + if err := d.Topics[topic].encode(pe); err != nil { + return err + } + } + return nil +} + +func (d *DeleteRecordsResponse) decode(pd packetDecoder, version int16) error { + 
d.Version = version + + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + if n > 0 { + d.Topics = make(map[string]*DeleteRecordsResponseTopic, n) + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + details := new(DeleteRecordsResponseTopic) + if err = details.decode(pd, version); err != nil { + return err + } + d.Topics[topic] = details + } + } + + return nil +} + +func (d *DeleteRecordsResponse) key() int16 { + return 21 +} + +func (d *DeleteRecordsResponse) version() int16 { + return 0 +} + +func (d *DeleteRecordsResponse) headerVersion() int16 { + return 0 +} + +func (d *DeleteRecordsResponse) requiredVersion() KafkaVersion { + return V0_11_0_0 +} + +type DeleteRecordsResponseTopic struct { + Partitions map[int32]*DeleteRecordsResponsePartition +} + +func (t *DeleteRecordsResponseTopic) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(t.Partitions)); err != nil { + return err + } + keys := make([]int32, 0, len(t.Partitions)) + for partition := range t.Partitions { + keys = append(keys, partition) + } + sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] }) + for _, partition := range keys { + pe.putInt32(partition) + if err := t.Partitions[partition].encode(pe); err != nil { + return err + } + } + return nil +} + +func (t *DeleteRecordsResponseTopic) decode(pd packetDecoder, version int16) error { + n, err := pd.getArrayLength() + if err != nil { + return err + } + + if n > 0 { + t.Partitions = make(map[int32]*DeleteRecordsResponsePartition, n) + for i := 0; i < n; i++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + details := new(DeleteRecordsResponsePartition) + if err = details.decode(pd, version); err != nil { + return err + } + t.Partitions[partition] = details + } + } + + return nil +} + +type DeleteRecordsResponsePartition struct { + LowWatermark int64 + Err KError +} + +func (t *DeleteRecordsResponsePartition) encode(pe packetEncoder) error { + pe.putInt64(t.LowWatermark) + pe.putInt16(int16(t.Err)) + return nil +} + +func (t *DeleteRecordsResponsePartition) decode(pd packetDecoder, version int16) error { + lowWatermark, err := pd.getInt64() + if err != nil { + return err + } + t.LowWatermark = lowWatermark + + kErr, err := pd.getInt16() + if err != nil { + return err + } + t.Err = KError(kErr) + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/delete_topics_request.go b/vendor/github.com/Shopify/sarama/delete_topics_request.go new file mode 100644 index 00000000000..ba6780a8e39 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/delete_topics_request.go @@ -0,0 +1,52 @@ +package sarama + +import "time" + +type DeleteTopicsRequest struct { + Version int16 + Topics []string + Timeout time.Duration +} + +func (d *DeleteTopicsRequest) encode(pe packetEncoder) error { + if err := pe.putStringArray(d.Topics); err != nil { + return err + } + pe.putInt32(int32(d.Timeout / time.Millisecond)) + + return nil +} + +func (d *DeleteTopicsRequest) decode(pd packetDecoder, version int16) (err error) { + if d.Topics, err = pd.getStringArray(); err != nil { + return err + } + timeout, err := pd.getInt32() + if err != nil { + return err + } + d.Timeout = time.Duration(timeout) * time.Millisecond + d.Version = version + return nil +} + +func (d *DeleteTopicsRequest) key() int16 { + return 20 +} + +func (d *DeleteTopicsRequest) 
version() int16 { + return d.Version +} + +func (d *DeleteTopicsRequest) headerVersion() int16 { + return 1 +} + +func (d *DeleteTopicsRequest) requiredVersion() KafkaVersion { + switch d.Version { + case 1: + return V0_11_0_0 + default: + return V0_10_1_0 + } +} diff --git a/vendor/github.com/Shopify/sarama/delete_topics_response.go b/vendor/github.com/Shopify/sarama/delete_topics_response.go new file mode 100644 index 00000000000..733961a89a0 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/delete_topics_response.go @@ -0,0 +1,82 @@ +package sarama + +import "time" + +type DeleteTopicsResponse struct { + Version int16 + ThrottleTime time.Duration + TopicErrorCodes map[string]KError +} + +func (d *DeleteTopicsResponse) encode(pe packetEncoder) error { + if d.Version >= 1 { + pe.putInt32(int32(d.ThrottleTime / time.Millisecond)) + } + + if err := pe.putArrayLength(len(d.TopicErrorCodes)); err != nil { + return err + } + for topic, errorCode := range d.TopicErrorCodes { + if err := pe.putString(topic); err != nil { + return err + } + pe.putInt16(int16(errorCode)) + } + + return nil +} + +func (d *DeleteTopicsResponse) decode(pd packetDecoder, version int16) (err error) { + if version >= 1 { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + d.Version = version + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + d.TopicErrorCodes = make(map[string]KError, n) + + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + errorCode, err := pd.getInt16() + if err != nil { + return err + } + + d.TopicErrorCodes[topic] = KError(errorCode) + } + + return nil +} + +func (d *DeleteTopicsResponse) key() int16 { + return 20 +} + +func (d *DeleteTopicsResponse) version() int16 { + return d.Version +} + +func (d *DeleteTopicsResponse) headerVersion() int16 { + return 0 +} + +func (d *DeleteTopicsResponse) requiredVersion() KafkaVersion { + switch d.Version { + case 1: + return V0_11_0_0 + default: + return V0_10_1_0 + } +} diff --git a/vendor/github.com/Shopify/sarama/describe_configs_request.go b/vendor/github.com/Shopify/sarama/describe_configs_request.go new file mode 100644 index 00000000000..4c34880318c --- /dev/null +++ b/vendor/github.com/Shopify/sarama/describe_configs_request.go @@ -0,0 +1,115 @@ +package sarama + +type DescribeConfigsRequest struct { + Version int16 + Resources []*ConfigResource + IncludeSynonyms bool +} + +type ConfigResource struct { + Type ConfigResourceType + Name string + ConfigNames []string +} + +func (r *DescribeConfigsRequest) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(r.Resources)); err != nil { + return err + } + + for _, c := range r.Resources { + pe.putInt8(int8(c.Type)) + if err := pe.putString(c.Name); err != nil { + return err + } + + if len(c.ConfigNames) == 0 { + pe.putInt32(-1) + continue + } + if err := pe.putStringArray(c.ConfigNames); err != nil { + return err + } + } + + if r.Version >= 1 { + pe.putBool(r.IncludeSynonyms) + } + + return nil +} + +func (r *DescribeConfigsRequest) decode(pd packetDecoder, version int16) (err error) { + n, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Resources = make([]*ConfigResource, n) + + for i := 0; i < n; i++ { + r.Resources[i] = &ConfigResource{} + t, err := pd.getInt8() + if err != nil { + return err + } + r.Resources[i].Type = ConfigResourceType(t) + name, err := pd.getString() + if err != nil { + return err + } + 
r.Resources[i].Name = name + + confLength, err := pd.getArrayLength() + if err != nil { + return err + } + + if confLength == -1 { + continue + } + + cfnames := make([]string, confLength) + for i := 0; i < confLength; i++ { + s, err := pd.getString() + if err != nil { + return err + } + cfnames[i] = s + } + r.Resources[i].ConfigNames = cfnames + } + r.Version = version + if r.Version >= 1 { + b, err := pd.getBool() + if err != nil { + return err + } + r.IncludeSynonyms = b + } + + return nil +} + +func (r *DescribeConfigsRequest) key() int16 { + return 32 +} + +func (r *DescribeConfigsRequest) version() int16 { + return r.Version +} + +func (r *DescribeConfigsRequest) headerVersion() int16 { + return 1 +} + +func (r *DescribeConfigsRequest) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V1_1_0_0 + case 2: + return V2_0_0_0 + default: + return V0_11_0_0 + } +} diff --git a/vendor/github.com/Shopify/sarama/describe_configs_response.go b/vendor/github.com/Shopify/sarama/describe_configs_response.go new file mode 100644 index 00000000000..928f5a52ab2 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/describe_configs_response.go @@ -0,0 +1,327 @@ +package sarama + +import ( + "fmt" + "time" +) + +type ConfigSource int8 + +func (s ConfigSource) String() string { + switch s { + case SourceUnknown: + return "Unknown" + case SourceTopic: + return "Topic" + case SourceDynamicBroker: + return "DynamicBroker" + case SourceDynamicDefaultBroker: + return "DynamicDefaultBroker" + case SourceStaticBroker: + return "StaticBroker" + case SourceDefault: + return "Default" + } + return fmt.Sprintf("Source Invalid: %d", int(s)) +} + +const ( + SourceUnknown ConfigSource = iota + SourceTopic + SourceDynamicBroker + SourceDynamicDefaultBroker + SourceStaticBroker + SourceDefault +) + +type DescribeConfigsResponse struct { + Version int16 + ThrottleTime time.Duration + Resources []*ResourceResponse +} + +type ResourceResponse struct { + ErrorCode int16 + ErrorMsg string + Type ConfigResourceType + Name string + Configs []*ConfigEntry +} + +type ConfigEntry struct { + Name string + Value string + ReadOnly bool + Default bool + Source ConfigSource + Sensitive bool + Synonyms []*ConfigSynonym +} + +type ConfigSynonym struct { + ConfigName string + ConfigValue string + Source ConfigSource +} + +func (r *DescribeConfigsResponse) encode(pe packetEncoder) (err error) { + pe.putInt32(int32(r.ThrottleTime / time.Millisecond)) + if err = pe.putArrayLength(len(r.Resources)); err != nil { + return err + } + + for _, c := range r.Resources { + if err = c.encode(pe, r.Version); err != nil { + return err + } + } + + return nil +} + +func (r *DescribeConfigsResponse) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Resources = make([]*ResourceResponse, n) + for i := 0; i < n; i++ { + rr := &ResourceResponse{} + if err := rr.decode(pd, version); err != nil { + return err + } + r.Resources[i] = rr + } + + return nil +} + +func (r *DescribeConfigsResponse) key() int16 { + return 32 +} + +func (r *DescribeConfigsResponse) version() int16 { + return r.Version +} + +func (r *DescribeConfigsResponse) headerVersion() int16 { + return 0 +} + +func (r *DescribeConfigsResponse) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V1_0_0_0 + case 2: + 
return V2_0_0_0
+	default:
+		return V0_11_0_0
+	}
+}
+
+func (r *ResourceResponse) encode(pe packetEncoder, version int16) (err error) {
+	pe.putInt16(r.ErrorCode)
+
+	if err = pe.putString(r.ErrorMsg); err != nil {
+		return err
+	}
+
+	pe.putInt8(int8(r.Type))
+
+	if err = pe.putString(r.Name); err != nil {
+		return err
+	}
+
+	if err = pe.putArrayLength(len(r.Configs)); err != nil {
+		return err
+	}
+
+	for _, c := range r.Configs {
+		if err = c.encode(pe, version); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (r *ResourceResponse) decode(pd packetDecoder, version int16) (err error) {
+	ec, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	r.ErrorCode = ec
+
+	em, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	r.ErrorMsg = em
+
+	t, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	r.Type = ConfigResourceType(t)
+
+	name, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	r.Name = name
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Configs = make([]*ConfigEntry, n)
+	for i := 0; i < n; i++ {
+		c := &ConfigEntry{}
+		if err := c.decode(pd, version); err != nil {
+			return err
+		}
+		r.Configs[i] = c
+	}
+	return nil
+}
+
+func (r *ConfigEntry) encode(pe packetEncoder, version int16) (err error) {
+	if err = pe.putString(r.Name); err != nil {
+		return err
+	}
+
+	if err = pe.putString(r.Value); err != nil {
+		return err
+	}
+
+	pe.putBool(r.ReadOnly)
+
+	if version <= 0 {
+		pe.putBool(r.Default)
+		pe.putBool(r.Sensitive)
+	} else {
+		pe.putInt8(int8(r.Source))
+		pe.putBool(r.Sensitive)
+
+		if err := pe.putArrayLength(len(r.Synonyms)); err != nil {
+			return err
+		}
+		for _, c := range r.Synonyms {
+			if err = c.encode(pe, version); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+// https://cwiki.apache.org/confluence/display/KAFKA/KIP-226+-+Dynamic+Broker+Configuration
+func (r *ConfigEntry) decode(pd packetDecoder, version int16) (err error) {
+	if version == 0 {
+		r.Source = SourceUnknown
+	}
+	name, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	r.Name = name
+
+	value, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	r.Value = value
+
+	read, err := pd.getBool()
+	if err != nil {
+		return err
+	}
+	r.ReadOnly = read
+
+	if version == 0 {
+		defaultB, err := pd.getBool()
+		if err != nil {
+			return err
+		}
+		r.Default = defaultB
+		if defaultB {
+			r.Source = SourceDefault
+		}
+	} else {
+		source, err := pd.getInt8()
+		if err != nil {
+			return err
+		}
+		r.Source = ConfigSource(source)
+		r.Default = r.Source == SourceDefault
+	}
+
+	sensitive, err := pd.getBool()
+	if err != nil {
+		return err
+	}
+	r.Sensitive = sensitive
+
+	if version > 0 {
+		n, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+		r.Synonyms = make([]*ConfigSynonym, n)
+
+		for i := 0; i < n; i++ {
+			s := &ConfigSynonym{}
+			if err := s.decode(pd, version); err != nil {
+				return err
+			}
+			r.Synonyms[i] = s
+		}
+	}
+	return nil
+}
+
+func (c *ConfigSynonym) encode(pe packetEncoder, version int16) (err error) {
+	err = pe.putString(c.ConfigName)
+	if err != nil {
+		return err
+	}
+
+	err = pe.putString(c.ConfigValue)
+	if err != nil {
+		return err
+	}
+
+	pe.putInt8(int8(c.Source))
+
+	return nil
+}
+
+func (c *ConfigSynonym) decode(pd packetDecoder, version int16) error {
+	name, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	c.ConfigName = name
+
+	value, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	c.ConfigValue = value
+
+	source, err := pd.getInt8()
+	if err != nil {
+		return err
} + c.Source = ConfigSource(source) + return nil +} diff --git a/vendor/github.com/Shopify/sarama/describe_groups_request.go b/vendor/github.com/Shopify/sarama/describe_groups_request.go new file mode 100644 index 00000000000..f8962da58fc --- /dev/null +++ b/vendor/github.com/Shopify/sarama/describe_groups_request.go @@ -0,0 +1,34 @@ +package sarama + +type DescribeGroupsRequest struct { + Groups []string +} + +func (r *DescribeGroupsRequest) encode(pe packetEncoder) error { + return pe.putStringArray(r.Groups) +} + +func (r *DescribeGroupsRequest) decode(pd packetDecoder, version int16) (err error) { + r.Groups, err = pd.getStringArray() + return +} + +func (r *DescribeGroupsRequest) key() int16 { + return 15 +} + +func (r *DescribeGroupsRequest) version() int16 { + return 0 +} + +func (r *DescribeGroupsRequest) headerVersion() int16 { + return 1 +} + +func (r *DescribeGroupsRequest) requiredVersion() KafkaVersion { + return V0_9_0_0 +} + +func (r *DescribeGroupsRequest) AddGroup(group string) { + r.Groups = append(r.Groups, group) +} diff --git a/vendor/github.com/Shopify/sarama/describe_groups_response.go b/vendor/github.com/Shopify/sarama/describe_groups_response.go new file mode 100644 index 00000000000..bc242e4217d --- /dev/null +++ b/vendor/github.com/Shopify/sarama/describe_groups_response.go @@ -0,0 +1,191 @@ +package sarama + +type DescribeGroupsResponse struct { + Groups []*GroupDescription +} + +func (r *DescribeGroupsResponse) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(r.Groups)); err != nil { + return err + } + + for _, groupDescription := range r.Groups { + if err := groupDescription.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (r *DescribeGroupsResponse) decode(pd packetDecoder, version int16) (err error) { + n, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Groups = make([]*GroupDescription, n) + for i := 0; i < n; i++ { + r.Groups[i] = new(GroupDescription) + if err := r.Groups[i].decode(pd); err != nil { + return err + } + } + + return nil +} + +func (r *DescribeGroupsResponse) key() int16 { + return 15 +} + +func (r *DescribeGroupsResponse) version() int16 { + return 0 +} + +func (r *DescribeGroupsResponse) headerVersion() int16 { + return 0 +} + +func (r *DescribeGroupsResponse) requiredVersion() KafkaVersion { + return V0_9_0_0 +} + +type GroupDescription struct { + Err KError + GroupId string + State string + ProtocolType string + Protocol string + Members map[string]*GroupMemberDescription +} + +func (gd *GroupDescription) encode(pe packetEncoder) error { + pe.putInt16(int16(gd.Err)) + + if err := pe.putString(gd.GroupId); err != nil { + return err + } + if err := pe.putString(gd.State); err != nil { + return err + } + if err := pe.putString(gd.ProtocolType); err != nil { + return err + } + if err := pe.putString(gd.Protocol); err != nil { + return err + } + + if err := pe.putArrayLength(len(gd.Members)); err != nil { + return err + } + + for memberId, groupMemberDescription := range gd.Members { + if err := pe.putString(memberId); err != nil { + return err + } + if err := groupMemberDescription.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (gd *GroupDescription) decode(pd packetDecoder) (err error) { + kerr, err := pd.getInt16() + if err != nil { + return err + } + + gd.Err = KError(kerr) + + if gd.GroupId, err = pd.getString(); err != nil { + return + } + if gd.State, err = pd.getString(); err != nil { + return + } + if gd.ProtocolType, err = pd.getString(); err != 
nil { + return + } + if gd.Protocol, err = pd.getString(); err != nil { + return + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + if n == 0 { + return nil + } + + gd.Members = make(map[string]*GroupMemberDescription) + for i := 0; i < n; i++ { + memberId, err := pd.getString() + if err != nil { + return err + } + + gd.Members[memberId] = new(GroupMemberDescription) + if err := gd.Members[memberId].decode(pd); err != nil { + return err + } + } + + return nil +} + +type GroupMemberDescription struct { + ClientId string + ClientHost string + MemberMetadata []byte + MemberAssignment []byte +} + +func (gmd *GroupMemberDescription) encode(pe packetEncoder) error { + if err := pe.putString(gmd.ClientId); err != nil { + return err + } + if err := pe.putString(gmd.ClientHost); err != nil { + return err + } + if err := pe.putBytes(gmd.MemberMetadata); err != nil { + return err + } + if err := pe.putBytes(gmd.MemberAssignment); err != nil { + return err + } + + return nil +} + +func (gmd *GroupMemberDescription) decode(pd packetDecoder) (err error) { + if gmd.ClientId, err = pd.getString(); err != nil { + return + } + if gmd.ClientHost, err = pd.getString(); err != nil { + return + } + if gmd.MemberMetadata, err = pd.getBytes(); err != nil { + return + } + if gmd.MemberAssignment, err = pd.getBytes(); err != nil { + return + } + + return nil +} + +func (gmd *GroupMemberDescription) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) { + assignment := new(ConsumerGroupMemberAssignment) + err := decode(gmd.MemberAssignment, assignment) + return assignment, err +} + +func (gmd *GroupMemberDescription) GetMemberMetadata() (*ConsumerGroupMemberMetadata, error) { + metadata := new(ConsumerGroupMemberMetadata) + err := decode(gmd.MemberMetadata, metadata) + return metadata, err +} diff --git a/vendor/github.com/Shopify/sarama/describe_log_dirs_request.go b/vendor/github.com/Shopify/sarama/describe_log_dirs_request.go new file mode 100644 index 00000000000..c0bf04e04e2 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/describe_log_dirs_request.go @@ -0,0 +1,87 @@ +package sarama + +// DescribeLogDirsRequest is a describe request to get partitions' log size +type DescribeLogDirsRequest struct { + // Version 0 and 1 are equal + // The version number is bumped to indicate that on quota violation brokers send out responses before throttling. 
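+	// (presumably the KIP-219 change to quota/throttling communication;
+	// the wire format itself is unchanged between the two versions)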
+ Version int16 + + // If this is an empty array, all topics will be queried + DescribeTopics []DescribeLogDirsRequestTopic +} + +// DescribeLogDirsRequestTopic is a describe request about the log dir of one or more partitions within a Topic +type DescribeLogDirsRequestTopic struct { + Topic string + PartitionIDs []int32 +} + +func (r *DescribeLogDirsRequest) encode(pe packetEncoder) error { + length := len(r.DescribeTopics) + if length == 0 { + // In order to query all topics we must send null + length = -1 + } + + if err := pe.putArrayLength(length); err != nil { + return err + } + + for _, d := range r.DescribeTopics { + if err := pe.putString(d.Topic); err != nil { + return err + } + + if err := pe.putInt32Array(d.PartitionIDs); err != nil { + return err + } + } + + return nil +} + +func (r *DescribeLogDirsRequest) decode(pd packetDecoder, version int16) error { + n, err := pd.getArrayLength() + if err != nil { + return err + } + if n == -1 { + n = 0 + } + + topics := make([]DescribeLogDirsRequestTopic, n) + for i := 0; i < n; i++ { + topics[i] = DescribeLogDirsRequestTopic{} + + topic, err := pd.getString() + if err != nil { + return err + } + topics[i].Topic = topic + + pIDs, err := pd.getInt32Array() + if err != nil { + return err + } + topics[i].PartitionIDs = pIDs + } + r.DescribeTopics = topics + + return nil +} + +func (r *DescribeLogDirsRequest) key() int16 { + return 35 +} + +func (r *DescribeLogDirsRequest) version() int16 { + return r.Version +} + +func (r *DescribeLogDirsRequest) headerVersion() int16 { + return 1 +} + +func (r *DescribeLogDirsRequest) requiredVersion() KafkaVersion { + return V1_0_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/describe_log_dirs_response.go b/vendor/github.com/Shopify/sarama/describe_log_dirs_response.go new file mode 100644 index 00000000000..411da38ad20 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/describe_log_dirs_response.go @@ -0,0 +1,229 @@ +package sarama + +import "time" + +type DescribeLogDirsResponse struct { + ThrottleTime time.Duration + + // Version 0 and 1 are equal + // The version number is bumped to indicate that on quota violation brokers send out responses before throttling. 
+ Version int16 + + LogDirs []DescribeLogDirsResponseDirMetadata +} + +func (r *DescribeLogDirsResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(r.ThrottleTime / time.Millisecond)) + + if err := pe.putArrayLength(len(r.LogDirs)); err != nil { + return err + } + + for _, dir := range r.LogDirs { + if err := dir.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (r *DescribeLogDirsResponse) decode(pd packetDecoder, version int16) error { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + // Decode array of DescribeLogDirsResponseDirMetadata + n, err := pd.getArrayLength() + if err != nil { + return err + } + + r.LogDirs = make([]DescribeLogDirsResponseDirMetadata, n) + for i := 0; i < n; i++ { + dir := DescribeLogDirsResponseDirMetadata{} + if err := dir.decode(pd, version); err != nil { + return err + } + r.LogDirs[i] = dir + } + + return nil +} + +func (r *DescribeLogDirsResponse) key() int16 { + return 35 +} + +func (r *DescribeLogDirsResponse) version() int16 { + return r.Version +} + +func (r *DescribeLogDirsResponse) headerVersion() int16 { + return 0 +} + +func (r *DescribeLogDirsResponse) requiredVersion() KafkaVersion { + return V1_0_0_0 +} + +type DescribeLogDirsResponseDirMetadata struct { + ErrorCode KError + + // The absolute log directory path + Path string + Topics []DescribeLogDirsResponseTopic +} + +func (r *DescribeLogDirsResponseDirMetadata) encode(pe packetEncoder) error { + pe.putInt16(int16(r.ErrorCode)) + + if err := pe.putString(r.Path); err != nil { + return err + } + + if err := pe.putArrayLength(len(r.Topics)); err != nil { + return err + } + for _, topic := range r.Topics { + if err := topic.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (r *DescribeLogDirsResponseDirMetadata) decode(pd packetDecoder, version int16) error { + errCode, err := pd.getInt16() + if err != nil { + return err + } + r.ErrorCode = KError(errCode) + + path, err := pd.getString() + if err != nil { + return err + } + r.Path = path + + // Decode array of DescribeLogDirsResponseTopic + n, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Topics = make([]DescribeLogDirsResponseTopic, n) + for i := 0; i < n; i++ { + t := DescribeLogDirsResponseTopic{} + + if err := t.decode(pd, version); err != nil { + return err + } + + r.Topics[i] = t + } + + return nil +} + +// DescribeLogDirsResponseTopic contains a topic's partitions descriptions +type DescribeLogDirsResponseTopic struct { + Topic string + Partitions []DescribeLogDirsResponsePartition +} + +func (r *DescribeLogDirsResponseTopic) encode(pe packetEncoder) error { + if err := pe.putString(r.Topic); err != nil { + return err + } + + if err := pe.putArrayLength(len(r.Partitions)); err != nil { + return err + } + for _, partition := range r.Partitions { + if err := partition.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (r *DescribeLogDirsResponseTopic) decode(pd packetDecoder, version int16) error { + t, err := pd.getString() + if err != nil { + return err + } + r.Topic = t + + n, err := pd.getArrayLength() + if err != nil { + return err + } + r.Partitions = make([]DescribeLogDirsResponsePartition, n) + for i := 0; i < n; i++ { + p := DescribeLogDirsResponsePartition{} + if err := p.decode(pd, version); err != nil { + return err + } + r.Partitions[i] = p + } + + return nil +} + +// DescribeLogDirsResponsePartition describes a partition's log directory 
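+//
+// Given a decoded resp *DescribeLogDirsResponse, the nested structure is
+// typically consumed by walking dirs, topics, and partitions (illustrative
+// sketch only; resp is a hypothetical variable):
+//
+//	for _, dir := range resp.LogDirs {
+//		for _, t := range dir.Topics {
+//			for _, p := range t.Partitions {
+//				fmt.Printf("%s/%s-%d: %d bytes (lag %d)\n",
+//					dir.Path, t.Topic, p.PartitionID, p.Size, p.OffsetLag)
+//			}
+//		}
+//	}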
+type DescribeLogDirsResponsePartition struct { + PartitionID int32 + + // The size of the log segments of the partition in bytes. + Size int64 + + // The lag of the log's LEO w.r.t. partition's HW (if it is the current log for the partition) or + // current replica's LEO (if it is the future log for the partition) + OffsetLag int64 + + // True if this log is created by AlterReplicaLogDirsRequest and will replace the current log of + // the replica in the future. + IsTemporary bool +} + +func (r *DescribeLogDirsResponsePartition) encode(pe packetEncoder) error { + pe.putInt32(r.PartitionID) + pe.putInt64(r.Size) + pe.putInt64(r.OffsetLag) + pe.putBool(r.IsTemporary) + + return nil +} + +func (r *DescribeLogDirsResponsePartition) decode(pd packetDecoder, version int16) error { + pID, err := pd.getInt32() + if err != nil { + return err + } + r.PartitionID = pID + + size, err := pd.getInt64() + if err != nil { + return err + } + r.Size = size + + lag, err := pd.getInt64() + if err != nil { + return err + } + r.OffsetLag = lag + + isTemp, err := pd.getBool() + if err != nil { + return err + } + r.IsTemporary = isTemp + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/describe_user_scram_credentials_request.go b/vendor/github.com/Shopify/sarama/describe_user_scram_credentials_request.go new file mode 100644 index 00000000000..b5b59404bdc --- /dev/null +++ b/vendor/github.com/Shopify/sarama/describe_user_scram_credentials_request.go @@ -0,0 +1,70 @@ +package sarama + +// DescribeUserScramCredentialsRequest is a request to get list of SCRAM user names +type DescribeUserScramCredentialsRequest struct { + // Version 0 is currently only supported + Version int16 + + // If this is an empty array, all users will be queried + DescribeUsers []DescribeUserScramCredentialsRequestUser +} + +// DescribeUserScramCredentialsRequestUser is a describe request about specific user name +type DescribeUserScramCredentialsRequestUser struct { + Name string +} + +func (r *DescribeUserScramCredentialsRequest) encode(pe packetEncoder) error { + pe.putCompactArrayLength(len(r.DescribeUsers)) + for _, d := range r.DescribeUsers { + if err := pe.putCompactString(d.Name); err != nil { + return err + } + pe.putEmptyTaggedFieldArray() + } + + pe.putEmptyTaggedFieldArray() + return nil +} + +func (r *DescribeUserScramCredentialsRequest) decode(pd packetDecoder, version int16) error { + n, err := pd.getCompactArrayLength() + if err != nil { + return err + } + if n == -1 { + n = 0 + } + + r.DescribeUsers = make([]DescribeUserScramCredentialsRequestUser, n) + for i := 0; i < n; i++ { + r.DescribeUsers[i] = DescribeUserScramCredentialsRequestUser{} + if r.DescribeUsers[i].Name, err = pd.getCompactString(); err != nil { + return err + } + if _, err = pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + + if _, err = pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + return nil +} + +func (r *DescribeUserScramCredentialsRequest) key() int16 { + return 50 +} + +func (r *DescribeUserScramCredentialsRequest) version() int16 { + return r.Version +} + +func (r *DescribeUserScramCredentialsRequest) headerVersion() int16 { + return 2 +} + +func (r *DescribeUserScramCredentialsRequest) requiredVersion() KafkaVersion { + return V2_7_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/describe_user_scram_credentials_response.go b/vendor/github.com/Shopify/sarama/describe_user_scram_credentials_response.go new file mode 100644 index 00000000000..2656c2faa1c --- /dev/null +++ 
b/vendor/github.com/Shopify/sarama/describe_user_scram_credentials_response.go @@ -0,0 +1,168 @@ +package sarama + +import "time" + +type ScramMechanismType int8 + +const ( + SCRAM_MECHANISM_UNKNOWN ScramMechanismType = iota // 0 + SCRAM_MECHANISM_SHA_256 // 1 + SCRAM_MECHANISM_SHA_512 // 2 +) + +func (s ScramMechanismType) String() string { + switch s { + case 1: + return SASLTypeSCRAMSHA256 + case 2: + return SASLTypeSCRAMSHA512 + default: + return "Unknown" + } +} + +type DescribeUserScramCredentialsResponse struct { + // Version 0 is currently only supported + Version int16 + + ThrottleTime time.Duration + + ErrorCode KError + ErrorMessage *string + + Results []*DescribeUserScramCredentialsResult +} + +type DescribeUserScramCredentialsResult struct { + User string + + ErrorCode KError + ErrorMessage *string + + CredentialInfos []*UserScramCredentialsResponseInfo +} + +type UserScramCredentialsResponseInfo struct { + Mechanism ScramMechanismType + Iterations int32 +} + +func (r *DescribeUserScramCredentialsResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(r.ThrottleTime / time.Millisecond)) + + pe.putInt16(int16(r.ErrorCode)) + if err := pe.putNullableCompactString(r.ErrorMessage); err != nil { + return err + } + + pe.putCompactArrayLength(len(r.Results)) + for _, u := range r.Results { + if err := pe.putCompactString(u.User); err != nil { + return err + } + pe.putInt16(int16(u.ErrorCode)) + if err := pe.putNullableCompactString(u.ErrorMessage); err != nil { + return err + } + + pe.putCompactArrayLength(len(u.CredentialInfos)) + for _, c := range u.CredentialInfos { + pe.putInt8(int8(c.Mechanism)) + pe.putInt32(c.Iterations) + pe.putEmptyTaggedFieldArray() + } + + pe.putEmptyTaggedFieldArray() + } + + pe.putEmptyTaggedFieldArray() + return nil +} + +func (r *DescribeUserScramCredentialsResponse) decode(pd packetDecoder, version int16) error { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + kerr, err := pd.getInt16() + if err != nil { + return err + } + + r.ErrorCode = KError(kerr) + if r.ErrorMessage, err = pd.getCompactNullableString(); err != nil { + return err + } + + numUsers, err := pd.getCompactArrayLength() + if err != nil { + return err + } + + if numUsers > 0 { + r.Results = make([]*DescribeUserScramCredentialsResult, numUsers) + for i := 0; i < numUsers; i++ { + r.Results[i] = &DescribeUserScramCredentialsResult{} + if r.Results[i].User, err = pd.getCompactString(); err != nil { + return err + } + + errorCode, err := pd.getInt16() + if err != nil { + return err + } + r.Results[i].ErrorCode = KError(errorCode) + if r.Results[i].ErrorMessage, err = pd.getCompactNullableString(); err != nil { + return err + } + + numCredentialInfos, err := pd.getCompactArrayLength() + if err != nil { + return err + } + + r.Results[i].CredentialInfos = make([]*UserScramCredentialsResponseInfo, numCredentialInfos) + for j := 0; j < numCredentialInfos; j++ { + r.Results[i].CredentialInfos[j] = &UserScramCredentialsResponseInfo{} + scramMechanism, err := pd.getInt8() + if err != nil { + return err + } + r.Results[i].CredentialInfos[j].Mechanism = ScramMechanismType(scramMechanism) + if r.Results[i].CredentialInfos[j].Iterations, err = pd.getInt32(); err != nil { + return err + } + if _, err = pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + + if _, err = pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + } + + if _, err = pd.getEmptyTaggedFieldArray(); err != 
nil { + return err + } + return nil +} + +func (r *DescribeUserScramCredentialsResponse) key() int16 { + return 50 +} + +func (r *DescribeUserScramCredentialsResponse) version() int16 { + return r.Version +} + +func (r *DescribeUserScramCredentialsResponse) headerVersion() int16 { + return 2 +} + +func (r *DescribeUserScramCredentialsResponse) requiredVersion() KafkaVersion { + return V2_7_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/dev.yml b/vendor/github.com/Shopify/sarama/dev.yml new file mode 100644 index 00000000000..7bf9ff9184d --- /dev/null +++ b/vendor/github.com/Shopify/sarama/dev.yml @@ -0,0 +1,10 @@ +name: sarama + +up: + - go: + version: '1.16' + +commands: + test: + run: make test + desc: 'run unit tests' diff --git a/vendor/github.com/Shopify/sarama/docker-compose.yml b/vendor/github.com/Shopify/sarama/docker-compose.yml new file mode 100644 index 00000000000..8e9c24e3db0 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/docker-compose.yml @@ -0,0 +1,134 @@ +version: '3.7' +services: + zookeeper-1: + image: 'confluentinc/cp-zookeeper:${CONFLUENT_PLATFORM_VERSION:-6.1.1}' + restart: always + environment: + ZOOKEEPER_SERVER_ID: '1' + ZOOKEEPER_SERVERS: 'zookeeper-1:2888:3888;zookeeper-2:2888:3888;zookeeper-3:2888:3888' + ZOOKEEPER_CLIENT_PORT: '2181' + ZOOKEEPER_PEER_PORT: '2888' + ZOOKEEPER_LEADER_PORT: '3888' + ZOOKEEPER_INIT_LIMIT: '10' + ZOOKEEPER_SYNC_LIMIT: '5' + ZOOKEEPER_MAX_CLIENT_CONNS: '0' + zookeeper-2: + image: 'confluentinc/cp-zookeeper:${CONFLUENT_PLATFORM_VERSION:-6.1.1}' + restart: always + environment: + ZOOKEEPER_SERVER_ID: '2' + ZOOKEEPER_SERVERS: 'zookeeper-1:2888:3888;zookeeper-2:2888:3888;zookeeper-3:2888:3888' + ZOOKEEPER_CLIENT_PORT: '2181' + ZOOKEEPER_PEER_PORT: '2888' + ZOOKEEPER_LEADER_PORT: '3888' + ZOOKEEPER_INIT_LIMIT: '10' + ZOOKEEPER_SYNC_LIMIT: '5' + ZOOKEEPER_MAX_CLIENT_CONNS: '0' + zookeeper-3: + image: 'confluentinc/cp-zookeeper:${CONFLUENT_PLATFORM_VERSION:-6.1.1}' + restart: always + environment: + ZOOKEEPER_SERVER_ID: '3' + ZOOKEEPER_SERVERS: 'zookeeper-1:2888:3888;zookeeper-2:2888:3888;zookeeper-3:2888:3888' + ZOOKEEPER_CLIENT_PORT: '2181' + ZOOKEEPER_PEER_PORT: '2888' + ZOOKEEPER_LEADER_PORT: '3888' + ZOOKEEPER_INIT_LIMIT: '10' + ZOOKEEPER_SYNC_LIMIT: '5' + ZOOKEEPER_MAX_CLIENT_CONNS: '0' + kafka-1: + image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-6.1.1}' + restart: always + environment: + KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' + KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29091' + KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-1:9091,LISTENER_LOCAL://localhost:29091' + KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' + KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' + KAFKA_DEFAULT_REPLICATION_FACTOR: '2' + KAFKA_BROKER_ID: '1' + KAFKA_BROKER_RACK: '1' + KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000' + KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000' + KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector' + KAFKA_DELETE_TOPIC_ENABLE: 'true' + KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false' + kafka-2: + image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-6.1.1}' + restart: always + environment: + KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' + KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29092' + KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-2:9091,LISTENER_LOCAL://localhost:29092' + 
KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' + KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' + KAFKA_DEFAULT_REPLICATION_FACTOR: '2' + KAFKA_BROKER_ID: '2' + KAFKA_BROKER_RACK: '2' + KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000' + KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000' + KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector' + KAFKA_DELETE_TOPIC_ENABLE: 'true' + KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false' + kafka-3: + image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-6.1.1}' + restart: always + environment: + KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' + KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29093' + KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-3:9091,LISTENER_LOCAL://localhost:29093' + KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' + KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' + KAFKA_DEFAULT_REPLICATION_FACTOR: '2' + KAFKA_BROKER_ID: '3' + KAFKA_BROKER_RACK: '3' + KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000' + KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000' + KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector' + KAFKA_DELETE_TOPIC_ENABLE: 'true' + KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false' + kafka-4: + image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-6.1.1}' + restart: always + environment: + KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' + KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29094' + KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-4:9091,LISTENER_LOCAL://localhost:29094' + KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' + KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' + KAFKA_DEFAULT_REPLICATION_FACTOR: '2' + KAFKA_BROKER_ID: '4' + KAFKA_BROKER_RACK: '4' + KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000' + KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000' + KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector' + KAFKA_DELETE_TOPIC_ENABLE: 'true' + KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false' + kafka-5: + image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-6.1.1}' + restart: always + environment: + KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' + KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29095' + KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-5:9091,LISTENER_LOCAL://localhost:29095' + KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' + KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' + KAFKA_DEFAULT_REPLICATION_FACTOR: '2' + KAFKA_BROKER_ID: '5' + KAFKA_BROKER_RACK: '5' + KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000' + KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000' + KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector' + KAFKA_DELETE_TOPIC_ENABLE: 'true' + KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false' + toxiproxy: + image: 'shopify/toxiproxy:2.1.4' + ports: + # The tests themselves actually start the proxies on these ports + - '29091:29091' + - '29092:29092' + - '29093:29093' + - '29094:29094' + - '29095:29095' + # This is the toxiproxy API port + - '8474:8474' diff --git a/vendor/github.com/Shopify/sarama/encoder_decoder.go b/vendor/github.com/Shopify/sarama/encoder_decoder.go new file mode 100644 index 00000000000..dab54f88cc7 --- /dev/null +++ 
b/vendor/github.com/Shopify/sarama/encoder_decoder.go @@ -0,0 +1,94 @@ +package sarama + +import ( + "fmt" + + "github.com/rcrowley/go-metrics" +) + +// Encoder is the interface that wraps the basic Encode method. +// Anything implementing Encoder can be turned into bytes using Kafka's encoding rules. +type encoder interface { + encode(pe packetEncoder) error +} + +type encoderWithHeader interface { + encoder + headerVersion() int16 +} + +// Encode takes an Encoder and turns it into bytes while potentially recording metrics. +func encode(e encoder, metricRegistry metrics.Registry) ([]byte, error) { + if e == nil { + return nil, nil + } + + var prepEnc prepEncoder + var realEnc realEncoder + + err := e.encode(&prepEnc) + if err != nil { + return nil, err + } + + if prepEnc.length < 0 || prepEnc.length > int(MaxRequestSize) { + return nil, PacketEncodingError{fmt.Sprintf("invalid request size (%d)", prepEnc.length)} + } + + realEnc.raw = make([]byte, prepEnc.length) + realEnc.registry = metricRegistry + err = e.encode(&realEnc) + if err != nil { + return nil, err + } + + return realEnc.raw, nil +} + +// decoder is the interface that wraps the basic Decode method. +// Anything implementing Decoder can be extracted from bytes using Kafka's encoding rules. +type decoder interface { + decode(pd packetDecoder) error +} + +type versionedDecoder interface { + decode(pd packetDecoder, version int16) error +} + +// decode takes bytes and a decoder and fills the fields of the decoder from the bytes, +// interpreted using Kafka's encoding rules. +func decode(buf []byte, in decoder) error { + if buf == nil { + return nil + } + + helper := realDecoder{raw: buf} + err := in.decode(&helper) + if err != nil { + return err + } + + if helper.off != len(buf) { + return PacketDecodingError{"invalid length"} + } + + return nil +} + +func versionedDecode(buf []byte, in versionedDecoder, version int16) error { + if buf == nil { + return nil + } + + helper := realDecoder{raw: buf} + err := in.decode(&helper, version) + if err != nil { + return err + } + + if helper.off != len(buf) { + return PacketDecodingError{"invalid length"} + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/end_txn_request.go b/vendor/github.com/Shopify/sarama/end_txn_request.go new file mode 100644 index 00000000000..6635425ddd6 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/end_txn_request.go @@ -0,0 +1,54 @@ +package sarama + +type EndTxnRequest struct { + TransactionalID string + ProducerID int64 + ProducerEpoch int16 + TransactionResult bool +} + +func (a *EndTxnRequest) encode(pe packetEncoder) error { + if err := pe.putString(a.TransactionalID); err != nil { + return err + } + + pe.putInt64(a.ProducerID) + + pe.putInt16(a.ProducerEpoch) + + pe.putBool(a.TransactionResult) + + return nil +} + +func (a *EndTxnRequest) decode(pd packetDecoder, version int16) (err error) { + if a.TransactionalID, err = pd.getString(); err != nil { + return err + } + if a.ProducerID, err = pd.getInt64(); err != nil { + return err + } + if a.ProducerEpoch, err = pd.getInt16(); err != nil { + return err + } + if a.TransactionResult, err = pd.getBool(); err != nil { + return err + } + return nil +} + +func (a *EndTxnRequest) key() int16 { + return 26 +} + +func (a *EndTxnRequest) version() int16 { + return 0 +} + +func (r *EndTxnRequest) headerVersion() int16 { + return 1 +} + +func (a *EndTxnRequest) requiredVersion() KafkaVersion { + return V0_11_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/end_txn_response.go 
b/vendor/github.com/Shopify/sarama/end_txn_response.go new file mode 100644 index 00000000000..763976726cc --- /dev/null +++ b/vendor/github.com/Shopify/sarama/end_txn_response.go @@ -0,0 +1,48 @@ +package sarama + +import ( + "time" +) + +type EndTxnResponse struct { + ThrottleTime time.Duration + Err KError +} + +func (e *EndTxnResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(e.ThrottleTime / time.Millisecond)) + pe.putInt16(int16(e.Err)) + return nil +} + +func (e *EndTxnResponse) decode(pd packetDecoder, version int16) (err error) { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + e.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + kerr, err := pd.getInt16() + if err != nil { + return err + } + e.Err = KError(kerr) + + return nil +} + +func (e *EndTxnResponse) key() int16 { + return 25 +} + +func (e *EndTxnResponse) version() int16 { + return 0 +} + +func (r *EndTxnResponse) headerVersion() int16 { + return 0 +} + +func (e *EndTxnResponse) requiredVersion() KafkaVersion { + return V0_11_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/errors.go b/vendor/github.com/Shopify/sarama/errors.go new file mode 100644 index 00000000000..0fca0a30eab --- /dev/null +++ b/vendor/github.com/Shopify/sarama/errors.go @@ -0,0 +1,409 @@ +package sarama + +import ( + "errors" + "fmt" +) + +// ErrOutOfBrokers is the error returned when the client has run out of brokers to talk to because all of them errored +// or otherwise failed to respond. +var ErrOutOfBrokers = errors.New("kafka: client has run out of available brokers to talk to (Is your cluster reachable?)") + +// ErrBrokerNotFound is the error returned when there's no broker found for the requested ID. +var ErrBrokerNotFound = errors.New("kafka: broker for ID is not found") + +// ErrClosedClient is the error returned when a method is called on a client that has been closed. +var ErrClosedClient = errors.New("kafka: tried to use a client that was closed") + +// ErrIncompleteResponse is the error returned when the server returns a syntactically valid response, but it does +// not contain the expected information. +var ErrIncompleteResponse = errors.New("kafka: response did not contain all the expected topic/partition blocks") + +// ErrInvalidPartition is the error returned when a partitioner returns an invalid partition index +// (meaning one outside of the range [0...numPartitions-1]). +var ErrInvalidPartition = errors.New("kafka: partitioner returned an invalid partition index") + +// ErrAlreadyConnected is the error returned when calling Open() on a Broker that is already connected or connecting. +var ErrAlreadyConnected = errors.New("kafka: broker connection already initiated") + +// ErrNotConnected is the error returned when trying to send or call Close() on a Broker that is not connected. +var ErrNotConnected = errors.New("kafka: broker not connected") + +// ErrInsufficientData is returned when decoding and the packet is truncated. This can be expected +// when requesting messages, since as an optimization the server is allowed to return a partial message at the end +// of the message set. +var ErrInsufficientData = errors.New("kafka: insufficient data to decode packet, more bytes expected") + +// ErrShuttingDown is returned when a producer receives a message during shutdown. 
+var ErrShuttingDown = errors.New("kafka: message received by producer in process of shutting down")
+
+// ErrMessageTooLarge is returned when the next message to consume is larger than the configured Consumer.Fetch.Max
+var ErrMessageTooLarge = errors.New("kafka: message is larger than Consumer.Fetch.Max")
+
+// ErrConsumerOffsetNotAdvanced is returned when a partition consumer didn't advance its offset after parsing
+// a RecordBatch.
+var ErrConsumerOffsetNotAdvanced = errors.New("kafka: consumer offset was not advanced after a RecordBatch")
+
+// ErrControllerNotAvailable is returned when the server didn't give a correct controller ID; the Kafka server's
+// version may be lower than 0.10.0.0.
+var ErrControllerNotAvailable = errors.New("kafka: controller is not available")
+
+// ErrNoTopicsToUpdateMetadata is returned when Meta.Full is set to false but no specific topics were found to update
+// the metadata.
+var ErrNoTopicsToUpdateMetadata = errors.New("kafka: no specific topics to update metadata")
+
+// ErrUnknownScramMechanism is returned when a user tries to AlterUserScramCredentials with an unknown SCRAM mechanism
+var ErrUnknownScramMechanism = errors.New("kafka: unknown SCRAM mechanism provided")
+
+// PacketEncodingError is returned from a failure while encoding a Kafka packet. This can happen, for example,
+// if you try to encode a string over 2^15 characters in length, since Kafka's encoding rules do not permit that.
+type PacketEncodingError struct {
+	Info string
+}
+
+func (err PacketEncodingError) Error() string {
+	return fmt.Sprintf("kafka: error encoding packet: %s", err.Info)
+}
+
+// PacketDecodingError is returned when there was an error (other than truncated data) decoding the Kafka broker's response.
+// This can be a bad CRC or length field, or any other invalid value.
+type PacketDecodingError struct {
+	Info string
+}
+
+func (err PacketDecodingError) Error() string {
+	return fmt.Sprintf("kafka: error decoding packet: %s", err.Info)
+}
+
+// ConfigurationError is the type of error returned from a constructor (e.g. NewClient, or NewConsumer)
+// when the specified configuration is invalid.
+type ConfigurationError string
+
+func (err ConfigurationError) Error() string {
+	return "kafka: invalid configuration (" + string(err) + ")"
+}
+
+// KError is the type of error that can be returned directly by the Kafka broker.
+// See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes
+type KError int16
+
+// MultiError is used to contain multiple errors.
+type MultiError struct {
+	Errors *[]error
+}
+
+func (mErr MultiError) Error() string {
+	errString := ""
+	for _, err := range *mErr.Errors {
+		errString += err.Error() + ","
+	}
+	return errString
+}
+
+func (mErr MultiError) PrettyError() string {
+	errString := ""
+	for _, err := range *mErr.Errors {
+		errString += err.Error() + "\n"
+	}
+	return errString
+}
+
+// ErrDeleteRecords is the type of error returned when deleting the required records fails
+type ErrDeleteRecords struct {
+	MultiError
+}
+
+func (err ErrDeleteRecords) Error() string {
+	return "kafka server: failed to delete records " + err.MultiError.Error()
+}
+
+type ErrReassignPartitions struct {
+	MultiError
+}
+
+func (err ErrReassignPartitions) Error() string {
+	return fmt.Sprintf("failed to reassign partitions for topic: \n%s", err.MultiError.PrettyError())
+}
+
+// Numeric error codes returned by the Kafka server.
+const ( + ErrNoError KError = 0 + ErrUnknown KError = -1 + ErrOffsetOutOfRange KError = 1 + ErrInvalidMessage KError = 2 + ErrUnknownTopicOrPartition KError = 3 + ErrInvalidMessageSize KError = 4 + ErrLeaderNotAvailable KError = 5 + ErrNotLeaderForPartition KError = 6 + ErrRequestTimedOut KError = 7 + ErrBrokerNotAvailable KError = 8 + ErrReplicaNotAvailable KError = 9 + ErrMessageSizeTooLarge KError = 10 + ErrStaleControllerEpochCode KError = 11 + ErrOffsetMetadataTooLarge KError = 12 + ErrNetworkException KError = 13 + ErrOffsetsLoadInProgress KError = 14 + ErrConsumerCoordinatorNotAvailable KError = 15 + ErrNotCoordinatorForConsumer KError = 16 + ErrInvalidTopic KError = 17 + ErrMessageSetSizeTooLarge KError = 18 + ErrNotEnoughReplicas KError = 19 + ErrNotEnoughReplicasAfterAppend KError = 20 + ErrInvalidRequiredAcks KError = 21 + ErrIllegalGeneration KError = 22 + ErrInconsistentGroupProtocol KError = 23 + ErrInvalidGroupId KError = 24 + ErrUnknownMemberId KError = 25 + ErrInvalidSessionTimeout KError = 26 + ErrRebalanceInProgress KError = 27 + ErrInvalidCommitOffsetSize KError = 28 + ErrTopicAuthorizationFailed KError = 29 + ErrGroupAuthorizationFailed KError = 30 + ErrClusterAuthorizationFailed KError = 31 + ErrInvalidTimestamp KError = 32 + ErrUnsupportedSASLMechanism KError = 33 + ErrIllegalSASLState KError = 34 + ErrUnsupportedVersion KError = 35 + ErrTopicAlreadyExists KError = 36 + ErrInvalidPartitions KError = 37 + ErrInvalidReplicationFactor KError = 38 + ErrInvalidReplicaAssignment KError = 39 + ErrInvalidConfig KError = 40 + ErrNotController KError = 41 + ErrInvalidRequest KError = 42 + ErrUnsupportedForMessageFormat KError = 43 + ErrPolicyViolation KError = 44 + ErrOutOfOrderSequenceNumber KError = 45 + ErrDuplicateSequenceNumber KError = 46 + ErrInvalidProducerEpoch KError = 47 + ErrInvalidTxnState KError = 48 + ErrInvalidProducerIDMapping KError = 49 + ErrInvalidTransactionTimeout KError = 50 + ErrConcurrentTransactions KError = 51 + ErrTransactionCoordinatorFenced KError = 52 + ErrTransactionalIDAuthorizationFailed KError = 53 + ErrSecurityDisabled KError = 54 + ErrOperationNotAttempted KError = 55 + ErrKafkaStorageError KError = 56 + ErrLogDirNotFound KError = 57 + ErrSASLAuthenticationFailed KError = 58 + ErrUnknownProducerID KError = 59 + ErrReassignmentInProgress KError = 60 + ErrDelegationTokenAuthDisabled KError = 61 + ErrDelegationTokenNotFound KError = 62 + ErrDelegationTokenOwnerMismatch KError = 63 + ErrDelegationTokenRequestNotAllowed KError = 64 + ErrDelegationTokenAuthorizationFailed KError = 65 + ErrDelegationTokenExpired KError = 66 + ErrInvalidPrincipalType KError = 67 + ErrNonEmptyGroup KError = 68 + ErrGroupIDNotFound KError = 69 + ErrFetchSessionIDNotFound KError = 70 + ErrInvalidFetchSessionEpoch KError = 71 + ErrListenerNotFound KError = 72 + ErrTopicDeletionDisabled KError = 73 + ErrFencedLeaderEpoch KError = 74 + ErrUnknownLeaderEpoch KError = 75 + ErrUnsupportedCompressionType KError = 76 + ErrStaleBrokerEpoch KError = 77 + ErrOffsetNotAvailable KError = 78 + ErrMemberIdRequired KError = 79 + ErrPreferredLeaderNotAvailable KError = 80 + ErrGroupMaxSizeReached KError = 81 + ErrFencedInstancedId KError = 82 + ErrEligibleLeadersNotAvailable KError = 83 + ErrElectionNotNeeded KError = 84 + ErrNoReassignmentInProgress KError = 85 + ErrGroupSubscribedToTopic KError = 86 + ErrInvalidRecord KError = 87 + ErrUnstableOffsetCommit KError = 88 +) + +func (err KError) Error() string { + // Error messages stolen/adapted from + // 
https://kafka.apache.org/protocol#protocol_error_codes
+	switch err {
+	case ErrNoError:
+		return "kafka server: Not an error, why are you printing me?"
+	case ErrUnknown:
+		return "kafka server: Unexpected (unknown?) server error."
+	case ErrOffsetOutOfRange:
+		return "kafka server: The requested offset is outside the range of offsets maintained by the server for the given topic/partition."
+	case ErrInvalidMessage:
+		return "kafka server: Message contents do not match its CRC."
+	case ErrUnknownTopicOrPartition:
+		return "kafka server: Request was for a topic or partition that does not exist on this broker."
+	case ErrInvalidMessageSize:
+		return "kafka server: The message has a negative size."
+	case ErrLeaderNotAvailable:
+		return "kafka server: In the middle of a leadership election, there is currently no leader for this partition and hence it is unavailable for writes."
+	case ErrNotLeaderForPartition:
+		return "kafka server: Tried to send a message to a replica that is not the leader for some partition. Your metadata is out of date."
+	case ErrRequestTimedOut:
+		return "kafka server: Request exceeded the user-specified time limit in the request."
+	case ErrBrokerNotAvailable:
+		return "kafka server: Broker not available. Not a client facing error, we should never receive this!!!"
+	case ErrReplicaNotAvailable:
+		return "kafka server: Replica information not available, one or more brokers are down."
+	case ErrMessageSizeTooLarge:
+		return "kafka server: Message was too large, server rejected it to avoid an allocation error."
+	case ErrStaleControllerEpochCode:
+		return "kafka server: StaleControllerEpochCode (internal error code for broker-to-broker communication)."
+	case ErrOffsetMetadataTooLarge:
+		return "kafka server: Specified a string larger than the configured maximum for offset metadata."
+	case ErrNetworkException:
+		return "kafka server: The server disconnected before a response was received."
+	case ErrOffsetsLoadInProgress:
+		return "kafka server: The broker is still loading offsets after a leader change for that offset's topic partition."
+	case ErrConsumerCoordinatorNotAvailable:
+		return "kafka server: Offset's topic has not yet been created."
+	case ErrNotCoordinatorForConsumer:
+		return "kafka server: Request was for a consumer group that is not coordinated by this broker."
+	case ErrInvalidTopic:
+		return "kafka server: The request attempted to perform an operation on an invalid topic."
+	case ErrMessageSetSizeTooLarge:
+		return "kafka server: The request included a message batch larger than the configured segment size on the server."
+	case ErrNotEnoughReplicas:
+		return "kafka server: Messages are rejected since there are fewer in-sync replicas than required."
+	case ErrNotEnoughReplicasAfterAppend:
+		return "kafka server: Messages are written to the log, but to fewer in-sync replicas than required."
+	case ErrInvalidRequiredAcks:
+		return "kafka server: The number of required acks is invalid (should be either -1, 0, or 1)."
+	case ErrIllegalGeneration:
+		return "kafka server: The provided generation id is not the current generation."
+	case ErrInconsistentGroupProtocol:
+		return "kafka server: The provided group protocol type is incompatible with the other members."
+	case ErrInvalidGroupId:
+		return "kafka server: The provided group id was empty."
+	case ErrUnknownMemberId:
+		return "kafka server: The provided member is not known in the current generation."
+ case ErrInvalidSessionTimeout: + return "kafka server: The provided session timeout is outside the allowed range." + case ErrRebalanceInProgress: + return "kafka server: A rebalance for the group is in progress. Please re-join the group." + case ErrInvalidCommitOffsetSize: + return "kafka server: The provided commit metadata was too large." + case ErrTopicAuthorizationFailed: + return "kafka server: The client is not authorized to access this topic." + case ErrGroupAuthorizationFailed: + return "kafka server: The client is not authorized to access this group." + case ErrClusterAuthorizationFailed: + return "kafka server: The client is not authorized to send this request type." + case ErrInvalidTimestamp: + return "kafka server: The timestamp of the message is out of acceptable range." + case ErrUnsupportedSASLMechanism: + return "kafka server: The broker does not support the requested SASL mechanism." + case ErrIllegalSASLState: + return "kafka server: Request is not valid given the current SASL state." + case ErrUnsupportedVersion: + return "kafka server: The version of API is not supported." + case ErrTopicAlreadyExists: + return "kafka server: Topic with this name already exists." + case ErrInvalidPartitions: + return "kafka server: Number of partitions is invalid." + case ErrInvalidReplicationFactor: + return "kafka server: Replication-factor is invalid." + case ErrInvalidReplicaAssignment: + return "kafka server: Replica assignment is invalid." + case ErrInvalidConfig: + return "kafka server: Configuration is invalid." + case ErrNotController: + return "kafka server: This is not the correct controller for this cluster." + case ErrInvalidRequest: + return "kafka server: This most likely occurs because of a request being malformed by the client library or the message was sent to an incompatible broker. See the broker logs for more details." + case ErrUnsupportedForMessageFormat: + return "kafka server: The requested operation is not supported by the message format version." + case ErrPolicyViolation: + return "kafka server: Request parameters do not satisfy the configured policy." + case ErrOutOfOrderSequenceNumber: + return "kafka server: The broker received an out of order sequence number." + case ErrDuplicateSequenceNumber: + return "kafka server: The broker received a duplicate sequence number." + case ErrInvalidProducerEpoch: + return "kafka server: Producer attempted an operation with an old epoch." + case ErrInvalidTxnState: + return "kafka server: The producer attempted a transactional operation in an invalid state." + case ErrInvalidProducerIDMapping: + return "kafka server: The producer attempted to use a producer id which is not currently assigned to its transactional id." + case ErrInvalidTransactionTimeout: + return "kafka server: The transaction timeout is larger than the maximum value allowed by the broker (as configured by max.transaction.timeout.ms)." + case ErrConcurrentTransactions: + return "kafka server: The producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing." + case ErrTransactionCoordinatorFenced: + return "kafka server: The transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer." + case ErrTransactionalIDAuthorizationFailed: + return "kafka server: Transactional ID authorization failed." + case ErrSecurityDisabled: + return "kafka server: Security features are disabled." 
+	case ErrOperationNotAttempted:
+		return "kafka server: The broker did not attempt to execute this operation."
+	case ErrKafkaStorageError:
+		return "kafka server: Disk error when trying to access log file on the disk."
+	case ErrLogDirNotFound:
+		return "kafka server: The specified log directory is not found in the broker config."
+	case ErrSASLAuthenticationFailed:
+		return "kafka server: SASL Authentication failed."
+	case ErrUnknownProducerID:
+		return "kafka server: The broker could not locate the producer metadata associated with the Producer ID."
+	case ErrReassignmentInProgress:
+		return "kafka server: A partition reassignment is in progress."
+	case ErrDelegationTokenAuthDisabled:
+		return "kafka server: Delegation Token feature is not enabled."
+	case ErrDelegationTokenNotFound:
+		return "kafka server: Delegation Token is not found on server."
+	case ErrDelegationTokenOwnerMismatch:
+		return "kafka server: Specified Principal is not valid Owner/Renewer."
+	case ErrDelegationTokenRequestNotAllowed:
+		return "kafka server: Delegation Token requests are not allowed on PLAINTEXT/1-way SSL channels and on delegation token authenticated channels."
+	case ErrDelegationTokenAuthorizationFailed:
+		return "kafka server: Delegation Token authorization failed."
+	case ErrDelegationTokenExpired:
+		return "kafka server: Delegation Token is expired."
+	case ErrInvalidPrincipalType:
+		return "kafka server: Supplied principalType is not supported."
+	case ErrNonEmptyGroup:
+		return "kafka server: The group is not empty."
+	case ErrGroupIDNotFound:
+		return "kafka server: The group id does not exist."
+	case ErrFetchSessionIDNotFound:
+		return "kafka server: The fetch session ID was not found."
+	case ErrInvalidFetchSessionEpoch:
+		return "kafka server: The fetch session epoch is invalid."
+	case ErrListenerNotFound:
+		return "kafka server: There is no listener on the leader broker that matches the listener on which metadata request was processed."
+	case ErrTopicDeletionDisabled:
+		return "kafka server: Topic deletion is disabled."
+	case ErrFencedLeaderEpoch:
+		return "kafka server: The leader epoch in the request is older than the epoch on the broker."
+	case ErrUnknownLeaderEpoch:
+		return "kafka server: The leader epoch in the request is newer than the epoch on the broker."
+	case ErrUnsupportedCompressionType:
+		return "kafka server: The requesting client does not support the compression type of given partition."
+	case ErrStaleBrokerEpoch:
+		return "kafka server: Broker epoch has changed."
+	case ErrOffsetNotAvailable:
+		return "kafka server: The leader high watermark has not caught up from a recent leader election so the offsets cannot be guaranteed to be monotonically increasing."
+	case ErrMemberIdRequired:
+		return "kafka server: The group member needs to have a valid member id before actually entering a consumer group."
+	case ErrPreferredLeaderNotAvailable:
+		return "kafka server: The preferred leader was not available."
+	case ErrGroupMaxSizeReached:
+		return "kafka server: The consumer group has reached its configured maximum number of members."
+	case ErrFencedInstancedId:
+		return "kafka server: The broker rejected this static consumer since another consumer with the same group.instance.id has registered with a different member.id."
+	case ErrEligibleLeadersNotAvailable:
+		return "kafka server: Eligible topic partition leaders are not available."
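+	// Example (illustrative, not part of the vendored API): KError is a plain
+	// int16, so callers typically match broker errors by direct comparison:
+	//
+	//	if kerr, ok := err.(KError); ok && kerr == ErrRebalanceInProgress {
+	//		rejoinGroup() // hypothetical retry hook
+	//	}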
+ case ErrElectionNotNeeded: + return "kafka server: Leader election not needed for topic partition." + case ErrNoReassignmentInProgress: + return "kafka server: No partition reassignment is in progress." + case ErrGroupSubscribedToTopic: + return "kafka server: Deleting offsets of a topic is forbidden while the consumer group is actively subscribed to it." + case ErrInvalidRecord: + return "kafka server: This record has failed the validation on broker and hence will be rejected." + case ErrUnstableOffsetCommit: + return "kafka server: There are unstable offsets that need to be cleared." + } + + return fmt.Sprintf("Unknown error, how did this happen? Error code = %d", err) +} diff --git a/vendor/github.com/Shopify/sarama/fetch_request.go b/vendor/github.com/Shopify/sarama/fetch_request.go new file mode 100644 index 00000000000..f893aeff7d5 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/fetch_request.go @@ -0,0 +1,295 @@ +package sarama + +type fetchRequestBlock struct { + Version int16 + currentLeaderEpoch int32 + fetchOffset int64 + logStartOffset int64 + maxBytes int32 +} + +func (b *fetchRequestBlock) encode(pe packetEncoder, version int16) error { + b.Version = version + if b.Version >= 9 { + pe.putInt32(b.currentLeaderEpoch) + } + pe.putInt64(b.fetchOffset) + if b.Version >= 5 { + pe.putInt64(b.logStartOffset) + } + pe.putInt32(b.maxBytes) + return nil +} + +func (b *fetchRequestBlock) decode(pd packetDecoder, version int16) (err error) { + b.Version = version + if b.Version >= 9 { + if b.currentLeaderEpoch, err = pd.getInt32(); err != nil { + return err + } + } + if b.fetchOffset, err = pd.getInt64(); err != nil { + return err + } + if b.Version >= 5 { + if b.logStartOffset, err = pd.getInt64(); err != nil { + return err + } + } + if b.maxBytes, err = pd.getInt32(); err != nil { + return err + } + return nil +} + +// FetchRequest (API key 1) will fetch Kafka messages. Version 3 introduced the MaxBytes field. See +// https://issues.apache.org/jira/browse/KAFKA-2063 for a discussion of the issues leading up to that. 
The KIP is at +// https://cwiki.apache.org/confluence/display/KAFKA/KIP-74%3A+Add+Fetch+Response+Size+Limit+in+Bytes +type FetchRequest struct { + MaxWaitTime int32 + MinBytes int32 + MaxBytes int32 + Version int16 + Isolation IsolationLevel + SessionID int32 + SessionEpoch int32 + blocks map[string]map[int32]*fetchRequestBlock + forgotten map[string][]int32 + RackID string +} + +type IsolationLevel int8 + +const ( + ReadUncommitted IsolationLevel = iota + ReadCommitted +) + +func (r *FetchRequest) encode(pe packetEncoder) (err error) { + pe.putInt32(-1) // replica ID is always -1 for clients + pe.putInt32(r.MaxWaitTime) + pe.putInt32(r.MinBytes) + if r.Version >= 3 { + pe.putInt32(r.MaxBytes) + } + if r.Version >= 4 { + pe.putInt8(int8(r.Isolation)) + } + if r.Version >= 7 { + pe.putInt32(r.SessionID) + pe.putInt32(r.SessionEpoch) + } + err = pe.putArrayLength(len(r.blocks)) + if err != nil { + return err + } + for topic, blocks := range r.blocks { + err = pe.putString(topic) + if err != nil { + return err + } + err = pe.putArrayLength(len(blocks)) + if err != nil { + return err + } + for partition, block := range blocks { + pe.putInt32(partition) + err = block.encode(pe, r.Version) + if err != nil { + return err + } + } + } + if r.Version >= 7 { + err = pe.putArrayLength(len(r.forgotten)) + if err != nil { + return err + } + for topic, partitions := range r.forgotten { + err = pe.putString(topic) + if err != nil { + return err + } + err = pe.putArrayLength(len(partitions)) + if err != nil { + return err + } + for _, partition := range partitions { + pe.putInt32(partition) + } + } + } + if r.Version >= 11 { + err = pe.putString(r.RackID) + if err != nil { + return err + } + } + + return nil +} + +func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + if _, err = pd.getInt32(); err != nil { + return err + } + if r.MaxWaitTime, err = pd.getInt32(); err != nil { + return err + } + if r.MinBytes, err = pd.getInt32(); err != nil { + return err + } + if r.Version >= 3 { + if r.MaxBytes, err = pd.getInt32(); err != nil { + return err + } + } + if r.Version >= 4 { + isolation, err := pd.getInt8() + if err != nil { + return err + } + r.Isolation = IsolationLevel(isolation) + } + if r.Version >= 7 { + r.SessionID, err = pd.getInt32() + if err != nil { + return err + } + r.SessionEpoch, err = pd.getInt32() + if err != nil { + return err + } + } + topicCount, err := pd.getArrayLength() + if err != nil { + return err + } + if topicCount == 0 { + return nil + } + r.blocks = make(map[string]map[int32]*fetchRequestBlock) + for i := 0; i < topicCount; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + partitionCount, err := pd.getArrayLength() + if err != nil { + return err + } + r.blocks[topic] = make(map[int32]*fetchRequestBlock) + for j := 0; j < partitionCount; j++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + fetchBlock := &fetchRequestBlock{} + if err = fetchBlock.decode(pd, r.Version); err != nil { + return err + } + r.blocks[topic][partition] = fetchBlock + } + } + + if r.Version >= 7 { + forgottenCount, err := pd.getArrayLength() + if err != nil { + return err + } + r.forgotten = make(map[string][]int32) + for i := 0; i < forgottenCount; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + partitionCount, err := pd.getArrayLength() + if err != nil { + return err + } + r.forgotten[topic] = make([]int32, partitionCount) + + for j := 0; j < partitionCount; j++ { + partition, err := 
pd.getInt32() + if err != nil { + return err + } + r.forgotten[topic][j] = partition + } + } + } + + if r.Version >= 11 { + r.RackID, err = pd.getString() + if err != nil { + return err + } + } + + return nil +} + +func (r *FetchRequest) key() int16 { + return 1 +} + +func (r *FetchRequest) version() int16 { + return r.Version +} + +func (r *FetchRequest) headerVersion() int16 { + return 1 +} + +func (r *FetchRequest) requiredVersion() KafkaVersion { + switch r.Version { + case 0: + return MinVersion + case 1: + return V0_9_0_0 + case 2: + return V0_10_0_0 + case 3: + return V0_10_1_0 + case 4, 5: + return V0_11_0_0 + case 6: + return V1_0_0_0 + case 7: + return V1_1_0_0 + case 8: + return V2_0_0_0 + case 9, 10: + return V2_1_0_0 + case 11: + return V2_3_0_0 + default: + return MaxVersion + } +} + +func (r *FetchRequest) AddBlock(topic string, partitionID int32, fetchOffset int64, maxBytes int32) { + if r.blocks == nil { + r.blocks = make(map[string]map[int32]*fetchRequestBlock) + } + + if r.Version >= 7 && r.forgotten == nil { + r.forgotten = make(map[string][]int32) + } + + if r.blocks[topic] == nil { + r.blocks[topic] = make(map[int32]*fetchRequestBlock) + } + + tmp := new(fetchRequestBlock) + tmp.Version = r.Version + tmp.maxBytes = maxBytes + tmp.fetchOffset = fetchOffset + if r.Version >= 9 { + tmp.currentLeaderEpoch = int32(-1) + } + + r.blocks[topic][partitionID] = tmp +} diff --git a/vendor/github.com/Shopify/sarama/fetch_response.go b/vendor/github.com/Shopify/sarama/fetch_response.go new file mode 100644 index 00000000000..54b88284ad9 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/fetch_response.go @@ -0,0 +1,548 @@ +package sarama + +import ( + "sort" + "time" +) + +type AbortedTransaction struct { + ProducerID int64 + FirstOffset int64 +} + +func (t *AbortedTransaction) decode(pd packetDecoder) (err error) { + if t.ProducerID, err = pd.getInt64(); err != nil { + return err + } + + if t.FirstOffset, err = pd.getInt64(); err != nil { + return err + } + + return nil +} + +func (t *AbortedTransaction) encode(pe packetEncoder) (err error) { + pe.putInt64(t.ProducerID) + pe.putInt64(t.FirstOffset) + + return nil +} + +type FetchResponseBlock struct { + Err KError + HighWaterMarkOffset int64 + LastStableOffset int64 + LogStartOffset int64 + AbortedTransactions []*AbortedTransaction + PreferredReadReplica int32 + Records *Records // deprecated: use FetchResponseBlock.RecordsSet + RecordsSet []*Records + Partial bool +} + +func (b *FetchResponseBlock) decode(pd packetDecoder, version int16) (err error) { + tmp, err := pd.getInt16() + if err != nil { + return err + } + b.Err = KError(tmp) + + b.HighWaterMarkOffset, err = pd.getInt64() + if err != nil { + return err + } + + if version >= 4 { + b.LastStableOffset, err = pd.getInt64() + if err != nil { + return err + } + + if version >= 5 { + b.LogStartOffset, err = pd.getInt64() + if err != nil { + return err + } + } + + numTransact, err := pd.getArrayLength() + if err != nil { + return err + } + + if numTransact >= 0 { + b.AbortedTransactions = make([]*AbortedTransaction, numTransact) + } + + for i := 0; i < numTransact; i++ { + transact := new(AbortedTransaction) + if err = transact.decode(pd); err != nil { + return err + } + b.AbortedTransactions[i] = transact + } + } + + if version >= 11 { + b.PreferredReadReplica, err = pd.getInt32() + if err != nil { + return err + } + } else { + b.PreferredReadReplica = -1 + } + + recordsSize, err := pd.getInt32() + if err != nil { + return err + } + + recordsDecoder, err := 
pd.getSubset(int(recordsSize)) + if err != nil { + return err + } + + b.RecordsSet = []*Records{} + + for recordsDecoder.remaining() > 0 { + records := &Records{} + if err := records.decode(recordsDecoder); err != nil { + // If we have at least one decoded records, this is not an error + if err == ErrInsufficientData { + if len(b.RecordsSet) == 0 { + b.Partial = true + } + break + } + return err + } + + partial, err := records.isPartial() + if err != nil { + return err + } + + n, err := records.numRecords() + if err != nil { + return err + } + + if n > 0 || (partial && len(b.RecordsSet) == 0) { + b.RecordsSet = append(b.RecordsSet, records) + + if b.Records == nil { + b.Records = records + } + } + + overflow, err := records.isOverflow() + if err != nil { + return err + } + + if partial || overflow { + break + } + } + + return nil +} + +func (b *FetchResponseBlock) numRecords() (int, error) { + sum := 0 + + for _, records := range b.RecordsSet { + count, err := records.numRecords() + if err != nil { + return 0, err + } + + sum += count + } + + return sum, nil +} + +func (b *FetchResponseBlock) isPartial() (bool, error) { + if b.Partial { + return true, nil + } + + if len(b.RecordsSet) == 1 { + return b.RecordsSet[0].isPartial() + } + + return false, nil +} + +func (b *FetchResponseBlock) encode(pe packetEncoder, version int16) (err error) { + pe.putInt16(int16(b.Err)) + + pe.putInt64(b.HighWaterMarkOffset) + + if version >= 4 { + pe.putInt64(b.LastStableOffset) + + if version >= 5 { + pe.putInt64(b.LogStartOffset) + } + + if err = pe.putArrayLength(len(b.AbortedTransactions)); err != nil { + return err + } + for _, transact := range b.AbortedTransactions { + if err = transact.encode(pe); err != nil { + return err + } + } + } + + if version >= 11 { + pe.putInt32(b.PreferredReadReplica) + } + + pe.push(&lengthField{}) + for _, records := range b.RecordsSet { + err = records.encode(pe) + if err != nil { + return err + } + } + return pe.pop() +} + +func (b *FetchResponseBlock) getAbortedTransactions() []*AbortedTransaction { + // I can't find any doc that guarantee the field `fetchResponse.AbortedTransactions` is ordered + // plus Java implementation use a PriorityQueue based on `FirstOffset`. 
I guess we have to order it ourself + at := b.AbortedTransactions + sort.Slice( + at, + func(i, j int) bool { return at[i].FirstOffset < at[j].FirstOffset }, + ) + return at +} + +type FetchResponse struct { + Blocks map[string]map[int32]*FetchResponseBlock + ThrottleTime time.Duration + ErrorCode int16 + SessionID int32 + Version int16 + LogAppendTime bool + Timestamp time.Time +} + +func (r *FetchResponse) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + if r.Version >= 1 { + throttle, err := pd.getInt32() + if err != nil { + return err + } + r.ThrottleTime = time.Duration(throttle) * time.Millisecond + } + + if r.Version >= 7 { + r.ErrorCode, err = pd.getInt16() + if err != nil { + return err + } + r.SessionID, err = pd.getInt32() + if err != nil { + return err + } + } + + numTopics, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Blocks = make(map[string]map[int32]*FetchResponseBlock, numTopics) + for i := 0; i < numTopics; i++ { + name, err := pd.getString() + if err != nil { + return err + } + + numBlocks, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Blocks[name] = make(map[int32]*FetchResponseBlock, numBlocks) + + for j := 0; j < numBlocks; j++ { + id, err := pd.getInt32() + if err != nil { + return err + } + + block := new(FetchResponseBlock) + err = block.decode(pd, version) + if err != nil { + return err + } + r.Blocks[name][id] = block + } + } + + return nil +} + +func (r *FetchResponse) encode(pe packetEncoder) (err error) { + if r.Version >= 1 { + pe.putInt32(int32(r.ThrottleTime / time.Millisecond)) + } + + if r.Version >= 7 { + pe.putInt16(r.ErrorCode) + pe.putInt32(r.SessionID) + } + + err = pe.putArrayLength(len(r.Blocks)) + if err != nil { + return err + } + + for topic, partitions := range r.Blocks { + err = pe.putString(topic) + if err != nil { + return err + } + + err = pe.putArrayLength(len(partitions)) + if err != nil { + return err + } + + for id, block := range partitions { + pe.putInt32(id) + err = block.encode(pe, r.Version) + if err != nil { + return err + } + } + } + return nil +} + +func (r *FetchResponse) key() int16 { + return 1 +} + +func (r *FetchResponse) version() int16 { + return r.Version +} + +func (r *FetchResponse) headerVersion() int16 { + return 0 +} + +func (r *FetchResponse) requiredVersion() KafkaVersion { + switch r.Version { + case 0: + return MinVersion + case 1: + return V0_9_0_0 + case 2: + return V0_10_0_0 + case 3: + return V0_10_1_0 + case 4, 5: + return V0_11_0_0 + case 6: + return V1_0_0_0 + case 7: + return V1_1_0_0 + case 8: + return V2_0_0_0 + case 9, 10: + return V2_1_0_0 + case 11: + return V2_3_0_0 + default: + return MaxVersion + } +} + +func (r *FetchResponse) GetBlock(topic string, partition int32) *FetchResponseBlock { + if r.Blocks == nil { + return nil + } + + if r.Blocks[topic] == nil { + return nil + } + + return r.Blocks[topic][partition] +} + +func (r *FetchResponse) AddError(topic string, partition int32, err KError) { + if r.Blocks == nil { + r.Blocks = make(map[string]map[int32]*FetchResponseBlock) + } + partitions, ok := r.Blocks[topic] + if !ok { + partitions = make(map[int32]*FetchResponseBlock) + r.Blocks[topic] = partitions + } + frb, ok := partitions[partition] + if !ok { + frb = new(FetchResponseBlock) + partitions[partition] = frb + } + frb.Err = err +} + +func (r *FetchResponse) getOrCreateBlock(topic string, partition int32) *FetchResponseBlock { + if r.Blocks == nil { + r.Blocks = make(map[string]map[int32]*FetchResponseBlock) + } + 
partitions, ok := r.Blocks[topic]
+	if !ok {
+		partitions = make(map[int32]*FetchResponseBlock)
+		r.Blocks[topic] = partitions
+	}
+	frb, ok := partitions[partition]
+	if !ok {
+		frb = new(FetchResponseBlock)
+		partitions[partition] = frb
+	}
+
+	return frb
+}
+
+func encodeKV(key, value Encoder) ([]byte, []byte) {
+	var kb []byte
+	var vb []byte
+	if key != nil {
+		kb, _ = key.Encode()
+	}
+	if value != nil {
+		vb, _ = value.Encode()
+	}
+
+	return kb, vb
+}
+
+func (r *FetchResponse) AddMessageWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, timestamp time.Time, version int8) {
+	frb := r.getOrCreateBlock(topic, partition)
+	kb, vb := encodeKV(key, value)
+	if r.LogAppendTime {
+		timestamp = r.Timestamp
+	}
+	msg := &Message{Key: kb, Value: vb, LogAppendTime: r.LogAppendTime, Timestamp: timestamp, Version: version}
+	msgBlock := &MessageBlock{Msg: msg, Offset: offset}
+	if len(frb.RecordsSet) == 0 {
+		records := newLegacyRecords(&MessageSet{})
+		frb.RecordsSet = []*Records{&records}
+	}
+	set := frb.RecordsSet[0].MsgSet
+	set.Messages = append(set.Messages, msgBlock)
+}
+
+func (r *FetchResponse) AddRecordWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, timestamp time.Time) {
+	frb := r.getOrCreateBlock(topic, partition)
+	kb, vb := encodeKV(key, value)
+	if len(frb.RecordsSet) == 0 {
+		records := newDefaultRecords(&RecordBatch{Version: 2, LogAppendTime: r.LogAppendTime, FirstTimestamp: timestamp, MaxTimestamp: r.Timestamp})
+		frb.RecordsSet = []*Records{&records}
+	}
+	batch := frb.RecordsSet[0].RecordBatch
+	rec := &Record{Key: kb, Value: vb, OffsetDelta: offset, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)}
+	batch.addRecord(rec)
+}
+
+// AddRecordBatchWithTimestamp is similar to AddRecordWithTimestamp, but instead of
+// appending one record to an existing batch, it appends a new batch containing one
+// record to the fetch response. Since transactions are handled at the batch level
+// (the whole batch is either committed or aborted), use this to test transactions.
+func (r *FetchResponse) AddRecordBatchWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, producerID int64, isTransactional bool, timestamp time.Time) {
+	frb := r.getOrCreateBlock(topic, partition)
+	kb, vb := encodeKV(key, value)
+
+	records := newDefaultRecords(&RecordBatch{Version: 2, LogAppendTime: r.LogAppendTime, FirstTimestamp: timestamp, MaxTimestamp: r.Timestamp})
+	batch := &RecordBatch{
+		Version:         2,
+		LogAppendTime:   r.LogAppendTime,
+		FirstTimestamp:  timestamp,
+		MaxTimestamp:    r.Timestamp,
+		FirstOffset:     offset,
+		LastOffsetDelta: 0,
+		ProducerID:      producerID,
+		IsTransactional: isTransactional,
+	}
+	rec := &Record{Key: kb, Value: vb, OffsetDelta: 0, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)}
+	batch.addRecord(rec)
+	records.RecordBatch = batch
+
+	frb.RecordsSet = append(frb.RecordsSet, &records)
+}
+
+func (r *FetchResponse) AddControlRecordWithTimestamp(topic string, partition int32, offset int64, producerID int64, recordType ControlRecordType, timestamp time.Time) {
+	frb := r.getOrCreateBlock(topic, partition)
+
+	// batch
+	batch := &RecordBatch{
+		Version:         2,
+		LogAppendTime:   r.LogAppendTime,
+		FirstTimestamp:  timestamp,
+		MaxTimestamp:    r.Timestamp,
+		FirstOffset:     offset,
+		LastOffsetDelta: 0,
+		ProducerID:      producerID,
+		IsTransactional: true,
+		Control:         true,
+	}
+
+	// records
+	records := newDefaultRecords(nil)
+	records.RecordBatch = batch
+
+	// record
+	crAbort := ControlRecord{
+		Version: 0,
+		Type:    recordType,
+	}
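+	// A control record carries its payload in the record key and value: the
+	// version and type above are serialized into a 4-byte key and a 6-byte value
+	// through the two realEncoder buffers that follow.
+	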
crKey := &realEncoder{raw: make([]byte, 4)} + crValue := &realEncoder{raw: make([]byte, 6)} + crAbort.encode(crKey, crValue) + rec := &Record{Key: ByteEncoder(crKey.raw), Value: ByteEncoder(crValue.raw), OffsetDelta: 0, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)} + batch.addRecord(rec) + + frb.RecordsSet = append(frb.RecordsSet, &records) +} + +func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Encoder, offset int64) { + r.AddMessageWithTimestamp(topic, partition, key, value, offset, time.Time{}, 0) +} + +func (r *FetchResponse) AddRecord(topic string, partition int32, key, value Encoder, offset int64) { + r.AddRecordWithTimestamp(topic, partition, key, value, offset, time.Time{}) +} + +func (r *FetchResponse) AddRecordBatch(topic string, partition int32, key, value Encoder, offset int64, producerID int64, isTransactional bool) { + r.AddRecordBatchWithTimestamp(topic, partition, key, value, offset, producerID, isTransactional, time.Time{}) +} + +func (r *FetchResponse) AddControlRecord(topic string, partition int32, offset int64, producerID int64, recordType ControlRecordType) { + // define controlRecord key and value + r.AddControlRecordWithTimestamp(topic, partition, offset, producerID, recordType, time.Time{}) +} + +func (r *FetchResponse) SetLastOffsetDelta(topic string, partition int32, offset int32) { + frb := r.getOrCreateBlock(topic, partition) + if len(frb.RecordsSet) == 0 { + records := newDefaultRecords(&RecordBatch{Version: 2}) + frb.RecordsSet = []*Records{&records} + } + batch := frb.RecordsSet[0].RecordBatch + batch.LastOffsetDelta = offset +} + +func (r *FetchResponse) SetLastStableOffset(topic string, partition int32, offset int64) { + frb := r.getOrCreateBlock(topic, partition) + frb.LastStableOffset = offset +} diff --git a/vendor/github.com/Shopify/sarama/find_coordinator_request.go b/vendor/github.com/Shopify/sarama/find_coordinator_request.go new file mode 100644 index 00000000000..597bcbf786f --- /dev/null +++ b/vendor/github.com/Shopify/sarama/find_coordinator_request.go @@ -0,0 +1,65 @@ +package sarama + +type CoordinatorType int8 + +const ( + CoordinatorGroup CoordinatorType = iota + CoordinatorTransaction +) + +type FindCoordinatorRequest struct { + Version int16 + CoordinatorKey string + CoordinatorType CoordinatorType +} + +func (f *FindCoordinatorRequest) encode(pe packetEncoder) error { + if err := pe.putString(f.CoordinatorKey); err != nil { + return err + } + + if f.Version >= 1 { + pe.putInt8(int8(f.CoordinatorType)) + } + + return nil +} + +func (f *FindCoordinatorRequest) decode(pd packetDecoder, version int16) (err error) { + if f.CoordinatorKey, err = pd.getString(); err != nil { + return err + } + + if version >= 1 { + f.Version = version + coordinatorType, err := pd.getInt8() + if err != nil { + return err + } + + f.CoordinatorType = CoordinatorType(coordinatorType) + } + + return nil +} + +func (f *FindCoordinatorRequest) key() int16 { + return 10 +} + +func (f *FindCoordinatorRequest) version() int16 { + return f.Version +} + +func (r *FindCoordinatorRequest) headerVersion() int16 { + return 1 +} + +func (f *FindCoordinatorRequest) requiredVersion() KafkaVersion { + switch f.Version { + case 1: + return V0_11_0_0 + default: + return V0_8_2_0 + } +} diff --git a/vendor/github.com/Shopify/sarama/find_coordinator_response.go b/vendor/github.com/Shopify/sarama/find_coordinator_response.go new file mode 100644 index 00000000000..83a648ad4ae --- /dev/null +++ 
b/vendor/github.com/Shopify/sarama/find_coordinator_response.go @@ -0,0 +1,96 @@ +package sarama + +import ( + "time" +) + +var NoNode = &Broker{id: -1, addr: ":-1"} + +type FindCoordinatorResponse struct { + Version int16 + ThrottleTime time.Duration + Err KError + ErrMsg *string + Coordinator *Broker +} + +func (f *FindCoordinatorResponse) decode(pd packetDecoder, version int16) (err error) { + if version >= 1 { + f.Version = version + + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + f.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + } + + tmp, err := pd.getInt16() + if err != nil { + return err + } + f.Err = KError(tmp) + + if version >= 1 { + if f.ErrMsg, err = pd.getNullableString(); err != nil { + return err + } + } + + coordinator := new(Broker) + // The version is hardcoded to 0, as version 1 of the Broker-decode + // contains the rack-field which is not present in the FindCoordinatorResponse. + if err := coordinator.decode(pd, 0); err != nil { + return err + } + if coordinator.addr == ":0" { + return nil + } + f.Coordinator = coordinator + + return nil +} + +func (f *FindCoordinatorResponse) encode(pe packetEncoder) error { + if f.Version >= 1 { + pe.putInt32(int32(f.ThrottleTime / time.Millisecond)) + } + + pe.putInt16(int16(f.Err)) + + if f.Version >= 1 { + if err := pe.putNullableString(f.ErrMsg); err != nil { + return err + } + } + + coordinator := f.Coordinator + if coordinator == nil { + coordinator = NoNode + } + if err := coordinator.encode(pe, 0); err != nil { + return err + } + return nil +} + +func (f *FindCoordinatorResponse) key() int16 { + return 10 +} + +func (f *FindCoordinatorResponse) version() int16 { + return f.Version +} + +func (r *FindCoordinatorResponse) headerVersion() int16 { + return 0 +} + +func (f *FindCoordinatorResponse) requiredVersion() KafkaVersion { + switch f.Version { + case 1: + return V0_11_0_0 + default: + return V0_8_2_0 + } +} diff --git a/vendor/github.com/Shopify/sarama/go.mod b/vendor/github.com/Shopify/sarama/go.mod new file mode 100644 index 00000000000..ccbd8e2d132 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/go.mod @@ -0,0 +1,28 @@ +module github.com/Shopify/sarama + +go 1.13 + +require ( + github.com/Shopify/toxiproxy v2.1.4+incompatible + github.com/davecgh/go-spew v1.1.1 + github.com/eapache/go-resiliency v1.2.0 + github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 + github.com/eapache/queue v1.1.0 + github.com/fortytw2/leaktest v1.3.0 + github.com/frankban/quicktest v1.11.3 // indirect + github.com/jcmturner/gofork v1.0.0 + github.com/jcmturner/gokrb5/v8 v8.4.2 + github.com/klauspost/compress v1.12.2 + github.com/kr/text v0.2.0 // indirect + github.com/pierrec/lz4 v2.6.0+incompatible + github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 + github.com/stretchr/testify v1.7.0 + github.com/xdg/scram v1.0.3 + github.com/xdg/stringprep v1.0.3 // indirect + golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e // indirect + golang.org/x/net v0.0.0-20210614182718-04defd469f4e + golang.org/x/text v0.3.6 // indirect + golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect + gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect + gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect +) diff --git a/vendor/github.com/Shopify/sarama/go.sum b/vendor/github.com/Shopify/sarama/go.sum new file mode 100644 index 00000000000..a49776049da --- /dev/null +++ b/vendor/github.com/Shopify/sarama/go.sum @@ -0,0 +1,93 @@ 
+github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/eapache/go-resiliency v1.2.0 h1:v7g92e/KSN71Rq7vSThKaWIq68fL4YHvWyiUKorFR1Q= +github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= +github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY= +github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ= +github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= +github.com/gorilla/sessions v1.2.1 h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7FsgI= +github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= +github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= +github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= +github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= +github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= +github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8= +github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= +github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o= +github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= +github.com/jcmturner/gokrb5/v8 v8.4.2 h1:6ZIM6b/JJN0X8UM43ZOM6Z4SJzla+a/u7scXFJzodkA= +github.com/jcmturner/gokrb5/v8 v8.4.2/go.mod h1:sb+Xq/fTY5yktf/VxLsE3wlfPqQjp0aWNYyvBVK62bc= +github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= +github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= +github.com/klauspost/compress v1.12.2 
h1:2KCfW3I9M7nSc5wOqXAlW2v2U6v+w6cbjvbfp+OykW8= +github.com/klauspost/compress v1.12.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/pierrec/lz4 v2.6.0+incompatible h1:Ix9yFKn1nSPBLFl/yZknTp8TU5G4Ps0JDmguYK6iH1A= +github.com/pierrec/lz4 v2.6.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/xdg/scram v1.0.3 h1:nTadYh2Fs4BK2xdldEa2g5bbaZp0/+1nJMMPtPxS/to= +github.com/xdg/scram v1.0.3/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= +github.com/xdg/stringprep v1.0.3 h1:cmL5Enob4W83ti/ZHuZLuKD/xqJfus4fVPwE+/BDm+4= +github.com/xdg/stringprep v1.0.3/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e h1:gsTQYXdTw2Gq7RBsWvlQ91b+aEQ6bXFUngBGuR8sPpI= +golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210427231257-85d9c07bbe3a h1:njMmldwFTyDLqonHMagNXKBWptTBeDZOdblgaDsNEGQ= +golang.org/x/net v0.0.0-20210427231257-85d9c07bbe3a/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210614182718-04defd469f4e h1:XpT3nA5TvE525Ne3hInMh6+GETgn27Zfm9dxsThnX2Q= +golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/github.com/Shopify/sarama/gssapi_kerberos.go b/vendor/github.com/Shopify/sarama/gssapi_kerberos.go new file mode 100644 index 00000000000..ab8b70196f8 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/gssapi_kerberos.go @@ -0,0 +1,253 @@ +package sarama + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "strings" + "time" + + "github.com/jcmturner/gofork/encoding/asn1" + "github.com/jcmturner/gokrb5/v8/asn1tools" + "github.com/jcmturner/gokrb5/v8/gssapi" + "github.com/jcmturner/gokrb5/v8/iana/chksumtype" + "github.com/jcmturner/gokrb5/v8/iana/keyusage" + "github.com/jcmturner/gokrb5/v8/messages" + "github.com/jcmturner/gokrb5/v8/types" +) + +const ( + TOK_ID_KRB_AP_REQ = 256 + GSS_API_GENERIC_TAG = 0x60 + KRB5_USER_AUTH = 1 + KRB5_KEYTAB_AUTH = 2 + GSS_API_INITIAL = 1 + GSS_API_VERIFY = 2 + GSS_API_FINISH = 3 +) + +type GSSAPIConfig struct { + AuthType int + KeyTabPath string + KerberosConfigPath string + ServiceName string + Username string + Password string + Realm string + DisablePAFXFAST bool +} + +type GSSAPIKerberosAuth struct { + Config *GSSAPIConfig + ticket messages.Ticket + encKey types.EncryptionKey + NewKerberosClientFunc func(config *GSSAPIConfig) (KerberosClient, error) + step int +} + +type KerberosClient interface { + Login() error + GetServiceTicket(spn string) (messages.Ticket, types.EncryptionKey, error) + Domain() string + CName() types.PrincipalName + Destroy() +} + +// writePackage appends length in big endian before the payload, and sends it to kafka +func (krbAuth *GSSAPIKerberosAuth) writePackage(broker *Broker, payload []byte) (int, error) { + length := uint64(len(payload)) + size := length + 4 // 4 byte length header + payload + if size > math.MaxInt32 { + return 0, 
errors.New("payload too large, will overflow int32") + } + finalPackage := make([]byte, size) + copy(finalPackage[4:], payload) + binary.BigEndian.PutUint32(finalPackage, uint32(length)) + bytes, err := broker.conn.Write(finalPackage) + if err != nil { + return bytes, err + } + return bytes, nil +} + +// readPackage reads payload length (4 bytes) and then reads the payload into []byte +func (krbAuth *GSSAPIKerberosAuth) readPackage(broker *Broker) ([]byte, int, error) { + bytesRead := 0 + lengthInBytes := make([]byte, 4) + bytes, err := io.ReadFull(broker.conn, lengthInBytes) + if err != nil { + return nil, bytesRead, err + } + bytesRead += bytes + payloadLength := binary.BigEndian.Uint32(lengthInBytes) + payloadBytes := make([]byte, payloadLength) // buffer for read.. + bytes, err = io.ReadFull(broker.conn, payloadBytes) // read bytes + if err != nil { + return payloadBytes, bytesRead, err + } + bytesRead += bytes + return payloadBytes, bytesRead, nil +} + +func (krbAuth *GSSAPIKerberosAuth) newAuthenticatorChecksum() []byte { + a := make([]byte, 24) + flags := []int{gssapi.ContextFlagInteg, gssapi.ContextFlagConf} + binary.LittleEndian.PutUint32(a[:4], 16) + for _, i := range flags { + f := binary.LittleEndian.Uint32(a[20:24]) + f |= uint32(i) + binary.LittleEndian.PutUint32(a[20:24], f) + } + return a +} + +/* +* +* Construct Kerberos AP_REQ package, conforming to RFC-4120 +* https://tools.ietf.org/html/rfc4120#page-84 +* + */ +func (krbAuth *GSSAPIKerberosAuth) createKrb5Token( + domain string, cname types.PrincipalName, + ticket messages.Ticket, + sessionKey types.EncryptionKey) ([]byte, error) { + auth, err := types.NewAuthenticator(domain, cname) + if err != nil { + return nil, err + } + auth.Cksum = types.Checksum{ + CksumType: chksumtype.GSSAPI, + Checksum: krbAuth.newAuthenticatorChecksum(), + } + APReq, err := messages.NewAPReq( + ticket, + sessionKey, + auth, + ) + if err != nil { + return nil, err + } + aprBytes := make([]byte, 2) + binary.BigEndian.PutUint16(aprBytes, TOK_ID_KRB_AP_REQ) + tb, err := APReq.Marshal() + if err != nil { + return nil, err + } + aprBytes = append(aprBytes, tb...) + return aprBytes, nil +} + +/* +* +* Append the GSS-API header to the payload, conforming to RFC-2743 +* Section 3.1, Mechanism-Independent Token Format +* +* https://tools.ietf.org/html/rfc2743#page-81 +* +* GSSAPIHeader + +* + */ +func (krbAuth *GSSAPIKerberosAuth) appendGSSAPIHeader(payload []byte) ([]byte, error) { + oidBytes, err := asn1.Marshal(gssapi.OIDKRB5.OID()) + if err != nil { + return nil, err + } + tkoLengthBytes := asn1tools.MarshalLengthBytes(len(oidBytes) + len(payload)) + GSSHeader := append([]byte{GSS_API_GENERIC_TAG}, tkoLengthBytes...) + GSSHeader = append(GSSHeader, oidBytes...) + GSSPackage := append(GSSHeader, payload...) + return GSSPackage, nil +} + +func (krbAuth *GSSAPIKerberosAuth) initSecContext(bytes []byte, kerberosClient KerberosClient) ([]byte, error) { + switch krbAuth.step { + case GSS_API_INITIAL: + aprBytes, err := krbAuth.createKrb5Token( + kerberosClient.Domain(), + kerberosClient.CName(), + krbAuth.ticket, + krbAuth.encKey) + if err != nil { + return nil, err + } + krbAuth.step = GSS_API_VERIFY + return krbAuth.appendGSSAPIHeader(aprBytes) + case GSS_API_VERIFY: + wrapTokenReq := gssapi.WrapToken{} + if err := wrapTokenReq.Unmarshal(bytes, true); err != nil { + return nil, err + } + // Validate response. 
+ isValid, err := wrapTokenReq.Verify(krbAuth.encKey, keyusage.GSSAPI_ACCEPTOR_SEAL) + if !isValid { + return nil, err + } + + wrapTokenResponse, err := gssapi.NewInitiatorWrapToken(wrapTokenReq.Payload, krbAuth.encKey) + if err != nil { + return nil, err + } + krbAuth.step = GSS_API_FINISH + return wrapTokenResponse.Marshal() + } + return nil, nil +} + +/* This does the handshake for authorization */ +func (krbAuth *GSSAPIKerberosAuth) Authorize(broker *Broker) error { + kerberosClient, err := krbAuth.NewKerberosClientFunc(krbAuth.Config) + if err != nil { + Logger.Printf("Kerberos client error: %s", err) + return err + } + + err = kerberosClient.Login() + if err != nil { + Logger.Printf("Kerberos client error: %s", err) + return err + } + // Construct SPN using serviceName and host + // SPN format: <SERVICE>/<FQDN> + + host := strings.SplitN(broker.addr, ":", 2)[0] // Strip port part + spn := fmt.Sprintf("%s/%s", broker.conf.Net.SASL.GSSAPI.ServiceName, host) + + ticket, encKey, err := kerberosClient.GetServiceTicket(spn) + if err != nil { + Logger.Printf("Error getting Kerberos service ticket: %s", err) + return err + } + krbAuth.ticket = ticket + krbAuth.encKey = encKey + krbAuth.step = GSS_API_INITIAL + var receivedBytes []byte = nil + defer kerberosClient.Destroy() + for { + packBytes, err := krbAuth.initSecContext(receivedBytes, kerberosClient) + if err != nil { + Logger.Printf("Error while performing GSSAPI Kerberos Authentication: %s\n", err) + return err + } + requestTime := time.Now() + bytesWritten, err := krbAuth.writePackage(broker, packBytes) + if err != nil { + Logger.Printf("Error while performing GSSAPI Kerberos Authentication: %s\n", err) + return err + } + broker.updateOutgoingCommunicationMetrics(bytesWritten) + if krbAuth.step == GSS_API_VERIFY { + bytesRead := 0 + receivedBytes, bytesRead, err = krbAuth.readPackage(broker) + requestLatency := time.Since(requestTime) + broker.updateIncomingCommunicationMetrics(bytesRead, requestLatency) + if err != nil { + Logger.Printf("Error while performing GSSAPI Kerberos Authentication: %s\n", err) + return err + } + } else if krbAuth.step == GSS_API_FINISH { + return nil + } + } +} diff --git a/vendor/github.com/Shopify/sarama/heartbeat_request.go b/vendor/github.com/Shopify/sarama/heartbeat_request.go new file mode 100644 index 00000000000..e9d9af19110 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/heartbeat_request.go @@ -0,0 +1,51 @@ +package sarama + +type HeartbeatRequest struct { + GroupId string + GenerationId int32 + MemberId string +} + +func (r *HeartbeatRequest) encode(pe packetEncoder) error { + if err := pe.putString(r.GroupId); err != nil { + return err + } + + pe.putInt32(r.GenerationId) + + if err := pe.putString(r.MemberId); err != nil { + return err + } + + return nil +} + +func (r *HeartbeatRequest) decode(pd packetDecoder, version int16) (err error) { + if r.GroupId, err = pd.getString(); err != nil { + return + } + if r.GenerationId, err = pd.getInt32(); err != nil { + return + } + if r.MemberId, err = pd.getString(); err != nil { + return + } + + return nil +} + +func (r *HeartbeatRequest) key() int16 { + return 12 +} + +func (r *HeartbeatRequest) version() int16 { + return 0 +} + +func (r *HeartbeatRequest) headerVersion() int16 { + return 1 +} + +func (r *HeartbeatRequest) requiredVersion() KafkaVersion { + return V0_9_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/heartbeat_response.go b/vendor/github.com/Shopify/sarama/heartbeat_response.go new file mode 100644 index 00000000000..577ab72e574 --- /dev/null
+++ b/vendor/github.com/Shopify/sarama/heartbeat_response.go @@ -0,0 +1,36 @@ +package sarama + +type HeartbeatResponse struct { + Err KError +} + +func (r *HeartbeatResponse) encode(pe packetEncoder) error { + pe.putInt16(int16(r.Err)) + return nil +} + +func (r *HeartbeatResponse) decode(pd packetDecoder, version int16) error { + kerr, err := pd.getInt16() + if err != nil { + return err + } + r.Err = KError(kerr) + + return nil +} + +func (r *HeartbeatResponse) key() int16 { + return 12 +} + +func (r *HeartbeatResponse) version() int16 { + return 0 +} + +func (r *HeartbeatResponse) headerVersion() int16 { + return 0 +} + +func (r *HeartbeatResponse) requiredVersion() KafkaVersion { + return V0_9_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/incremental_alter_configs_request.go b/vendor/github.com/Shopify/sarama/incremental_alter_configs_request.go new file mode 100644 index 00000000000..c4d05a97204 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/incremental_alter_configs_request.go @@ -0,0 +1,173 @@ +package sarama + +type IncrementalAlterConfigsOperation int8 + +const ( + IncrementalAlterConfigsOperationSet IncrementalAlterConfigsOperation = iota + IncrementalAlterConfigsOperationDelete + IncrementalAlterConfigsOperationAppend + IncrementalAlterConfigsOperationSubtract +) + +// IncrementalAlterConfigsRequest is an incremental alter config request type +type IncrementalAlterConfigsRequest struct { + Resources []*IncrementalAlterConfigsResource + ValidateOnly bool +} + +type IncrementalAlterConfigsResource struct { + Type ConfigResourceType + Name string + ConfigEntries map[string]IncrementalAlterConfigsEntry +} + +type IncrementalAlterConfigsEntry struct { + Operation IncrementalAlterConfigsOperation + Value *string +} + +func (a *IncrementalAlterConfigsRequest) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(a.Resources)); err != nil { + return err + } + + for _, r := range a.Resources { + if err := r.encode(pe); err != nil { + return err + } + } + + pe.putBool(a.ValidateOnly) + return nil +} + +func (a *IncrementalAlterConfigsRequest) decode(pd packetDecoder, version int16) error { + resourceCount, err := pd.getArrayLength() + if err != nil { + return err + } + + a.Resources = make([]*IncrementalAlterConfigsResource, resourceCount) + for i := range a.Resources { + r := &IncrementalAlterConfigsResource{} + err = r.decode(pd, version) + if err != nil { + return err + } + a.Resources[i] = r + } + + validateOnly, err := pd.getBool() + if err != nil { + return err + } + + a.ValidateOnly = validateOnly + + return nil +} + +func (a *IncrementalAlterConfigsResource) encode(pe packetEncoder) error { + pe.putInt8(int8(a.Type)) + + if err := pe.putString(a.Name); err != nil { + return err + } + + if err := pe.putArrayLength(len(a.ConfigEntries)); err != nil { + return err + } + + for name, e := range a.ConfigEntries { + if err := pe.putString(name); err != nil { + return err + } + + if err := e.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (a *IncrementalAlterConfigsResource) decode(pd packetDecoder, version int16) error { + t, err := pd.getInt8() + if err != nil { + return err + } + a.Type = ConfigResourceType(t) + + name, err := pd.getString() + if err != nil { + return err + } + a.Name = name + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + if n > 0 { + a.ConfigEntries = make(map[string]IncrementalAlterConfigsEntry, n) + for i := 0; i < n; i++ { + name, err := pd.getString() + if err != nil { + return err + } + + 
var v IncrementalAlterConfigsEntry + + if err := v.decode(pd, version); err != nil { + return err + } + + a.ConfigEntries[name] = v + } + } + return err +} + +func (a *IncrementalAlterConfigsEntry) encode(pe packetEncoder) error { + pe.putInt8(int8(a.Operation)) + + if err := pe.putNullableString(a.Value); err != nil { + return err + } + + return nil +} + +func (a *IncrementalAlterConfigsEntry) decode(pd packetDecoder, version int16) error { + t, err := pd.getInt8() + if err != nil { + return err + } + a.Operation = IncrementalAlterConfigsOperation(t) + + s, err := pd.getNullableString() + if err != nil { + return err + } + + a.Value = s + + return nil +} + +func (a *IncrementalAlterConfigsRequest) key() int16 { + return 44 +} + +func (a *IncrementalAlterConfigsRequest) version() int16 { + return 0 +} + +func (a *IncrementalAlterConfigsRequest) headerVersion() int16 { + return 1 +} + +func (a *IncrementalAlterConfigsRequest) requiredVersion() KafkaVersion { + return V2_3_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/incremental_alter_configs_response.go b/vendor/github.com/Shopify/sarama/incremental_alter_configs_response.go new file mode 100644 index 00000000000..3e8c4500c32 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/incremental_alter_configs_response.go @@ -0,0 +1,66 @@ +package sarama + +import "time" + +// IncrementalAlterConfigsResponse is a response type for incremental alter config +type IncrementalAlterConfigsResponse struct { + ThrottleTime time.Duration + Resources []*AlterConfigsResourceResponse +} + +func (a *IncrementalAlterConfigsResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(a.ThrottleTime / time.Millisecond)) + + if err := pe.putArrayLength(len(a.Resources)); err != nil { + return err + } + + for _, v := range a.Resources { + if err := v.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (a *IncrementalAlterConfigsResponse) decode(pd packetDecoder, version int16) error { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + responseCount, err := pd.getArrayLength() + if err != nil { + return err + } + + a.Resources = make([]*AlterConfigsResourceResponse, responseCount) + + for i := range a.Resources { + a.Resources[i] = new(AlterConfigsResourceResponse) + + if err := a.Resources[i].decode(pd, version); err != nil { + return err + } + } + + return nil +} + +func (a *IncrementalAlterConfigsResponse) key() int16 { + return 44 +} + +func (a *IncrementalAlterConfigsResponse) version() int16 { + return 0 +} + +func (a *IncrementalAlterConfigsResponse) headerVersion() int16 { + return 0 +} + +func (a *IncrementalAlterConfigsResponse) requiredVersion() KafkaVersion { + return V2_3_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/init_producer_id_request.go b/vendor/github.com/Shopify/sarama/init_producer_id_request.go new file mode 100644 index 00000000000..689444397d6 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/init_producer_id_request.go @@ -0,0 +1,47 @@ +package sarama + +import "time" + +type InitProducerIDRequest struct { + TransactionalID *string + TransactionTimeout time.Duration +} + +func (i *InitProducerIDRequest) encode(pe packetEncoder) error { + if err := pe.putNullableString(i.TransactionalID); err != nil { + return err + } + pe.putInt32(int32(i.TransactionTimeout / time.Millisecond)) + + return nil +} + +func (i *InitProducerIDRequest) decode(pd packetDecoder, version int16) (err error) { + if i.TransactionalID, 
err = pd.getNullableString(); err != nil { + return err + } + + timeout, err := pd.getInt32() + if err != nil { + return err + } + i.TransactionTimeout = time.Duration(timeout) * time.Millisecond + + return nil +} + +func (i *InitProducerIDRequest) key() int16 { + return 22 +} + +func (i *InitProducerIDRequest) version() int16 { + return 0 +} + +func (i *InitProducerIDRequest) headerVersion() int16 { + return 1 +} + +func (i *InitProducerIDRequest) requiredVersion() KafkaVersion { + return V0_11_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/init_producer_id_response.go b/vendor/github.com/Shopify/sarama/init_producer_id_response.go new file mode 100644 index 00000000000..3e1242bf622 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/init_producer_id_response.go @@ -0,0 +1,59 @@ +package sarama + +import "time" + +type InitProducerIDResponse struct { + ThrottleTime time.Duration + Err KError + ProducerID int64 + ProducerEpoch int16 +} + +func (i *InitProducerIDResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(i.ThrottleTime / time.Millisecond)) + pe.putInt16(int16(i.Err)) + pe.putInt64(i.ProducerID) + pe.putInt16(i.ProducerEpoch) + + return nil +} + +func (i *InitProducerIDResponse) decode(pd packetDecoder, version int16) (err error) { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + i.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + kerr, err := pd.getInt16() + if err != nil { + return err + } + i.Err = KError(kerr) + + if i.ProducerID, err = pd.getInt64(); err != nil { + return err + } + + if i.ProducerEpoch, err = pd.getInt16(); err != nil { + return err + } + + return nil +} + +func (i *InitProducerIDResponse) key() int16 { + return 22 +} + +func (i *InitProducerIDResponse) version() int16 { + return 0 +} + +func (i *InitProducerIDResponse) headerVersion() int16 { + return 0 +} + +func (i *InitProducerIDResponse) requiredVersion() KafkaVersion { + return V0_11_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/interceptors.go b/vendor/github.com/Shopify/sarama/interceptors.go new file mode 100644 index 00000000000..d0d33e526f8 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/interceptors.go @@ -0,0 +1,43 @@ +package sarama + +// ProducerInterceptor allows you to intercept (and possibly mutate) the records +// received by the producer before they are published to the Kafka cluster. +// https://cwiki.apache.org/confluence/display/KAFKA/KIP-42%3A+Add+Producer+and+Consumer+Interceptors#KIP42:AddProducerandConsumerInterceptors-Motivation +type ProducerInterceptor interface { + + // OnSend is called when the producer message is intercepted. Please avoid + // modifying the message until it's safe to do so, as this is _not_ a copy + // of the message. + OnSend(*ProducerMessage) +} + +// ConsumerInterceptor allows you to intercept (and possibly mutate) the records +// received by the consumer before they are sent to the messages channel. +// https://cwiki.apache.org/confluence/display/KAFKA/KIP-42%3A+Add+Producer+and+Consumer+Interceptors#KIP42:AddProducerandConsumerInterceptors-Motivation +type ConsumerInterceptor interface { + + // OnConsume is called when the consumed message is intercepted. Please + // avoid modifying the message until it's safe to do so, as this is _not_ a + // copy of the message. 
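+ // Panics raised inside an interceptor are recovered and logged by the + // client rather than propagated (see safelyApplyInterceptor below).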
+ OnConsume(*ConsumerMessage) +} + +func (msg *ProducerMessage) safelyApplyInterceptor(interceptor ProducerInterceptor) { + defer func() { + if r := recover(); r != nil { + Logger.Printf("Error when calling producer interceptor: %s, %v\n", interceptor, r) + } + }() + + interceptor.OnSend(msg) +} + +func (msg *ConsumerMessage) safelyApplyInterceptor(interceptor ConsumerInterceptor) { + defer func() { + if r := recover(); r != nil { + Logger.Printf("Error when calling consumer interceptor: %s, %v\n", interceptor, r) + } + }() + + interceptor.OnConsume(msg) +} diff --git a/vendor/github.com/Shopify/sarama/join_group_request.go b/vendor/github.com/Shopify/sarama/join_group_request.go new file mode 100644 index 00000000000..3734e82e406 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/join_group_request.go @@ -0,0 +1,167 @@ +package sarama + +type GroupProtocol struct { + Name string + Metadata []byte +} + +func (p *GroupProtocol) decode(pd packetDecoder) (err error) { + p.Name, err = pd.getString() + if err != nil { + return err + } + p.Metadata, err = pd.getBytes() + return err +} + +func (p *GroupProtocol) encode(pe packetEncoder) (err error) { + if err := pe.putString(p.Name); err != nil { + return err + } + if err := pe.putBytes(p.Metadata); err != nil { + return err + } + return nil +} + +type JoinGroupRequest struct { + Version int16 + GroupId string + SessionTimeout int32 + RebalanceTimeout int32 + MemberId string + ProtocolType string + GroupProtocols map[string][]byte // deprecated; use OrderedGroupProtocols + OrderedGroupProtocols []*GroupProtocol +} + +func (r *JoinGroupRequest) encode(pe packetEncoder) error { + if err := pe.putString(r.GroupId); err != nil { + return err + } + pe.putInt32(r.SessionTimeout) + if r.Version >= 1 { + pe.putInt32(r.RebalanceTimeout) + } + if err := pe.putString(r.MemberId); err != nil { + return err + } + if err := pe.putString(r.ProtocolType); err != nil { + return err + } + + if len(r.GroupProtocols) > 0 { + if len(r.OrderedGroupProtocols) > 0 { + return PacketDecodingError{"cannot specify both GroupProtocols and OrderedGroupProtocols on JoinGroupRequest"} + } + + if err := pe.putArrayLength(len(r.GroupProtocols)); err != nil { + return err + } + for name, metadata := range r.GroupProtocols { + if err := pe.putString(name); err != nil { + return err + } + if err := pe.putBytes(metadata); err != nil { + return err + } + } + } else { + if err := pe.putArrayLength(len(r.OrderedGroupProtocols)); err != nil { + return err + } + for _, protocol := range r.OrderedGroupProtocols { + if err := protocol.encode(pe); err != nil { + return err + } + } + } + + return nil +} + +func (r *JoinGroupRequest) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + if r.GroupId, err = pd.getString(); err != nil { + return + } + + if r.SessionTimeout, err = pd.getInt32(); err != nil { + return + } + + if version >= 1 { + if r.RebalanceTimeout, err = pd.getInt32(); err != nil { + return err + } + } + + if r.MemberId, err = pd.getString(); err != nil { + return + } + + if r.ProtocolType, err = pd.getString(); err != nil { + return + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + if n == 0 { + return nil + } + + r.GroupProtocols = make(map[string][]byte) + for i := 0; i < n; i++ { + protocol := &GroupProtocol{} + if err := protocol.decode(pd); err != nil { + return err + } + r.GroupProtocols[protocol.Name] = protocol.Metadata + r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, protocol) + } + + return nil +} +
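+// key returns the Kafka API key for JoinGroup requests (11).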
+func (r *JoinGroupRequest) key() int16 { + return 11 +} + +func (r *JoinGroupRequest) version() int16 { + return r.Version +} + +func (r *JoinGroupRequest) headerVersion() int16 { + return 1 +} + +func (r *JoinGroupRequest) requiredVersion() KafkaVersion { + switch r.Version { + case 2: + return V0_11_0_0 + case 1: + return V0_10_1_0 + default: + return V0_9_0_0 + } +} + +func (r *JoinGroupRequest) AddGroupProtocol(name string, metadata []byte) { + r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, &GroupProtocol{ + Name: name, + Metadata: metadata, + }) +} + +func (r *JoinGroupRequest) AddGroupProtocolMetadata(name string, metadata *ConsumerGroupMemberMetadata) error { + bin, err := encode(metadata, nil) + if err != nil { + return err + } + + r.AddGroupProtocol(name, bin) + return nil +} diff --git a/vendor/github.com/Shopify/sarama/join_group_response.go b/vendor/github.com/Shopify/sarama/join_group_response.go new file mode 100644 index 00000000000..54b0a45c28e --- /dev/null +++ b/vendor/github.com/Shopify/sarama/join_group_response.go @@ -0,0 +1,139 @@ +package sarama + +type JoinGroupResponse struct { + Version int16 + ThrottleTime int32 + Err KError + GenerationId int32 + GroupProtocol string + LeaderId string + MemberId string + Members map[string][]byte +} + +func (r *JoinGroupResponse) GetMembers() (map[string]ConsumerGroupMemberMetadata, error) { + members := make(map[string]ConsumerGroupMemberMetadata, len(r.Members)) + for id, bin := range r.Members { + meta := new(ConsumerGroupMemberMetadata) + if err := decode(bin, meta); err != nil { + return nil, err + } + members[id] = *meta + } + return members, nil +} + +func (r *JoinGroupResponse) encode(pe packetEncoder) error { + if r.Version >= 2 { + pe.putInt32(r.ThrottleTime) + } + pe.putInt16(int16(r.Err)) + pe.putInt32(r.GenerationId) + + if err := pe.putString(r.GroupProtocol); err != nil { + return err + } + if err := pe.putString(r.LeaderId); err != nil { + return err + } + if err := pe.putString(r.MemberId); err != nil { + return err + } + + if err := pe.putArrayLength(len(r.Members)); err != nil { + return err + } + + for memberId, memberMetadata := range r.Members { + if err := pe.putString(memberId); err != nil { + return err + } + + if err := pe.putBytes(memberMetadata); err != nil { + return err + } + } + + return nil +} + +func (r *JoinGroupResponse) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + if version >= 2 { + if r.ThrottleTime, err = pd.getInt32(); err != nil { + return + } + } + + kerr, err := pd.getInt16() + if err != nil { + return err + } + + r.Err = KError(kerr) + + if r.GenerationId, err = pd.getInt32(); err != nil { + return + } + + if r.GroupProtocol, err = pd.getString(); err != nil { + return + } + + if r.LeaderId, err = pd.getString(); err != nil { + return + } + + if r.MemberId, err = pd.getString(); err != nil { + return + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + if n == 0 { + return nil + } + + r.Members = make(map[string][]byte) + for i := 0; i < n; i++ { + memberId, err := pd.getString() + if err != nil { + return err + } + + memberMetadata, err := pd.getBytes() + if err != nil { + return err + } + + r.Members[memberId] = memberMetadata + } + + return nil +} + +func (r *JoinGroupResponse) key() int16 { + return 11 +} + +func (r *JoinGroupResponse) version() int16 { + return r.Version +} + +func (r *JoinGroupResponse) headerVersion() int16 { + return 0 +} + +func (r *JoinGroupResponse) requiredVersion() KafkaVersion { + switch 
r.Version { + case 2: + return V0_11_0_0 + case 1: + return V0_10_1_0 + default: + return V0_9_0_0 + } +} diff --git a/vendor/github.com/Shopify/sarama/kerberos_client.go b/vendor/github.com/Shopify/sarama/kerberos_client.go new file mode 100644 index 00000000000..01a53193bb9 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/kerberos_client.go @@ -0,0 +1,46 @@ +package sarama + +import ( + krb5client "github.com/jcmturner/gokrb5/v8/client" + krb5config "github.com/jcmturner/gokrb5/v8/config" + "github.com/jcmturner/gokrb5/v8/keytab" + "github.com/jcmturner/gokrb5/v8/types" +) + +type KerberosGoKrb5Client struct { + krb5client.Client +} + +func (c *KerberosGoKrb5Client) Domain() string { + return c.Credentials.Domain() +} + +func (c *KerberosGoKrb5Client) CName() types.PrincipalName { + return c.Credentials.CName() +} + +// NewKerberosClient creates a Kerberos client used to obtain TGT and TGS tokens. +// It uses a pure Go Kerberos 5 implementation (RFC 4121 and RFC 4120), +// built on the gokrb5 library, a pure Go Kerberos client with some GSS-API capabilities. +func NewKerberosClient(config *GSSAPIConfig) (KerberosClient, error) { + cfg, err := krb5config.Load(config.KerberosConfigPath) + if err != nil { + return nil, err + } + return createClient(config, cfg) +} + +func createClient(config *GSSAPIConfig, cfg *krb5config.Config) (KerberosClient, error) { + var client *krb5client.Client + if config.AuthType == KRB5_KEYTAB_AUTH { + kt, err := keytab.Load(config.KeyTabPath) + if err != nil { + return nil, err + } + client = krb5client.NewWithKeytab(config.Username, config.Realm, kt, cfg, krb5client.DisablePAFXFAST(config.DisablePAFXFAST)) + } else { + client = krb5client.NewWithPassword(config.Username, + config.Realm, config.Password, cfg, krb5client.DisablePAFXFAST(config.DisablePAFXFAST)) + } + return &KerberosGoKrb5Client{*client}, nil +} diff --git a/vendor/github.com/Shopify/sarama/leave_group_request.go b/vendor/github.com/Shopify/sarama/leave_group_request.go new file mode 100644 index 00000000000..d7789b68dbe --- /dev/null +++ b/vendor/github.com/Shopify/sarama/leave_group_request.go @@ -0,0 +1,44 @@ +package sarama + +type LeaveGroupRequest struct { + GroupId string + MemberId string +} + +func (r *LeaveGroupRequest) encode(pe packetEncoder) error { + if err := pe.putString(r.GroupId); err != nil { + return err + } + if err := pe.putString(r.MemberId); err != nil { + return err + } + + return nil +} + +func (r *LeaveGroupRequest) decode(pd packetDecoder, version int16) (err error) { + if r.GroupId, err = pd.getString(); err != nil { + return + } + if r.MemberId, err = pd.getString(); err != nil { + return + } + + return nil +} + +func (r *LeaveGroupRequest) key() int16 { + return 13 +} + +func (r *LeaveGroupRequest) version() int16 { + return 0 +} + +func (r *LeaveGroupRequest) headerVersion() int16 { + return 1 +} + +func (r *LeaveGroupRequest) requiredVersion() KafkaVersion { + return V0_9_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/leave_group_response.go b/vendor/github.com/Shopify/sarama/leave_group_response.go new file mode 100644 index 00000000000..25f8d5eb36b --- /dev/null +++ b/vendor/github.com/Shopify/sarama/leave_group_response.go @@ -0,0 +1,36 @@ +package sarama + +type LeaveGroupResponse struct { + Err KError +} + +func (r *LeaveGroupResponse) encode(pe packetEncoder) error { + pe.putInt16(int16(r.Err)) + return nil +} + +func (r *LeaveGroupResponse) decode(pd packetDecoder, version int16) (err error) { + kerr, err := pd.getInt16() + if err != nil { +
return err + } + r.Err = KError(kerr) + + return nil +} + +func (r *LeaveGroupResponse) key() int16 { + return 13 +} + +func (r *LeaveGroupResponse) version() int16 { + return 0 +} + +func (r *LeaveGroupResponse) headerVersion() int16 { + return 0 +} + +func (r *LeaveGroupResponse) requiredVersion() KafkaVersion { + return V0_9_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/length_field.go b/vendor/github.com/Shopify/sarama/length_field.go new file mode 100644 index 00000000000..7d864f6bf97 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/length_field.go @@ -0,0 +1,99 @@ +package sarama + +import ( + "encoding/binary" + "sync" +) + +// LengthField implements the PushEncoder and PushDecoder interfaces for calculating 4-byte lengths. +type lengthField struct { + startOffset int + length int32 +} + +var lengthFieldPool = sync.Pool{} + +func acquireLengthField() *lengthField { + val := lengthFieldPool.Get() + if val != nil { + return val.(*lengthField) + } + return &lengthField{} +} + +func releaseLengthField(m *lengthField) { + lengthFieldPool.Put(m) +} + +func (l *lengthField) decode(pd packetDecoder) error { + var err error + l.length, err = pd.getInt32() + if err != nil { + return err + } + if l.length > int32(pd.remaining()) { + return ErrInsufficientData + } + return nil +} + +func (l *lengthField) saveOffset(in int) { + l.startOffset = in +} + +func (l *lengthField) reserveLength() int { + return 4 +} + +func (l *lengthField) run(curOffset int, buf []byte) error { + binary.BigEndian.PutUint32(buf[l.startOffset:], uint32(curOffset-l.startOffset-4)) + return nil +} + +func (l *lengthField) check(curOffset int, buf []byte) error { + if int32(curOffset-l.startOffset-4) != l.length { + return PacketDecodingError{"length field invalid"} + } + + return nil +} + +type varintLengthField struct { + startOffset int + length int64 +} + +func (l *varintLengthField) decode(pd packetDecoder) error { + var err error + l.length, err = pd.getVarint() + return err +} + +func (l *varintLengthField) saveOffset(in int) { + l.startOffset = in +} + +func (l *varintLengthField) adjustLength(currOffset int) int { + oldFieldSize := l.reserveLength() + l.length = int64(currOffset - l.startOffset - oldFieldSize) + + return l.reserveLength() - oldFieldSize +} + +func (l *varintLengthField) reserveLength() int { + var tmp [binary.MaxVarintLen64]byte + return binary.PutVarint(tmp[:], l.length) +} + +func (l *varintLengthField) run(curOffset int, buf []byte) error { + binary.PutVarint(buf[l.startOffset:], l.length) + return nil +} + +func (l *varintLengthField) check(curOffset int, buf []byte) error { + if int64(curOffset-l.startOffset-l.reserveLength()) != l.length { + return PacketDecodingError{"length field invalid"} + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/list_groups_request.go b/vendor/github.com/Shopify/sarama/list_groups_request.go new file mode 100644 index 00000000000..4553b2d2ea0 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/list_groups_request.go @@ -0,0 +1,27 @@ +package sarama + +type ListGroupsRequest struct{} + +func (r *ListGroupsRequest) encode(pe packetEncoder) error { + return nil +} + +func (r *ListGroupsRequest) decode(pd packetDecoder, version int16) (err error) { + return nil +} + +func (r *ListGroupsRequest) key() int16 { + return 16 +} + +func (r *ListGroupsRequest) version() int16 { + return 0 +} + +func (r *ListGroupsRequest) headerVersion() int16 { + return 1 +} + +func (r *ListGroupsRequest) requiredVersion() KafkaVersion { + return V0_9_0_0 +} 
diff --git a/vendor/github.com/Shopify/sarama/list_groups_response.go b/vendor/github.com/Shopify/sarama/list_groups_response.go new file mode 100644 index 00000000000..777bae7e63e --- /dev/null +++ b/vendor/github.com/Shopify/sarama/list_groups_response.go @@ -0,0 +1,73 @@ +package sarama + +type ListGroupsResponse struct { + Err KError + Groups map[string]string +} + +func (r *ListGroupsResponse) encode(pe packetEncoder) error { + pe.putInt16(int16(r.Err)) + + if err := pe.putArrayLength(len(r.Groups)); err != nil { + return err + } + for groupId, protocolType := range r.Groups { + if err := pe.putString(groupId); err != nil { + return err + } + if err := pe.putString(protocolType); err != nil { + return err + } + } + + return nil +} + +func (r *ListGroupsResponse) decode(pd packetDecoder, version int16) error { + kerr, err := pd.getInt16() + if err != nil { + return err + } + + r.Err = KError(kerr) + + n, err := pd.getArrayLength() + if err != nil { + return err + } + if n == 0 { + return nil + } + + r.Groups = make(map[string]string) + for i := 0; i < n; i++ { + groupId, err := pd.getString() + if err != nil { + return err + } + protocolType, err := pd.getString() + if err != nil { + return err + } + + r.Groups[groupId] = protocolType + } + + return nil +} + +func (r *ListGroupsResponse) key() int16 { + return 16 +} + +func (r *ListGroupsResponse) version() int16 { + return 0 +} + +func (r *ListGroupsResponse) headerVersion() int16 { + return 0 +} + +func (r *ListGroupsResponse) requiredVersion() KafkaVersion { + return V0_9_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/list_partition_reassignments_request.go b/vendor/github.com/Shopify/sarama/list_partition_reassignments_request.go new file mode 100644 index 00000000000..c1ffa9ba02b --- /dev/null +++ b/vendor/github.com/Shopify/sarama/list_partition_reassignments_request.go @@ -0,0 +1,98 @@ +package sarama + +type ListPartitionReassignmentsRequest struct { + TimeoutMs int32 + blocks map[string][]int32 + Version int16 +} + +func (r *ListPartitionReassignmentsRequest) encode(pe packetEncoder) error { + pe.putInt32(r.TimeoutMs) + + pe.putCompactArrayLength(len(r.blocks)) + + for topic, partitions := range r.blocks { + if err := pe.putCompactString(topic); err != nil { + return err + } + + if err := pe.putCompactInt32Array(partitions); err != nil { + return err + } + + pe.putEmptyTaggedFieldArray() + } + + pe.putEmptyTaggedFieldArray() + + return nil +} + +func (r *ListPartitionReassignmentsRequest) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + if r.TimeoutMs, err = pd.getInt32(); err != nil { + return err + } + + topicCount, err := pd.getCompactArrayLength() + if err != nil { + return err + } + if topicCount > 0 { + r.blocks = make(map[string][]int32) + for i := 0; i < topicCount; i++ { + topic, err := pd.getCompactString() + if err != nil { + return err + } + partitionCount, err := pd.getCompactArrayLength() + if err != nil { + return err + } + r.blocks[topic] = make([]int32, partitionCount) + for j := 0; j < partitionCount; j++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + r.blocks[topic][j] = partition + } + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + } + + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + + return +} + +func (r *ListPartitionReassignmentsRequest) key() int16 { + return 46 +} + +func (r *ListPartitionReassignmentsRequest) version() int16 { + return r.Version +} + +func (r 
*ListPartitionReassignmentsRequest) headerVersion() int16 { + return 2 +} + +func (r *ListPartitionReassignmentsRequest) requiredVersion() KafkaVersion { + return V2_4_0_0 +} + +func (r *ListPartitionReassignmentsRequest) AddBlock(topic string, partitionIDs []int32) { + if r.blocks == nil { + r.blocks = make(map[string][]int32) + } + + if r.blocks[topic] == nil { + r.blocks[topic] = partitionIDs + } +} diff --git a/vendor/github.com/Shopify/sarama/list_partition_reassignments_response.go b/vendor/github.com/Shopify/sarama/list_partition_reassignments_response.go new file mode 100644 index 00000000000..4baa6a08e83 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/list_partition_reassignments_response.go @@ -0,0 +1,169 @@ +package sarama + +type PartitionReplicaReassignmentsStatus struct { + Replicas []int32 + AddingReplicas []int32 + RemovingReplicas []int32 +} + +func (b *PartitionReplicaReassignmentsStatus) encode(pe packetEncoder) error { + if err := pe.putCompactInt32Array(b.Replicas); err != nil { + return err + } + if err := pe.putCompactInt32Array(b.AddingReplicas); err != nil { + return err + } + if err := pe.putCompactInt32Array(b.RemovingReplicas); err != nil { + return err + } + + pe.putEmptyTaggedFieldArray() + + return nil +} + +func (b *PartitionReplicaReassignmentsStatus) decode(pd packetDecoder) (err error) { + if b.Replicas, err = pd.getCompactInt32Array(); err != nil { + return err + } + + if b.AddingReplicas, err = pd.getCompactInt32Array(); err != nil { + return err + } + + if b.RemovingReplicas, err = pd.getCompactInt32Array(); err != nil { + return err + } + + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + + return err +} + +type ListPartitionReassignmentsResponse struct { + Version int16 + ThrottleTimeMs int32 + ErrorCode KError + ErrorMessage *string + TopicStatus map[string]map[int32]*PartitionReplicaReassignmentsStatus +} + +func (r *ListPartitionReassignmentsResponse) AddBlock(topic string, partition int32, replicas, addingReplicas, removingReplicas []int32) { + if r.TopicStatus == nil { + r.TopicStatus = make(map[string]map[int32]*PartitionReplicaReassignmentsStatus) + } + partitions := r.TopicStatus[topic] + if partitions == nil { + partitions = make(map[int32]*PartitionReplicaReassignmentsStatus) + r.TopicStatus[topic] = partitions + } + + partitions[partition] = &PartitionReplicaReassignmentsStatus{Replicas: replicas, AddingReplicas: addingReplicas, RemovingReplicas: removingReplicas} +} + +func (r *ListPartitionReassignmentsResponse) encode(pe packetEncoder) error { + pe.putInt32(r.ThrottleTimeMs) + pe.putInt16(int16(r.ErrorCode)) + if err := pe.putNullableCompactString(r.ErrorMessage); err != nil { + return err + } + + pe.putCompactArrayLength(len(r.TopicStatus)) + for topic, partitions := range r.TopicStatus { + if err := pe.putCompactString(topic); err != nil { + return err + } + pe.putCompactArrayLength(len(partitions)) + for partition, block := range partitions { + pe.putInt32(partition) + + if err := block.encode(pe); err != nil { + return err + } + } + pe.putEmptyTaggedFieldArray() + } + + pe.putEmptyTaggedFieldArray() + + return nil +} + +func (r *ListPartitionReassignmentsResponse) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + if r.ThrottleTimeMs, err = pd.getInt32(); err != nil { + return err + } + + kerr, err := pd.getInt16() + if err != nil { + return err + } + + r.ErrorCode = KError(kerr) + + if r.ErrorMessage, err = pd.getCompactNullableString(); err != nil { + return err + } + + 
numTopics, err := pd.getCompactArrayLength() + if err != nil { + return err + } + + r.TopicStatus = make(map[string]map[int32]*PartitionReplicaReassignmentsStatus, numTopics) + for i := 0; i < numTopics; i++ { + topic, err := pd.getCompactString() + if err != nil { + return err + } + + ongoingPartitionReassignments, err := pd.getCompactArrayLength() + if err != nil { + return err + } + + r.TopicStatus[topic] = make(map[int32]*PartitionReplicaReassignmentsStatus, ongoingPartitionReassignments) + + for j := 0; j < ongoingPartitionReassignments; j++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + + block := &PartitionReplicaReassignmentsStatus{} + if err := block.decode(pd); err != nil { + return err + } + r.TopicStatus[topic][partition] = block + } + + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + + return nil +} + +func (r *ListPartitionReassignmentsResponse) key() int16 { + return 46 +} + +func (r *ListPartitionReassignmentsResponse) version() int16 { + return r.Version +} + +func (r *ListPartitionReassignmentsResponse) headerVersion() int16 { + return 1 +} + +func (r *ListPartitionReassignmentsResponse) requiredVersion() KafkaVersion { + return V2_4_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/message.go b/vendor/github.com/Shopify/sarama/message.go new file mode 100644 index 00000000000..fd0d1d90b7f --- /dev/null +++ b/vendor/github.com/Shopify/sarama/message.go @@ -0,0 +1,168 @@ +package sarama + +import ( + "fmt" + "time" +) + +const ( + // CompressionNone no compression + CompressionNone CompressionCodec = iota + // CompressionGZIP compression using GZIP + CompressionGZIP + // CompressionSnappy compression using snappy + CompressionSnappy + // CompressionLZ4 compression using LZ4 + CompressionLZ4 + // CompressionZSTD compression using ZSTD + CompressionZSTD + + // The lowest 3 bits contain the compression codec used for the message + compressionCodecMask int8 = 0x07 + + // Bit 3 set for "LogAppend" timestamps + timestampTypeMask = 0x08 + + // CompressionLevelDefault is the constant to use in CompressionLevel + // to have the default compression level for any codec. The value is picked + // so that it does not collide with any existing compression levels. + CompressionLevelDefault = -1000 +) + +// CompressionCodec represents the various compression codecs recognized by Kafka in messages.
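+// The zero value is CompressionNone; on the wire the codec occupies the +// lowest 3 bits of a message's attributes byte (see compressionCodecMask).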
+type CompressionCodec int8 + +func (cc CompressionCodec) String() string { + return []string{ + "none", + "gzip", + "snappy", + "lz4", + "zstd", + }[int(cc)] +} + +// Message is a kafka message type +type Message struct { + Codec CompressionCodec // codec used to compress the message contents + CompressionLevel int // compression level + LogAppendTime bool // the used timestamp is LogAppendTime + Key []byte // the message key, may be nil + Value []byte // the message contents + Set *MessageSet // the message set a message might wrap + Version int8 // v1 requires Kafka 0.10 + Timestamp time.Time // the timestamp of the message (version 1+ only) + + compressedCache []byte + compressedSize int // used for computing the compression ratio metrics +} + +func (m *Message) encode(pe packetEncoder) error { + pe.push(newCRC32Field(crcIEEE)) + + pe.putInt8(m.Version) + + attributes := int8(m.Codec) & compressionCodecMask + if m.LogAppendTime { + attributes |= timestampTypeMask + } + pe.putInt8(attributes) + + if m.Version >= 1 { + if err := (Timestamp{&m.Timestamp}).encode(pe); err != nil { + return err + } + } + + err := pe.putBytes(m.Key) + if err != nil { + return err + } + + var payload []byte + + if m.compressedCache != nil { + payload = m.compressedCache + m.compressedCache = nil + } else if m.Value != nil { + payload, err = compress(m.Codec, m.CompressionLevel, m.Value) + if err != nil { + return err + } + m.compressedCache = payload + // Keep in mind the compressed payload size for metric gathering + m.compressedSize = len(payload) + } + + if err = pe.putBytes(payload); err != nil { + return err + } + + return pe.pop() +} + +func (m *Message) decode(pd packetDecoder) (err error) { + crc32Decoder := acquireCrc32Field(crcIEEE) + defer releaseCrc32Field(crc32Decoder) + + err = pd.push(crc32Decoder) + if err != nil { + return err + } + + m.Version, err = pd.getInt8() + if err != nil { + return err + } + + if m.Version > 1 { + return PacketDecodingError{fmt.Sprintf("unknown magic byte (%v)", m.Version)} + } + + attribute, err := pd.getInt8() + if err != nil { + return err + } + m.Codec = CompressionCodec(attribute & compressionCodecMask) + m.LogAppendTime = attribute&timestampTypeMask == timestampTypeMask + + if m.Version == 1 { + if err := (Timestamp{&m.Timestamp}).decode(pd); err != nil { + return err + } + } + + m.Key, err = pd.getBytes() + if err != nil { + return err + } + + m.Value, err = pd.getBytes() + if err != nil { + return err + } + + // Required for deep equal assertion during tests but might be useful + // for future metrics about the compression ratio in fetch requests + m.compressedSize = len(m.Value) + + if m.Value != nil && m.Codec != CompressionNone { + m.Value, err = decompress(m.Codec, m.Value) + if err != nil { + return err + } + + if err := m.decodeSet(); err != nil { + return err + } + } + + return pd.pop() +} + +// decodes a message set from a previously encoded bulk-message +func (m *Message) decodeSet() (err error) { + pd := realDecoder{raw: m.Value} + m.Set = &MessageSet{} + return m.Set.decode(&pd) +} diff --git a/vendor/github.com/Shopify/sarama/message_set.go b/vendor/github.com/Shopify/sarama/message_set.go new file mode 100644 index 00000000000..6523ec2f74d --- /dev/null +++ b/vendor/github.com/Shopify/sarama/message_set.go @@ -0,0 +1,111 @@ +package sarama + +type MessageBlock struct { + Offset int64 + Msg *Message +} + +// Messages is a convenience helper which returns all the message blocks +// wrapped in this block's nested set, or the block itself if there is no nested set +func (msb *MessageBlock) Messages()
[]*MessageBlock { + if msb.Msg.Set != nil { + return msb.Msg.Set.Messages + } + return []*MessageBlock{msb} +} + +func (msb *MessageBlock) encode(pe packetEncoder) error { + pe.putInt64(msb.Offset) + pe.push(&lengthField{}) + err := msb.Msg.encode(pe) + if err != nil { + return err + } + return pe.pop() +} + +func (msb *MessageBlock) decode(pd packetDecoder) (err error) { + if msb.Offset, err = pd.getInt64(); err != nil { + return err + } + + lengthDecoder := acquireLengthField() + defer releaseLengthField(lengthDecoder) + + if err = pd.push(lengthDecoder); err != nil { + return err + } + + msb.Msg = new(Message) + if err = msb.Msg.decode(pd); err != nil { + return err + } + + if err = pd.pop(); err != nil { + return err + } + + return nil +} + +type MessageSet struct { + PartialTrailingMessage bool // whether the set on the wire contained an incomplete trailing MessageBlock + OverflowMessage bool // whether the set on the wire contained an overflow message + Messages []*MessageBlock +} + +func (ms *MessageSet) encode(pe packetEncoder) error { + for i := range ms.Messages { + err := ms.Messages[i].encode(pe) + if err != nil { + return err + } + } + return nil +} + +func (ms *MessageSet) decode(pd packetDecoder) (err error) { + ms.Messages = nil + + for pd.remaining() > 0 { + magic, err := magicValue(pd) + if err != nil { + if err == ErrInsufficientData { + ms.PartialTrailingMessage = true + return nil + } + return err + } + + if magic > 1 { + return nil + } + + msb := new(MessageBlock) + err = msb.decode(pd) + switch err { + case nil: + ms.Messages = append(ms.Messages, msb) + case ErrInsufficientData: + // As an optimization the server is allowed to return a partial message at the + // end of the message set. Clients should handle this case. So we just ignore such things. 
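+ // An offset of -1 never occurs for a real record, so it distinguishes + // the overflow case below from a merely truncated trailing message.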
+ if msb.Offset == -1 { + // This is an overflow message caused by chunked down conversion + ms.OverflowMessage = true + } else { + ms.PartialTrailingMessage = true + } + return nil + default: + return err + } + } + + return nil +} + +func (ms *MessageSet) addMessage(msg *Message) { + block := new(MessageBlock) + block.Msg = msg + ms.Messages = append(ms.Messages, block) +} diff --git a/vendor/github.com/Shopify/sarama/metadata_request.go b/vendor/github.com/Shopify/sarama/metadata_request.go new file mode 100644 index 00000000000..e835f5a9c8a --- /dev/null +++ b/vendor/github.com/Shopify/sarama/metadata_request.go @@ -0,0 +1,85 @@ +package sarama + +type MetadataRequest struct { + Version int16 + Topics []string + AllowAutoTopicCreation bool +} + +func (r *MetadataRequest) encode(pe packetEncoder) error { + if r.Version < 0 || r.Version > 5 { + return PacketEncodingError{"invalid or unsupported MetadataRequest version field"} + } + if r.Version == 0 || len(r.Topics) > 0 { + err := pe.putArrayLength(len(r.Topics)) + if err != nil { + return err + } + + for i := range r.Topics { + err = pe.putString(r.Topics[i]) + if err != nil { + return err + } + } + } else { + pe.putInt32(-1) + } + if r.Version > 3 { + pe.putBool(r.AllowAutoTopicCreation) + } + return nil +} + +func (r *MetadataRequest) decode(pd packetDecoder, version int16) error { + r.Version = version + size, err := pd.getInt32() + if err != nil { + return err + } + if size > 0 { + r.Topics = make([]string, size) + for i := range r.Topics { + topic, err := pd.getString() + if err != nil { + return err + } + r.Topics[i] = topic + } + } + if r.Version > 3 { + autoCreation, err := pd.getBool() + if err != nil { + return err + } + r.AllowAutoTopicCreation = autoCreation + } + return nil +} + +func (r *MetadataRequest) key() int16 { + return 3 +} + +func (r *MetadataRequest) version() int16 { + return r.Version +} + +func (r *MetadataRequest) headerVersion() int16 { + return 1 +} + +func (r *MetadataRequest) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V0_10_0_0 + case 2: + return V0_10_1_0 + case 3, 4: + return V0_11_0_0 + case 5: + return V1_0_0_0 + default: + return MinVersion + } +} diff --git a/vendor/github.com/Shopify/sarama/metadata_response.go b/vendor/github.com/Shopify/sarama/metadata_response.go new file mode 100644 index 00000000000..0bb8702cc37 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/metadata_response.go @@ -0,0 +1,325 @@ +package sarama + +type PartitionMetadata struct { + Err KError + ID int32 + Leader int32 + Replicas []int32 + Isr []int32 + OfflineReplicas []int32 +} + +func (pm *PartitionMetadata) decode(pd packetDecoder, version int16) (err error) { + tmp, err := pd.getInt16() + if err != nil { + return err + } + pm.Err = KError(tmp) + + pm.ID, err = pd.getInt32() + if err != nil { + return err + } + + pm.Leader, err = pd.getInt32() + if err != nil { + return err + } + + pm.Replicas, err = pd.getInt32Array() + if err != nil { + return err + } + + pm.Isr, err = pd.getInt32Array() + if err != nil { + return err + } + + if version >= 5 { + pm.OfflineReplicas, err = pd.getInt32Array() + if err != nil { + return err + } + } + + return nil +} + +func (pm *PartitionMetadata) encode(pe packetEncoder, version int16) (err error) { + pe.putInt16(int16(pm.Err)) + pe.putInt32(pm.ID) + pe.putInt32(pm.Leader) + + err = pe.putInt32Array(pm.Replicas) + if err != nil { + return err + } + + err = pe.putInt32Array(pm.Isr) + if err != nil { + return err + } + + if version >= 5 { + err = 
pe.putInt32Array(pm.OfflineReplicas) + if err != nil { + return err + } + } + + return nil +} + +type TopicMetadata struct { + Err KError + Name string + IsInternal bool // Only valid for Version >= 1 + Partitions []*PartitionMetadata +} + +func (tm *TopicMetadata) decode(pd packetDecoder, version int16) (err error) { + tmp, err := pd.getInt16() + if err != nil { + return err + } + tm.Err = KError(tmp) + + tm.Name, err = pd.getString() + if err != nil { + return err + } + + if version >= 1 { + tm.IsInternal, err = pd.getBool() + if err != nil { + return err + } + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + tm.Partitions = make([]*PartitionMetadata, n) + for i := 0; i < n; i++ { + tm.Partitions[i] = new(PartitionMetadata) + err = tm.Partitions[i].decode(pd, version) + if err != nil { + return err + } + } + + return nil +} + +func (tm *TopicMetadata) encode(pe packetEncoder, version int16) (err error) { + pe.putInt16(int16(tm.Err)) + + err = pe.putString(tm.Name) + if err != nil { + return err + } + + if version >= 1 { + pe.putBool(tm.IsInternal) + } + + err = pe.putArrayLength(len(tm.Partitions)) + if err != nil { + return err + } + + for _, pm := range tm.Partitions { + err = pm.encode(pe, version) + if err != nil { + return err + } + } + + return nil +} + +type MetadataResponse struct { + Version int16 + ThrottleTimeMs int32 + Brokers []*Broker + ClusterID *string + ControllerID int32 + Topics []*TopicMetadata +} + +func (r *MetadataResponse) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + if version >= 3 { + r.ThrottleTimeMs, err = pd.getInt32() + if err != nil { + return err + } + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Brokers = make([]*Broker, n) + for i := 0; i < n; i++ { + r.Brokers[i] = new(Broker) + err = r.Brokers[i].decode(pd, version) + if err != nil { + return err + } + } + + if version >= 2 { + r.ClusterID, err = pd.getNullableString() + if err != nil { + return err + } + } + + if version >= 1 { + r.ControllerID, err = pd.getInt32() + if err != nil { + return err + } + } else { + r.ControllerID = -1 + } + + n, err = pd.getArrayLength() + if err != nil { + return err + } + + r.Topics = make([]*TopicMetadata, n) + for i := 0; i < n; i++ { + r.Topics[i] = new(TopicMetadata) + err = r.Topics[i].decode(pd, version) + if err != nil { + return err + } + } + + return nil +} + +func (r *MetadataResponse) encode(pe packetEncoder) error { + if r.Version >= 3 { + pe.putInt32(r.ThrottleTimeMs) + } + + err := pe.putArrayLength(len(r.Brokers)) + if err != nil { + return err + } + for _, broker := range r.Brokers { + err = broker.encode(pe, r.Version) + if err != nil { + return err + } + } + + if r.Version >= 2 { + err := pe.putNullableString(r.ClusterID) + if err != nil { + return err + } + } + + if r.Version >= 1 { + pe.putInt32(r.ControllerID) + } + + err = pe.putArrayLength(len(r.Topics)) + if err != nil { + return err + } + for _, tm := range r.Topics { + err = tm.encode(pe, r.Version) + if err != nil { + return err + } + } + + return nil +} + +func (r *MetadataResponse) key() int16 { + return 3 +} + +func (r *MetadataResponse) version() int16 { + return r.Version +} + +func (r *MetadataResponse) headerVersion() int16 { + return 0 +} + +func (r *MetadataResponse) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V0_10_0_0 + case 2: + return V0_10_1_0 + case 3, 4: + return V0_11_0_0 + case 5: + return V1_0_0_0 + default: + return MinVersion + } +} + +// testing API + 
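+// For example, a test might seed broker metadata like this (topic name and +// address are illustrative): +// +// mr := new(MetadataResponse) +// mr.AddBroker("localhost:9092", 0) +// mr.AddTopicPartition("my-topic", 0, 0, nil, nil, nil, ErrNoError)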
+func (r *MetadataResponse) AddBroker(addr string, id int32) { + r.Brokers = append(r.Brokers, &Broker{id: id, addr: addr}) +} + +func (r *MetadataResponse) AddTopic(topic string, err KError) *TopicMetadata { + var tmatch *TopicMetadata + + for _, tm := range r.Topics { + if tm.Name == topic { + tmatch = tm + goto foundTopic + } + } + + tmatch = new(TopicMetadata) + tmatch.Name = topic + r.Topics = append(r.Topics, tmatch) + +foundTopic: + + tmatch.Err = err + return tmatch +} + +func (r *MetadataResponse) AddTopicPartition(topic string, partition, brokerID int32, replicas, isr []int32, offline []int32, err KError) { + tmatch := r.AddTopic(topic, ErrNoError) + var pmatch *PartitionMetadata + + for _, pm := range tmatch.Partitions { + if pm.ID == partition { + pmatch = pm + goto foundPartition + } + } + + pmatch = new(PartitionMetadata) + pmatch.ID = partition + tmatch.Partitions = append(tmatch.Partitions, pmatch) + +foundPartition: + + pmatch.Leader = brokerID + pmatch.Replicas = replicas + pmatch.Isr = isr + pmatch.OfflineReplicas = offline + pmatch.Err = err +} diff --git a/vendor/github.com/Shopify/sarama/metrics.go b/vendor/github.com/Shopify/sarama/metrics.go new file mode 100644 index 00000000000..90e5a87f497 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/metrics.go @@ -0,0 +1,43 @@ +package sarama + +import ( + "fmt" + "strings" + + "github.com/rcrowley/go-metrics" +) + +// Use exponentially decaying reservoir for sampling histograms with the same defaults as the Java library: +// 1028 elements, which offers a 99.9% confidence level with a 5% margin of error assuming a normal distribution, +// and an alpha factor of 0.015, which heavily biases the reservoir to the past 5 minutes of measurements. +// See https://github.com/dropwizard/metrics/blob/v3.1.0/metrics-core/src/main/java/com/codahale/metrics/ExponentiallyDecayingReservoir.java#L38 +const ( + metricsReservoirSize = 1028 + metricsAlphaFactor = 0.015 +) + +func getOrRegisterHistogram(name string, r metrics.Registry) metrics.Histogram { + return r.GetOrRegister(name, func() metrics.Histogram { + return metrics.NewHistogram(metrics.NewExpDecaySample(metricsReservoirSize, metricsAlphaFactor)) + }).(metrics.Histogram) +} + +func getMetricNameForBroker(name string, broker *Broker) string { + // Use broker id like the Java client as it does not contain '.' or ':' characters that + // can be interpreted as special character by monitoring tool (e.g. Graphite) + return fmt.Sprintf(name+"-for-broker-%d", broker.ID()) +} + +func getMetricNameForTopic(name string, topic string) string { + // Convert dot to _ since reporters like Graphite typically use dot to represent hierarchy + // cf. 
KAFKA-1902 and KAFKA-2337
+	return fmt.Sprintf(name+"-for-topic-%s", strings.Replace(topic, ".", "_", -1))
+}
+
+func getOrRegisterTopicMeter(name string, topic string, r metrics.Registry) metrics.Meter {
+	return metrics.GetOrRegisterMeter(getMetricNameForTopic(name, topic), r)
+}
+
+func getOrRegisterTopicHistogram(name string, topic string, r metrics.Registry) metrics.Histogram {
+	return getOrRegisterHistogram(getMetricNameForTopic(name, topic), r)
+}
diff --git a/vendor/github.com/Shopify/sarama/mockbroker.go b/vendor/github.com/Shopify/sarama/mockbroker.go
new file mode 100644
index 00000000000..c2654d12edb
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/mockbroker.go
@@ -0,0 +1,422 @@
+package sarama
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"net"
+	"reflect"
+	"strconv"
+	"sync"
+	"time"
+
+	"github.com/davecgh/go-spew/spew"
+)
+
+const (
+	expectationTimeout = 500 * time.Millisecond
+)
+
+type GSSApiHandlerFunc func([]byte) []byte
+
+type requestHandlerFunc func(req *request) (res encoderWithHeader)
+
+// RequestNotifierFunc is invoked when a mock broker processes a request
+// successfully, and provides the number of bytes read and written.
+type RequestNotifierFunc func(bytesRead, bytesWritten int)
+
+// MockBroker is a mock Kafka broker that is used in unit tests. It is exposed
+// to facilitate testing of higher level or specialized consumers and producers
+// built on top of Sarama. Note that it does not 'mimic' the Kafka API protocol,
+// but rather provides a facility to do that. It takes care of the TCP
+// transport, request unmarshalling and response marshalling, and leaves it the
+// test writer's responsibility to program MockBroker behaviour that is correct
+// according to the Kafka API protocol.
+//
+// MockBroker is implemented as a TCP server listening on a kernel-selected
+// localhost port that can accept many connections. It reads Kafka requests
+// from those connections and returns responses programmed by the
+// SetHandlerByMap function. If a MockBroker receives a request that it has no
+// programmed response for, then it returns nothing and the request times out.
+//
+// A set of MockResponse builders to define mappings used by MockBroker is
+// provided by Sarama. But users can develop MockResponses of their own and use
+// them along with or instead of the standard ones.
+//
+// When running tests with MockBroker it is strongly recommended to specify
+// a timeout to `go test` so that if the broker hangs waiting for a response,
+// the test panics.
+//
+// It is not necessary to prefix message length or correlation ID to your
+// response bytes, the server does that automatically as a convenience.
+type MockBroker struct {
+	brokerID      int32
+	port          int32
+	closing       chan none
+	stopper       chan none
+	expectations  chan encoderWithHeader
+	listener      net.Listener
+	t             TestReporter
+	latency       time.Duration
+	handler       requestHandlerFunc
+	notifier      RequestNotifierFunc
+	history       []RequestResponse
+	lock          sync.Mutex
+	gssApiHandler GSSApiHandlerFunc
+}
+
+// RequestResponse represents a Request/Response pair processed by MockBroker.
+type RequestResponse struct {
+	Request  protocolBody
+	Response encoder
+}
+
+// SetLatency makes the broker pause for the specified period every time before
+// replying.
+func (b *MockBroker) SetLatency(latency time.Duration) {
+	b.latency = latency
+}
+
+// SetHandlerByMap defines a mapping of request types to MockResponses.
+// When a request is received by the broker, it looks up the request type in
+// the map and uses the found MockResponse instance to generate an appropriate
+// reply. If the request type is not found in the map then nothing is sent.
+func (b *MockBroker) SetHandlerByMap(handlerMap map[string]MockResponse) {
+	b.setHandler(func(req *request) (res encoderWithHeader) {
+		reqTypeName := reflect.TypeOf(req.body).Elem().Name()
+		mockResponse := handlerMap[reqTypeName]
+		if mockResponse == nil {
+			return nil
+		}
+		return mockResponse.For(req.body)
+	})
+}
+
+// SetNotifier sets a function that will get invoked whenever a request has
+// been processed successfully and will provide the number of bytes read and
+// written.
+func (b *MockBroker) SetNotifier(notifier RequestNotifierFunc) {
+	b.lock.Lock()
+	b.notifier = notifier
+	b.lock.Unlock()
+}
+
+// BrokerID returns the broker ID assigned to the broker.
+func (b *MockBroker) BrokerID() int32 {
+	return b.brokerID
+}
+
+// History returns a slice of RequestResponse pairs in the order they were
+// processed by the broker. Note that in case of multiple connections to the
+// broker the order expected by a test can be different from the order recorded
+// in the history, unless some synchronization is implemented in the test.
+func (b *MockBroker) History() []RequestResponse {
+	b.lock.Lock()
+	history := make([]RequestResponse, len(b.history))
+	copy(history, b.history)
+	b.lock.Unlock()
+	return history
+}
+
+// Port returns the TCP port number the broker is listening for requests on.
+func (b *MockBroker) Port() int32 {
+	return b.port
+}
+
+// Addr returns the broker connection string in the form "<address>:<port>".
+func (b *MockBroker) Addr() string {
+	return b.listener.Addr().String()
+}
+
+// Close terminates the broker, blocking until it stops its internal goroutines
+// and releases all resources.
+func (b *MockBroker) Close() {
+	close(b.expectations)
+	if len(b.expectations) > 0 {
+		buf := bytes.NewBufferString(fmt.Sprintf("mockbroker/%d: not all expectations were satisfied! Still waiting on:\n", b.BrokerID()))
+		for e := range b.expectations {
+			_, _ = buf.WriteString(spew.Sdump(e))
+		}
+		b.t.Error(buf.String())
+	}
+	close(b.closing)
+	<-b.stopper
+}
+
+// setHandler sets the specified function as the request handler. Whenever
+// a mock broker reads a request from the wire it passes the request to the
+// function and sends back whatever the handler function returns.
+func (b *MockBroker) setHandler(handler requestHandlerFunc) {
+	b.lock.Lock()
+	b.handler = handler
+	b.lock.Unlock()
+}
+
+func (b *MockBroker) serverLoop() {
+	defer close(b.stopper)
+	var err error
+	var conn net.Conn
+
+	go func() {
+		<-b.closing
+		err := b.listener.Close()
+		if err != nil {
+			b.t.Error(err)
+		}
+	}()
+
+	wg := &sync.WaitGroup{}
+	i := 0
+	for conn, err = b.listener.Accept(); err == nil; conn, err = b.listener.Accept() {
+		wg.Add(1)
+		go b.handleRequests(conn, i, wg)
+		i++
+	}
+	wg.Wait()
+	Logger.Printf("*** mockbroker/%d: listener closed, err=%v", b.BrokerID(), err)
+}
+
+func (b *MockBroker) SetGSSAPIHandler(handler GSSApiHandlerFunc) {
+	b.gssApiHandler = handler
+}
+
+func (b *MockBroker) readToBytes(r io.Reader) ([]byte, error) {
+	var (
+		bytesRead   int
+		lengthBytes = make([]byte, 4)
+	)
+
+	if _, err := io.ReadFull(r, lengthBytes); err != nil {
+		return nil, err
+	}
+
+	bytesRead += len(lengthBytes)
+	length := int32(binary.BigEndian.Uint32(lengthBytes))
+
+	if length <= 4 || length > MaxRequestSize {
+		return nil, PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", length)}
+	}
+
+	encodedReq := make([]byte, length)
+	if _, err := io.ReadFull(r, encodedReq); err != nil {
+		return nil, err
+	}
+
+	bytesRead += len(encodedReq)
+
+	fullBytes := append(lengthBytes, encodedReq...)
+
+	return fullBytes, nil
+}
+
+func (b *MockBroker) isGSSAPI(buffer []byte) bool {
+	return buffer[4] == 0x60 || bytes.Equal(buffer[4:6], []byte{0x05, 0x04})
+}
+
+func (b *MockBroker) handleRequests(conn io.ReadWriteCloser, idx int, wg *sync.WaitGroup) {
+	defer wg.Done()
+	defer func() {
+		_ = conn.Close()
+	}()
+	s := spew.NewDefaultConfig()
+	s.MaxDepth = 1
+	Logger.Printf("*** mockbroker/%d/%d: connection opened", b.BrokerID(), idx)
+	var err error
+
+	abort := make(chan none)
+	defer close(abort)
+	go func() {
+		select {
+		case <-b.closing:
+			_ = conn.Close()
+		case <-abort:
+		}
+	}()
+
+	var bytesWritten int
+	var bytesRead int
+	for {
+		buffer, err := b.readToBytes(conn)
+		if err != nil {
+			Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(buffer))
+			b.serverError(err)
+			break
+		}
+
+		bytesWritten = 0
+		if !b.isGSSAPI(buffer) {
+			req, br, err := decodeRequest(bytes.NewReader(buffer))
+			bytesRead = br
+			if err != nil {
+				Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(req))
+				b.serverError(err)
+				break
+			}
+
+			if b.latency > 0 {
+				time.Sleep(b.latency)
+			}
+
+			b.lock.Lock()
+			res := b.handler(req)
+			b.history = append(b.history, RequestResponse{req.body, res})
+			b.lock.Unlock()
+
+			if res == nil {
+				Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(req))
+				continue
+			}
+			Logger.Printf(
+				"*** mockbroker/%d/%d: replied to %T with %T\n-> %s\n-> %s",
+				b.brokerID, idx, req.body, res,
+				s.Sprintf("%#v", req.body),
+				s.Sprintf("%#v", res),
+			)
+
+			encodedRes, err := encode(res, nil)
+			if err != nil {
+				b.serverError(err)
+				break
+			}
+			if len(encodedRes) == 0 {
+				b.lock.Lock()
+				if b.notifier != nil {
+					b.notifier(bytesRead, 0)
+				}
+				b.lock.Unlock()
+				continue
+			}
+
+			resHeader := b.encodeHeader(res.headerVersion(), req.correlationID, uint32(len(encodedRes)))
+			if _, err = conn.Write(resHeader); err != nil {
+				b.serverError(err)
+				break
+			}
+			if _, err = conn.Write(encodedRes); err != nil {
+				b.serverError(err)
+				break
+			}
+			bytesWritten = len(resHeader) + len(encodedRes)
+		} else {
+			// GSSAPI is not part of the Kafka protocol, but is supported for
+			// authentication purposes. History is not kept for this kind of
+			// request, as it is only used to test the GSSAPI authentication
+			// mechanism.
+			b.lock.Lock()
+			res := b.gssApiHandler(buffer)
+			b.lock.Unlock()
+			if res == nil {
+				Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(buffer))
+				continue
+			}
+			if _, err = conn.Write(res); err != nil {
+				b.serverError(err)
+				break
+			}
+			bytesWritten = len(res)
+		}
+
+		b.lock.Lock()
+		if b.notifier != nil {
+			b.notifier(bytesRead, bytesWritten)
+		}
+		b.lock.Unlock()
+	}
+	Logger.Printf("*** mockbroker/%d/%d: connection closed, err=%v", b.BrokerID(), idx, err)
+}
+
+func (b *MockBroker) encodeHeader(headerVersion int16, correlationId int32, payloadLength uint32) []byte {
+	headerLength := uint32(8)
+
+	if headerVersion >= 1 {
+		headerLength = 9
+	}
+
+	resHeader := make([]byte, headerLength)
+	binary.BigEndian.PutUint32(resHeader, payloadLength+headerLength-4)
+	binary.BigEndian.PutUint32(resHeader[4:], uint32(correlationId))
+
+	if headerVersion >= 1 {
+		binary.PutUvarint(resHeader[8:], 0)
+	}
+
+	return resHeader
+}
+
+func (b *MockBroker) defaultRequestHandler(req *request) (res encoderWithHeader) {
+	select {
+	case res, ok := <-b.expectations:
+		if !ok {
+			return nil
+		}
+		return res
+	case <-time.After(expectationTimeout):
+		return nil
+	}
+}
+
+func (b *MockBroker) serverError(err error) {
+	isConnectionClosedError := false
+	if _, ok := err.(*net.OpError); ok {
+		isConnectionClosedError = true
+	} else if err == io.EOF {
+		isConnectionClosedError = true
+	} else if err.Error() == "use of closed network connection" {
+		isConnectionClosedError = true
+	}
+
+	if isConnectionClosedError {
+		return
+	}
+
+	b.t.Errorf(err.Error())
+}
+
+// NewMockBroker launches a fake Kafka broker. It takes a TestReporter as
+// provided by the test framework and a channel of responses to use. If an
+// error occurs it is simply logged to the TestReporter and the broker exits.
+func NewMockBroker(t TestReporter, brokerID int32) *MockBroker {
+	return NewMockBrokerAddr(t, brokerID, "localhost:0")
+}
+
+// NewMockBrokerAddr behaves like NewMockBroker but listens on the address you
+// give it rather than just some ephemeral port.
+func NewMockBrokerAddr(t TestReporter, brokerID int32, addr string) *MockBroker {
+	listener, err := net.Listen("tcp", addr)
+	if err != nil {
+		t.Fatal(err)
+	}
+	return NewMockBrokerListener(t, brokerID, listener)
+}
+
+// NewMockBrokerListener behaves like NewMockBrokerAddr but accepts connections
+// on the listener specified.
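+// As with NewMockBroker, the returned broker is normally programmed before
+// use; a hedged sketch (t is the test's TestReporter, listener an existing
+// net.Listener, and the topic name is illustrative):
+//
+//	broker := NewMockBrokerListener(t, 1, listener)
+//	defer broker.Close()
+//	broker.SetHandlerByMap(map[string]MockResponse{
+//		"MetadataRequest": NewMockMetadataResponse(t).
+//			SetBroker(broker.Addr(), broker.BrokerID()).
+//			SetLeader("my-topic", 0, broker.BrokerID()),
+//	})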
+func NewMockBrokerListener(t TestReporter, brokerID int32, listener net.Listener) *MockBroker { + var err error + + broker := &MockBroker{ + closing: make(chan none), + stopper: make(chan none), + t: t, + brokerID: brokerID, + expectations: make(chan encoderWithHeader, 512), + listener: listener, + } + broker.handler = broker.defaultRequestHandler + + Logger.Printf("*** mockbroker/%d listening on %s\n", brokerID, broker.listener.Addr().String()) + _, portStr, err := net.SplitHostPort(broker.listener.Addr().String()) + if err != nil { + t.Fatal(err) + } + tmp, err := strconv.ParseInt(portStr, 10, 32) + if err != nil { + t.Fatal(err) + } + broker.port = int32(tmp) + + go broker.serverLoop() + + return broker +} + +func (b *MockBroker) Returns(e encoderWithHeader) { + b.expectations <- e +} diff --git a/vendor/github.com/Shopify/sarama/mockkerberos.go b/vendor/github.com/Shopify/sarama/mockkerberos.go new file mode 100644 index 00000000000..a43607e1c10 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/mockkerberos.go @@ -0,0 +1,125 @@ +package sarama + +import ( + "encoding/binary" + "encoding/hex" + + "github.com/jcmturner/gokrb5/v8/credentials" + "github.com/jcmturner/gokrb5/v8/gssapi" + "github.com/jcmturner/gokrb5/v8/iana/keyusage" + "github.com/jcmturner/gokrb5/v8/messages" + "github.com/jcmturner/gokrb5/v8/types" +) + +type KafkaGSSAPIHandler struct { + client *MockKerberosClient + badResponse bool + badKeyChecksum bool +} + +func (h *KafkaGSSAPIHandler) MockKafkaGSSAPI(buffer []byte) []byte { + // Default payload used for verify + err := h.client.Login() // Mock client construct keys when login + if err != nil { + return nil + } + if h.badResponse { // Returns trash + return []byte{0x00, 0x00, 0x00, 0x01, 0xAD} + } + + pack := gssapi.WrapToken{ + Flags: KRB5_USER_AUTH, + EC: 12, + RRC: 0, + SndSeqNum: 3398292281, + Payload: []byte{0x11, 0x00}, // 1100 + } + // Compute checksum + if h.badKeyChecksum { + pack.CheckSum = []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF} + } else { + err = pack.SetCheckSum(h.client.ASRep.DecryptedEncPart.Key, keyusage.GSSAPI_ACCEPTOR_SEAL) + if err != nil { + return nil + } + } + + packBytes, err := pack.Marshal() + if err != nil { + return nil + } + lenBytes := len(packBytes) + response := make([]byte, lenBytes+4) + copy(response[4:], packBytes) + binary.BigEndian.PutUint32(response, uint32(lenBytes)) + return response +} + +type MockKerberosClient struct { + asRepBytes string + ASRep messages.ASRep + credentials *credentials.Credentials + mockError error + errorStage string +} + +func (c *MockKerberosClient) Login() error { + if c.errorStage == "login" && c.mockError != nil { + return c.mockError + } + c.asRepBytes = "6b8202e9308202e5a003020105a10302010ba22b30293027a103020113a220041e301c301aa003020112a1131b114" + + "558414d504c452e434f4d636c69656e74a30d1b0b4558414d504c452e434f4da4133011a003020101a10a30081b06636c69656e7" + + "4a5820156618201523082014ea003020105a10d1b0b4558414d504c452e434f4da220301ea003020102a11730151b066b7262746" + + "7741b0b4558414d504c452e434f4da382011430820110a003020112a103020101a28201020481ffdb9891175d106818e61008c51" + + "d0b3462bca92f3bf9d4cfa82de4c4d7aff9994ec87c573e3a3d54dcb2bb79618c76f2bf4a3d006f90d5bdbd049bc18f48be39203" + + "549ca02acaf63f292b12404f9b74c34b83687119d8f56552ccc0c50ebee2a53bb114c1b4619bb1d5d31f0f49b4d40a08a9b4c046" + + "2e1398d0b648be1c0e50c552ad16e1d8d8e74263dd0bf0ec591e4797dfd40a9a1be4ae830d03a306e053fd7586fef84ffc5e4a83" + + 
"7c3122bf3e6a40fe87e84019f6283634461b955712b44a5f7386c278bff94ec2c2dc0403247e29c2450e853471ceababf9b8911f" + + "997f2e3010b046d2c49eb438afb0f4c210821e80d4ffa4c9521eb895dcd68610b3feaa682012c30820128a003020112a282011f0" + + "482011bce73cbce3f1dd17661c412005f0f2257c756fe8e98ff97e6ec24b7bab66e5fd3a3827aeeae4757af0c6e892948122d8b2" + + "03c8df48df0ef5d142d0e416d688f11daa0fcd63d96bdd431d02b8e951c664eeff286a2be62383d274a04016d5f0e141da58cb86" + + "331de64063062f4f885e8e9ce5b181ca2fdc67897c5995e0ae1ae0c171a64493ff7bd91bc6d89cd4fce1e2b3ea0a10e34b0d5eda" + + "aa38ee727b50c5632ed1d2f2b457908e616178d0d80b72af209fb8ac9dbaa1768fa45931392b36b6d8c12400f8ded2efaa0654d0" + + "da1db966e8b5aab4706c800f95d559664646041fdb38b411c62fc0fbe0d25083a28562b0e1c8df16e62e9d5626b0addee489835f" + + "eedb0f26c05baa596b69b17f47920aa64b29dc77cfcc97ba47885" + apRepBytes, err := hex.DecodeString(c.asRepBytes) + if err != nil { + return err + } + err = c.ASRep.Unmarshal(apRepBytes) + if err != nil { + return err + } + c.credentials = credentials.New("client", "EXAMPLE.COM").WithPassword("qwerty") + _, err = c.ASRep.DecryptEncPart(c.credentials) + if err != nil { + return err + } + return nil +} + +func (c *MockKerberosClient) GetServiceTicket(spn string) (messages.Ticket, types.EncryptionKey, error) { + if c.errorStage == "service_ticket" && c.mockError != nil { + return messages.Ticket{}, types.EncryptionKey{}, c.mockError + } + return c.ASRep.Ticket, c.ASRep.DecryptedEncPart.Key, nil +} + +func (c *MockKerberosClient) Domain() string { + return "EXAMPLE.COM" +} + +func (c *MockKerberosClient) CName() types.PrincipalName { + p := types.PrincipalName{ + NameType: KRB5_USER_AUTH, + NameString: []string{ + "kafka", + "kafka", + }, + } + return p +} + +func (c *MockKerberosClient) Destroy() { + // Do nothing. +} diff --git a/vendor/github.com/Shopify/sarama/mockresponses.go b/vendor/github.com/Shopify/sarama/mockresponses.go new file mode 100644 index 00000000000..6654ed07c39 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/mockresponses.go @@ -0,0 +1,1273 @@ +package sarama + +import ( + "fmt" + "strings" +) + +// TestReporter has methods matching go's testing.T to avoid importing +// `testing` in the main part of the library. +type TestReporter interface { + Error(...interface{}) + Errorf(string, ...interface{}) + Fatal(...interface{}) + Fatalf(string, ...interface{}) +} + +// MockResponse is a response builder interface it defines one method that +// allows generating a response based on a request body. MockResponses are used +// to program behavior of MockBroker in tests. +type MockResponse interface { + For(reqBody versionedDecoder) (res encoderWithHeader) +} + +// MockWrapper is a mock response builder that returns a particular concrete +// response regardless of the actual request passed to the `For` method. +type MockWrapper struct { + res encoderWithHeader +} + +func (mw *MockWrapper) For(reqBody versionedDecoder) (res encoderWithHeader) { + return mw.res +} + +func NewMockWrapper(res encoderWithHeader) *MockWrapper { + return &MockWrapper{res: res} +} + +// MockSequence is a mock response builder that is created from a sequence of +// concrete responses. Every time when a `MockBroker` calls its `For` method +// the next response from the sequence is returned. When the end of the +// sequence is reached the last element from the sequence is returned. 
+type MockSequence struct { + responses []MockResponse +} + +func NewMockSequence(responses ...interface{}) *MockSequence { + ms := &MockSequence{} + ms.responses = make([]MockResponse, len(responses)) + for i, res := range responses { + switch res := res.(type) { + case MockResponse: + ms.responses[i] = res + case encoderWithHeader: + ms.responses[i] = NewMockWrapper(res) + default: + panic(fmt.Sprintf("Unexpected response type: %T", res)) + } + } + return ms +} + +func (mc *MockSequence) For(reqBody versionedDecoder) (res encoderWithHeader) { + res = mc.responses[0].For(reqBody) + if len(mc.responses) > 1 { + mc.responses = mc.responses[1:] + } + return res +} + +type MockListGroupsResponse struct { + groups map[string]string + t TestReporter +} + +func NewMockListGroupsResponse(t TestReporter) *MockListGroupsResponse { + return &MockListGroupsResponse{ + groups: make(map[string]string), + t: t, + } +} + +func (m *MockListGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader { + request := reqBody.(*ListGroupsRequest) + _ = request + response := &ListGroupsResponse{ + Groups: m.groups, + } + return response +} + +func (m *MockListGroupsResponse) AddGroup(groupID, protocolType string) *MockListGroupsResponse { + m.groups[groupID] = protocolType + return m +} + +type MockDescribeGroupsResponse struct { + groups map[string]*GroupDescription + t TestReporter +} + +func NewMockDescribeGroupsResponse(t TestReporter) *MockDescribeGroupsResponse { + return &MockDescribeGroupsResponse{ + t: t, + groups: make(map[string]*GroupDescription), + } +} + +func (m *MockDescribeGroupsResponse) AddGroupDescription(groupID string, description *GroupDescription) *MockDescribeGroupsResponse { + m.groups[groupID] = description + return m +} + +func (m *MockDescribeGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader { + request := reqBody.(*DescribeGroupsRequest) + + response := &DescribeGroupsResponse{} + for _, requestedGroup := range request.Groups { + if group, ok := m.groups[requestedGroup]; ok { + response.Groups = append(response.Groups, group) + } else { + // Mimic real kafka - if a group doesn't exist, return + // an entry with state "Dead" + response.Groups = append(response.Groups, &GroupDescription{ + GroupId: requestedGroup, + State: "Dead", + }) + } + } + + return response +} + +// MockMetadataResponse is a `MetadataResponse` builder. 
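+// It answers MetadataRequests from the controller, broker, and leader maps
+// configured through its setters; a hedged sketch (the address and topic
+// name are illustrative):
+//
+//	meta := NewMockMetadataResponse(t).
+//		SetController(0).
+//		SetBroker("localhost:9092", 0).
+//		SetLeader("my-topic", 0, 0)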
+type MockMetadataResponse struct { + controllerID int32 + leaders map[string]map[int32]int32 + brokers map[string]int32 + t TestReporter +} + +func NewMockMetadataResponse(t TestReporter) *MockMetadataResponse { + return &MockMetadataResponse{ + leaders: make(map[string]map[int32]int32), + brokers: make(map[string]int32), + t: t, + } +} + +func (mmr *MockMetadataResponse) SetLeader(topic string, partition, brokerID int32) *MockMetadataResponse { + partitions := mmr.leaders[topic] + if partitions == nil { + partitions = make(map[int32]int32) + mmr.leaders[topic] = partitions + } + partitions[partition] = brokerID + return mmr +} + +func (mmr *MockMetadataResponse) SetBroker(addr string, brokerID int32) *MockMetadataResponse { + mmr.brokers[addr] = brokerID + return mmr +} + +func (mmr *MockMetadataResponse) SetController(brokerID int32) *MockMetadataResponse { + mmr.controllerID = brokerID + return mmr +} + +func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoderWithHeader { + metadataRequest := reqBody.(*MetadataRequest) + metadataResponse := &MetadataResponse{ + Version: metadataRequest.version(), + ControllerID: mmr.controllerID, + } + for addr, brokerID := range mmr.brokers { + metadataResponse.AddBroker(addr, brokerID) + } + + // Generate set of replicas + var replicas []int32 + var offlineReplicas []int32 + for _, brokerID := range mmr.brokers { + replicas = append(replicas, brokerID) + } + + if len(metadataRequest.Topics) == 0 { + for topic, partitions := range mmr.leaders { + for partition, brokerID := range partitions { + metadataResponse.AddTopicPartition(topic, partition, brokerID, replicas, replicas, offlineReplicas, ErrNoError) + } + } + return metadataResponse + } + for _, topic := range metadataRequest.Topics { + for partition, brokerID := range mmr.leaders[topic] { + metadataResponse.AddTopicPartition(topic, partition, brokerID, replicas, replicas, offlineReplicas, ErrNoError) + } + } + return metadataResponse +} + +// MockOffsetResponse is an `OffsetResponse` builder. 
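+// A hedged sketch of programming the oldest and newest offsets for one
+// partition (OffsetOldest and OffsetNewest are sarama's standard time
+// sentinels; the topic name and offsets are illustrative):
+//
+//	off := NewMockOffsetResponse(t).
+//		SetOffset("my-topic", 0, OffsetOldest, 0).
+//		SetOffset("my-topic", 0, OffsetNewest, 10)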
+type MockOffsetResponse struct { + offsets map[string]map[int32]map[int64]int64 + t TestReporter + version int16 +} + +func NewMockOffsetResponse(t TestReporter) *MockOffsetResponse { + return &MockOffsetResponse{ + offsets: make(map[string]map[int32]map[int64]int64), + t: t, + } +} + +func (mor *MockOffsetResponse) SetVersion(version int16) *MockOffsetResponse { + mor.version = version + return mor +} + +func (mor *MockOffsetResponse) SetOffset(topic string, partition int32, time, offset int64) *MockOffsetResponse { + partitions := mor.offsets[topic] + if partitions == nil { + partitions = make(map[int32]map[int64]int64) + mor.offsets[topic] = partitions + } + times := partitions[partition] + if times == nil { + times = make(map[int64]int64) + partitions[partition] = times + } + times[time] = offset + return mor +} + +func (mor *MockOffsetResponse) For(reqBody versionedDecoder) encoderWithHeader { + offsetRequest := reqBody.(*OffsetRequest) + offsetResponse := &OffsetResponse{Version: mor.version} + for topic, partitions := range offsetRequest.blocks { + for partition, block := range partitions { + offset := mor.getOffset(topic, partition, block.time) + offsetResponse.AddTopicPartition(topic, partition, offset) + } + } + return offsetResponse +} + +func (mor *MockOffsetResponse) getOffset(topic string, partition int32, time int64) int64 { + partitions := mor.offsets[topic] + if partitions == nil { + mor.t.Errorf("missing topic: %s", topic) + } + times := partitions[partition] + if times == nil { + mor.t.Errorf("missing partition: %d", partition) + } + offset, ok := times[time] + if !ok { + mor.t.Errorf("missing time: %d", time) + } + return offset +} + +// MockFetchResponse is a `FetchResponse` builder. +type MockFetchResponse struct { + messages map[string]map[int32]map[int64]Encoder + highWaterMarks map[string]map[int32]int64 + t TestReporter + batchSize int + version int16 +} + +func NewMockFetchResponse(t TestReporter, batchSize int) *MockFetchResponse { + return &MockFetchResponse{ + messages: make(map[string]map[int32]map[int64]Encoder), + highWaterMarks: make(map[string]map[int32]int64), + t: t, + batchSize: batchSize, + } +} + +func (mfr *MockFetchResponse) SetVersion(version int16) *MockFetchResponse { + mfr.version = version + return mfr +} + +func (mfr *MockFetchResponse) SetMessage(topic string, partition int32, offset int64, msg Encoder) *MockFetchResponse { + partitions := mfr.messages[topic] + if partitions == nil { + partitions = make(map[int32]map[int64]Encoder) + mfr.messages[topic] = partitions + } + messages := partitions[partition] + if messages == nil { + messages = make(map[int64]Encoder) + partitions[partition] = messages + } + messages[offset] = msg + return mfr +} + +func (mfr *MockFetchResponse) SetHighWaterMark(topic string, partition int32, offset int64) *MockFetchResponse { + partitions := mfr.highWaterMarks[topic] + if partitions == nil { + partitions = make(map[int32]int64) + mfr.highWaterMarks[topic] = partitions + } + partitions[partition] = offset + return mfr +} + +func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoderWithHeader { + fetchRequest := reqBody.(*FetchRequest) + res := &FetchResponse{ + Version: mfr.version, + } + for topic, partitions := range fetchRequest.blocks { + for partition, block := range partitions { + initialOffset := block.fetchOffset + offset := initialOffset + maxOffset := initialOffset + int64(mfr.getMessageCount(topic, partition)) + for i := 0; i < mfr.batchSize && offset < maxOffset; { + msg := 
mfr.getMessage(topic, partition, offset) + if msg != nil { + res.AddMessage(topic, partition, nil, msg, offset) + i++ + } + offset++ + } + fb := res.GetBlock(topic, partition) + if fb == nil { + res.AddError(topic, partition, ErrNoError) + fb = res.GetBlock(topic, partition) + } + fb.HighWaterMarkOffset = mfr.getHighWaterMark(topic, partition) + } + } + return res +} + +func (mfr *MockFetchResponse) getMessage(topic string, partition int32, offset int64) Encoder { + partitions := mfr.messages[topic] + if partitions == nil { + return nil + } + messages := partitions[partition] + if messages == nil { + return nil + } + return messages[offset] +} + +func (mfr *MockFetchResponse) getMessageCount(topic string, partition int32) int { + partitions := mfr.messages[topic] + if partitions == nil { + return 0 + } + messages := partitions[partition] + if messages == nil { + return 0 + } + return len(messages) +} + +func (mfr *MockFetchResponse) getHighWaterMark(topic string, partition int32) int64 { + partitions := mfr.highWaterMarks[topic] + if partitions == nil { + return 0 + } + return partitions[partition] +} + +// MockConsumerMetadataResponse is a `ConsumerMetadataResponse` builder. +type MockConsumerMetadataResponse struct { + coordinators map[string]interface{} + t TestReporter +} + +func NewMockConsumerMetadataResponse(t TestReporter) *MockConsumerMetadataResponse { + return &MockConsumerMetadataResponse{ + coordinators: make(map[string]interface{}), + t: t, + } +} + +func (mr *MockConsumerMetadataResponse) SetCoordinator(group string, broker *MockBroker) *MockConsumerMetadataResponse { + mr.coordinators[group] = broker + return mr +} + +func (mr *MockConsumerMetadataResponse) SetError(group string, kerror KError) *MockConsumerMetadataResponse { + mr.coordinators[group] = kerror + return mr +} + +func (mr *MockConsumerMetadataResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*ConsumerMetadataRequest) + group := req.ConsumerGroup + res := &ConsumerMetadataResponse{} + v := mr.coordinators[group] + switch v := v.(type) { + case *MockBroker: + res.Coordinator = &Broker{id: v.BrokerID(), addr: v.Addr()} + case KError: + res.Err = v + } + return res +} + +// MockFindCoordinatorResponse is a `FindCoordinatorResponse` builder. 
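+// A hedged sketch: directing the group coordinator lookup for "my-group"
+// (an illustrative name) at a MockBroker:
+//
+//	fc := NewMockFindCoordinatorResponse(t).
+//		SetCoordinator(CoordinatorGroup, "my-group", broker)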
+type MockFindCoordinatorResponse struct { + groupCoordinators map[string]interface{} + transCoordinators map[string]interface{} + t TestReporter +} + +func NewMockFindCoordinatorResponse(t TestReporter) *MockFindCoordinatorResponse { + return &MockFindCoordinatorResponse{ + groupCoordinators: make(map[string]interface{}), + transCoordinators: make(map[string]interface{}), + t: t, + } +} + +func (mr *MockFindCoordinatorResponse) SetCoordinator(coordinatorType CoordinatorType, group string, broker *MockBroker) *MockFindCoordinatorResponse { + switch coordinatorType { + case CoordinatorGroup: + mr.groupCoordinators[group] = broker + case CoordinatorTransaction: + mr.transCoordinators[group] = broker + } + return mr +} + +func (mr *MockFindCoordinatorResponse) SetError(coordinatorType CoordinatorType, group string, kerror KError) *MockFindCoordinatorResponse { + switch coordinatorType { + case CoordinatorGroup: + mr.groupCoordinators[group] = kerror + case CoordinatorTransaction: + mr.transCoordinators[group] = kerror + } + return mr +} + +func (mr *MockFindCoordinatorResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*FindCoordinatorRequest) + res := &FindCoordinatorResponse{} + var v interface{} + switch req.CoordinatorType { + case CoordinatorGroup: + v = mr.groupCoordinators[req.CoordinatorKey] + case CoordinatorTransaction: + v = mr.transCoordinators[req.CoordinatorKey] + } + switch v := v.(type) { + case *MockBroker: + res.Coordinator = &Broker{id: v.BrokerID(), addr: v.Addr()} + case KError: + res.Err = v + } + return res +} + +// MockOffsetCommitResponse is a `OffsetCommitResponse` builder. +type MockOffsetCommitResponse struct { + errors map[string]map[string]map[int32]KError + t TestReporter +} + +func NewMockOffsetCommitResponse(t TestReporter) *MockOffsetCommitResponse { + return &MockOffsetCommitResponse{t: t} +} + +func (mr *MockOffsetCommitResponse) SetError(group, topic string, partition int32, kerror KError) *MockOffsetCommitResponse { + if mr.errors == nil { + mr.errors = make(map[string]map[string]map[int32]KError) + } + topics := mr.errors[group] + if topics == nil { + topics = make(map[string]map[int32]KError) + mr.errors[group] = topics + } + partitions := topics[topic] + if partitions == nil { + partitions = make(map[int32]KError) + topics[topic] = partitions + } + partitions[partition] = kerror + return mr +} + +func (mr *MockOffsetCommitResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*OffsetCommitRequest) + group := req.ConsumerGroup + res := &OffsetCommitResponse{} + for topic, partitions := range req.blocks { + for partition := range partitions { + res.AddError(topic, partition, mr.getError(group, topic, partition)) + } + } + return res +} + +func (mr *MockOffsetCommitResponse) getError(group, topic string, partition int32) KError { + topics := mr.errors[group] + if topics == nil { + return ErrNoError + } + partitions := topics[topic] + if partitions == nil { + return ErrNoError + } + kerror, ok := partitions[partition] + if !ok { + return ErrNoError + } + return kerror +} + +// MockProduceResponse is a `ProduceResponse` builder. 
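+// A hedged sketch: failing produces to one partition while every other
+// partition gets ErrNoError (the topic name is illustrative):
+//
+//	prod := NewMockProduceResponse(t).
+//		SetVersion(2).
+//		SetError("my-topic", 0, ErrNotLeaderForPartition)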
+type MockProduceResponse struct { + version int16 + errors map[string]map[int32]KError + t TestReporter +} + +func NewMockProduceResponse(t TestReporter) *MockProduceResponse { + return &MockProduceResponse{t: t} +} + +func (mr *MockProduceResponse) SetVersion(version int16) *MockProduceResponse { + mr.version = version + return mr +} + +func (mr *MockProduceResponse) SetError(topic string, partition int32, kerror KError) *MockProduceResponse { + if mr.errors == nil { + mr.errors = make(map[string]map[int32]KError) + } + partitions := mr.errors[topic] + if partitions == nil { + partitions = make(map[int32]KError) + mr.errors[topic] = partitions + } + partitions[partition] = kerror + return mr +} + +func (mr *MockProduceResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*ProduceRequest) + res := &ProduceResponse{ + Version: mr.version, + } + for topic, partitions := range req.records { + for partition := range partitions { + res.AddTopicPartition(topic, partition, mr.getError(topic, partition)) + } + } + return res +} + +func (mr *MockProduceResponse) getError(topic string, partition int32) KError { + partitions := mr.errors[topic] + if partitions == nil { + return ErrNoError + } + kerror, ok := partitions[partition] + if !ok { + return ErrNoError + } + return kerror +} + +// MockOffsetFetchResponse is a `OffsetFetchResponse` builder. +type MockOffsetFetchResponse struct { + offsets map[string]map[string]map[int32]*OffsetFetchResponseBlock + error KError + t TestReporter +} + +func NewMockOffsetFetchResponse(t TestReporter) *MockOffsetFetchResponse { + return &MockOffsetFetchResponse{t: t} +} + +func (mr *MockOffsetFetchResponse) SetOffset(group, topic string, partition int32, offset int64, metadata string, kerror KError) *MockOffsetFetchResponse { + if mr.offsets == nil { + mr.offsets = make(map[string]map[string]map[int32]*OffsetFetchResponseBlock) + } + topics := mr.offsets[group] + if topics == nil { + topics = make(map[string]map[int32]*OffsetFetchResponseBlock) + mr.offsets[group] = topics + } + partitions := topics[topic] + if partitions == nil { + partitions = make(map[int32]*OffsetFetchResponseBlock) + topics[topic] = partitions + } + partitions[partition] = &OffsetFetchResponseBlock{offset, 0, metadata, kerror} + return mr +} + +func (mr *MockOffsetFetchResponse) SetError(kerror KError) *MockOffsetFetchResponse { + mr.error = kerror + return mr +} + +func (mr *MockOffsetFetchResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*OffsetFetchRequest) + group := req.ConsumerGroup + res := &OffsetFetchResponse{Version: req.Version} + + for topic, partitions := range mr.offsets[group] { + for partition, block := range partitions { + res.AddBlock(topic, partition, block) + } + } + + if res.Version >= 2 { + res.Err = mr.error + } + return res +} + +type MockCreateTopicsResponse struct { + t TestReporter +} + +func NewMockCreateTopicsResponse(t TestReporter) *MockCreateTopicsResponse { + return &MockCreateTopicsResponse{t: t} +} + +func (mr *MockCreateTopicsResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*CreateTopicsRequest) + res := &CreateTopicsResponse{ + Version: req.Version, + } + res.TopicErrors = make(map[string]*TopicError) + + for topic := range req.TopicDetails { + if res.Version >= 1 && strings.HasPrefix(topic, "_") { + msg := "insufficient permissions to create topic with reserved prefix" + res.TopicErrors[topic] = &TopicError{ + Err: ErrTopicAuthorizationFailed, + ErrMsg: &msg, + } + continue + 
} + res.TopicErrors[topic] = &TopicError{Err: ErrNoError} + } + return res +} + +type MockDeleteTopicsResponse struct { + t TestReporter +} + +func NewMockDeleteTopicsResponse(t TestReporter) *MockDeleteTopicsResponse { + return &MockDeleteTopicsResponse{t: t} +} + +func (mr *MockDeleteTopicsResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*DeleteTopicsRequest) + res := &DeleteTopicsResponse{} + res.TopicErrorCodes = make(map[string]KError) + + for _, topic := range req.Topics { + res.TopicErrorCodes[topic] = ErrNoError + } + res.Version = req.Version + return res +} + +type MockCreatePartitionsResponse struct { + t TestReporter +} + +func NewMockCreatePartitionsResponse(t TestReporter) *MockCreatePartitionsResponse { + return &MockCreatePartitionsResponse{t: t} +} + +func (mr *MockCreatePartitionsResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*CreatePartitionsRequest) + res := &CreatePartitionsResponse{} + res.TopicPartitionErrors = make(map[string]*TopicPartitionError) + + for topic := range req.TopicPartitions { + if strings.HasPrefix(topic, "_") { + msg := "insufficient permissions to create partition on topic with reserved prefix" + res.TopicPartitionErrors[topic] = &TopicPartitionError{ + Err: ErrTopicAuthorizationFailed, + ErrMsg: &msg, + } + continue + } + res.TopicPartitionErrors[topic] = &TopicPartitionError{Err: ErrNoError} + } + return res +} + +type MockAlterPartitionReassignmentsResponse struct { + t TestReporter +} + +func NewMockAlterPartitionReassignmentsResponse(t TestReporter) *MockAlterPartitionReassignmentsResponse { + return &MockAlterPartitionReassignmentsResponse{t: t} +} + +func (mr *MockAlterPartitionReassignmentsResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*AlterPartitionReassignmentsRequest) + _ = req + res := &AlterPartitionReassignmentsResponse{} + return res +} + +type MockListPartitionReassignmentsResponse struct { + t TestReporter +} + +func NewMockListPartitionReassignmentsResponse(t TestReporter) *MockListPartitionReassignmentsResponse { + return &MockListPartitionReassignmentsResponse{t: t} +} + +func (mr *MockListPartitionReassignmentsResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*ListPartitionReassignmentsRequest) + _ = req + res := &ListPartitionReassignmentsResponse{} + + for topic, partitions := range req.blocks { + for _, partition := range partitions { + res.AddBlock(topic, partition, []int32{0}, []int32{1}, []int32{2}) + } + } + + return res +} + +type MockDeleteRecordsResponse struct { + t TestReporter +} + +func NewMockDeleteRecordsResponse(t TestReporter) *MockDeleteRecordsResponse { + return &MockDeleteRecordsResponse{t: t} +} + +func (mr *MockDeleteRecordsResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*DeleteRecordsRequest) + res := &DeleteRecordsResponse{} + res.Topics = make(map[string]*DeleteRecordsResponseTopic) + + for topic, deleteRecordRequestTopic := range req.Topics { + partitions := make(map[int32]*DeleteRecordsResponsePartition) + for partition := range deleteRecordRequestTopic.PartitionOffsets { + partitions[partition] = &DeleteRecordsResponsePartition{Err: ErrNoError} + } + res.Topics[topic] = &DeleteRecordsResponseTopic{Partitions: partitions} + } + return res +} + +type MockDescribeConfigsResponse struct { + t TestReporter +} + +func NewMockDescribeConfigsResponse(t TestReporter) *MockDescribeConfigsResponse { + return &MockDescribeConfigsResponse{t: t} +} + +func (mr 
*MockDescribeConfigsResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*DescribeConfigsRequest) + res := &DescribeConfigsResponse{ + Version: req.Version, + } + + includeSynonyms := req.Version > 0 + includeSource := req.Version > 0 + + for _, r := range req.Resources { + var configEntries []*ConfigEntry + switch r.Type { + case BrokerResource: + configEntries = append(configEntries, + &ConfigEntry{ + Name: "min.insync.replicas", + Value: "2", + ReadOnly: false, + Default: false, + }, + ) + res.Resources = append(res.Resources, &ResourceResponse{ + Name: r.Name, + Configs: configEntries, + }) + case BrokerLoggerResource: + configEntries = append(configEntries, + &ConfigEntry{ + Name: "kafka.controller.KafkaController", + Value: "DEBUG", + ReadOnly: false, + Default: false, + }, + ) + res.Resources = append(res.Resources, &ResourceResponse{ + Name: r.Name, + Configs: configEntries, + }) + case TopicResource: + maxMessageBytes := &ConfigEntry{ + Name: "max.message.bytes", + Value: "1000000", + ReadOnly: false, + Default: !includeSource, + Sensitive: false, + } + if includeSource { + maxMessageBytes.Source = SourceDefault + } + if includeSynonyms { + maxMessageBytes.Synonyms = []*ConfigSynonym{ + { + ConfigName: "max.message.bytes", + ConfigValue: "500000", + }, + } + } + retentionMs := &ConfigEntry{ + Name: "retention.ms", + Value: "5000", + ReadOnly: false, + Default: false, + Sensitive: false, + } + if includeSynonyms { + retentionMs.Synonyms = []*ConfigSynonym{ + { + ConfigName: "log.retention.ms", + ConfigValue: "2500", + }, + } + } + password := &ConfigEntry{ + Name: "password", + Value: "12345", + ReadOnly: false, + Default: false, + Sensitive: true, + } + configEntries = append( + configEntries, maxMessageBytes, retentionMs, password) + res.Resources = append(res.Resources, &ResourceResponse{ + Name: r.Name, + Configs: configEntries, + }) + } + } + return res +} + +type MockDescribeConfigsResponseWithErrorCode struct { + t TestReporter +} + +func NewMockDescribeConfigsResponseWithErrorCode(t TestReporter) *MockDescribeConfigsResponseWithErrorCode { + return &MockDescribeConfigsResponseWithErrorCode{t: t} +} + +func (mr *MockDescribeConfigsResponseWithErrorCode) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*DescribeConfigsRequest) + res := &DescribeConfigsResponse{ + Version: req.Version, + } + + for _, r := range req.Resources { + res.Resources = append(res.Resources, &ResourceResponse{ + Name: r.Name, + Type: r.Type, + ErrorCode: 83, + ErrorMsg: "", + }) + } + return res +} + +type MockAlterConfigsResponse struct { + t TestReporter +} + +func NewMockAlterConfigsResponse(t TestReporter) *MockAlterConfigsResponse { + return &MockAlterConfigsResponse{t: t} +} + +func (mr *MockAlterConfigsResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*AlterConfigsRequest) + res := &AlterConfigsResponse{} + + for _, r := range req.Resources { + res.Resources = append(res.Resources, &AlterConfigsResourceResponse{ + Name: r.Name, + Type: r.Type, + ErrorMsg: "", + }) + } + return res +} + +type MockAlterConfigsResponseWithErrorCode struct { + t TestReporter +} + +func NewMockAlterConfigsResponseWithErrorCode(t TestReporter) *MockAlterConfigsResponseWithErrorCode { + return &MockAlterConfigsResponseWithErrorCode{t: t} +} + +func (mr *MockAlterConfigsResponseWithErrorCode) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*AlterConfigsRequest) + res := &AlterConfigsResponse{} + + for _, r := range req.Resources 
{ + res.Resources = append(res.Resources, &AlterConfigsResourceResponse{ + Name: r.Name, + Type: r.Type, + ErrorCode: 83, + ErrorMsg: "", + }) + } + return res +} + +type MockCreateAclsResponse struct { + t TestReporter +} + +func NewMockCreateAclsResponse(t TestReporter) *MockCreateAclsResponse { + return &MockCreateAclsResponse{t: t} +} + +func (mr *MockCreateAclsResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*CreateAclsRequest) + res := &CreateAclsResponse{} + + for range req.AclCreations { + res.AclCreationResponses = append(res.AclCreationResponses, &AclCreationResponse{Err: ErrNoError}) + } + return res +} + +type MockListAclsResponse struct { + t TestReporter +} + +func NewMockListAclsResponse(t TestReporter) *MockListAclsResponse { + return &MockListAclsResponse{t: t} +} + +func (mr *MockListAclsResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*DescribeAclsRequest) + res := &DescribeAclsResponse{} + res.Err = ErrNoError + acl := &ResourceAcls{} + if req.ResourceName != nil { + acl.Resource.ResourceName = *req.ResourceName + } + acl.Resource.ResourcePatternType = req.ResourcePatternTypeFilter + acl.Resource.ResourceType = req.ResourceType + + host := "*" + if req.Host != nil { + host = *req.Host + } + + principal := "User:test" + if req.Principal != nil { + principal = *req.Principal + } + + permissionType := req.PermissionType + if permissionType == AclPermissionAny { + permissionType = AclPermissionAllow + } + + acl.Acls = append(acl.Acls, &Acl{Operation: req.Operation, PermissionType: permissionType, Host: host, Principal: principal}) + res.ResourceAcls = append(res.ResourceAcls, acl) + res.Version = int16(req.Version) + return res +} + +type MockSaslAuthenticateResponse struct { + t TestReporter + kerror KError + saslAuthBytes []byte +} + +func NewMockSaslAuthenticateResponse(t TestReporter) *MockSaslAuthenticateResponse { + return &MockSaslAuthenticateResponse{t: t} +} + +func (msar *MockSaslAuthenticateResponse) For(reqBody versionedDecoder) encoderWithHeader { + res := &SaslAuthenticateResponse{} + res.Err = msar.kerror + res.SaslAuthBytes = msar.saslAuthBytes + return res +} + +func (msar *MockSaslAuthenticateResponse) SetError(kerror KError) *MockSaslAuthenticateResponse { + msar.kerror = kerror + return msar +} + +func (msar *MockSaslAuthenticateResponse) SetAuthBytes(saslAuthBytes []byte) *MockSaslAuthenticateResponse { + msar.saslAuthBytes = saslAuthBytes + return msar +} + +type MockDeleteAclsResponse struct { + t TestReporter +} + +type MockSaslHandshakeResponse struct { + enabledMechanisms []string + kerror KError + t TestReporter +} + +func NewMockSaslHandshakeResponse(t TestReporter) *MockSaslHandshakeResponse { + return &MockSaslHandshakeResponse{t: t} +} + +func (mshr *MockSaslHandshakeResponse) For(reqBody versionedDecoder) encoderWithHeader { + res := &SaslHandshakeResponse{} + res.Err = mshr.kerror + res.EnabledMechanisms = mshr.enabledMechanisms + return res +} + +func (mshr *MockSaslHandshakeResponse) SetError(kerror KError) *MockSaslHandshakeResponse { + mshr.kerror = kerror + return mshr +} + +func (mshr *MockSaslHandshakeResponse) SetEnabledMechanisms(enabledMechanisms []string) *MockSaslHandshakeResponse { + mshr.enabledMechanisms = enabledMechanisms + return mshr +} + +func NewMockDeleteAclsResponse(t TestReporter) *MockDeleteAclsResponse { + return &MockDeleteAclsResponse{t: t} +} + +func (mr *MockDeleteAclsResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := 
reqBody.(*DeleteAclsRequest) + res := &DeleteAclsResponse{} + + for range req.Filters { + response := &FilterResponse{Err: ErrNoError} + response.MatchingAcls = append(response.MatchingAcls, &MatchingAcl{Err: ErrNoError}) + res.FilterResponses = append(res.FilterResponses, response) + } + res.Version = int16(req.Version) + return res +} + +type MockDeleteGroupsResponse struct { + deletedGroups []string +} + +func NewMockDeleteGroupsRequest(t TestReporter) *MockDeleteGroupsResponse { + return &MockDeleteGroupsResponse{} +} + +func (m *MockDeleteGroupsResponse) SetDeletedGroups(groups []string) *MockDeleteGroupsResponse { + m.deletedGroups = groups + return m +} + +func (m *MockDeleteGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader { + resp := &DeleteGroupsResponse{ + GroupErrorCodes: map[string]KError{}, + } + for _, group := range m.deletedGroups { + resp.GroupErrorCodes[group] = ErrNoError + } + return resp +} + +type MockJoinGroupResponse struct { + t TestReporter + + ThrottleTime int32 + Err KError + GenerationId int32 + GroupProtocol string + LeaderId string + MemberId string + Members map[string][]byte +} + +func NewMockJoinGroupResponse(t TestReporter) *MockJoinGroupResponse { + return &MockJoinGroupResponse{ + t: t, + Members: make(map[string][]byte), + } +} + +func (m *MockJoinGroupResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*JoinGroupRequest) + resp := &JoinGroupResponse{ + Version: req.Version, + ThrottleTime: m.ThrottleTime, + Err: m.Err, + GenerationId: m.GenerationId, + GroupProtocol: m.GroupProtocol, + LeaderId: m.LeaderId, + MemberId: m.MemberId, + Members: m.Members, + } + return resp +} + +func (m *MockJoinGroupResponse) SetThrottleTime(t int32) *MockJoinGroupResponse { + m.ThrottleTime = t + return m +} + +func (m *MockJoinGroupResponse) SetError(kerr KError) *MockJoinGroupResponse { + m.Err = kerr + return m +} + +func (m *MockJoinGroupResponse) SetGenerationId(id int32) *MockJoinGroupResponse { + m.GenerationId = id + return m +} + +func (m *MockJoinGroupResponse) SetGroupProtocol(proto string) *MockJoinGroupResponse { + m.GroupProtocol = proto + return m +} + +func (m *MockJoinGroupResponse) SetLeaderId(id string) *MockJoinGroupResponse { + m.LeaderId = id + return m +} + +func (m *MockJoinGroupResponse) SetMemberId(id string) *MockJoinGroupResponse { + m.MemberId = id + return m +} + +func (m *MockJoinGroupResponse) SetMember(id string, meta *ConsumerGroupMemberMetadata) *MockJoinGroupResponse { + bin, err := encode(meta, nil) + if err != nil { + panic(fmt.Sprintf("error encoding member metadata: %v", err)) + } + m.Members[id] = bin + return m +} + +type MockLeaveGroupResponse struct { + t TestReporter + + Err KError +} + +func NewMockLeaveGroupResponse(t TestReporter) *MockLeaveGroupResponse { + return &MockLeaveGroupResponse{t: t} +} + +func (m *MockLeaveGroupResponse) For(reqBody versionedDecoder) encoderWithHeader { + resp := &LeaveGroupResponse{ + Err: m.Err, + } + return resp +} + +func (m *MockLeaveGroupResponse) SetError(kerr KError) *MockLeaveGroupResponse { + m.Err = kerr + return m +} + +type MockSyncGroupResponse struct { + t TestReporter + + Err KError + MemberAssignment []byte +} + +func NewMockSyncGroupResponse(t TestReporter) *MockSyncGroupResponse { + return &MockSyncGroupResponse{t: t} +} + +func (m *MockSyncGroupResponse) For(reqBody versionedDecoder) encoderWithHeader { + resp := &SyncGroupResponse{ + Err: m.Err, + MemberAssignment: m.MemberAssignment, + } + return resp +} + +func (m 
*MockSyncGroupResponse) SetError(kerr KError) *MockSyncGroupResponse { + m.Err = kerr + return m +} + +func (m *MockSyncGroupResponse) SetMemberAssignment(assignment *ConsumerGroupMemberAssignment) *MockSyncGroupResponse { + bin, err := encode(assignment, nil) + if err != nil { + panic(fmt.Sprintf("error encoding member assignment: %v", err)) + } + m.MemberAssignment = bin + return m +} + +type MockHeartbeatResponse struct { + t TestReporter + + Err KError +} + +func NewMockHeartbeatResponse(t TestReporter) *MockHeartbeatResponse { + return &MockHeartbeatResponse{t: t} +} + +func (m *MockHeartbeatResponse) For(reqBody versionedDecoder) encoderWithHeader { + resp := &HeartbeatResponse{} + return resp +} + +func (m *MockHeartbeatResponse) SetError(kerr KError) *MockHeartbeatResponse { + m.Err = kerr + return m +} + +type MockDescribeLogDirsResponse struct { + t TestReporter + logDirs []DescribeLogDirsResponseDirMetadata +} + +func NewMockDescribeLogDirsResponse(t TestReporter) *MockDescribeLogDirsResponse { + return &MockDescribeLogDirsResponse{t: t} +} + +func (m *MockDescribeLogDirsResponse) SetLogDirs(logDirPath string, topicPartitions map[string]int) *MockDescribeLogDirsResponse { + var topics []DescribeLogDirsResponseTopic + for topic := range topicPartitions { + var partitions []DescribeLogDirsResponsePartition + for i := 0; i < topicPartitions[topic]; i++ { + partitions = append(partitions, DescribeLogDirsResponsePartition{ + PartitionID: int32(i), + IsTemporary: false, + OffsetLag: int64(0), + Size: int64(1234), + }) + } + topics = append(topics, DescribeLogDirsResponseTopic{ + Topic: topic, + Partitions: partitions, + }) + } + logDir := DescribeLogDirsResponseDirMetadata{ + ErrorCode: ErrNoError, + Path: logDirPath, + Topics: topics, + } + m.logDirs = []DescribeLogDirsResponseDirMetadata{logDir} + return m +} + +func (m *MockDescribeLogDirsResponse) For(reqBody versionedDecoder) encoderWithHeader { + resp := &DescribeLogDirsResponse{ + LogDirs: m.logDirs, + } + return resp +} diff --git a/vendor/github.com/Shopify/sarama/offset_commit_request.go b/vendor/github.com/Shopify/sarama/offset_commit_request.go new file mode 100644 index 00000000000..9931cade512 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/offset_commit_request.go @@ -0,0 +1,214 @@ +package sarama + +import "errors" + +// ReceiveTime is a special value for the timestamp field of Offset Commit Requests which +// tells the broker to set the timestamp to the time at which the request was received. +// The timestamp is only used if message version 1 is used, which requires kafka 0.8.2. +const ReceiveTime int64 = -1 + +// GroupGenerationUndefined is a special value for the group generation field of +// Offset Commit Requests that should be used when a consumer group does not rely +// on Kafka for partition management. 
+const GroupGenerationUndefined = -1 + +type offsetCommitRequestBlock struct { + offset int64 + timestamp int64 + metadata string +} + +func (b *offsetCommitRequestBlock) encode(pe packetEncoder, version int16) error { + pe.putInt64(b.offset) + if version == 1 { + pe.putInt64(b.timestamp) + } else if b.timestamp != 0 { + Logger.Println("Non-zero timestamp specified for OffsetCommitRequest not v1, it will be ignored") + } + + return pe.putString(b.metadata) +} + +func (b *offsetCommitRequestBlock) decode(pd packetDecoder, version int16) (err error) { + if b.offset, err = pd.getInt64(); err != nil { + return err + } + if version == 1 { + if b.timestamp, err = pd.getInt64(); err != nil { + return err + } + } + b.metadata, err = pd.getString() + return err +} + +type OffsetCommitRequest struct { + ConsumerGroup string + ConsumerGroupGeneration int32 // v1 or later + ConsumerID string // v1 or later + RetentionTime int64 // v2 or later + + // Version can be: + // - 0 (kafka 0.8.1 and later) + // - 1 (kafka 0.8.2 and later) + // - 2 (kafka 0.9.0 and later) + // - 3 (kafka 0.11.0 and later) + // - 4 (kafka 2.0.0 and later) + Version int16 + blocks map[string]map[int32]*offsetCommitRequestBlock +} + +func (r *OffsetCommitRequest) encode(pe packetEncoder) error { + if r.Version < 0 || r.Version > 4 { + return PacketEncodingError{"invalid or unsupported OffsetCommitRequest version field"} + } + + if err := pe.putString(r.ConsumerGroup); err != nil { + return err + } + + if r.Version >= 1 { + pe.putInt32(r.ConsumerGroupGeneration) + if err := pe.putString(r.ConsumerID); err != nil { + return err + } + } else { + if r.ConsumerGroupGeneration != 0 { + Logger.Println("Non-zero ConsumerGroupGeneration specified for OffsetCommitRequest v0, it will be ignored") + } + if r.ConsumerID != "" { + Logger.Println("Non-empty ConsumerID specified for OffsetCommitRequest v0, it will be ignored") + } + } + + if r.Version >= 2 { + pe.putInt64(r.RetentionTime) + } else if r.RetentionTime != 0 { + Logger.Println("Non-zero RetentionTime specified for OffsetCommitRequest version <2, it will be ignored") + } + + if err := pe.putArrayLength(len(r.blocks)); err != nil { + return err + } + for topic, partitions := range r.blocks { + if err := pe.putString(topic); err != nil { + return err + } + if err := pe.putArrayLength(len(partitions)); err != nil { + return err + } + for partition, block := range partitions { + pe.putInt32(partition) + if err := block.encode(pe, r.Version); err != nil { + return err + } + } + } + return nil +} + +func (r *OffsetCommitRequest) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + if r.ConsumerGroup, err = pd.getString(); err != nil { + return err + } + + if r.Version >= 1 { + if r.ConsumerGroupGeneration, err = pd.getInt32(); err != nil { + return err + } + if r.ConsumerID, err = pd.getString(); err != nil { + return err + } + } + + if r.Version >= 2 { + if r.RetentionTime, err = pd.getInt64(); err != nil { + return err + } + } + + topicCount, err := pd.getArrayLength() + if err != nil { + return err + } + if topicCount == 0 { + return nil + } + r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock) + for i := 0; i < topicCount; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + partitionCount, err := pd.getArrayLength() + if err != nil { + return err + } + r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock) + for j := 0; j < partitionCount; j++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + block 
:= &offsetCommitRequestBlock{} + if err := block.decode(pd, r.Version); err != nil { + return err + } + r.blocks[topic][partition] = block + } + } + return nil +} + +func (r *OffsetCommitRequest) key() int16 { + return 8 +} + +func (r *OffsetCommitRequest) version() int16 { + return r.Version +} + +func (r *OffsetCommitRequest) headerVersion() int16 { + return 1 +} + +func (r *OffsetCommitRequest) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V0_8_2_0 + case 2: + return V0_9_0_0 + case 3: + return V0_11_0_0 + case 4: + return V2_0_0_0 + default: + return MinVersion + } +} + +func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset int64, timestamp int64, metadata string) { + if r.blocks == nil { + r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock) + } + + if r.blocks[topic] == nil { + r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock) + } + + r.blocks[topic][partitionID] = &offsetCommitRequestBlock{offset, timestamp, metadata} +} + +func (r *OffsetCommitRequest) Offset(topic string, partitionID int32) (int64, string, error) { + partitions := r.blocks[topic] + if partitions == nil { + return 0, "", errors.New("no such offset") + } + block := partitions[partitionID] + if block == nil { + return 0, "", errors.New("no such offset") + } + return block.offset, block.metadata, nil +} diff --git a/vendor/github.com/Shopify/sarama/offset_commit_response.go b/vendor/github.com/Shopify/sarama/offset_commit_response.go new file mode 100644 index 00000000000..342260ef599 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/offset_commit_response.go @@ -0,0 +1,114 @@ +package sarama + +type OffsetCommitResponse struct { + Version int16 + ThrottleTimeMs int32 + Errors map[string]map[int32]KError +} + +func (r *OffsetCommitResponse) AddError(topic string, partition int32, kerror KError) { + if r.Errors == nil { + r.Errors = make(map[string]map[int32]KError) + } + partitions := r.Errors[topic] + if partitions == nil { + partitions = make(map[int32]KError) + r.Errors[topic] = partitions + } + partitions[partition] = kerror +} + +func (r *OffsetCommitResponse) encode(pe packetEncoder) error { + if r.Version >= 3 { + pe.putInt32(r.ThrottleTimeMs) + } + if err := pe.putArrayLength(len(r.Errors)); err != nil { + return err + } + for topic, partitions := range r.Errors { + if err := pe.putString(topic); err != nil { + return err + } + if err := pe.putArrayLength(len(partitions)); err != nil { + return err + } + for partition, kerror := range partitions { + pe.putInt32(partition) + pe.putInt16(int16(kerror)) + } + } + return nil +} + +func (r *OffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + if version >= 3 { + r.ThrottleTimeMs, err = pd.getInt32() + if err != nil { + return err + } + } + + numTopics, err := pd.getArrayLength() + if err != nil || numTopics == 0 { + return err + } + + r.Errors = make(map[string]map[int32]KError, numTopics) + for i := 0; i < numTopics; i++ { + name, err := pd.getString() + if err != nil { + return err + } + + numErrors, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Errors[name] = make(map[int32]KError, numErrors) + + for j := 0; j < numErrors; j++ { + id, err := pd.getInt32() + if err != nil { + return err + } + + tmp, err := pd.getInt16() + if err != nil { + return err + } + r.Errors[name][id] = KError(tmp) + } + } + + return nil +} + +func (r *OffsetCommitResponse) key() int16 { + return 8 +} + +func (r *OffsetCommitResponse) version() 
int16 { + return r.Version +} + +func (r *OffsetCommitResponse) headerVersion() int16 { + return 0 +} + +func (r *OffsetCommitResponse) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V0_8_2_0 + case 2: + return V0_9_0_0 + case 3: + return V0_11_0_0 + case 4: + return V2_0_0_0 + default: + return MinVersion + } +} diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_request.go b/vendor/github.com/Shopify/sarama/offset_fetch_request.go new file mode 100644 index 00000000000..7e147eb60c1 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/offset_fetch_request.go @@ -0,0 +1,207 @@ +package sarama + +type OffsetFetchRequest struct { + Version int16 + ConsumerGroup string + RequireStable bool // requires v7+ + partitions map[string][]int32 +} + +func (r *OffsetFetchRequest) encode(pe packetEncoder) (err error) { + if r.Version < 0 || r.Version > 7 { + return PacketEncodingError{"invalid or unsupported OffsetFetchRequest version field"} + } + + isFlexible := r.Version >= 6 + + if isFlexible { + err = pe.putCompactString(r.ConsumerGroup) + } else { + err = pe.putString(r.ConsumerGroup) + } + if err != nil { + return err + } + + if isFlexible { + if r.partitions == nil { + pe.putUVarint(0) + } else { + pe.putCompactArrayLength(len(r.partitions)) + } + } else { + if r.partitions == nil && r.Version >= 2 { + pe.putInt32(-1) + } else { + if err = pe.putArrayLength(len(r.partitions)); err != nil { + return err + } + } + } + + for topic, partitions := range r.partitions { + if isFlexible { + err = pe.putCompactString(topic) + } else { + err = pe.putString(topic) + } + if err != nil { + return err + } + + // + + if isFlexible { + err = pe.putCompactInt32Array(partitions) + } else { + err = pe.putInt32Array(partitions) + } + if err != nil { + return err + } + + if isFlexible { + pe.putEmptyTaggedFieldArray() + } + } + + if r.RequireStable && r.Version < 7 { + return PacketEncodingError{"requireStable is not supported. 
use version 7 or later"} + } + + if r.Version >= 7 { + pe.putBool(r.RequireStable) + } + + if isFlexible { + pe.putEmptyTaggedFieldArray() + } + + return nil +} + +func (r *OffsetFetchRequest) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + isFlexible := r.Version >= 6 + if isFlexible { + r.ConsumerGroup, err = pd.getCompactString() + } else { + r.ConsumerGroup, err = pd.getString() + } + if err != nil { + return err + } + + var partitionCount int + + if isFlexible { + partitionCount, err = pd.getCompactArrayLength() + } else { + partitionCount, err = pd.getArrayLength() + } + if err != nil { + return err + } + + if (partitionCount == 0 && version < 2) || partitionCount < 0 { + return nil + } + + r.partitions = make(map[string][]int32, partitionCount) + for i := 0; i < partitionCount; i++ { + var topic string + if isFlexible { + topic, err = pd.getCompactString() + } else { + topic, err = pd.getString() + } + if err != nil { + return err + } + + var partitions []int32 + if isFlexible { + partitions, err = pd.getCompactInt32Array() + } else { + partitions, err = pd.getInt32Array() + } + if err != nil { + return err + } + if isFlexible { + _, err = pd.getEmptyTaggedFieldArray() + if err != nil { + return err + } + } + + r.partitions[topic] = partitions + } + + if r.Version >= 7 { + r.RequireStable, err = pd.getBool() + if err != nil { + return err + } + } + + if isFlexible { + _, err = pd.getEmptyTaggedFieldArray() + if err != nil { + return err + } + } + + return nil +} + +func (r *OffsetFetchRequest) key() int16 { + return 9 +} + +func (r *OffsetFetchRequest) version() int16 { + return r.Version +} + +func (r *OffsetFetchRequest) headerVersion() int16 { + if r.Version >= 6 { + return 2 + } + + return 1 +} + +func (r *OffsetFetchRequest) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V0_8_2_0 + case 2: + return V0_10_2_0 + case 3: + return V0_11_0_0 + case 4: + return V2_0_0_0 + case 5: + return V2_1_0_0 + case 6: + return V2_4_0_0 + case 7: + return V2_5_0_0 + default: + return MinVersion + } +} + +func (r *OffsetFetchRequest) ZeroPartitions() { + if r.partitions == nil && r.Version >= 2 { + r.partitions = make(map[string][]int32) + } +} + +func (r *OffsetFetchRequest) AddPartition(topic string, partitionID int32) { + if r.partitions == nil { + r.partitions = make(map[string][]int32) + } + + r.partitions[topic] = append(r.partitions[topic], partitionID) +} diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_response.go b/vendor/github.com/Shopify/sarama/offset_fetch_response.go new file mode 100644 index 00000000000..19449220f28 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/offset_fetch_response.go @@ -0,0 +1,280 @@ +package sarama + +type OffsetFetchResponseBlock struct { + Offset int64 + LeaderEpoch int32 + Metadata string + Err KError +} + +func (b *OffsetFetchResponseBlock) decode(pd packetDecoder, version int16) (err error) { + isFlexible := version >= 6 + + b.Offset, err = pd.getInt64() + if err != nil { + return err + } + + if version >= 5 { + b.LeaderEpoch, err = pd.getInt32() + if err != nil { + return err + } + } + + if isFlexible { + b.Metadata, err = pd.getCompactString() + } else { + b.Metadata, err = pd.getString() + } + if err != nil { + return err + } + + tmp, err := pd.getInt16() + if err != nil { + return err + } + b.Err = KError(tmp) + + if isFlexible { + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + + return nil +} + +func (b *OffsetFetchResponseBlock) encode(pe 
packetEncoder, version int16) (err error) { + isFlexible := version >= 6 + pe.putInt64(b.Offset) + + if version >= 5 { + pe.putInt32(b.LeaderEpoch) + } + if isFlexible { + err = pe.putCompactString(b.Metadata) + } else { + err = pe.putString(b.Metadata) + } + if err != nil { + return err + } + + pe.putInt16(int16(b.Err)) + + if isFlexible { + pe.putEmptyTaggedFieldArray() + } + + return nil +} + +type OffsetFetchResponse struct { + Version int16 + ThrottleTimeMs int32 + Blocks map[string]map[int32]*OffsetFetchResponseBlock + Err KError +} + +func (r *OffsetFetchResponse) encode(pe packetEncoder) (err error) { + isFlexible := r.Version >= 6 + + if r.Version >= 3 { + pe.putInt32(r.ThrottleTimeMs) + } + if isFlexible { + pe.putCompactArrayLength(len(r.Blocks)) + } else { + err = pe.putArrayLength(len(r.Blocks)) + } + if err != nil { + return err + } + + for topic, partitions := range r.Blocks { + if isFlexible { + err = pe.putCompactString(topic) + } else { + err = pe.putString(topic) + } + if err != nil { + return err + } + + if isFlexible { + pe.putCompactArrayLength(len(partitions)) + } else { + err = pe.putArrayLength(len(partitions)) + } + if err != nil { + return err + } + for partition, block := range partitions { + pe.putInt32(partition) + if err := block.encode(pe, r.Version); err != nil { + return err + } + } + if isFlexible { + pe.putEmptyTaggedFieldArray() + } + } + if r.Version >= 2 { + pe.putInt16(int16(r.Err)) + } + if isFlexible { + pe.putEmptyTaggedFieldArray() + } + return nil +} + +func (r *OffsetFetchResponse) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + isFlexible := version >= 6 + + if version >= 3 { + r.ThrottleTimeMs, err = pd.getInt32() + if err != nil { + return err + } + } + + var numTopics int + if isFlexible { + numTopics, err = pd.getCompactArrayLength() + } else { + numTopics, err = pd.getArrayLength() + } + if err != nil { + return err + } + + if numTopics > 0 { + r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock, numTopics) + for i := 0; i < numTopics; i++ { + var name string + if isFlexible { + name, err = pd.getCompactString() + } else { + name, err = pd.getString() + } + if err != nil { + return err + } + + var numBlocks int + if isFlexible { + numBlocks, err = pd.getCompactArrayLength() + } else { + numBlocks, err = pd.getArrayLength() + } + if err != nil { + return err + } + + r.Blocks[name] = nil + if numBlocks > 0 { + r.Blocks[name] = make(map[int32]*OffsetFetchResponseBlock, numBlocks) + } + for j := 0; j < numBlocks; j++ { + id, err := pd.getInt32() + if err != nil { + return err + } + + block := new(OffsetFetchResponseBlock) + err = block.decode(pd, version) + if err != nil { + return err + } + + r.Blocks[name][id] = block + } + + if isFlexible { + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + } + } + + if version >= 2 { + kerr, err := pd.getInt16() + if err != nil { + return err + } + r.Err = KError(kerr) + } + + if isFlexible { + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + + return nil +} + +func (r *OffsetFetchResponse) key() int16 { + return 9 +} + +func (r *OffsetFetchResponse) version() int16 { + return r.Version +} + +func (r *OffsetFetchResponse) headerVersion() int16 { + if r.Version >= 6 { + return 1 + } + + return 0 +} + +func (r *OffsetFetchResponse) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V0_8_2_0 + case 2: + return V0_10_2_0 + case 3: + return V0_11_0_0 + case 4: + return V2_0_0_0 + case 5: 
+ return V2_1_0_0 + case 6: + return V2_4_0_0 + case 7: + return V2_5_0_0 + default: + return MinVersion + } +} + +func (r *OffsetFetchResponse) GetBlock(topic string, partition int32) *OffsetFetchResponseBlock { + if r.Blocks == nil { + return nil + } + + if r.Blocks[topic] == nil { + return nil + } + + return r.Blocks[topic][partition] +} + +func (r *OffsetFetchResponse) AddBlock(topic string, partition int32, block *OffsetFetchResponseBlock) { + if r.Blocks == nil { + r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock) + } + partitions := r.Blocks[topic] + if partitions == nil { + partitions = make(map[int32]*OffsetFetchResponseBlock) + r.Blocks[topic] = partitions + } + partitions[partition] = block +} diff --git a/vendor/github.com/Shopify/sarama/offset_manager.go b/vendor/github.com/Shopify/sarama/offset_manager.go new file mode 100644 index 00000000000..4f480a08b90 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/offset_manager.go @@ -0,0 +1,593 @@ +package sarama + +import ( + "sync" + "time" +) + +// Offset Manager + +// OffsetManager uses Kafka to store and fetch consumed partition offsets. +type OffsetManager interface { + // ManagePartition creates a PartitionOffsetManager on the given topic/partition. + // It will return an error if this OffsetManager is already managing the given + // topic/partition. + ManagePartition(topic string, partition int32) (PartitionOffsetManager, error) + + // Close stops the OffsetManager from managing offsets. It is required to call + // this function before an OffsetManager object passes out of scope, as it + // will otherwise leak memory. You must call this after all the + // PartitionOffsetManagers are closed. + Close() error + + // Commit commits the offsets. This method can be used if AutoCommit.Enable is + // set to false. + Commit() +} + +type offsetManager struct { + client Client + conf *Config + group string + ticker *time.Ticker + + memberID string + generation int32 + + broker *Broker + brokerLock sync.RWMutex + + poms map[string]map[int32]*partitionOffsetManager + pomsLock sync.RWMutex + + closeOnce sync.Once + closing chan none + closed chan none +} + +// NewOffsetManagerFromClient creates a new OffsetManager from the given client. +// It is still necessary to call Close() on the underlying client when finished with the partition manager. 
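+//
+// A minimal usage sketch (illustrative only; it assumes an already-configured
+// Client named client and elides error handling):
+//
+//	om, _ := NewOffsetManagerFromClient("example-group", client)
+//	pom, _ := om.ManagePartition("example-topic", 0)
+//	offset, _ := pom.NextOffset() // offset of the next message to consume
+//	// ... consume the message at that offset ...
+//	pom.MarkOffset(offset+1, "") // mark the offset of the *next* message to read
+//	_ = pom.Close() // close all PartitionOffsetManagers first
+//	_ = om.Close()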
+func NewOffsetManagerFromClient(group string, client Client) (OffsetManager, error) { + return newOffsetManagerFromClient(group, "", GroupGenerationUndefined, client) +} + +func newOffsetManagerFromClient(group, memberID string, generation int32, client Client) (*offsetManager, error) { + // Check that we are not dealing with a closed Client before processing any other arguments + if client.Closed() { + return nil, ErrClosedClient + } + + conf := client.Config() + om := &offsetManager{ + client: client, + conf: conf, + group: group, + poms: make(map[string]map[int32]*partitionOffsetManager), + + memberID: memberID, + generation: generation, + + closing: make(chan none), + closed: make(chan none), + } + if conf.Consumer.Offsets.AutoCommit.Enable { + om.ticker = time.NewTicker(conf.Consumer.Offsets.AutoCommit.Interval) + go withRecover(om.mainLoop) + } + + return om, nil +} + +func (om *offsetManager) ManagePartition(topic string, partition int32) (PartitionOffsetManager, error) { + pom, err := om.newPartitionOffsetManager(topic, partition) + if err != nil { + return nil, err + } + + om.pomsLock.Lock() + defer om.pomsLock.Unlock() + + topicManagers := om.poms[topic] + if topicManagers == nil { + topicManagers = make(map[int32]*partitionOffsetManager) + om.poms[topic] = topicManagers + } + + if topicManagers[partition] != nil { + return nil, ConfigurationError("That topic/partition is already being managed") + } + + topicManagers[partition] = pom + return pom, nil +} + +func (om *offsetManager) Close() error { + om.closeOnce.Do(func() { + // exit the mainLoop + close(om.closing) + if om.conf.Consumer.Offsets.AutoCommit.Enable { + <-om.closed + } + + // mark all POMs as closed + om.asyncClosePOMs() + + // flush one last time + if om.conf.Consumer.Offsets.AutoCommit.Enable { + for attempt := 0; attempt <= om.conf.Consumer.Offsets.Retry.Max; attempt++ { + om.flushToBroker() + if om.releasePOMs(false) == 0 { + break + } + } + } + + om.releasePOMs(true) + om.brokerLock.Lock() + om.broker = nil + om.brokerLock.Unlock() + }) + return nil +} + +func (om *offsetManager) computeBackoff(retries int) time.Duration { + if om.conf.Metadata.Retry.BackoffFunc != nil { + return om.conf.Metadata.Retry.BackoffFunc(retries, om.conf.Metadata.Retry.Max) + } else { + return om.conf.Metadata.Retry.Backoff + } +} + +func (om *offsetManager) fetchInitialOffset(topic string, partition int32, retries int) (int64, string, error) { + broker, err := om.coordinator() + if err != nil { + if retries <= 0 { + return 0, "", err + } + return om.fetchInitialOffset(topic, partition, retries-1) + } + + req := new(OffsetFetchRequest) + req.Version = 1 + req.ConsumerGroup = om.group + req.AddPartition(topic, partition) + + resp, err := broker.FetchOffset(req) + if err != nil { + if retries <= 0 { + return 0, "", err + } + om.releaseCoordinator(broker) + return om.fetchInitialOffset(topic, partition, retries-1) + } + + block := resp.GetBlock(topic, partition) + if block == nil { + return 0, "", ErrIncompleteResponse + } + + switch block.Err { + case ErrNoError: + return block.Offset, block.Metadata, nil + case ErrNotCoordinatorForConsumer: + if retries <= 0 { + return 0, "", block.Err + } + om.releaseCoordinator(broker) + return om.fetchInitialOffset(topic, partition, retries-1) + case ErrOffsetsLoadInProgress: + if retries <= 0 { + return 0, "", block.Err + } + backoff := om.computeBackoff(retries) + select { + case <-om.closing: + return 0, "", block.Err + case <-time.After(backoff): + } + return om.fetchInitialOffset(topic, 
partition, retries-1) + default: + return 0, "", block.Err + } +} + +func (om *offsetManager) coordinator() (*Broker, error) { + om.brokerLock.RLock() + broker := om.broker + om.brokerLock.RUnlock() + + if broker != nil { + return broker, nil + } + + om.brokerLock.Lock() + defer om.brokerLock.Unlock() + + if broker := om.broker; broker != nil { + return broker, nil + } + + if err := om.client.RefreshCoordinator(om.group); err != nil { + return nil, err + } + + broker, err := om.client.Coordinator(om.group) + if err != nil { + return nil, err + } + + om.broker = broker + return broker, nil +} + +func (om *offsetManager) releaseCoordinator(b *Broker) { + om.brokerLock.Lock() + if om.broker == b { + om.broker = nil + } + om.brokerLock.Unlock() +} + +func (om *offsetManager) mainLoop() { + defer om.ticker.Stop() + defer close(om.closed) + + for { + select { + case <-om.ticker.C: + om.Commit() + case <-om.closing: + return + } + } +} + +func (om *offsetManager) Commit() { + om.flushToBroker() + om.releasePOMs(false) +} + +func (om *offsetManager) flushToBroker() { + req := om.constructRequest() + if req == nil { + return + } + + broker, err := om.coordinator() + if err != nil { + om.handleError(err) + return + } + + resp, err := broker.CommitOffset(req) + if err != nil { + om.handleError(err) + om.releaseCoordinator(broker) + _ = broker.Close() + return + } + + om.handleResponse(broker, req, resp) +} + +func (om *offsetManager) constructRequest() *OffsetCommitRequest { + var r *OffsetCommitRequest + var perPartitionTimestamp int64 + if om.conf.Consumer.Offsets.Retention == 0 { + perPartitionTimestamp = ReceiveTime + r = &OffsetCommitRequest{ + Version: 1, + ConsumerGroup: om.group, + ConsumerID: om.memberID, + ConsumerGroupGeneration: om.generation, + } + } else { + r = &OffsetCommitRequest{ + Version: 2, + RetentionTime: int64(om.conf.Consumer.Offsets.Retention / time.Millisecond), + ConsumerGroup: om.group, + ConsumerID: om.memberID, + ConsumerGroupGeneration: om.generation, + } + } + + om.pomsLock.RLock() + defer om.pomsLock.RUnlock() + + for _, topicManagers := range om.poms { + for _, pom := range topicManagers { + pom.lock.Lock() + if pom.dirty { + r.AddBlock(pom.topic, pom.partition, pom.offset, perPartitionTimestamp, pom.metadata) + } + pom.lock.Unlock() + } + } + + if len(r.blocks) > 0 { + return r + } + + return nil +} + +func (om *offsetManager) handleResponse(broker *Broker, req *OffsetCommitRequest, resp *OffsetCommitResponse) { + om.pomsLock.RLock() + defer om.pomsLock.RUnlock() + + for _, topicManagers := range om.poms { + for _, pom := range topicManagers { + if req.blocks[pom.topic] == nil || req.blocks[pom.topic][pom.partition] == nil { + continue + } + + var err KError + var ok bool + + if resp.Errors[pom.topic] == nil { + pom.handleError(ErrIncompleteResponse) + continue + } + if err, ok = resp.Errors[pom.topic][pom.partition]; !ok { + pom.handleError(ErrIncompleteResponse) + continue + } + + switch err { + case ErrNoError: + block := req.blocks[pom.topic][pom.partition] + pom.updateCommitted(block.offset, block.metadata) + case ErrNotLeaderForPartition, ErrLeaderNotAvailable, + ErrConsumerCoordinatorNotAvailable, ErrNotCoordinatorForConsumer: + // not a critical error, we just need to redispatch + om.releaseCoordinator(broker) + case ErrOffsetMetadataTooLarge, ErrInvalidCommitOffsetSize: + // nothing we can do about this, just tell the user and carry on + pom.handleError(err) + case ErrOffsetsLoadInProgress: + // nothing wrong but we didn't commit, we'll get it next time 
round + case ErrUnknownTopicOrPartition: + // let the user know *and* try redispatching - if topic-auto-create is + // enabled, redispatching should trigger a metadata req and create the + // topic; if not then re-dispatching won't help, but we've let the user + // know and it shouldn't hurt either (see https://github.com/Shopify/sarama/issues/706) + fallthrough + default: + // dunno, tell the user and try redispatching + pom.handleError(err) + om.releaseCoordinator(broker) + } + } + } +} + +func (om *offsetManager) handleError(err error) { + om.pomsLock.RLock() + defer om.pomsLock.RUnlock() + + for _, topicManagers := range om.poms { + for _, pom := range topicManagers { + pom.handleError(err) + } + } +} + +func (om *offsetManager) asyncClosePOMs() { + om.pomsLock.RLock() + defer om.pomsLock.RUnlock() + + for _, topicManagers := range om.poms { + for _, pom := range topicManagers { + pom.AsyncClose() + } + } +} + +// Releases/removes closed POMs once they are clean (or when forced) +func (om *offsetManager) releasePOMs(force bool) (remaining int) { + om.pomsLock.Lock() + defer om.pomsLock.Unlock() + + for topic, topicManagers := range om.poms { + for partition, pom := range topicManagers { + pom.lock.Lock() + releaseDue := pom.done && (force || !pom.dirty) + pom.lock.Unlock() + + if releaseDue { + pom.release() + + delete(om.poms[topic], partition) + if len(om.poms[topic]) == 0 { + delete(om.poms, topic) + } + } + } + remaining += len(om.poms[topic]) + } + return +} + +func (om *offsetManager) findPOM(topic string, partition int32) *partitionOffsetManager { + om.pomsLock.RLock() + defer om.pomsLock.RUnlock() + + if partitions, ok := om.poms[topic]; ok { + if pom, ok := partitions[partition]; ok { + return pom + } + } + return nil +} + +// Partition Offset Manager + +// PartitionOffsetManager uses Kafka to store and fetch consumed partition offsets. You MUST call Close() +// on a partition offset manager to avoid leaks, it will not be garbage-collected automatically when it passes +// out of scope. +type PartitionOffsetManager interface { + // NextOffset returns the next offset that should be consumed for the managed + // partition, accompanied by metadata which can be used to reconstruct the state + // of the partition consumer when it resumes. NextOffset() will return + // `config.Consumer.Offsets.Initial` and an empty metadata string if no offset + // was committed for this partition yet. + NextOffset() (int64, string) + + // MarkOffset marks the provided offset, alongside a metadata string + // that represents the state of the partition consumer at that point in time. The + // metadata string can be used by another consumer to restore that state, so it + // can resume consumption. + // + // To follow upstream conventions, you are expected to mark the offset of the + // next message to read, not the last message read. Thus, when calling `MarkOffset` + // you should typically add one to the offset of the last consumed message. + // + // Note: calling MarkOffset does not necessarily commit the offset to the backend + // store immediately for efficiency reasons, and it may never be committed if + // your application crashes. This means that you may end up processing the same + // message twice, and your processing should ideally be idempotent. + MarkOffset(offset int64, metadata string) + + // ResetOffset resets to the provided offset, alongside a metadata string that + // represents the state of the partition consumer at that point in time. 
Reset + // acts as a counterpart to MarkOffset, the difference being that it allows to + // reset an offset to an earlier or smaller value, where MarkOffset only + // allows incrementing the offset. cf MarkOffset for more details. + ResetOffset(offset int64, metadata string) + + // Errors returns a read channel of errors that occur during offset management, if + // enabled. By default, errors are logged and not returned over this channel. If + // you want to implement any custom error handling, set your config's + // Consumer.Return.Errors setting to true, and read from this channel. + Errors() <-chan *ConsumerError + + // AsyncClose initiates a shutdown of the PartitionOffsetManager. This method will + // return immediately, after which you should wait until the 'errors' channel has + // been drained and closed. It is required to call this function, or Close before + // a consumer object passes out of scope, as it will otherwise leak memory. You + // must call this before calling Close on the underlying client. + AsyncClose() + + // Close stops the PartitionOffsetManager from managing offsets. It is required to + // call this function (or AsyncClose) before a PartitionOffsetManager object + // passes out of scope, as it will otherwise leak memory. You must call this + // before calling Close on the underlying client. + Close() error +} + +type partitionOffsetManager struct { + parent *offsetManager + topic string + partition int32 + + lock sync.Mutex + offset int64 + metadata string + dirty bool + done bool + + releaseOnce sync.Once + errors chan *ConsumerError +} + +func (om *offsetManager) newPartitionOffsetManager(topic string, partition int32) (*partitionOffsetManager, error) { + offset, metadata, err := om.fetchInitialOffset(topic, partition, om.conf.Metadata.Retry.Max) + if err != nil { + return nil, err + } + + return &partitionOffsetManager{ + parent: om, + topic: topic, + partition: partition, + errors: make(chan *ConsumerError, om.conf.ChannelBufferSize), + offset: offset, + metadata: metadata, + }, nil +} + +func (pom *partitionOffsetManager) Errors() <-chan *ConsumerError { + return pom.errors +} + +func (pom *partitionOffsetManager) MarkOffset(offset int64, metadata string) { + pom.lock.Lock() + defer pom.lock.Unlock() + + if offset > pom.offset { + pom.offset = offset + pom.metadata = metadata + pom.dirty = true + } +} + +func (pom *partitionOffsetManager) ResetOffset(offset int64, metadata string) { + pom.lock.Lock() + defer pom.lock.Unlock() + + if offset <= pom.offset { + pom.offset = offset + pom.metadata = metadata + pom.dirty = true + } +} + +func (pom *partitionOffsetManager) updateCommitted(offset int64, metadata string) { + pom.lock.Lock() + defer pom.lock.Unlock() + + if pom.offset == offset && pom.metadata == metadata { + pom.dirty = false + } +} + +func (pom *partitionOffsetManager) NextOffset() (int64, string) { + pom.lock.Lock() + defer pom.lock.Unlock() + + if pom.offset >= 0 { + return pom.offset, pom.metadata + } + + return pom.parent.conf.Consumer.Offsets.Initial, "" +} + +func (pom *partitionOffsetManager) AsyncClose() { + pom.lock.Lock() + pom.done = true + pom.lock.Unlock() +} + +func (pom *partitionOffsetManager) Close() error { + pom.AsyncClose() + + var errors ConsumerErrors + for err := range pom.errors { + errors = append(errors, err) + } + + if len(errors) > 0 { + return errors + } + return nil +} + +func (pom *partitionOffsetManager) handleError(err error) { + cErr := &ConsumerError{ + Topic: pom.topic, + Partition: pom.partition, + Err: err, + } 
+ + if pom.parent.conf.Consumer.Return.Errors { + pom.errors <- cErr + } else { + Logger.Println(cErr) + } +} + +func (pom *partitionOffsetManager) release() { + pom.releaseOnce.Do(func() { + close(pom.errors) + }) +} diff --git a/vendor/github.com/Shopify/sarama/offset_request.go b/vendor/github.com/Shopify/sarama/offset_request.go new file mode 100644 index 00000000000..4c9ce4df552 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/offset_request.go @@ -0,0 +1,179 @@ +package sarama + +type offsetRequestBlock struct { + time int64 + maxOffsets int32 // Only used in version 0 +} + +func (b *offsetRequestBlock) encode(pe packetEncoder, version int16) error { + pe.putInt64(b.time) + if version == 0 { + pe.putInt32(b.maxOffsets) + } + + return nil +} + +func (b *offsetRequestBlock) decode(pd packetDecoder, version int16) (err error) { + if b.time, err = pd.getInt64(); err != nil { + return err + } + if version == 0 { + if b.maxOffsets, err = pd.getInt32(); err != nil { + return err + } + } + return nil +} + +type OffsetRequest struct { + Version int16 + IsolationLevel IsolationLevel + replicaID int32 + isReplicaIDSet bool + blocks map[string]map[int32]*offsetRequestBlock +} + +func (r *OffsetRequest) encode(pe packetEncoder) error { + if r.isReplicaIDSet { + pe.putInt32(r.replicaID) + } else { + // default replica ID is always -1 for clients + pe.putInt32(-1) + } + + if r.Version >= 2 { + pe.putBool(r.IsolationLevel == ReadCommitted) + } + + err := pe.putArrayLength(len(r.blocks)) + if err != nil { + return err + } + for topic, partitions := range r.blocks { + err = pe.putString(topic) + if err != nil { + return err + } + err = pe.putArrayLength(len(partitions)) + if err != nil { + return err + } + for partition, block := range partitions { + pe.putInt32(partition) + if err = block.encode(pe, r.Version); err != nil { + return err + } + } + } + return nil +} + +func (r *OffsetRequest) decode(pd packetDecoder, version int16) error { + r.Version = version + + replicaID, err := pd.getInt32() + if err != nil { + return err + } + if replicaID >= 0 { + r.SetReplicaID(replicaID) + } + + if r.Version >= 2 { + tmp, err := pd.getBool() + if err != nil { + return err + } + + r.IsolationLevel = ReadUncommitted + if tmp { + r.IsolationLevel = ReadCommitted + } + } + + blockCount, err := pd.getArrayLength() + if err != nil { + return err + } + if blockCount == 0 { + return nil + } + r.blocks = make(map[string]map[int32]*offsetRequestBlock) + for i := 0; i < blockCount; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + partitionCount, err := pd.getArrayLength() + if err != nil { + return err + } + r.blocks[topic] = make(map[int32]*offsetRequestBlock) + for j := 0; j < partitionCount; j++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + block := &offsetRequestBlock{} + if err := block.decode(pd, version); err != nil { + return err + } + r.blocks[topic][partition] = block + } + } + return nil +} + +func (r *OffsetRequest) key() int16 { + return 2 +} + +func (r *OffsetRequest) version() int16 { + return r.Version +} + +func (r *OffsetRequest) headerVersion() int16 { + return 1 +} + +func (r *OffsetRequest) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V0_10_1_0 + case 2: + return V0_11_0_0 + default: + return MinVersion + } +} + +func (r *OffsetRequest) SetReplicaID(id int32) { + r.replicaID = id + r.isReplicaIDSet = true +} + +func (r *OffsetRequest) ReplicaID() int32 { + if r.isReplicaIDSet { + return r.replicaID + } + return -1 +} 
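+
+// Example of building an OffsetRequest by hand (illustrative only; OffsetNewest
+// is the package-level sentinel for the latest available offset):
+//
+//	req := &OffsetRequest{Version: 1}
+//	req.AddBlock("example-topic", 0, OffsetNewest, 1)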
+ +func (r *OffsetRequest) AddBlock(topic string, partitionID int32, time int64, maxOffsets int32) { + if r.blocks == nil { + r.blocks = make(map[string]map[int32]*offsetRequestBlock) + } + + if r.blocks[topic] == nil { + r.blocks[topic] = make(map[int32]*offsetRequestBlock) + } + + tmp := new(offsetRequestBlock) + tmp.time = time + if r.Version == 0 { + tmp.maxOffsets = maxOffsets + } + + r.blocks[topic][partitionID] = tmp +} diff --git a/vendor/github.com/Shopify/sarama/offset_response.go b/vendor/github.com/Shopify/sarama/offset_response.go new file mode 100644 index 00000000000..69349efe2ba --- /dev/null +++ b/vendor/github.com/Shopify/sarama/offset_response.go @@ -0,0 +1,192 @@ +package sarama + +type OffsetResponseBlock struct { + Err KError + Offsets []int64 // Version 0 + Offset int64 // Version 1 + Timestamp int64 // Version 1 +} + +func (b *OffsetResponseBlock) decode(pd packetDecoder, version int16) (err error) { + tmp, err := pd.getInt16() + if err != nil { + return err + } + b.Err = KError(tmp) + + if version == 0 { + b.Offsets, err = pd.getInt64Array() + + return err + } + + b.Timestamp, err = pd.getInt64() + if err != nil { + return err + } + + b.Offset, err = pd.getInt64() + if err != nil { + return err + } + + // For backwards compatibility put the offset in the offsets array too + b.Offsets = []int64{b.Offset} + + return nil +} + +func (b *OffsetResponseBlock) encode(pe packetEncoder, version int16) (err error) { + pe.putInt16(int16(b.Err)) + + if version == 0 { + return pe.putInt64Array(b.Offsets) + } + + pe.putInt64(b.Timestamp) + pe.putInt64(b.Offset) + + return nil +} + +type OffsetResponse struct { + Version int16 + ThrottleTimeMs int32 + Blocks map[string]map[int32]*OffsetResponseBlock +} + +func (r *OffsetResponse) decode(pd packetDecoder, version int16) (err error) { + if version >= 2 { + r.ThrottleTimeMs, err = pd.getInt32() + if err != nil { + return err + } + } + + numTopics, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Blocks = make(map[string]map[int32]*OffsetResponseBlock, numTopics) + for i := 0; i < numTopics; i++ { + name, err := pd.getString() + if err != nil { + return err + } + + numBlocks, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Blocks[name] = make(map[int32]*OffsetResponseBlock, numBlocks) + + for j := 0; j < numBlocks; j++ { + id, err := pd.getInt32() + if err != nil { + return err + } + + block := new(OffsetResponseBlock) + err = block.decode(pd, version) + if err != nil { + return err + } + r.Blocks[name][id] = block + } + } + + return nil +} + +func (r *OffsetResponse) GetBlock(topic string, partition int32) *OffsetResponseBlock { + if r.Blocks == nil { + return nil + } + + if r.Blocks[topic] == nil { + return nil + } + + return r.Blocks[topic][partition] +} + +/* +// [0 0 0 1 ntopics +0 8 109 121 95 116 111 112 105 99 topic +0 0 0 1 npartitions +0 0 0 0 id +0 0 + +0 0 0 1 0 0 0 0 +0 1 1 1 0 0 0 1 +0 8 109 121 95 116 111 112 +105 99 0 0 0 1 0 0 +0 0 0 0 0 0 0 1 +0 0 0 0 0 1 1 1] + +*/ +func (r *OffsetResponse) encode(pe packetEncoder) (err error) { + if r.Version >= 2 { + pe.putInt32(r.ThrottleTimeMs) + } + + if err = pe.putArrayLength(len(r.Blocks)); err != nil { + return err + } + + for topic, partitions := range r.Blocks { + if err = pe.putString(topic); err != nil { + return err + } + if err = pe.putArrayLength(len(partitions)); err != nil { + return err + } + for partition, block := range partitions { + pe.putInt32(partition) + if err = block.encode(pe, r.version()); err != nil { + return 
err + } + } + } + + return nil +} + +func (r *OffsetResponse) key() int16 { + return 2 +} + +func (r *OffsetResponse) version() int16 { + return r.Version +} + +func (r *OffsetResponse) headerVersion() int16 { + return 0 +} + +func (r *OffsetResponse) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V0_10_1_0 + case 2: + return V0_11_0_0 + default: + return MinVersion + } +} + +// testing API + +func (r *OffsetResponse) AddTopicPartition(topic string, partition int32, offset int64) { + if r.Blocks == nil { + r.Blocks = make(map[string]map[int32]*OffsetResponseBlock) + } + byTopic, ok := r.Blocks[topic] + if !ok { + byTopic = make(map[int32]*OffsetResponseBlock) + r.Blocks[topic] = byTopic + } + byTopic[partition] = &OffsetResponseBlock{Offsets: []int64{offset}, Offset: offset} +} diff --git a/vendor/github.com/Shopify/sarama/packet_decoder.go b/vendor/github.com/Shopify/sarama/packet_decoder.go new file mode 100644 index 00000000000..184bc26ae99 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/packet_decoder.go @@ -0,0 +1,68 @@ +package sarama + +// PacketDecoder is the interface providing helpers for reading with Kafka's encoding rules. +// Types implementing Decoder only need to worry about calling methods like GetString, +// not about how a string is represented in Kafka. +type packetDecoder interface { + // Primitives + getInt8() (int8, error) + getInt16() (int16, error) + getInt32() (int32, error) + getInt64() (int64, error) + getVarint() (int64, error) + getUVarint() (uint64, error) + getArrayLength() (int, error) + getCompactArrayLength() (int, error) + getBool() (bool, error) + getEmptyTaggedFieldArray() (int, error) + + // Collections + getBytes() ([]byte, error) + getVarintBytes() ([]byte, error) + getCompactBytes() ([]byte, error) + getRawBytes(length int) ([]byte, error) + getString() (string, error) + getNullableString() (*string, error) + getCompactString() (string, error) + getCompactNullableString() (*string, error) + getCompactInt32Array() ([]int32, error) + getInt32Array() ([]int32, error) + getInt64Array() ([]int64, error) + getStringArray() ([]string, error) + + // Subsets + remaining() int + getSubset(length int) (packetDecoder, error) + peek(offset, length int) (packetDecoder, error) // similar to getSubset, but it doesn't advance the offset + peekInt8(offset int) (int8, error) // similar to peek, but just one byte + + // Stacks, see PushDecoder + push(in pushDecoder) error + pop() error +} + +// PushDecoder is the interface for decoding fields like CRCs and lengths where the validity +// of the field depends on what is after it in the packet. Start them with PacketDecoder.Push() where +// the actual value is located in the packet, then PacketDecoder.Pop() them when all the bytes they +// depend upon have been decoded. +type pushDecoder interface { + // Saves the offset into the input buffer as the location to actually read the calculated value when able. + saveOffset(in int) + + // Returns the length of data to reserve for the input of this encoder (eg 4 bytes for a CRC32). + reserveLength() int + + // Indicates that all required data is now available to calculate and check the field. + // SaveOffset is guaranteed to have been called first. The implementation should read ReserveLength() bytes + // of data from the saved offset, and verify it based on the data between the saved offset and curOffset. 
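+	// For example, a CRC32 push decoder reserves 4 bytes for the stored checksum
+	// and, in check, recomputes the CRC over the bytes between the saved offset
+	// and curOffset and compares it against the stored value (an illustrative
+	// description of the contract, not an additional requirement).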
+	check(curOffset int, buf []byte) error
+}
+
+// dynamicPushDecoder extends the interface of pushDecoder for use cases where the length of the
+// field itself is unknown until its value has been decoded (for instance varint encoded length
+// fields).
+// During push, the dynamicPushDecoder.decode() method will be called instead of reserveLength().
+type dynamicPushDecoder interface {
+	pushDecoder
+	decoder
+}
diff --git a/vendor/github.com/Shopify/sarama/packet_encoder.go b/vendor/github.com/Shopify/sarama/packet_encoder.go
new file mode 100644
index 00000000000..aea53ca83b4
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/packet_encoder.go
@@ -0,0 +1,73 @@
+package sarama
+
+import "github.com/rcrowley/go-metrics"
+
+// PacketEncoder is the interface providing helpers for writing with Kafka's encoding rules.
+// Types implementing Encoder only need to worry about calling methods like PutString,
+// not about how a string is represented in Kafka.
+type packetEncoder interface {
+	// Primitives
+	putInt8(in int8)
+	putInt16(in int16)
+	putInt32(in int32)
+	putInt64(in int64)
+	putVarint(in int64)
+	putUVarint(in uint64)
+	putCompactArrayLength(in int)
+	putArrayLength(in int) error
+	putBool(in bool)
+
+	// Collections
+	putBytes(in []byte) error
+	putVarintBytes(in []byte) error
+	putCompactBytes(in []byte) error
+	putRawBytes(in []byte) error
+	putCompactString(in string) error
+	putNullableCompactString(in *string) error
+	putString(in string) error
+	putNullableString(in *string) error
+	putStringArray(in []string) error
+	putCompactInt32Array(in []int32) error
+	putNullableCompactInt32Array(in []int32) error
+	putInt32Array(in []int32) error
+	putInt64Array(in []int64) error
+	putEmptyTaggedFieldArray()
+
+	// Provide the current offset to record the batch size metric
+	offset() int
+
+	// Stacks, see PushEncoder
+	push(in pushEncoder)
+	pop() error
+
+	// To record metrics when provided
+	metricRegistry() metrics.Registry
+}
+
+// PushEncoder is the interface for encoding fields like CRCs and lengths where the value
+// of the field depends on what is encoded after it in the packet. Start them with PacketEncoder.Push() where
+// the actual value is located in the packet, then PacketEncoder.Pop() them when all the bytes they
+// depend upon have been written.
+type pushEncoder interface {
+	// Saves the offset into the input buffer as the location to actually write the calculated value when able.
+	saveOffset(in int)
+
+	// Returns the length of data to reserve for the output of this encoder (eg 4 bytes for a CRC32).
+	reserveLength() int
+
+	// Indicates that all required data is now available to calculate and write the field.
+	// SaveOffset is guaranteed to have been called first. The implementation should write ReserveLength() bytes
+	// of data to the saved offset, based on the data between the saved offset and curOffset.
+	run(curOffset int, buf []byte) error
+}
+
+// dynamicPushEncoder extends the interface of pushEncoder for use cases where the length of the
+// field itself is unknown until its value has been computed (for instance varint encoded length
+// fields).
+type dynamicPushEncoder interface {
+	pushEncoder
+
+	// Called during pop() to adjust the length of the field.
+	// It should return the difference in bytes between the last computed length and the current length.
+	adjustLength(currOffset int) int
+}
diff --git a/vendor/github.com/Shopify/sarama/partitioner.go b/vendor/github.com/Shopify/sarama/partitioner.go
new file mode 100644
index 00000000000..a66e11ea3fc
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/partitioner.go
@@ -0,0 +1,217 @@
+package sarama
+
+import (
+	"hash"
+	"hash/fnv"
+	"math/rand"
+	"time"
+)
+
+// Partitioner is anything that, given a Kafka message and a number of partitions indexed [0...numPartitions-1],
+// decides to which partition to send the message. RandomPartitioner, RoundRobinPartitioner and HashPartitioner are provided
+// as simple default implementations.
+type Partitioner interface {
+	// Partition takes a message and partition count and chooses a partition
+	Partition(message *ProducerMessage, numPartitions int32) (int32, error)
+
+	// RequiresConsistency indicates to the user of the partitioner whether the
+	// mapping of key->partition is consistent or not. Specifically, if a
+	// partitioner requires consistency then it must be allowed to choose from all
+	// partitions (even ones known to be unavailable), and its choice must be
+	// respected by the caller. The obvious example is the HashPartitioner.
+	RequiresConsistency() bool
+}
+
+// DynamicConsistencyPartitioner can optionally be implemented by Partitioners
+// in order to allow more flexibility than is originally allowed by the
+// RequiresConsistency method in the Partitioner interface. This allows
+// partitioners to require consistency sometimes, but not all times. It's useful
+// for, e.g., the HashPartitioner, which does not require consistency if the
+// message key is nil.
+type DynamicConsistencyPartitioner interface {
+	Partitioner
+
+	// MessageRequiresConsistency is similar to Partitioner.RequiresConsistency,
+	// but takes in the message being partitioned so that the partitioner can
+	// make a per-message determination.
+	MessageRequiresConsistency(message *ProducerMessage) bool
+}
+
+// PartitionerConstructor is the type for a function capable of constructing new Partitioners.
+type PartitionerConstructor func(topic string) Partitioner
+
+type manualPartitioner struct{}
+
+// HashPartitionerOption lets you modify default values of the partitioner
+type HashPartitionerOption func(*hashPartitioner)
+
+// WithAbsFirst means that the partitioner handles absolute values
+// in the same way as the reference Java implementation
+func WithAbsFirst() HashPartitionerOption {
+	return func(hp *hashPartitioner) {
+		hp.referenceAbs = true
+	}
+}
+
+// WithCustomHashFunction lets you specify what hash function to use for the partitioning
+func WithCustomHashFunction(hasher func() hash.Hash32) HashPartitionerOption {
+	return func(hp *hashPartitioner) {
+		hp.hasher = hasher()
+	}
+}
+
+// WithCustomFallbackPartitioner lets you specify what HashPartitioner should be used in case a distribution key is empty
+func WithCustomFallbackPartitioner(randomHP *hashPartitioner) HashPartitionerOption {
+	return func(hp *hashPartitioner) {
+		// use the provided fallback partitioner, not the partitioner being configured
+		hp.random = randomHP
+	}
+}
+
+// NewManualPartitioner returns a Partitioner which uses the partition manually set in the provided
+// ProducerMessage's Partition field as the partition to produce to.
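+//
+// Example (illustrative only; assumes a producer Config named config):
+//
+//	config.Producer.Partitioner = NewManualPartitioner
+//	msg := &ProducerMessage{Topic: "example-topic", Partition: 3, Value: StringEncoder("payload")}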
+func NewManualPartitioner(topic string) Partitioner { + return new(manualPartitioner) +} + +func (p *manualPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) { + return message.Partition, nil +} + +func (p *manualPartitioner) RequiresConsistency() bool { + return true +} + +type randomPartitioner struct { + generator *rand.Rand +} + +// NewRandomPartitioner returns a Partitioner which chooses a random partition each time. +func NewRandomPartitioner(topic string) Partitioner { + p := new(randomPartitioner) + p.generator = rand.New(rand.NewSource(time.Now().UTC().UnixNano())) + return p +} + +func (p *randomPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) { + return int32(p.generator.Intn(int(numPartitions))), nil +} + +func (p *randomPartitioner) RequiresConsistency() bool { + return false +} + +type roundRobinPartitioner struct { + partition int32 +} + +// NewRoundRobinPartitioner returns a Partitioner which walks through the available partitions one at a time. +func NewRoundRobinPartitioner(topic string) Partitioner { + return &roundRobinPartitioner{} +} + +func (p *roundRobinPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) { + if p.partition >= numPartitions { + p.partition = 0 + } + ret := p.partition + p.partition++ + return ret, nil +} + +func (p *roundRobinPartitioner) RequiresConsistency() bool { + return false +} + +type hashPartitioner struct { + random Partitioner + hasher hash.Hash32 + referenceAbs bool +} + +// NewCustomHashPartitioner is a wrapper around NewHashPartitioner, allowing the use of custom hasher. +// The argument is a function providing the instance, implementing the hash.Hash32 interface. This is to ensure that +// each partition dispatcher gets its own hasher, to avoid concurrency issues by sharing an instance. +func NewCustomHashPartitioner(hasher func() hash.Hash32) PartitionerConstructor { + return func(topic string) Partitioner { + p := new(hashPartitioner) + p.random = NewRandomPartitioner(topic) + p.hasher = hasher() + p.referenceAbs = false + return p + } +} + +// NewCustomPartitioner creates a default Partitioner but lets you specify the behavior of each component via options +func NewCustomPartitioner(options ...HashPartitionerOption) PartitionerConstructor { + return func(topic string) Partitioner { + p := new(hashPartitioner) + p.random = NewRandomPartitioner(topic) + p.hasher = fnv.New32a() + p.referenceAbs = false + for _, option := range options { + option(p) + } + return p + } +} + +// NewHashPartitioner returns a Partitioner which behaves as follows. If the message's key is nil then a +// random partition is chosen. Otherwise the FNV-1a hash of the encoded bytes of the message key is used, +// modulus the number of partitions. This ensures that messages with the same key always end up on the +// same partition. +func NewHashPartitioner(topic string) Partitioner { + p := new(hashPartitioner) + p.random = NewRandomPartitioner(topic) + p.hasher = fnv.New32a() + p.referenceAbs = false + return p +} + +// NewReferenceHashPartitioner is like NewHashPartitioner except that it handles absolute values +// in the same way as the reference Java implementation. NewHashPartitioner was supposed to do +// that but it had a mistake and now there are people depending on both behaviours. This will +// all go away on the next major version bump. 
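+//
+// Example (illustrative only; assumes a producer Config named config):
+//
+//	config.Producer.Partitioner = NewReferenceHashPartitioner
+//	// equivalently, using the options API defined above:
+//	config.Producer.Partitioner = NewCustomPartitioner(WithAbsFirst())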
+func NewReferenceHashPartitioner(topic string) Partitioner { + p := new(hashPartitioner) + p.random = NewRandomPartitioner(topic) + p.hasher = fnv.New32a() + p.referenceAbs = true + return p +} + +func (p *hashPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) { + if message.Key == nil { + return p.random.Partition(message, numPartitions) + } + bytes, err := message.Key.Encode() + if err != nil { + return -1, err + } + p.hasher.Reset() + _, err = p.hasher.Write(bytes) + if err != nil { + return -1, err + } + var partition int32 + // Turns out we were doing our absolute value in a subtly different way from the upstream + // implementation, but now we need to maintain backwards compat for people who started using + // the old version; if referenceAbs is set we are compatible with the reference java client + // but not past Sarama versions + if p.referenceAbs { + partition = (int32(p.hasher.Sum32()) & 0x7fffffff) % numPartitions + } else { + partition = int32(p.hasher.Sum32()) % numPartitions + if partition < 0 { + partition = -partition + } + } + return partition, nil +} + +func (p *hashPartitioner) RequiresConsistency() bool { + return true +} + +func (p *hashPartitioner) MessageRequiresConsistency(message *ProducerMessage) bool { + return message.Key != nil +} diff --git a/vendor/github.com/Shopify/sarama/prep_encoder.go b/vendor/github.com/Shopify/sarama/prep_encoder.go new file mode 100644 index 00000000000..0d01374879a --- /dev/null +++ b/vendor/github.com/Shopify/sarama/prep_encoder.go @@ -0,0 +1,207 @@ +package sarama + +import ( + "encoding/binary" + "errors" + "fmt" + "math" + + "github.com/rcrowley/go-metrics" +) + +type prepEncoder struct { + stack []pushEncoder + length int +} + +// primitives + +func (pe *prepEncoder) putInt8(in int8) { + pe.length++ +} + +func (pe *prepEncoder) putInt16(in int16) { + pe.length += 2 +} + +func (pe *prepEncoder) putInt32(in int32) { + pe.length += 4 +} + +func (pe *prepEncoder) putInt64(in int64) { + pe.length += 8 +} + +func (pe *prepEncoder) putVarint(in int64) { + var buf [binary.MaxVarintLen64]byte + pe.length += binary.PutVarint(buf[:], in) +} + +func (pe *prepEncoder) putUVarint(in uint64) { + var buf [binary.MaxVarintLen64]byte + pe.length += binary.PutUvarint(buf[:], in) +} + +func (pe *prepEncoder) putArrayLength(in int) error { + if in > math.MaxInt32 { + return PacketEncodingError{fmt.Sprintf("array too long (%d)", in)} + } + pe.length += 4 + return nil +} + +func (pe *prepEncoder) putCompactArrayLength(in int) { + pe.putUVarint(uint64(in + 1)) +} + +func (pe *prepEncoder) putBool(in bool) { + pe.length++ +} + +// arrays + +func (pe *prepEncoder) putBytes(in []byte) error { + pe.length += 4 + if in == nil { + return nil + } + return pe.putRawBytes(in) +} + +func (pe *prepEncoder) putVarintBytes(in []byte) error { + if in == nil { + pe.putVarint(-1) + return nil + } + pe.putVarint(int64(len(in))) + return pe.putRawBytes(in) +} + +func (pe *prepEncoder) putCompactBytes(in []byte) error { + pe.putUVarint(uint64(len(in) + 1)) + return pe.putRawBytes(in) +} + +func (pe *prepEncoder) putCompactString(in string) error { + pe.putCompactArrayLength(len(in)) + return pe.putRawBytes([]byte(in)) +} + +func (pe *prepEncoder) putNullableCompactString(in *string) error { + if in == nil { + pe.putUVarint(0) + return nil + } else { + return pe.putCompactString(*in) + } +} + +func (pe *prepEncoder) putRawBytes(in []byte) error { + if len(in) > math.MaxInt32 { + return PacketEncodingError{fmt.Sprintf("byteslice too long 
(%d)", len(in))} + } + pe.length += len(in) + return nil +} + +func (pe *prepEncoder) putNullableString(in *string) error { + if in == nil { + pe.length += 2 + return nil + } + return pe.putString(*in) +} + +func (pe *prepEncoder) putString(in string) error { + pe.length += 2 + if len(in) > math.MaxInt16 { + return PacketEncodingError{fmt.Sprintf("string too long (%d)", len(in))} + } + pe.length += len(in) + return nil +} + +func (pe *prepEncoder) putStringArray(in []string) error { + err := pe.putArrayLength(len(in)) + if err != nil { + return err + } + + for _, str := range in { + if err := pe.putString(str); err != nil { + return err + } + } + + return nil +} + +func (pe *prepEncoder) putCompactInt32Array(in []int32) error { + if in == nil { + return errors.New("expected int32 array to be non null") + } + + pe.putUVarint(uint64(len(in)) + 1) + pe.length += 4 * len(in) + return nil +} + +func (pe *prepEncoder) putNullableCompactInt32Array(in []int32) error { + if in == nil { + pe.putUVarint(0) + return nil + } + + pe.putUVarint(uint64(len(in)) + 1) + pe.length += 4 * len(in) + return nil +} + +func (pe *prepEncoder) putInt32Array(in []int32) error { + err := pe.putArrayLength(len(in)) + if err != nil { + return err + } + pe.length += 4 * len(in) + return nil +} + +func (pe *prepEncoder) putInt64Array(in []int64) error { + err := pe.putArrayLength(len(in)) + if err != nil { + return err + } + pe.length += 8 * len(in) + return nil +} + +func (pe *prepEncoder) putEmptyTaggedFieldArray() { + pe.putUVarint(0) +} + +func (pe *prepEncoder) offset() int { + return pe.length +} + +// stackable + +func (pe *prepEncoder) push(in pushEncoder) { + in.saveOffset(pe.length) + pe.length += in.reserveLength() + pe.stack = append(pe.stack, in) +} + +func (pe *prepEncoder) pop() error { + in := pe.stack[len(pe.stack)-1] + pe.stack = pe.stack[:len(pe.stack)-1] + if dpe, ok := in.(dynamicPushEncoder); ok { + pe.length += dpe.adjustLength(pe.length) + } + + return nil +} + +// we do not record metrics during the prep encoder pass +func (pe *prepEncoder) metricRegistry() metrics.Registry { + return nil +} diff --git a/vendor/github.com/Shopify/sarama/produce_request.go b/vendor/github.com/Shopify/sarama/produce_request.go new file mode 100644 index 00000000000..0034651e254 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/produce_request.go @@ -0,0 +1,258 @@ +package sarama + +import "github.com/rcrowley/go-metrics" + +// RequiredAcks is used in Produce Requests to tell the broker how many replica acknowledgements +// it must see before responding. Any of the constants defined here are valid. On broker versions +// prior to 0.8.2.0 any other positive int16 is also valid (the broker will wait for that many +// acknowledgements) but in 0.8.2.0 and later this will raise an exception (it has been replaced +// by setting the `min.isr` value in the brokers configuration). +type RequiredAcks int16 + +const ( + // NoResponse doesn't send any response, the TCP ACK is all you get. + NoResponse RequiredAcks = 0 + // WaitForLocal waits for only the local commit to succeed before responding. + WaitForLocal RequiredAcks = 1 + // WaitForAll waits for all in-sync replicas to commit before responding. + // The minimum number of in-sync replicas is configured on the broker via + // the `min.insync.replicas` configuration key. 
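+	//
+	// For example (illustrative only; assumes a producer Config named config):
+	//
+	//	config.Producer.RequiredAcks = WaitForAll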
+ WaitForAll RequiredAcks = -1 +) + +type ProduceRequest struct { + TransactionalID *string + RequiredAcks RequiredAcks + Timeout int32 + Version int16 // v1 requires Kafka 0.9, v2 requires Kafka 0.10, v3 requires Kafka 0.11 + records map[string]map[int32]Records +} + +func updateMsgSetMetrics(msgSet *MessageSet, compressionRatioMetric metrics.Histogram, + topicCompressionRatioMetric metrics.Histogram) int64 { + var topicRecordCount int64 + for _, messageBlock := range msgSet.Messages { + // Is this a fake "message" wrapping real messages? + if messageBlock.Msg.Set != nil { + topicRecordCount += int64(len(messageBlock.Msg.Set.Messages)) + } else { + // A single uncompressed message + topicRecordCount++ + } + // Better be safe than sorry when computing the compression ratio + if messageBlock.Msg.compressedSize != 0 { + compressionRatio := float64(len(messageBlock.Msg.Value)) / + float64(messageBlock.Msg.compressedSize) + // Histogram do not support decimal values, let's multiple it by 100 for better precision + intCompressionRatio := int64(100 * compressionRatio) + compressionRatioMetric.Update(intCompressionRatio) + topicCompressionRatioMetric.Update(intCompressionRatio) + } + } + return topicRecordCount +} + +func updateBatchMetrics(recordBatch *RecordBatch, compressionRatioMetric metrics.Histogram, + topicCompressionRatioMetric metrics.Histogram) int64 { + if recordBatch.compressedRecords != nil { + compressionRatio := int64(float64(recordBatch.recordsLen) / float64(len(recordBatch.compressedRecords)) * 100) + compressionRatioMetric.Update(compressionRatio) + topicCompressionRatioMetric.Update(compressionRatio) + } + + return int64(len(recordBatch.Records)) +} + +func (r *ProduceRequest) encode(pe packetEncoder) error { + if r.Version >= 3 { + if err := pe.putNullableString(r.TransactionalID); err != nil { + return err + } + } + pe.putInt16(int16(r.RequiredAcks)) + pe.putInt32(r.Timeout) + metricRegistry := pe.metricRegistry() + var batchSizeMetric metrics.Histogram + var compressionRatioMetric metrics.Histogram + if metricRegistry != nil { + batchSizeMetric = getOrRegisterHistogram("batch-size", metricRegistry) + compressionRatioMetric = getOrRegisterHistogram("compression-ratio", metricRegistry) + } + totalRecordCount := int64(0) + + err := pe.putArrayLength(len(r.records)) + if err != nil { + return err + } + + for topic, partitions := range r.records { + err = pe.putString(topic) + if err != nil { + return err + } + err = pe.putArrayLength(len(partitions)) + if err != nil { + return err + } + topicRecordCount := int64(0) + var topicCompressionRatioMetric metrics.Histogram + if metricRegistry != nil { + topicCompressionRatioMetric = getOrRegisterTopicHistogram("compression-ratio", topic, metricRegistry) + } + for id, records := range partitions { + startOffset := pe.offset() + pe.putInt32(id) + pe.push(&lengthField{}) + err = records.encode(pe) + if err != nil { + return err + } + err = pe.pop() + if err != nil { + return err + } + if metricRegistry != nil { + if r.Version >= 3 { + topicRecordCount += updateBatchMetrics(records.RecordBatch, compressionRatioMetric, topicCompressionRatioMetric) + } else { + topicRecordCount += updateMsgSetMetrics(records.MsgSet, compressionRatioMetric, topicCompressionRatioMetric) + } + batchSize := int64(pe.offset() - startOffset) + batchSizeMetric.Update(batchSize) + getOrRegisterTopicHistogram("batch-size", topic, metricRegistry).Update(batchSize) + } + } + if topicRecordCount > 0 { + getOrRegisterTopicMeter("record-send-rate", topic, 
metricRegistry).Mark(topicRecordCount) + getOrRegisterTopicHistogram("records-per-request", topic, metricRegistry).Update(topicRecordCount) + totalRecordCount += topicRecordCount + } + } + if totalRecordCount > 0 { + metrics.GetOrRegisterMeter("record-send-rate", metricRegistry).Mark(totalRecordCount) + getOrRegisterHistogram("records-per-request", metricRegistry).Update(totalRecordCount) + } + + return nil +} + +func (r *ProduceRequest) decode(pd packetDecoder, version int16) error { + r.Version = version + + if version >= 3 { + id, err := pd.getNullableString() + if err != nil { + return err + } + r.TransactionalID = id + } + requiredAcks, err := pd.getInt16() + if err != nil { + return err + } + r.RequiredAcks = RequiredAcks(requiredAcks) + if r.Timeout, err = pd.getInt32(); err != nil { + return err + } + topicCount, err := pd.getArrayLength() + if err != nil { + return err + } + if topicCount == 0 { + return nil + } + + r.records = make(map[string]map[int32]Records) + for i := 0; i < topicCount; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + partitionCount, err := pd.getArrayLength() + if err != nil { + return err + } + r.records[topic] = make(map[int32]Records) + + for j := 0; j < partitionCount; j++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + size, err := pd.getInt32() + if err != nil { + return err + } + recordsDecoder, err := pd.getSubset(int(size)) + if err != nil { + return err + } + var records Records + if err := records.decode(recordsDecoder); err != nil { + return err + } + r.records[topic][partition] = records + } + } + + return nil +} + +func (r *ProduceRequest) key() int16 { + return 0 +} + +func (r *ProduceRequest) version() int16 { + return r.Version +} + +func (r *ProduceRequest) headerVersion() int16 { + return 1 +} + +func (r *ProduceRequest) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V0_9_0_0 + case 2: + return V0_10_0_0 + case 3: + return V0_11_0_0 + case 7: + return V2_1_0_0 + default: + return MinVersion + } +} + +func (r *ProduceRequest) ensureRecords(topic string, partition int32) { + if r.records == nil { + r.records = make(map[string]map[int32]Records) + } + + if r.records[topic] == nil { + r.records[topic] = make(map[int32]Records) + } +} + +func (r *ProduceRequest) AddMessage(topic string, partition int32, msg *Message) { + r.ensureRecords(topic, partition) + set := r.records[topic][partition].MsgSet + + if set == nil { + set = new(MessageSet) + r.records[topic][partition] = newLegacyRecords(set) + } + + set.addMessage(msg) +} + +func (r *ProduceRequest) AddSet(topic string, partition int32, set *MessageSet) { + r.ensureRecords(topic, partition) + r.records[topic][partition] = newLegacyRecords(set) +} + +func (r *ProduceRequest) AddBatch(topic string, partition int32, batch *RecordBatch) { + r.ensureRecords(topic, partition) + r.records[topic][partition] = newDefaultRecords(batch) +} diff --git a/vendor/github.com/Shopify/sarama/produce_response.go b/vendor/github.com/Shopify/sarama/produce_response.go new file mode 100644 index 00000000000..edf978790c9 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/produce_response.go @@ -0,0 +1,212 @@ +package sarama + +import ( + "fmt" + "time" +) + +// Protocol, http://kafka.apache.org/protocol.html +// v1 +// v2 = v3 = v4 +// v5 = v6 = v7 +// Produce Response (Version: 7) => [responses] throttle_time_ms +// responses => topic [partition_responses] +// topic => STRING +// partition_responses => partition error_code base_offset 
log_append_time log_start_offset +// partition => INT32 +// error_code => INT16 +// base_offset => INT64 +// log_append_time => INT64 +// log_start_offset => INT64 +// throttle_time_ms => INT32 + +// partition_responses in protocol +type ProduceResponseBlock struct { + Err KError // v0, error_code + Offset int64 // v0, base_offset + Timestamp time.Time // v2, log_append_time, and the broker is configured with `LogAppendTime` + StartOffset int64 // v5, log_start_offset +} + +func (b *ProduceResponseBlock) decode(pd packetDecoder, version int16) (err error) { + tmp, err := pd.getInt16() + if err != nil { + return err + } + b.Err = KError(tmp) + + b.Offset, err = pd.getInt64() + if err != nil { + return err + } + + if version >= 2 { + if millis, err := pd.getInt64(); err != nil { + return err + } else if millis != -1 { + b.Timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond)) + } + } + + if version >= 5 { + b.StartOffset, err = pd.getInt64() + if err != nil { + return err + } + } + + return nil +} + +func (b *ProduceResponseBlock) encode(pe packetEncoder, version int16) (err error) { + pe.putInt16(int16(b.Err)) + pe.putInt64(b.Offset) + + if version >= 2 { + timestamp := int64(-1) + if !b.Timestamp.Before(time.Unix(0, 0)) { + timestamp = b.Timestamp.UnixNano() / int64(time.Millisecond) + } else if !b.Timestamp.IsZero() { + return PacketEncodingError{fmt.Sprintf("invalid timestamp (%v)", b.Timestamp)} + } + pe.putInt64(timestamp) + } + + if version >= 5 { + pe.putInt64(b.StartOffset) + } + + return nil +} + +type ProduceResponse struct { + Blocks map[string]map[int32]*ProduceResponseBlock // v0, responses + Version int16 + ThrottleTime time.Duration // v1, throttle_time_ms +} + +func (r *ProduceResponse) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + numTopics, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Blocks = make(map[string]map[int32]*ProduceResponseBlock, numTopics) + for i := 0; i < numTopics; i++ { + name, err := pd.getString() + if err != nil { + return err + } + + numBlocks, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Blocks[name] = make(map[int32]*ProduceResponseBlock, numBlocks) + + for j := 0; j < numBlocks; j++ { + id, err := pd.getInt32() + if err != nil { + return err + } + + block := new(ProduceResponseBlock) + err = block.decode(pd, version) + if err != nil { + return err + } + r.Blocks[name][id] = block + } + } + + if r.Version >= 1 { + millis, err := pd.getInt32() + if err != nil { + return err + } + + r.ThrottleTime = time.Duration(millis) * time.Millisecond + } + + return nil +} + +func (r *ProduceResponse) encode(pe packetEncoder) error { + err := pe.putArrayLength(len(r.Blocks)) + if err != nil { + return err + } + for topic, partitions := range r.Blocks { + err = pe.putString(topic) + if err != nil { + return err + } + err = pe.putArrayLength(len(partitions)) + if err != nil { + return err + } + for id, prb := range partitions { + pe.putInt32(id) + err = prb.encode(pe, r.Version) + if err != nil { + return err + } + } + } + + if r.Version >= 1 { + pe.putInt32(int32(r.ThrottleTime / time.Millisecond)) + } + return nil +} + +func (r *ProduceResponse) key() int16 { + return 0 +} + +func (r *ProduceResponse) version() int16 { + return r.Version +} + +func (r *ProduceResponse) headerVersion() int16 { + return 0 +} + +func (r *ProduceResponse) requiredVersion() KafkaVersion { + return MinVersion +} + +func (r *ProduceResponse) GetBlock(topic string, partition int32) 
*ProduceResponseBlock { + if r.Blocks == nil { + return nil + } + + if r.Blocks[topic] == nil { + return nil + } + + return r.Blocks[topic][partition] +} + +// Testing API + +func (r *ProduceResponse) AddTopicPartition(topic string, partition int32, err KError) { + if r.Blocks == nil { + r.Blocks = make(map[string]map[int32]*ProduceResponseBlock) + } + byTopic, ok := r.Blocks[topic] + if !ok { + byTopic = make(map[int32]*ProduceResponseBlock) + r.Blocks[topic] = byTopic + } + block := &ProduceResponseBlock{ + Err: err, + } + if r.Version >= 2 { + block.Timestamp = time.Now() + } + byTopic[partition] = block +} diff --git a/vendor/github.com/Shopify/sarama/produce_set.go b/vendor/github.com/Shopify/sarama/produce_set.go new file mode 100644 index 00000000000..9c70f818006 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/produce_set.go @@ -0,0 +1,273 @@ +package sarama + +import ( + "encoding/binary" + "errors" + "time" +) + +type partitionSet struct { + msgs []*ProducerMessage + recordsToSend Records + bufferBytes int +} + +type produceSet struct { + parent *asyncProducer + msgs map[string]map[int32]*partitionSet + producerID int64 + producerEpoch int16 + + bufferBytes int + bufferCount int +} + +func newProduceSet(parent *asyncProducer) *produceSet { + pid, epoch := parent.txnmgr.getProducerID() + return &produceSet{ + msgs: make(map[string]map[int32]*partitionSet), + parent: parent, + producerID: pid, + producerEpoch: epoch, + } +} + +func (ps *produceSet) add(msg *ProducerMessage) error { + var err error + var key, val []byte + + if msg.Key != nil { + if key, err = msg.Key.Encode(); err != nil { + return err + } + } + + if msg.Value != nil { + if val, err = msg.Value.Encode(); err != nil { + return err + } + } + + timestamp := msg.Timestamp + if timestamp.IsZero() { + timestamp = time.Now() + } + timestamp = timestamp.Truncate(time.Millisecond) + + partitions := ps.msgs[msg.Topic] + if partitions == nil { + partitions = make(map[int32]*partitionSet) + ps.msgs[msg.Topic] = partitions + } + + var size int + + set := partitions[msg.Partition] + if set == nil { + if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) { + batch := &RecordBatch{ + FirstTimestamp: timestamp, + Version: 2, + Codec: ps.parent.conf.Producer.Compression, + CompressionLevel: ps.parent.conf.Producer.CompressionLevel, + ProducerID: ps.producerID, + ProducerEpoch: ps.producerEpoch, + } + if ps.parent.conf.Producer.Idempotent { + batch.FirstSequence = msg.sequenceNumber + } + set = &partitionSet{recordsToSend: newDefaultRecords(batch)} + size = recordBatchOverhead + } else { + set = &partitionSet{recordsToSend: newLegacyRecords(new(MessageSet))} + } + partitions[msg.Partition] = set + } + + if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) { + if ps.parent.conf.Producer.Idempotent && msg.sequenceNumber < set.recordsToSend.RecordBatch.FirstSequence { + return errors.New("assertion failed: message out of sequence added to a batch") + } + } + + // Past this point we can't return an error, because we've already added the message to the set. 
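+ // (The idempotent sequence check above was the last step that could fail.)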
+ set.msgs = append(set.msgs, msg) + + if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) { + // We are being conservative here to avoid having to prep encode the record + size += maximumRecordOverhead + rec := &Record{ + Key: key, + Value: val, + TimestampDelta: timestamp.Sub(set.recordsToSend.RecordBatch.FirstTimestamp), + } + size += len(key) + len(val) + if len(msg.Headers) > 0 { + rec.Headers = make([]*RecordHeader, len(msg.Headers)) + for i := range msg.Headers { + rec.Headers[i] = &msg.Headers[i] + size += len(rec.Headers[i].Key) + len(rec.Headers[i].Value) + 2*binary.MaxVarintLen32 + } + } + set.recordsToSend.RecordBatch.addRecord(rec) + } else { + msgToSend := &Message{Codec: CompressionNone, Key: key, Value: val} + if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) { + msgToSend.Timestamp = timestamp + msgToSend.Version = 1 + } + set.recordsToSend.MsgSet.addMessage(msgToSend) + size = producerMessageOverhead + len(key) + len(val) + } + + set.bufferBytes += size + ps.bufferBytes += size + ps.bufferCount++ + + return nil +} + +func (ps *produceSet) buildRequest() *ProduceRequest { + req := &ProduceRequest{ + RequiredAcks: ps.parent.conf.Producer.RequiredAcks, + Timeout: int32(ps.parent.conf.Producer.Timeout / time.Millisecond), + } + if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) { + req.Version = 2 + } + if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) { + req.Version = 3 + } + + if ps.parent.conf.Producer.Compression == CompressionZSTD && ps.parent.conf.Version.IsAtLeast(V2_1_0_0) { + req.Version = 7 + } + + for topic, partitionSets := range ps.msgs { + for partition, set := range partitionSets { + if req.Version >= 3 { + // If the API version we're hitting is 3 or greater, we need to calculate + // offsets for each record in the batch relative to FirstOffset. + // Additionally, we must set LastOffsetDelta to the value of the last offset + // in the batch. Since the OffsetDelta of the first record is 0, we know that the + // final record of any batch will have an offset of (# of records in batch) - 1. + // (See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-Messagesets + // under the RecordBatch section for details.) + rb := set.recordsToSend.RecordBatch + if len(rb.Records) > 0 { + rb.LastOffsetDelta = int32(len(rb.Records) - 1) + for i, record := range rb.Records { + record.OffsetDelta = int64(i) + } + } + req.AddBatch(topic, partition, rb) + continue + } + if ps.parent.conf.Producer.Compression == CompressionNone { + req.AddSet(topic, partition, set.recordsToSend.MsgSet) + } else { + // When compression is enabled, the entire set for each partition is compressed + // and sent as the payload of a single fake "message" with the appropriate codec + // set and no key. When the server sees a message with a compression codec, it + // decompresses the payload and treats the result as its message set. + + if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) { + // If our version is 0.10 or later, assign relative offsets + // to the inner messages. This lets the broker avoid + // recompressing the message set. + // (See https://cwiki.apache.org/confluence/display/KAFKA/KIP-31+-+Move+to+relative+offsets+in+compressed+message+sets + // for details on relative offsets.) + for i, msg := range set.recordsToSend.MsgSet.Messages { + msg.Offset = int64(i) + } + } + payload, err := encode(set.recordsToSend.MsgSet, ps.parent.conf.MetricRegistry) + if err != nil { + Logger.Println(err) // if this happens, it's basically our fault. 
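+ // Encoding an in-memory message set should never fail, so a failure here is a
+ // programmer error rather than a recoverable condition; hence the panic below.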
+ panic(err) + } + compMsg := &Message{ + Codec: ps.parent.conf.Producer.Compression, + CompressionLevel: ps.parent.conf.Producer.CompressionLevel, + Key: nil, + Value: payload, + Set: set.recordsToSend.MsgSet, // Provide the underlying message set for accurate metrics + } + if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) { + compMsg.Version = 1 + compMsg.Timestamp = set.recordsToSend.MsgSet.Messages[0].Msg.Timestamp + } + req.AddMessage(topic, partition, compMsg) + } + } + } + + return req +} + +func (ps *produceSet) eachPartition(cb func(topic string, partition int32, pSet *partitionSet)) { + for topic, partitionSet := range ps.msgs { + for partition, set := range partitionSet { + cb(topic, partition, set) + } + } +} + +func (ps *produceSet) dropPartition(topic string, partition int32) []*ProducerMessage { + if ps.msgs[topic] == nil { + return nil + } + set := ps.msgs[topic][partition] + if set == nil { + return nil + } + ps.bufferBytes -= set.bufferBytes + ps.bufferCount -= len(set.msgs) + delete(ps.msgs[topic], partition) + return set.msgs +} + +func (ps *produceSet) wouldOverflow(msg *ProducerMessage) bool { + version := 1 + if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) { + version = 2 + } + + switch { + // Would we overflow our maximum possible size-on-the-wire? 10KiB is arbitrary overhead for safety. + case ps.bufferBytes+msg.byteSize(version) >= int(MaxRequestSize-(10*1024)): + return true + // Would we overflow the size-limit of a message-batch for this partition? + case ps.msgs[msg.Topic] != nil && ps.msgs[msg.Topic][msg.Partition] != nil && + ps.msgs[msg.Topic][msg.Partition].bufferBytes+msg.byteSize(version) >= ps.parent.conf.Producer.MaxMessageBytes: + return true + // Would we overflow simply in number of messages? + case ps.parent.conf.Producer.Flush.MaxMessages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.MaxMessages: + return true + default: + return false + } +} + +func (ps *produceSet) readyToFlush() bool { + switch { + // If we don't have any messages, nothing else matters + case ps.empty(): + return false + // If all three config values are 0, we always flush as-fast-as-possible + case ps.parent.conf.Producer.Flush.Frequency == 0 && ps.parent.conf.Producer.Flush.Bytes == 0 && ps.parent.conf.Producer.Flush.Messages == 0: + return true + // If we've passed the message trigger-point + case ps.parent.conf.Producer.Flush.Messages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.Messages: + return true + // If we've passed the byte trigger-point + case ps.parent.conf.Producer.Flush.Bytes > 0 && ps.bufferBytes >= ps.parent.conf.Producer.Flush.Bytes: + return true + default: + return false + } +} + +func (ps *produceSet) empty() bool { + return ps.bufferCount == 0 +} diff --git a/vendor/github.com/Shopify/sarama/real_decoder.go b/vendor/github.com/Shopify/sarama/real_decoder.go new file mode 100644 index 00000000000..2482c637763 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/real_decoder.go @@ -0,0 +1,437 @@ +package sarama + +import ( + "encoding/binary" + "math" +) + +var ( + errInvalidArrayLength = PacketDecodingError{"invalid array length"} + errInvalidByteSliceLength = PacketDecodingError{"invalid byteslice length"} + errInvalidStringLength = PacketDecodingError{"invalid string length"} + errVarintOverflow = PacketDecodingError{"varint overflow"} + errUVarintOverflow = PacketDecodingError{"uvarint overflow"} + errInvalidBool = PacketDecodingError{"invalid bool"} + errUnsupportedTaggedFields = PacketDecodingError{"non-empty tagged fields are not 
supported yet"} +) + +type realDecoder struct { + raw []byte + off int + stack []pushDecoder +} + +// primitives + +func (rd *realDecoder) getInt8() (int8, error) { + if rd.remaining() < 1 { + rd.off = len(rd.raw) + return -1, ErrInsufficientData + } + tmp := int8(rd.raw[rd.off]) + rd.off++ + return tmp, nil +} + +func (rd *realDecoder) getInt16() (int16, error) { + if rd.remaining() < 2 { + rd.off = len(rd.raw) + return -1, ErrInsufficientData + } + tmp := int16(binary.BigEndian.Uint16(rd.raw[rd.off:])) + rd.off += 2 + return tmp, nil +} + +func (rd *realDecoder) getInt32() (int32, error) { + if rd.remaining() < 4 { + rd.off = len(rd.raw) + return -1, ErrInsufficientData + } + tmp := int32(binary.BigEndian.Uint32(rd.raw[rd.off:])) + rd.off += 4 + return tmp, nil +} + +func (rd *realDecoder) getInt64() (int64, error) { + if rd.remaining() < 8 { + rd.off = len(rd.raw) + return -1, ErrInsufficientData + } + tmp := int64(binary.BigEndian.Uint64(rd.raw[rd.off:])) + rd.off += 8 + return tmp, nil +} + +func (rd *realDecoder) getVarint() (int64, error) { + tmp, n := binary.Varint(rd.raw[rd.off:]) + if n == 0 { + rd.off = len(rd.raw) + return -1, ErrInsufficientData + } + if n < 0 { + rd.off -= n + return -1, errVarintOverflow + } + rd.off += n + return tmp, nil +} + +func (rd *realDecoder) getUVarint() (uint64, error) { + tmp, n := binary.Uvarint(rd.raw[rd.off:]) + if n == 0 { + rd.off = len(rd.raw) + return 0, ErrInsufficientData + } + + if n < 0 { + rd.off -= n + return 0, errUVarintOverflow + } + + rd.off += n + return tmp, nil +} + +func (rd *realDecoder) getArrayLength() (int, error) { + if rd.remaining() < 4 { + rd.off = len(rd.raw) + return -1, ErrInsufficientData + } + tmp := int(int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))) + rd.off += 4 + if tmp > rd.remaining() { + rd.off = len(rd.raw) + return -1, ErrInsufficientData + } else if tmp > 2*math.MaxUint16 { + return -1, errInvalidArrayLength + } + return tmp, nil +} + +func (rd *realDecoder) getCompactArrayLength() (int, error) { + n, err := rd.getUVarint() + if err != nil { + return 0, err + } + + if n == 0 { + return 0, nil + } + + return int(n) - 1, nil +} + +func (rd *realDecoder) getBool() (bool, error) { + b, err := rd.getInt8() + if err != nil || b == 0 { + return false, err + } + if b != 1 { + return false, errInvalidBool + } + return true, nil +} + +func (rd *realDecoder) getEmptyTaggedFieldArray() (int, error) { + tagCount, err := rd.getUVarint() + if err != nil { + return 0, err + } + + if tagCount != 0 { + return 0, errUnsupportedTaggedFields + } + + return 0, nil +} + +// collections + +func (rd *realDecoder) getBytes() ([]byte, error) { + tmp, err := rd.getInt32() + if err != nil { + return nil, err + } + if tmp == -1 { + return nil, nil + } + + return rd.getRawBytes(int(tmp)) +} + +func (rd *realDecoder) getVarintBytes() ([]byte, error) { + tmp, err := rd.getVarint() + if err != nil { + return nil, err + } + if tmp == -1 { + return nil, nil + } + + return rd.getRawBytes(int(tmp)) +} + +func (rd *realDecoder) getCompactBytes() ([]byte, error) { + n, err := rd.getUVarint() + if err != nil { + return nil, err + } + + length := int(n - 1) + return rd.getRawBytes(length) +} + +func (rd *realDecoder) getStringLength() (int, error) { + length, err := rd.getInt16() + if err != nil { + return 0, err + } + + n := int(length) + + switch { + case n < -1: + return 0, errInvalidStringLength + case n > rd.remaining(): + rd.off = len(rd.raw) + return 0, ErrInsufficientData + } + + return n, nil +} + +func (rd *realDecoder) getString() 
(string, error) { + n, err := rd.getStringLength() + if err != nil || n == -1 { + return "", err + } + + tmpStr := string(rd.raw[rd.off : rd.off+n]) + rd.off += n + return tmpStr, nil +} + +func (rd *realDecoder) getNullableString() (*string, error) { + n, err := rd.getStringLength() + if err != nil || n == -1 { + return nil, err + } + + tmpStr := string(rd.raw[rd.off : rd.off+n]) + rd.off += n + return &tmpStr, err +} + +func (rd *realDecoder) getCompactString() (string, error) { + n, err := rd.getUVarint() + if err != nil { + return "", err + } + + length := int(n - 1) + + tmpStr := string(rd.raw[rd.off : rd.off+length]) + rd.off += length + return tmpStr, nil +} + +func (rd *realDecoder) getCompactNullableString() (*string, error) { + n, err := rd.getUVarint() + if err != nil { + return nil, err + } + + length := int(n - 1) + + if length < 0 { + return nil, err + } + + tmpStr := string(rd.raw[rd.off : rd.off+length]) + rd.off += length + return &tmpStr, err +} + +func (rd *realDecoder) getCompactInt32Array() ([]int32, error) { + n, err := rd.getUVarint() + if err != nil { + return nil, err + } + + if n == 0 { + return nil, nil + } + + arrayLength := int(n) - 1 + + ret := make([]int32, arrayLength) + + for i := range ret { + ret[i] = int32(binary.BigEndian.Uint32(rd.raw[rd.off:])) + rd.off += 4 + } + return ret, nil +} + +func (rd *realDecoder) getInt32Array() ([]int32, error) { + if rd.remaining() < 4 { + rd.off = len(rd.raw) + return nil, ErrInsufficientData + } + n := int(binary.BigEndian.Uint32(rd.raw[rd.off:])) + rd.off += 4 + + if rd.remaining() < 4*n { + rd.off = len(rd.raw) + return nil, ErrInsufficientData + } + + if n == 0 { + return nil, nil + } + + if n < 0 { + return nil, errInvalidArrayLength + } + + ret := make([]int32, n) + for i := range ret { + ret[i] = int32(binary.BigEndian.Uint32(rd.raw[rd.off:])) + rd.off += 4 + } + return ret, nil +} + +func (rd *realDecoder) getInt64Array() ([]int64, error) { + if rd.remaining() < 4 { + rd.off = len(rd.raw) + return nil, ErrInsufficientData + } + n := int(binary.BigEndian.Uint32(rd.raw[rd.off:])) + rd.off += 4 + + if rd.remaining() < 8*n { + rd.off = len(rd.raw) + return nil, ErrInsufficientData + } + + if n == 0 { + return nil, nil + } + + if n < 0 { + return nil, errInvalidArrayLength + } + + ret := make([]int64, n) + for i := range ret { + ret[i] = int64(binary.BigEndian.Uint64(rd.raw[rd.off:])) + rd.off += 8 + } + return ret, nil +} + +func (rd *realDecoder) getStringArray() ([]string, error) { + if rd.remaining() < 4 { + rd.off = len(rd.raw) + return nil, ErrInsufficientData + } + n := int(binary.BigEndian.Uint32(rd.raw[rd.off:])) + rd.off += 4 + + if n == 0 { + return nil, nil + } + + if n < 0 { + return nil, errInvalidArrayLength + } + + ret := make([]string, n) + for i := range ret { + str, err := rd.getString() + if err != nil { + return nil, err + } + + ret[i] = str + } + return ret, nil +} + +// subsets + +func (rd *realDecoder) remaining() int { + return len(rd.raw) - rd.off +} + +func (rd *realDecoder) getSubset(length int) (packetDecoder, error) { + buf, err := rd.getRawBytes(length) + if err != nil { + return nil, err + } + return &realDecoder{raw: buf}, nil +} + +func (rd *realDecoder) getRawBytes(length int) ([]byte, error) { + if length < 0 { + return nil, errInvalidByteSliceLength + } else if length > rd.remaining() { + rd.off = len(rd.raw) + return nil, ErrInsufficientData + } + + start := rd.off + rd.off += length + return rd.raw[start:rd.off], nil +} + +func (rd *realDecoder) peek(offset, length int) 
(packetDecoder, error) { + if rd.remaining() < offset+length { + return nil, ErrInsufficientData + } + off := rd.off + offset + return &realDecoder{raw: rd.raw[off : off+length]}, nil +} + +func (rd *realDecoder) peekInt8(offset int) (int8, error) { + const byteLen = 1 + if rd.remaining() < offset+byteLen { + return -1, ErrInsufficientData + } + return int8(rd.raw[rd.off+offset]), nil +} + +// stacks + +func (rd *realDecoder) push(in pushDecoder) error { + in.saveOffset(rd.off) + + var reserve int + if dpd, ok := in.(dynamicPushDecoder); ok { + if err := dpd.decode(rd); err != nil { + return err + } + } else { + reserve = in.reserveLength() + if rd.remaining() < reserve { + rd.off = len(rd.raw) + return ErrInsufficientData + } + } + + rd.stack = append(rd.stack, in) + + rd.off += reserve + + return nil +} + +func (rd *realDecoder) pop() error { + // this is go's ugly pop pattern (the inverse of append) + in := rd.stack[len(rd.stack)-1] + rd.stack = rd.stack[:len(rd.stack)-1] + + return in.check(rd.off, rd.raw) +} diff --git a/vendor/github.com/Shopify/sarama/real_encoder.go b/vendor/github.com/Shopify/sarama/real_encoder.go new file mode 100644 index 00000000000..c07204cbc73 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/real_encoder.go @@ -0,0 +1,213 @@ +package sarama + +import ( + "encoding/binary" + "errors" + + "github.com/rcrowley/go-metrics" +) + +type realEncoder struct { + raw []byte + off int + stack []pushEncoder + registry metrics.Registry +} + +// primitives + +func (re *realEncoder) putInt8(in int8) { + re.raw[re.off] = byte(in) + re.off++ +} + +func (re *realEncoder) putInt16(in int16) { + binary.BigEndian.PutUint16(re.raw[re.off:], uint16(in)) + re.off += 2 +} + +func (re *realEncoder) putInt32(in int32) { + binary.BigEndian.PutUint32(re.raw[re.off:], uint32(in)) + re.off += 4 +} + +func (re *realEncoder) putInt64(in int64) { + binary.BigEndian.PutUint64(re.raw[re.off:], uint64(in)) + re.off += 8 +} + +func (re *realEncoder) putVarint(in int64) { + re.off += binary.PutVarint(re.raw[re.off:], in) +} + +func (re *realEncoder) putUVarint(in uint64) { + re.off += binary.PutUvarint(re.raw[re.off:], in) +} + +func (re *realEncoder) putArrayLength(in int) error { + re.putInt32(int32(in)) + return nil +} + +func (re *realEncoder) putCompactArrayLength(in int) { + // 0 represents a null array, so +1 has to be added + re.putUVarint(uint64(in + 1)) +} + +func (re *realEncoder) putBool(in bool) { + if in { + re.putInt8(1) + return + } + re.putInt8(0) +} + +// collection + +func (re *realEncoder) putRawBytes(in []byte) error { + copy(re.raw[re.off:], in) + re.off += len(in) + return nil +} + +func (re *realEncoder) putBytes(in []byte) error { + if in == nil { + re.putInt32(-1) + return nil + } + re.putInt32(int32(len(in))) + return re.putRawBytes(in) +} + +func (re *realEncoder) putVarintBytes(in []byte) error { + if in == nil { + re.putVarint(-1) + return nil + } + re.putVarint(int64(len(in))) + return re.putRawBytes(in) +} + +func (re *realEncoder) putCompactBytes(in []byte) error { + re.putUVarint(uint64(len(in) + 1)) + return re.putRawBytes(in) +} + +func (re *realEncoder) putCompactString(in string) error { + re.putCompactArrayLength(len(in)) + return re.putRawBytes([]byte(in)) +} + +func (re *realEncoder) putNullableCompactString(in *string) error { + if in == nil { + re.putInt8(0) + return nil + } + return re.putCompactString(*in) +} + +func (re *realEncoder) putString(in string) error { + re.putInt16(int16(len(in))) + copy(re.raw[re.off:], in) + re.off += len(in) + return 
nil +} + +func (re *realEncoder) putNullableString(in *string) error { + if in == nil { + re.putInt16(-1) + return nil + } + return re.putString(*in) +} + +func (re *realEncoder) putStringArray(in []string) error { + err := re.putArrayLength(len(in)) + if err != nil { + return err + } + + for _, val := range in { + if err := re.putString(val); err != nil { + return err + } + } + + return nil +} + +func (re *realEncoder) putCompactInt32Array(in []int32) error { + if in == nil { + return errors.New("expected int32 array to be non null") + } + // 0 represents a null array, so +1 has to be added + re.putUVarint(uint64(len(in)) + 1) + for _, val := range in { + re.putInt32(val) + } + return nil +} + +func (re *realEncoder) putNullableCompactInt32Array(in []int32) error { + if in == nil { + re.putUVarint(0) + return nil + } + // 0 represents a null array, so +1 has to be added + re.putUVarint(uint64(len(in)) + 1) + for _, val := range in { + re.putInt32(val) + } + return nil +} + +func (re *realEncoder) putInt32Array(in []int32) error { + err := re.putArrayLength(len(in)) + if err != nil { + return err + } + for _, val := range in { + re.putInt32(val) + } + return nil +} + +func (re *realEncoder) putInt64Array(in []int64) error { + err := re.putArrayLength(len(in)) + if err != nil { + return err + } + for _, val := range in { + re.putInt64(val) + } + return nil +} + +func (re *realEncoder) putEmptyTaggedFieldArray() { + re.putUVarint(0) +} + +func (re *realEncoder) offset() int { + return re.off +} + +// stacks + +func (re *realEncoder) push(in pushEncoder) { + in.saveOffset(re.off) + re.off += in.reserveLength() + re.stack = append(re.stack, in) +} + +func (re *realEncoder) pop() error { + // this is go's ugly pop pattern (the inverse of append) + in := re.stack[len(re.stack)-1] + re.stack = re.stack[:len(re.stack)-1] + + return in.run(re.off, re.raw) +} + +// we do record metrics during the real encoder pass +func (re *realEncoder) metricRegistry() metrics.Registry { + return re.registry +} diff --git a/vendor/github.com/Shopify/sarama/record.go b/vendor/github.com/Shopify/sarama/record.go new file mode 100644 index 00000000000..a3fe8c0614e --- /dev/null +++ b/vendor/github.com/Shopify/sarama/record.go @@ -0,0 +1,116 @@ +package sarama + +import ( + "encoding/binary" + "time" +) + +const ( + isTransactionalMask = 0x10 + controlMask = 0x20 + maximumRecordOverhead = 5*binary.MaxVarintLen32 + binary.MaxVarintLen64 + 1 +) + +// RecordHeader stores key and value for a record header +type RecordHeader struct { + Key []byte + Value []byte +} + +func (h *RecordHeader) encode(pe packetEncoder) error { + if err := pe.putVarintBytes(h.Key); err != nil { + return err + } + return pe.putVarintBytes(h.Value) +} + +func (h *RecordHeader) decode(pd packetDecoder) (err error) { + if h.Key, err = pd.getVarintBytes(); err != nil { + return err + } + + if h.Value, err = pd.getVarintBytes(); err != nil { + return err + } + return nil +} + +// Record is kafka record type +type Record struct { + Headers []*RecordHeader + + Attributes int8 + TimestampDelta time.Duration + OffsetDelta int64 + Key []byte + Value []byte + length varintLengthField +} + +func (r *Record) encode(pe packetEncoder) error { + pe.push(&r.length) + pe.putInt8(r.Attributes) + pe.putVarint(int64(r.TimestampDelta / time.Millisecond)) + pe.putVarint(r.OffsetDelta) + if err := pe.putVarintBytes(r.Key); err != nil { + return err + } + if err := pe.putVarintBytes(r.Value); err != nil { + return err + } + pe.putVarint(int64(len(r.Headers))) + + for _, 
h := range r.Headers { + if err := h.encode(pe); err != nil { + return err + } + } + + return pe.pop() +} + +func (r *Record) decode(pd packetDecoder) (err error) { + if err = pd.push(&r.length); err != nil { + return err + } + + if r.Attributes, err = pd.getInt8(); err != nil { + return err + } + + timestamp, err := pd.getVarint() + if err != nil { + return err + } + r.TimestampDelta = time.Duration(timestamp) * time.Millisecond + + if r.OffsetDelta, err = pd.getVarint(); err != nil { + return err + } + + if r.Key, err = pd.getVarintBytes(); err != nil { + return err + } + + if r.Value, err = pd.getVarintBytes(); err != nil { + return err + } + + numHeaders, err := pd.getVarint() + if err != nil { + return err + } + + if numHeaders >= 0 { + r.Headers = make([]*RecordHeader, numHeaders) + } + for i := int64(0); i < numHeaders; i++ { + hdr := new(RecordHeader) + if err := hdr.decode(pd); err != nil { + return err + } + r.Headers[i] = hdr + } + + return pd.pop() +} diff --git a/vendor/github.com/Shopify/sarama/record_batch.go b/vendor/github.com/Shopify/sarama/record_batch.go new file mode 100644 index 00000000000..c653763eca8 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/record_batch.go @@ -0,0 +1,225 @@ +package sarama + +import ( + "fmt" + "time" +) + +const recordBatchOverhead = 49 + +type recordsArray []*Record + +func (e recordsArray) encode(pe packetEncoder) error { + for _, r := range e { + if err := r.encode(pe); err != nil { + return err + } + } + return nil +} + +func (e recordsArray) decode(pd packetDecoder) error { + for i := range e { + rec := &Record{} + if err := rec.decode(pd); err != nil { + return err + } + e[i] = rec + } + return nil +} + +type RecordBatch struct { + FirstOffset int64 + PartitionLeaderEpoch int32 + Version int8 + Codec CompressionCodec + CompressionLevel int + Control bool + LogAppendTime bool + LastOffsetDelta int32 + FirstTimestamp time.Time + MaxTimestamp time.Time + ProducerID int64 + ProducerEpoch int16 + FirstSequence int32 + Records []*Record + PartialTrailingRecord bool + IsTransactional bool + + compressedRecords []byte + recordsLen int // uncompressed records size +} + +func (b *RecordBatch) LastOffset() int64 { + return b.FirstOffset + int64(b.LastOffsetDelta) +} + +func (b *RecordBatch) encode(pe packetEncoder) error { + if b.Version != 2 { + return PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", b.Codec)} + } + pe.putInt64(b.FirstOffset) + pe.push(&lengthField{}) + pe.putInt32(b.PartitionLeaderEpoch) + pe.putInt8(b.Version) + pe.push(newCRC32Field(crcCastagnoli)) + pe.putInt16(b.computeAttributes()) + pe.putInt32(b.LastOffsetDelta) + + if err := (Timestamp{&b.FirstTimestamp}).encode(pe); err != nil { + return err + } + + if err := (Timestamp{&b.MaxTimestamp}).encode(pe); err != nil { + return err + } + + pe.putInt64(b.ProducerID) + pe.putInt16(b.ProducerEpoch) + pe.putInt32(b.FirstSequence) + + if err := pe.putArrayLength(len(b.Records)); err != nil { + return err + } + + if b.compressedRecords == nil { + if err := b.encodeRecords(pe); err != nil { + return err + } + } + if err := pe.putRawBytes(b.compressedRecords); err != nil { + return err + } + + if err := pe.pop(); err != nil { + return err + } + return pe.pop() +} + +func (b *RecordBatch) decode(pd packetDecoder) (err error) { + if b.FirstOffset, err = pd.getInt64(); err != nil { + return err + } + + batchLen, err := pd.getInt32() + if err != nil { + return err + } + + if b.PartitionLeaderEpoch, err = pd.getInt32(); err != nil { + return err + } + + if 
b.Version, err = pd.getInt8(); err != nil {
+ return err
+ }
+
+ crc32Decoder := acquireCrc32Field(crcCastagnoli)
+ defer releaseCrc32Field(crc32Decoder)
+
+ if err = pd.push(crc32Decoder); err != nil {
+ return err
+ }
+
+ attributes, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ b.Codec = CompressionCodec(int8(attributes) & compressionCodecMask)
+ b.Control = attributes&controlMask == controlMask
+ b.LogAppendTime = attributes&timestampTypeMask == timestampTypeMask
+ b.IsTransactional = attributes&isTransactionalMask == isTransactionalMask
+
+ if b.LastOffsetDelta, err = pd.getInt32(); err != nil {
+ return err
+ }
+
+ if err = (Timestamp{&b.FirstTimestamp}).decode(pd); err != nil {
+ return err
+ }
+
+ if err = (Timestamp{&b.MaxTimestamp}).decode(pd); err != nil {
+ return err
+ }
+
+ if b.ProducerID, err = pd.getInt64(); err != nil {
+ return err
+ }
+
+ if b.ProducerEpoch, err = pd.getInt16(); err != nil {
+ return err
+ }
+
+ if b.FirstSequence, err = pd.getInt32(); err != nil {
+ return err
+ }
+
+ numRecs, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if numRecs >= 0 {
+ b.Records = make([]*Record, numRecs)
+ }
+
+ bufSize := int(batchLen) - recordBatchOverhead
+ recBuffer, err := pd.getRawBytes(bufSize)
+ if err != nil {
+ if err == ErrInsufficientData {
+ b.PartialTrailingRecord = true
+ b.Records = nil
+ return nil
+ }
+ return err
+ }
+
+ if err = pd.pop(); err != nil {
+ return err
+ }
+
+ recBuffer, err = decompress(b.Codec, recBuffer)
+ if err != nil {
+ return err
+ }
+
+ b.recordsLen = len(recBuffer)
+ err = decode(recBuffer, recordsArray(b.Records))
+ if err == ErrInsufficientData {
+ b.PartialTrailingRecord = true
+ b.Records = nil
+ return nil
+ }
+ return err
+}
+
+func (b *RecordBatch) encodeRecords(pe packetEncoder) error {
+ var raw []byte
+ var err error
+ if raw, err = encode(recordsArray(b.Records), pe.metricRegistry()); err != nil {
+ return err
+ }
+ b.recordsLen = len(raw)
+
+ b.compressedRecords, err = compress(b.Codec, b.CompressionLevel, raw)
+ return err
+}
+
+func (b *RecordBatch) computeAttributes() int16 {
+ attr := int16(b.Codec) & int16(compressionCodecMask)
+ if b.Control {
+ attr |= controlMask
+ }
+ if b.LogAppendTime {
+ attr |= timestampTypeMask
+ }
+ if b.IsTransactional {
+ attr |= isTransactionalMask
+ }
+ return attr
+}
+
+func (b *RecordBatch) addRecord(r *Record) {
+ b.Records = append(b.Records, r)
+} diff --git a/vendor/github.com/Shopify/sarama/records.go b/vendor/github.com/Shopify/sarama/records.go new file mode 100644 index 00000000000..f4c5e95f1de --- /dev/null +++ b/vendor/github.com/Shopify/sarama/records.go @@ -0,0 +1,203 @@ +package sarama
+
+import "fmt"
+
+const (
+ unknownRecords = iota
+ legacyRecords
+ defaultRecords
+
+ magicOffset = 16
+)
+
+// Records implements a union type containing either a RecordBatch or a legacy MessageSet.
+type Records struct {
+ recordsType int
+ MsgSet *MessageSet
+ RecordBatch *RecordBatch
+}
+
+func newLegacyRecords(msgSet *MessageSet) Records {
+ return Records{recordsType: legacyRecords, MsgSet: msgSet}
+}
+
+func newDefaultRecords(batch *RecordBatch) Records {
+ return Records{recordsType: defaultRecords, RecordBatch: batch}
+}
+
+// setTypeFromFields sets the type of Records depending on which of MsgSet or RecordBatch is not nil.
+// The first return value indicates whether both fields are nil (and the type is not set).
+// If both fields are not nil, it returns an error.
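+// When both fields are nil the Records value is treated as empty, so encode,
+// numRecords and the other queries become no-ops rather than errors.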
+func (r *Records) setTypeFromFields() (bool, error) { + if r.MsgSet == nil && r.RecordBatch == nil { + return true, nil + } + if r.MsgSet != nil && r.RecordBatch != nil { + return false, fmt.Errorf("both MsgSet and RecordBatch are set, but record type is unknown") + } + r.recordsType = defaultRecords + if r.MsgSet != nil { + r.recordsType = legacyRecords + } + return false, nil +} + +func (r *Records) encode(pe packetEncoder) error { + if r.recordsType == unknownRecords { + if empty, err := r.setTypeFromFields(); err != nil || empty { + return err + } + } + + switch r.recordsType { + case legacyRecords: + if r.MsgSet == nil { + return nil + } + return r.MsgSet.encode(pe) + case defaultRecords: + if r.RecordBatch == nil { + return nil + } + return r.RecordBatch.encode(pe) + } + + return fmt.Errorf("unknown records type: %v", r.recordsType) +} + +func (r *Records) setTypeFromMagic(pd packetDecoder) error { + magic, err := magicValue(pd) + if err != nil { + return err + } + + r.recordsType = defaultRecords + if magic < 2 { + r.recordsType = legacyRecords + } + + return nil +} + +func (r *Records) decode(pd packetDecoder) error { + if r.recordsType == unknownRecords { + if err := r.setTypeFromMagic(pd); err != nil { + return err + } + } + + switch r.recordsType { + case legacyRecords: + r.MsgSet = &MessageSet{} + return r.MsgSet.decode(pd) + case defaultRecords: + r.RecordBatch = &RecordBatch{} + return r.RecordBatch.decode(pd) + } + return fmt.Errorf("unknown records type: %v", r.recordsType) +} + +func (r *Records) numRecords() (int, error) { + if r.recordsType == unknownRecords { + if empty, err := r.setTypeFromFields(); err != nil || empty { + return 0, err + } + } + + switch r.recordsType { + case legacyRecords: + if r.MsgSet == nil { + return 0, nil + } + return len(r.MsgSet.Messages), nil + case defaultRecords: + if r.RecordBatch == nil { + return 0, nil + } + return len(r.RecordBatch.Records), nil + } + return 0, fmt.Errorf("unknown records type: %v", r.recordsType) +} + +func (r *Records) isPartial() (bool, error) { + if r.recordsType == unknownRecords { + if empty, err := r.setTypeFromFields(); err != nil || empty { + return false, err + } + } + + switch r.recordsType { + case unknownRecords: + return false, nil + case legacyRecords: + if r.MsgSet == nil { + return false, nil + } + return r.MsgSet.PartialTrailingMessage, nil + case defaultRecords: + if r.RecordBatch == nil { + return false, nil + } + return r.RecordBatch.PartialTrailingRecord, nil + } + return false, fmt.Errorf("unknown records type: %v", r.recordsType) +} + +func (r *Records) isControl() (bool, error) { + if r.recordsType == unknownRecords { + if empty, err := r.setTypeFromFields(); err != nil || empty { + return false, err + } + } + + switch r.recordsType { + case legacyRecords: + return false, nil + case defaultRecords: + if r.RecordBatch == nil { + return false, nil + } + return r.RecordBatch.Control, nil + } + return false, fmt.Errorf("unknown records type: %v", r.recordsType) +} + +func (r *Records) isOverflow() (bool, error) { + if r.recordsType == unknownRecords { + if empty, err := r.setTypeFromFields(); err != nil || empty { + return false, err + } + } + + switch r.recordsType { + case unknownRecords: + return false, nil + case legacyRecords: + if r.MsgSet == nil { + return false, nil + } + return r.MsgSet.OverflowMessage, nil + case defaultRecords: + return false, nil + } + return false, fmt.Errorf("unknown records type: %v", r.recordsType) +} + +func magicValue(pd packetDecoder) (int8, error) { + return 
pd.peekInt8(magicOffset) +} + +func (r *Records) getControlRecord() (ControlRecord, error) { + if r.RecordBatch == nil || len(r.RecordBatch.Records) <= 0 { + return ControlRecord{}, fmt.Errorf("cannot get control record, record batch is empty") + } + + firstRecord := r.RecordBatch.Records[0] + controlRecord := ControlRecord{} + err := controlRecord.decode(&realDecoder{raw: firstRecord.Key}, &realDecoder{raw: firstRecord.Value}) + if err != nil { + return ControlRecord{}, err + } + + return controlRecord, nil +} diff --git a/vendor/github.com/Shopify/sarama/request.go b/vendor/github.com/Shopify/sarama/request.go new file mode 100644 index 00000000000..d899df53463 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/request.go @@ -0,0 +1,197 @@ +package sarama + +import ( + "encoding/binary" + "fmt" + "io" +) + +type protocolBody interface { + encoder + versionedDecoder + key() int16 + version() int16 + headerVersion() int16 + requiredVersion() KafkaVersion +} + +type request struct { + correlationID int32 + clientID string + body protocolBody +} + +func (r *request) encode(pe packetEncoder) error { + pe.push(&lengthField{}) + pe.putInt16(r.body.key()) + pe.putInt16(r.body.version()) + pe.putInt32(r.correlationID) + + if r.body.headerVersion() >= 1 { + err := pe.putString(r.clientID) + if err != nil { + return err + } + } + + if r.body.headerVersion() >= 2 { + // we don't use tag headers at the moment so we just put an array length of 0 + pe.putUVarint(0) + } + + err := r.body.encode(pe) + if err != nil { + return err + } + + return pe.pop() +} + +func (r *request) decode(pd packetDecoder) (err error) { + key, err := pd.getInt16() + if err != nil { + return err + } + + version, err := pd.getInt16() + if err != nil { + return err + } + + r.correlationID, err = pd.getInt32() + if err != nil { + return err + } + + r.clientID, err = pd.getString() + if err != nil { + return err + } + + r.body = allocateBody(key, version) + if r.body == nil { + return PacketDecodingError{fmt.Sprintf("unknown request key (%d)", key)} + } + + if r.body.headerVersion() >= 2 { + // tagged field + _, err = pd.getUVarint() + if err != nil { + return err + } + } + + return r.body.decode(pd, version) +} + +func decodeRequest(r io.Reader) (*request, int, error) { + var ( + bytesRead int + lengthBytes = make([]byte, 4) + ) + + if _, err := io.ReadFull(r, lengthBytes); err != nil { + return nil, bytesRead, err + } + + bytesRead += len(lengthBytes) + length := int32(binary.BigEndian.Uint32(lengthBytes)) + + if length <= 4 || length > MaxRequestSize { + return nil, bytesRead, PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", length)} + } + + encodedReq := make([]byte, length) + if _, err := io.ReadFull(r, encodedReq); err != nil { + return nil, bytesRead, err + } + + bytesRead += len(encodedReq) + + req := &request{} + if err := decode(encodedReq, req); err != nil { + return nil, bytesRead, err + } + + return req, bytesRead, nil +} + +func allocateBody(key, version int16) protocolBody { + switch key { + case 0: + return &ProduceRequest{} + case 1: + return &FetchRequest{Version: version} + case 2: + return &OffsetRequest{Version: version} + case 3: + return &MetadataRequest{} + case 8: + return &OffsetCommitRequest{Version: version} + case 9: + return &OffsetFetchRequest{Version: version} + case 10: + return &FindCoordinatorRequest{} + case 11: + return &JoinGroupRequest{} + case 12: + return &HeartbeatRequest{} + case 13: + return &LeaveGroupRequest{} + case 14: + return &SyncGroupRequest{} + 
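+ // (request keys follow the Kafka protocol's ApiKey numbering; keys absent from
+ // this switch are requests this version of sarama does not implement)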
case 15: + return &DescribeGroupsRequest{} + case 16: + return &ListGroupsRequest{} + case 17: + return &SaslHandshakeRequest{} + case 18: + return &ApiVersionsRequest{} + case 19: + return &CreateTopicsRequest{} + case 20: + return &DeleteTopicsRequest{} + case 21: + return &DeleteRecordsRequest{} + case 22: + return &InitProducerIDRequest{} + case 24: + return &AddPartitionsToTxnRequest{} + case 25: + return &AddOffsetsToTxnRequest{} + case 26: + return &EndTxnRequest{} + case 28: + return &TxnOffsetCommitRequest{} + case 29: + return &DescribeAclsRequest{} + case 30: + return &CreateAclsRequest{} + case 31: + return &DeleteAclsRequest{} + case 32: + return &DescribeConfigsRequest{} + case 33: + return &AlterConfigsRequest{} + case 35: + return &DescribeLogDirsRequest{} + case 36: + return &SaslAuthenticateRequest{} + case 37: + return &CreatePartitionsRequest{} + case 42: + return &DeleteGroupsRequest{} + case 44: + return &IncrementalAlterConfigsRequest{} + case 45: + return &AlterPartitionReassignmentsRequest{} + case 46: + return &ListPartitionReassignmentsRequest{} + case 50: + return &DescribeUserScramCredentialsRequest{} + case 51: + return &AlterUserScramCredentialsRequest{} + } + return nil +} diff --git a/vendor/github.com/Shopify/sarama/response_header.go b/vendor/github.com/Shopify/sarama/response_header.go new file mode 100644 index 00000000000..fbcef0bfbea --- /dev/null +++ b/vendor/github.com/Shopify/sarama/response_header.go @@ -0,0 +1,33 @@ +package sarama + +import "fmt" + +const ( + responseLengthSize = 4 + correlationIDSize = 4 +) + +type responseHeader struct { + length int32 + correlationID int32 +} + +func (r *responseHeader) decode(pd packetDecoder, version int16) (err error) { + r.length, err = pd.getInt32() + if err != nil { + return err + } + if r.length <= 4 || r.length > MaxResponseSize { + return PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", r.length)} + } + + r.correlationID, err = pd.getInt32() + + if version >= 1 { + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + + return err +} diff --git a/vendor/github.com/Shopify/sarama/sarama.go b/vendor/github.com/Shopify/sarama/sarama.go new file mode 100644 index 00000000000..48f362d287e --- /dev/null +++ b/vendor/github.com/Shopify/sarama/sarama.go @@ -0,0 +1,110 @@ +/* +Package sarama is a pure Go client library for dealing with Apache Kafka (versions 0.8 and later). It includes a high-level +API for easily producing and consuming messages, and a low-level API for controlling bytes on the wire when the high-level +API is insufficient. Usage examples for the high-level APIs are provided inline with their full documentation. + +To produce messages, use either the AsyncProducer or the SyncProducer. The AsyncProducer accepts messages on a channel +and produces them asynchronously in the background as efficiently as possible; it is preferred in most cases. +The SyncProducer provides a method which will block until Kafka acknowledges the message as produced. This can be +useful but comes with two caveats: it will generally be less efficient, and the actual durability guarantees +depend on the configured value of `Producer.RequiredAcks`. There are configurations where a message acknowledged by the +SyncProducer can still sometimes be lost. + +To consume messages, use Consumer or Consumer-Group API. 
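+
+A minimal sketch of partition-level consumption follows; the broker address, topic,
+partition and starting offset are placeholders, and real code should handle errors and
+shutdown properly rather than panicking:
+
+	consumer, err := NewConsumer([]string{"localhost:9092"}, nil)
+	if err != nil {
+		panic(err) // sketch only
+	}
+	defer consumer.Close()
+	pc, err := consumer.ConsumePartition("my-topic", 0, OffsetNewest)
+	if err != nil {
+		panic(err) // sketch only
+	}
+	defer pc.Close()
+	for msg := range pc.Messages() {
+		Logger.Printf("consumed offset %d: %s", msg.Offset, msg.Value)
+	}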
+ +For lower-level needs, the Broker and Request/Response objects permit precise control over each connection +and message sent on the wire; the Client provides higher-level metadata management that is shared between +the producers and the consumer. The Request/Response objects and properties are mostly undocumented, as they line up +exactly with the protocol fields documented by Kafka at +https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol + +Metrics are exposed through https://github.com/rcrowley/go-metrics library in a local registry. + +Broker related metrics: + + +----------------------------------------------+------------+---------------------------------------------------------------+ + | Name | Type | Description | + +----------------------------------------------+------------+---------------------------------------------------------------+ + | incoming-byte-rate | meter | Bytes/second read off all brokers | + | incoming-byte-rate-for-broker- | meter | Bytes/second read off a given broker | + | outgoing-byte-rate | meter | Bytes/second written off all brokers | + | outgoing-byte-rate-for-broker- | meter | Bytes/second written off a given broker | + | request-rate | meter | Requests/second sent to all brokers | + | request-rate-for-broker- | meter | Requests/second sent to a given broker | + | request-size | histogram | Distribution of the request size in bytes for all brokers | + | request-size-for-broker- | histogram | Distribution of the request size in bytes for a given broker | + | request-latency-in-ms | histogram | Distribution of the request latency in ms for all brokers | + | request-latency-in-ms-for-broker- | histogram | Distribution of the request latency in ms for a given broker | + | response-rate | meter | Responses/second received from all brokers | + | response-rate-for-broker- | meter | Responses/second received from a given broker | + | response-size | histogram | Distribution of the response size in bytes for all brokers | + | response-size-for-broker- | histogram | Distribution of the response size in bytes for a given broker | + | requests-in-flight | counter | The current number of in-flight requests awaiting a response | + | | | for all brokers | + | requests-in-flight-for-broker- | counter | The current number of in-flight requests awaiting a response | + | | | for a given broker | + +----------------------------------------------+------------+---------------------------------------------------------------+ + +Note that we do not gather specific metrics for seed brokers but they are part of the "all brokers" metrics. 
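+
+As a sketch, an individual metric can be read back from the registry, here assumed to
+be the one set on the client's Config; the metric name is taken from the table above
+and the type assertion is illustrative:
+
+	meter := config.MetricRegistry.Get("request-rate").(metrics.Meter)
+	Logger.Printf("requests/sec (1m rate): %f", meter.Rate1())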
+
+Producer related metrics:
+
+ +-------------------------------------------+------------+--------------------------------------------------------------------------------------+
+ | Name | Type | Description |
+ +-------------------------------------------+------------+--------------------------------------------------------------------------------------+
+ | batch-size | histogram | Distribution of the number of bytes sent per partition per request for all topics |
+ | batch-size-for-topic- | histogram | Distribution of the number of bytes sent per partition per request for a given topic |
+ | record-send-rate | meter | Records/second sent to all topics |
+ | record-send-rate-for-topic- | meter | Records/second sent to a given topic |
+ | records-per-request | histogram | Distribution of the number of records sent per request for all topics |
+ | records-per-request-for-topic- | histogram | Distribution of the number of records sent per request for a given topic |
+ | compression-ratio | histogram | Distribution of the compression ratio times 100 of record batches for all topics |
+ | compression-ratio-for-topic- | histogram | Distribution of the compression ratio times 100 of record batches for a given topic |
+ +-------------------------------------------+------------+--------------------------------------------------------------------------------------+
+
+Consumer related metrics:
+
+ +-------------------------------------------+------------+--------------------------------------------------------------------------------------+
+ | Name | Type | Description |
+ +-------------------------------------------+------------+--------------------------------------------------------------------------------------+
+ | consumer-batch-size | histogram | Distribution of the number of messages in a batch |
+ +-------------------------------------------+------------+--------------------------------------------------------------------------------------+
+
+*/
+package sarama
+
+import (
+ "io/ioutil"
+ "log"
+)
+
+var (
+ // Logger is the instance of a StdLogger interface that Sarama writes connection
+ // management events to. By default it is set to discard all log messages via ioutil.Discard,
+ // but you can set it to redirect wherever you want.
+ Logger StdLogger = log.New(ioutil.Discard, "[Sarama] ", log.LstdFlags)
+
+ // PanicHandler is called for recovering from panics spawned internally to the library (and thus
+ // not recoverable by the caller's goroutine). Defaults to nil, which means panics are not recovered.
+ PanicHandler func(interface{})
+
+ // MaxRequestSize is the maximum size (in bytes) of any request that Sarama will attempt to send. Trying
+ // to send a request larger than this will result in a PacketEncodingError. The default of 100 MiB is aligned
+ // with Kafka's default `socket.request.max.bytes`, which is the largest request the broker will attempt
+ // to process.
+ MaxRequestSize int32 = 100 * 1024 * 1024
+
+ // MaxResponseSize is the maximum size (in bytes) of any response that Sarama will attempt to parse. If
+ // a broker returns a response message larger than this value, Sarama will return a PacketDecodingError to
+ // protect the client from running out of memory. Please note that brokers do not have any natural limit on
+ // the size of responses they send. In particular, they can send arbitrarily large fetch responses to consumers
+ // (see https://issues.apache.org/jira/browse/KAFKA-2063).
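+ // Consumers can additionally bound fetch sizes via Config.Consumer.Fetch.Max.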
+ MaxResponseSize int32 = 100 * 1024 * 1024 +) + +// StdLogger is used to log error messages. +type StdLogger interface { + Print(v ...interface{}) + Printf(format string, v ...interface{}) + Println(v ...interface{}) +} diff --git a/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go b/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go new file mode 100644 index 00000000000..90504df6f52 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go @@ -0,0 +1,33 @@ +package sarama + +type SaslAuthenticateRequest struct { + SaslAuthBytes []byte +} + +// APIKeySASLAuth is the API key for the SaslAuthenticate Kafka API +const APIKeySASLAuth = 36 + +func (r *SaslAuthenticateRequest) encode(pe packetEncoder) error { + return pe.putBytes(r.SaslAuthBytes) +} + +func (r *SaslAuthenticateRequest) decode(pd packetDecoder, version int16) (err error) { + r.SaslAuthBytes, err = pd.getBytes() + return err +} + +func (r *SaslAuthenticateRequest) key() int16 { + return APIKeySASLAuth +} + +func (r *SaslAuthenticateRequest) version() int16 { + return 0 +} + +func (r *SaslAuthenticateRequest) headerVersion() int16 { + return 1 +} + +func (r *SaslAuthenticateRequest) requiredVersion() KafkaVersion { + return V1_0_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go b/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go new file mode 100644 index 00000000000..3ef57b5afad --- /dev/null +++ b/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go @@ -0,0 +1,48 @@ +package sarama + +type SaslAuthenticateResponse struct { + Err KError + ErrorMessage *string + SaslAuthBytes []byte +} + +func (r *SaslAuthenticateResponse) encode(pe packetEncoder) error { + pe.putInt16(int16(r.Err)) + if err := pe.putNullableString(r.ErrorMessage); err != nil { + return err + } + return pe.putBytes(r.SaslAuthBytes) +} + +func (r *SaslAuthenticateResponse) decode(pd packetDecoder, version int16) error { + kerr, err := pd.getInt16() + if err != nil { + return err + } + + r.Err = KError(kerr) + + if r.ErrorMessage, err = pd.getNullableString(); err != nil { + return err + } + + r.SaslAuthBytes, err = pd.getBytes() + + return err +} + +func (r *SaslAuthenticateResponse) key() int16 { + return APIKeySASLAuth +} + +func (r *SaslAuthenticateResponse) version() int16 { + return 0 +} + +func (r *SaslAuthenticateResponse) headerVersion() int16 { + return 0 +} + +func (r *SaslAuthenticateResponse) requiredVersion() KafkaVersion { + return V1_0_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_request.go b/vendor/github.com/Shopify/sarama/sasl_handshake_request.go new file mode 100644 index 00000000000..74dc3072f48 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/sasl_handshake_request.go @@ -0,0 +1,38 @@ +package sarama + +type SaslHandshakeRequest struct { + Mechanism string + Version int16 +} + +func (r *SaslHandshakeRequest) encode(pe packetEncoder) error { + if err := pe.putString(r.Mechanism); err != nil { + return err + } + + return nil +} + +func (r *SaslHandshakeRequest) decode(pd packetDecoder, version int16) (err error) { + if r.Mechanism, err = pd.getString(); err != nil { + return err + } + + return nil +} + +func (r *SaslHandshakeRequest) key() int16 { + return 17 +} + +func (r *SaslHandshakeRequest) version() int16 { + return r.Version +} + +func (r *SaslHandshakeRequest) headerVersion() int16 { + return 1 +} + +func (r *SaslHandshakeRequest) requiredVersion() KafkaVersion { + return V0_10_0_0 +} diff --git 
a/vendor/github.com/Shopify/sarama/sasl_handshake_response.go b/vendor/github.com/Shopify/sarama/sasl_handshake_response.go new file mode 100644 index 00000000000..69dfc3178ec --- /dev/null +++ b/vendor/github.com/Shopify/sarama/sasl_handshake_response.go @@ -0,0 +1,42 @@ +package sarama + +type SaslHandshakeResponse struct { + Err KError + EnabledMechanisms []string +} + +func (r *SaslHandshakeResponse) encode(pe packetEncoder) error { + pe.putInt16(int16(r.Err)) + return pe.putStringArray(r.EnabledMechanisms) +} + +func (r *SaslHandshakeResponse) decode(pd packetDecoder, version int16) error { + kerr, err := pd.getInt16() + if err != nil { + return err + } + + r.Err = KError(kerr) + + if r.EnabledMechanisms, err = pd.getStringArray(); err != nil { + return err + } + + return nil +} + +func (r *SaslHandshakeResponse) key() int16 { + return 17 +} + +func (r *SaslHandshakeResponse) version() int16 { + return 0 +} + +func (r *SaslHandshakeResponse) headerVersion() int16 { + return 0 +} + +func (r *SaslHandshakeResponse) requiredVersion() KafkaVersion { + return V0_10_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/scram_formatter.go b/vendor/github.com/Shopify/sarama/scram_formatter.go new file mode 100644 index 00000000000..2af9e4a695f --- /dev/null +++ b/vendor/github.com/Shopify/sarama/scram_formatter.go @@ -0,0 +1,78 @@ +package sarama + +import ( + "crypto/hmac" + "crypto/sha256" + "crypto/sha512" + "hash" +) + +// ScramFormatter implementation +// @see: https://github.com/apache/kafka/blob/99b9b3e84f4e98c3f07714e1de6a139a004cbc5b/clients/src/main/java/org/apache/kafka/common/security/scram/internals/ScramFormatter.java#L93 +type scramFormatter struct { + mechanism ScramMechanismType +} + +func (s scramFormatter) mac(key []byte) (hash.Hash, error) { + var m hash.Hash + + switch s.mechanism { + case SCRAM_MECHANISM_SHA_256: + m = hmac.New(sha256.New, key) + + case SCRAM_MECHANISM_SHA_512: + m = hmac.New(sha512.New, key) + default: + return nil, ErrUnknownScramMechanism + } + + return m, nil +} + +func (s scramFormatter) hmac(key []byte, extra []byte) ([]byte, error) { + mac, err := s.mac(key) + if err != nil { + return nil, err + } + + if _, err := mac.Write(extra); err != nil { + return nil, err + } + return mac.Sum(nil), nil +} + +func (s scramFormatter) xor(result []byte, second []byte) { + for i := 0; i < len(result); i++ { + result[i] = result[i] ^ second[i] + } +} + +func (s scramFormatter) saltedPassword(password []byte, salt []byte, iterations int) ([]byte, error) { + mac, err := s.mac(password) + if err != nil { + return nil, err + } + + if _, err := mac.Write(salt); err != nil { + return nil, err + } + if _, err := mac.Write([]byte{0, 0, 0, 1}); err != nil { + return nil, err + } + + u1 := mac.Sum(nil) + prev := u1 + result := u1 + + for i := 2; i <= iterations; i++ { + ui, err := s.hmac(password, prev) + if err != nil { + return nil, err + } + + s.xor(result, ui) + prev = ui + } + + return result, nil +} diff --git a/vendor/github.com/Shopify/sarama/sticky_assignor_user_data.go b/vendor/github.com/Shopify/sarama/sticky_assignor_user_data.go new file mode 100644 index 00000000000..161233fc357 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/sticky_assignor_user_data.go @@ -0,0 +1,124 @@ +package sarama + +type topicPartitionAssignment struct { + Topic string + Partition int32 +} + +type StickyAssignorUserData interface { + partitions() []topicPartitionAssignment + hasGeneration() bool + generation() int +} + +// StickyAssignorUserDataV0 holds topic partition 
information for an assignment +type StickyAssignorUserDataV0 struct { + Topics map[string][]int32 + + topicPartitions []topicPartitionAssignment +} + +func (m *StickyAssignorUserDataV0) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(m.Topics)); err != nil { + return err + } + + for topic, partitions := range m.Topics { + if err := pe.putString(topic); err != nil { + return err + } + if err := pe.putInt32Array(partitions); err != nil { + return err + } + } + return nil +} + +func (m *StickyAssignorUserDataV0) decode(pd packetDecoder) (err error) { + var topicLen int + if topicLen, err = pd.getArrayLength(); err != nil { + return + } + + m.Topics = make(map[string][]int32, topicLen) + for i := 0; i < topicLen; i++ { + var topic string + if topic, err = pd.getString(); err != nil { + return + } + if m.Topics[topic], err = pd.getInt32Array(); err != nil { + return + } + } + m.topicPartitions = populateTopicPartitions(m.Topics) + return nil +} + +func (m *StickyAssignorUserDataV0) partitions() []topicPartitionAssignment { return m.topicPartitions } +func (m *StickyAssignorUserDataV0) hasGeneration() bool { return false } +func (m *StickyAssignorUserDataV0) generation() int { return defaultGeneration } + +// StickyAssignorUserDataV1 holds topic partition information for an assignment +type StickyAssignorUserDataV1 struct { + Topics map[string][]int32 + Generation int32 + + topicPartitions []topicPartitionAssignment +} + +func (m *StickyAssignorUserDataV1) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(m.Topics)); err != nil { + return err + } + + for topic, partitions := range m.Topics { + if err := pe.putString(topic); err != nil { + return err + } + if err := pe.putInt32Array(partitions); err != nil { + return err + } + } + + pe.putInt32(m.Generation) + return nil +} + +func (m *StickyAssignorUserDataV1) decode(pd packetDecoder) (err error) { + var topicLen int + if topicLen, err = pd.getArrayLength(); err != nil { + return + } + + m.Topics = make(map[string][]int32, topicLen) + for i := 0; i < topicLen; i++ { + var topic string + if topic, err = pd.getString(); err != nil { + return + } + if m.Topics[topic], err = pd.getInt32Array(); err != nil { + return + } + } + + m.Generation, err = pd.getInt32() + if err != nil { + return err + } + m.topicPartitions = populateTopicPartitions(m.Topics) + return nil +} + +func (m *StickyAssignorUserDataV1) partitions() []topicPartitionAssignment { return m.topicPartitions } +func (m *StickyAssignorUserDataV1) hasGeneration() bool { return true } +func (m *StickyAssignorUserDataV1) generation() int { return int(m.Generation) } + +func populateTopicPartitions(topics map[string][]int32) []topicPartitionAssignment { + topicPartitions := make([]topicPartitionAssignment, 0) + for topic, partitions := range topics { + for _, partition := range partitions { + topicPartitions = append(topicPartitions, topicPartitionAssignment{Topic: topic, Partition: partition}) + } + } + return topicPartitions +} diff --git a/vendor/github.com/Shopify/sarama/sync_group_request.go b/vendor/github.com/Shopify/sarama/sync_group_request.go new file mode 100644 index 00000000000..ac6ecb13e04 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/sync_group_request.go @@ -0,0 +1,104 @@ +package sarama + +type SyncGroupRequest struct { + GroupId string + GenerationId int32 + MemberId string + GroupAssignments map[string][]byte +} + +func (r *SyncGroupRequest) encode(pe packetEncoder) error { + if err := pe.putString(r.GroupId); err != nil { + return 
err + } + + pe.putInt32(r.GenerationId) + + if err := pe.putString(r.MemberId); err != nil { + return err + } + + if err := pe.putArrayLength(len(r.GroupAssignments)); err != nil { + return err + } + for memberId, memberAssignment := range r.GroupAssignments { + if err := pe.putString(memberId); err != nil { + return err + } + if err := pe.putBytes(memberAssignment); err != nil { + return err + } + } + + return nil +} + +func (r *SyncGroupRequest) decode(pd packetDecoder, version int16) (err error) { + if r.GroupId, err = pd.getString(); err != nil { + return + } + if r.GenerationId, err = pd.getInt32(); err != nil { + return + } + if r.MemberId, err = pd.getString(); err != nil { + return + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + if n == 0 { + return nil + } + + r.GroupAssignments = make(map[string][]byte) + for i := 0; i < n; i++ { + memberId, err := pd.getString() + if err != nil { + return err + } + memberAssignment, err := pd.getBytes() + if err != nil { + return err + } + + r.GroupAssignments[memberId] = memberAssignment + } + + return nil +} + +func (r *SyncGroupRequest) key() int16 { + return 14 +} + +func (r *SyncGroupRequest) version() int16 { + return 0 +} + +func (r *SyncGroupRequest) headerVersion() int16 { + return 1 +} + +func (r *SyncGroupRequest) requiredVersion() KafkaVersion { + return V0_9_0_0 +} + +func (r *SyncGroupRequest) AddGroupAssignment(memberId string, memberAssignment []byte) { + if r.GroupAssignments == nil { + r.GroupAssignments = make(map[string][]byte) + } + + r.GroupAssignments[memberId] = memberAssignment +} + +func (r *SyncGroupRequest) AddGroupAssignmentMember(memberId string, memberAssignment *ConsumerGroupMemberAssignment) error { + bin, err := encode(memberAssignment, nil) + if err != nil { + return err + } + + r.AddGroupAssignment(memberId, bin) + return nil +} diff --git a/vendor/github.com/Shopify/sarama/sync_group_response.go b/vendor/github.com/Shopify/sarama/sync_group_response.go new file mode 100644 index 00000000000..af019c42f97 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/sync_group_response.go @@ -0,0 +1,45 @@ +package sarama + +type SyncGroupResponse struct { + Err KError + MemberAssignment []byte +} + +func (r *SyncGroupResponse) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) { + assignment := new(ConsumerGroupMemberAssignment) + err := decode(r.MemberAssignment, assignment) + return assignment, err +} + +func (r *SyncGroupResponse) encode(pe packetEncoder) error { + pe.putInt16(int16(r.Err)) + return pe.putBytes(r.MemberAssignment) +} + +func (r *SyncGroupResponse) decode(pd packetDecoder, version int16) (err error) { + kerr, err := pd.getInt16() + if err != nil { + return err + } + + r.Err = KError(kerr) + + r.MemberAssignment, err = pd.getBytes() + return +} + +func (r *SyncGroupResponse) key() int16 { + return 14 +} + +func (r *SyncGroupResponse) version() int16 { + return 0 +} + +func (r *SyncGroupResponse) headerVersion() int16 { + return 0 +} + +func (r *SyncGroupResponse) requiredVersion() KafkaVersion { + return V0_9_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/sync_producer.go b/vendor/github.com/Shopify/sarama/sync_producer.go new file mode 100644 index 00000000000..021c5a01032 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/sync_producer.go @@ -0,0 +1,149 @@ +package sarama + +import "sync" + +// SyncProducer publishes Kafka messages, blocking until they have been acknowledged. 
It routes messages to the correct
+// broker, refreshing metadata as appropriate, and parses responses for errors. You must call Close() on a producer
+// to avoid leaks; it may not be garbage-collected automatically when it passes out of scope.
+//
+// The SyncProducer comes with two caveats: it will generally be less efficient than the AsyncProducer, and the actual
+// durability guarantees provided when a message is acknowledged depend on the configured value of `Producer.RequiredAcks`.
+// There are configurations where a message acknowledged by the SyncProducer can still sometimes be lost.
+//
+// For implementation reasons, the SyncProducer requires `Producer.Return.Errors` and `Producer.Return.Successes` to
+// be set to true in its configuration.
+type SyncProducer interface {
+
+	// SendMessage produces a given message, and returns only when it either has
+	// succeeded or failed to produce. It will return the partition and the offset
+	// of the produced message, or an error if the message failed to produce.
+	SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error)
+
+	// SendMessages produces a given set of messages, and returns only when all
+	// messages in the set have either succeeded or failed. Note that messages
+	// can succeed and fail individually; if some succeed and some fail,
+	// SendMessages will return an error.
+	SendMessages(msgs []*ProducerMessage) error
+
+	// Close shuts down the producer and waits for any buffered messages to be
+	// flushed. You must call this function before a producer object passes out of
+	// scope, as it may otherwise leak memory. You must call this before calling
+	// Close on the underlying client.
+	Close() error
+}
+
+type syncProducer struct {
+	producer *asyncProducer
+	wg       sync.WaitGroup
+}
+
+// NewSyncProducer creates a new SyncProducer using the given broker addresses and configuration.
+func NewSyncProducer(addrs []string, config *Config) (SyncProducer, error) {
+	if config == nil {
+		config = NewConfig()
+		config.Producer.Return.Successes = true
+	}
+
+	if err := verifyProducerConfig(config); err != nil {
+		return nil, err
+	}
+
+	p, err := NewAsyncProducer(addrs, config)
+	if err != nil {
+		return nil, err
+	}
+	return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil
+}
+
+// NewSyncProducerFromClient creates a new SyncProducer using the given client. It is still
+// necessary to call Close() on the underlying client when shutting down this producer.
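+//
+// A short usage sketch (an illustration, not part of the vendored file; the
+// broker address and topic are placeholders). Note that the client's config
+// must have Producer.Return.Successes set to true or verifyProducerConfig
+// will reject it:
+//
+//	cfg := sarama.NewConfig()
+//	cfg.Producer.Return.Successes = true
+//	client, _ := sarama.NewClient([]string{"localhost:9092"}, cfg)
+//	defer client.Close()
+//	producer, _ := sarama.NewSyncProducerFromClient(client)
+//	defer producer.Close()
+//	partition, offset, _ := producer.SendMessage(&sarama.ProducerMessage{
+//		Topic: "example-topic",
+//		Value: sarama.StringEncoder("hello"),
+//	})
+//	log.Printf("stored at partition=%d offset=%d", partition, offset)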
+func NewSyncProducerFromClient(client Client) (SyncProducer, error) { + if err := verifyProducerConfig(client.Config()); err != nil { + return nil, err + } + + p, err := NewAsyncProducerFromClient(client) + if err != nil { + return nil, err + } + return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil +} + +func newSyncProducerFromAsyncProducer(p *asyncProducer) *syncProducer { + sp := &syncProducer{producer: p} + + sp.wg.Add(2) + go withRecover(sp.handleSuccesses) + go withRecover(sp.handleErrors) + + return sp +} + +func verifyProducerConfig(config *Config) error { + if !config.Producer.Return.Errors { + return ConfigurationError("Producer.Return.Errors must be true to be used in a SyncProducer") + } + if !config.Producer.Return.Successes { + return ConfigurationError("Producer.Return.Successes must be true to be used in a SyncProducer") + } + return nil +} + +func (sp *syncProducer) SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error) { + expectation := make(chan *ProducerError, 1) + msg.expectation = expectation + sp.producer.Input() <- msg + + if err := <-expectation; err != nil { + return -1, -1, err.Err + } + + return msg.Partition, msg.Offset, nil +} + +func (sp *syncProducer) SendMessages(msgs []*ProducerMessage) error { + expectations := make(chan chan *ProducerError, len(msgs)) + go func() { + for _, msg := range msgs { + expectation := make(chan *ProducerError, 1) + msg.expectation = expectation + sp.producer.Input() <- msg + expectations <- expectation + } + close(expectations) + }() + + var errors ProducerErrors + for expectation := range expectations { + if err := <-expectation; err != nil { + errors = append(errors, err) + } + } + + if len(errors) > 0 { + return errors + } + return nil +} + +func (sp *syncProducer) handleSuccesses() { + defer sp.wg.Done() + for msg := range sp.producer.Successes() { + expectation := msg.expectation + expectation <- nil + } +} + +func (sp *syncProducer) handleErrors() { + defer sp.wg.Done() + for err := range sp.producer.Errors() { + expectation := err.Msg.expectation + expectation <- err + } +} + +func (sp *syncProducer) Close() error { + sp.producer.AsyncClose() + sp.wg.Wait() + return nil +} diff --git a/vendor/github.com/Shopify/sarama/timestamp.go b/vendor/github.com/Shopify/sarama/timestamp.go new file mode 100644 index 00000000000..372278d0bfa --- /dev/null +++ b/vendor/github.com/Shopify/sarama/timestamp.go @@ -0,0 +1,40 @@ +package sarama + +import ( + "fmt" + "time" +) + +type Timestamp struct { + *time.Time +} + +func (t Timestamp) encode(pe packetEncoder) error { + timestamp := int64(-1) + + if !t.Before(time.Unix(0, 0)) { + timestamp = t.UnixNano() / int64(time.Millisecond) + } else if !t.IsZero() { + return PacketEncodingError{fmt.Sprintf("invalid timestamp (%v)", t)} + } + + pe.putInt64(timestamp) + return nil +} + +func (t Timestamp) decode(pd packetDecoder) error { + millis, err := pd.getInt64() + if err != nil { + return err + } + + // negative timestamps are invalid, in these cases we should return + // a zero time + timestamp := time.Time{} + if millis >= 0 { + timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond)) + } + + *t.Time = timestamp + return nil +} diff --git a/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go b/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go new file mode 100644 index 00000000000..c4043a33520 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go @@ -0,0 +1,130 @@ +package sarama + +type 
TxnOffsetCommitRequest struct { + TransactionalID string + GroupID string + ProducerID int64 + ProducerEpoch int16 + Topics map[string][]*PartitionOffsetMetadata +} + +func (t *TxnOffsetCommitRequest) encode(pe packetEncoder) error { + if err := pe.putString(t.TransactionalID); err != nil { + return err + } + if err := pe.putString(t.GroupID); err != nil { + return err + } + pe.putInt64(t.ProducerID) + pe.putInt16(t.ProducerEpoch) + + if err := pe.putArrayLength(len(t.Topics)); err != nil { + return err + } + for topic, partitions := range t.Topics { + if err := pe.putString(topic); err != nil { + return err + } + if err := pe.putArrayLength(len(partitions)); err != nil { + return err + } + for _, partition := range partitions { + if err := partition.encode(pe); err != nil { + return err + } + } + } + + return nil +} + +func (t *TxnOffsetCommitRequest) decode(pd packetDecoder, version int16) (err error) { + if t.TransactionalID, err = pd.getString(); err != nil { + return err + } + if t.GroupID, err = pd.getString(); err != nil { + return err + } + if t.ProducerID, err = pd.getInt64(); err != nil { + return err + } + if t.ProducerEpoch, err = pd.getInt16(); err != nil { + return err + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + t.Topics = make(map[string][]*PartitionOffsetMetadata) + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + + m, err := pd.getArrayLength() + if err != nil { + return err + } + + t.Topics[topic] = make([]*PartitionOffsetMetadata, m) + + for j := 0; j < m; j++ { + partitionOffsetMetadata := new(PartitionOffsetMetadata) + if err := partitionOffsetMetadata.decode(pd, version); err != nil { + return err + } + t.Topics[topic][j] = partitionOffsetMetadata + } + } + + return nil +} + +func (a *TxnOffsetCommitRequest) key() int16 { + return 28 +} + +func (a *TxnOffsetCommitRequest) version() int16 { + return 0 +} + +func (a *TxnOffsetCommitRequest) headerVersion() int16 { + return 1 +} + +func (a *TxnOffsetCommitRequest) requiredVersion() KafkaVersion { + return V0_11_0_0 +} + +type PartitionOffsetMetadata struct { + Partition int32 + Offset int64 + Metadata *string +} + +func (p *PartitionOffsetMetadata) encode(pe packetEncoder) error { + pe.putInt32(p.Partition) + pe.putInt64(p.Offset) + if err := pe.putNullableString(p.Metadata); err != nil { + return err + } + + return nil +} + +func (p *PartitionOffsetMetadata) decode(pd packetDecoder, version int16) (err error) { + if p.Partition, err = pd.getInt32(); err != nil { + return err + } + if p.Offset, err = pd.getInt64(); err != nil { + return err + } + if p.Metadata, err = pd.getNullableString(); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go b/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go new file mode 100644 index 00000000000..94d8029dace --- /dev/null +++ b/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go @@ -0,0 +1,87 @@ +package sarama + +import ( + "time" +) + +type TxnOffsetCommitResponse struct { + ThrottleTime time.Duration + Topics map[string][]*PartitionError +} + +func (t *TxnOffsetCommitResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(t.ThrottleTime / time.Millisecond)) + if err := pe.putArrayLength(len(t.Topics)); err != nil { + return err + } + + for topic, e := range t.Topics { + if err := pe.putString(topic); err != nil { + return err + } + if err := pe.putArrayLength(len(e)); err != nil { + return err + } + for _, 
partitionError := range e { + if err := partitionError.encode(pe); err != nil { + return err + } + } + } + + return nil +} + +func (t *TxnOffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + t.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + t.Topics = make(map[string][]*PartitionError) + + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + + m, err := pd.getArrayLength() + if err != nil { + return err + } + + t.Topics[topic] = make([]*PartitionError, m) + + for j := 0; j < m; j++ { + t.Topics[topic][j] = new(PartitionError) + if err := t.Topics[topic][j].decode(pd, version); err != nil { + return err + } + } + } + + return nil +} + +func (a *TxnOffsetCommitResponse) key() int16 { + return 28 +} + +func (a *TxnOffsetCommitResponse) version() int16 { + return 0 +} + +func (a *TxnOffsetCommitResponse) headerVersion() int16 { + return 0 +} + +func (a *TxnOffsetCommitResponse) requiredVersion() KafkaVersion { + return V0_11_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/utils.go b/vendor/github.com/Shopify/sarama/utils.go new file mode 100644 index 00000000000..1859d29c213 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/utils.go @@ -0,0 +1,234 @@ +package sarama + +import ( + "bufio" + "fmt" + "net" + "regexp" +) + +type none struct{} + +// make []int32 sortable so we can sort partition numbers +type int32Slice []int32 + +func (slice int32Slice) Len() int { + return len(slice) +} + +func (slice int32Slice) Less(i, j int) bool { + return slice[i] < slice[j] +} + +func (slice int32Slice) Swap(i, j int) { + slice[i], slice[j] = slice[j], slice[i] +} + +func dupInt32Slice(input []int32) []int32 { + ret := make([]int32, 0, len(input)) + ret = append(ret, input...) + return ret +} + +func withRecover(fn func()) { + defer func() { + handler := PanicHandler + if handler != nil { + if err := recover(); err != nil { + handler(err) + } + } + }() + + fn() +} + +func safeAsyncClose(b *Broker) { + tmp := b // local var prevents clobbering in goroutine + go withRecover(func() { + if connected, _ := tmp.Connected(); connected { + if err := tmp.Close(); err != nil { + Logger.Println("Error closing broker", tmp.ID(), ":", err) + } + } + }) +} + +// Encoder is a simple interface for any type that can be encoded as an array of bytes +// in order to be sent as the key or value of a Kafka message. Length() is provided as an +// optimization, and must return the same as len() on the result of Encode(). +type Encoder interface { + Encode() ([]byte, error) + Length() int +} + +// make strings and byte slices encodable for convenience so they can be used as keys +// and/or values in kafka messages + +// StringEncoder implements the Encoder interface for Go strings so that they can be used +// as the Key or Value in a ProducerMessage. +type StringEncoder string + +func (s StringEncoder) Encode() ([]byte, error) { + return []byte(s), nil +} + +func (s StringEncoder) Length() int { + return len(s) +} + +// ByteEncoder implements the Encoder interface for Go byte slices so that they can be used +// as the Key or Value in a ProducerMessage. 
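+//
+// An illustrative godoc-style sketch (topic and payload are placeholders):
+// StringEncoder suits textual keys, ByteEncoder raw payloads, and both
+// satisfy Length() without copying:
+//
+//	msg := &ProducerMessage{
+//		Topic: "example-topic",
+//		Key:   StringEncoder("user-42"),
+//		Value: ByteEncoder([]byte{0x01, 0x02}),
+//	}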
+type ByteEncoder []byte
+
+func (b ByteEncoder) Encode() ([]byte, error) {
+	return b, nil
+}
+
+func (b ByteEncoder) Length() int {
+	return len(b)
+}
+
+// bufConn wraps a net.Conn with a buffer for reads to reduce the number of
+// reads that trigger syscalls.
+type bufConn struct {
+	net.Conn
+	buf *bufio.Reader
+}
+
+func newBufConn(conn net.Conn) *bufConn {
+	return &bufConn{
+		Conn: conn,
+		buf:  bufio.NewReader(conn),
+	}
+}
+
+func (bc *bufConn) Read(b []byte) (n int, err error) {
+	return bc.buf.Read(b)
+}
+
+// KafkaVersion instances represent versions of the upstream Kafka broker.
+type KafkaVersion struct {
+	// it's a struct rather than just typing the array directly to make it opaque and stop people
+	// generating their own arbitrary versions
+	version [4]uint
+}
+
+func newKafkaVersion(major, minor, veryMinor, patch uint) KafkaVersion {
+	return KafkaVersion{
+		version: [4]uint{major, minor, veryMinor, patch},
+	}
+}
+
+// IsAtLeast returns true if and only if the version it is called on is
+// greater than or equal to the version passed in:
+//	V1.IsAtLeast(V2) // false
+//	V2.IsAtLeast(V1) // true
+func (v KafkaVersion) IsAtLeast(other KafkaVersion) bool {
+	for i := range v.version {
+		if v.version[i] > other.version[i] {
+			return true
+		} else if v.version[i] < other.version[i] {
+			return false
+		}
+	}
+	return true
+}
+
+// Effective constants defining the supported Kafka versions.
+var (
+	V0_8_2_0  = newKafkaVersion(0, 8, 2, 0)
+	V0_8_2_1  = newKafkaVersion(0, 8, 2, 1)
+	V0_8_2_2  = newKafkaVersion(0, 8, 2, 2)
+	V0_9_0_0  = newKafkaVersion(0, 9, 0, 0)
+	V0_9_0_1  = newKafkaVersion(0, 9, 0, 1)
+	V0_10_0_0 = newKafkaVersion(0, 10, 0, 0)
+	V0_10_0_1 = newKafkaVersion(0, 10, 0, 1)
+	V0_10_1_0 = newKafkaVersion(0, 10, 1, 0)
+	V0_10_1_1 = newKafkaVersion(0, 10, 1, 1)
+	V0_10_2_0 = newKafkaVersion(0, 10, 2, 0)
+	V0_10_2_1 = newKafkaVersion(0, 10, 2, 1)
+	V0_11_0_0 = newKafkaVersion(0, 11, 0, 0)
+	V0_11_0_1 = newKafkaVersion(0, 11, 0, 1)
+	V0_11_0_2 = newKafkaVersion(0, 11, 0, 2)
+	V1_0_0_0  = newKafkaVersion(1, 0, 0, 0)
+	V1_1_0_0  = newKafkaVersion(1, 1, 0, 0)
+	V1_1_1_0  = newKafkaVersion(1, 1, 1, 0)
+	V2_0_0_0  = newKafkaVersion(2, 0, 0, 0)
+	V2_0_1_0  = newKafkaVersion(2, 0, 1, 0)
+	V2_1_0_0  = newKafkaVersion(2, 1, 0, 0)
+	V2_2_0_0  = newKafkaVersion(2, 2, 0, 0)
+	V2_3_0_0  = newKafkaVersion(2, 3, 0, 0)
+	V2_4_0_0  = newKafkaVersion(2, 4, 0, 0)
+	V2_5_0_0  = newKafkaVersion(2, 5, 0, 0)
+	V2_6_0_0  = newKafkaVersion(2, 6, 0, 0)
+	V2_7_0_0  = newKafkaVersion(2, 7, 0, 0)
+	V2_8_0_0  = newKafkaVersion(2, 8, 0, 0)
+
+	SupportedVersions = []KafkaVersion{
+		V0_8_2_0,
+		V0_8_2_1,
+		V0_8_2_2,
+		V0_9_0_0,
+		V0_9_0_1,
+		V0_10_0_0,
+		V0_10_0_1,
+		V0_10_1_0,
+		V0_10_1_1,
+		V0_10_2_0,
+		V0_10_2_1,
+		V0_11_0_0,
+		V0_11_0_1,
+		V0_11_0_2,
+		V1_0_0_0,
+		V1_1_0_0,
+		V1_1_1_0,
+		V2_0_0_0,
+		V2_0_1_0,
+		V2_1_0_0,
+		V2_2_0_0,
+		V2_3_0_0,
+		V2_4_0_0,
+		V2_5_0_0,
+		V2_6_0_0,
+		V2_7_0_0,
+		V2_8_0_0,
+	}
+	MinVersion     = V0_8_2_0
+	MaxVersion     = V2_8_0_0
+	DefaultVersion = V1_0_0_0
+)
+
+// ParseKafkaVersion parses a string and returns the corresponding KafkaVersion,
+// or an error if the string is not a valid version.
+func ParseKafkaVersion(s string) (KafkaVersion, error) {
+	if len(s) < 5 {
+		return DefaultVersion, fmt.Errorf("invalid version `%s`", s)
+	}
+	var major, minor, veryMinor, patch uint
+	var err error
+	if s[0] == '0' {
+		err = scanKafkaVersion(s, `^0\.\d+\.\d+\.\d+$`, "0.%d.%d.%d", [3]*uint{&minor, &veryMinor, &patch})
+	} else {
+		err = scanKafkaVersion(s, `^\d+\.\d+\.\d+$`, "%d.%d.%d", [3]*uint{&major, &minor, &veryMinor})
+	}
+	if err != nil {
+		return
DefaultVersion, err + } + return newKafkaVersion(major, minor, veryMinor, patch), nil +} + +func scanKafkaVersion(s string, pattern string, format string, v [3]*uint) error { + if !regexp.MustCompile(pattern).MatchString(s) { + return fmt.Errorf("invalid version `%s`", s) + } + _, err := fmt.Sscanf(s, format, v[0], v[1], v[2]) + return err +} + +func (v KafkaVersion) String() string { + if v.version[0] == 0 { + return fmt.Sprintf("0.%d.%d.%d", v.version[1], v.version[2], v.version[3]) + } + + return fmt.Sprintf("%d.%d.%d", v.version[0], v.version[1], v.version[2]) +} diff --git a/vendor/github.com/Shopify/sarama/zstd.go b/vendor/github.com/Shopify/sarama/zstd.go new file mode 100644 index 00000000000..e23bfc4772f --- /dev/null +++ b/vendor/github.com/Shopify/sarama/zstd.go @@ -0,0 +1,18 @@ +package sarama + +import ( + "github.com/klauspost/compress/zstd" +) + +var ( + zstdDec, _ = zstd.NewReader(nil) + zstdEnc, _ = zstd.NewWriter(nil, zstd.WithZeroFrames(true)) +) + +func zstdDecompress(dst, src []byte) ([]byte, error) { + return zstdDec.DecodeAll(src, dst) +} + +func zstdCompress(dst, src []byte) ([]byte, error) { + return zstdEnc.EncodeAll(src, dst), nil +} diff --git a/vendor/github.com/TykTechnologies/again/LICENSE b/vendor/github.com/TykTechnologies/again/LICENSE new file mode 100644 index 00000000000..363fa9ee77b --- /dev/null +++ b/vendor/github.com/TykTechnologies/again/LICENSE @@ -0,0 +1,29 @@ +Copyright 2012 Richard Crowley. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + +THIS SOFTWARE IS PROVIDED BY RICHARD CROWLEY ``AS IS'' AND ANY EXPRESS +OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL RICHARD CROWLEY OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +THE POSSIBILITY OF SUCH DAMAGE. + +The views and conclusions contained in the software and documentation +are those of the authors and should not be interpreted as representing +official policies, either expressed or implied, of Richard Crowley. 
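For orientation, the version helpers above (`ParseKafkaVersion`, `IsAtLeast`, `SupportedVersions`) are how callers gate protocol features on broker capability. A minimal sketch, not part of the vendored tree:

```go
package main

import (
	"fmt"
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	// ParseKafkaVersion accepts the legacy 0.x.y.z form as well as the
	// modern x.y.z form, per scanKafkaVersion's two patterns.
	v, err := sarama.ParseKafkaVersion("2.8.0")
	if err != nil {
		log.Fatal(err)
	}

	// Gate a feature on broker capability; SaslAuthenticateRequest, for
	// example, declares V1_0_0_0 as its required version.
	if v.IsAtLeast(sarama.V1_0_0_0) {
		fmt.Println("broker speaks SaslAuthenticate:", v)
	}
}
```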
diff --git a/vendor/github.com/TykTechnologies/again/README.md b/vendor/github.com/TykTechnologies/again/README.md
new file mode 100644
index 00000000000..e256bb4c7cb
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/again/README.md
@@ -0,0 +1,2 @@
+# again
+graceful restarts with multiple listeners support for Go
diff --git a/vendor/github.com/TykTechnologies/again/again.go b/vendor/github.com/TykTechnologies/again/again.go
new file mode 100644
index 00000000000..0e28dcbb65f
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/again/again.go
@@ -0,0 +1,457 @@
+package again
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"log"
+	"net"
+	"os"
+	"os/exec"
+	"os/signal"
+	"reflect"
+	"strings"
+	"sync"
+	"syscall"
+)
+
+var OnForkHook func()
+
+// Don't make the caller import syscall.
+const (
+	SIGINT  = syscall.SIGINT
+	SIGQUIT = syscall.SIGQUIT
+	SIGTERM = syscall.SIGTERM
+	SIGUSR2 = syscall.SIGUSR2
+)
+
+// Service is a single service listening on a single net.Listener.
+type Service struct {
+	Name       string
+	FdName     string
+	Descriptor uintptr
+	Listener   net.Listener
+}
+
+// Hooks holds the callbacks invoked when a specific signal is received.
+type Hooks struct {
+	// OnSIGHUP is the function called when the server receives a SIGHUP
+	// signal. The normal use case for SIGHUP is to reload the
+	// configuration.
+	OnSIGHUP func(*Again) error
+	// OnSIGUSR1 is the function called when the server receives a
+	// SIGUSR1 signal. The normal use case for SIGUSR1 is to reopen the
+	// log files.
+	OnSIGUSR1 func(*Again) error
+	// OnSIGQUIT is the function called on SIGQUIT; use this for graceful shutdown.
+	OnSIGQUIT func(*Again) error
+	OnSIGTERM func(*Again) error
+}
+
+// Again manages services that need graceful restarts.
+type Again struct {
+	services *sync.Map
+	Hooks    Hooks
+}
+
+func New(hooks ...Hooks) Again {
+	var h Hooks
+	if len(hooks) > 0 {
+		h = hooks[0]
+	}
+	return Again{
+		services: &sync.Map{},
+		Hooks:    h,
+	}
+}
+
+func (a *Again) Env() (m map[string]string, err error) {
+	var fds []string
+	var names []string
+	var fdNames []string
+	a.services.Range(func(k, value interface{}) bool {
+		s := value.(*Service)
+		names = append(names, s.Name)
+		_, _, e1 := syscall.Syscall(syscall.SYS_FCNTL, s.Descriptor, syscall.F_SETFD, 0)
+		if 0 != e1 {
+			err = e1
+			return false
+		}
+		fds = append(fds, fmt.Sprint(s.Descriptor))
+		fdNames = append(fdNames, s.FdName)
+		return true
+	})
+	if err != nil {
+		return
+	}
+	return map[string]string{
+		"GOAGAIN_FD":           strings.Join(fds, ","),
+		"GOAGAIN_SERVICE_NAME": strings.Join(names, ","),
+		"GOAGAIN_NAME":         strings.Join(fdNames, ","),
+	}, nil
+}
+
+func ListerName(l net.Listener) string {
+	addr := l.Addr()
+	return fmt.Sprintf("%s:%s->", addr.Network(), addr.String())
+}
+
+func (a *Again) Range(fn func(*Service)) {
+	a.services.Range(func(k, v interface{}) bool {
+		s := v.(*Service)
+		fn(s)
+		return true
+	})
+}
+
+// Close tries to close all service listeners.
+func (a Again) Close() error {
+	var e bytes.Buffer
+	a.Range(func(s *Service) {
+		if err := s.Listener.Close(); err != nil {
+			e.WriteString(err.Error())
+			e.WriteByte('\n')
+		}
+	})
+	if e.Len() > 0 {
+		return errors.New(e.String())
+	}
+	return nil
+}
+func hasElem(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Ptr, reflect.Interface:
+		return true
+	default:
+		return false
+	}
+}
+
+// Listen creates a new service with the given listener.
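+// It extracts the listener's file descriptor via reflection so the socket can
+// survive a re-exec. A sketch of registering a listener (an illustration; the
+// name and address are placeholders):
+//
+//	ln, err := net.Listen("tcp", ":8080")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	a := again.New()
+//	if err := a.Listen("api", ln); err != nil {
+//		log.Fatal(err)
+//	}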
+func (a *Again) Listen(name string, ls net.Listener) error {
+	v := reflect.ValueOf(ls)
+	if v.Kind() == reflect.Ptr {
+		v = v.Elem()
+	}
+	// Check if we have a net.Listener embedded. It's a workaround to support
+	// crypto/tls Listen.
+	if ls := v.FieldByName("Listener"); ls.IsValid() {
+		for hasElem(ls) {
+			ls = ls.Elem()
+		}
+		v = ls
+	}
+	if v.Kind() != reflect.Struct {
+		return fmt.Errorf("Not supported by current Go version")
+	}
+	v = v.FieldByName("fd")
+	if !v.IsValid() {
+		return fmt.Errorf("Not supported by current Go version")
+	}
+	v = v.Elem()
+	fdField := v.FieldByName("sysfd")
+	if !fdField.IsValid() {
+		fdField = v.FieldByName("pfd").FieldByName("Sysfd")
+	}
+
+	if !fdField.IsValid() {
+		return fmt.Errorf("Not supported by current Go version")
+	}
+	fd := uintptr(fdField.Int())
+	a.services.Store(name, &Service{
+		Name:       name,
+		FdName:     ListerName(ls),
+		Listener:   ls,
+		Descriptor: fd,
+	})
+	return nil
+}
+
+func (a Again) Get(name string) *Service {
+	s, _ := a.services.Load(name)
+	if s != nil {
+		return s.(*Service)
+	}
+	return nil
+}
+
+func (a Again) Delete(name string) {
+	a.services.Delete(name)
+}
+
+func (a Again) GetListener(key string) net.Listener {
+	if s := a.Get(key); s != nil {
+		return s.Listener
+	}
+	return nil
+}
+
+// Exec re-execs this same image without dropping the net.Listener.
+func Exec(a *Again) error {
+	var pid int
+	fmt.Sscan(os.Getenv("GOAGAIN_PID"), &pid)
+	if syscall.Getppid() == pid {
+		return fmt.Errorf("goagain.Exec called by a child process")
+	}
+	argv0, err := lookPath()
+	if nil != err {
+		return err
+	}
+	if err := setEnvs(a); nil != err {
+		return err
+	}
+	if err := os.Setenv(
+		"GOAGAIN_SIGNAL",
+		fmt.Sprintf("%d", syscall.SIGQUIT),
+	); nil != err {
+		return err
+	}
+	log.Println("re-executing", argv0)
+	return syscall.Exec(argv0, os.Args, os.Environ())
+}
+
+// ForkExec forks and execs this same image without dropping the net.Listener.
+func ForkExec(a *Again) error {
+	argv0, err := lookPath()
+	if nil != err {
+		return err
+	}
+	wd, err := os.Getwd()
+	if nil != err {
+		return err
+	}
+	err = setEnvs(a)
+	if nil != err {
+		return err
+	}
+	if err := os.Setenv("GOAGAIN_PID", ""); nil != err {
+		return err
+	}
+	if err := os.Setenv(
+		"GOAGAIN_PPID",
+		fmt.Sprint(syscall.Getpid()),
+	); nil != err {
+		return err
+	}
+
+	sig := syscall.SIGQUIT
+	if err := os.Setenv("GOAGAIN_SIGNAL", fmt.Sprintf("%d", sig)); nil != err {
+		return err
+	}
+
+	files := []*os.File{
+		os.Stdin, os.Stdout, os.Stderr,
+	}
+	a.Range(func(s *Service) {
+		files = append(files, os.NewFile(
+			s.Descriptor,
+			ListerName(s.Listener),
+		))
+	})
+	p, err := os.StartProcess(argv0, os.Args, &os.ProcAttr{
+		Dir:   wd,
+		Env:   os.Environ(),
+		Files: files,
+		Sys:   &syscall.SysProcAttr{},
+	})
+	if nil != err {
+		return err
+	}
+	log.Println("spawned child", p.Pid)
+	if err = os.Setenv("GOAGAIN_PID", fmt.Sprint(p.Pid)); nil != err {
+		return err
+	}
+	return nil
+}
+
+// IsErrClosing tests whether an error is equivalent to net.errClosing as returned by
+// Accept during a graceful exit.
+func IsErrClosing(err error) bool {
+	if opErr, ok := err.(*net.OpError); ok {
+		err = opErr.Err
+	}
+	return "use of closed network connection" == err.Error()
+}
+
+// Child returns true if this process is managed by again and it's a child
+// process.
+func Child() bool {
+	d := os.Getenv("GOAGAIN_PID")
+	if d == "" {
+		d = os.Getenv("GOAGAIN_PPID")
+	}
+	var pid int
+	_, err := fmt.Sscan(d, &pid)
+	return err == nil
+}
+
+// Kill sends the signal specified in the environment to the process specified
+// in the environment; defaults to SIGQUIT.
+func Kill() error {
+	var (
+		pid int
+		sig syscall.Signal
+	)
+	_, err := fmt.Sscan(os.Getenv("GOAGAIN_PID"), &pid)
+	if io.EOF == err {
+		_, err = fmt.Sscan(os.Getenv("GOAGAIN_PPID"), &pid)
+	}
+	if nil != err {
+		return err
+	}
+	if _, err := fmt.Sscan(os.Getenv("GOAGAIN_SIGNAL"), &sig); nil != err {
+		sig = syscall.SIGQUIT
+	}
+	log.Println("sending signal", sig, "to process", pid)
+	return syscall.Kill(pid, sig)
+}
+
+// Listen checks the environment and constructs an Again instance if this is a
+// child process that was forked by an again parent.
+//
+// forkHook, if provided, will be called before forking.
+func Listen(forkHook func()) (*Again, error) {
+	a := New()
+	if err := ListenFrom(&a, forkHook); err != nil {
+		return nil, err
+	}
+	return &a, nil
+}
+
+func ListenFrom(a *Again, forkHook func()) error {
+	OnForkHook = forkHook
+	fds := strings.Split(os.Getenv("GOAGAIN_FD"), ",")
+	names := strings.Split(os.Getenv("GOAGAIN_SERVICE_NAME"), ",")
+	fdNames := strings.Split(os.Getenv("GOAGAIN_NAME"), ",")
+	if !((len(fds) == len(names)) && (len(fds) == len(fdNames))) {
+		return errors.New("again: names/fds mismatch")
+	}
+	for k, f := range fds {
+		if f == "" {
+			continue
+		}
+		var s Service
+		_, err := fmt.Sscan(f, &s.Descriptor)
+		if err != nil {
+			return err
+		}
+		s.Name = names[k]
+		s.FdName = fdNames[k]
+		l, err := net.FileListener(os.NewFile(s.Descriptor, s.FdName))
+		if err != nil {
+			return err
+		}
+		s.Listener = l
+		switch l.(type) {
+		case *net.TCPListener, *net.UnixListener:
+		default:
+			return fmt.Errorf(
+				"file descriptor is %T not *net.TCPListener or *net.UnixListener",
+				l,
+			)
+		}
+		if err = syscall.Close(int(s.Descriptor)); nil != err {
+			return err
+		}
+		fmt.Println("=> ", s.Name, s.FdName)
+		a.services.Store(s.Name, &s)
+	}
+	return nil
+}
+
+// Wait waits for signals.
+func Wait(a *Again) (syscall.Signal, error) {
+	ch := make(chan os.Signal, 2)
+	signal.Notify(
+		ch,
+		syscall.SIGHUP,
+		syscall.SIGINT,
+		syscall.SIGQUIT,
+		syscall.SIGTERM,
+		syscall.SIGUSR1,
+		syscall.SIGUSR2,
+	)
+	forked := false
+	for {
+		sig := <-ch
+		log.Println(sig.String())
+		switch sig {
+
+		// SIGHUP should reload configuration.
+		case syscall.SIGHUP:
+			if a.Hooks.OnSIGHUP != nil {
+				if err := a.Hooks.OnSIGHUP(a); err != nil {
+					log.Println("OnSIGHUP:", err)
+				}
+			}
+
+		// SIGINT should exit.
+		case syscall.SIGINT:
+			return syscall.SIGINT, nil
+
+		// SIGQUIT should exit gracefully.
+		case syscall.SIGQUIT:
+			if a.Hooks.OnSIGQUIT != nil {
+				if err := a.Hooks.OnSIGQUIT(a); err != nil {
+					log.Println("OnSIGQUIT:", err)
+				}
+			}
+			return syscall.SIGQUIT, nil
+
+		// SIGTERM should exit.
+		case syscall.SIGTERM:
+			if a.Hooks.OnSIGTERM != nil {
+				if err := a.Hooks.OnSIGTERM(a); err != nil {
+					log.Println("OnSIGTERM:", err)
+				}
+			}
+			return syscall.SIGTERM, nil
+
+		// SIGUSR1 should reopen logs.
+		case syscall.SIGUSR1:
+			if a.Hooks.OnSIGUSR1 != nil {
+				if err := a.Hooks.OnSIGUSR1(a); err != nil {
+					log.Println("OnSIGUSR1:", err)
+				}
+			}
+
+		// SIGUSR2 forks and re-execs the first time it is received and execs
+		// without forking from then on.
+		case syscall.SIGUSR2:
+			if OnForkHook != nil {
+				OnForkHook()
+			}
+			if forked {
+				return syscall.SIGUSR2, nil
+			}
+			forked = true
+			if err := ForkExec(a); nil != err {
+				return syscall.SIGUSR2, err
+			}
+
+		}
+	}
+}
+
+func lookPath() (argv0 string, err error) {
+	argv0, err = exec.LookPath(os.Args[0])
+	if nil != err {
+		return
+	}
+	if _, err = os.Stat(argv0); nil != err {
+		return
+	}
+	return
+}
+
+func setEnvs(a *Again) error {
+	e, err := a.Env()
+	if err != nil {
+		return err
+	}
+	for k, v := range e {
+		os.Setenv(k, v)
+	}
+	return nil
+}
diff --git a/vendor/github.com/TykTechnologies/again/go.mod b/vendor/github.com/TykTechnologies/again/go.mod
new file mode 100644
index 00000000000..9d1beb7b532
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/again/go.mod
@@ -0,0 +1,3 @@
+module github.com/TykTechnologies/again
+
+go 1.12
diff --git a/vendor/github.com/TykTechnologies/circuitbreaker/.gitignore b/vendor/github.com/TykTechnologies/circuitbreaker/.gitignore
new file mode 100644
index 00000000000..9ed3b07cefe
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/circuitbreaker/.gitignore
@@ -0,0 +1 @@
+*.test
diff --git a/vendor/github.com/TykTechnologies/circuitbreaker/CHANGELOG.md b/vendor/github.com/TykTechnologies/circuitbreaker/CHANGELOG.md
new file mode 100644
index 00000000000..6dbb18990ab
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/circuitbreaker/CHANGELOG.md
@@ -0,0 +1,230 @@
+# Changelog
+All notable changes to this project will be documented in this file.
+
+## 2.2.0 - 2016-08-09
+
+### Added
+- Externally provided event listener channel (@spencerkimball)
+
+### Deprecated
+- Nothing
+
+### Removed
+- Nothing
+
+### Fixed
+- Reduce allocations around last failure time storage
+- Use the Clock for window code as well
+- Remove test data race
+- Fix race condition in `state()` (@tamird)
+
+## 2.1.7 - 2016-07-27
+
+### Added
+- Nothing
+
+### Deprecated
+- Nothing
+
+### Removed
+- Nothing
+
+### Fixed
+- Set `Backoff.MaxElapsedTime` to 0 as default [@matope]
+- Use a lock when modifying `nextBackoff`
+- Fix goroutine leak when using timeouts [@isaldana]
+- Fix window buckets that should be empty [@isaldana]
+- Update backoff package, which has been renamed
+
+## 2.1.6 - 2016-02-02
+
+### Added
+- Nothing
+
+### Deprecated
+- Nothing
+
+### Removed
+- Nothing
+
+### Fixed
+- client.Do() was not returning the error when it timed out [@ryanmurf]
+
+## 2.1.5 - 2015-11-19
+
+### Added
+- Nothing
+
+### Deprecated
+- Nothing
+
+### Removed
+- Nothing
+
+### Fixed
+- Respect backoff.Stop [@bc-vincent-zhao]
+
+## 2.1.4 - 2015-09-01
+
+### Added
+- Nothing
+
+### Deprecated
+- Nothing
+
+### Removed
+- Nothing
+
+### Fixed
+- HTTP client was using a new panel object instead of the one it added the breaker to [@ryanmurf]
+
+## 2.1.3 - 2015-08-05
+
+### Added
+- Configurable bucket time and number [@thraxil]
+- Use mock clock for test [@andreas]
+
+### Deprecated
+- Nothing
+
+### Removed
+- Nothing
+
+### Fixed
+- Bug in statsd bucket name documentation / example [@thraxil]
+
+## 2.1.2 - 2015-04-03
+
+### Added
+- Nothing
+
+### Deprecated
+- Nothing
+
+### Removed
+- Nothing
+
+### Fixed
+- Simplify Call() for rate breaker, fixing a reset bug
+
+## 2.1.1 - 2014-10-29
+
+### Added
+- Nothing
+
+### Deprecated
+- Nothing
+
+### Removed
+- Nothing
+
+### Fixed
+- Ensure the half opens counter resets when the breaker resets, or auto-resetting may not occur
+
+## 2.1.0 - 2014-10-16
+
+### Added
+- Failure, Success counts and Error Rate are now calculated over a sliding
window +- Number of buckets in the window and the time the window spans are tuneable + +### Deprecated +- Nothing + +### Removed +- Nothing + +### Fixed +- A race condition in Call() + +## 2.0.2 - 2014-10-13 + +### Added +- ResetCounters + +### Deprecated +- Nothing + +### Removed +- Nothing + +### Fixed +- Nothing + +## 2.0.1 - 2014-10-13 + +### Added +- Nothing + +### Deprecated +- Nothing + +### Removed +- Nothing + +### Fixed +- Error rate should return 0.0 if there have been no samples + +## 2.0.0 - 2014-10-13 + +### Added +- All circuit breakers are now a Breaker with trip semantics handled by a TripFunc +- NewConsecutiveBreaker +- NewRateBreaker +- ConsecFailures +- ErrorRate +- Success +- Successes +- Retry logic now uses cenkalti/backoff, exponential backoff by default + +### Deprecated +- Nothing + +### Removed +- TrippableBreaker, ThresholdBreaker, FrequencyBreaker, TimeoutBreaker; all handled by Breaker now +- NewFrequencyBreaker, replaced by NewConsecutiveBreaker +- NewTimeoutBreaker, time out semantics are now handled by Call() +- NoOp(), use a Breaker with no TripFunc instead + +### Fixed +- Nothing + +## 1.1.2 - 2014-08-20 + +### Added +- Nothing + +### Deprecated +- Nothing + +### Fixed +- For a FrequencyBreaker, Failures() should return the count since the duration start, even after resetting. + +## 1.1.1 - 2014-08-20 + +### Added +- Nothing + +### Deprecated +- Nothing + +### Fixed +- Only send the reset event if the breaker was in a tripped state + +## 1.1.0 - 2014-08-16 + +### Added +- Re-export a Panels Circuits map. It's handy and if you mess it up, it's on you. + +### Deprecated +- Nothing + +### Removed +- Nothing + +### Fixed +- Nothing + +## 1.0.0 - 2014-08-16 + +### Added +- This will be the public API for version 1.0.0. This project will follow semver rules. diff --git a/vendor/github.com/TykTechnologies/circuitbreaker/LICENSE b/vendor/github.com/TykTechnologies/circuitbreaker/LICENSE new file mode 100644 index 00000000000..5a188a06b00 --- /dev/null +++ b/vendor/github.com/TykTechnologies/circuitbreaker/LICENSE @@ -0,0 +1,22 @@ +(The MIT License) + +Copyright (c) 2014 Scott Barron + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
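Tying the `again` API above together: a child process inherits its sockets through the `GOAGAIN_*` variables via `ListenFrom`, while a fresh parent registers listeners with `Listen` and then blocks in `Wait` until SIGUSR2 forks a replacement. A hedged sketch (the service name, address, and handler are placeholders, not part of the vendored code):

```go
package main

import (
	"log"
	"net"
	"net/http"
	"syscall"

	"github.com/TykTechnologies/again"
)

func main() {
	a := again.New()

	var ln net.Listener
	if again.Child() {
		// Inherit the socket from the parent, then ask the parent to exit.
		if err := again.ListenFrom(&a, nil); err != nil {
			log.Fatal(err)
		}
		ln = a.GetListener("api")
		if err := again.Kill(); err != nil {
			log.Println("kill parent:", err)
		}
	}
	if ln == nil {
		var err error
		ln, err = net.Listen("tcp", ":8080")
		if err != nil {
			log.Fatal(err)
		}
		if err := a.Listen("api", ln); err != nil {
			log.Fatal(err)
		}
	}

	go http.Serve(ln, nil)

	// Block until a signal arrives; SIGUSR2 triggers ForkExec of a new copy.
	sig, err := again.Wait(&a)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("exiting on", sig, "- was SIGTERM:", sig == syscall.SIGTERM)
}
```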
diff --git a/vendor/github.com/TykTechnologies/circuitbreaker/README.md b/vendor/github.com/TykTechnologies/circuitbreaker/README.md new file mode 100644 index 00000000000..1e08423b585 --- /dev/null +++ b/vendor/github.com/TykTechnologies/circuitbreaker/README.md @@ -0,0 +1,120 @@ +# circuitbreaker + +Circuitbreaker provides an easy way to use the Circuit Breaker pattern in a +Go program. + +Circuit breakers are typically used when your program makes remote calls. +Remote calls can often hang for a while before they time out. If your +application makes a lot of these requests, many resources can be tied +up waiting for these time outs to occur. A circuit breaker wraps these +remote calls and will trip after a defined amount of failures or time outs +occur. When a circuit breaker is tripped any future calls will avoid making +the remote call and return an error to the caller. In the meantime, the +circuit breaker will periodically allow some calls to be tried again and +will close the circuit if those are successful. + +You can read more about this pattern and how it's used at: +- [Martin Fowler's bliki](http://martinfowler.com/bliki/CircuitBreaker.html) +- [The Netflix Tech Blog](http://techblog.netflix.com/2012/02/fault-tolerance-in-high-volume.html) +- [Release It!](http://pragprog.com/book/mnee/release-it) + +[![GoDoc](https://godoc.org/github.com/rubyist/circuitbreaker?status.svg)](https://godoc.org/github.com/rubyist/circuitbreaker) + +## Installation + +``` + go get github.com/rubyist/circuitbreaker +``` + +## Examples + +Here is a quick example of what circuitbreaker provides + +```go +// Creates a circuit breaker that will trip if the function fails 10 times +cb := circuit.NewThresholdBreaker(10) + +events := cb.Subscribe() +go func() { + for { + e := <-events + // Monitor breaker events like BreakerTripped, BreakerReset, BreakerFail, BreakerReady + } +}() + +cb.Call(func() error { + // This is where you'll do some remote call + // If it fails, return an error +}, 0) +``` + +Circuitbreaker can also wrap a time out around the remote call. + +```go +// Creates a circuit breaker that will trip after 10 failures +// using a time out of 5 seconds +cb := circuit.NewThresholdBreaker(10) + +cb.Call(func() error { + // This is where you'll do some remote call + // If it fails, return an error +}, time.Second * 5) // This will time out after 5 seconds, which counts as a failure + +// Proceed as above + +``` + +Circuitbreaker can also trip based on the number of consecutive failures. + +```go +// Creates a circuit breaker that will trip if 10 consecutive failures occur +cb := circuit.NewConsecutiveBreaker(10) + +// Proceed as above +``` + +Circuitbreaker can trip based on the error rate. + +```go +// Creates a circuit breaker based on the error rate +cb := circuit.NewRateBreaker(0.95, 100) // trip when error rate hits 95%, with at least 100 samples + +// Proceed as above +``` + +If it doesn't make sense to wrap logic in Call(), breakers can be handled manually. + +```go +cb := circuit.NewThresholdBreaker(10) + +for { + if cb.Ready() { + // Breaker is not tripped, proceed + err := doSomething() + if err != nil { + cb.Fail() // This will trip the breaker once it's failed 10 times + continue + } + cb.Success() + } else { + // Breaker is in a tripped state. + } +} +``` + +Circuitbreaker also provides a wrapper around `http.Client` that will wrap a +time out around any request. + +```go +// Passing in nil will create a regular http.Client. 
+// You can also build your own http.Client and pass it in
+client := circuit.NewHTTPClient(time.Second * 5, 10, nil)
+
+resp, err := client.Get("http://example.com/resource.json")
+```
+
+See the godoc for more examples.
+
+## Bugs, Issues, Feedback
+
+Right here on GitHub: [https://github.com/rubyist/circuitbreaker](https://github.com/rubyist/circuitbreaker)
diff --git a/vendor/github.com/TykTechnologies/circuitbreaker/circuitbreaker.go b/vendor/github.com/TykTechnologies/circuitbreaker/circuitbreaker.go
new file mode 100644
index 00000000000..2bb8a2494f8
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/circuitbreaker/circuitbreaker.go
@@ -0,0 +1,460 @@
+// Package circuit implements the Circuit Breaker pattern. It will wrap
+// a function call (typically one which uses remote services) and monitor for
+// failures and/or time outs. When a threshold of failures or time outs has been
+// reached, future calls to the function will not run. During this state, the
+// breaker will periodically allow the function to run and, if it is successful,
+// will start running the function again.
+//
+// Circuit includes three types of circuit breakers:
+//
+// A Threshold Breaker will trip when the failure count reaches a given threshold.
+// It does not matter how long it takes to reach the threshold and the failures do
+// not need to be consecutive.
+//
+// A Consecutive Breaker will trip when the consecutive failure count reaches a given
+// threshold. It does not matter how long it takes to reach the threshold, but the
+// failures do need to be consecutive.
+//
+// A Rate Breaker will trip when the error rate reaches a given threshold, once a
+// minimum number of samples has been recorded (see NewRateBreaker).
+//
+// When wrapping blocks of code with a Breaker's Call() function, a time out can be
+// specified. If the time out is reached, the breaker's Fail() function will be called.
+//
+// Other types of circuit breakers can be easily built by creating a Breaker and
+// adding a custom TripFunc. A TripFunc is called when a Breaker Fail()s and receives
+// the breaker as an argument. It then returns true or false to indicate whether the
+// breaker should trip.
+//
+// The package also provides a wrapper around an http.Client that wraps all of
+// the http.Client functions with a Breaker.
+//
+package circuit
+
+import (
+	"context"
+	"errors"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/cenk/backoff"
+	"github.com/facebookgo/clock"
+)
+
+// BreakerEvent indicates the type of event received over an event channel
+type BreakerEvent int
+
+const (
+	// BreakerTripped is sent when a breaker trips
+	BreakerTripped BreakerEvent = iota
+
+	// BreakerReset is sent when a breaker resets
+	BreakerReset
+
+	// BreakerFail is sent when Fail() is called
+	BreakerFail
+
+	// BreakerReady is sent when the breaker enters the half open state and is ready to retry
+	BreakerReady
+
+	// BreakerStop stops the breaker's subscribers
+	BreakerStop
+)
+
+// ListenerEvent includes a reference to the circuit breaker and the event.
+type ListenerEvent struct {
+	CB    *Breaker
+	Event BreakerEvent
+}
+
+type state int
+
+const (
+	open     state = iota
+	halfopen state = iota
+	closed   state = iota
+)
+
+var (
+	defaultInitialBackOffInterval = 500 * time.Millisecond
+	defaultBackoffMaxElapsedTime  = 0 * time.Second
+)
+
+// Error codes returned by Call
+var (
+	ErrBreakerOpen    = errors.New("breaker open")
+	ErrBreakerTimeout = errors.New("breaker time out")
+)
+
+// TripFunc is a function called by a Breaker's Fail() function and determines whether
+// the breaker should trip. It receives the Breaker as an argument and returns a
+// boolean.
By default, a Breaker has no TripFunc. +type TripFunc func(*Breaker) bool + +// Breaker is the base of a circuit breaker. It maintains failure and success counters +// as well as the event subscribers. +type Breaker struct { + // BackOff is the backoff policy that is used when determining if the breaker should + // attempt to retry. A breaker created with NewBreaker will use an exponential backoff + // policy by default. + BackOff backoff.BackOff + + // ShouldTrip is a TripFunc that determines whether a Fail() call should trip the breaker. + // A breaker created with NewBreaker will not have a ShouldTrip by default, and thus will + // never automatically trip. + ShouldTrip TripFunc + + // Clock is used for controlling time in tests. + Clock clock.Clock + + _ [4]byte // pad to fix golang issue #599 + consecFailures int64 + lastFailure int64 // stored as nanoseconds since the Unix epoch + halfOpens int64 + counts *window + nextBackOff time.Duration + tripped int32 + broken int32 + stopped int32 + eventReceivers []chan BreakerEvent + listeners []chan ListenerEvent + backoffLock sync.Mutex +} + +// Options holds breaker configuration options. +type Options struct { + BackOff backoff.BackOff + Clock clock.Clock + ShouldTrip TripFunc + WindowTime time.Duration + WindowBuckets int +} + +// NewBreakerWithOptions creates a base breaker with a specified backoff, clock and TripFunc +func NewBreakerWithOptions(options *Options) *Breaker { + if options == nil { + options = &Options{} + } + + if options.Clock == nil { + options.Clock = clock.New() + } + + if options.BackOff == nil { + b := backoff.NewExponentialBackOff() + b.InitialInterval = defaultInitialBackOffInterval + b.MaxElapsedTime = defaultBackoffMaxElapsedTime + b.Clock = options.Clock + b.Reset() + options.BackOff = b + } + + if options.WindowTime == 0 { + options.WindowTime = DefaultWindowTime + } + + if options.WindowBuckets == 0 { + options.WindowBuckets = DefaultWindowBuckets + } + + return &Breaker{ + BackOff: options.BackOff, + Clock: options.Clock, + ShouldTrip: options.ShouldTrip, + nextBackOff: options.BackOff.NextBackOff(), + counts: newWindow(options.WindowTime, options.WindowBuckets), + } +} + +// NewBreaker creates a base breaker with an exponential backoff and no TripFunc +func NewBreaker() *Breaker { + return NewBreakerWithOptions(nil) +} + +// NewThresholdBreaker creates a Breaker with a ThresholdTripFunc. +func NewThresholdBreaker(threshold int64) *Breaker { + return NewBreakerWithOptions(&Options{ + ShouldTrip: ThresholdTripFunc(threshold), + }) +} + +// NewConsecutiveBreaker creates a Breaker with a ConsecutiveTripFunc. +func NewConsecutiveBreaker(threshold int64) *Breaker { + return NewBreakerWithOptions(&Options{ + ShouldTrip: ConsecutiveTripFunc(threshold), + }) +} + +// NewRateBreaker creates a Breaker with a RateTripFunc. +func NewRateBreaker(rate float64, minSamples int64) *Breaker { + return NewBreakerWithOptions(&Options{ + ShouldTrip: RateTripFunc(rate, minSamples), + }) +} + +// Subscribe returns a channel of BreakerEvents. Whenever the breaker changes state, +// the state will be sent over the channel. See BreakerEvent for the types of events. 
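+//
+// A short sketch of consuming the channel (an illustration, not part of the
+// vendored file):
+//
+//	events := cb.Subscribe()
+//	go func() {
+//		for e := range events {
+//			if e == BreakerTripped {
+//				log.Println("breaker tripped")
+//			}
+//		}
+//	}()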
+func (cb *Breaker) Subscribe() <-chan BreakerEvent {
+	eventReader := make(chan BreakerEvent)
+	output := make(chan BreakerEvent, 100)
+
+	go func() {
+		for v := range eventReader {
+			select {
+			case output <- v:
+				// stop the subscriber goroutine if the breaker was asked to stop
+				if v == BreakerStop {
+					return
+				}
+			default:
+				<-output
+				output <- v
+			}
+		}
+	}()
+	cb.eventReceivers = append(cb.eventReceivers, eventReader)
+	return output
+}
+
+// AddListener adds a channel of ListenerEvents on behalf of a listener.
+// The listener channel must be buffered.
+func (cb *Breaker) AddListener(listener chan ListenerEvent) {
+	cb.listeners = append(cb.listeners, listener)
+}
+
+// RemoveListener removes a channel previously added via AddListener.
+// Once removed, the channel will no longer receive ListenerEvents.
+// Returns true if the listener was found and removed.
+func (cb *Breaker) RemoveListener(listener chan ListenerEvent) bool {
+	for i, receiver := range cb.listeners {
+		if listener == receiver {
+			cb.listeners = append(cb.listeners[:i], cb.listeners[i+1:]...)
+			return true
+		}
+	}
+	return false
+}
+
+// Trip will trip the circuit breaker. After Trip() is called, Tripped() will
+// return true.
+func (cb *Breaker) Trip() {
+	atomic.StoreInt32(&cb.tripped, 1)
+	now := cb.Clock.Now()
+	atomic.StoreInt64(&cb.lastFailure, now.UnixNano())
+	cb.sendEvent(BreakerTripped)
+}
+
+// Reset will reset the circuit breaker. After Reset() is called, Tripped() will
+// return false.
+func (cb *Breaker) Reset() {
+	atomic.StoreInt32(&cb.broken, 0)
+	atomic.StoreInt32(&cb.tripped, 0)
+	atomic.StoreInt64(&cb.halfOpens, 0)
+	cb.ResetCounters()
+	cb.sendEvent(BreakerReset)
+}
+
+// ResetCounters will reset only the failures, consecFailures, and success counters
+func (cb *Breaker) ResetCounters() {
+	atomic.StoreInt64(&cb.consecFailures, 0)
+	cb.counts.Reset()
+}
+
+// Tripped returns true if the circuit breaker is tripped, false if it is reset.
+func (cb *Breaker) Tripped() bool {
+	return atomic.LoadInt32(&cb.tripped) == 1
+}
+
+// Break trips the circuit breaker and prevents it from auto resetting. Use this when
+// manual control over the circuit breaker state is needed.
+func (cb *Breaker) Break() {
+	atomic.StoreInt32(&cb.broken, 1)
+	cb.Trip()
+}
+
+// Stop stops all goroutines that process events
+func (cb *Breaker) Stop() {
+	atomic.StoreInt32(&cb.stopped, 1)
+	cb.sendEvent(BreakerStop)
+}
+
+// Failures returns the number of failures for this circuit breaker.
+func (cb *Breaker) Failures() int64 {
+	return cb.counts.Failures()
+}
+
+// ConsecFailures returns the number of consecutive failures that have occurred.
+func (cb *Breaker) ConsecFailures() int64 {
+	return atomic.LoadInt64(&cb.consecFailures)
+}
+
+// Successes returns the number of successes for this circuit breaker.
+func (cb *Breaker) Successes() int64 {
+	return cb.counts.Successes()
+}
+
+// Fail is used to indicate a failure condition the Breaker should record. It will
+// increment the failure counters and store the time of the last failure. If the
+// breaker has a TripFunc it will be called, tripping the breaker if necessary.
+func (cb *Breaker) Fail() {
+	cb.counts.Fail()
+	atomic.AddInt64(&cb.consecFailures, 1)
+	now := cb.Clock.Now()
+	atomic.StoreInt64(&cb.lastFailure, now.UnixNano())
+	cb.sendEvent(BreakerFail)
+	if cb.ShouldTrip != nil && cb.ShouldTrip(cb) {
+		cb.Trip()
+	}
+}
+
+// Success is used to indicate a success condition the Breaker should record. If
+// the success was triggered by a retry attempt, the breaker will be Reset().
+func (cb *Breaker) Success() {
+	cb.backoffLock.Lock()
+	cb.BackOff.Reset()
+	cb.nextBackOff = cb.BackOff.NextBackOff()
+	cb.backoffLock.Unlock()
+
+	state := cb.state()
+	if state == halfopen {
+		cb.Reset()
+	}
+	atomic.StoreInt64(&cb.consecFailures, 0)
+	cb.counts.Success()
+}
+
+// ErrorRate returns the current error rate of the Breaker, expressed as a floating
+// point number (e.g. 0.9 for 90%), since the last time the breaker was Reset.
+func (cb *Breaker) ErrorRate() float64 {
+	return cb.counts.ErrorRate()
+}
+
+// Ready will return true if the circuit breaker is ready to call the function.
+// It will be ready if the breaker is in a reset state, or if it is time to retry
+// the call for auto resetting.
+func (cb *Breaker) Ready() bool {
+	state := cb.state()
+	if state == halfopen {
+		atomic.StoreInt64(&cb.halfOpens, 0)
+		cb.sendEvent(BreakerReady)
+	}
+	return state == closed || state == halfopen
+}
+
+// Call wraps a function the Breaker will protect. A failure is recorded
+// whenever the function returns an error. If the called function takes longer
+// than timeout to run, a failure will be recorded.
+func (cb *Breaker) Call(circuit func() error, timeout time.Duration) error {
+	return cb.CallContext(context.Background(), circuit, timeout)
+}
+
+// CallContext is the same as Call, but if the ctx is canceled after the circuit returned an error,
+// the error will not be marked as a failure because the call was canceled intentionally.
+func (cb *Breaker) CallContext(ctx context.Context, circuit func() error, timeout time.Duration) error {
+	var err error
+
+	if !cb.Ready() {
+		return ErrBreakerOpen
+	}
+
+	if timeout == 0 {
+		err = circuit()
+	} else {
+		c := make(chan error, 1)
+		go func() {
+			c <- circuit()
+			close(c)
+		}()
+
+		select {
+		case e := <-c:
+			err = e
+		case <-cb.Clock.After(timeout):
+			err = ErrBreakerTimeout
+		}
+	}
+
+	if err != nil {
+		if ctx.Err() != context.Canceled {
+			cb.Fail()
+		}
+		return err
+	}
+
+	cb.Success()
+	return nil
+}
+
+// state returns the state of the Breaker. The states available are:
+// closed - the circuit is in a reset state and is operational
+// open - the circuit is in a tripped state
+// halfopen - the circuit is in a tripped state but the reset timeout has passed
+func (cb *Breaker) state() state {
+	tripped := cb.Tripped()
+	if tripped {
+		if atomic.LoadInt32(&cb.broken) == 1 {
+			return open
+		}
+
+		last := atomic.LoadInt64(&cb.lastFailure)
+		since := cb.Clock.Now().Sub(time.Unix(0, last))
+
+		cb.backoffLock.Lock()
+		defer cb.backoffLock.Unlock()
+
+		if cb.nextBackOff != backoff.Stop && since > cb.nextBackOff {
+			if atomic.CompareAndSwapInt64(&cb.halfOpens, 0, 1) {
+				cb.nextBackOff = cb.BackOff.NextBackOff()
+				return halfopen
+			}
+			return open
+		}
+		return open
+	}
+	return closed
+}
+
+func (cb *Breaker) sendEvent(event BreakerEvent) {
+	for _, receiver := range cb.eventReceivers {
+		receiver <- event
+	}
+	for _, listener := range cb.listeners {
+		le := ListenerEvent{CB: cb, Event: event}
+		select {
+		case listener <- le:
+		default:
+			<-listener
+			listener <- le
+		}
+	}
+}
+
+// ThresholdTripFunc returns a TripFunc that trips whenever
+// the failure count meets the threshold.
+func ThresholdTripFunc(threshold int64) TripFunc {
+	return func(cb *Breaker) bool {
+		return cb.Failures() == threshold
+	}
+}
+
+// ConsecutiveTripFunc returns a TripFunc that trips whenever
+// the consecutive failure count meets the threshold.
+func ConsecutiveTripFunc(threshold int64) TripFunc { + return func(cb *Breaker) bool { + return cb.ConsecFailures() == threshold + } +} + +// RateTripFunc returns a TripFunc that trips whenever the +// error rate hits the threshold. The error rate is calculated as such: +// f = number of failures +// s = number of successes +// e = f / (f + s) +// The error rate is calculated over a sliding window of 10 seconds (by default) +// This TripFunc will not trip until there have been at least minSamples events. +func RateTripFunc(rate float64, minSamples int64) TripFunc { + return func(cb *Breaker) bool { + samples := cb.Failures() + cb.Successes() + return samples >= minSamples && cb.ErrorRate() >= rate + } +} diff --git a/vendor/github.com/TykTechnologies/circuitbreaker/client.go b/vendor/github.com/TykTechnologies/circuitbreaker/client.go new file mode 100644 index 00000000000..e91c52b26ca --- /dev/null +++ b/vendor/github.com/TykTechnologies/circuitbreaker/client.go @@ -0,0 +1,170 @@ +package circuit + +import ( + "io" + "net/http" + "net/url" + "time" +) + +// HTTPClient is a wrapper around http.Client that provides circuit breaker capabilities. +// +// By default, the client will use its defaultBreaker. A BreakerLookup function may be +// provided to allow different breakers to be used based on the circumstance. See the +// implementation of NewHostBasedHTTPClient for an example of this. +type HTTPClient struct { + Client *http.Client + BreakerTripped func() + BreakerReset func() + BreakerLookup func(*HTTPClient, interface{}) *Breaker + Panel *Panel + timeout time.Duration +} + +var defaultBreakerName = "_default" + +// NewHTTPClient provides a circuit breaker wrapper around http.Client. +// It wraps all of the regular http.Client functions. Specifying 0 for timeout will +// give a breaker that does not check for time outs. +func NewHTTPClient(timeout time.Duration, threshold int64, client *http.Client) *HTTPClient { + breaker := NewThresholdBreaker(threshold) + return NewHTTPClientWithBreaker(breaker, timeout, client) +} + +// NewHostBasedHTTPClient provides a circuit breaker wrapper around http.Client. This +// client will use one circuit breaker per host parsed from the request URL. This allows +// you to use a single HTTPClient for multiple hosts with one host's breaker not affecting +// the other hosts. +func NewHostBasedHTTPClient(timeout time.Duration, threshold int64, client *http.Client) *HTTPClient { + brclient := NewHTTPClient(timeout, threshold, client) + + brclient.BreakerLookup = func(c *HTTPClient, val interface{}) *Breaker { + rawURL := val.(string) + parsedURL, err := url.Parse(rawURL) + if err != nil { + breaker, _ := c.Panel.Get(defaultBreakerName) + return breaker + } + host := parsedURL.Host + + cb, ok := c.Panel.Get(host) + if !ok { + cb = NewThresholdBreaker(threshold) + c.Panel.Add(host, cb) + } + + return cb + } + + return brclient +} + +// NewHTTPClientWithBreaker provides a circuit breaker wrapper around http.Client. +// It wraps all of the regular http.Client functions using the provided Breaker. 
+func NewHTTPClientWithBreaker(breaker *Breaker, timeout time.Duration, client *http.Client) *HTTPClient { + if client == nil { + client = &http.Client{} + } + + panel := NewPanel() + panel.Add(defaultBreakerName, breaker) + + brclient := &HTTPClient{Client: client, Panel: panel, timeout: timeout} + brclient.BreakerLookup = func(c *HTTPClient, val interface{}) *Breaker { + cb, _ := c.Panel.Get(defaultBreakerName) + return cb + } + + events := breaker.Subscribe() + go func() { + event := <-events + switch event { + case BreakerTripped: + brclient.runBreakerTripped() + case BreakerReset: + brclient.runBreakerReset() + } + }() + + return brclient +} + +// Do wraps http.Client Do() +func (c *HTTPClient) Do(req *http.Request) (*http.Response, error) { + var resp *http.Response + var err error + breaker := c.breakerLookup(req.URL.String()) + err = breaker.Call(func() error { + resp, err = c.Client.Do(req) + return err + }, c.timeout) + return resp, err +} + +// Get wraps http.Client Get() +func (c *HTTPClient) Get(url string) (*http.Response, error) { + var resp *http.Response + breaker := c.breakerLookup(url) + err := breaker.Call(func() error { + aresp, err := c.Client.Get(url) + resp = aresp + return err + }, c.timeout) + return resp, err +} + +// Head wraps http.Client Head() +func (c *HTTPClient) Head(url string) (*http.Response, error) { + var resp *http.Response + breaker := c.breakerLookup(url) + err := breaker.Call(func() error { + aresp, err := c.Client.Head(url) + resp = aresp + return err + }, c.timeout) + return resp, err +} + +// Post wraps http.Client Post() +func (c *HTTPClient) Post(url string, bodyType string, body io.Reader) (*http.Response, error) { + var resp *http.Response + breaker := c.breakerLookup(url) + err := breaker.Call(func() error { + aresp, err := c.Client.Post(url, bodyType, body) + resp = aresp + return err + }, c.timeout) + return resp, err +} + +// PostForm wraps http.Client PostForm() +func (c *HTTPClient) PostForm(url string, data url.Values) (*http.Response, error) { + var resp *http.Response + breaker := c.breakerLookup(url) + err := breaker.Call(func() error { + aresp, err := c.Client.PostForm(url, data) + resp = aresp + return err + }, c.timeout) + return resp, err +} + +func (c *HTTPClient) breakerLookup(val interface{}) *Breaker { + if c.BreakerLookup != nil { + return c.BreakerLookup(c, val) + } + cb, _ := c.Panel.Get(defaultBreakerName) + return cb +} + +func (c *HTTPClient) runBreakerTripped() { + if c.BreakerTripped != nil { + c.BreakerTripped() + } +} + +func (c *HTTPClient) runBreakerReset() { + if c.BreakerReset != nil { + c.BreakerReset() + } +} diff --git a/vendor/github.com/TykTechnologies/circuitbreaker/panel.go b/vendor/github.com/TykTechnologies/circuitbreaker/panel.go new file mode 100644 index 00000000000..cd4e7b6c7ee --- /dev/null +++ b/vendor/github.com/TykTechnologies/circuitbreaker/panel.go @@ -0,0 +1,144 @@ +package circuit + +import ( + "fmt" + "sync" + "time" +) + +var defaultStatsPrefixf = "circuit.%s" + +// Statter interface provides a way to gather statistics from breakers +type Statter interface { + Counter(sampleRate float32, bucket string, n ...int) + Timing(sampleRate float32, bucket string, d ...time.Duration) + Gauge(sampleRate float32, bucket string, value ...string) +} + +// PanelEvent wraps a BreakerEvent and provides the string name of the breaker +type PanelEvent struct { + Name string + Event BreakerEvent +} + +// Panel tracks a group of circuit breakers by name. 
+type Panel struct { + Statter Statter + StatsPrefixf string + + Circuits map[string]*Breaker + + lastTripTimes map[string]time.Time + tripTimesLock sync.RWMutex + panelLock sync.RWMutex + eventReceivers []chan PanelEvent +} + +// NewPanel creates a new Panel +func NewPanel() *Panel { + return &Panel{ + Circuits: make(map[string]*Breaker), + Statter: &noopStatter{}, + StatsPrefixf: defaultStatsPrefixf, + lastTripTimes: make(map[string]time.Time)} +} + +// Add sets the name as a reference to the given circuit breaker. +func (p *Panel) Add(name string, cb *Breaker) { + p.panelLock.Lock() + p.Circuits[name] = cb + p.panelLock.Unlock() + + events := cb.Subscribe() + + go func() { + for event := range events { + for _, receiver := range p.eventReceivers { + receiver <- PanelEvent{name, event} + } + switch event { + case BreakerTripped: + p.breakerTripped(name) + case BreakerReset: + p.breakerReset(name) + case BreakerFail: + p.breakerFail(name) + case BreakerReady: + p.breakerReady(name) + } + } + }() +} + +// Get retrieves a circuit breaker by name. If no circuit breaker exists, it +// returns the NoOp one and sets ok to false. +func (p *Panel) Get(name string) (*Breaker, bool) { + p.panelLock.RLock() + cb, ok := p.Circuits[name] + p.panelLock.RUnlock() + + if ok { + return cb, ok + } + + return NewBreaker(), ok +} + +// Subscribe returns a channel of PanelEvents. Whenever a breaker changes state, +// the PanelEvent will be sent over the channel. See BreakerEvent for the types of events. +func (p *Panel) Subscribe() <-chan PanelEvent { + eventReader := make(chan PanelEvent) + output := make(chan PanelEvent, 100) + + go func() { + for v := range eventReader { + select { + case output <- v: + default: + <-output + output <- v + } + } + }() + p.eventReceivers = append(p.eventReceivers, eventReader) + return output +} + +func (p *Panel) breakerTripped(name string) { + p.Statter.Counter(1.0, fmt.Sprintf(p.StatsPrefixf, name)+".tripped", 1) + p.tripTimesLock.Lock() + p.lastTripTimes[name] = time.Now() + p.tripTimesLock.Unlock() +} + +func (p *Panel) breakerReset(name string) { + bucket := fmt.Sprintf(p.StatsPrefixf, name) + + p.Statter.Counter(1.0, bucket+".reset", 1) + + p.tripTimesLock.RLock() + lastTrip := p.lastTripTimes[name] + p.tripTimesLock.RUnlock() + + if !lastTrip.IsZero() { + p.Statter.Timing(1.0, bucket+".trip-time", time.Since(lastTrip)) + p.tripTimesLock.Lock() + p.lastTripTimes[name] = time.Time{} + p.tripTimesLock.Unlock() + } +} + +func (p *Panel) breakerFail(name string) { + p.Statter.Counter(1.0, fmt.Sprintf(p.StatsPrefixf, name)+".fail", 1) +} + +func (p *Panel) breakerReady(name string) { + p.Statter.Counter(1.0, fmt.Sprintf(p.StatsPrefixf, name)+".ready", 1) +} + +type noopStatter struct { +} + +func (*noopStatter) Counter(sampleRate float32, bucket string, n ...int) {} +func (*noopStatter) Timing(sampleRate float32, bucket string, d ...time.Duration) {} +func (*noopStatter) Gauge(sampleRate float32, bucket string, value ...string) {} diff --git a/vendor/github.com/TykTechnologies/circuitbreaker/window.go b/vendor/github.com/TykTechnologies/circuitbreaker/window.go new file mode 100644 index 00000000000..ab83187f6ca --- /dev/null +++ b/vendor/github.com/TykTechnologies/circuitbreaker/window.go @@ -0,0 +1,174 @@ +package circuit + +import ( + "container/ring" + "sync" + "time" + + "github.com/facebookgo/clock" +) + +var ( + // DefaultWindowTime is the default time the window covers, 10 seconds. 
+	DefaultWindowTime = time.Millisecond * 10000
+
+	// DefaultWindowBuckets is the default number of buckets the window holds, 10.
+	DefaultWindowBuckets = 10
+)
+
+// bucket holds counts of failures and successes
+type bucket struct {
+	failure int64
+	success int64
+}
+
+// Reset resets the counts to 0
+func (b *bucket) Reset() {
+	b.failure = 0
+	b.success = 0
+}
+
+// Fail increments the failure count
+func (b *bucket) Fail() {
+	b.failure++
+}
+
+// Success increments the success count
+func (b *bucket) Success() {
+	b.success++
+}
+
+// window maintains a ring of buckets and increments the failure and success
+// counts of the current bucket. Once a specified time has elapsed, it will
+// advance to the next bucket, resetting its counts. This allows keeping
+// rolling statistics on the counts.
+type window struct {
+	buckets    *ring.Ring
+	bucketTime time.Duration
+	bucketLock sync.RWMutex
+	lastAccess time.Time
+	clock      clock.Clock
+}
+
+// newWindow creates a new window. windowTime is the time covering the entire
+// window. windowBuckets is the number of buckets the window is divided into.
+// An example: a 10 second window with 10 buckets will have 10 buckets covering
+// 1 second each.
+func newWindow(windowTime time.Duration, windowBuckets int) *window {
+	buckets := ring.New(windowBuckets)
+	for i := 0; i < buckets.Len(); i++ {
+		buckets.Value = &bucket{}
+		buckets = buckets.Next()
+	}
+
+	clock := clock.New()
+
+	bucketTime := time.Duration(windowTime.Nanoseconds() / int64(windowBuckets))
+	return &window{
+		buckets:    buckets,
+		bucketTime: bucketTime,
+		clock:      clock,
+		lastAccess: clock.Now(),
+	}
+}
+
+// Fail records a failure in the current bucket.
+func (w *window) Fail() {
+	w.bucketLock.Lock()
+	b := w.getLatestBucket()
+	b.Fail()
+	w.bucketLock.Unlock()
+}
+
+// Success records a success in the current bucket.
+func (w *window) Success() {
+	w.bucketLock.Lock()
+	b := w.getLatestBucket()
+	b.Success()
+	w.bucketLock.Unlock()
+}
+
+// Failures returns the total number of failures recorded in all buckets.
+func (w *window) Failures() int64 {
+	w.bucketLock.RLock()
+
+	var failures int64
+	w.buckets.Do(func(x interface{}) {
+		b := x.(*bucket)
+		failures += b.failure
+	})
+
+	w.bucketLock.RUnlock()
+	return failures
+}
+
+// Successes returns the total number of successes recorded in all buckets.
+func (w *window) Successes() int64 {
+	w.bucketLock.RLock()
+
+	var successes int64
+	w.buckets.Do(func(x interface{}) {
+		b := x.(*bucket)
+		successes += b.success
+	})
+	w.bucketLock.RUnlock()
+	return successes
+}
+
+// ErrorRate returns the error rate calculated over all buckets, expressed as
+// a floating point number (e.g. 0.9 for 90%)
+func (w *window) ErrorRate() float64 {
+	var total int64
+	var failures int64
+
+	w.bucketLock.RLock()
+	w.buckets.Do(func(x interface{}) {
+		b := x.(*bucket)
+		total += b.failure + b.success
+		failures += b.failure
+	})
+	w.bucketLock.RUnlock()
+
+	if total == 0 {
+		return 0.0
+	}
+
+	return float64(failures) / float64(total)
+}
+
+// Reset resets the count of all buckets.
+func (w *window) Reset() {
+	w.bucketLock.Lock()
+
+	w.buckets.Do(func(x interface{}) {
+		x.(*bucket).Reset()
+	})
+	w.bucketLock.Unlock()
+}
+
+// getLatestBucket returns the current bucket. If the bucket time has elapsed
+// it will move to the next bucket, resetting its counts and updating the last
+// access time before returning it. getLatestBucket assumes that the caller has
+// locked the bucketLock
+func (w *window) getLatestBucket() *bucket {
+	var b *bucket
+	b = w.buckets.Value.(*bucket)
+	elapsed := w.clock.Now().Sub(w.lastAccess)
+
+	if elapsed > w.bucketTime {
+		// Reset the buckets between now and number of buckets ago. If
+		// that is more than the existing buckets, reset all.
+		for i := 0; i < w.buckets.Len(); i++ {
+			w.buckets = w.buckets.Next()
+			b = w.buckets.Value.(*bucket)
+			b.Reset()
+			elapsed = time.Duration(int64(elapsed) - int64(w.bucketTime))
+			if elapsed < w.bucketTime {
+				// Done resetting buckets.
+				break
+			}
+		}
+		w.lastAccess = w.clock.Now()
+	}
+	return b
+}
diff --git a/vendor/github.com/TykTechnologies/drl/Makefile b/vendor/github.com/TykTechnologies/drl/Makefile
new file mode 100644
index 00000000000..9d23086e214
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/drl/Makefile
@@ -0,0 +1,15 @@
+.PHONY: all fmt test stresstest tidy
+
+all: fmt test tidy
+
+fmt:
+	go fmt ./...
+
+test:
+	go test -race -v -count=1 -cover .
+
+stresstest:
+	go test -race -failfast -count=1000 -cover .
+
+tidy:
+	go mod tidy
\ No newline at end of file
diff --git a/vendor/github.com/TykTechnologies/drl/drl.go b/vendor/github.com/TykTechnologies/drl/drl.go
new file mode 100644
index 00000000000..2ee97216292
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/drl/drl.go
@@ -0,0 +1,199 @@
+package drl
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+type Server struct {
+	HostName   string
+	ID         string
+	LoadPerSec int64
+	Percentage float64
+	TagHash    string
+}
+
+type DRL struct {
+	Servers           *Cache
+	mutex             sync.Mutex
+	serverIndex       map[string]Server
+	ThisServerID      string
+	CurrentTotal      int64
+	RequestTokenValue int
+	currentTokenValue int64
+	isClosed          int32
+	stopC             chan struct{}
+}
+
+func (d *DRL) Ready() bool {
+	return d.IsOpen()
+}
+
+func (d *DRL) IsOpen() bool {
+	return atomic.LoadInt32(&d.isClosed) == OPEN
+}
+
+func (d *DRL) SetCurrentTokenValue(newValue int64) {
+	atomic.StoreInt64(&d.currentTokenValue, newValue)
+}
+
+func (d *DRL) CurrentTokenValue() int64 {
+	return atomic.LoadInt64(&d.currentTokenValue)
+}
+
+func (d *DRL) Init(ctx context.Context) {
+	d.Servers = NewCache(4 * time.Second)
+	d.RequestTokenValue = 100
+	d.serverIndex = make(map[string]Server)
+	d.stopC = make(chan struct{})
+
+	go d.startLoop(ctx)
+}
+
+func (d *DRL) startLoop(ctx context.Context) {
+	t := time.NewTicker(5 * time.Second)
+	defer t.Stop()
+	for {
+		select {
+		case <-ctx.Done():
+			d.Close()
+			return
+		case <-d.stopC:
+			return
+		case <-t.C:
+			d.mutex.Lock()
+			d.cleanServerList()
+			d.mutex.Unlock()
+		}
+	}
+}
+
+func (d *DRL) uniqueID(s Server) string {
+	uniqueID := s.ID + "|" + s.HostName
+	return uniqueID
+}
+
+func (d *DRL) Close() {
+	wasClosed := atomic.SwapInt32(&d.isClosed, CLOSED)
+	if wasClosed == 0 {
+		close(d.stopC)
+		d.Servers.Close()
+	}
+}
+
+func (d *DRL) totalLoadAcrossServers() int64 {
+	var total int64
+	for s := range d.serverIndex {
+		_, found := d.Servers.GetNoExtend(s)
+		if found {
+			total += d.serverIndex[s].LoadPerSec
+		}
+	}
+
+	d.CurrentTotal = total
+
+	return total
+}
+
+func (d *DRL) cleanServerList() {
+	toRemove := map[string]bool{}
+	for s := range d.serverIndex {
+		_, found := d.Servers.GetNoExtend(s)
+		if !found {
+			toRemove[s] = true
+		}
+	}
+
+	// Update the server list
+	for s := range toRemove {
+		delete(d.serverIndex, s)
+	}
+}
+
+func (d *DRL) percentagesAcrossServers() {
+	for s := range d.serverIndex {
+		_, found := d.Servers.GetNoExtend(s)
+
+		if found {
+			thisServerObject := d.serverIndex[s]
+
+			// The compensation should be flat out based on servers,
+			// not on current load; it tends to skew too conservative
+			thisServerObject.Percentage = 1 / float64(d.Servers.Count())
+			d.serverIndex[s] = thisServerObject
+		}
+	}
+}
+
+func (d *DRL) calculateTokenBucketValue() error {
+	_, found := d.Servers.Get(d.ThisServerID)
+	if !found {
+		return errors.New("Apparently this server does not exist!")
+	}
+	// Use our own index
+	thisServerObject := d.serverIndex[d.ThisServerID]
+
+	var thisTokenValue float64
+	thisTokenValue = float64(d.RequestTokenValue)
+
+	if thisServerObject.Percentage > 0 {
+		thisTokenValue = float64(d.RequestTokenValue) / thisServerObject.Percentage
+	}
+
+	rounded := Round(thisTokenValue, .5, 0)
+	d.SetCurrentTokenValue(int64(rounded))
+	return nil
+}
+
+func (d *DRL) AddOrUpdateServer(s Server) error {
+	// Add or update the cache
+	d.mutex.Lock()
+	defer d.mutex.Unlock()
+
+	if d.uniqueID(s) != d.ThisServerID {
+		thisServer, found := d.Servers.GetNoExtend(d.ThisServerID)
+		if found {
+			if thisServer.TagHash != s.TagHash {
+				return errors.New("Node notification from different tag group, ignoring.")
+			}
+		} else {
+			// We don't know enough about our own host, so let's skip for now until we do
+			return errors.New("DRL has no information on current host, waiting...")
+		}
+	}
+
+	if d.serverIndex != nil {
+		d.serverIndex[d.uniqueID(s)] = s
+	}
+	d.Servers.Set(d.uniqueID(s), s)
+
+	// Recalculate totals
+	d.totalLoadAcrossServers()
+
+	// Recalculate percentages
+	d.percentagesAcrossServers()
+
+	// Get the current token bucket value:
+	calcErr := d.calculateTokenBucketValue()
+	if calcErr != nil {
+		return calcErr
+	}
+
+	return nil
+}
+
+func (d *DRL) Report() string {
+	thisServer, found := d.Servers.GetNoExtend(d.ThisServerID)
+	if found {
+		return fmt.Sprintf("[Active Nodes]: %d [Token Bucket Value]: %d [Current Load p/s]: %d [%% of Rate]: %f",
+			d.Servers.Count(),
+			d.CurrentTokenValue(),
+			thisServer.LoadPerSec,
+			thisServer.Percentage)
+	}
+
+	return "Error: server doesn't exist!"
+} diff --git a/vendor/github.com/TykTechnologies/drl/go.mod b/vendor/github.com/TykTechnologies/drl/go.mod new file mode 100644 index 00000000000..0cb4195bde7 --- /dev/null +++ b/vendor/github.com/TykTechnologies/drl/go.mod @@ -0,0 +1,9 @@ +module github.com/TykTechnologies/drl + +go 1.12 + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/stretchr/testify v1.6.1 + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/vendor/github.com/TykTechnologies/drl/go.sum b/vendor/github.com/TykTechnologies/drl/go.sum new file mode 100644 index 00000000000..494c4b1fa94 --- /dev/null +++ b/vendor/github.com/TykTechnologies/drl/go.sum @@ -0,0 +1,13 @@ +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/github.com/TykTechnologies/drl/item.go b/vendor/github.com/TykTechnologies/drl/item.go new file mode 100644 index 00000000000..e57239c54fc --- /dev/null +++ b/vendor/github.com/TykTechnologies/drl/item.go @@ -0,0 +1,32 @@ +package drl + +import ( + "sync" + "time" +) + +// Item represents a record in the cache map +type Item struct { + mu sync.RWMutex + data Server + expires *time.Time +} + +func (item *Item) touch(duration time.Duration) { + item.mu.Lock() + expiration := time.Now().Add(duration) + item.expires = &expiration + item.mu.Unlock() +} + +func (item *Item) expired() bool { + var value bool + item.mu.RLock() + if item.expires == nil { + value = true + } else { + value = item.expires.Before(time.Now()) + } + item.mu.RUnlock() + return value +} diff --git a/vendor/github.com/TykTechnologies/drl/ttlcache.go b/vendor/github.com/TykTechnologies/drl/ttlcache.go new file mode 100644 index 00000000000..c1ce83519a2 --- /dev/null +++ b/vendor/github.com/TykTechnologies/drl/ttlcache.go @@ -0,0 +1,132 @@ +package drl + +import ( + "sync" + "sync/atomic" + "time" +) + +// Cache is a synchronized map of items that auto-expire once stale +type Cache struct { + mutex sync.RWMutex + ttl time.Duration + items map[string]*Item + stopC chan struct{} + + isClosed int32 +} + +// IsOpen returns true if cache is open. If true this means the cache is +// operational, since the cache uses a background goroutine to manage ttl, this +// will be false when that background process has been terminated marking this +// cache unsuitable for use. 
+func (c *Cache) IsOpen() bool {
+	return atomic.LoadInt32(&c.isClosed) == OPEN
+}
+
+// Set is a thread-safe way to add new items to the map
+func (c *Cache) Set(key string, data Server) {
+	c.mutex.Lock()
+	item := &Item{data: data}
+	item.touch(c.ttl)
+	c.items[key] = item
+	c.mutex.Unlock()
+}
+
+// Get is a thread-safe way to lookup items
+// Every lookup also touches the item, hence extending its life
+func (c *Cache) Get(key string) (data Server, found bool) {
+	c.mutex.Lock()
+	item, exists := c.items[key]
+	if !exists || item.expired() {
+		data = Server{}
+		found = false
+	} else {
+		item.touch(c.ttl)
+		data = item.data
+		found = true
+	}
+	c.mutex.Unlock()
+	return
+}
+
+// GetNoExtend is a thread-safe way to lookup items
+// Unlike Get, it does not touch the item, so its expiration time is left unchanged
+func (c *Cache) GetNoExtend(key string) (data Server, found bool) {
+	c.mutex.Lock()
+	item, exists := c.items[key]
+	if !exists || item.expired() {
+		data = Server{}
+		found = false
+	} else {
+		data = item.data
+		found = true
+	}
+	c.mutex.Unlock()
+	return
+}
+
+// Count returns the number of items in the cache
+// (helpful for tracking memory leaks)
+func (c *Cache) Count() int {
+	c.mutex.RLock()
+	count := len(c.items)
+	c.mutex.RUnlock()
+	return count
+}
+
+// Close frees up resources used by the cache.
+func (c *Cache) Close() {
+	wasClosed := atomic.SwapInt32(&c.isClosed, CLOSED)
+	if wasClosed == 0 {
+		c.stopC <- struct{}{}
+		c.clear()
+		close(c.stopC)
+	}
+}
+
+func (c *Cache) clear() {
+	c.mutex.Lock()
+	c.items = nil
+	c.mutex.Unlock()
+}
+
+func (c *Cache) cleanup() {
+	c.mutex.Lock()
+	for key, item := range c.items {
+		if item.expired() {
+			delete(c.items, key)
+		}
+	}
+	c.mutex.Unlock()
+}
+
+var minimumCleanupInterval = time.Second
+
+func (c *Cache) startCleanupTimer() {
+	duration := c.ttl
+	if duration < minimumCleanupInterval {
+		duration = minimumCleanupInterval
+	}
+	t := time.NewTicker(duration)
+	defer t.Stop()
+	for {
+		select {
+		case <-c.stopC:
+			return
+		case <-t.C:
+			c.cleanup()
+		}
+	}
+}
+
+// NewCache is a helper to create an instance of the Cache struct
+func NewCache(duration time.Duration) *Cache {
+	cache := &Cache{
+		ttl:   duration,
+		items: map[string]*Item{},
+		stopC: make(chan struct{}),
+	}
+	go cache.startCleanupTimer()
+	return cache
+}
diff --git a/vendor/github.com/TykTechnologies/drl/util.go b/vendor/github.com/TykTechnologies/drl/util.go
new file mode 100644
index 00000000000..a5ad1ae0883
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/drl/util.go
@@ -0,0 +1,31 @@
+package drl
+
+import (
+	"math"
+)
+
+// Constants for IsOpen indicators.
+//
+// Go 1.17 adds atomic.Value.Swap which is great, but 1.19
+// adds atomic.Bool and other types. This is a go <1.13 kludge.
+const ( + // Zero value - the cache is open and ready to use + OPEN = 0 + + // Closed value - the cache shouldn't be used + CLOSED = 1 +) + +func Round(val float64, roundOn float64, places int) (newVal float64) { + var round float64 + pow := math.Pow(10, float64(places)) + digit := pow * val + _, div := math.Modf(digit) + if div >= roundOn { + round = math.Ceil(digit) + } else { + round = math.Floor(digit) + } + newVal = round / pow + return +} diff --git a/vendor/github.com/TykTechnologies/goautosocket/.gitignore b/vendor/github.com/TykTechnologies/goautosocket/.gitignore new file mode 100644 index 00000000000..6d8f95faf4d --- /dev/null +++ b/vendor/github.com/TykTechnologies/goautosocket/.gitignore @@ -0,0 +1,33 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +*.gor + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test + + +*.sublime-workspace +*.sw* +*.un* + +app.conf.json +docker.conf.json diff --git a/vendor/github.com/TykTechnologies/goautosocket/.travis.yml b/vendor/github.com/TykTechnologies/goautosocket/.travis.yml new file mode 100644 index 00000000000..476900669cf --- /dev/null +++ b/vendor/github.com/TykTechnologies/goautosocket/.travis.yml @@ -0,0 +1,7 @@ +language: go +go: + - 1.4 + - 1.4.1 + - 1.4.2 +script: + - go test -v -race diff --git a/vendor/github.com/TykTechnologies/goautosocket/LICENSE b/vendor/github.com/TykTechnologies/goautosocket/LICENSE new file mode 100644 index 00000000000..4b6adf12453 --- /dev/null +++ b/vendor/github.com/TykTechnologies/goautosocket/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Clement 'cmc' Rey + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/vendor/github.com/TykTechnologies/goautosocket/README.md b/vendor/github.com/TykTechnologies/goautosocket/README.md
new file mode 100644
index 00000000000..ecb321231c1
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/goautosocket/README.md
@@ -0,0 +1,204 @@
+# GoAutoSocket (GAS) ![Status](https://img.shields.io/badge/status-stable-green.svg?style=plastic) [![Build Status](http://img.shields.io/travis/teh-cmc/goautosocket.svg?style=plastic)](https://travis-ci.org/teh-cmc/goautosocket) [![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=plastic)](http://godoc.org/github.com/teh-cmc/goautosocket)
+
+The GAS library provides auto-reconnecting TCP sockets in a tiny, fully tested, thread-safe API.
+
+The `TCPClient` struct embeds a `net.TCPConn` and overrides its `Read()` and `Write()` methods, making it entirely compatible with the `net.Conn` interface and the rest of the `net` package.
+This means you should be able to use this library by just replacing `net.Dial` with `gas.Dial` in your code.
+
+## Install
+
+```bash
+go get -u github.com/teh-cmc/goautosocket
+```
+
+## Usage
+
+To test the library, you can run a local TCP server with:
+
+    $ ncat -l 9999 -k
+
+and run this code:
+
+```go
+package main
+
+import (
+	"log"
+	"time"
+
+	"github.com/teh-cmc/goautosocket"
+)
+
+func main() {
+	// connect to a TCP server
+	conn, err := gas.Dial("tcp", "localhost:9999")
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// client sends "hello, world!" to the server every second
+	for {
+		_, err := conn.Write([]byte("hello, world!\n"))
+		if err != nil {
+			// if the client reached its retry limit, give up
+			if err == gas.ErrMaxRetries {
+				log.Println("client gave up, reached retry limit")
+				return
+			}
+			// not a GAS error, just panic
+			log.Fatal(err)
+		}
+		log.Println("client says hello!")
+		time.Sleep(time.Second)
+	}
+}
+```
+
+Then try to kill and reboot your server: the client will automatically reconnect and start sending messages again, unless it has reached its retry limit.
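+
+If the defaults don't fit your use case, `gas.DialTCP` returns a `*gas.TCPClient` directly, whose retry limit and base retry interval can be tuned with `SetMaxRetries` and `SetRetryInterval` (the interval doubles after each failed attempt). A minimal sketch; the address and values here are only placeholders:
+
+```go
+package main
+
+import (
+	"log"
+	"net"
+	"time"
+
+	"github.com/teh-cmc/goautosocket"
+)
+
+func main() {
+	raddr, err := net.ResolveTCPAddr("tcp", "localhost:9999")
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// DialTCP returns a *gas.TCPClient, which exposes the retry knobs
+	conn, err := gas.DialTCP("tcp", nil, raddr)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer conn.Close()
+
+	// allow up to 20 attempts, starting 50ms apart (doubling each retry)
+	conn.SetMaxRetries(20)
+	conn.SetRetryInterval(50 * time.Millisecond)
+
+	if _, err := conn.Write([]byte("hello, world!\n")); err != nil {
+		log.Fatal(err)
+	}
+}
+```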
+
+## Examples
+
+An advanced example of a client writing to a buggy server that's randomly crashing and rebooting:
+
+```go
+package main
+
+import (
+	"log"
+	"math/rand"
+	"net"
+	"sync"
+	"time"
+
+	"github.com/teh-cmc/goautosocket"
+)
+
+func main() {
+	rand.Seed(time.Now().UnixNano())
+
+	// open a server socket
+	s, err := net.Listen("tcp", "localhost:0")
+	if err != nil {
+		log.Fatal(err)
+	}
+	// save the original port
+	addr := s.Addr()
+
+	// connect a client to the server
+	c, err := gas.Dial("tcp", s.Addr().String())
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer c.Close()
+
+	// shut down and boot up the server randomly
+	var swg sync.WaitGroup
+	swg.Add(1)
+	go func() {
+		defer swg.Done()
+		for i := 0; i < 5; i++ {
+			log.Println("server up")
+			time.Sleep(time.Millisecond * 100 * time.Duration(rand.Intn(20)))
+			if err := s.Close(); err != nil {
+				log.Fatal(err)
+			}
+			log.Println("server down")
+			time.Sleep(time.Millisecond * 100 * time.Duration(rand.Intn(20)))
+			s, err = net.Listen("tcp", addr.String())
+			if err != nil {
+				log.Fatal(err)
+			}
+		}
+	}()
+
+	// client writes to the server and reconnects when it has to
+	// this is the interesting part
+	var cwg sync.WaitGroup
+	cwg.Add(1)
+	go func() {
+		defer cwg.Done()
+		for {
+			if _, err := c.Write([]byte("hello, world!\n")); err != nil {
+				switch e := err.(type) {
+				case gas.Error:
+					if e == gas.ErrMaxRetries {
+						log.Println("client leaving, reached retry limit")
+						return
+					}
+				default:
+					log.Fatal(err)
+				}
+			}
+			log.Println("client says hello!")
+		}
+	}()
+
+	// wait for the server goroutine to finish, then close the server for good
+	swg.Wait()
+	if err := s.Close(); err != nil {
+		log.Fatal(err)
+	}
+
+	// wait for the client to give up
+	cwg.Wait()
+}
+```
+
+You can also find an example with concurrency [here](https://github.com/teh-cmc/goautosocket/blob/master/tcp_client_test.go#L97).
+
+## Disclaimer
+
+This was built with my needs in mind, no more, no less. That is, I needed a simple, tested and thread-safe API to handle a situation in which I have:
+- on one end, a lot of goroutines concurrently writing to a TCP socket
+- on the other end, a TCP server that I have no control over (hence the main reason why UDP is out of the question) and which might be rebooted at any time
+
+I also needed the ability to give up on sending a message after an arbitrary amount of tries/time (i.e., `ErrMaxRetries`). Pretty straightforward stuff.
+
+Basically, my use case is [this situation](https://github.com/teh-cmc/goautosocket/blob/master/tcp_client_test.go#L97).
+
+Surprisingly, I couldn't find such a library (I guess I either didn't look in the right place, or just not hard enough...? oh well), so here it is.
+Do not hesitate to send a pull request if this doesn't cover all your needs (and it probably won't); they are more than welcome.
+
+If you're looking for some more insight, you might also want to look at [this discussion](http://redd.it/3aue82) we had on reddit.
+
+## License ![License](https://img.shields.io/badge/license-MIT-blue.svg?style=plastic)
+
+The MIT License (MIT) - see LICENSE for more details
+
+Copyright (c) 2015 Clement 'cmc' Rey
diff --git a/vendor/github.com/TykTechnologies/goautosocket/doc.go b/vendor/github.com/TykTechnologies/goautosocket/doc.go
new file mode 100644
index 00000000000..2775775b586
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/goautosocket/doc.go
@@ -0,0 +1,57 @@
+// Copyright © 2015 Clement 'cmc' Rey.
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+/*
+The GAS library provides auto-reconnecting TCP sockets in a
+tiny, fully tested, thread-safe API.
+
+The `TCPClient` struct embeds a `net.TCPConn` and overrides
+its `Read()` and `Write()` methods, making it entirely compatible
+with the `net.Conn` interface and the rest of the `net` package.
+This means you should be able to use this library by just
+replacing `net.Dial` with `gas.Dial` in your code.
+
+To test the library, you can run a local TCP server with:
+
+    $ ncat -l 9999 -k
+
+and run this code:
+
+	package main
+
+	import (
+		"log"
+		"time"
+
+		"github.com/teh-cmc/goautosocket"
+	)
+
+	func main() {
+		// connect to a TCP server
+		conn, err := gas.Dial("tcp", "localhost:9999")
+		if err != nil {
+			log.Fatal(err)
+		}
+
+		// client sends "hello, world!" to the server every second
+		for {
+			_, err := conn.Write([]byte("hello, world!\n"))
+			if err != nil {
+				// if the client reached its retry limit, give up
+				if err == gas.ErrMaxRetries {
+					log.Println("client gave up, reached retry limit")
+					return
+				}
+				// not a GAS error, just panic
+				log.Fatal(err)
+			}
+			log.Println("client says hello!")
+			time.Sleep(time.Second)
+		}
+	}
+
+Then try to kill and reboot your server: the client will automatically reconnect and start sending messages again, unless it has reached its retry limit.
+*/
+package gas
diff --git a/vendor/github.com/TykTechnologies/goautosocket/error.go b/vendor/github.com/TykTechnologies/goautosocket/error.go
new file mode 100644
index 00000000000..777a1e0bccc
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/goautosocket/error.go
@@ -0,0 +1,31 @@
+// Copyright © 2015 Clement 'cmc' Rey.
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+package gas
+
+// ----------------------------------------------------------------------------
+
+// Error is the error type of the GAS package.
+//
+// It implements the error interface.
+type Error int
+
+const (
+	// ErrMaxRetries is returned when the called function failed after the
+	// maximum number of allowed tries.
+	ErrMaxRetries Error = 0x01
+)
+
+// ----------------------------------------------------------------------------
+
+// Error returns the error as a string.
+func (e Error) Error() string {
+	switch e {
+	case 0x01:
+		return "ErrMaxRetries"
+	default:
+		return "unknown error"
+	}
+}
diff --git a/vendor/github.com/TykTechnologies/goautosocket/tcp_client.go b/vendor/github.com/TykTechnologies/goautosocket/tcp_client.go
new file mode 100644
index 00000000000..d4ad43b07a6
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/goautosocket/tcp_client.go
@@ -0,0 +1,325 @@
+// Copyright © 2015 Clement 'cmc' Rey.
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+package gas
+
+import (
+	"io"
+	"net"
+	"os"
+	"sync"
+	"syscall"
+	"time"
+)
+
+// ----------------------------------------------------------------------------
+
+// TCPClient provides a TCP connection with auto-reconnect capabilities.
+//
+// It embeds a *net.TCPConn and thus implements the net.Conn interface.
+//
+// Use the SetMaxRetries() and SetRetryInterval() methods to configure retry
+// values; otherwise they default to maxRetries=10 and retryInterval=10ms.
+//
+// TCPClient can be safely used from multiple goroutines.
+type TCPClient struct {
+	*net.TCPConn
+
+	lock sync.RWMutex
+
+	maxRetries    int
+	retryInterval time.Duration
+}
+
+// Dial returns a new net.Conn.
+// +// The new client connects to the remote address `raddr` on the network `network`, +// which must be "tcp", "tcp4", or "tcp6". +// +// This complements net package's Dial function. +func Dial(network, addr string) (net.Conn, error) { + raddr, err := net.ResolveTCPAddr(network, addr) + if err != nil { + return nil, err + } + + return DialTCP(network, nil, raddr) +} + +// DialTCP returns a new *TCPClient. +// +// The new client connects to the remote address `raddr` on the network `network`, +// which must be "tcp", "tcp4", or "tcp6". +// If `laddr` is not nil, it is used as the local address for the connection. +// +// This overrides net.TCPConn's DialTCP function. +func DialTCP(network string, laddr, raddr *net.TCPAddr) (*TCPClient, error) { + conn, err := net.DialTCP(network, laddr, raddr) + if err != nil { + return nil, err + } + + return &TCPClient{ + TCPConn: conn, + + lock: sync.RWMutex{}, + + maxRetries: 10, + retryInterval: 10 * time.Millisecond, + }, nil +} + +// ---------------------------------------------------------------------------- + +// SetMaxRetries sets the retry limit for the TCPClient. +// +// Assuming i is the current retry iteration, the total sleep time is +// t = retryInterval * (2^i) +// +// This function completely Lock()s the TCPClient. +func (c *TCPClient) SetMaxRetries(maxRetries int) { + c.lock.Lock() + defer c.lock.Unlock() + + c.maxRetries = maxRetries +} + +// GetMaxRetries gets the retry limit for the TCPClient. +// +// Assuming i is the current retry iteration, the total sleep time is +// t = retryInterval * (2^i) +func (c *TCPClient) GetMaxRetries() int { + c.lock.RLock() + defer c.lock.RUnlock() + + return c.maxRetries +} + +// SetRetryInterval sets the retry interval for the TCPClient. +// +// Assuming i is the current retry iteration, the total sleep time is +// t = retryInterval * (2^i) +// +// This function completely Lock()s the TCPClient. +func (c *TCPClient) SetRetryInterval(retryInterval time.Duration) { + c.lock.Lock() + defer c.lock.Unlock() + + c.retryInterval = retryInterval +} + +// GetRetryInterval gets the retry interval for the TCPClient. +// +// Assuming i is the current retry iteration, the total sleep time is +// t = retryInterval * (2^i) +func (c *TCPClient) GetRetryInterval() time.Duration { + c.lock.RLock() + defer c.lock.RUnlock() + + return c.retryInterval +} + +// ---------------------------------------------------------------------------- + +// reconnect builds a new TCP connection to replace the embedded *net.TCPConn. +// +// This function completely Lock()s the TCPClient. +// +// TODO: keep old socket configuration (timeout, linger...). +func (c *TCPClient) reconnect() error { + c.lock.Lock() + defer c.lock.Unlock() + + raddr := c.TCPConn.RemoteAddr() + conn, err := net.DialTCP(raddr.Network(), nil, raddr.(*net.TCPAddr)) + if err != nil { + return err + } + + c.TCPConn.Close() + c.TCPConn = conn + return nil +} + +// ---------------------------------------------------------------------------- + +// Read wraps net.TCPConn's Read method with reconnect capabilities. +// +// It will return ErrMaxRetries if the retry limit is reached. 
+func (c *TCPClient) Read(b []byte) (int, error) { + c.lock.RLock() + defer c.lock.RUnlock() + + disconnected := false + + t := c.retryInterval + for i := 0; i < c.maxRetries; i++ { + if disconnected { + time.Sleep(t) + t *= 2 + c.lock.RUnlock() + if err := c.reconnect(); err != nil { + switch e := err.(type) { + case *net.OpError: + if errno(e.Err) == syscall.ECONNREFUSED { + disconnected = true + c.lock.RLock() + continue + } + return -1, err + default: + return -1, err + } + } else { + disconnected = false + } + c.lock.RLock() + } + n, err := c.TCPConn.Read(b) + if err == nil { + return n, err + } + switch e := err.(type) { + case *net.OpError: + if errno(e.Err) == syscall.ECONNRESET || + errno(e.Err) == syscall.EPIPE { + disconnected = true + } else { + return n, err + } + default: + if err.Error() == "EOF" { + disconnected = true + } else { + return n, err + } + } + t *= 2 + } + + return -1, ErrMaxRetries +} + +// ReadFrom wraps net.TCPConn's ReadFrom method with reconnect capabilities. +// +// It will return ErrMaxRetries if the retry limit is reached. +func (c *TCPClient) ReadFrom(r io.Reader) (int64, error) { + c.lock.RLock() + defer c.lock.RUnlock() + + disconnected := false + + t := c.retryInterval + for i := 0; i < c.maxRetries; i++ { + if disconnected { + time.Sleep(t) + t *= 2 + c.lock.RUnlock() + if err := c.reconnect(); err != nil { + switch e := err.(type) { + case *net.OpError: + if errno(e.Err) == syscall.ECONNREFUSED { + disconnected = true + c.lock.RLock() + continue + } + return -1, err + default: + return -1, err + } + } else { + disconnected = false + } + c.lock.RLock() + } + n, err := c.TCPConn.ReadFrom(r) + if err == nil { + return n, err + } + switch e := err.(type) { + case *net.OpError: + if errno(e.Err) == syscall.ECONNRESET || + errno(e.Err) == syscall.EPIPE { + disconnected = true + } else { + return n, err + } + default: + if err.Error() == "EOF" { + disconnected = true + } else { + return n, err + } + } + t *= 2 + } + + return -1, ErrMaxRetries +} + +// Write wraps net.TCPConn's Write method with reconnect capabilities. +// +// It will return ErrMaxRetries if the retry limit is reached. 
+func (c *TCPClient) Write(b []byte) (int, error) { + c.lock.RLock() + defer c.lock.RUnlock() + + disconnected := false + + t := c.retryInterval + for i := 0; i < c.maxRetries; i++ { + if disconnected { + time.Sleep(t) + t *= 2 + c.lock.RUnlock() + if err := c.reconnect(); err != nil { + switch e := err.(type) { + case *net.OpError: + if errno(e.Err) == syscall.ECONNREFUSED { + disconnected = true + c.lock.RLock() + continue + } + return -1, err + default: + return -1, err + } + } else { + disconnected = false + } + c.lock.RLock() + } + n, err := c.TCPConn.Write(b) + if err == nil { + return n, err + } + switch e := err.(type) { + case *net.OpError: + if errno(e.Err) == syscall.ECONNRESET || + errno(e.Err) == syscall.EPIPE { + disconnected = true + } else { + return n, err + } + default: + return n, err + } + } + + return -1, ErrMaxRetries +} + +func errno(err error) syscall.Errno { + switch v := err.(type) { + case syscall.Errno: + return v + case *os.SyscallError: + if errno, ok := v.Err.(syscall.Errno); ok { + return errno + } + } + + return syscall.Errno(0x0) +} diff --git a/vendor/github.com/TykTechnologies/gojsonschema/.gitignore b/vendor/github.com/TykTechnologies/gojsonschema/.gitignore new file mode 100644 index 00000000000..c1e0636fd4d --- /dev/null +++ b/vendor/github.com/TykTechnologies/gojsonschema/.gitignore @@ -0,0 +1 @@ +*.sw[nop] diff --git a/vendor/github.com/TykTechnologies/gojsonschema/.travis.yml b/vendor/github.com/TykTechnologies/gojsonschema/.travis.yml new file mode 100644 index 00000000000..9cc01e8abd2 --- /dev/null +++ b/vendor/github.com/TykTechnologies/gojsonschema/.travis.yml @@ -0,0 +1,7 @@ +language: go +go: + - 1.3 +before_install: + - go get github.com/sigu-399/gojsonreference + - go get github.com/sigu-399/gojsonpointer + - go get github.com/stretchr/testify/assert diff --git a/vendor/github.com/TykTechnologies/gojsonschema/LICENSE-APACHE-2.0.txt b/vendor/github.com/TykTechnologies/gojsonschema/LICENSE-APACHE-2.0.txt new file mode 100644 index 00000000000..55ede8a42cc --- /dev/null +++ b/vendor/github.com/TykTechnologies/gojsonschema/LICENSE-APACHE-2.0.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2015 xeipuuv + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/TykTechnologies/gojsonschema/README.md b/vendor/github.com/TykTechnologies/gojsonschema/README.md
new file mode 100644
index 00000000000..127bdd16804
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/gojsonschema/README.md
@@ -0,0 +1,236 @@
+[![Build Status](https://travis-ci.org/xeipuuv/gojsonschema.svg)](https://travis-ci.org/xeipuuv/gojsonschema)
+
+# gojsonschema
+
+## Description
+
+A Go implementation of JSON Schema, based on IETF's draft v4.
+
+References:
+
+* http://json-schema.org
+* http://json-schema.org/latest/json-schema-core.html
+* http://json-schema.org/latest/json-schema-validation.html
+
+## Installation
+
+```
+go get github.com/xeipuuv/gojsonschema
+```
+
+Dependencies:
+* [github.com/xeipuuv/gojsonpointer](https://github.com/xeipuuv/gojsonpointer)
+* [github.com/xeipuuv/gojsonreference](https://github.com/xeipuuv/gojsonreference)
+* [github.com/stretchr/testify/assert](https://github.com/stretchr/testify#assert-package)
+
+## Usage
+
+### Example
+
+```go
+package main
+
+import (
+    "fmt"
+    "github.com/xeipuuv/gojsonschema"
+)
+
+func main() {
+
+    schemaLoader := gojsonschema.NewReferenceLoader("file:///home/me/schema.json")
+    documentLoader := gojsonschema.NewReferenceLoader("file:///home/me/document.json")
+
+    result, err := gojsonschema.Validate(schemaLoader, documentLoader)
+    if err != nil {
+        panic(err.Error())
+    }
+
+    if result.Valid() {
+        fmt.Printf("The document is valid\n")
+    } else {
+        fmt.Printf("The document is not valid. See errors:\n")
+        for _, desc := range result.Errors() {
+            fmt.Printf("- %s\n", desc)
+        }
+    }
+}
+```
+
+#### Loaders
+
+There are various ways to load your JSON data.
+In order to load your schemas and documents,
+first declare an appropriate loader:
+
+* Web / HTTP, using a reference:
+
+```go
+loader := gojsonschema.NewReferenceLoader("http://www.some_host.com/schema.json")
+```
+
+* Local file, using a reference:
+
+```go
+loader := gojsonschema.NewReferenceLoader("file:///home/me/schema.json")
+```
+
+References use the URI scheme; the prefix (file://) and a full path to the file are required.
+
+* JSON strings:
+
+```go
+loader := gojsonschema.NewStringLoader(`{"type": "string"}`)
+```
+
+* Custom Go types:
+
+```go
+m := map[string]interface{}{"type": "string"}
+loader := gojsonschema.NewGoLoader(m)
+```
+
+And
+
+```go
+type Root struct {
+    Users []User `json:"users"`
+}
+
+type User struct {
+    Name string `json:"name"`
+}
+
+...
+
+data := Root{}
+data.Users = append(data.Users, User{"John"})
+data.Users = append(data.Users, User{"Sophia"})
+data.Users = append(data.Users, User{"Bill"})
+
+loader := gojsonschema.NewGoLoader(data)
+```
+
+#### Validation
+
+Once the loaders are set, validation is easy:
+
+```go
+result, err := gojsonschema.Validate(schemaLoader, documentLoader)
+```
+
+Alternatively, you might want to load a schema only once and reuse it across multiple validations:
+
+```go
+schema, err := gojsonschema.NewSchema(schemaLoader)
+...
+result1, err := schema.Validate(documentLoader1)
+...
+result2, err := schema.Validate(documentLoader2)
+...
+// etc ...
+```
+
+To check the result:
+
+```go
+    if result.Valid() {
+        fmt.Printf("The document is valid\n")
+    } else {
+        fmt.Printf("The document is not valid. 
See errors:\n")
+        for _, err := range result.Errors() {
+            // Err implements the ResultError interface
+            fmt.Printf("- %s\n", err)
+        }
+    }
+```
+
+## Working with Errors
+
+The library handles string error codes, which you can customize by creating your own gojsonschema.locale and setting it:
+```go
+gojsonschema.Locale = YourCustomLocale{}
+```
+
+However, each error contains additional contextual information.
+
+**err.Type()**: *string* Returns the "type" of error that occurred. Note that you can also type-check; see below.
+
+Note: an error of type RequiredError has an err.Type() return value of "required":
+
+       "required": RequiredError
+       "invalid_type": InvalidTypeError
+       "number_any_of": NumberAnyOfError
+       "number_one_of": NumberOneOfError
+       "number_all_of": NumberAllOfError
+       "number_not": NumberNotError
+       "missing_dependency": MissingDependencyError
+       "internal": InternalError
+       "enum": EnumError
+       "array_no_additional_items": ArrayNoAdditionalItemsError
+       "array_min_items": ArrayMinItemsError
+       "array_max_items": ArrayMaxItemsError
+       "unique": ItemsMustBeUniqueError
+       "array_min_properties": ArrayMinPropertiesError
+       "array_max_properties": ArrayMaxPropertiesError
+       "additional_property_not_allowed": AdditionalPropertyNotAllowedError
+       "invalid_property_pattern": InvalidPropertyPatternError
+       "string_gte": StringLengthGTEError
+       "string_lte": StringLengthLTEError
+       "pattern": DoesNotMatchPatternError
+       "multiple_of": MultipleOfError
+       "number_gte": NumberGTEError
+       "number_gt": NumberGTError
+       "number_lte": NumberLTEError
+       "number_lt": NumberLTError
+
+**err.Value()**: *interface{}* Returns the value given.
+
+**err.Context()**: *gojsonschema.jsonContext* Returns the context. This has a String() method that will print something like this: (root).firstName
+
+**err.Field()**: *string* Returns the field name in the format firstName, or for embedded properties, person.firstName. This returns the same as the String() method on *err.Context()* but removes the (root). prefix.
+
+**err.Description()**: *string* The error description. This is based on the locale you are using. See the beginning of this section for overriding the locale with a custom implementation.
+
+**err.Details()**: *gojsonschema.ErrorDetails* Returns a map[string]interface{} of additional error details specific to the error. For example, GTE errors will have a "min" value and LTE errors a "max" value. See errors.go for a full description of all the error details. Every error always contains a "field" key that holds the value of *err.Field()*.
+
+Note that in most cases err.Details() will be used to generate replacement strings in your locales rather than being used directly. These strings follow the text/template format, i.e.
+```
+{{.field}} must be greater than or equal to {{.min}}
+```
+
+## Formats
+
+JSON Schema allows for an optional "format" property to validate strings against well-known formats. gojsonschema ships with all of the formats defined in the spec, which you can use like this:
+```json
+{"type": "string", "format": "email"}
+```
+Available formats: date-time, hostname, email, ipv4, ipv6, uri, uuid, regex.
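+
+As a brief sketch tying the sections above together (the schema and document here are illustrative), a failed format check surfaces as a *DoesNotMatchFormatError* whose Details() carry the format name:
+
+```go
+schemaLoader := gojsonschema.NewStringLoader(`{"type": "string", "format": "email"}`)
+documentLoader := gojsonschema.NewGoLoader("not-an-email")
+
+result, err := gojsonschema.Validate(schemaLoader, documentLoader)
+if err != nil {
+    panic(err.Error())
+}
+
+for _, resultErr := range result.Errors() {
+    // Type-check the concrete error and read its Details(), as described
+    // under "Working with Errors" above
+    if _, ok := resultErr.(*gojsonschema.DoesNotMatchFormatError); ok {
+        fmt.Printf("%s does not match format %v\n", resultErr.Field(), resultErr.Details()["format"])
+    }
+}
+```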
+
+For repetitive or more complex formats, you can create custom format checkers and add them to gojsonschema like this:
+
+```go
+// Define the format checker
+type RoleFormatChecker struct {}
+
+// Ensure it meets the gojsonschema.FormatChecker interface
+func (f RoleFormatChecker) IsFormat(input string) bool {
+    return strings.HasPrefix(input, "ROLE_")
+}
+
+// Add it to the library
+gojsonschema.FormatCheckers.Add("role", RoleFormatChecker{})
+```
+
+Now to use it in your JSON schema:
+```json
+{"type": "string", "format": "role"}
+```
+
+## Uses
+
+gojsonschema uses the following test suite:
+
+https://github.com/json-schema/JSON-Schema-Test-Suite
diff --git a/vendor/github.com/TykTechnologies/gojsonschema/errors.go b/vendor/github.com/TykTechnologies/gojsonschema/errors.go
new file mode 100644
index 00000000000..1090decb9c4
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/gojsonschema/errors.go
@@ -0,0 +1,306 @@
+package gojsonschema
+
+import (
+	"bytes"
+	"sync"
+	"text/template"
+)
+
+var errorTemplates errorTemplate = errorTemplate{template.New("errors-new"), sync.RWMutex{}}
+
+// template.Template is not thread-safe for writing, so some locking is done
+// sync.RWMutex is used for efficiently locking when new templates are created
+type errorTemplate struct {
+	*template.Template
+	sync.RWMutex
+}
+
+type (
+	// RequiredError. ErrorDetails: property string
+	RequiredError struct {
+		ResultErrorFields
+	}
+
+	// InvalidTypeError. ErrorDetails: expected, given
+	InvalidTypeError struct {
+		ResultErrorFields
+	}
+
+	// NumberAnyOfError. ErrorDetails: -
+	NumberAnyOfError struct {
+		ResultErrorFields
+	}
+
+	// NumberOneOfError. ErrorDetails: -
+	NumberOneOfError struct {
+		ResultErrorFields
+	}
+
+	// NumberAllOfError. ErrorDetails: -
+	NumberAllOfError struct {
+		ResultErrorFields
+	}
+
+	// NumberNotError. ErrorDetails: -
+	NumberNotError struct {
+		ResultErrorFields
+	}
+
+	// MissingDependencyError. ErrorDetails: dependency
+	MissingDependencyError struct {
+		ResultErrorFields
+	}
+
+	// InternalError. ErrorDetails: error
+	InternalError struct {
+		ResultErrorFields
+	}
+
+	// EnumError. ErrorDetails: allowed
+	EnumError struct {
+		ResultErrorFields
+	}
+
+	// ArrayNoAdditionalItemsError. ErrorDetails: -
+	ArrayNoAdditionalItemsError struct {
+		ResultErrorFields
+	}
+
+	// ArrayMinItemsError. ErrorDetails: min
+	ArrayMinItemsError struct {
+		ResultErrorFields
+	}
+
+	// ArrayMaxItemsError. ErrorDetails: max
+	ArrayMaxItemsError struct {
+		ResultErrorFields
+	}
+
+	// ItemsMustBeUniqueError. ErrorDetails: type
+	ItemsMustBeUniqueError struct {
+		ResultErrorFields
+	}
+
+	// ArrayMinPropertiesError. ErrorDetails: min
+	ArrayMinPropertiesError struct {
+		ResultErrorFields
+	}
+
+	// ArrayMaxPropertiesError. ErrorDetails: max
+	ArrayMaxPropertiesError struct {
+		ResultErrorFields
+	}
+
+	// AdditionalPropertyNotAllowedError. ErrorDetails: property
+	AdditionalPropertyNotAllowedError struct {
+		ResultErrorFields
+	}
+
+	// InvalidPropertyPatternError. ErrorDetails: property, pattern
+	InvalidPropertyPatternError struct {
+		ResultErrorFields
+	}
+
+	// StringLengthGTEError. ErrorDetails: min
+	StringLengthGTEError struct {
+		ResultErrorFields
+	}
+
+	// StringLengthLTEError. ErrorDetails: max
+	StringLengthLTEError struct {
+		ResultErrorFields
+	}
+
+	// StringNumericGTEError. ErrorDetails: min_numeric
+	StringNumericGTEError struct {
+		ResultErrorFields
+	}
+
+	// StringSpecialGTEError. 
ErrorDetails: min_special + StringSpecialGTEError struct { + ResultErrorFields + } + + // StringMultiCaseError. ErrorDetails: multi_case + StringMultiCaseError struct { + ResultErrorFields + } + + // StringSequentialError. ErrorDetails: disable_sequential + StringSequentialError struct { + ResultErrorFields + } + + // DoesNotMatchPatternError. ErrorDetails: pattern + DoesNotMatchPatternError struct { + ResultErrorFields + } + + // DoesNotMatchFormatError. ErrorDetails: format + DoesNotMatchFormatError struct { + ResultErrorFields + } + + // MultipleOfError. ErrorDetails: multiple + MultipleOfError struct { + ResultErrorFields + } + + // NumberGTEError. ErrorDetails: min + NumberGTEError struct { + ResultErrorFields + } + + // NumberGTError. ErrorDetails: min + NumberGTError struct { + ResultErrorFields + } + + // NumberLTEError. ErrorDetails: max + NumberLTEError struct { + ResultErrorFields + } + + // NumberLTError. ErrorDetails: max + NumberLTError struct { + ResultErrorFields + } +) + +// newError takes a ResultError type and sets the type, context, description, details, value, and field +func newError(err ResultError, context *jsonContext, value interface{}, locale locale, details ErrorDetails) { + var t string + var d string + switch err.(type) { + case *RequiredError: + t = "required" + d = locale.Required() + case *InvalidTypeError: + t = "invalid_type" + d = locale.InvalidType() + case *NumberAnyOfError: + t = "number_any_of" + d = locale.NumberAnyOf() + case *NumberOneOfError: + t = "number_one_of" + d = locale.NumberOneOf() + case *NumberAllOfError: + t = "number_all_of" + d = locale.NumberAllOf() + case *NumberNotError: + t = "number_not" + d = locale.NumberNot() + case *MissingDependencyError: + t = "missing_dependency" + d = locale.MissingDependency() + case *InternalError: + t = "internal" + d = locale.Internal() + case *EnumError: + t = "enum" + d = locale.Enum() + case *ArrayNoAdditionalItemsError: + t = "array_no_additional_items" + d = locale.ArrayNoAdditionalItems() + case *ArrayMinItemsError: + t = "array_min_items" + d = locale.ArrayMinItems() + case *ArrayMaxItemsError: + t = "array_max_items" + d = locale.ArrayMaxItems() + case *ItemsMustBeUniqueError: + t = "unique" + d = locale.Unique() + case *ArrayMinPropertiesError: + t = "array_min_properties" + d = locale.ArrayMinProperties() + case *ArrayMaxPropertiesError: + t = "array_max_properties" + d = locale.ArrayMaxProperties() + case *AdditionalPropertyNotAllowedError: + t = "additional_property_not_allowed" + d = locale.AdditionalPropertyNotAllowed() + case *InvalidPropertyPatternError: + t = "invalid_property_pattern" + d = locale.InvalidPropertyPattern() + case *StringLengthGTEError: + t = "string_gte" + d = locale.StringGTE() + case *StringLengthLTEError: + t = "string_lte" + d = locale.StringLTE() + case *StringNumericGTEError: + t = "numeric_gte" + d = locale.NumericGTE() + case *StringSpecialGTEError: + t = "special_gte" + d = locale.SpecialGTE() + case *StringMultiCaseError: + t = "multi_case" + d = locale.MultiCase() + case *StringSequentialError: + t = "disable_sequential" + d = locale.Sequential() + case *DoesNotMatchPatternError: + t = "pattern" + d = locale.DoesNotMatchPattern() + case *DoesNotMatchFormatError: + t = "format" + d = locale.DoesNotMatchFormat() + case *MultipleOfError: + t = "multiple_of" + d = locale.MultipleOf() + case *NumberGTEError: + t = "number_gte" + d = locale.NumberGTE() + case *NumberGTError: + t = "number_gt" + d = locale.NumberGT() + case *NumberLTEError: + t = "number_lte" + 
d = locale.NumberLTE()
+	case *NumberLTError:
+		t = "number_lt"
+		d = locale.NumberLT()
+	}
+
+	err.SetType(t)
+	err.SetContext(context)
+	err.SetValue(value)
+	err.SetDetails(details)
+	details["field"] = err.Field()
+	err.SetDescription(formatErrorDescription(d, details))
+}
+
+// formatErrorDescription takes a string in the default text/template
+// format and converts it to a string with replacements. The fields come
+// from the ErrorDetails struct and vary for each type of error.
+func formatErrorDescription(s string, details ErrorDetails) string {
+
+	var tpl *template.Template
+	var descrAsBuffer bytes.Buffer
+	var err error
+
+	errorTemplates.RLock()
+	tpl = errorTemplates.Lookup(s)
+	errorTemplates.RUnlock()
+
+	if tpl == nil {
+		errorTemplates.Lock()
+		tpl = errorTemplates.New(s)
+
+		tpl, err = tpl.Parse(s)
+		errorTemplates.Unlock()
+
+		if err != nil {
+			return err.Error()
+		}
+	}
+
+	err = tpl.Execute(&descrAsBuffer, details)
+	if err != nil {
+		return err.Error()
+	}
+
+	return descrAsBuffer.String()
+}
diff --git a/vendor/github.com/TykTechnologies/gojsonschema/format_checkers.go b/vendor/github.com/TykTechnologies/gojsonschema/format_checkers.go
new file mode 100644
index 00000000000..c7214b0455b
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/gojsonschema/format_checkers.go
@@ -0,0 +1,194 @@
+package gojsonschema
+
+import (
+	"net"
+	"net/url"
+	"reflect"
+	"regexp"
+	"strings"
+	"time"
+)
+
+type (
+	// FormatChecker is the interface all formatters added to FormatCheckerChain must implement
+	FormatChecker interface {
+		IsFormat(input string) bool
+	}
+
+	// FormatCheckerChain holds the formatters
+	FormatCheckerChain struct {
+		formatters map[string]FormatChecker
+	}
+
+	// EmailFormatChecker verifies email address formats
+	EmailFormatChecker struct{}
+
+	// IPV4FormatChecker verifies IP addresses in the ipv4 format
+	IPV4FormatChecker struct{}
+
+	// IPV6FormatChecker verifies IP addresses in the ipv6 format
+	IPV6FormatChecker struct{}
+
+	// DateTimeFormatChecker verifies date/time formats per RFC3339 5.6
+	//
+	// Valid formats:
+	//		Partial Time: HH:MM:SS
+	//		Full Date: YYYY-MM-DD
+	//		Full Time: HH:MM:SSZ-07:00
+	//		Date Time: YYYY-MM-DDTHH:MM:SSZ-0700
+	//
+	//	Where
+	//		YYYY = 4DIGIT year
+	//		MM = 2DIGIT month ; 01-12
+	//		DD = 2DIGIT day-month ; 01-28, 01-29, 01-30, 01-31 based on month/year
+	//		HH = 2DIGIT hour ; 00-23
+	//		MM = 2DIGIT ; 00-59
+	//		SS = 2DIGIT ; 00-58, 00-60 based on leap second rules
+	//		T = Literal
+	//		Z = Literal
+	//
+	//	Note: Nanoseconds are also supported in all formats
+	//
+	// http://tools.ietf.org/html/rfc3339#section-5.6
+	DateTimeFormatChecker struct{}
+
+	// URIFormatChecker validates a URI with a valid Scheme per RFC3986
+	URIFormatChecker struct{}
+
+	// HostnameFormatChecker validates that a hostname is in the correct format
+	HostnameFormatChecker struct{}
+
+	// UUIDFormatChecker validates that a UUID is in the correct format
+	UUIDFormatChecker struct{}
+
+	// RegexFormatChecker validates that a regex is in the correct format
+	RegexFormatChecker struct{}
+)
+
+var (
+	// FormatCheckers holds the registered format checkers, and is a public
+	// variable so library users can add custom checkers
+	FormatCheckers = FormatCheckerChain{
+		formatters: map[string]FormatChecker{
+			"date-time": DateTimeFormatChecker{},
+			"hostname":  HostnameFormatChecker{},
+			"email":     EmailFormatChecker{},
+			"ipv4":      IPV4FormatChecker{},
+			"ipv6":      IPV6FormatChecker{},
+			"uri":       URIFormatChecker{},
+			"uuid":      UUIDFormatChecker{},
+			"regex":     RegexFormatChecker{},
+		},
+	}
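+
+	// A usage sketch for this chain (illustrative only; customChecker stands
+	// for any type implementing the FormatChecker interface above):
+	//
+	//	FormatCheckers.Has("email")                 // true
+	//	FormatCheckers.IsFormat("ipv4", "10.0.0.1") // true
+	//	FormatCheckers.Add("custom", customChecker{})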
+ + // Regex credit: https://github.com/asaskevich/govalidator + rxEmail = regexp.MustCompile("^(((([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|((\\x22)((((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(([\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(\\([\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(\\x22)))@((([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$") + + // Regex credit: https://www.socketloop.com/tutorials/golang-validate-hostname + rxHostname = regexp.MustCompile(`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$`) + + rxUUID = regexp.MustCompile("^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$") +) + +// Add adds a FormatChecker to the FormatCheckerChain +// The name used will be the value used for the format key in your json schema +func (c *FormatCheckerChain) Add(name string, f FormatChecker) *FormatCheckerChain { + c.formatters[name] = f + + return c +} + +// Remove deletes a FormatChecker from the FormatCheckerChain (if it exists) +func (c *FormatCheckerChain) Remove(name string) *FormatCheckerChain { + delete(c.formatters, name) + + return c +} + +// Has checks to see if the FormatCheckerChain holds a FormatChecker with the given name +func (c *FormatCheckerChain) Has(name string) bool { + _, ok := c.formatters[name] + + return ok +} + +// IsFormat will check an input against a FormatChecker with the given name +// to see if it is the correct format +func (c *FormatCheckerChain) IsFormat(name string, input interface{}) bool { + f, ok := c.formatters[name] + + if !ok { + return false + } + + if !isKind(input, reflect.String) { + return false + } + + inputString := input.(string) + + return f.IsFormat(inputString) +} + +func (f EmailFormatChecker) IsFormat(input string) bool { + return rxEmail.MatchString(input) +} + +// Credit: https://github.com/asaskevich/govalidator +func (f IPV4FormatChecker) IsFormat(input string) bool { + ip := net.ParseIP(input) + return ip != nil && strings.Contains(input, ".") +} + +// Credit: https://github.com/asaskevich/govalidator +func (f IPV6FormatChecker) IsFormat(input string) bool { + ip := net.ParseIP(input) + return ip != nil && strings.Contains(input, ":") +} + +func (f DateTimeFormatChecker) IsFormat(input string) bool { + formats := []string{ + "15:04:05", + "15:04:05Z07:00", + "2006-01-02", + time.RFC3339, + time.RFC3339Nano, + } + + for _, format := range formats { + if _, err := time.Parse(format, input); err == nil { + return true + } + } + + return false +} + +func (f URIFormatChecker) IsFormat(input string) bool { + 
u, err := url.Parse(input) + if err != nil || u.Scheme == "" { + return false + } + + return true +} + +func (f HostnameFormatChecker) IsFormat(input string) bool { + return rxHostname.MatchString(input) && len(input) < 256 +} + +func (f UUIDFormatChecker) IsFormat(input string) bool { + return rxUUID.MatchString(input) +} + +// IsFormat implements FormatChecker interface. +func (f RegexFormatChecker) IsFormat(input string) bool { + if input == "" { + return true + } + _, err := regexp.Compile(input) + if err != nil { + return false + } + return true +} diff --git a/vendor/github.com/TykTechnologies/gojsonschema/glide.yaml b/vendor/github.com/TykTechnologies/gojsonschema/glide.yaml new file mode 100644 index 00000000000..7aef8c0951d --- /dev/null +++ b/vendor/github.com/TykTechnologies/gojsonschema/glide.yaml @@ -0,0 +1,12 @@ +package: github.com/xeipuuv/gojsonschema +license: Apache 2.0 +import: +- package: github.com/xeipuuv/gojsonschema + +- package: github.com/xeipuuv/gojsonpointer + +- package: github.com/xeipuuv/gojsonreference + +- package: github.com/stretchr/testify/assert + version: ^1.1.3 + diff --git a/vendor/github.com/TykTechnologies/gojsonschema/internalLog.go b/vendor/github.com/TykTechnologies/gojsonschema/internalLog.go new file mode 100644 index 00000000000..4ef7a8d03e7 --- /dev/null +++ b/vendor/github.com/TykTechnologies/gojsonschema/internalLog.go @@ -0,0 +1,37 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Very simple log wrapper. +// Used for debugging/testing purposes. +// +// created 01-01-2015 + +package gojsonschema + +import ( + "log" +) + +const internalLogEnabled = false + +func internalLog(format string, v ...interface{}) { + log.Printf(format, v...) +} diff --git a/vendor/github.com/TykTechnologies/gojsonschema/jsonContext.go b/vendor/github.com/TykTechnologies/gojsonschema/jsonContext.go new file mode 100644 index 00000000000..fcc8d9d6f1f --- /dev/null +++ b/vendor/github.com/TykTechnologies/gojsonschema/jsonContext.go @@ -0,0 +1,72 @@ +// Copyright 2013 MongoDB, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// author tolsen +// author-github https://github.com/tolsen +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Implements a persistent (immutable w/ shared structure) singly-linked list of strings for the purpose of storing a json context +// +// created 04-09-2013 + +package gojsonschema + +import "bytes" + +// jsonContext implements a persistent linked-list of strings +type jsonContext struct { + head string + tail *jsonContext +} + +func newJsonContext(head string, tail *jsonContext) *jsonContext { + return &jsonContext{head, tail} +} + +// String displays the context in reverse. +// This plays well with the data structure's persistent nature with +// Cons and a json document's tree structure. +func (c *jsonContext) String(del ...string) string { + byteArr := make([]byte, 0, c.stringLen()) + buf := bytes.NewBuffer(byteArr) + c.writeStringToBuffer(buf, del) + + return buf.String() +} + +func (c *jsonContext) stringLen() int { + length := 0 + if c.tail != nil { + length = c.tail.stringLen() + 1 // add 1 for "." + } + + length += len(c.head) + return length +} + +func (c *jsonContext) writeStringToBuffer(buf *bytes.Buffer, del []string) { + if c.tail != nil { + c.tail.writeStringToBuffer(buf, del) + + if len(del) > 0 { + buf.WriteString(del[0]) + } else { + buf.WriteString(".") + } + } + + buf.WriteString(c.head) +} diff --git a/vendor/github.com/TykTechnologies/gojsonschema/jsonLoader.go b/vendor/github.com/TykTechnologies/gojsonschema/jsonLoader.go new file mode 100644 index 00000000000..cab6ed05bdc --- /dev/null +++ b/vendor/github.com/TykTechnologies/gojsonschema/jsonLoader.go @@ -0,0 +1,340 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Different strategies to load JSON files. +// Includes References (file and HTTP), JSON strings and Go types. 
+// +// created 01-02-2015 + +package gojsonschema + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "io/ioutil" + "net/http" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/xeipuuv/gojsonreference" +) + +var osFS = osFileSystem(os.Open) + +// JSON loader interface + +type JSONLoader interface { + JsonSource() interface{} + LoadJSON() (interface{}, error) + JsonReference() (gojsonreference.JsonReference, error) + LoaderFactory() JSONLoaderFactory +} + +type JSONLoaderFactory interface { + New(source string) JSONLoader +} + +type DefaultJSONLoaderFactory struct { +} + +type FileSystemJSONLoaderFactory struct { + fs http.FileSystem +} + +func (d DefaultJSONLoaderFactory) New(source string) JSONLoader { + return &jsonReferenceLoader{ + fs: osFS, + source: source, + } +} + +func (f FileSystemJSONLoaderFactory) New(source string) JSONLoader { + return &jsonReferenceLoader{ + fs: f.fs, + source: source, + } +} + +// osFileSystem is a functional wrapper for os.Open that implements http.FileSystem. +type osFileSystem func(string) (*os.File, error) + +func (o osFileSystem) Open(name string) (http.File, error) { + return o(name) +} + +// JSON Reference loader +// references are used to load JSONs from files and HTTP + +type jsonReferenceLoader struct { + fs http.FileSystem + source string +} + +func (l *jsonReferenceLoader) JsonSource() interface{} { + return l.source +} + +func (l *jsonReferenceLoader) JsonReference() (gojsonreference.JsonReference, error) { + return gojsonreference.NewJsonReference(l.JsonSource().(string)) +} + +func (l *jsonReferenceLoader) LoaderFactory() JSONLoaderFactory { + return &FileSystemJSONLoaderFactory{ + fs: l.fs, + } +} + +// NewReferenceLoader returns a JSON reference loader using the given source and the local OS file system. +func NewReferenceLoader(source string) *jsonReferenceLoader { + return &jsonReferenceLoader{ + fs: osFS, + source: source, + } +} + +// NewReferenceLoaderFileSystem returns a JSON reference loader using the given source and file system. 
+func NewReferenceLoaderFileSystem(source string, fs http.FileSystem) *jsonReferenceLoader { + return &jsonReferenceLoader{ + fs: fs, + source: source, + } +} + +func (l *jsonReferenceLoader) LoadJSON() (interface{}, error) { + + var err error + + reference, err := gojsonreference.NewJsonReference(l.JsonSource().(string)) + if err != nil { + return nil, err + } + + refToUrl := reference + refToUrl.GetUrl().Fragment = "" + + var document interface{} + + if reference.HasFileScheme { + + filename := strings.Replace(refToUrl.GetUrl().Path, "file://", "", -1) + if runtime.GOOS == "windows" { + // on Windows, a file URL may have an extra leading slash, use slashes + // instead of backslashes, and have spaces escaped + if strings.HasPrefix(filename, "/") { + filename = filename[1:] + } + filename = filepath.FromSlash(filename) + } + + document, err = l.loadFromFile(filename) + if err != nil { + return nil, err + } + + } else { + + document, err = l.loadFromHTTP(refToUrl.String()) + if err != nil { + return nil, err + } + + } + + return document, nil + +} + +func (l *jsonReferenceLoader) loadFromHTTP(address string) (interface{}, error) { + + resp, err := http.Get(address) + if err != nil { + return nil, err + } + + // must return HTTP Status 200 OK + if resp.StatusCode != http.StatusOK { + return nil, errors.New(formatErrorDescription(Locale.httpBadStatus(), ErrorDetails{"status": resp.Status})) + } + + bodyBuff, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + return decodeJsonUsingNumber(bytes.NewReader(bodyBuff)) + +} + +func (l *jsonReferenceLoader) loadFromFile(path string) (interface{}, error) { + f, err := l.fs.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + + bodyBuff, err := ioutil.ReadAll(f) + if err != nil { + return nil, err + } + + return decodeJsonUsingNumber(bytes.NewReader(bodyBuff)) + +} + +// JSON string loader + +type jsonStringLoader struct { + source string +} + +func (l *jsonStringLoader) JsonSource() interface{} { + return l.source +} + +func (l *jsonStringLoader) JsonReference() (gojsonreference.JsonReference, error) { + return gojsonreference.NewJsonReference("#") +} + +func (l *jsonStringLoader) LoaderFactory() JSONLoaderFactory { + return &DefaultJSONLoaderFactory{} +} + +func NewStringLoader(source string) *jsonStringLoader { + return &jsonStringLoader{source: source} +} + +func (l *jsonStringLoader) LoadJSON() (interface{}, error) { + + return decodeJsonUsingNumber(strings.NewReader(l.JsonSource().(string))) + +} + +// JSON bytes loader + +type jsonBytesLoader struct { + source []byte +} + +func (l *jsonBytesLoader) JsonSource() interface{} { + return l.source +} + +func (l *jsonBytesLoader) JsonReference() (gojsonreference.JsonReference, error) { + return gojsonreference.NewJsonReference("#") +} + +func (l *jsonBytesLoader) LoaderFactory() JSONLoaderFactory { + return &DefaultJSONLoaderFactory{} +} + +func NewBytesLoader(source []byte) *jsonBytesLoader { + return &jsonBytesLoader{source: source} +} + +func (l *jsonBytesLoader) LoadJSON() (interface{}, error) { + return decodeJsonUsingNumber(bytes.NewReader(l.JsonSource().([]byte))) +} + +// JSON Go (types) loader +// used to load JSONs from the code as maps, interface{}, structs ... 
+ +type jsonGoLoader struct { + source interface{} +} + +func (l *jsonGoLoader) JsonSource() interface{} { + return l.source +} + +func (l *jsonGoLoader) JsonReference() (gojsonreference.JsonReference, error) { + return gojsonreference.NewJsonReference("#") +} + +func (l *jsonGoLoader) LoaderFactory() JSONLoaderFactory { + return &DefaultJSONLoaderFactory{} +} + +func NewGoLoader(source interface{}) *jsonGoLoader { + return &jsonGoLoader{source: source} +} + +func (l *jsonGoLoader) LoadJSON() (interface{}, error) { + + // convert it to a compliant JSON first to avoid types "mismatches" + + jsonBytes, err := json.Marshal(l.JsonSource()) + if err != nil { + return nil, err + } + + return decodeJsonUsingNumber(bytes.NewReader(jsonBytes)) + +} + +type jsonIOLoader struct { + buf *bytes.Buffer +} + +func NewReaderLoader(source io.Reader) (*jsonIOLoader, io.Reader) { + buf := &bytes.Buffer{} + return &jsonIOLoader{buf: buf}, io.TeeReader(source, buf) +} + +func NewWriterLoader(source io.Writer) (*jsonIOLoader, io.Writer) { + buf := &bytes.Buffer{} + return &jsonIOLoader{buf: buf}, io.MultiWriter(source, buf) +} + +func (l *jsonIOLoader) JsonSource() interface{} { + return l.buf.String() +} + +func (l *jsonIOLoader) LoadJSON() (interface{}, error) { + return decodeJsonUsingNumber(l.buf) +} + +func (l *jsonIOLoader) JsonReference() (gojsonreference.JsonReference, error) { + return gojsonreference.NewJsonReference("#") +} + +func (l *jsonIOLoader) LoaderFactory() JSONLoaderFactory { + return &DefaultJSONLoaderFactory{} +} + +func decodeJsonUsingNumber(r io.Reader) (interface{}, error) { + + var document interface{} + + decoder := json.NewDecoder(r) + decoder.UseNumber() + + err := decoder.Decode(&document) + if err != nil { + return nil, err + } + + return document, nil + +} diff --git a/vendor/github.com/TykTechnologies/gojsonschema/locales.go b/vendor/github.com/TykTechnologies/gojsonschema/locales.go new file mode 100644 index 00000000000..1c3171d1090 --- /dev/null +++ b/vendor/github.com/TykTechnologies/gojsonschema/locales.go @@ -0,0 +1,300 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Contains const string and messages. 
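+
+// A usage sketch for the io loaders defined in jsonLoader.go above
+// (illustrative; r.Body and schemaLoader are assumed to exist in the caller).
+// NewReaderLoader tees everything read through the wrapped reader into the
+// loader's buffer, so a request body can be decoded once and still validated
+// afterwards:
+//
+//	loader, wrapped := gojsonschema.NewReaderLoader(r.Body)
+//	var payload map[string]interface{}
+//	if err := json.NewDecoder(wrapped).Decode(&payload); err != nil {
+//		return err
+//	}
+//	result, err := gojsonschema.Validate(schemaLoader, loader)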
+//
+// created 01-01-2015
+
+package gojsonschema
+
+type (
+	// locale is an interface for defining custom error strings
+	locale interface {
+		Required() string
+		InvalidType() string
+		NumberAnyOf() string
+		NumberOneOf() string
+		NumberAllOf() string
+		NumberNot() string
+		MissingDependency() string
+		Internal() string
+		Enum() string
+		ArrayNotEnoughItems() string
+		ArrayNoAdditionalItems() string
+		ArrayMinItems() string
+		ArrayMaxItems() string
+		Unique() string
+		ArrayMinProperties() string
+		ArrayMaxProperties() string
+		AdditionalPropertyNotAllowed() string
+		InvalidPropertyPattern() string
+		StringGTE() string
+		StringLTE() string
+		NumericGTE() string
+		SpecialGTE() string
+		MultiCase() string
+		Sequential() string
+		DoesNotMatchPattern() string
+		DoesNotMatchFormat() string
+		MultipleOf() string
+		NumberGTE() string
+		NumberGT() string
+		NumberLTE() string
+		NumberLT() string
+
+		// Schema validations
+		RegexPattern() string
+		GreaterThanZero() string
+		MustBeOfA() string
+		MustBeOfAn() string
+		CannotBeUsedWithout() string
+		CannotBeGT() string
+		MustBeOfType() string
+		MustBeValidRegex() string
+		MustBeValidFormat() string
+		MustBeGTEZero() string
+		KeyCannotBeGreaterThan() string
+		KeyItemsMustBeOfType() string
+		KeyItemsMustBeUnique() string
+		ReferenceMustBeCanonical() string
+		NotAValidType() string
+		Duplicated() string
+		httpBadStatus() string
+
+		// ErrorFormat
+		ErrorFormat() string
+	}
+
+	// DefaultLocale is the default locale for this package
+	DefaultLocale struct{}
+)
+
+func (l DefaultLocale) Required() string {
+	return `{{.property}} is required`
+}
+
+func (l DefaultLocale) InvalidType() string {
+	return `Invalid type. Expected: {{.expected}}, given: {{.given}}`
+}
+
+func (l DefaultLocale) NumberAnyOf() string {
+	return `Must validate at least one schema (anyOf)`
+}
+
+func (l DefaultLocale) NumberOneOf() string {
+	return `Must validate one and only one schema (oneOf)`
+}
+
+func (l DefaultLocale) NumberAllOf() string {
+	return `Must validate all the schemas (allOf)`
+}
+
+func (l DefaultLocale) NumberNot() string {
+	return `Must not validate the schema (not)`
+}
+
+func (l DefaultLocale) MissingDependency() string {
+	return `Has a dependency on {{.dependency}}`
+}
+
+func (l DefaultLocale) Internal() string {
+	return `Internal Error {{.error}}`
+}
+
+func (l DefaultLocale) Enum() string {
+	return `{{.field}} must be one of the following: {{.allowed}}`
+}
+
+func (l DefaultLocale) ArrayNoAdditionalItems() string {
+	return `No additional items allowed on array`
+}
+
+func (l DefaultLocale) ArrayNotEnoughItems() string {
+	return `Not enough items on array to match positional list of schema`
+}
+
+func (l DefaultLocale) ArrayMinItems() string {
+	return `Array must have at least {{.min}} items`
+}
+
+func (l DefaultLocale) ArrayMaxItems() string {
+	return `Array must have at most {{.max}} items`
+}
+
+func (l DefaultLocale) Unique() string {
+	return `{{.type}} items must be unique`
+}
+
+func (l DefaultLocale) ArrayMinProperties() string {
+	return `Must have at least {{.min}} properties`
+}
+
+func (l DefaultLocale) ArrayMaxProperties() string {
+	return `Must have at most {{.max}} properties`
+}
+
+func (l DefaultLocale) AdditionalPropertyNotAllowed() string {
+	return `Additional property {{.property}} is not allowed`
+}
+
+func (l DefaultLocale) InvalidPropertyPattern() string {
+	return `Property "{{.property}}" does not match pattern {{.pattern}}`
+}
+
+func (l DefaultLocale) StringGTE() string {
+	return `String length must be greater 
than or equal to {{.min}}` +} + +func (l DefaultLocale) StringLTE() string { + return `String length must be less than or equal to {{.max}}` +} + +func (l DefaultLocale) NumericGTE() string { + return `String must include at least {{.min_numeric}} numeric characters` +} + +func (l DefaultLocale) SpecialGTE() string { + return `String must include at least {{.min_special}} special characters (like '@', '$', '*' etc.)` +} + +func (l DefaultLocale) MultiCase() string { + return `String must include both lower and upper case characters` +} + +func (l DefaultLocale) Sequential() string { + return `String must not include sequential chars: {{.sequential_chars}}` +} + +func (l DefaultLocale) DoesNotMatchPattern() string { + return `Does not match pattern '{{.pattern}}'` +} + +func (l DefaultLocale) DoesNotMatchFormat() string { + return `Does not match format '{{.format}}'` +} + +func (l DefaultLocale) MultipleOf() string { + return `Must be a multiple of {{.multiple}}` +} + +func (l DefaultLocale) NumberGTE() string { + return `Must be greater than or equal to {{.min}}` +} + +func (l DefaultLocale) NumberGT() string { + return `Must be greater than {{.min}}` +} + +func (l DefaultLocale) NumberLTE() string { + return `Must be less than or equal to {{.max}}` +} + +func (l DefaultLocale) NumberLT() string { + return `Must be less than {{.max}}` +} + +// Schema validators +func (l DefaultLocale) RegexPattern() string { + return `Invalid regex pattern '{{.pattern}}'` +} + +func (l DefaultLocale) GreaterThanZero() string { + return `{{.number}} must be strictly greater than 0` +} + +func (l DefaultLocale) MustBeOfA() string { + return `{{.x}} must be of a {{.y}}` +} + +func (l DefaultLocale) MustBeOfAn() string { + return `{{.x}} must be of an {{.y}}` +} + +func (l DefaultLocale) CannotBeUsedWithout() string { + return `{{.x}} cannot be used without {{.y}}` +} + +func (l DefaultLocale) CannotBeGT() string { + return `{{.x}} cannot be greater than {{.y}}` +} + +func (l DefaultLocale) MustBeOfType() string { + return `{{.key}} must be of type {{.type}}` +} + +func (l DefaultLocale) MustBeValidRegex() string { + return `{{.key}} must be a valid regex` +} + +func (l DefaultLocale) MustBeValidFormat() string { + return `{{.key}} must be a valid format {{.given}}` +} + +func (l DefaultLocale) MustBeGTEZero() string { + return `{{.key}} must be greater than or equal to 0` +} + +func (l DefaultLocale) KeyCannotBeGreaterThan() string { + return `{{.key}} cannot be greater than {{.y}}` +} + +func (l DefaultLocale) KeyItemsMustBeOfType() string { + return `{{.key}} items must be {{.type}}` +} + +func (l DefaultLocale) KeyItemsMustBeUnique() string { + return `{{.key}} items must be unique` +} + +func (l DefaultLocale) ReferenceMustBeCanonical() string { + return `Reference {{.reference}} must be canonical` +} + +func (l DefaultLocale) NotAValidType() string { + return `{{.type}} is not a valid type -- ` +} + +func (l DefaultLocale) Duplicated() string { + return `{{.type}} type is duplicated` +} + +func (l DefaultLocale) httpBadStatus() string { + return `Could not read schema from HTTP, response status is {{.status}}` +} + +// Replacement options: field, description, context, value +func (l DefaultLocale) ErrorFormat() string { + return `{{.field}}: {{.description}}` +} + +const ( + STRING_NUMBER = "number" + STRING_ARRAY_OF_STRINGS = "array of strings" + STRING_ARRAY_OF_SCHEMAS = "array of schemas" + STRING_SCHEMA = "schema" + STRING_SCHEMA_OR_ARRAY_OF_STRINGS = "schema or array of strings" + STRING_PROPERTIES 
= "properties" + STRING_DEPENDENCY = "dependency" + STRING_PROPERTY = "property" + STRING_UNDEFINED = "undefined" + STRING_CONTEXT_ROOT = "(root)" + STRING_ROOT_SCHEMA_PROPERTY = "(root)" +) diff --git a/vendor/github.com/TykTechnologies/gojsonschema/result.go b/vendor/github.com/TykTechnologies/gojsonschema/result.go new file mode 100644 index 00000000000..6ad56ae8656 --- /dev/null +++ b/vendor/github.com/TykTechnologies/gojsonschema/result.go @@ -0,0 +1,172 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Result and ResultError implementations. +// +// created 01-01-2015 + +package gojsonschema + +import ( + "fmt" + "strings" +) + +type ( + // ErrorDetails is a map of details specific to each error. + // While the values will vary, every error will contain a "field" value + ErrorDetails map[string]interface{} + + // ResultError is the interface that library errors must implement + ResultError interface { + Field() string + SetType(string) + Type() string + SetContext(*jsonContext) + Context() *jsonContext + SetDescription(string) + Description() string + SetValue(interface{}) + Value() interface{} + SetDetails(ErrorDetails) + Details() ErrorDetails + String() string + } + + // ResultErrorFields holds the fields for each ResultError implementation. + // ResultErrorFields implements the ResultError interface, so custom errors + // can be defined by just embedding this type + ResultErrorFields struct { + errorType string // A string with the type of error (i.e. invalid_type) + context *jsonContext // Tree like notation of the part that failed the validation. ex (root).a.b ... + description string // A human readable error message + value interface{} // Value given by the JSON file that is the source of the error + details ErrorDetails + } + + Result struct { + errors []ResultError + // Scores how well the validation matched. Useful in generating + // better error messages for anyOf and oneOf. + score int + } +) + +// Field outputs the field name without the root context +// i.e. 
firstName or person.firstName instead of (root).firstName or (root).person.firstName
+func (v *ResultErrorFields) Field() string {
+	if p, ok := v.Details()["property"]; ok {
+		if str, isString := p.(string); isString {
+			return str
+		}
+	}
+
+	return strings.TrimPrefix(v.context.String(), STRING_ROOT_SCHEMA_PROPERTY+".")
+}
+
+func (v *ResultErrorFields) SetType(errorType string) {
+	v.errorType = errorType
+}
+
+func (v *ResultErrorFields) Type() string {
+	return v.errorType
+}
+
+func (v *ResultErrorFields) SetContext(context *jsonContext) {
+	v.context = context
+}
+
+func (v *ResultErrorFields) Context() *jsonContext {
+	return v.context
+}
+
+func (v *ResultErrorFields) SetDescription(description string) {
+	v.description = description
+}
+
+func (v *ResultErrorFields) Description() string {
+	return v.description
+}
+
+func (v *ResultErrorFields) SetValue(value interface{}) {
+	v.value = value
+}
+
+func (v *ResultErrorFields) Value() interface{} {
+	return v.value
+}
+
+func (v *ResultErrorFields) SetDetails(details ErrorDetails) {
+	v.details = details
+}
+
+func (v *ResultErrorFields) Details() ErrorDetails {
+	return v.details
+}
+
+func (v ResultErrorFields) String() string {
+	// as a fallback, the value is displayed Go-style
+	valueString := fmt.Sprintf("%v", v.value)
+
+	// marshal the Go value to JSON
+	if v.value == nil {
+		valueString = TYPE_NULL
+	} else {
+		if vs, err := marshalToJsonString(v.value); err == nil {
+			if vs == nil {
+				valueString = TYPE_NULL
+			} else {
+				valueString = *vs
+			}
+		}
+	}
+
+	return formatErrorDescription(Locale.ErrorFormat(), ErrorDetails{
+		"context":     v.context.String(),
+		"description": v.description,
+		"value":       valueString,
+		"field":       v.Field(),
+	})
+}
+
+func (v *Result) Valid() bool {
+	return len(v.errors) == 0
+}
+
+func (v *Result) Errors() []ResultError {
+	return v.errors
+}
+
+func (v *Result) addError(err ResultError, context *jsonContext, value interface{}, details ErrorDetails) {
+	newError(err, context, value, Locale, details)
+	v.errors = append(v.errors, err)
+	v.score -= 2 // results in a net -1 when added to the +1 we get at the end of the validation function
+}
+
+// Used to copy errors from a sub-schema to the main one
+func (v *Result) mergeErrors(otherResult *Result) {
+	v.errors = append(v.errors, otherResult.Errors()...)
+	v.score += otherResult.score
+}
+
+func (v *Result) incrementScore() {
+	v.score++
+}
diff --git a/vendor/github.com/TykTechnologies/gojsonschema/schema.go b/vendor/github.com/TykTechnologies/gojsonschema/schema.go
new file mode 100644
index 00000000000..f71212917c8
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/gojsonschema/schema.go
@@ -0,0 +1,986 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Defines Schema, the main entry to every subSchema. +// Contains the parsing logic and error checking. +// +// created 26-02-2013 + +package gojsonschema + +import ( + // "encoding/json" + "errors" + "reflect" + "regexp" + + "github.com/xeipuuv/gojsonreference" +) + +var ( + // Locale is the default locale to use + // Library users can overwrite with their own implementation + Locale locale = DefaultLocale{} +) + +func NewSchema(l JSONLoader) (*Schema, error) { + ref, err := l.JsonReference() + if err != nil { + return nil, err + } + + d := Schema{} + d.pool = newSchemaPool(l.LoaderFactory()) + d.documentReference = ref + d.referencePool = newSchemaReferencePool() + + var doc interface{} + if ref.String() != "" { + // Get document from schema pool + spd, err := d.pool.GetDocument(d.documentReference) + if err != nil { + return nil, err + } + doc = spd.Document + } else { + // Load JSON directly + doc, err = l.LoadJSON() + if err != nil { + return nil, err + } + d.pool.SetStandaloneDocument(doc) + } + + err = d.parse(doc) + if err != nil { + return nil, err + } + + return &d, nil +} + +type Schema struct { + documentReference gojsonreference.JsonReference + rootSchema *subSchema + pool *schemaPool + referencePool *schemaReferencePool +} + +func (d *Schema) parse(document interface{}) error { + d.rootSchema = &subSchema{property: STRING_ROOT_SCHEMA_PROPERTY} + return d.parseSchema(document, d.rootSchema) +} + +func (d *Schema) SetRootSchemaName(name string) { + d.rootSchema.property = name +} + +// Parses a subSchema +// +// Pretty long function ( sorry :) )... 
but pretty straightforward, repetitive and boring
+// Not much magic involved here; most of the job is to validate the key names and their values,
+// then the values are copied into the subSchema struct
+//
+func (d *Schema) parseSchema(documentNode interface{}, currentSchema *subSchema) error {
+
+	if !isKind(documentNode, reflect.Map) {
+		return errors.New(formatErrorDescription(
+			Locale.InvalidType(),
+			ErrorDetails{
+				"expected": TYPE_OBJECT,
+				"given": STRING_SCHEMA,
+			},
+		))
+	}
+
+	m := documentNode.(map[string]interface{})
+
+	if currentSchema == d.rootSchema {
+		currentSchema.ref = &d.documentReference
+	}
+
+	// $subSchema
+	if existsMapKey(m, KEY_SCHEMA) {
+		if !isKind(m[KEY_SCHEMA], reflect.String) {
+			return errors.New(formatErrorDescription(
+				Locale.InvalidType(),
+				ErrorDetails{
+					"expected": TYPE_STRING,
+					"given": KEY_SCHEMA,
+				},
+			))
+		}
+		schemaRef := m[KEY_SCHEMA].(string)
+		schemaReference, err := gojsonreference.NewJsonReference(schemaRef)
+		if err != nil {
+			return err
+		}
+		currentSchema.subSchema = &schemaReference
+	}
+
+	// $ref
+	if existsMapKey(m, KEY_REF) && !isKind(m[KEY_REF], reflect.String) {
+		return errors.New(formatErrorDescription(
+			Locale.InvalidType(),
+			ErrorDetails{
+				"expected": TYPE_STRING,
+				"given": KEY_REF,
+			},
+		))
+	}
+	if k, ok := m[KEY_REF].(string); ok {
+
+		jsonReference, err := gojsonreference.NewJsonReference(k)
+		if err != nil {
+			return err
+		}
+
+		if jsonReference.HasFullUrl {
+			currentSchema.ref = &jsonReference
+		} else {
+			inheritedReference, err := currentSchema.ref.Inherits(jsonReference)
+			if err != nil {
+				return err
+			}
+
+			currentSchema.ref = inheritedReference
+		}
+
+		if sch, ok := d.referencePool.Get(currentSchema.ref.String() + k); ok {
+			currentSchema.refSchema = sch
+
+		} else {
+			err := d.parseReference(documentNode, currentSchema, k)
+			if err != nil {
+				return err
+			}
+
+			return nil
+		}
+	}
+
+	// definitions
+	if existsMapKey(m, KEY_DEFINITIONS) {
+		if isKind(m[KEY_DEFINITIONS], reflect.Map) {
+			currentSchema.definitions = make(map[string]*subSchema)
+			for dk, dv := range m[KEY_DEFINITIONS].(map[string]interface{}) {
+				if isKind(dv, reflect.Map) {
+					newSchema := &subSchema{property: KEY_DEFINITIONS, parent: currentSchema, ref: currentSchema.ref}
+					currentSchema.definitions[dk] = newSchema
+					err := d.parseSchema(dv, newSchema)
+					if err != nil {
+						return errors.New(err.Error())
+					}
+				} else {
+					return errors.New(formatErrorDescription(
+						Locale.InvalidType(),
+						ErrorDetails{
+							"expected": STRING_ARRAY_OF_SCHEMAS,
+							"given": KEY_DEFINITIONS,
+						},
+					))
+				}
+			}
+		} else {
+			return errors.New(formatErrorDescription(
+				Locale.InvalidType(),
+				ErrorDetails{
+					"expected": STRING_ARRAY_OF_SCHEMAS,
+					"given": KEY_DEFINITIONS,
+				},
+			))
+		}
+
+	}
+
+	// id
+	if existsMapKey(m, KEY_ID) && !isKind(m[KEY_ID], reflect.String) {
+		return errors.New(formatErrorDescription(
+			Locale.InvalidType(),
+			ErrorDetails{
+				"expected": TYPE_STRING,
+				"given": KEY_ID,
+			},
+		))
+	}
+	if k, ok := m[KEY_ID].(string); ok {
+		currentSchema.id = &k
+	}
+
+	// title
+	if existsMapKey(m, KEY_TITLE) && !isKind(m[KEY_TITLE], reflect.String) {
+		return errors.New(formatErrorDescription(
+			Locale.InvalidType(),
+			ErrorDetails{
+				"expected": TYPE_STRING,
+				"given": KEY_TITLE,
+			},
+		))
+	}
+	if k, ok := m[KEY_TITLE].(string); ok {
+		currentSchema.title = &k
+	}
+
+	// description
+	if existsMapKey(m, KEY_DESCRIPTION) && !isKind(m[KEY_DESCRIPTION], reflect.String) {
+		return errors.New(formatErrorDescription(
+			Locale.InvalidType(),
+			ErrorDetails{
+				"expected": TYPE_STRING,
+				"given": KEY_DESCRIPTION,
+			},
+		))
+	}
+	if k, ok := m[KEY_DESCRIPTION].(string); ok {
+		currentSchema.description = &k
+	}
+
+	// type
+	if existsMapKey(m, KEY_TYPE) {
+		if isKind(m[KEY_TYPE], reflect.String) {
+			if k, ok := m[KEY_TYPE].(string); ok {
+				err := currentSchema.types.Add(k)
+				if err != nil {
+					return err
+				}
+			}
+		} else {
+			if isKind(m[KEY_TYPE], reflect.Slice) {
+				arrayOfTypes := m[KEY_TYPE].([]interface{})
+				for _, typeInArray := range arrayOfTypes {
+					if reflect.ValueOf(typeInArray).Kind() != reflect.String {
+						return errors.New(formatErrorDescription(
+							Locale.InvalidType(),
+							ErrorDetails{
+								"expected": TYPE_STRING + "/" + STRING_ARRAY_OF_STRINGS,
+								"given": KEY_TYPE,
+							},
+						))
+					} else {
+						err := currentSchema.types.Add(typeInArray.(string))
+						if err != nil {
+							return err
+						}
+					}
+				}
+
+			} else {
+				return errors.New(formatErrorDescription(
+					Locale.InvalidType(),
+					ErrorDetails{
+						"expected": TYPE_STRING + "/" + STRING_ARRAY_OF_STRINGS,
+						"given": KEY_TYPE,
+					},
+				))
+			}
+		}
+	}
+
+	// properties
+	if existsMapKey(m, KEY_PROPERTIES) {
+		err := d.parseProperties(m[KEY_PROPERTIES], currentSchema)
+		if err != nil {
+			return err
+		}
+	}
+
+	// additionalProperties
+	if existsMapKey(m, KEY_ADDITIONAL_PROPERTIES) {
+		if isKind(m[KEY_ADDITIONAL_PROPERTIES], reflect.Bool) {
+			currentSchema.additionalProperties = m[KEY_ADDITIONAL_PROPERTIES].(bool)
+		} else if isKind(m[KEY_ADDITIONAL_PROPERTIES], reflect.Map) {
+			newSchema := &subSchema{property: KEY_ADDITIONAL_PROPERTIES, parent: currentSchema, ref: currentSchema.ref}
+			currentSchema.additionalProperties = newSchema
+			err := d.parseSchema(m[KEY_ADDITIONAL_PROPERTIES], newSchema)
+			if err != nil {
+				return errors.New(err.Error())
+			}
+		} else {
+			return errors.New(formatErrorDescription(
+				Locale.InvalidType(),
+				ErrorDetails{
+					"expected": TYPE_BOOLEAN + "/" + STRING_SCHEMA,
+					"given": KEY_ADDITIONAL_PROPERTIES,
+				},
+			))
+		}
+	}
+
+	// patternProperties
+	if existsMapKey(m, KEY_PATTERN_PROPERTIES) {
+		if isKind(m[KEY_PATTERN_PROPERTIES], reflect.Map) {
+			patternPropertiesMap := m[KEY_PATTERN_PROPERTIES].(map[string]interface{})
+			if len(patternPropertiesMap) > 0 {
+				currentSchema.patternProperties = make(map[string]*subSchema)
+				for k, v := range patternPropertiesMap {
+					_, err := regexp.MatchString(k, "")
+					if err != nil {
+						return errors.New(formatErrorDescription(
+							Locale.RegexPattern(),
+							ErrorDetails{"pattern": k},
+						))
+					}
+					newSchema := &subSchema{property: k, parent: currentSchema, ref: currentSchema.ref}
+					err = d.parseSchema(v, newSchema)
+					if err != nil {
+						return errors.New(err.Error())
+					}
+					currentSchema.patternProperties[k] = newSchema
+				}
+			}
+		} else {
+			return errors.New(formatErrorDescription(
+				Locale.InvalidType(),
+				ErrorDetails{
+					"expected": STRING_SCHEMA,
+					"given": KEY_PATTERN_PROPERTIES,
+				},
+			))
+		}
+	}
+
+	// dependencies
+	if existsMapKey(m, KEY_DEPENDENCIES) {
+		err := d.parseDependencies(m[KEY_DEPENDENCIES], currentSchema)
+		if err != nil {
+			return err
+		}
+	}
+
+	// items
+	if existsMapKey(m, KEY_ITEMS) {
+		if isKind(m[KEY_ITEMS], reflect.Slice) {
+			for _, itemElement := range m[KEY_ITEMS].([]interface{}) {
+				if isKind(itemElement, reflect.Map) {
+					newSchema := &subSchema{parent: currentSchema, property: KEY_ITEMS}
+					newSchema.ref = currentSchema.ref
+					currentSchema.AddItemsChild(newSchema)
+					err := d.parseSchema(itemElement, newSchema)
+					if err != nil {
+						return err
+					}
+				} else {
+					return errors.New(formatErrorDescription(
+						Locale.InvalidType(),
+						ErrorDetails{
+							"expected": STRING_SCHEMA + "/" + STRING_ARRAY_OF_SCHEMAS,
+							"given": KEY_ITEMS,
+						},
+					))
+				}
+				currentSchema.itemsChildrenIsSingleSchema = false
+			}
+		} else if isKind(m[KEY_ITEMS], reflect.Map) {
+			newSchema := &subSchema{parent: currentSchema, property: KEY_ITEMS}
+			newSchema.ref = currentSchema.ref
+			currentSchema.AddItemsChild(newSchema)
+			err := d.parseSchema(m[KEY_ITEMS], newSchema)
+			if err != nil {
+				return err
+			}
+			currentSchema.itemsChildrenIsSingleSchema = true
+		} else {
+			return errors.New(formatErrorDescription(
+				Locale.InvalidType(),
+				ErrorDetails{
+					"expected": STRING_SCHEMA + "/" + STRING_ARRAY_OF_SCHEMAS,
+					"given": KEY_ITEMS,
+				},
+			))
+		}
+	}
+
+	// additionalItems
+	if existsMapKey(m, KEY_ADDITIONAL_ITEMS) {
+		if isKind(m[KEY_ADDITIONAL_ITEMS], reflect.Bool) {
+			currentSchema.additionalItems = m[KEY_ADDITIONAL_ITEMS].(bool)
+		} else if isKind(m[KEY_ADDITIONAL_ITEMS], reflect.Map) {
+			newSchema := &subSchema{property: KEY_ADDITIONAL_ITEMS, parent: currentSchema, ref: currentSchema.ref}
+			currentSchema.additionalItems = newSchema
+			err := d.parseSchema(m[KEY_ADDITIONAL_ITEMS], newSchema)
+			if err != nil {
+				return errors.New(err.Error())
+			}
+		} else {
+			return errors.New(formatErrorDescription(
+				Locale.InvalidType(),
+				ErrorDetails{
+					"expected": TYPE_BOOLEAN + "/" + STRING_SCHEMA,
+					"given": KEY_ADDITIONAL_ITEMS,
+				},
+			))
+		}
+	}
+
+	// validation : number / integer
+
+	if existsMapKey(m, KEY_MULTIPLE_OF) {
+		multipleOfValue := mustBeNumber(m[KEY_MULTIPLE_OF])
+		if multipleOfValue == nil {
+			return errors.New(formatErrorDescription(
+				Locale.InvalidType(),
+				ErrorDetails{
+					"expected": STRING_NUMBER,
+					"given": KEY_MULTIPLE_OF,
+				},
+			))
+		}
+		if *multipleOfValue <= 0 {
+			return errors.New(formatErrorDescription(
+				Locale.GreaterThanZero(),
+				ErrorDetails{"number": KEY_MULTIPLE_OF},
+			))
+		}
+		currentSchema.multipleOf = multipleOfValue
+	}
+
+	if existsMapKey(m, KEY_MINIMUM) {
+		minimumValue := mustBeNumber(m[KEY_MINIMUM])
+		if minimumValue == nil {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeOfA(),
+				ErrorDetails{"x": KEY_MINIMUM, "y": STRING_NUMBER},
+			))
+		}
+		currentSchema.minimum = minimumValue
+	}
+
+	if existsMapKey(m, KEY_EXCLUSIVE_MINIMUM) {
+		if isKind(m[KEY_EXCLUSIVE_MINIMUM], reflect.Bool) {
+			if currentSchema.minimum == nil {
+				return errors.New(formatErrorDescription(
+					Locale.CannotBeUsedWithout(),
+					ErrorDetails{"x": KEY_EXCLUSIVE_MINIMUM, "y": KEY_MINIMUM},
+				))
+			}
+			exclusiveMinimumValue := m[KEY_EXCLUSIVE_MINIMUM].(bool)
+			currentSchema.exclusiveMinimum = exclusiveMinimumValue
+		} else {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeOfA(),
+				ErrorDetails{"x": KEY_EXCLUSIVE_MINIMUM, "y": TYPE_BOOLEAN},
+			))
+		}
+	}
+
+	if existsMapKey(m, KEY_MAXIMUM) {
+		maximumValue := mustBeNumber(m[KEY_MAXIMUM])
+		if maximumValue == nil {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeOfA(),
+				ErrorDetails{"x": KEY_MAXIMUM, "y": STRING_NUMBER},
+			))
+		}
+		currentSchema.maximum = maximumValue
+	}
+
+	if existsMapKey(m, KEY_EXCLUSIVE_MAXIMUM) {
+		if isKind(m[KEY_EXCLUSIVE_MAXIMUM], reflect.Bool) {
+			if currentSchema.maximum == nil {
+				return errors.New(formatErrorDescription(
+					Locale.CannotBeUsedWithout(),
+					ErrorDetails{"x": KEY_EXCLUSIVE_MAXIMUM, "y": KEY_MAXIMUM},
+				))
+			}
+			exclusiveMaximumValue := m[KEY_EXCLUSIVE_MAXIMUM].(bool)
+			currentSchema.exclusiveMaximum = exclusiveMaximumValue
+		} else {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeOfA(),
+				ErrorDetails{"x": KEY_EXCLUSIVE_MAXIMUM, "y": TYPE_BOOLEAN},
+			))
+		}
+	}
+
+	if
currentSchema.minimum != nil && currentSchema.maximum != nil { + if *currentSchema.minimum > *currentSchema.maximum { + return errors.New(formatErrorDescription( + Locale.CannotBeGT(), + ErrorDetails{"x": KEY_MINIMUM, "y": KEY_MAXIMUM}, + )) + } + } + + // validation : string + + if existsMapKey(m, KEY_MIN_LENGTH) { + minLengthIntegerValue := mustBeInteger(m[KEY_MIN_LENGTH]) + if minLengthIntegerValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_MIN_LENGTH, "y": TYPE_INTEGER}, + )) + } + if *minLengthIntegerValue < 0 { + return errors.New(formatErrorDescription( + Locale.MustBeGTEZero(), + ErrorDetails{"key": KEY_MIN_LENGTH}, + )) + } + currentSchema.minLength = minLengthIntegerValue + } + + if existsMapKey(m, KEY_MAX_LENGTH) { + maxLengthIntegerValue := mustBeInteger(m[KEY_MAX_LENGTH]) + if maxLengthIntegerValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_MAX_LENGTH, "y": TYPE_INTEGER}, + )) + } + if *maxLengthIntegerValue < 0 { + return errors.New(formatErrorDescription( + Locale.MustBeGTEZero(), + ErrorDetails{"key": KEY_MAX_LENGTH}, + )) + } + currentSchema.maxLength = maxLengthIntegerValue + } + + if currentSchema.minLength != nil && currentSchema.maxLength != nil { + if *currentSchema.minLength > *currentSchema.maxLength { + return errors.New(formatErrorDescription( + Locale.CannotBeGT(), + ErrorDetails{"x": KEY_MIN_LENGTH, "y": KEY_MAX_LENGTH}, + )) + } + } + + if existsMapKey(m, KEY_MIN_NUMERIC) { + minNumericIntegerValue := mustBeInteger(m[KEY_MIN_NUMERIC]) + if minNumericIntegerValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_MIN_NUMERIC, "y": TYPE_INTEGER}, + )) + } + if *minNumericIntegerValue < 0 { + return errors.New(formatErrorDescription( + Locale.MustBeGTEZero(), + ErrorDetails{"key": KEY_MIN_NUMERIC}, + )) + } + currentSchema.minNumeric = minNumericIntegerValue + } + + if existsMapKey(m, KEY_MIN_SPECIAL) { + minSpecialIntegerValue := mustBeInteger(m[KEY_MIN_SPECIAL]) + if minSpecialIntegerValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_MIN_SPECIAL, "y": TYPE_INTEGER}, + )) + } + if *minSpecialIntegerValue < 0 { + return errors.New(formatErrorDescription( + Locale.MustBeGTEZero(), + ErrorDetails{"key": KEY_MIN_SPECIAL}, + )) + } + currentSchema.minSpecial = minSpecialIntegerValue + } + + if existsMapKey(m, KEY_MULTI_CASE) { + if isKind(m[KEY_MULTI_CASE], reflect.Bool) { + currentSchema.multiCase = m[KEY_MULTI_CASE].(bool) + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfA(), + ErrorDetails{"x": KEY_MULTI_CASE, "y": TYPE_BOOLEAN}, + )) + } + } + + if existsMapKey(m, KEY_DISABLE_SEQUENTIAL) { + if isKind(m[KEY_DISABLE_SEQUENTIAL], reflect.Bool) { + currentSchema.disableSequential = m[KEY_DISABLE_SEQUENTIAL].(bool) + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfA(), + ErrorDetails{"x": KEY_DISABLE_SEQUENTIAL, "y": TYPE_BOOLEAN}, + )) + } + } + + if existsMapKey(m, KEY_PATTERN) { + if isKind(m[KEY_PATTERN], reflect.String) { + regexpObject, err := regexp.Compile(m[KEY_PATTERN].(string)) + if err != nil { + return errors.New(formatErrorDescription( + Locale.MustBeValidRegex(), + ErrorDetails{"key": KEY_PATTERN}, + )) + } + currentSchema.pattern = regexpObject + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfA(), + ErrorDetails{"x": KEY_PATTERN, "y": TYPE_STRING}, + )) + } + } + + if 
existsMapKey(m, KEY_FORMAT) { + formatString, ok := m[KEY_FORMAT].(string) + if ok && FormatCheckers.Has(formatString) { + currentSchema.format = formatString + } else { + return errors.New(formatErrorDescription( + Locale.MustBeValidFormat(), + ErrorDetails{"key": KEY_FORMAT, "given": m[KEY_FORMAT]}, + )) + } + } + + // validation : object + + if existsMapKey(m, KEY_MIN_PROPERTIES) { + minPropertiesIntegerValue := mustBeInteger(m[KEY_MIN_PROPERTIES]) + if minPropertiesIntegerValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_MIN_PROPERTIES, "y": TYPE_INTEGER}, + )) + } + if *minPropertiesIntegerValue < 0 { + return errors.New(formatErrorDescription( + Locale.MustBeGTEZero(), + ErrorDetails{"key": KEY_MIN_PROPERTIES}, + )) + } + currentSchema.minProperties = minPropertiesIntegerValue + } + + if existsMapKey(m, KEY_MAX_PROPERTIES) { + maxPropertiesIntegerValue := mustBeInteger(m[KEY_MAX_PROPERTIES]) + if maxPropertiesIntegerValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_MAX_PROPERTIES, "y": TYPE_INTEGER}, + )) + } + if *maxPropertiesIntegerValue < 0 { + return errors.New(formatErrorDescription( + Locale.MustBeGTEZero(), + ErrorDetails{"key": KEY_MAX_PROPERTIES}, + )) + } + currentSchema.maxProperties = maxPropertiesIntegerValue + } + + if currentSchema.minProperties != nil && currentSchema.maxProperties != nil { + if *currentSchema.minProperties > *currentSchema.maxProperties { + return errors.New(formatErrorDescription( + Locale.KeyCannotBeGreaterThan(), + ErrorDetails{"key": KEY_MIN_PROPERTIES, "y": KEY_MAX_PROPERTIES}, + )) + } + } + + if existsMapKey(m, KEY_REQUIRED) { + if isKind(m[KEY_REQUIRED], reflect.Slice) { + requiredValues := m[KEY_REQUIRED].([]interface{}) + for _, requiredValue := range requiredValues { + if isKind(requiredValue, reflect.String) { + err := currentSchema.AddRequired(requiredValue.(string)) + if err != nil { + return err + } + } else { + return errors.New(formatErrorDescription( + Locale.KeyItemsMustBeOfType(), + ErrorDetails{"key": KEY_REQUIRED, "type": TYPE_STRING}, + )) + } + } + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_REQUIRED, "y": TYPE_ARRAY}, + )) + } + } + + // validation : array + + if existsMapKey(m, KEY_MIN_ITEMS) { + minItemsIntegerValue := mustBeInteger(m[KEY_MIN_ITEMS]) + if minItemsIntegerValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_MIN_ITEMS, "y": TYPE_INTEGER}, + )) + } + if *minItemsIntegerValue < 0 { + return errors.New(formatErrorDescription( + Locale.MustBeGTEZero(), + ErrorDetails{"key": KEY_MIN_ITEMS}, + )) + } + currentSchema.minItems = minItemsIntegerValue + } + + if existsMapKey(m, KEY_MAX_ITEMS) { + maxItemsIntegerValue := mustBeInteger(m[KEY_MAX_ITEMS]) + if maxItemsIntegerValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_MAX_ITEMS, "y": TYPE_INTEGER}, + )) + } + if *maxItemsIntegerValue < 0 { + return errors.New(formatErrorDescription( + Locale.MustBeGTEZero(), + ErrorDetails{"key": KEY_MAX_ITEMS}, + )) + } + currentSchema.maxItems = maxItemsIntegerValue + } + + if existsMapKey(m, KEY_UNIQUE_ITEMS) { + if isKind(m[KEY_UNIQUE_ITEMS], reflect.Bool) { + currentSchema.uniqueItems = m[KEY_UNIQUE_ITEMS].(bool) + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfA(), + ErrorDetails{"x": KEY_UNIQUE_ITEMS, "y": TYPE_BOOLEAN}, + )) + } + 
}
+
+	// validation : all
+
+	if existsMapKey(m, KEY_ENUM) {
+		if isKind(m[KEY_ENUM], reflect.Slice) {
+			for _, v := range m[KEY_ENUM].([]interface{}) {
+				err := currentSchema.AddEnum(v)
+				if err != nil {
+					return err
+				}
+			}
+		} else {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeOfAn(),
+				ErrorDetails{"x": KEY_ENUM, "y": TYPE_ARRAY},
+			))
+		}
+	}
+
+	// validation : subSchema
+
+	if existsMapKey(m, KEY_ONE_OF) {
+		if isKind(m[KEY_ONE_OF], reflect.Slice) {
+			for _, v := range m[KEY_ONE_OF].([]interface{}) {
+				newSchema := &subSchema{property: KEY_ONE_OF, parent: currentSchema, ref: currentSchema.ref}
+				currentSchema.AddOneOf(newSchema)
+				err := d.parseSchema(v, newSchema)
+				if err != nil {
+					return err
+				}
+			}
+		} else {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeOfAn(),
+				ErrorDetails{"x": KEY_ONE_OF, "y": TYPE_ARRAY},
+			))
+		}
+	}
+
+	if existsMapKey(m, KEY_ANY_OF) {
+		if isKind(m[KEY_ANY_OF], reflect.Slice) {
+			for _, v := range m[KEY_ANY_OF].([]interface{}) {
+				newSchema := &subSchema{property: KEY_ANY_OF, parent: currentSchema, ref: currentSchema.ref}
+				currentSchema.AddAnyOf(newSchema)
+				err := d.parseSchema(v, newSchema)
+				if err != nil {
+					return err
+				}
+			}
+		} else {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeOfAn(),
+				ErrorDetails{"x": KEY_ANY_OF, "y": TYPE_ARRAY},
+			))
+		}
+	}
+
+	if existsMapKey(m, KEY_ALL_OF) {
+		if isKind(m[KEY_ALL_OF], reflect.Slice) {
+			for _, v := range m[KEY_ALL_OF].([]interface{}) {
+				newSchema := &subSchema{property: KEY_ALL_OF, parent: currentSchema, ref: currentSchema.ref}
+				currentSchema.AddAllOf(newSchema)
+				err := d.parseSchema(v, newSchema)
+				if err != nil {
+					return err
+				}
+			}
+		} else {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeOfAn(),
+				ErrorDetails{"x": KEY_ALL_OF, "y": TYPE_ARRAY},
+			))
+		}
+	}
+
+	if existsMapKey(m, KEY_NOT) {
+		if isKind(m[KEY_NOT], reflect.Map) {
+			newSchema := &subSchema{property: KEY_NOT, parent: currentSchema, ref: currentSchema.ref}
+			currentSchema.SetNot(newSchema)
+			err := d.parseSchema(m[KEY_NOT], newSchema)
+			if err != nil {
+				return err
+			}
+		} else {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeOfAn(),
+				ErrorDetails{"x": KEY_NOT, "y": TYPE_OBJECT},
+			))
+		}
+	}
+
+	return nil
+}
+
+func (d *Schema) parseReference(documentNode interface{}, currentSchema *subSchema, reference string) error {
+	var refdDocumentNode interface{}
+	jsonPointer := currentSchema.ref.GetPointer()
+	standaloneDocument := d.pool.GetStandaloneDocument()
+
+	if standaloneDocument != nil {
+
+		var err error
+		refdDocumentNode, _, err = jsonPointer.Get(standaloneDocument)
+		if err != nil {
+			return err
+		}
+
+	} else {
+		dsp, err := d.pool.GetDocument(*currentSchema.ref)
+		if err != nil {
+			return err
+		}
+
+		refdDocumentNode, _, err = jsonPointer.Get(dsp.Document)
+		if err != nil {
+			return err
+		}
+
+	}
+
+	if !isKind(refdDocumentNode, reflect.Map) {
+		return errors.New(formatErrorDescription(
+			Locale.MustBeOfType(),
+			ErrorDetails{"key": STRING_SCHEMA, "type": TYPE_OBJECT},
+		))
+	}
+
+	// returns the loaded referenced subSchema for the caller to update its current subSchema
+	newSchemaDocument := refdDocumentNode.(map[string]interface{})
+	newSchema := &subSchema{property: KEY_REF, parent: currentSchema, ref: currentSchema.ref}
+	d.referencePool.Add(currentSchema.ref.String()+reference, newSchema)
+
+	err := d.parseSchema(newSchemaDocument, newSchema)
+	if err != nil {
+		return err
+	}
+
+	currentSchema.refSchema = newSchema
+
+	return nil
+
+}
+
+func (d *Schema) parseProperties(documentNode interface{}, currentSchema *subSchema) error {
+
+	if !isKind(documentNode, reflect.Map) {
+		return errors.New(formatErrorDescription(
+			Locale.MustBeOfType(),
+			ErrorDetails{"key": STRING_PROPERTIES, "type": TYPE_OBJECT},
+		))
+	}
+
+	m := documentNode.(map[string]interface{})
+	for k := range m {
+		schemaProperty := k
+		newSchema := &subSchema{property: schemaProperty, parent: currentSchema, ref: currentSchema.ref}
+		currentSchema.AddPropertiesChild(newSchema)
+		err := d.parseSchema(m[k], newSchema)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *Schema) parseDependencies(documentNode interface{}, currentSchema *subSchema) error {
+
+	if !isKind(documentNode, reflect.Map) {
+		return errors.New(formatErrorDescription(
+			Locale.MustBeOfType(),
+			ErrorDetails{"key": KEY_DEPENDENCIES, "type": TYPE_OBJECT},
+		))
+	}
+
+	m := documentNode.(map[string]interface{})
+	currentSchema.dependencies = make(map[string]interface{})
+
+	for k := range m {
+		switch reflect.ValueOf(m[k]).Kind() {
+
+		case reflect.Slice:
+			values := m[k].([]interface{})
+			var valuesToRegister []string
+
+			for _, value := range values {
+				if !isKind(value, reflect.String) {
+					return errors.New(formatErrorDescription(
+						Locale.MustBeOfType(),
+						ErrorDetails{
+							"key": STRING_DEPENDENCY,
+							"type": STRING_SCHEMA_OR_ARRAY_OF_STRINGS,
+						},
+					))
+				}
+				valuesToRegister = append(valuesToRegister, value.(string))
+			}
+			currentSchema.dependencies[k] = valuesToRegister
+
+		case reflect.Map:
+			depSchema := &subSchema{property: k, parent: currentSchema, ref: currentSchema.ref}
+			err := d.parseSchema(m[k], depSchema)
+			if err != nil {
+				return err
+			}
+			currentSchema.dependencies[k] = depSchema
+
+		default:
+			return errors.New(formatErrorDescription(
+				Locale.MustBeOfType(),
+				ErrorDetails{
+					"key": STRING_DEPENDENCY,
+					"type": STRING_SCHEMA_OR_ARRAY_OF_STRINGS,
+				},
+			))
+		}
+
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/TykTechnologies/gojsonschema/schemaPool.go b/vendor/github.com/TykTechnologies/gojsonschema/schemaPool.go
new file mode 100644
index 00000000000..f2ad641af3c
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/gojsonschema/schemaPool.go
@@ -0,0 +1,109 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.

+// author xeipuuv
+// author-github https://github.com/xeipuuv
+// author-mail xeipuuv@gmail.com
+//
+// repository-name gojsonschema
+// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language.
+//
+// description Defines resource pooling.
+// Eases referencing and avoids downloading the same resource twice.
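+// Documents are cached in the pool under their canonical reference URL
+// (fragment stripped), so each remote schema is only loaded once.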
+//
+// created 26-02-2013
+
+package gojsonschema
+
+import (
+	"errors"
+
+	"github.com/xeipuuv/gojsonreference"
+)
+
+type schemaPoolDocument struct {
+	Document interface{}
+}
+
+type schemaPool struct {
+	schemaPoolDocuments map[string]*schemaPoolDocument
+	standaloneDocument interface{}
+	jsonLoaderFactory JSONLoaderFactory
+}
+
+func newSchemaPool(f JSONLoaderFactory) *schemaPool {
+
+	p := &schemaPool{}
+	p.schemaPoolDocuments = make(map[string]*schemaPoolDocument)
+	p.standaloneDocument = nil
+	p.jsonLoaderFactory = f
+
+	return p
+}
+
+func (p *schemaPool) SetStandaloneDocument(document interface{}) {
+	p.standaloneDocument = document
+}
+
+func (p *schemaPool) GetStandaloneDocument() (document interface{}) {
+	return p.standaloneDocument
+}
+
+func (p *schemaPool) GetDocument(reference gojsonreference.JsonReference) (*schemaPoolDocument, error) {
+
+	if internalLogEnabled {
+		internalLog("Get Document ( %s )", reference.String())
+	}
+
+	// It is not possible to load anything that is not canonical...
+	if !reference.IsCanonical() {
+		return nil, errors.New(formatErrorDescription(
+			Locale.ReferenceMustBeCanonical(),
+			ErrorDetails{"reference": reference},
+		))
+	}
+
+	refToUrl := reference
+	refToUrl.GetUrl().Fragment = ""
+
+	// Try to find the requested document in the pool
+	if spd, ok := p.schemaPoolDocuments[refToUrl.String()]; ok {
+		if internalLogEnabled {
+			internalLog(" From pool")
+		}
+		return spd, nil
+	}
+
+	jsonReferenceLoader := p.jsonLoaderFactory.New(reference.String())
+	document, err := jsonReferenceLoader.LoadJSON()
+	if err != nil {
+		return nil, err
+	}
+
+	spd := &schemaPoolDocument{Document: document}
+	// add the document to the pool for potential later use
+	p.schemaPoolDocuments[refToUrl.String()] = spd
+
+	return spd, nil
+}
diff --git a/vendor/github.com/TykTechnologies/gojsonschema/schemaReferencePool.go b/vendor/github.com/TykTechnologies/gojsonschema/schemaReferencePool.go
new file mode 100644
index 00000000000..294e36a732a
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/gojsonschema/schemaReferencePool.go
@@ -0,0 +1,67 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.

+// author xeipuuv
+// author-github https://github.com/xeipuuv
+// author-mail xeipuuv@gmail.com
+//
+// repository-name gojsonschema
+// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language.
+//
+// description Pool of referenced schemas.
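+// Parsed subSchemas are registered under their reference string, so a
+// $ref that has already been resolved can be reused instead of re-parsed.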
+//
+// created 25-06-2013

+package gojsonschema

+type schemaReferencePool struct {
+	documents map[string]*subSchema
+}
+
+func newSchemaReferencePool() *schemaReferencePool {
+
+	p := &schemaReferencePool{}
+	p.documents = make(map[string]*subSchema)
+
+	return p
+}
+
+func (p *schemaReferencePool) Get(ref string) (*subSchema, bool) {
+
+	if internalLogEnabled {
+		internalLog("Schema Reference ( %s )", ref)
+	}
+
+	if sch, ok := p.documents[ref]; ok {
+		if internalLogEnabled {
+			internalLog(" From pool")
+		}
+		return sch, true
+	}
+
+	return nil, false
+}
+
+func (p *schemaReferencePool) Add(ref string, sch *subSchema) {
+
+	if internalLogEnabled {
+		internalLog("Add Schema Reference %s to pool", ref)
+	}
+
+	p.documents[ref] = sch
+}
diff --git a/vendor/github.com/TykTechnologies/gojsonschema/schemaType.go b/vendor/github.com/TykTechnologies/gojsonschema/schemaType.go
new file mode 100644
index 00000000000..e13a0fb0cbf
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/gojsonschema/schemaType.go
@@ -0,0 +1,83 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.

+// author xeipuuv
+// author-github https://github.com/xeipuuv
+// author-mail xeipuuv@gmail.com
+//
+// repository-name gojsonschema
+// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language.
+//
+// description Helper structure to handle schema types, and the combination of them.
+//
+// created 28-02-2013

+package gojsonschema

+import (
+	"errors"
+	"fmt"
+	"strings"
+)

+type jsonSchemaType struct {
+	types []string
+}

+// Is the schema typed? That is, does it contain at least one type?
+// When not typed, the schema does not need any type validation
+func (t *jsonSchemaType) IsTyped() bool {
+	return len(t.types) > 0
+}
+
+func (t *jsonSchemaType) Add(etype string) error {
+
+	if !isStringInSlice(JSON_TYPES, etype) {
+		return errors.New(formatErrorDescription(Locale.NotAValidType(), ErrorDetails{"type": etype}))
+	}
+
+	if t.Contains(etype) {
+		return errors.New(formatErrorDescription(Locale.Duplicated(), ErrorDetails{"type": etype}))
+	}
+
+	t.types = append(t.types, etype)
+
+	return nil
+}
+
+func (t *jsonSchemaType) Contains(etype string) bool {
+
+	for _, v := range t.types {
+		if v == etype {
+			return true
+		}
+	}
+
+	return false
+}
+
+func (t *jsonSchemaType) String() string {
+
+	if len(t.types) == 0 {
+		return STRING_UNDEFINED // should never happen
+	}
+
+	// Displayed as a list [type1,type2,...]
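+	// e.g. a schema with "type": ["string", "null"] is displayed as [string,null]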
+ if len(t.types) > 1 { + return fmt.Sprintf("[%s]", strings.Join(t.types, ",")) + } + + // Only one type: name only + return t.types[0] +} diff --git a/vendor/github.com/TykTechnologies/gojsonschema/subSchema.go b/vendor/github.com/TykTechnologies/gojsonschema/subSchema.go new file mode 100644 index 00000000000..75abc5f5262 --- /dev/null +++ b/vendor/github.com/TykTechnologies/gojsonschema/subSchema.go @@ -0,0 +1,235 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Defines the structure of a sub-subSchema. +// A sub-subSchema can contain other sub-schemas. +// +// created 27-02-2013 + +package gojsonschema + +import ( + "errors" + "regexp" + "strings" + + "github.com/xeipuuv/gojsonreference" +) + +const ( + KEY_SCHEMA = "$subSchema" + KEY_ID = "$id" + KEY_REF = "$ref" + KEY_TITLE = "title" + KEY_DESCRIPTION = "description" + KEY_TYPE = "type" + KEY_ITEMS = "items" + KEY_ADDITIONAL_ITEMS = "additionalItems" + KEY_PROPERTIES = "properties" + KEY_PATTERN_PROPERTIES = "patternProperties" + KEY_ADDITIONAL_PROPERTIES = "additionalProperties" + KEY_DEFINITIONS = "definitions" + KEY_MULTIPLE_OF = "multipleOf" + KEY_MINIMUM = "minimum" + KEY_MAXIMUM = "maximum" + KEY_EXCLUSIVE_MINIMUM = "exclusiveMinimum" + KEY_EXCLUSIVE_MAXIMUM = "exclusiveMaximum" + KEY_MIN_LENGTH = "minLength" + KEY_MAX_LENGTH = "maxLength" + KEY_MIN_NUMERIC = "minNumeric" + KEY_MIN_SPECIAL = "minSpecial" + KEY_MULTI_CASE = "multiCase" + KEY_DISABLE_SEQUENTIAL = "disableSequential" + KEY_PATTERN = "pattern" + KEY_FORMAT = "format" + KEY_MIN_PROPERTIES = "minProperties" + KEY_MAX_PROPERTIES = "maxProperties" + KEY_DEPENDENCIES = "dependencies" + KEY_REQUIRED = "required" + KEY_MIN_ITEMS = "minItems" + KEY_MAX_ITEMS = "maxItems" + KEY_UNIQUE_ITEMS = "uniqueItems" + KEY_ENUM = "enum" + KEY_ONE_OF = "oneOf" + KEY_ANY_OF = "anyOf" + KEY_ALL_OF = "allOf" + KEY_NOT = "not" +) + +type subSchema struct { + + // basic subSchema meta properties + id *string + title *string + description *string + + property string + + // Types associated with the subSchema + types jsonSchemaType + + // Reference url + ref *gojsonreference.JsonReference + // Schema referenced + refSchema *subSchema + // Json reference + subSchema *gojsonreference.JsonReference + + // hierarchy + parent *subSchema + definitions map[string]*subSchema + definitionsChildren []*subSchema + itemsChildren []*subSchema + itemsChildrenIsSingleSchema bool + propertiesChildren []*subSchema + + // validation : number / integer + multipleOf *float64 + maximum *float64 + exclusiveMaximum bool + minimum *float64 + exclusiveMinimum bool + + // validation : string + minLength *int + maxLength *int + minNumeric *int + minSpecial *int + multiCase bool + disableSequential bool + 
pattern *regexp.Regexp + format string + + // validation : object + minProperties *int + maxProperties *int + required []string + + dependencies map[string]interface{} + additionalProperties interface{} + patternProperties map[string]*subSchema + + // validation : array + minItems *int + maxItems *int + uniqueItems bool + + additionalItems interface{} + + // validation : all + enum []string + + // validation : subSchema + oneOf []*subSchema + anyOf []*subSchema + allOf []*subSchema + not *subSchema +} + +func (s *subSchema) AddEnum(i interface{}) error { + + is, err := marshalToJsonString(i) + if err != nil { + return err + } + + if isStringInSlice(s.enum, *is) { + return errors.New(formatErrorDescription( + Locale.KeyItemsMustBeUnique(), + ErrorDetails{"key": KEY_ENUM}, + )) + } + + s.enum = append(s.enum, *is) + + return nil +} + +func (s *subSchema) ContainsEnum(i interface{}) (bool, error) { + + is, err := marshalToJsonString(i) + if err != nil { + return false, err + } + + return isStringInSlice(s.enum, *is), nil +} + +func (s *subSchema) AddOneOf(subSchema *subSchema) { + s.oneOf = append(s.oneOf, subSchema) +} + +func (s *subSchema) AddAllOf(subSchema *subSchema) { + s.allOf = append(s.allOf, subSchema) +} + +func (s *subSchema) AddAnyOf(subSchema *subSchema) { + s.anyOf = append(s.anyOf, subSchema) +} + +func (s *subSchema) SetNot(subSchema *subSchema) { + s.not = subSchema +} + +func (s *subSchema) AddRequired(value string) error { + + if isStringInSlice(s.required, value) { + return errors.New(formatErrorDescription( + Locale.KeyItemsMustBeUnique(), + ErrorDetails{"key": KEY_REQUIRED}, + )) + } + + s.required = append(s.required, value) + + return nil +} + +func (s *subSchema) AddDefinitionChild(child *subSchema) { + s.definitionsChildren = append(s.definitionsChildren, child) +} + +func (s *subSchema) AddItemsChild(child *subSchema) { + s.itemsChildren = append(s.itemsChildren, child) +} + +func (s *subSchema) AddPropertiesChild(child *subSchema) { + s.propertiesChildren = append(s.propertiesChildren, child) +} + +func (s *subSchema) PatternPropertiesString() string { + + if s.patternProperties == nil || len(s.patternProperties) == 0 { + return STRING_UNDEFINED // should never happen + } + + patternPropertiesKeySlice := []string{} + for pk := range s.patternProperties { + patternPropertiesKeySlice = append(patternPropertiesKeySlice, `"`+pk+`"`) + } + + if len(patternPropertiesKeySlice) == 1 { + return patternPropertiesKeySlice[0] + } + + return "[" + strings.Join(patternPropertiesKeySlice, ",") + "]" + +} diff --git a/vendor/github.com/TykTechnologies/gojsonschema/types.go b/vendor/github.com/TykTechnologies/gojsonschema/types.go new file mode 100644 index 00000000000..952d22ef65e --- /dev/null +++ b/vendor/github.com/TykTechnologies/gojsonschema/types.go @@ -0,0 +1,58 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Contains const types for schema and JSON. +// +// created 28-02-2013 + +package gojsonschema + +const ( + TYPE_ARRAY = `array` + TYPE_BOOLEAN = `boolean` + TYPE_INTEGER = `integer` + TYPE_NUMBER = `number` + TYPE_NULL = `null` + TYPE_OBJECT = `object` + TYPE_STRING = `string` +) + +var JSON_TYPES []string +var SCHEMA_TYPES []string + +func init() { + JSON_TYPES = []string{ + TYPE_ARRAY, + TYPE_BOOLEAN, + TYPE_INTEGER, + TYPE_NUMBER, + TYPE_NULL, + TYPE_OBJECT, + TYPE_STRING} + + SCHEMA_TYPES = []string{ + TYPE_ARRAY, + TYPE_BOOLEAN, + TYPE_INTEGER, + TYPE_NUMBER, + TYPE_OBJECT, + TYPE_STRING} +} diff --git a/vendor/github.com/TykTechnologies/gojsonschema/utils.go b/vendor/github.com/TykTechnologies/gojsonschema/utils.go new file mode 100644 index 00000000000..26cf75ebf79 --- /dev/null +++ b/vendor/github.com/TykTechnologies/gojsonschema/utils.go @@ -0,0 +1,208 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Various utility functions. +// +// created 26-02-2013 + +package gojsonschema + +import ( + "encoding/json" + "fmt" + "math" + "reflect" + "strconv" +) + +func isKind(what interface{}, kind reflect.Kind) bool { + target := what + if isJsonNumber(what) { + // JSON Numbers are strings! 
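+		// (json.Decoder.UseNumber() decodes numbers as json.Number, whose
+		// underlying type is string, hence the conversion before the Kind check)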
+		target = *mustBeNumber(what)
+	}
+	return reflect.ValueOf(target).Kind() == kind
+}
+
+func existsMapKey(m map[string]interface{}, k string) bool {
+	_, ok := m[k]
+	return ok
+}
+
+func isStringInSlice(s []string, what string) bool {
+	for i := range s {
+		if s[i] == what {
+			return true
+		}
+	}
+	return false
+}
+
+func marshalToJsonString(value interface{}) (*string, error) {
+
+	mBytes, err := json.Marshal(value)
+	if err != nil {
+		return nil, err
+	}
+
+	sBytes := string(mBytes)
+	return &sBytes, nil
+}
+
+func isJsonNumber(what interface{}) bool {
+
+	switch what.(type) {
+
+	case json.Number:
+		return true
+	}
+
+	return false
+}
+
+func checkJsonNumber(what interface{}) (isValidFloat64 bool, isValidInt64 bool, isValidInt32 bool) {
+
+	jsonNumber := what.(json.Number)
+
+	f64, errFloat64 := jsonNumber.Float64()
+	s64 := strconv.FormatFloat(f64, 'f', -1, 64)
+	_, errInt64 := strconv.ParseInt(s64, 10, 64)
+
+	isValidFloat64 = errFloat64 == nil
+	isValidInt64 = errInt64 == nil
+
+	_, errInt32 := strconv.ParseInt(s64, 10, 32)
+	isValidInt32 = isValidInt64 && errInt32 == nil
+
+	return
+
+}
+
+// same as ECMA Number.MAX_SAFE_INTEGER and Number.MIN_SAFE_INTEGER
+const (
+	max_json_float = float64(1<<53 - 1)  // 9007199254740991.0, i.e. 2^53 - 1
+	min_json_float = -float64(1<<53 - 1) // -9007199254740991.0, i.e. -(2^53 - 1)
+)
+
+func isFloat64AnInteger(f float64) bool {
+
+	if math.IsNaN(f) || math.IsInf(f, 0) || f < min_json_float || f > max_json_float {
+		return false
+	}
+
+	return f == float64(int64(f)) || f == float64(uint64(f))
+}
+
+func mustBeInteger(what interface{}) *int {
+
+	if isJsonNumber(what) {
+
+		number := what.(json.Number)
+
+		_, _, isValidInt32 := checkJsonNumber(number)
+
+		if isValidInt32 {
+
+			int64Value, err := number.Int64()
+			if err != nil {
+				return nil
+			}
+
+			int32Value := int(int64Value)
+			return &int32Value
+
+		} else {
+			return nil
+		}
+
+	}
+
+	return nil
+}
+
+func mustBeNumber(what interface{}) *float64 {
+
+	if isJsonNumber(what) {
+
+		number := what.(json.Number)
+		float64Value, err := number.Float64()
+
+		if err == nil {
+			return &float64Value
+		} else {
+			return nil
+		}
+
+	}
+
+	return nil
+
+}
+
+// formats a number so that it is displayed as the smallest string possible
+func resultErrorFormatJsonNumber(n json.Number) string {
+
+	if int64Value, err := n.Int64(); err == nil {
+		return fmt.Sprintf("%d", int64Value)
+	}
+
+	float64Value, _ := n.Float64()
+
+	return fmt.Sprintf("%g", float64Value)
+}
+
+// formats a number so that it is displayed as the smallest string possible
+func resultErrorFormatNumber(n float64) string {
+
+	if isFloat64AnInteger(n) {
+		return fmt.Sprintf("%d", int64(n))
+	}
+
+	return fmt.Sprintf("%g", n)
+}
+
+func convertDocumentNode(val interface{}) interface{} {
+
+	if lval, ok := val.([]interface{}); ok {
+
+		res := []interface{}{}
+		for _, v := range lval {
+			res = append(res, convertDocumentNode(v))
+		}
+
+		return res
+
+	}
+
+	if mval, ok := val.(map[interface{}]interface{}); ok {
+
+		res := map[string]interface{}{}
+
+		for k, v := range mval {
+			res[k.(string)] = convertDocumentNode(v)
+		}
+
+		return res
+
+	}
+
+	return val
+}
diff --git a/vendor/github.com/TykTechnologies/gojsonschema/validation.go b/vendor/github.com/TykTechnologies/gojsonschema/validation.go
new file mode 100644
index 00000000000..597e0a1c7dc
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/gojsonschema/validation.go
@@ -0,0 +1,901 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.

+// author xeipuuv
+// author-github https://github.com/xeipuuv
+// author-mail xeipuuv@gmail.com
+//
+// repository-name gojsonschema
+// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language.
+//
+// description Extends Schema and subSchema, implements the validation phase.
+//
+// created 28-02-2013

+package gojsonschema

+import (
+	"encoding/json"
+	"reflect"
+	"regexp"
+	"strconv"
+	"strings"
+	"unicode/utf8"
+)

+func Validate(ls JSONLoader, ld JSONLoader) (*Result, error) {
+
+	var err error
+
+	// load schema
+
+	schema, err := NewSchema(ls)
+	if err != nil {
+		return nil, err
+	}
+
+	// begin validation
+
+	return schema.Validate(ld)
+
+}
+
+func (v *Schema) Validate(l JSONLoader) (*Result, error) {
+
+	// load document
+
+	root, err := l.LoadJSON()
+	if err != nil {
+		return nil, err
+	}
+
+	// begin validation
+
+	result := &Result{}
+	context := newJsonContext(STRING_CONTEXT_ROOT, nil)
+	v.rootSchema.validateRecursive(v.rootSchema, root, result, context)
+
+	return result, nil
+
+}
+
+func (v *subSchema) subValidateWithContext(document interface{}, context *jsonContext) *Result {
+	result := &Result{}
+	v.validateRecursive(v, document, result, context)
+	return result
+}
+
+// Walker function to validate the json recursively against the subSchema
+func (v *subSchema) validateRecursive(currentSubSchema *subSchema, currentNode interface{}, result *Result, context *jsonContext) {
+
+	if internalLogEnabled {
+		internalLog("validateRecursive %s", context.String())
+		internalLog(" %v", currentNode)
+	}
+
+	// Handle referenced schemas, returns directly when a $ref is found
+	if currentSubSchema.refSchema != nil {
+		v.validateRecursive(currentSubSchema.refSchema, currentNode, result, context)
+		return
+	}
+
+	// Check for null value
+	if currentNode == nil {
+		if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_NULL) {
+			result.addError(
+				new(InvalidTypeError),
+				context,
+				currentNode,
+				ErrorDetails{
+					"expected": currentSubSchema.types.String(),
+					"given": TYPE_NULL,
+				},
+			)
+			return
+		}
+
+		currentSubSchema.validateSchema(currentSubSchema, currentNode, result, context)
+		v.validateCommon(currentSubSchema, currentNode, result, context)
+
+	} else { // Not a null value
+
+		if isJsonNumber(currentNode) {
+
+			value := currentNode.(json.Number)
+
+			_, isValidInt64, _ := checkJsonNumber(value)
+
+			validType := currentSubSchema.types.Contains(TYPE_NUMBER) || (isValidInt64 && currentSubSchema.types.Contains(TYPE_INTEGER))
+
+			if currentSubSchema.types.IsTyped() && !validType {
+
+				givenType := TYPE_INTEGER
+				if !isValidInt64 {
+					givenType = TYPE_NUMBER
+				}
+
+				result.addError(
+					new(InvalidTypeError),
+					context,
+					currentNode,
+					ErrorDetails{
+						"expected": currentSubSchema.types.String(),
+						"given": givenType,
+					},
+				)
+				return
+			}
+
+			currentSubSchema.validateSchema(currentSubSchema, value, result, context)
+			v.validateNumber(currentSubSchema, value, result, context)
+			v.validateCommon(currentSubSchema, value, result,
+
context) + v.validateString(currentSubSchema, value, result, context) + + } else { + + rValue := reflect.ValueOf(currentNode) + rKind := rValue.Kind() + + switch rKind { + + // Slice => JSON array + + case reflect.Slice: + + if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_ARRAY) { + result.addError( + new(InvalidTypeError), + context, + currentNode, + ErrorDetails{ + "expected": currentSubSchema.types.String(), + "given": TYPE_ARRAY, + }, + ) + return + } + + castCurrentNode := currentNode.([]interface{}) + + currentSubSchema.validateSchema(currentSubSchema, castCurrentNode, result, context) + + v.validateArray(currentSubSchema, castCurrentNode, result, context) + v.validateCommon(currentSubSchema, castCurrentNode, result, context) + + // Map => JSON object + + case reflect.Map: + if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_OBJECT) { + result.addError( + new(InvalidTypeError), + context, + currentNode, + ErrorDetails{ + "expected": currentSubSchema.types.String(), + "given": TYPE_OBJECT, + }, + ) + return + } + + castCurrentNode, ok := currentNode.(map[string]interface{}) + if !ok { + castCurrentNode = convertDocumentNode(currentNode).(map[string]interface{}) + } + + currentSubSchema.validateSchema(currentSubSchema, castCurrentNode, result, context) + + v.validateObject(currentSubSchema, castCurrentNode, result, context) + v.validateCommon(currentSubSchema, castCurrentNode, result, context) + + for _, pSchema := range currentSubSchema.propertiesChildren { + nextNode, ok := castCurrentNode[pSchema.property] + if ok { + subContext := newJsonContext(pSchema.property, context) + v.validateRecursive(pSchema, nextNode, result, subContext) + } + } + + // Simple JSON values : string, number, boolean + + case reflect.Bool: + + if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_BOOLEAN) { + result.addError( + new(InvalidTypeError), + context, + currentNode, + ErrorDetails{ + "expected": currentSubSchema.types.String(), + "given": TYPE_BOOLEAN, + }, + ) + return + } + + value := currentNode.(bool) + + currentSubSchema.validateSchema(currentSubSchema, value, result, context) + v.validateNumber(currentSubSchema, value, result, context) + v.validateCommon(currentSubSchema, value, result, context) + v.validateString(currentSubSchema, value, result, context) + + case reflect.String: + + if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_STRING) { + result.addError( + new(InvalidTypeError), + context, + currentNode, + ErrorDetails{ + "expected": currentSubSchema.types.String(), + "given": TYPE_STRING, + }, + ) + return + } + + value := currentNode.(string) + + currentSubSchema.validateSchema(currentSubSchema, value, result, context) + v.validateNumber(currentSubSchema, value, result, context) + v.validateCommon(currentSubSchema, value, result, context) + v.validateString(currentSubSchema, value, result, context) + + } + + } + + } + + result.incrementScore() +} + +// Different kinds of validation there, subSchema / common / array / object / string... 
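+// validateSchema applies the combinator keywords (anyOf, oneOf, allOf, not)
+// and the dependencies keyword; each candidate subSchema is validated on its
+// own, and the errors of the closest match are merged into the final result.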
+func (v *subSchema) validateSchema(currentSubSchema *subSchema, currentNode interface{}, result *Result, context *jsonContext) { + + if internalLogEnabled { + internalLog("validateSchema %s", context.String()) + internalLog(" %v", currentNode) + } + + if len(currentSubSchema.anyOf) > 0 { + + validatedAnyOf := false + var bestValidationResult *Result + + for _, anyOfSchema := range currentSubSchema.anyOf { + if !validatedAnyOf { + validationResult := anyOfSchema.subValidateWithContext(currentNode, context) + validatedAnyOf = validationResult.Valid() + + if !validatedAnyOf && (bestValidationResult == nil || validationResult.score > bestValidationResult.score) { + bestValidationResult = validationResult + } + } + } + if !validatedAnyOf { + + result.addError(new(NumberAnyOfError), context, currentNode, ErrorDetails{}) + + if bestValidationResult != nil { + // add error messages of closest matching subSchema as + // that's probably the one the user was trying to match + result.mergeErrors(bestValidationResult) + } + } + } + + if len(currentSubSchema.oneOf) > 0 { + + nbValidated := 0 + var bestValidationResult *Result + + for _, oneOfSchema := range currentSubSchema.oneOf { + validationResult := oneOfSchema.subValidateWithContext(currentNode, context) + if validationResult.Valid() { + nbValidated++ + } else if nbValidated == 0 && (bestValidationResult == nil || validationResult.score > bestValidationResult.score) { + bestValidationResult = validationResult + } + } + + if nbValidated != 1 { + + result.addError(new(NumberOneOfError), context, currentNode, ErrorDetails{}) + + if nbValidated == 0 { + // add error messages of closest matching subSchema as + // that's probably the one the user was trying to match + result.mergeErrors(bestValidationResult) + } + } + + } + + if len(currentSubSchema.allOf) > 0 { + nbValidated := 0 + + for _, allOfSchema := range currentSubSchema.allOf { + validationResult := allOfSchema.subValidateWithContext(currentNode, context) + if validationResult.Valid() { + nbValidated++ + } + result.mergeErrors(validationResult) + } + + if nbValidated != len(currentSubSchema.allOf) { + result.addError(new(NumberAllOfError), context, currentNode, ErrorDetails{}) + } + } + + if currentSubSchema.not != nil { + validationResult := currentSubSchema.not.subValidateWithContext(currentNode, context) + if validationResult.Valid() { + result.addError(new(NumberNotError), context, currentNode, ErrorDetails{}) + } + } + + if currentSubSchema.dependencies != nil && len(currentSubSchema.dependencies) > 0 { + if isKind(currentNode, reflect.Map) { + for elementKey := range currentNode.(map[string]interface{}) { + if dependency, ok := currentSubSchema.dependencies[elementKey]; ok { + switch dependency := dependency.(type) { + + case []string: + for _, dependOnKey := range dependency { + if _, dependencyResolved := currentNode.(map[string]interface{})[dependOnKey]; !dependencyResolved { + result.addError( + new(MissingDependencyError), + context, + currentNode, + ErrorDetails{"dependency": dependOnKey}, + ) + } + } + + case *subSchema: + dependency.validateRecursive(dependency, currentNode, result, context) + + } + } + } + } + } + + result.incrementScore() +} + +func (v *subSchema) validateCommon(currentSubSchema *subSchema, value interface{}, result *Result, context *jsonContext) { + + if internalLogEnabled { + internalLog("validateCommon %s", context.String()) + internalLog(" %v", value) + } + + // enum: + if len(currentSubSchema.enum) > 0 { + has, err := currentSubSchema.ContainsEnum(value) + 
if err != nil { + result.addError(new(InternalError), context, value, ErrorDetails{"error": err}) + } + if !has { + result.addError( + new(EnumError), + context, + value, + ErrorDetails{ + "allowed": strings.Join(currentSubSchema.enum, ", "), + }, + ) + } + } + + result.incrementScore() +} + +func (v *subSchema) validateArray(currentSubSchema *subSchema, value []interface{}, result *Result, context *jsonContext) { + + if internalLogEnabled { + internalLog("validateArray %s", context.String()) + internalLog(" %v", value) + } + + nbValues := len(value) + + // TODO explain + if currentSubSchema.itemsChildrenIsSingleSchema { + for i := range value { + subContext := newJsonContext(strconv.Itoa(i), context) + validationResult := currentSubSchema.itemsChildren[0].subValidateWithContext(value[i], subContext) + result.mergeErrors(validationResult) + } + } else { + if currentSubSchema.itemsChildren != nil && len(currentSubSchema.itemsChildren) > 0 { + + nbItems := len(currentSubSchema.itemsChildren) + + // while we have both schemas and values, check them against each other + for i := 0; i != nbItems && i != nbValues; i++ { + subContext := newJsonContext(strconv.Itoa(i), context) + validationResult := currentSubSchema.itemsChildren[i].subValidateWithContext(value[i], subContext) + result.mergeErrors(validationResult) + } + + if nbItems < nbValues { + // we have less schemas than elements in the instance array, + // but that might be ok if "additionalItems" is specified. + + switch currentSubSchema.additionalItems.(type) { + case bool: + if !currentSubSchema.additionalItems.(bool) { + result.addError(new(ArrayNoAdditionalItemsError), context, value, ErrorDetails{}) + } + case *subSchema: + additionalItemSchema := currentSubSchema.additionalItems.(*subSchema) + for i := nbItems; i != nbValues; i++ { + subContext := newJsonContext(strconv.Itoa(i), context) + validationResult := additionalItemSchema.subValidateWithContext(value[i], subContext) + result.mergeErrors(validationResult) + } + } + } + } + } + + // minItems & maxItems + if currentSubSchema.minItems != nil { + if nbValues < int(*currentSubSchema.minItems) { + result.addError( + new(ArrayMinItemsError), + context, + value, + ErrorDetails{"min": *currentSubSchema.minItems}, + ) + } + } + if currentSubSchema.maxItems != nil { + if nbValues > int(*currentSubSchema.maxItems) { + result.addError( + new(ArrayMaxItemsError), + context, + value, + ErrorDetails{"max": *currentSubSchema.maxItems}, + ) + } + } + + // uniqueItems: + if currentSubSchema.uniqueItems { + var stringifiedItems []string + for _, v := range value { + vString, err := marshalToJsonString(v) + if err != nil { + result.addError(new(InternalError), context, value, ErrorDetails{"err": err}) + } + if isStringInSlice(stringifiedItems, *vString) { + result.addError( + new(ItemsMustBeUniqueError), + context, + value, + ErrorDetails{"type": TYPE_ARRAY}, + ) + } + stringifiedItems = append(stringifiedItems, *vString) + } + } + + result.incrementScore() +} + +func (v *subSchema) validateObject(currentSubSchema *subSchema, value map[string]interface{}, result *Result, context *jsonContext) { + + if internalLogEnabled { + internalLog("validateObject %s", context.String()) + internalLog(" %v", value) + } + + // minProperties & maxProperties: + if currentSubSchema.minProperties != nil { + if len(value) < int(*currentSubSchema.minProperties) { + result.addError( + new(ArrayMinPropertiesError), + context, + value, + ErrorDetails{"min": *currentSubSchema.minProperties}, + ) + } + } + if 
currentSubSchema.maxProperties != nil { + if len(value) > int(*currentSubSchema.maxProperties) { + result.addError( + new(ArrayMaxPropertiesError), + context, + value, + ErrorDetails{"max": *currentSubSchema.maxProperties}, + ) + } + } + + // required: + for _, requiredProperty := range currentSubSchema.required { + _, ok := value[requiredProperty] + if ok { + result.incrementScore() + } else { + result.addError( + new(RequiredError), + context, + value, + ErrorDetails{"property": requiredProperty}, + ) + } + } + + // additionalProperty & patternProperty: + if currentSubSchema.additionalProperties != nil { + + switch currentSubSchema.additionalProperties.(type) { + case bool: + + if !currentSubSchema.additionalProperties.(bool) { + + for pk := range value { + + found := false + for _, spValue := range currentSubSchema.propertiesChildren { + if pk == spValue.property { + found = true + } + } + + pp_has, pp_match := v.validatePatternProperty(currentSubSchema, pk, value[pk], result, context) + + if found { + + if pp_has && !pp_match { + result.addError( + new(AdditionalPropertyNotAllowedError), + context, + value, + ErrorDetails{"property": pk}, + ) + } + + } else { + + if !pp_has || !pp_match { + result.addError( + new(AdditionalPropertyNotAllowedError), + context, + value, + ErrorDetails{"property": pk}, + ) + } + + } + } + } + + case *subSchema: + + additionalPropertiesSchema := currentSubSchema.additionalProperties.(*subSchema) + for pk := range value { + + found := false + for _, spValue := range currentSubSchema.propertiesChildren { + if pk == spValue.property { + found = true + } + } + + pp_has, pp_match := v.validatePatternProperty(currentSubSchema, pk, value[pk], result, context) + + if found { + + if pp_has && !pp_match { + validationResult := additionalPropertiesSchema.subValidateWithContext(value[pk], context) + result.mergeErrors(validationResult) + } + + } else { + + if !pp_has || !pp_match { + validationResult := additionalPropertiesSchema.subValidateWithContext(value[pk], context) + result.mergeErrors(validationResult) + } + + } + + } + } + } else { + + for pk := range value { + + pp_has, pp_match := v.validatePatternProperty(currentSubSchema, pk, value[pk], result, context) + + if pp_has && !pp_match { + + result.addError( + new(InvalidPropertyPatternError), + context, + value, + ErrorDetails{ + "property": pk, + "pattern": currentSubSchema.PatternPropertiesString(), + }, + ) + } + + } + } + + result.incrementScore() +} + +func (v *subSchema) validatePatternProperty(currentSubSchema *subSchema, key string, value interface{}, result *Result, context *jsonContext) (has bool, matched bool) { + + if internalLogEnabled { + internalLog("validatePatternProperty %s", context.String()) + internalLog(" %s %v", key, value) + } + + has = false + + validatedkey := false + + for pk, pv := range currentSubSchema.patternProperties { + if matches, _ := regexp.MatchString(pk, key); matches { + has = true + subContext := newJsonContext(key, context) + validationResult := pv.subValidateWithContext(value, subContext) + result.mergeErrors(validationResult) + if validationResult.Valid() { + validatedkey = true + } + } + } + + if !validatedkey { + return has, false + } + + result.incrementScore() + + return has, true +} + +func (v *subSchema) validateString(currentSubSchema *subSchema, value interface{}, result *Result, context *jsonContext) { + + // Ignore JSON numbers + if isJsonNumber(value) { + return + } + + // Ignore non strings + if !isKind(value, reflect.String) { + return + } + + if 
internalLogEnabled { + internalLog("validateString %s", context.String()) + internalLog(" %v", value) + } + + stringValue := value.(string) + + // minLength & maxLength: + if currentSubSchema.minLength != nil { + if utf8.RuneCount([]byte(stringValue)) < int(*currentSubSchema.minLength) { + result.addError( + new(StringLengthGTEError), + context, + value, + ErrorDetails{"min": *currentSubSchema.minLength}, + ) + } + } + if currentSubSchema.maxLength != nil { + if utf8.RuneCount([]byte(stringValue)) > int(*currentSubSchema.maxLength) { + result.addError( + new(StringLengthLTEError), + context, + value, + ErrorDetails{"max": *currentSubSchema.maxLength}, + ) + } + } + + if currentSubSchema.minNumeric != nil { + re := regexp.MustCompile(`\pN`) + + if len(re.FindAllString(stringValue, -1)) < int(*currentSubSchema.minNumeric) { + result.addError( + new(StringNumericGTEError), + context, + value, + ErrorDetails{"min_numeric": *currentSubSchema.minNumeric}, + ) + } + } + + if currentSubSchema.minSpecial != nil { + re := regexp.MustCompile(`[^\pN\pL]`) + + if len(re.FindAllString(stringValue, -1)) < int(*currentSubSchema.minSpecial) { + result.addError( + new(StringSpecialGTEError), + context, + value, + ErrorDetails{"min_special": *currentSubSchema.minSpecial}, + ) + } + } + + if currentSubSchema.multiCase { + // See http://www.regular-expressions.info/unicode.html on unicode regexp docs + reL := regexp.MustCompile(`[\p{Ll}\pN]`) + reU := regexp.MustCompile(`\p{Lu}`) + + if len(reL.FindAllString(stringValue, -1)) == 0 || len(reU.FindAllString(stringValue, -1)) == 0 { + result.addError( + new(StringMultiCaseError), + context, + value, + ErrorDetails{"multi_case": currentSubSchema.multiCase}, + ) + } + } + + if currentSubSchema.disableSequential { + re := regexp.MustCompile("(?i)(abc|bcd|cde|def|efg|fgh|ghi|hij|ijk|jkl|klm|lmn|mno|nop|opq|pqr|qrs|rst|stu|tuv|uvw|vwx|wxy|xyz|012|123|234|345|456|567|678|789)") + + var seq []string + + for i := range stringValue { + if i < 2 { + continue + } + + if stringValue[i-2] == stringValue[i-1] && stringValue[i-1] == stringValue[i] { + seq = append(seq, stringValue[i-2:i]) + } + } + + m := re.FindAllString(stringValue, -1) + allM := append(seq, m...) 
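+		// allM combines the repeated-character runs detected above with the
+		// sequential patterns matched by the regexp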
+ + if len(allM) > 0 { + result.addError( + new(StringSequentialError), + context, + value, + ErrorDetails{"sequential_chars": strings.Join(allM, ", ")}, + ) + } + } + + // pattern: + if currentSubSchema.pattern != nil { + if !currentSubSchema.pattern.MatchString(stringValue) { + result.addError( + new(DoesNotMatchPatternError), + context, + value, + ErrorDetails{"pattern": currentSubSchema.pattern}, + ) + + } + } + + // format + if currentSubSchema.format != "" { + if !FormatCheckers.IsFormat(currentSubSchema.format, stringValue) { + result.addError( + new(DoesNotMatchFormatError), + context, + value, + ErrorDetails{"format": currentSubSchema.format}, + ) + } + } + + result.incrementScore() +} + +func (v *subSchema) validateNumber(currentSubSchema *subSchema, value interface{}, result *Result, context *jsonContext) { + + // Ignore non numbers + if !isJsonNumber(value) { + return + } + + if internalLogEnabled { + internalLog("validateNumber %s", context.String()) + internalLog(" %v", value) + } + + number := value.(json.Number) + float64Value, _ := number.Float64() + + // multipleOf: + if currentSubSchema.multipleOf != nil { + + if !isFloat64AnInteger(float64Value / *currentSubSchema.multipleOf) { + result.addError( + new(MultipleOfError), + context, + resultErrorFormatJsonNumber(number), + ErrorDetails{"multiple": *currentSubSchema.multipleOf}, + ) + } + } + + //maximum & exclusiveMaximum: + if currentSubSchema.maximum != nil { + if currentSubSchema.exclusiveMaximum { + if float64Value >= *currentSubSchema.maximum { + result.addError( + new(NumberLTError), + context, + resultErrorFormatJsonNumber(number), + ErrorDetails{ + "max": resultErrorFormatNumber(*currentSubSchema.maximum), + }, + ) + } + } else { + if float64Value > *currentSubSchema.maximum { + result.addError( + new(NumberLTEError), + context, + resultErrorFormatJsonNumber(number), + ErrorDetails{ + "max": resultErrorFormatNumber(*currentSubSchema.maximum), + }, + ) + } + } + } + + //minimum & exclusiveMinimum: + if currentSubSchema.minimum != nil { + if currentSubSchema.exclusiveMinimum { + if float64Value <= *currentSubSchema.minimum { + result.addError( + new(NumberGTError), + context, + resultErrorFormatJsonNumber(number), + ErrorDetails{ + "min": resultErrorFormatNumber(*currentSubSchema.minimum), + }, + ) + } + } else { + if float64Value < *currentSubSchema.minimum { + result.addError( + new(NumberGTEError), + context, + resultErrorFormatJsonNumber(number), + ErrorDetails{ + "min": resultErrorFormatNumber(*currentSubSchema.minimum), + }, + ) + } + } + } + + result.incrementScore() +} diff --git a/vendor/github.com/TykTechnologies/gorpc/.gitignore b/vendor/github.com/TykTechnologies/gorpc/.gitignore new file mode 100644 index 00000000000..c93cf804d2e --- /dev/null +++ b/vendor/github.com/TykTechnologies/gorpc/.gitignore @@ -0,0 +1,3 @@ +tags +*.pprof +*.test diff --git a/vendor/github.com/TykTechnologies/gorpc/LICENSE b/vendor/github.com/TykTechnologies/gorpc/LICENSE new file mode 100644 index 00000000000..a43e7ad1c92 --- /dev/null +++ b/vendor/github.com/TykTechnologies/gorpc/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2014 Aliaksandr Valialkin + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to 
whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/vendor/github.com/TykTechnologies/gorpc/Makefile b/vendor/github.com/TykTechnologies/gorpc/Makefile
new file mode 100644
index 00000000000..a59dca751c5
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/gorpc/Makefile
@@ -0,0 +1,24 @@
+test:
+	GOMAXPROCS=1 go test
+	GOMAXPROCS=2 go test
+	GOMAXPROCS=4 go test
+	GOMAXPROCS=8 go test
+
+test-386:
+	GOARCH=386 GOMAXPROCS=1 go test
+	GOARCH=386 GOMAXPROCS=2 go test
+	GOARCH=386 GOMAXPROCS=4 go test
+	GOARCH=386 GOMAXPROCS=8 go test
+
+bench-1-goprocs:
+	GOMAXPROCS=1 go test -test.bench=".*"
+
+bench-2-goprocs:
+	GOMAXPROCS=2 go test -test.bench=".*"
+
+bench-4-goprocs:
+	GOMAXPROCS=4 go test -test.bench=".*"
+
+bench-8-goprocs:
+	GOMAXPROCS=8 go test -test.bench=".*"
+
diff --git a/vendor/github.com/TykTechnologies/gorpc/README.md b/vendor/github.com/TykTechnologies/gorpc/README.md
new file mode 100644
index 00000000000..0b3d7e5f68b
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/gorpc/README.md
@@ -0,0 +1,121 @@
+gorpc
+=====
+
+A simple, fast and scalable Go RPC library for high load.
+
+
+Gorpc provides the following features, which are useful for highly loaded
+projects with RPC:
+
+* It minimizes the number of connect() syscalls by pipelining request
+  and response messages over a single connection.
+
+* It minimizes the number of send() syscalls by packing as many pending
+  requests and responses as possible into a single compressed buffer
+  before passing it to the send() syscall.
+
+* It minimizes the number of recv() syscalls by reading and buffering as much
+  data as possible from the network.
+
+* It supports RPC batching, which allows preparing multiple requests and sending
+  them to the server in a single batch.
+
+These features help the OS minimize the overhead (CPU load, the number of
+TCP connections in TIME_WAIT and CLOSE_WAIT states, the number of network
+packets and the amount of network bandwidth) required for RPC processing under
+high load.
+
+
+Gorpc additionally provides the following features missing
+in [net/rpc](http://golang.org/pkg/net/rpc/):
+
+* Client automatically manages connections and automatically reconnects
+  to the server on connection errors.
+* Client supports response timeouts out of the box.
+* Client supports RPC batching out of the box.
+* Client detects stuck servers and immediately returns an error to the caller.
+* Client supports fast message passing to the Server, i.e. requests
+  without responses.
+* Both Client and Server provide network stats and RPC stats out of the box.
+* Commonly used RPC transports such as TCP, TLS and unix sockets are available
+  out of the box.
+* RPC transport compression is provided out of the box.
+* Server provides graceful shutdown out of the box.
+* Server supports concurrency throttling of RPC handlers out of the box.
+* Server may pass the client address to RPC handlers.
+* Server gracefully handles panics in RPC handlers.
+* Dispatcher accepts functions as RPC handlers.
+* Dispatcher supports registering multiple receiver objects of the same type
+  under distinct names.
+* Dispatcher supports RPC handlers with zero, one (request) or two (client
+  address and request) arguments and zero, one (either response or error)
+  or two (response, error) return values.
+
+
+The Dispatcher API provided by gorpc makes it easy to convert ordinary functions
+and/or struct methods into their RPC versions on both the client and server sides.
+See [Dispatcher examples](http://godoc.org/github.com/valyala/gorpc#Dispatcher)
+for more details.
+
+
+By default TCP connections are used as the underlying gorpc transport,
+but an arbitrary transport can be used instead: just provide custom
+implementations for Client.Dial and Server.Listener.
+RPC authentication, authorization and encryption can be implemented
+via a custom underlying transport and/or via OnConnect callbacks.
+Currently gorpc provides TCP, TLS and unix socket transports out of the box.
+
+
+Currently gorpc with default settings is successfully used in a highly loaded
+production environment serving up to 40K qps. Switching from http-based rpc
+to gorpc reduced the required network bandwidth from 300 Mbit/s to 24 Mbit/s.
+
+
+Docs
+====
+
+See http://godoc.org/github.com/valyala/gorpc .
+
+
+Usage
+=====
+
+Server:
+```go
+s := &gorpc.Server{
+	// Accept clients on this TCP address.
+	Addr: ":12345",
+
+	// Echo handler - just return back the message we received from the client
+	Handler: func(clientAddr string, request interface{}) interface{} {
+		log.Printf("Obtained request %+v from the client %s\n", request, clientAddr)
+		return request
+	},
+}
+if err := s.Serve(); err != nil {
+	log.Fatalf("Cannot start rpc server: %s", err)
+}
+```
+
+Client:
+```go
+c := &gorpc.Client{
+	// TCP address of the server.
+	Addr: "rpc.server.addr:12345",
+}
+c.Start()
+
+resp, err := c.Call("foobar")
+if err != nil {
+	log.Fatalf("Error when sending request to server: %s", err)
+}
+if resp.(string) != "foobar" {
+	log.Fatalf("Unexpected response from the server: %+v", resp)
+}
+```
+
+Both client and server collect connection stats - the number of bytes
+read / written and the number of calls / errors to send(), recv(), connect()
+and accept(). These stats are available at Client.Stats and Server.Stats.
+
+See tests for more usage examples.
diff --git a/vendor/github.com/TykTechnologies/gorpc/TODO b/vendor/github.com/TykTechnologies/gorpc/TODO
new file mode 100644
index 00000000000..31eac72237e
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/gorpc/TODO
@@ -0,0 +1,3 @@
+- Add support for channel request and response.
+- Add support for io.Writer, io.Reader and io.ReadWriter request and response.
+- Add HTTP transport via HTTP connection hijacking similar to net/rpc.
diff --git a/vendor/github.com/TykTechnologies/gorpc/client.go b/vendor/github.com/TykTechnologies/gorpc/client.go
new file mode 100644
index 00000000000..cba9e4c7bd8
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/gorpc/client.go
@@ -0,0 +1,718 @@
+package gorpc
+
+import (
+	"fmt"
+	"io"
+	"net"
+	"sync"
+	"time"
+)
+
+// Client implements an RPC client.
+//
+// The client must be started with Client.Start() before use.
+//
+// It is safe, and encouraged, to use a single client across an arbitrary
+// number of concurrently running goroutines.
+//
+// Default client settings are optimized for high load, so don't override
+// them without a valid reason.
+type Client struct {
+	// Server address to connect to.
+	//
+	// The address format depends on the underlying transport provided
+	// by Client.Dial. The following transports are provided out of the box:
+	// * TCP - see NewTCPClient() and NewTCPServer().
+	// * TLS - see NewTLSClient() and NewTLSServer().
+	// * Unix sockets - see NewUnixClient() and NewUnixServer().
+	//
+	// By default the TCP transport is used.
+	Addr string
+
+	// The number of concurrent connections the client should establish
+	// to the server.
+	// By default only one connection is established.
+	Conns int
+
+	// The maximum number of pending requests in the queue.
+	//
+	// The number of pending requests should exceed the expected number
+	// of concurrent goroutines calling the client's methods.
+	// Otherwise a lot of ClientError.Overflow errors may appear.
+	//
+	// Default is DefaultPendingMessages.
+	PendingRequests int
+
+	// Delay between request flushes.
+	//
+	// Negative values cause requests to be sent to the server immediately,
+	// without buffering. This minimizes rpc latency at the cost
+	// of higher CPU and network usage.
+	//
+	// Default value is DefaultFlushDelay.
+	FlushDelay time.Duration
+
+	// Maximum request time.
+	// Default value is DefaultRequestTimeout.
+	RequestTimeout time.Duration
+
+	// Disable data compression.
+	// By default data compression is enabled.
+	DisableCompression bool
+
+	// Size of the send buffer per underlying connection, in bytes.
+	// Default value is DefaultBufferSize.
+	SendBufferSize int
+
+	// Size of the recv buffer per underlying connection, in bytes.
+	// Default value is DefaultBufferSize.
+	RecvBufferSize int
+
+	// OnConnect is called whenever a connection to the server is established.
+	// The callback can be used for authentication/authorization/encryption
+	// and/or for custom transport wrapping.
+	//
+	// See also the Dial callback, which can be used for sophisticated
+	// transport implementations.
+	OnConnect OnConnectFunc
+
+	// The client calls this callback when it needs a new connection
+	// to the server.
+	// The client passes Client.Addr into Dial().
+	//
+	// Override this callback if you want a custom underlying transport
+	// and/or authentication/authorization.
+	// Don't forget to override Server.Listener accordingly.
+	//
+	// See also OnConnect for authentication/authorization purposes.
+	//
+	// * NewTLSClient() and NewTLSServer() can be used for encrypted rpc.
+	// * NewUnixClient() and NewUnixServer() can be used for fast local
+	//   inter-process rpc.
+	//
+	// By default it returns TCP connections established to Client.Addr.
+	Dial DialFunc
+
+	// LogError is used for error logging.
+	//
+	// By default the function set via SetErrorLogger() is used.
+	LogError LoggerFunc
+
+	// Connection statistics.
+	//
+	// The stats don't reset automatically. Feel free to reset them
+	// any time you wish.
+	Stats ConnStats
+
+	requestsChan chan *AsyncResult
+
+	clientStopChan chan struct{}
+	stopWg         sync.WaitGroup
+}
+
+// Start starts the rpc client. Establishes connection to the server on Client.Addr.
+//
+// All the response types the server may return must be registered
+// via gorpc.RegisterType() before starting the client.
+// There is no need to register base Go types such as int, string, bool,
+// float64, etc. or arrays, slices and maps containing base Go types.
+func (c *Client) Start() {
+	if c.LogError == nil {
+		c.LogError = errorLogger
+	}
+	if c.clientStopChan != nil {
+		panic("gorpc.Client: the given client is already started. Call Client.Stop() before calling Client.Start() again!")
+	}
+
+	if c.PendingRequests <= 0 {
+		c.PendingRequests = DefaultPendingMessages
+	}
+	if c.FlushDelay == 0 {
+		c.FlushDelay = DefaultFlushDelay
+	}
+	if c.RequestTimeout <= 0 {
+		c.RequestTimeout = DefaultRequestTimeout
+	}
+	if c.SendBufferSize <= 0 {
+		c.SendBufferSize = DefaultBufferSize
+	}
+	if c.RecvBufferSize <= 0 {
+		c.RecvBufferSize = DefaultBufferSize
+	}
+
+	c.requestsChan = make(chan *AsyncResult, c.PendingRequests)
+	c.clientStopChan = make(chan struct{})
+
+	if c.Conns <= 0 {
+		c.Conns = 1
+	}
+	if c.Dial == nil {
+		c.Dial = defaultDial
+	}
+
+	for i := 0; i < c.Conns; i++ {
+		c.stopWg.Add(1)
+		go clientHandler(c)
+	}
+}
+
+// Stop stops the rpc client. A stopped client can be started again.
+func (c *Client) Stop() {
+	if c.clientStopChan == nil {
+		panic("gorpc.Client: the client must be started before stopping it")
+	}
+	close(c.clientStopChan)
+	c.stopWg.Wait()
+	c.clientStopChan = nil
+}
+
+// Call sends the given request to the server and obtains a response
+// from the server.
+// Returns a non-nil error if the response cannot be obtained within
+// Client.RequestTimeout or if server connection problems occur.
+// The returned error can be cast to ClientError.
+//
+// Request and response types may be arbitrary. All the response types
+// the server may return must be registered via gorpc.RegisterType() before
+// starting the client.
+// There is no need to register base Go types such as int, string, bool,
+// float64, etc. or arrays, slices and maps containing base Go types.
+//
+// Hint: use Dispatcher for convenient construction of calls.
+//
+// Don't forget to start the client with Client.Start() before calling Client.Call().
+func (c *Client) Call(request interface{}) (response interface{}, err error) {
+	return c.CallTimeout(request, c.RequestTimeout)
+}
+
+// CallTimeout sends the given request to the server and obtains a response
+// from the server.
+// Returns a non-nil error if the response cannot be obtained within
+// the given timeout or if server connection problems occur.
+// The returned error can be cast to ClientError.
+//
+// Request and response types may be arbitrary. All the response types
+// the server may return must be registered via gorpc.RegisterType() before
+// starting the client.
+// There is no need to register base Go types such as int, string, bool,
+// float64, etc. or arrays, slices and maps containing base Go types.
+//
+// Hint: use Dispatcher for convenient construction of calls.
+//
+// Don't forget to start the client with Client.Start() before calling Client.CallTimeout().
+func (c *Client) CallTimeout(request interface{}, timeout time.Duration) (response interface{}, err error) {
+	var m *AsyncResult
+	if m, err = c.CallAsync(request); err != nil {
+		return nil, err
+	}
+
+	t := acquireTimer(timeout)
+
+	select {
+	case <-m.Done:
+		response, err = m.Response, m.Error
+	case <-t.C:
+		err = getClientTimeoutError(c, timeout)
+	}
+
+	releaseTimer(t)
+	return
+}
+
+func getClientTimeoutError(c *Client, timeout time.Duration) error {
+	err := fmt.Errorf("gorpc.Client: [%s]. Cannot obtain response during timeout=%s", c.Addr, timeout)
+	c.LogError("%s", err)
+	return &ClientError{
+		Timeout: true,
+		err:     err,
+	}
+}
+
+// Send sends the given request to the server and doesn't wait for the response.
+//
+// Since this is a 'fire and forget' function, which never waits for a response,
+// it cannot guarantee that the server receives and successfully processes
+// the given request. Though in most cases under normal conditions requests
+// should reach the server and it should successfully process them.
+// Send semantics are similar to UDP message semantics.
+//
+// The server may return an arbitrary response to a Send() request, but the
+// response is totally ignored.
+//
+// Don't forget to start the client with Client.Start() before calling Client.Send().
+func (c *Client) Send(request interface{}) error {
+	_, err := c.callAsync(request, true)
+	return err
+}
+
+// AsyncResult is a result returned from Client.CallAsync().
+type AsyncResult struct {
+	// The response can be read only after <-Done unblocks.
+	Response interface{}
+
+	// The error can be read only after <-Done unblocks.
+	// The error can be cast to ClientError.
+	Error error
+
+	// Response and Error become available after <-Done unblocks.
+	Done <-chan struct{}
+
+	request interface{}
+	t       time.Time
+	done    chan struct{}
+}
+
+// CallAsync starts an async rpc call.
+//
+// The rpc call is complete after <-AsyncResult.Done unblocks.
+// If you want to cancel the request, just throw away the returned AsyncResult.
+//
+// CallAsync doesn't respect Client.RequestTimeout - the response timeout
+// may be controlled by the caller via something like:
+//
+//	r := c.CallAsync("foobar")
+//	select {
+//	case <-time.After(c.RequestTimeout):
+//		log.Printf("rpc timeout!")
+//	case <-r.Done:
+//		processResponse(r.Response, r.Error)
+//	}
+//
+// Don't forget to start the client with Client.Start() before
+// calling Client.CallAsync().
+func (c *Client) CallAsync(request interface{}) (*AsyncResult, error) {
+	return c.callAsync(request, false)
+}
+
+func (c *Client) callAsync(request interface{}, skipResponse bool) (ar *AsyncResult, err error) {
+	m := &AsyncResult{
+		request: request,
+	}
+	if !skipResponse {
+		m.t = time.Now()
+		m.done = make(chan struct{})
+		m.Done = m.done
+	}
+
+	select {
+	case c.requestsChan <- m:
+		return m, nil
+	default:
+		err = fmt.Errorf("gorpc.Client: [%s]. Requests' queue with size=%d is overflown. Try increasing Client.PendingRequests value", c.Addr, cap(c.requestsChan))
+		c.LogError("%s", err)
+		err = &ClientError{
+			Overflow: true,
+			err:      err,
+		}
+		return nil, err
+	}
+}
+
+// Batch allows grouping and executing multiple RPCs in a single batch.
+//
+// A Batch may be created via Client.NewBatch().
+type Batch struct {
+	c       *Client
+	ops     []*BatchResult
+	opsLock sync.Mutex
+}
+
+// BatchResult is a result returned from Batch.Add*().
+type BatchResult struct {
+	// The response can be read only after Batch.Call*() returns.
+	Response interface{}
+
+	// The error can be read only after Batch.Call*() returns.
+	// The error can be cast to ClientError.
+	Error error
+
+	// <-Done unblocks after Batch.Call*() returns.
+	// Response and Error become available after <-Done unblocks.
+	Done <-chan struct{}
+
+	request interface{}
+	ctx     interface{}
+	done    chan struct{}
+}
+
+// NewBatch creates a new RPC batch.
+//
+// It is safe to create multiple concurrent batches from a single client.
+//
+// Don't forget to start the client with Client.Start() before working
+// with batched RPC.
+func (c *Client) NewBatch() *Batch {
+	return &Batch{
+		c: c,
+	}
+}
+
+// Add adds a new request to the RPC batch.
+//
+// The order of batched RPCs execution on the server is unspecified.
+//
+// All the requests added to the batch are sent to the server at once
+// when Batch.Call*() is called.
+//
+// It is safe to add multiple requests to the same batch from concurrently
+// running goroutines.
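+//
+// A minimal batched-call sketch (illustrative, not part of the original
+// library docs; assumes a started Client c):
+//
+//	b := c.NewBatch()
+//	r1 := b.Add("req1")
+//	r2 := b.Add("req2")
+//	if err := b.Call(); err == nil {
+//		// r1.Response and r2.Response are now safe to read.
+//	}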
+func (b *Batch) Add(request interface{}) *BatchResult { + return b.add(request, false) +} + +// AddSkipResponse adds new request to the RPC batch and doesn't care +// about the response. +// +// The order of batched RPCs execution on the server is unspecified. +// +// All the requests added to the batch are sent to the server at once +// when Batch.Call*() is called. +// +// It is safe adding multiple requests to the same batch from concurrently +// running goroutines. +func (b *Batch) AddSkipResponse(request interface{}) { + b.add(request, true) +} + +func (b *Batch) add(request interface{}, skipResponse bool) *BatchResult { + br := &BatchResult{ + request: request, + } + if !skipResponse { + br.done = make(chan struct{}) + br.Done = br.done + } + + b.opsLock.Lock() + b.ops = append(b.ops, br) + b.opsLock.Unlock() + + return br +} + +// Call calls all the RPCs added via Batch.Add(). +// +// The order of batched RPCs execution on the server is unspecified. +// +// The caller may read all BatchResult contents returned from Batch.Add() +// after the Call returns. +// +// It is guaranteed that all <-BatchResult.Done channels are unblocked after +// the Call returns. +func (b *Batch) Call() error { + return b.CallTimeout(b.c.RequestTimeout) +} + +// CallTimeout calls all the RPCs added via Batch.Add() and waits for +// all the RPC responses during the given timeout. +// +// The caller may read all BatchResult contents returned from Batch.Add() +// after the CallTimeout returns. +// +// It is guaranteed that all <-BatchResult.Done channels are unblocked after +// the CallTimeout returns. +func (b *Batch) CallTimeout(timeout time.Duration) error { + b.opsLock.Lock() + ops := b.ops + b.ops = nil + b.opsLock.Unlock() + + results := make([]*AsyncResult, len(ops)) + for i := range ops { + op := ops[i] + r, err := callAsyncRetry(b.c, op.request, op.done == nil, 5) + if err != nil { + return err + } + results[i] = r + } + + t := acquireTimer(timeout) + + for i := range results { + r := results[i] + op := ops[i] + if op.done == nil { + continue + } + + select { + case <-r.Done: + op.Response, op.Error = r.Response, r.Error + close(op.done) + case <-t.C: + releaseTimer(t) + err := getClientTimeoutError(b.c, timeout) + for ; i < len(results); i++ { + op = ops[i] + op.Error = err + if op.done != nil { + close(op.done) + } + } + return err + } + } + + releaseTimer(t) + + return nil +} + +func callAsyncRetry(c *Client, request interface{}, skipResponse bool, retriesCount int) (*AsyncResult, error) { + retriesCount++ + for { + ar, err := c.callAsync(request, skipResponse) + if err == nil { + return ar, nil + } + if !err.(*ClientError).Overflow { + return nil, err + } + retriesCount-- + if retriesCount <= 0 { + return nil, err + } + time.Sleep(10 * time.Millisecond) + } +} + +// ClientError is an error Client methods can return. +type ClientError struct { + // Set if the error is timeout-related. + Timeout bool + + // Set if the error is connection-related. + Connection bool + + // Set if the error is server-related. + Server bool + + // Set if the error is related to internal resources' overflow. + // Increase PendingRequests if you see a lot of such errors. + Overflow bool + + err error +} + +func (e *ClientError) Error() string { + return e.err.Error() +} + +func clientHandler(c *Client) { + defer c.stopWg.Done() + + var conn net.Conn + var err error + + for { + dialChan := make(chan struct{}) + go func() { + if conn, err = c.Dial(c.Addr); err != nil { + c.LogError("gorpc.Client: [%s]. 
Cannot establish rpc connection: [%s]", c.Addr, err) + time.Sleep(time.Second) + } + close(dialChan) + }() + + select { + case <-c.clientStopChan: + return + case <-dialChan: + c.Stats.incDialCalls() + } + + if err != nil { + c.Stats.incDialErrors() + continue + } + clientHandleConnection(c, conn) + } +} + +func clientHandleConnection(c *Client, conn net.Conn) { + if c.OnConnect != nil { + newConn, _, err := c.OnConnect(conn) + if err != nil { + c.LogError("gorpc.Client: [%s]. OnConnect error: [%s]", c.Addr, err) + conn.Close() + return + } + conn = newConn + } + + var buf [1]byte + if !c.DisableCompression { + buf[0] = 1 + } + _, err := conn.Write(buf[:]) + if err != nil { + c.LogError("gorpc.Client: [%s]. Error when writing handshake to server: [%s]", c.Addr, err) + conn.Close() + return + } + + stopChan := make(chan struct{}) + + pendingRequests := make(map[uint64]*AsyncResult) + var pendingRequestsLock sync.Mutex + + writerDone := make(chan error, 1) + go clientWriter(c, conn, pendingRequests, &pendingRequestsLock, stopChan, writerDone) + + readerDone := make(chan error, 1) + go clientReader(c, conn, pendingRequests, &pendingRequestsLock, readerDone) + + select { + case err = <-writerDone: + close(stopChan) + conn.Close() + <-readerDone + case err = <-readerDone: + close(stopChan) + conn.Close() + <-writerDone + case <-c.clientStopChan: + close(stopChan) + conn.Close() + <-readerDone + <-writerDone + } + + if err != nil { + c.LogError("%s", err) + err = &ClientError{ + Connection: true, + err: err, + } + } + for _, m := range pendingRequests { + m.Error = err + if m.done != nil { + close(m.done) + } + } +} + +func clientWriter(c *Client, w io.Writer, pendingRequests map[uint64]*AsyncResult, pendingRequestsLock *sync.Mutex, stopChan <-chan struct{}, done chan<- error) { + var err error + defer func() { done <- err }() + + e := newMessageEncoder(w, c.SendBufferSize, !c.DisableCompression, &c.Stats) + defer e.Close() + + t := time.NewTimer(c.FlushDelay) + var flushChan <-chan time.Time + var wr wireRequest + var msgID uint64 + for { + var m *AsyncResult + + select { + case m = <-c.requestsChan: + default: + select { + case <-stopChan: + return + case m = <-c.requestsChan: + case <-flushChan: + if err = e.Flush(); err != nil { + err = fmt.Errorf("gorpc.Client: [%s]. Cannot flush requests to underlying stream: [%s]", c.Addr, err) + return + } + flushChan = nil + continue + } + } + + if flushChan == nil { + flushChan = getFlushChan(t, c.FlushDelay) + } + + if m.done == nil { + wr.ID = 0 + } else { + msgID++ + if msgID == 0 { + msgID = 1 + } + pendingRequestsLock.Lock() + n := len(pendingRequests) + for { + if _, ok := pendingRequests[msgID]; !ok { + break + } + msgID++ + } + pendingRequests[msgID] = m + pendingRequestsLock.Unlock() + + if n > 10*c.PendingRequests { + err = fmt.Errorf("gorpc.Client: [%s]. The server didn't return %d responses yet. Closing server connection in order to prevent client resource leaks", c.Addr, n) + return + } + + wr.ID = msgID + } + + wr.Request = m.request + m.request = nil + if err = e.Encode(wr); err != nil { + err = fmt.Errorf("gorpc.Client: [%s]. Cannot send request to wire: [%s]", c.Addr, err) + return + } + wr.Request = nil + } +} + +func clientReader(c *Client, r io.Reader, pendingRequests map[uint64]*AsyncResult, pendingRequestsLock *sync.Mutex, done chan<- error) { + var err error + defer func() { + if r := recover(); r != nil { + if err == nil { + err = fmt.Errorf("gorpc.Client: [%s]. 
Panic when reading data from server: %v", c.Addr, r) + } + } + done <- err + }() + + d := newMessageDecoder(r, c.RecvBufferSize, !c.DisableCompression, &c.Stats) + defer d.Close() + + var wr wireResponse + for { + if err = d.Decode(&wr); err != nil { + err = fmt.Errorf("gorpc.Client: [%s]. Cannot decode response: [%s]", c.Addr, err) + return + } + + pendingRequestsLock.Lock() + m, ok := pendingRequests[wr.ID] + if ok { + delete(pendingRequests, wr.ID) + } + pendingRequestsLock.Unlock() + + if !ok { + err = fmt.Errorf("gorpc.Client: [%s]. Unexpected msgID=[%d] obtained from server", c.Addr, wr.ID) + return + } + + m.Response = wr.Response + + wr.ID = 0 + wr.Response = nil + if wr.Error != "" { + m.Error = &ClientError{ + Server: true, + err: fmt.Errorf("gorpc.Client: [%s]. Server error: [%s]", c.Addr, wr.Error), + } + wr.Error = "" + } + + close(m.done) + + c.Stats.incRPCCalls() + c.Stats.incRPCTime(uint64(time.Since(m.t).Seconds() * 1000)) + } +} diff --git a/vendor/github.com/TykTechnologies/gorpc/common.go b/vendor/github.com/TykTechnologies/gorpc/common.go new file mode 100644 index 00000000000..bb0823b036d --- /dev/null +++ b/vendor/github.com/TykTechnologies/gorpc/common.go @@ -0,0 +1,118 @@ +package gorpc + +import ( + "fmt" + "log" + "net" + "sync" + "time" +) + +const ( + // DefaultConcurrency is the default number of concurrent rpc calls + // the server can process. + DefaultConcurrency = 8 * 1024 + + // DefaultRequestTimeout is the default timeout for client request. + DefaultRequestTimeout = 20 * time.Second + + // DefaultPendingMessages is the default number of pending messages + // handled by Client and Server. + DefaultPendingMessages = 32 * 1024 + + // DefaultFlushDelay is the default delay between message flushes + // on Client and Server. + DefaultFlushDelay = -1 + + // DefaultBufferSize is the default size for Client and Server buffers. + DefaultBufferSize = 64 * 1024 +) + +// OnConnectFunc is a callback, which may be called by both Client and Server +// on every connection creation if assigned +// to Client.OnConnect / Server.OnConnect. +// +// remoteAddr is the address of the remote end for the established +// connection rwc. +// +// The callback must return either rwc itself or a rwc wrapper. +// The returned connection wrapper MUST send all the data to the underlying +// rwc on every Write() call, otherwise the connection will hang forever. +// +// The callback may be used for authentication/authorization and/or custom +// transport wrapping. +type OnConnectFunc func(rwc net.Conn) (net.Conn, string, error) + +// LoggerFunc is an error logging function to pass to gorpc.SetErrorLogger(). +type LoggerFunc func(format string, args ...interface{}) + +var errorLogger = LoggerFunc(log.Printf) + +// SetErrorLogger sets the given error logger to use in gorpc. +// +// By default log.Printf is used for error logging. +func SetErrorLogger(f LoggerFunc) { + errorLogger = f +} + +// NilErrorLogger discards all error messages. +// +// Pass NilErrorLogger to SetErrorLogger() in order to suppress error log generated +// by gorpc. +func NilErrorLogger(format string, args ...interface{}) {} + +func logPanic(format string, args ...interface{}) { + errorLogger(format, args...) + s := fmt.Sprintf(format, args...) 
+	panic(s)
+}
+
+var timerPool sync.Pool
+
+func acquireTimer(timeout time.Duration) *time.Timer {
+	tv := timerPool.Get()
+	if tv == nil {
+		return time.NewTimer(timeout)
+	}
+
+	t := tv.(*time.Timer)
+	if t.Reset(timeout) {
+		panic("BUG: Active timer trapped into acquireTimer()")
+	}
+	return t
+}
+
+func releaseTimer(t *time.Timer) {
+	if !t.Stop() {
+		// Collect the possibly added time from the channel
+		// if the timer has been stopped and nobody collected its value.
+		select {
+		case <-t.C:
+		default:
+		}
+	}
+
+	timerPool.Put(t)
+}
+
+var closedFlushChan = make(chan time.Time)
+
+func init() {
+	close(closedFlushChan)
+}
+
+func getFlushChan(t *time.Timer, flushDelay time.Duration) <-chan time.Time {
+	if flushDelay <= 0 {
+		return closedFlushChan
+	}
+
+	if !t.Stop() {
+		// Exhaust the expired timer's chan.
+		select {
+		case <-t.C:
+		default:
+		}
+	}
+	t.Reset(flushDelay)
+	return t.C
+}
diff --git a/vendor/github.com/TykTechnologies/gorpc/conn_stats.go b/vendor/github.com/TykTechnologies/gorpc/conn_stats.go
new file mode 100644
index 00000000000..bc08c3da47a
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/gorpc/conn_stats.go
@@ -0,0 +1,127 @@
+package gorpc
+
+import (
+	"io"
+	"sync"
+	"time"
+)
+
+// ConnStats provides connection statistics. Applies to both gorpc.Client
+// and gorpc.Server.
+//
+// Use the stats returned from ConnStats.Snapshot() on a live Client and/or
+// Server, since the original stats can be updated by concurrently running
+// goroutines.
+type ConnStats struct {
+	// The number of rpc calls performed.
+	RPCCalls uint64
+
+	// The total aggregate time for all rpc calls in milliseconds.
+	//
+	// This time can be used for calculating the average response time
+	// per RPC:
+	//	avgRPCTime = RPCTime / RPCCalls
+	RPCTime uint64
+
+	// The number of bytes written to the underlying connections.
+	BytesWritten uint64
+
+	// The number of bytes read from the underlying connections.
+	BytesRead uint64
+
+	// The number of Read() calls.
+	ReadCalls uint64
+
+	// The number of Read() errors.
+	ReadErrors uint64
+
+	// The number of Write() calls.
+	WriteCalls uint64
+
+	// The number of Write() errors.
+	WriteErrors uint64
+
+	// The number of Dial() calls.
+	DialCalls uint64
+
+	// The number of Dial() errors.
+	DialErrors uint64
+
+	// The number of Accept() calls.
+	AcceptCalls uint64
+
+	// The number of Accept() errors.
+	AcceptErrors uint64
+
+	FuncCallStats map[string]uint64
+
+	// lock is for 386 builds. See https://github.com/valyala/gorpc/issues/5 .
+	lock sync.Mutex
+}
+
+// AvgRPCTime returns the average RPC execution time.
+//
+// Use the stats returned from ConnStats.Snapshot() on a live Client and/or
+// Server, since the original stats can be updated by concurrently running
+// goroutines.
+func (cs *ConnStats) AvgRPCTime() time.Duration {
+	return time.Duration(float64(cs.RPCTime)/float64(cs.RPCCalls)) * time.Millisecond
+}
+
+// AvgRPCBytes returns the average bytes sent / received per RPC.
+//
+// Use the stats returned from ConnStats.Snapshot() on a live Client and/or
+// Server, since the original stats can be updated by concurrently running
+// goroutines.
+func (cs *ConnStats) AvgRPCBytes() (send float64, recv float64) {
+	return float64(cs.BytesWritten) / float64(cs.RPCCalls), float64(cs.BytesRead) / float64(cs.RPCCalls)
+}
+
+// AvgRPCCalls returns the average number of write() / read() syscalls per RPC.
+//
+// Use the stats returned from ConnStats.Snapshot() on a live Client and/or
+// Server, since the original stats can be updated by concurrently running
+// goroutines.
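+//
+// Illustrative use (a sketch, not from the original docs; assumes
+// a started Client c):
+//
+//	w, r := c.Stats.Snapshot().AvgRPCCalls()
+//	log.Printf("syscalls per RPC: %.2f write, %.2f read", w, r)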
+func (cs *ConnStats) AvgRPCCalls() (write float64, read float64) { + return float64(cs.WriteCalls) / float64(cs.RPCCalls), float64(cs.ReadCalls) / float64(cs.RPCCalls) +} + +type writerCounter struct { + w io.Writer + cs *ConnStats +} + +type readerCounter struct { + r io.Reader + cs *ConnStats +} + +func newWriterCounter(w io.Writer, cs *ConnStats) io.Writer { + return &writerCounter{ + w: w, + cs: cs, + } +} + +func newReaderCounter(r io.Reader, cs *ConnStats) io.Reader { + return &readerCounter{ + r: r, + cs: cs, + } +} + +func (w *writerCounter) Write(p []byte) (int, error) { + n, err := w.w.Write(p) + w.cs.incWriteCalls() + if err != nil { + w.cs.incWriteErrors() + } + w.cs.addBytesWritten(uint64(n)) + return n, err +} + +func (r *readerCounter) Read(p []byte) (int, error) { + n, err := r.r.Read(p) + r.cs.incReadCalls() + if err != nil { + r.cs.incReadErrors() + } + r.cs.addBytesRead(uint64(n)) + return n, err +} diff --git a/vendor/github.com/TykTechnologies/gorpc/conn_stats_386.go b/vendor/github.com/TykTechnologies/gorpc/conn_stats_386.go new file mode 100644 index 00000000000..d1ed7b090fd --- /dev/null +++ b/vendor/github.com/TykTechnologies/gorpc/conn_stats_386.go @@ -0,0 +1,123 @@ +// Separate implementation for 386, since it has broken support for atomics. +// See https://github.com/valyala/gorpc/issues/5 for details. + +// +build 386 + +package gorpc + +import ( + "sync" +) + +// Snapshot returns connection statistics' snapshot. +// +// Use stats returned from ConnStats.Snapshot() on live Client and / or Server, +// since the original stats can be updated by concurrently running goroutines. +func (cs *ConnStats) Snapshot() *ConnStats { + cs.lock.Lock() + snapshot := *cs + cs.lock.Unlock() + + snapshot.lock = sync.Mutex{} + return &snapshot +} + +// Reset resets all the stats counters. 
+func (cs *ConnStats) Reset() { + cs.lock.Lock() + cs.RPCCalls = 0 + cs.RPCTime = 0 + cs.BytesWritten = 0 + cs.BytesRead = 0 + cs.WriteCalls = 0 + cs.WriteErrors = 0 + cs.ReadCalls = 0 + cs.ReadErrors = 0 + cs.DialCalls = 0 + cs.DialErrors = 0 + cs.AcceptCalls = 0 + cs.AcceptErrors = 0 + cs.FuncCallStats = make(map[string]uint64) + cs.lock.Unlock() +} + +func (cs *ConnStats) incRPCCalls() { + cs.lock.Lock() + cs.RPCCalls++ + cs.lock.Unlock() +} + +func (cs *ConnStats) incRPCTime(dt uint64) { + cs.lock.Lock() + cs.RPCTime += dt + cs.lock.Unlock() +} + +func (cs *ConnStats) addBytesWritten(n uint64) { + cs.lock.Lock() + cs.BytesWritten += n + cs.lock.Unlock() +} + +func (cs *ConnStats) addBytesRead(n uint64) { + cs.lock.Lock() + cs.BytesRead += n + cs.lock.Unlock() +} + +func (cs *ConnStats) incReadCalls() { + cs.lock.Lock() + cs.ReadCalls++ + cs.lock.Unlock() +} + +func (cs *ConnStats) incReadErrors() { + cs.lock.Lock() + cs.ReadErrors++ + cs.lock.Unlock() +} + +func (cs *ConnStats) incWriteCalls() { + cs.lock.Lock() + cs.WriteCalls++ + cs.lock.Unlock() +} + +func (cs *ConnStats) incWriteErrors() { + cs.lock.Lock() + cs.WriteErrors++ + cs.lock.Unlock() +} + +func (cs *ConnStats) incDialCalls() { + cs.lock.Lock() + cs.DialCalls++ + cs.lock.Unlock() +} + +func (cs *ConnStats) incDialErrors() { + cs.lock.Lock() + cs.DialErrors++ + cs.lock.Unlock() +} + +func (cs *ConnStats) incAcceptCalls() { + cs.lock.Lock() + cs.AcceptCalls++ + cs.lock.Unlock() +} + +func (cs *ConnStats) incAcceptErrors() { + cs.lock.Lock() + cs.AcceptErrors++ + cs.lock.Unlock() +} + +func (cs *ConnStats) incFuncCalls(fn string) { + cs.lock.Lock() + if cs.FuncCallStats == nil { + cs.FuncCallStats = make(map[string]uint64) + } + cs.FuncCallStats[fn]++ + cs.lock.Unlock() +} diff --git a/vendor/github.com/TykTechnologies/gorpc/conn_stats_generic.go b/vendor/github.com/TykTechnologies/gorpc/conn_stats_generic.go new file mode 100644 index 00000000000..51dcce20c1d --- /dev/null +++ b/vendor/github.com/TykTechnologies/gorpc/conn_stats_generic.go @@ -0,0 +1,112 @@ +// +build !386 + +package gorpc + +import ( + "sync/atomic" +) + +// Snapshot returns connection statistics' snapshot. +// +// Use stats returned from ConnStats.Snapshot() on live Client and / or Server, +// since the original stats can be updated by concurrently running goroutines. +func (cs *ConnStats) Snapshot() *ConnStats { + cs.lock.Lock() + funcCallStatsCopy := make(map[string]uint64) + for k, v := range cs.FuncCallStats { + funcCallStatsCopy[k] = v + } + cs.lock.Unlock() + + return &ConnStats{ + RPCCalls: atomic.LoadUint64(&cs.RPCCalls), + RPCTime: atomic.LoadUint64(&cs.RPCTime), + BytesWritten: atomic.LoadUint64(&cs.BytesWritten), + BytesRead: atomic.LoadUint64(&cs.BytesRead), + ReadCalls: atomic.LoadUint64(&cs.ReadCalls), + ReadErrors: atomic.LoadUint64(&cs.ReadErrors), + WriteCalls: atomic.LoadUint64(&cs.WriteCalls), + WriteErrors: atomic.LoadUint64(&cs.WriteErrors), + DialCalls: atomic.LoadUint64(&cs.DialCalls), + DialErrors: atomic.LoadUint64(&cs.DialErrors), + AcceptCalls: atomic.LoadUint64(&cs.AcceptCalls), + AcceptErrors: atomic.LoadUint64(&cs.AcceptErrors), + FuncCallStats: funcCallStatsCopy, + } +} + +// Reset resets all the stats counters. 
+func (cs *ConnStats) Reset() { + atomic.StoreUint64(&cs.RPCCalls, 0) + atomic.StoreUint64(&cs.RPCTime, 0) + atomic.StoreUint64(&cs.BytesWritten, 0) + atomic.StoreUint64(&cs.BytesRead, 0) + atomic.StoreUint64(&cs.WriteCalls, 0) + atomic.StoreUint64(&cs.WriteErrors, 0) + atomic.StoreUint64(&cs.ReadCalls, 0) + atomic.StoreUint64(&cs.ReadErrors, 0) + atomic.StoreUint64(&cs.DialCalls, 0) + atomic.StoreUint64(&cs.DialErrors, 0) + atomic.StoreUint64(&cs.AcceptCalls, 0) + atomic.StoreUint64(&cs.AcceptErrors, 0) + cs.lock.Lock() + cs.FuncCallStats = make(map[string]uint64) + cs.lock.Unlock() +} + +func (cs *ConnStats) incRPCCalls() { + atomic.AddUint64(&cs.RPCCalls, 1) +} + +func (cs *ConnStats) incRPCTime(dt uint64) { + atomic.AddUint64(&cs.RPCTime, dt) +} + +func (cs *ConnStats) addBytesWritten(n uint64) { + atomic.AddUint64(&cs.BytesWritten, n) +} + +func (cs *ConnStats) addBytesRead(n uint64) { + atomic.AddUint64(&cs.BytesRead, n) +} + +func (cs *ConnStats) incReadCalls() { + atomic.AddUint64(&cs.ReadCalls, 1) +} + +func (cs *ConnStats) incReadErrors() { + atomic.AddUint64(&cs.ReadErrors, 1) +} + +func (cs *ConnStats) incWriteCalls() { + atomic.AddUint64(&cs.WriteCalls, 1) +} + +func (cs *ConnStats) incWriteErrors() { + atomic.AddUint64(&cs.WriteErrors, 1) +} + +func (cs *ConnStats) incDialCalls() { + atomic.AddUint64(&cs.DialCalls, 1) +} + +func (cs *ConnStats) incDialErrors() { + atomic.AddUint64(&cs.DialErrors, 1) +} + +func (cs *ConnStats) incAcceptCalls() { + atomic.AddUint64(&cs.AcceptCalls, 1) +} + +func (cs *ConnStats) incAcceptErrors() { + atomic.AddUint64(&cs.AcceptErrors, 1) +} + +func (cs *ConnStats) incFuncCalls(fn string) { + cs.lock.Lock() + if cs.FuncCallStats == nil { + cs.FuncCallStats = make(map[string]uint64) + } + cs.FuncCallStats[fn]++ + cs.lock.Unlock() +} diff --git a/vendor/github.com/TykTechnologies/gorpc/dispatcher.go b/vendor/github.com/TykTechnologies/gorpc/dispatcher.go new file mode 100644 index 00000000000..3cb954af3cc --- /dev/null +++ b/vendor/github.com/TykTechnologies/gorpc/dispatcher.go @@ -0,0 +1,620 @@ +package gorpc + +import ( + "errors" + "fmt" + "reflect" + "strings" + "sync" + "time" +) + +// Dispatcher helps constructing HandlerFunc for dispatching across multiple +// functions and/or services. +// +// Dispatcher also automatically registers all request and response types +// for all functions and/or methods registered via AddFunc() and AddService(), +// so there is no need in calling RegisterType() for them. +// +// See examples for details. +type Dispatcher struct { + serviceMap map[string]*serviceData +} + +type serviceData struct { + sv reflect.Value + funcMap map[string]*funcData +} + +type funcData struct { + inNum int + reqt reflect.Type + fv reflect.Value +} + +// NewDispatcher returns new dispatcher. +func NewDispatcher() *Dispatcher { + return &Dispatcher{ + serviceMap: make(map[string]*serviceData), + } +} + +// AddFunc registers the given function f under the name funcName. +// +// The function must accept zero, one or two input arguments. +// If the function has two arguments, then the first argument must have +// string type - the server will pass client address in this parameter. +// +// The function must return zero, one or two values. +// * If the function has two return values, then the second value must have +// error type - the server will propagate this error to the client. +// +// * If the function returns only error value, then the server treats it +// as error, not return value, when sending to the client. 
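+//
+// For instance, each of the following is a valid handler shape
+// (an illustrative sketch, not from the original docs; only base Go
+// types are used, so no extra type registration is needed):
+//
+//	d.AddFunc("Ping", func() {})
+//	d.AddFunc("Echo", func(req string) string { return req })
+//	d.AddFunc("Peer", func(clientAddr string, req string) (string, error) { return clientAddr, nil })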
+//
+// An arbitrary number of functions can be registered in the dispatcher.
+//
+// See examples for details.
+func (d *Dispatcher) AddFunc(funcName string, f interface{}) {
+	sd, ok := d.serviceMap[""]
+	if !ok {
+		sd = &serviceData{
+			funcMap: make(map[string]*funcData),
+		}
+		d.serviceMap[""] = sd
+	}
+
+	if _, ok := sd.funcMap[funcName]; ok {
+		logPanic("gorpc.Dispatcher: function %s has already been registered", funcName)
+	}
+
+	fd := &funcData{
+		fv: reflect.Indirect(reflect.ValueOf(f)),
+	}
+	var err error
+	if fd.inNum, fd.reqt, err = validateFunc(funcName, fd.fv, false); err != nil {
+		logPanic("gorpc.Dispatcher: %s", err)
+	}
+	sd.funcMap[funcName] = fd
+}
+
+// AddService registers the public methods of the given service under
+// the given name serviceName.
+//
+// Since only public methods are registered, the service must have at least
+// one public method.
+//
+// All public methods must conform to the requirements described in AddFunc().
+func (d *Dispatcher) AddService(serviceName string, service interface{}) {
+	if serviceName == "" {
+		logPanic("gorpc.Dispatcher: serviceName cannot be empty")
+	}
+	if _, ok := d.serviceMap[serviceName]; ok {
+		logPanic("gorpc.Dispatcher: service with name=[%s] has already been registered", serviceName)
+	}
+
+	funcMap := make(map[string]*funcData)
+
+	st := reflect.TypeOf(service)
+	if st.Kind() == reflect.Struct {
+		logPanic("gorpc.Dispatcher: service [%s] must be a pointer to struct, i.e. *%s", serviceName, st)
+	}
+
+	for i := 0; i < st.NumMethod(); i++ {
+		mv := st.Method(i)
+
+		if mv.PkgPath != "" {
+			// skip unexported methods
+			continue
+		}
+
+		funcName := serviceName + "." + mv.Name
+		fd := &funcData{
+			fv: mv.Func,
+		}
+		var err error
+		if fd.inNum, fd.reqt, err = validateFunc(funcName, fd.fv, true); err != nil {
+			logPanic("gorpc.Dispatcher: %s", err)
+		}
+		funcMap[mv.Name] = fd
+	}
+
+	if len(funcMap) == 0 {
+		logPanic("gorpc.Dispatcher: the service %s has no methods suitable for rpc", serviceName)
+	}
+
+	d.serviceMap[serviceName] = &serviceData{
+		sv:      reflect.ValueOf(service),
+		funcMap: funcMap,
+	}
+}
+
+func validateFunc(funcName string, fv reflect.Value, isMethod bool) (inNum int, reqt reflect.Type, err error) {
+	if funcName == "" {
+		err = fmt.Errorf("funcName cannot be empty")
+		return
+	}
+
+	ft := fv.Type()
+	if ft.Kind() != reflect.Func {
+		err = fmt.Errorf("function [%s] must be a function instead of %s", funcName, ft)
+		return
+	}
+
+	inNum = ft.NumIn()
+	outNum := ft.NumOut()
+
+	dt := 0
+	if isMethod {
+		dt = 1
+	}
+
+	if inNum == 2+dt {
+		if ft.In(dt).Kind() != reflect.String {
+			err = fmt.Errorf("unexpected type for the first argument of the function [%s]: [%s]. Expected string", funcName, ft.In(dt))
+			return
+		}
+	} else if inNum > 2+dt {
+		err = fmt.Errorf("unexpected number of arguments in the function [%s]: %d. Expected 0, 1 (request) or 2 (clientAddr, request)", funcName, inNum-dt)
+		return
+	}
+
+	if outNum == 2 {
+		if !isErrorType(ft.Out(1)) {
+			err = fmt.Errorf("unexpected type for the second return value of the function [%s]: [%s]. Expected [%s]", funcName, ft.Out(1), errt)
+			return
+		}
+	} else if outNum > 2 {
+		err = fmt.Errorf("unexpected number of return values for the function %s: %d. 
Expected 0, 1 (response) or 2 (response, error)", funcName, outNum) + return + } + + if inNum > dt { + reqt = ft.In(inNum - 1) + if err = registerType("request", funcName, reqt); err != nil { + return + } + } + + if outNum > 0 { + respt := ft.Out(0) + if !isErrorType(respt) { + if err = registerType("response", funcName, ft.Out(0)); err != nil { + return + } + } + } + + return +} + +func registerType(s, funcName string, t reflect.Type) error { + if t.Kind() == reflect.Struct { + return fmt.Errorf("%s in the function [%s] should be passed by reference, i.e. *%s", s, funcName, t) + } + if err := validateType(t); err != nil { + return fmt.Errorf("%s in the function [%s] cannot contain %s", s, funcName, err) + } + + t = removePtr(t) + tv := reflect.New(t) + if t.Kind() != reflect.Struct { + tv = reflect.Indirect(tv) + } + + switch t.Kind() { + case reflect.Array, reflect.Slice, reflect.Map, reflect.Struct: + RegisterType(tv.Interface()) + default: + } + + return nil +} + +func removePtr(t reflect.Type) reflect.Type { + for t.Kind() == reflect.Ptr { + t = t.Elem() + } + return t +} + +var validatedTypes []*validatedType + +type validatedType struct { + t reflect.Type + err *error +} + +func validateType(t reflect.Type) (err error) { + t = removePtr(t) + for _, vd := range validatedTypes { + if vd.t == t { + return *vd.err + } + } + validatedTypes = append(validatedTypes, &validatedType{ + t: t, + err: &err, + }) + + switch t.Kind() { + case reflect.Chan, reflect.Func, reflect.UnsafePointer: + err = fmt.Errorf("%s. Found [%s]", t.Kind(), t) + return + case reflect.Array, reflect.Slice: + if err = validateType(t.Elem()); err != nil { + err = fmt.Errorf("%s in the %s [%s]", err, t.Kind(), t) + return + } + case reflect.Map: + if err = validateType(t.Elem()); err != nil { + err = fmt.Errorf("%s in the value of map [%s]", err, t) + return + } + if err = validateType(t.Key()); err != nil { + err = fmt.Errorf("%s in the key of map [%s]", err, t) + return + } + case reflect.Struct: + n := 0 + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.PkgPath == "" { + if err = validateType(f.Type); err != nil { + err = fmt.Errorf("%s in the field [%s] of struct [%s]", err, f.Name, t) + return + } + n++ + } + } + if n == 0 { + err = fmt.Errorf("struct without exported fields [%s]", t) + return + } + } + + return err +} + +type dispatcherRequest struct { + Request interface{} + Name string +} + +type dispatcherResponse struct { + Response interface{} + Error string +} + +func init() { + RegisterType(&dispatcherRequest{}) + RegisterType(&dispatcherResponse{}) +} + +// NewHandlerFunc returns HandlerFunc serving all the functions and/or services +// registered via AddFunc() and AddService(). +// +// The returned HandlerFunc must be assigned to Server.Handler or +// passed to New*Server(). 
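+//
+// Typical wiring (an illustrative sketch, not from the original docs;
+// NewTCPServer is the TCP server constructor referenced elsewhere in
+// the package docs):
+//
+//	d := gorpc.NewDispatcher()
+//	d.AddFunc("Echo", func(req string) string { return req })
+//	s := gorpc.NewTCPServer(":12345", d.NewHandlerFunc())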
+func (d *Dispatcher) NewHandlerFunc() HandlerFunc { + if len(d.serviceMap) == 0 { + logPanic("gorpc.Dispatcher: register at least one service before calling HandlerFunc()") + } + + serviceMap := copyServiceMap(d.serviceMap) + + return func(clientAddr string, request interface{}) interface{} { + req, ok := request.(*dispatcherRequest) + if !ok { + logPanic("gorpc.Dispatcher: unsupported request type received from the client: %T", request) + } + return dispatchRequest(serviceMap, clientAddr, req) + } +} + +func copyServiceMap(sm map[string]*serviceData) map[string]*serviceData { + serviceMap := make(map[string]*serviceData) + for sk, sv := range sm { + funcMap := make(map[string]*funcData) + for fk, fv := range sv.funcMap { + funcMap[fk] = fv + } + serviceMap[sk] = &serviceData{ + sv: sv.sv, + funcMap: funcMap, + } + } + return serviceMap +} + +func dispatchRequest(serviceMap map[string]*serviceData, clientAddr string, req *dispatcherRequest) *dispatcherResponse { + callName := strings.SplitN(req.Name, ".", 2) + if len(callName) != 2 { + return &dispatcherResponse{ + Error: fmt.Sprintf("gorpc.Dispatcher: cannot split call name into service name and method name [%s]", req.Name), + } + } + + serviceName, funcName := callName[0], callName[1] + s, ok := serviceMap[serviceName] + if !ok { + return &dispatcherResponse{ + Error: fmt.Sprintf("gorpc.Dispatcher: unknown service name [%s]", serviceName), + } + } + + fd, ok := s.funcMap[funcName] + if !ok { + return &dispatcherResponse{ + Error: fmt.Sprintf("gorpc.Dispatcher: unknown method [%s]", req.Name), + } + } + + var inArgs []reflect.Value + if fd.inNum > 0 { + inArgs = make([]reflect.Value, fd.inNum) + + dt := 0 + if serviceName != "" { + dt = 1 + inArgs[0] = s.sv + } + if fd.inNum == 2+dt { + inArgs[dt] = reflect.ValueOf(clientAddr) + } + if fd.inNum > dt { + reqv := reflect.ValueOf(req.Request) + reqt := reflect.TypeOf(req.Request) + if fd.reqt.String() != "interface {}" && reqt != fd.reqt { + return &dispatcherResponse{ + Error: fmt.Sprintf("gorpc.Dispatcher: unexpected request type for method [%s]: %s. Expected %s", req.Name, reqt, fd.reqt), + } + } + inArgs[len(inArgs)-1] = reqv + } + } + + outArgs := fd.fv.Call(inArgs) + + resp := &dispatcherResponse{} + + if len(outArgs) == 1 { + if isErrorType(outArgs[0].Type()) { + resp.Error = getErrorString(outArgs[0]) + } else { + resp.Response = outArgs[0].Interface() + } + } else if len(outArgs) == 2 { + resp.Error = getErrorString(outArgs[1]) + if resp.Error == "" { + resp.Response = outArgs[0].Interface() + } + } + + return resp +} + +var errt = reflect.TypeOf((*error)(nil)).Elem() + +func isErrorType(t reflect.Type) bool { + return t.Implements(errt) +} + +func getErrorString(v reflect.Value) string { + if v.IsNil() { + return "" + } + return v.Interface().(error).Error() +} + +// DispatcherClient is a Client wrapper suitable for calling registered +// functions and/or for calling methods of the registered services. +type DispatcherClient struct { + c *Client + serviceName string +} + +// NewFuncClient returns a client suitable for calling functions registered +// via AddFunc(). +func (d *Dispatcher) NewFuncClient(c *Client) *DispatcherClient { + if len(d.serviceMap) == 0 || d.serviceMap[""] == nil { + logPanic("gorpc.Dispatcher: register at least one function with AddFunc() before calling NewFuncClient()") + } + + return &DispatcherClient{ + c: c, + } +} + +// NewServiceClient returns a client suitable for calling methods +// of the service with name serviceName registered via AddService(). 
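+//
+// For example (an illustrative sketch, not from the original docs;
+// assumes a service registered under the name "Arith" and a started
+// Client c, and that Arith has a Multiply method):
+//
+//	dc := d.NewServiceClient("Arith", c)
+//	resp, err := dc.Call("Multiply", 6)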
+//
+// It is safe to create multiple service clients over a single underlying client.
func (d *Dispatcher) NewServiceClient(serviceName string, c *Client) *DispatcherClient {
+	if len(d.serviceMap) == 0 || d.serviceMap[serviceName] == nil {
+		logPanic("gorpc.Dispatcher: service [%s] must be registered with AddService() before calling NewServiceClient()", serviceName)
+	}
+
+	return &DispatcherClient{
+		c:           c,
+		serviceName: serviceName,
+	}
+}
+
+// Call calls the given function.
+func (dc *DispatcherClient) Call(funcName string, request interface{}) (response interface{}, err error) {
+	return dc.CallTimeout(funcName, request, dc.c.RequestTimeout)
+}
+
+// CallTimeout calls the given function and waits for a response during the given timeout.
+func (dc *DispatcherClient) CallTimeout(funcName string, request interface{}, timeout time.Duration) (response interface{}, err error) {
+	req := dc.getRequest(funcName, request)
+	resp, err := dc.c.CallTimeout(req, timeout)
+	return getResponse(resp, err)
+}
+
+// Send sends the given request to the given function and doesn't
+// wait for the response.
+func (dc *DispatcherClient) Send(funcName string, request interface{}) error {
+	req := dc.getRequest(funcName, request)
+	return dc.c.Send(req)
+}
+
+// CallAsync calls the given function asynchronously.
+func (dc *DispatcherClient) CallAsync(funcName string, request interface{}) (*AsyncResult, error) {
+	req := dc.getRequest(funcName, request)
+
+	innerAr, err := dc.c.CallAsync(req)
+	if err != nil {
+		return nil, err
+	}
+
+	ch := make(chan struct{})
+	ar := &AsyncResult{
+		Done: ch,
+	}
+
+	go func() {
+		<-innerAr.Done
+		ar.Response, ar.Error = getResponse(innerAr.Response, innerAr.Error)
+		close(ch)
+	}()
+
+	return ar, nil
+}
+
+// DispatcherBatch allows grouping and executing multiple RPCs in a single batch.
+//
+// A DispatcherBatch may be created via DispatcherClient.NewBatch().
+type DispatcherBatch struct {
+	lock sync.Mutex
+	c    *DispatcherClient
+	b    *Batch
+	ops  []*BatchResult
+}
+
+// NewBatch creates a new RPC batch for the given DispatcherClient.
+//
+// It is safe to create multiple concurrent batches from a single client.
+func (dc *DispatcherClient) NewBatch() *DispatcherBatch {
+	return &DispatcherBatch{
+		c: dc,
+		b: dc.c.NewBatch(),
+	}
+}
+
+// Add adds a new request to the RPC batch.
+//
+// The order of batched RPCs execution on the server is unspecified.
+//
+// All the requests added to the batch are sent to the server at once
+// when DispatcherBatch.Call*() is called.
+//
+// It is safe to add multiple requests to the same batch from concurrently
+// running goroutines.
+func (b *DispatcherBatch) Add(funcName string, request interface{}) *BatchResult {
+	return b.add(funcName, request, false)
+}
+
+// AddSkipResponse adds a new request to the RPC batch and doesn't care
+// about the response.
+//
+// The order of batched RPCs execution on the server is unspecified.
+//
+// All the requests added to the batch are sent to the server at once
+// when DispatcherBatch.Call*() is called.
+//
+// It is safe to add multiple requests to the same batch from concurrently
+// running goroutines.
+func (b *DispatcherBatch) AddSkipResponse(funcName string, request interface{}) { + b.add(funcName, request, true) +} + +func (b *DispatcherBatch) add(funcName string, request interface{}, skipResponse bool) *BatchResult { + req := b.c.getRequest(funcName, request) + + var br *BatchResult + b.lock.Lock() + if !skipResponse { + br = &BatchResult{ + ctx: b.b.Add(req), + done: make(chan struct{}), + } + br.Done = br.done + b.ops = append(b.ops, br) + } else { + b.b.AddSkipResponse(req) + } + b.lock.Unlock() + + return br +} + +// Call calls all the RPCs added via DispatcherBatch.Add(). +// +// The order of batched RPCs execution on the server is unspecified. +// +// The caller may read all BatchResult contents returned +// from DispatcherBatch.Add() after the Call returns. +// +// It is guaranteed that all <-BatchResult.Done channels are unblocked after +// the Call returns. +func (b *DispatcherBatch) Call() error { + return b.CallTimeout(b.c.c.RequestTimeout) +} + +// CallTimeout calls all the RPCs added via DispatcherBatch.Add() and waits +// for all the RPC responses during the given timeout. +// +// The caller may read all BatchResult contents returned +// from DispatcherBatch.Add() after the CallTimeout returns. +// +// It is guaranteed that all <-BatchResult.Done channels are unblocked after +// the CallTimeout returns. +func (b *DispatcherBatch) CallTimeout(timeout time.Duration) error { + b.lock.Lock() + bb := b.b + b.b = b.c.c.NewBatch() + ops := b.ops + b.ops = nil + b.lock.Unlock() + + if err := bb.CallTimeout(timeout); err != nil { + return err + } + + for _, op := range ops { + br := op.ctx.(*BatchResult) + op.Response, op.Error = getResponse(br.Response, br.Error) + close(op.done) + } + + return nil +} + +func (dc *DispatcherClient) getRequest(funcName string, request interface{}) *dispatcherRequest { + return &dispatcherRequest{ + Name: dc.serviceName + "." + funcName, + Request: request, + } +} + +func getResponse(respv interface{}, err error) (interface{}, error) { + if err != nil { + return nil, err + } + resp, ok := respv.(*dispatcherResponse) + if !ok { + return nil, &ClientError{ + Server: true, + err: fmt.Errorf("gorpc.DispatcherClient: unexpected response type: %T. Expected *dispatcherResponse", respv), + } + } + if resp.Error != "" { + return nil, &ClientError{ + Server: true, + err: errors.New(resp.Error), + } + } + return resp.Response, nil +} diff --git a/vendor/github.com/TykTechnologies/gorpc/doc.go b/vendor/github.com/TykTechnologies/gorpc/doc.go new file mode 100644 index 00000000000..9acb63d907e --- /dev/null +++ b/vendor/github.com/TykTechnologies/gorpc/doc.go @@ -0,0 +1,15 @@ +/* +Package gorpc provides simple RPC API for highload projects. + +Gorpc has the following features: + + * Easy-to-use API. + * Optimized for high load (>10K qps). + * Uses as low network bandwidth as possible. + * Minimizes the number of TCP connections in TIME_WAIT and WAIT_CLOSE states. + * Minimizes the number of send() and recv() syscalls. + * Provides ability to use arbitrary underlying transport. + By default TCP is used, but TLS and UNIX sockets are already available. 
+ +*/ +package gorpc diff --git a/vendor/github.com/TykTechnologies/gorpc/encoding.go b/vendor/github.com/TykTechnologies/gorpc/encoding.go new file mode 100644 index 00000000000..21a1a0a1072 --- /dev/null +++ b/vendor/github.com/TykTechnologies/gorpc/encoding.go @@ -0,0 +1,118 @@ +package gorpc + +import ( + "bufio" + "compress/flate" + "encoding/gob" + "io" +) + +// RegisterType registers the given type to send via rpc. +// +// The client must register all the response types the server may send. +// The server must register all the request types the client may send. +// +// There is no need in registering base Go types such as int, string, bool, +// float64, etc. or arrays, slices and maps containing base Go types. +// +// There is no need in registering argument and return value types +// for functions and methods registered via Dispatcher. +func RegisterType(x interface{}) { + gob.Register(x) +} + +type wireRequest struct { + ID uint64 + Request interface{} +} + +type wireResponse struct { + ID uint64 + Response interface{} + Error string +} + +type messageEncoder struct { + e *gob.Encoder + bw *bufio.Writer + zw *flate.Writer + ww *bufio.Writer +} + +func (e *messageEncoder) Close() error { + if e.zw != nil { + return e.zw.Close() + } + return nil +} + +func (e *messageEncoder) Flush() error { + if e.zw != nil { + if err := e.ww.Flush(); err != nil { + return err + } + if err := e.zw.Flush(); err != nil { + return err + } + } + if err := e.bw.Flush(); err != nil { + return err + } + return nil +} + +func (e *messageEncoder) Encode(msg interface{}) error { + return e.e.Encode(msg) +} + +func newMessageEncoder(w io.Writer, bufferSize int, enableCompression bool, s *ConnStats) *messageEncoder { + w = newWriterCounter(w, s) + bw := bufio.NewWriterSize(w, bufferSize) + + ww := bw + var zw *flate.Writer + if enableCompression { + zw, _ = flate.NewWriter(bw, flate.BestSpeed) + ww = bufio.NewWriterSize(zw, bufferSize) + } + + return &messageEncoder{ + e: gob.NewEncoder(ww), + bw: bw, + zw: zw, + ww: ww, + } +} + +type messageDecoder struct { + d *gob.Decoder + zr io.ReadCloser +} + +func (d *messageDecoder) Close() error { + if d.zr != nil { + return d.zr.Close() + } + return nil +} + +func (d *messageDecoder) Decode(msg interface{}) error { + return d.d.Decode(msg) +} + +func newMessageDecoder(r io.Reader, bufferSize int, enableCompression bool, s *ConnStats) *messageDecoder { + r = newReaderCounter(r, s) + br := bufio.NewReaderSize(r, bufferSize) + + rr := br + var zr io.ReadCloser + if enableCompression { + zr = flate.NewReader(br) + rr = bufio.NewReaderSize(zr, bufferSize) + } + + return &messageDecoder{ + d: gob.NewDecoder(rr), + zr: zr, + } +} diff --git a/vendor/github.com/TykTechnologies/gorpc/go.mod b/vendor/github.com/TykTechnologies/gorpc/go.mod new file mode 100644 index 00000000000..d4263ea8d6f --- /dev/null +++ b/vendor/github.com/TykTechnologies/gorpc/go.mod @@ -0,0 +1,3 @@ +module github.com/TykTechnologies/gorpc + +go 1.15 diff --git a/vendor/github.com/TykTechnologies/gorpc/server.go b/vendor/github.com/TykTechnologies/gorpc/server.go new file mode 100644 index 00000000000..740cf024e0a --- /dev/null +++ b/vendor/github.com/TykTechnologies/gorpc/server.go @@ -0,0 +1,482 @@ +package gorpc + +import ( + "fmt" + "io" + "net" + "runtime" + "sync" + "sync/atomic" + "time" +) + +// HandlerFunc is a server handler function. +// +// clientAddr contains client address returned by Listener.Accept(). +// Request and response types may be arbitrary. 
+// All the request types the client may send to the server must be registered +// with gorpc.RegisterType() before starting the server. +// There is no need in registering base Go types such as int, string, bool, +// float64, etc. or arrays, slices and maps containing base Go types. +// +// Hint: use Dispatcher for HandlerFunc construction. +type HandlerFunc func(clientAddr string, request interface{}) (response interface{}) + +// Server implements RPC server. +// +// Default server settings are optimized for high load, so don't override +// them without valid reason. +type Server struct { + // Address to listen to for incoming connections. + // + // The address format depends on the underlying transport provided + // by Server.Listener. The following transports are provided + // out of the box: + // * TCP - see NewTCPServer() and NewTCPClient(). + // * TLS (aka SSL) - see NewTLSServer() and NewTLSClient(). + // * Unix sockets - see NewUnixServer() and NewUnixClient(). + // + // By default TCP transport is used. + Addr string + + // Handler function for incoming requests. + // + // Server calls this function for each incoming request. + // The function must process the request and return the corresponding response. + // + // Hint: use Dispatcher for HandlerFunc construction. + Handler HandlerFunc + + // The maximum number of concurrent rpc calls the server may perform. + // Default is DefaultConcurrency. + Concurrency int + + // The maximum delay between response flushes to clients. + // + // Negative values lead to immediate requests' sending to the client + // without their buffering. This minimizes rpc latency at the cost + // of higher CPU and network usage. + // + // Default is DefaultFlushDelay. + FlushDelay time.Duration + + // The maximum number of pending responses in the queue. + // Default is DefaultPendingMessages. + PendingResponses int + + // Size of send buffer per each underlying connection in bytes. + // Default is DefaultBufferSize. + SendBufferSize int + + // Size of recv buffer per each underlying connection in bytes. + // Default is DefaultBufferSize. + RecvBufferSize int + + // OnConnect is called whenever connection from client is accepted. + // The callback can be used for authentication/authorization/encryption + // and/or for custom transport wrapping. + // + // See also Listener, which can be used for sophisticated transport + // implementation. + OnConnect OnConnectFunc + + // The server obtains new client connections via Listener.Accept(). + // + // Override the listener if you want custom underlying transport + // and/or client authentication/authorization. + // Don't forget overriding Client.Dial() callback accordingly. + // + // See also OnConnect for authentication/authorization purposes. + // + // * NewTLSClient() and NewTLSServer() can be used for encrypted rpc. + // * NewUnixClient() and NewUnixServer() can be used for fast local + // inter-process rpc. + // + // By default it returns TCP connections accepted from Server.Addr. + Listener Listener + + // LogError is used for error logging. + // + // By default the function set via SetErrorLogger() is used. + LogError LoggerFunc + + // Connection statistics. + // + // The stats doesn't reset automatically. Feel free resetting it + // any time you wish. + Stats ConnStats + + serverStopChan chan struct{} + stopWg sync.WaitGroup +} + +// Start starts rpc server. +// +// All the request types the client may send to the server must be registered +// with gorpc.RegisterType() before starting the server. 
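+// For example, if handlers exchange a custom struct type, register a value
+// of it once at startup (a sketch; MyRequest is a hypothetical type):
+//
+//    gorpc.RegisterType(&MyRequest{})
+//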
+// There is no need in registering base Go types such as int, string, bool, +// float64, etc. or arrays, slices and maps containing base Go types. +func (s *Server) Start() error { + if s.LogError == nil { + s.LogError = errorLogger + } + if s.Handler == nil { + panic("gorpc.Server: Server.Handler cannot be nil") + } + + if s.serverStopChan != nil { + panic("gorpc.Server: server is already running. Stop it before starting it again") + } + s.serverStopChan = make(chan struct{}) + + if s.Concurrency <= 0 { + s.Concurrency = DefaultConcurrency + } + if s.FlushDelay == 0 { + s.FlushDelay = DefaultFlushDelay + } + if s.PendingResponses <= 0 { + s.PendingResponses = DefaultPendingMessages + } + if s.SendBufferSize <= 0 { + s.SendBufferSize = DefaultBufferSize + } + if s.RecvBufferSize <= 0 { + s.RecvBufferSize = DefaultBufferSize + } + + if s.Listener == nil { + s.Listener = &defaultListener{} + } + if err := s.Listener.Init(s.Addr); err != nil { + err = fmt.Errorf("gorpc.Server: [%s]. Cannot listen to: [%s]", s.Addr, err) + s.LogError("%s", err) + return err + } + + workersCh := make(chan struct{}, s.Concurrency) + s.stopWg.Add(1) + go serverHandler(s, workersCh) + return nil +} + +// Stop stops rpc server. Stopped server can be started again. +func (s *Server) Stop() { + if s.serverStopChan == nil { + panic("gorpc.Server: server must be started before stopping it") + } + close(s.serverStopChan) + s.stopWg.Wait() + s.serverStopChan = nil +} + +// Serve starts rpc server and blocks until it is stopped. +func (s *Server) Serve() error { + if err := s.Start(); err != nil { + return err + } + s.stopWg.Wait() + return nil +} + +func serverHandler(s *Server, workersCh chan struct{}) { + defer s.stopWg.Done() + + var conn net.Conn + var err error + var stopping atomic.Value + + + for { + acceptChan := make(chan struct{}) + go func() { + if conn, err = s.Listener.Accept(); err != nil { + if stopping.Load() == nil { + s.LogError("gorpc.Server: [%s]. Cannot accept new connection: [%s]", s.Addr, err) + } + } + close(acceptChan) + }() + + select { + case <-s.serverStopChan: + stopping.Store(true) + s.Listener.Close() + <-acceptChan + return + case <-acceptChan: + s.Stats.incAcceptCalls() + } + + if err != nil { + s.Stats.incAcceptErrors() + select { + case <-s.serverStopChan: + return + case <-time.After(time.Second): + } + continue + } + + s.stopWg.Add(1) + go serverHandleConnection(s, conn, workersCh) + } +} + +func serverHandleConnection(s *Server, conn net.Conn, workersCh chan struct{}) { + defer s.stopWg.Done() + var clientAddr string + var err error + var newConn net.Conn + + if s.OnConnect != nil { + newConn, clientAddr, err = s.OnConnect(conn) + if err != nil { + s.LogError("gorpc.Server: [%s]->[%s]. OnConnect error: [%s]", clientAddr, s.Addr, err) + conn.Close() + return + } + conn = newConn + } + + if clientAddr == "" { + clientAddr = conn.RemoteAddr().String() + } + + var enabledCompression bool + var stopping atomic.Value + zChan := make(chan bool, 1) + + go func() { + var buf [1]byte + if _, err = conn.Read(buf[:]); err != nil { + if stopping.Load() == nil { + s.LogError("gorpc.Server: [%s]->[%s]. Error when reading handshake from client: [%s]", clientAddr, s.Addr, err) + } + } + zChan <- (buf[0] != 0) + }() + select { + case enabledCompression = <-zChan: + if err != nil { + conn.Close() + return + } + case <-s.serverStopChan: + stopping.Store(true) + conn.Close() + return + case <-time.After(10 * time.Second): + s.LogError("gorpc.Server: [%s]->[%s]. 
Cannot obtain handshake from client during 10s", clientAddr, s.Addr) + conn.Close() + return + } + + responsesChan := make(chan *serverMessage, s.PendingResponses) + stopChan := make(chan struct{}) + + readerDone := make(chan struct{}) + go serverReader(s, conn, clientAddr, responsesChan, stopChan, readerDone, enabledCompression, workersCh) + + writerDone := make(chan struct{}) + go serverWriter(s, conn, clientAddr, responsesChan, stopChan, writerDone, enabledCompression) + + select { + case <-readerDone: + close(stopChan) + conn.Close() + <-writerDone + case <-writerDone: + close(stopChan) + conn.Close() + <-readerDone + case <-s.serverStopChan: + close(stopChan) + conn.Close() + <-readerDone + <-writerDone + } +} + +type serverMessage struct { + ID uint64 + Request interface{} + Response interface{} + Error string + ClientAddr string +} + +var serverMessagePool = &sync.Pool{ + New: func() interface{} { + return &serverMessage{} + }, +} + +func isClientDisconnect(err error) bool { + return err == io.ErrUnexpectedEOF || err == io.EOF +} + +func isServerStop(stopChan <-chan struct{}) bool { + select { + case <-stopChan: + return true + default: + return false + } +} + +func serverReader(s *Server, r io.Reader, clientAddr string, responsesChan chan<- *serverMessage, + stopChan <-chan struct{}, done chan<- struct{}, enabledCompression bool, workersCh chan struct{}) { + + defer func() { + if r := recover(); r != nil { + s.LogError("gorpc.Server: [%s]->[%s]. Panic when reading data from client: %v", clientAddr, s.Addr, r) + } + close(done) + }() + + d := newMessageDecoder(r, s.RecvBufferSize, enabledCompression, &s.Stats) + defer d.Close() + + var wr wireRequest + for { + if err := d.Decode(&wr); err != nil { + if !isClientDisconnect(err) && !isServerStop(stopChan) { + s.LogError("gorpc.Server: [%s]->[%s]. Cannot decode request: [%s]", clientAddr, s.Addr, err) + } + return + } + + m := serverMessagePool.Get().(*serverMessage) + m.ID = wr.ID + m.Request = wr.Request + m.ClientAddr = clientAddr + + wr.ID = 0 + wr.Request = nil + + select { + case workersCh <- struct{}{}: + default: + select { + case workersCh <- struct{}{}: + case <-stopChan: + return + } + } + go serveRequest(s, responsesChan, stopChan, m, workersCh) + } +} + +func serveRequest(s *Server, responsesChan chan<- *serverMessage, stopChan <-chan struct{}, m *serverMessage, workersCh <-chan struct{}) { + request := m.Request + m.Request = nil + clientAddr := m.ClientAddr + m.ClientAddr = "" + skipResponse := (m.ID == 0) + + if skipResponse { + m.Response = nil + m.Error = "" + serverMessagePool.Put(m) + } + + t := time.Now() + response, err := callHandlerWithRecover(s.LogError, s.Handler, clientAddr, s.Addr, request) + s.Stats.incRPCTime(uint64(time.Since(t).Seconds() * 1000)) + + req, ok := request.(*dispatcherRequest) + if !ok { + logPanic("gorpc.Dispatcher: unsupported request type received from the client: %T", request) + } else { + s.Stats.incFuncCalls(req.Name) + } + + if !skipResponse { + m.Response = response + m.Error = err + + // Select hack for better performance. + // See https://github.com/valyala/gorpc/pull/1 for details. 
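+		// The first, non-blocking send succeeds on the fast path without
+		// evaluating the stopChan case; only when responsesChan is full
+		// does it fall back to the blocking select that also honors
+		// server shutdown.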
+ select { + case responsesChan <- m: + default: + select { + case responsesChan <- m: + case <-stopChan: + } + } + } + + <-workersCh +} + +func callHandlerWithRecover(logErrorFunc LoggerFunc, handler HandlerFunc, clientAddr, serverAddr string, request interface{}) (response interface{}, errStr string) { + defer func() { + if x := recover(); x != nil { + stackTrace := make([]byte, 1<<20) + n := runtime.Stack(stackTrace, false) + errStr = fmt.Sprintf("Panic occured: %v\nStack trace: %s", x, stackTrace[:n]) + logErrorFunc("gorpc.Server: [%s]->[%s]. %s", clientAddr, serverAddr, errStr) + } + }() + response = handler(clientAddr, request) + return +} + +func serverWriter(s *Server, w io.Writer, clientAddr string, responsesChan <-chan *serverMessage, stopChan <-chan struct{}, done chan<- struct{}, enabledCompression bool) { + defer func() { close(done) }() + + e := newMessageEncoder(w, s.SendBufferSize, enabledCompression, &s.Stats) + defer e.Close() + + var flushChan <-chan time.Time + t := time.NewTimer(s.FlushDelay) + var wr wireResponse + for { + var m *serverMessage + + select { + case m = <-responsesChan: + default: + // Give the last chance for ready goroutines filling responsesChan :) + runtime.Gosched() + + select { + case <-stopChan: + return + case m = <-responsesChan: + case <-flushChan: + if err := e.Flush(); err != nil { + if !isServerStop(stopChan) { + s.LogError("gorpc.Server: [%s]->[%s]: Cannot flush responses to underlying stream: [%s]", clientAddr, s.Addr, err) + } + return + } + flushChan = nil + continue + } + } + + if flushChan == nil { + flushChan = getFlushChan(t, s.FlushDelay) + } + + wr.ID = m.ID + wr.Response = m.Response + wr.Error = m.Error + + m.Response = nil + m.Error = "" + serverMessagePool.Put(m) + + if err := e.Encode(wr); err != nil { + s.LogError("gorpc.Server: [%s]->[%s]. Cannot send response to wire: [%s]", clientAddr, s.Addr, err) + return + } + wr.Response = nil + wr.Error = "" + + s.Stats.incRPCCalls() + } +} diff --git a/vendor/github.com/TykTechnologies/gorpc/transport.go b/vendor/github.com/TykTechnologies/gorpc/transport.go new file mode 100644 index 00000000000..108cd716377 --- /dev/null +++ b/vendor/github.com/TykTechnologies/gorpc/transport.go @@ -0,0 +1,228 @@ +package gorpc + +import ( + "crypto/tls" + "net" + "time" +) + +var ( + dialer = &net.Dialer{ + Timeout: 10 * time.Second, + KeepAlive: 30 * time.Second, + } +) + +// DialFunc is a function intended for setting to Client.Dial. +// +// It is expected that the returned conn immediately +// sends all the data passed via Write() to the server. +// Otherwise gorpc may hang. +// The conn implementation must call Flush() on underlying buffered +// streams before returning from Write(). +type DialFunc func(addr string) (conn net.Conn, err error) + +// Listener is an interface for custom listeners intended for the Server. +type Listener interface { + // Init is called on server start. + // + // addr contains the address set at Server.Addr. + Init(addr string) error + + // Accept must return incoming connections from clients. + // clientAddr must contain client's address in user-readable view. + // + // It is expected that the returned conn immediately + // sends all the data passed via Write() to the client. + // Otherwise gorpc may hang. + // The conn implementation must call Flush() on underlying buffered + // streams before returning from Write(). + Accept() (conn net.Conn, err error) + + // Close closes the listener. 
+ // All pending calls to Accept() must immediately return errors after + // Close is called. + // All subsequent calls to Accept() must immediately return error. + Close() error +} + +func defaultDial(addr string) (conn net.Conn, err error) { + return dialer.Dial("tcp", addr) +} + +type defaultListener struct { + L net.Listener +} + +func (ln *defaultListener) Init(addr string) (err error) { + ln.L, err = net.Listen("tcp", addr) + return +} + +func (ln *defaultListener) Accept() (conn net.Conn, err error) { + c, err := ln.L.Accept() + if err != nil { + return nil, err + } + if err = setupKeepalive(c); err != nil { + c.Close() + return nil, err + } + return c, nil +} + +func (ln *defaultListener) Close() error { + return ln.L.Close() +} + +func setupKeepalive(conn net.Conn) error { + tcpConn := conn.(*net.TCPConn) + if err := tcpConn.SetKeepAlive(true); err != nil { + return err + } + if err := tcpConn.SetKeepAlivePeriod(30 * time.Second); err != nil { + return err + } + return nil +} + +type netListener struct { + F func(addr string) (net.Listener, error) + L net.Listener +} + +func (ln *netListener) Init(addr string) (err error) { + ln.L, err = ln.F(addr) + return +} + +func (ln *netListener) Accept() (conn net.Conn, err error) { + c, err := ln.L.Accept() + if err != nil { + return nil, err + } + return c, nil +} + +func (ln *netListener) Close() error { + return ln.L.Close() +} + +func unixDial(addr string) (conn net.Conn, err error) { + c, err := net.Dial("unix", addr) + if err != nil { + return nil, err + } + return c, err +} + +// NewTCPClient creates a client connecting over TCP to the server +// listening to the given addr. +// +// The returned client must be started after optional settings' adjustment. +// +// The corresponding server must be created with NewTCPServer(). +func NewTCPClient(addr string) *Client { + return &Client{ + Addr: addr, + Dial: defaultDial, + } +} + +// NewTCPServer creates a server listening for TCP connections +// on the given addr and processing incoming requests +// with the given HandlerFunc. +// +// The returned server must be started after optional settings' adjustment. +// +// The corresponding client must be created with NewTCPClient(). +func NewTCPServer(addr string, handler HandlerFunc) *Server { + return &Server{ + Addr: addr, + Handler: handler, + Listener: &defaultListener{}, + } +} + +// NewUnixClient creates a client connecting over unix socket +// to the server listening to the given addr. +// +// The returned client must be started after optional settings' adjustment. +// +// The corresponding server must be created with NewUnixServer(). +func NewUnixClient(addr string) *Client { + return &Client{ + Addr: addr, + Dial: unixDial, + + // There is little sense in compressing rpc data passed + // over local unix sockets. + DisableCompression: true, + + // Sacrifice the number of Write() calls to the smallest + // possible latency, since it has higher priority in local IPC. + FlushDelay: -1, + } +} + +// NewUnixServer creates a server listening for unix connections +// on the given addr and processing incoming requests +// with the given HandlerFunc. +// +// The returned server must be started after optional settings' adjustment. +// +// The corresponding client must be created with NewUnixClient(). 
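+//
+// A minimal sketch (the socket path is illustrative; handler is any
+// HandlerFunc):
+//
+//    s := gorpc.NewUnixServer("/tmp/gorpc.sock", handler)
+//    if err := s.Start(); err != nil {
+//        log.Fatalf("Cannot start unix rpc server: %s", err)
+//    }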
+func NewUnixServer(addr string, handler HandlerFunc) *Server { + return &Server{ + Addr: addr, + Handler: handler, + Listener: &netListener{ + F: func(addr string) (net.Listener, error) { + return net.Listen("unix", addr) + }, + }, + + // Sacrifice the number of Write() calls to the smallest + // possible latency, since it has higher priority in local IPC. + FlushDelay: -1, + } +} + +// NewTLSClient creates a client connecting over TLS (aka SSL) to the server +// listening to the given addr using the given TLS config. +// +// The returned client must be started after optional settings' adjustment. +// +// The corresponding server must be created with NewTLSServer(). +func NewTLSClient(addr string, cfg *tls.Config) *Client { + return &Client{ + Addr: addr, + Dial: func(addr string) (conn net.Conn, err error) { + c, err := tls.DialWithDialer(dialer, "tcp", addr, cfg) + if err != nil { + return nil, err + } + return c, err + }, + } +} + +// NewTLSServer creates a server listening for TLS (aka SSL) connections +// on the given addr and processing incoming requests +// with the given HandlerFunc. +// cfg must contain TLS settings for the server. +// +// The returned server must be started after optional settings' adjustment. +// +// The corresponding client must be created with NewTLSClient(). +func NewTLSServer(addr string, handler HandlerFunc, cfg *tls.Config) *Server { + return &Server{ + Addr: addr, + Handler: handler, + Listener: &netListener{ + F: func(addr string) (net.Listener, error) { + return tls.Listen("tcp", addr, cfg) + }, + }, + } +} diff --git a/vendor/github.com/TykTechnologies/goverify/LICENSE b/vendor/github.com/TykTechnologies/goverify/LICENSE new file mode 100644 index 00000000000..a612ad9813b --- /dev/null +++ b/vendor/github.com/TykTechnologies/goverify/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. 
"Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. 
Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. 
You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. 
No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. 
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+  This Source Code Form is subject to the terms of the Mozilla Public
+  License, v. 2.0. If a copy of the MPL was not distributed with this
+  file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+  This Source Code Form is "Incompatible With Secondary Licenses", as
+  defined by the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/TykTechnologies/goverify/README.md b/vendor/github.com/TykTechnologies/goverify/README.md
new file mode 100644
index 00000000000..c1b3659d64a
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/goverify/README.md
@@ -0,0 +1,11 @@
+# RSA Verifier
+
+This lib makes it easy to verify a string with a signature using an RSA public/private key combination.
+
+Shamelessly ripped off from SO:
+
+http://stackoverflow.com/questions/20655702/signing-and-decoding-with-rsa-sha-in-go
+
+And this play example:
+
+https://play.golang.org/p/bzpD7Pa9mr
\ No newline at end of file
diff --git a/vendor/github.com/TykTechnologies/goverify/goverify.go b/vendor/github.com/TykTechnologies/goverify/goverify.go
new file mode 100644
index 00000000000..ae9372c3d24
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/goverify/goverify.go
@@ -0,0 +1,3 @@
+package goverify
+
+
diff --git a/vendor/github.com/TykTechnologies/goverify/rsa_signer.go b/vendor/github.com/TykTechnologies/goverify/rsa_signer.go
new file mode 100644
index 00000000000..af98c67a539
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/goverify/rsa_signer.go
@@ -0,0 +1,20 @@
+package goverify
+
+import (
+	"crypto"
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/sha256"
+)
+
+type RSAPrivateKey struct {
+	*rsa.PrivateKey
+}
+
+// Sign signs data with rsa-sha256
+func (r *RSAPrivateKey) Sign(data []byte) ([]byte, error) {
+	h := sha256.New()
+	h.Write(data)
+	d := h.Sum(nil)
+	return rsa.SignPKCS1v15(rand.Reader, r.PrivateKey, crypto.SHA256, d)
+}
diff --git a/vendor/github.com/TykTechnologies/goverify/rsa_verifier.go b/vendor/github.com/TykTechnologies/goverify/rsa_verifier.go
new file mode 100644
index 00000000000..7c033f105d2
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/goverify/rsa_verifier.go
@@ -0,0 +1,19 @@
+package goverify
+
+import (
+	"crypto"
+	"crypto/rsa"
+	"crypto/sha256"
+)
+
+type RSAPublicKey struct {
+	*rsa.PublicKey
+}
+
+// Verify verifies the message using an rsa-sha256 signature
+func (r *RSAPublicKey) Verify(message []byte, sig []byte) error {
+	h := sha256.New()
+	h.Write(message)
+	d := h.Sum(nil)
+	return rsa.VerifyPKCS1v15(r.PublicKey, crypto.SHA256, d, sig)
+}
diff --git a/vendor/github.com/TykTechnologies/goverify/signer.go b/vendor/github.com/TykTechnologies/goverify/signer.go
new file mode 100644
index 00000000000..940b88f1d12
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/goverify/signer.go
@@ -0,0 +1,24 @@
+package goverify
+
+import (
+	"crypto/rsa"
+	"fmt"
+)
+
+// A Signer can create signatures that verify against a public key.
+type Signer interface {
+	// Sign returns a raw signature for the given data. This method
+	// will apply the hash specified for the key type to the data.
+	Sign(data []byte) ([]byte, error)
+}
+
+func newSignerFromKey(k interface{}) (Signer, error) {
+	var sshKey Signer
+	switch t := k.(type) {
+	case *rsa.PrivateKey:
+		sshKey = &RSAPrivateKey{t}
+	default:
+		return nil, fmt.Errorf("ssh: unsupported key type %T", k)
+	}
+	return sshKey, nil
+}
diff --git a/vendor/github.com/TykTechnologies/goverify/util.go b/vendor/github.com/TykTechnologies/goverify/util.go
new file mode 100644
index 00000000000..177ef2d77e9
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/goverify/util.go
@@ -0,0 +1,95 @@
+package goverify
+
+import (
+	"crypto/x509"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"io/ioutil"
+)
+
+// LoadPublicKeyFromFile loads and parses a PEM encoded public key file.
+func LoadPublicKeyFromFile(path string) (Verifier, error) {
+	dat, err := ioutil.ReadFile(path)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return parsePublicKey(dat)
+}
+
+func LoadPublicKeyFromString(key string) (Verifier, error) {
+
+	return parsePublicKey([]byte(key))
+}
+
+// parsePublicKey parses a PEM encoded public key.
+func parsePublicKey(pemBytes []byte) (Verifier, error) {
+	block, _ := pem.Decode(pemBytes)
+	if block == nil {
+		return nil, errors.New("ssh: no key found")
+	}
+
+	var rawkey interface{}
+	switch block.Type {
+	case "RSA PUBLIC KEY":
+		rsa, err := x509.ParsePKCS1PublicKey(block.Bytes)
+		if err != nil {
+			return nil, err
+		}
+		rawkey = rsa
+	case "PUBLIC KEY":
+		rsa, err := x509.ParsePKIXPublicKey(block.Bytes)
+		if err != nil {
+			return nil, err
+		}
+		rawkey = rsa
+	default:
+		return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type)
+	}
+
+	return newVerifierFromKey(rawkey)
+}
+
+// LoadPrivateKeyFromFile loads and parses a PEM encoded private key file.
+func LoadPrivateKeyFromFile(path string) (Signer, error) {
+	dat, err := ioutil.ReadFile(path)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return parsePrivateKey(dat)
+}
+
+func LoadPrivateKeyFromString(key string) (Signer, error) {
+	return parsePrivateKey([]byte(key))
+}
+
+// parsePrivateKey parses a PEM encoded private key.
+func parsePrivateKey(pemBytes []byte) (Signer, error) {
+	block, _ := pem.Decode(pemBytes)
+	if block == nil {
+		return nil, errors.New("ssh: no key found")
+	}
+
+	var rawkey interface{}
+	switch block.Type {
+	case "RSA PRIVATE KEY":
+		rsa, err := x509.ParsePKCS1PrivateKey(block.Bytes)
+		if err != nil {
+			return nil, err
+		}
+		rawkey = rsa
+	case "PRIVATE KEY":
+		privkey, err := x509.ParsePKCS8PrivateKey(block.Bytes)
+		if err != nil {
+			return nil, err
+		}
+		rawkey = privkey
+	default:
+		return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type)
+	}
+	return newSignerFromKey(rawkey)
+}
diff --git a/vendor/github.com/TykTechnologies/goverify/verifier.go b/vendor/github.com/TykTechnologies/goverify/verifier.go
new file mode 100644
index 00000000000..409766ec9aa
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/goverify/verifier.go
@@ -0,0 +1,24 @@
+package goverify
+
+import (
+	"crypto/rsa"
+	"fmt"
+)
+
+// A Verifier can validate signatures that verify against a public key.
+type Verifier interface {
+	// Verify checks that sig is a valid signature of data. This method
+	// will apply the hash specified for the key type to the data.
+ Verify(data []byte, sig []byte) error +} + +func newVerifierFromKey(k interface{}) (Verifier, error) { + var sshKey Verifier + switch t := k.(type) { + case *rsa.PublicKey: + sshKey = &RSAPublicKey{t} + default: + return nil, fmt.Errorf("ssh: unsupported key type %T", k) + } + return sshKey, nil +} diff --git a/vendor/github.com/hashicorp/go-hclog/LICENSE b/vendor/github.com/TykTechnologies/graphql-go-tools/LICENSE similarity index 97% rename from vendor/github.com/hashicorp/go-hclog/LICENSE rename to vendor/github.com/TykTechnologies/graphql-go-tools/LICENSE index abaf1e45f2a..efc56cd1fce 100644 --- a/vendor/github.com/hashicorp/go-hclog/LICENSE +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/LICENSE @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2017 HashiCorp +Copyright (c) 2018 Jens Neuse Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/internal/pkg/quotes/quotes.go b/vendor/github.com/TykTechnologies/graphql-go-tools/internal/pkg/quotes/quotes.go new file mode 100644 index 00000000000..87f2e66de90 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/internal/pkg/quotes/quotes.go @@ -0,0 +1,19 @@ +package quotes + +const ( + quoteStr = "\"" +) + +var ( + quoteBytes = []byte(quoteStr) +) + +func WrapBytes(bytes []byte) []byte { + cp := make([]byte, len(bytes)) + copy(cp, bytes) + return append(quoteBytes, append(cp, quoteBytes...)...) +} + +func WrapString(str string) string { + return quoteStr + str + quoteStr +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/internal/pkg/unsafebytes/unsafebytes.go b/vendor/github.com/TykTechnologies/graphql-go-tools/internal/pkg/unsafebytes/unsafebytes.go new file mode 100644 index 00000000000..b581742257e --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/internal/pkg/unsafebytes/unsafebytes.go @@ -0,0 +1,62 @@ +package unsafebytes + +import ( + "reflect" + "strconv" + "unsafe" +) + +func BytesToInt64(byteSlice []byte) int64 { + out, _ := strconv.ParseInt(*(*string)(unsafe.Pointer(&byteSlice)), 10, 64) + return out +} + +func BytesToInt32(byteSlice []byte) int32 { + out, _ := strconv.ParseInt(*(*string)(unsafe.Pointer(&byteSlice)), 10, 32) + return int32(out) +} + +func BytesToFloat32(byteSlice []byte) float32 { + out, _ := strconv.ParseFloat(*(*string)(unsafe.Pointer(&byteSlice)), 64) + return float32(out) +} + +func BytesToString(bytes []byte) string { + sliceHeader := (*reflect.SliceHeader)(unsafe.Pointer(&bytes)) + stringHeader := reflect.StringHeader{Data: sliceHeader.Data, Len: sliceHeader.Len} + return *(*string)(unsafe.Pointer(&stringHeader)) //nolint:govet +} + +func BytesToBool(byteSlice []byte) bool { + out, _ := strconv.ParseBool(*(*string)(unsafe.Pointer(&byteSlice))) + return out +} + +func StringToBytes(str string) []byte { + hdr := *(*reflect.StringHeader)(unsafe.Pointer(&str)) //nolint:govet + return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{ //nolint:govet + Data: hdr.Data, + Len: hdr.Len, + Cap: hdr.Len, + })) +} + +func BytesIsValidFloat32(byteSlice []byte) bool { + _, err := strconv.ParseFloat(*(*string)(unsafe.Pointer(&byteSlice)), 64) + return err == nil +} + +func BytesIsValidInt64(byteSlice []byte) bool { + _, err := strconv.ParseInt(*(*string)(unsafe.Pointer(&byteSlice)), 10, 64) + return err == nil +} + +func BytesIsValidInt32(byteSlice []byte) bool { + _, err := 
strconv.ParseInt(*(*string)(unsafe.Pointer(&byteSlice)), 10, 32) + return err == nil +} + +func BytesIsValidBool(byteSlice []byte) bool { + _, err := strconv.ParseBool(*(*string)(unsafe.Pointer(&byteSlice))) + return err == nil +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast.go new file mode 100644 index 00000000000..61fce9c7dfa --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast.go @@ -0,0 +1,260 @@ +//go:generate stringer -type=OperationType,ValueKind,TypeKind,SelectionKind,NodeKind,PathKind -output ast_string.go + +// Package ast defines the GraphQL AST and offers helper methods to interact with the AST, mostly to get the necessary information from the ast. +// +// The document struct is designed in a way to enable performant parsing while keeping the ast easy to use with helper methods. +package ast + +const InvalidRef = -1 + +type Document struct { + Input Input + RootNodes []Node + SchemaDefinitions []SchemaDefinition + SchemaExtensions []SchemaExtension + RootOperationTypeDefinitions []RootOperationTypeDefinition + Directives []Directive + Arguments []Argument + ObjectTypeDefinitions []ObjectTypeDefinition + ObjectTypeExtensions []ObjectTypeExtension + FieldDefinitions []FieldDefinition + Types []Type + InputValueDefinitions []InputValueDefinition + InputObjectTypeDefinitions []InputObjectTypeDefinition + InputObjectTypeExtensions []InputObjectTypeExtension + ScalarTypeDefinitions []ScalarTypeDefinition + ScalarTypeExtensions []ScalarTypeExtension + InterfaceTypeDefinitions []InterfaceTypeDefinition + InterfaceTypeExtensions []InterfaceTypeExtension + UnionTypeDefinitions []UnionTypeDefinition + UnionTypeExtensions []UnionTypeExtension + EnumTypeDefinitions []EnumTypeDefinition + EnumTypeExtensions []EnumTypeExtension + EnumValueDefinitions []EnumValueDefinition + DirectiveDefinitions []DirectiveDefinition + Values []Value + ListValues []ListValue + VariableValues []VariableValue + StringValues []StringValue + IntValues []IntValue + FloatValues []FloatValue + EnumValues []EnumValue + ObjectFields []ObjectField + ObjectValues []ObjectValue + Selections []Selection + SelectionSets []SelectionSet + Fields []Field + InlineFragments []InlineFragment + FragmentSpreads []FragmentSpread + OperationDefinitions []OperationDefinition + VariableDefinitions []VariableDefinition + FragmentDefinitions []FragmentDefinition + BooleanValues [2]BooleanValue + Refs [][8]int + RefIndex int + Index Index +} + +func NewDocument() *Document { + return &Document{ + RootNodes: make([]Node, 0, 48), + RootOperationTypeDefinitions: make([]RootOperationTypeDefinition, 0, 3), + SchemaDefinitions: make([]SchemaDefinition, 0, 2), + SchemaExtensions: make([]SchemaExtension, 0, 2), + Directives: make([]Directive, 0, 16), + Arguments: make([]Argument, 0, 48), + ObjectTypeDefinitions: make([]ObjectTypeDefinition, 0, 48), + ObjectTypeExtensions: make([]ObjectTypeExtension, 0, 4), + Types: make([]Type, 0, 48), + FieldDefinitions: make([]FieldDefinition, 0, 128), + InputValueDefinitions: make([]InputValueDefinition, 0, 128), + InputObjectTypeDefinitions: make([]InputObjectTypeDefinition, 0, 16), + InputObjectTypeExtensions: make([]InputObjectTypeExtension, 0, 4), + ScalarTypeDefinitions: make([]ScalarTypeDefinition, 0, 16), + ScalarTypeExtensions: make([]ScalarTypeExtension, 0, 4), + InterfaceTypeDefinitions: make([]InterfaceTypeDefinition, 0, 16), + InterfaceTypeExtensions: 
make([]InterfaceTypeExtension, 0, 4), + UnionTypeDefinitions: make([]UnionTypeDefinition, 0, 8), + UnionTypeExtensions: make([]UnionTypeExtension, 0, 4), + EnumTypeDefinitions: make([]EnumTypeDefinition, 0, 8), + EnumTypeExtensions: make([]EnumTypeExtension, 0, 4), + EnumValueDefinitions: make([]EnumValueDefinition, 0, 48), + DirectiveDefinitions: make([]DirectiveDefinition, 0, 8), + VariableValues: make([]VariableValue, 0, 8), + StringValues: make([]StringValue, 0, 24), + EnumValues: make([]EnumValue, 0, 24), + IntValues: make([]IntValue, 0, 128), + FloatValues: make([]FloatValue, 0, 128), + Values: make([]Value, 0, 64), + ListValues: make([]ListValue, 0, 4), + ObjectFields: make([]ObjectField, 0, 64), + ObjectValues: make([]ObjectValue, 0, 16), + Selections: make([]Selection, 0, 128), + SelectionSets: make([]SelectionSet, 0, 48), + Fields: make([]Field, 0, 128), + InlineFragments: make([]InlineFragment, 0, 16), + FragmentSpreads: make([]FragmentSpread, 0, 16), + OperationDefinitions: make([]OperationDefinition, 0, 8), + VariableDefinitions: make([]VariableDefinition, 0, 8), + FragmentDefinitions: make([]FragmentDefinition, 0, 8), + BooleanValues: [2]BooleanValue{false, true}, + Refs: make([][8]int, 48), + RefIndex: -1, + Index: Index{ + nodes: make(map[uint64][]Node, 48), + }, + } +} + +func (d *Document) Reset() { + d.RootNodes = d.RootNodes[:0] + d.SchemaDefinitions = d.SchemaDefinitions[:0] + d.SchemaExtensions = d.SchemaExtensions[:0] + d.RootOperationTypeDefinitions = d.RootOperationTypeDefinitions[:0] + d.Directives = d.Directives[:0] + d.Arguments = d.Arguments[:0] + d.ObjectTypeDefinitions = d.ObjectTypeDefinitions[:0] + d.ObjectTypeExtensions = d.ObjectTypeExtensions[:0] + d.Types = d.Types[:0] + d.FieldDefinitions = d.FieldDefinitions[:0] + d.InputValueDefinitions = d.InputValueDefinitions[:0] + d.InputObjectTypeDefinitions = d.InputObjectTypeDefinitions[:0] + d.InputObjectTypeExtensions = d.InputObjectTypeExtensions[:0] + d.ScalarTypeDefinitions = d.ScalarTypeDefinitions[:0] + d.ScalarTypeExtensions = d.ScalarTypeExtensions[:0] + d.InterfaceTypeDefinitions = d.InterfaceTypeDefinitions[:0] + d.InterfaceTypeExtensions = d.InterfaceTypeExtensions[:0] + d.UnionTypeDefinitions = d.UnionTypeDefinitions[:0] + d.UnionTypeExtensions = d.UnionTypeExtensions[:0] + d.EnumTypeDefinitions = d.EnumTypeDefinitions[:0] + d.EnumTypeExtensions = d.EnumTypeExtensions[:0] + d.EnumValueDefinitions = d.EnumValueDefinitions[:0] + d.DirectiveDefinitions = d.DirectiveDefinitions[:0] + d.VariableValues = d.VariableValues[:0] + d.StringValues = d.StringValues[:0] + d.EnumValues = d.EnumValues[:0] + d.IntValues = d.IntValues[:0] + d.FloatValues = d.FloatValues[:0] + d.Values = d.Values[:0] + d.ListValues = d.ListValues[:0] + d.ObjectFields = d.ObjectFields[:0] + d.ObjectValues = d.ObjectValues[:0] + d.Selections = d.Selections[:0] + d.SelectionSets = d.SelectionSets[:0] + d.Fields = d.Fields[:0] + d.InlineFragments = d.InlineFragments[:0] + d.FragmentSpreads = d.FragmentSpreads[:0] + d.OperationDefinitions = d.OperationDefinitions[:0] + d.VariableDefinitions = d.VariableDefinitions[:0] + d.FragmentDefinitions = d.FragmentDefinitions[:0] + + d.RefIndex = -1 + d.Index.Reset() + d.Input.Reset() +} + +func (d *Document) NextRefIndex() int { + d.RefIndex++ + if d.RefIndex == len(d.Refs) { + d.Refs = append(d.Refs, [8]int{}) + } + return d.RefIndex +} + +func (d *Document) NewEmptyRefs() []int { + return d.Refs[d.NextRefIndex()][:0] +} + +func (d *Document) copyByteSliceReference(ref ByteSliceReference) 
ByteSliceReference {
+	if ref.Length() == 0 {
+		return ByteSliceReference{}
+	}
+	src := d.Input.ByteSlice(ref)
+	dst := make([]byte, len(src))
+	copy(dst, src)
+	return d.Input.AppendInputBytes(dst)
+}
+
+func (d *Document) AddRootNode(node Node) {
+	d.RootNodes = append(d.RootNodes, node)
+	d.Index.AddNodeStr(d.NodeNameUnsafeString(node), node)
+}
+
+func (d *Document) ImportRootNode(ref int, kind NodeKind) {
+	d.AddRootNode(Node{
+		Kind: kind,
+		Ref: ref,
+	})
+}
+
+func (d *Document) DeleteRootNodes(nodes []Node) {
+	for i := range nodes {
+		d.DeleteRootNode(nodes[i])
+	}
+}
+
+func (d *Document) DeleteRootNode(node Node) {
+	for i := range d.RootNodes {
+		if d.RootNodes[i].Kind == node.Kind && d.RootNodes[i].Ref == node.Ref {
+			d.RootNodes = append(d.RootNodes[:i], d.RootNodes[i+1:]...)
+			return
+		}
+	}
+}
+
+func (d *Document) RemoveMergedTypeExtensions() {
+	for _, node := range d.Index.MergedTypeExtensions {
+		d.RemoveRootNode(node)
+	}
+}
+
+func (d *Document) RemoveRootNode(node Node) {
+	for i := range d.RootNodes {
+		if d.RootNodes[i] == node {
+			d.RootNodes = append(d.RootNodes[:i], d.RootNodes[i+1:]...)
+			return
+		}
+	}
+}
+
+func (d *Document) NodeByName(name ByteSlice) (Node, bool) {
+	return d.Index.FirstNodeByNameBytes(name)
+}
+
+func (d *Document) NodeByNameStr(name string) (Node, bool) {
+	return d.Index.FirstNodeByNameStr(name)
+}
+
+func (d *Document) TypeDefinitionContainsImplementsInterface(typeName, interfaceName ByteSlice) bool {
+	typeDefinition, exists := d.Index.FirstNodeByNameBytes(typeName)
+	if !exists {
+		return false
+	}
+	if typeDefinition.Kind != NodeKindObjectTypeDefinition {
+		return false
+	}
+	return d.ObjectTypeDefinitionImplementsInterface(typeDefinition.Ref, interfaceName)
+}
+
+func FilterIntSliceByWhitelist(intSlice []int, whitelist []int) []int {
+	if len(intSlice) == 0 || len(whitelist) == 0 {
+		return []int{}
+	}
+	n := 0
+	for i := 0; i < len(intSlice); i++ {
+		if isWhitelisted(intSlice[i], whitelist) {
+			intSlice[n] = intSlice[i]
+			n++
+		}
+	}
+	return intSlice[:n]
+}
+
+func isWhitelisted(value int, whitelisted []int) bool {
+	for i := 0; i < len(whitelisted); i++ {
+		if whitelisted[i] == value {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_argument.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_argument.go
new file mode 100644
index 00000000000..73b78d4d59b
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_argument.go
@@ -0,0 +1,196 @@
+package ast
+
+import (
+	"bytes"
+	"io"
+
+	"github.com/TykTechnologies/graphql-go-tools/internal/pkg/unsafebytes"
+	"github.com/TykTechnologies/graphql-go-tools/pkg/lexer/literal"
+	"github.com/TykTechnologies/graphql-go-tools/pkg/lexer/position"
+)
+
+type ArgumentList struct {
+	LPAREN position.Position
+	Refs []int // Argument
+	RPAREN position.Position
+}
+
+type Argument struct {
+	Name ByteSliceReference // e.g. foo
+	Colon position.Position // :
+	Value Value // e.g. 100 or "Bar"
+	Position position.Position
+	PrintBeforeValue []byte
+	PrintAfterValue []byte
+}
+
+func (d *Document) CopyArgument(ref int) int {
+	return d.AddArgument(Argument{
+		Name: d.copyByteSliceReference(d.Arguments[ref].Name),
+		Value: Value{
+			Kind: d.Arguments[ref].Value.Kind,
+			Ref: d.copyValueRef(d.Arguments[ref].Value.Kind, d.Arguments[ref].Value.Ref),
+		},
+	})
+}
+
+func (d *Document) CopyArgumentList(list ArgumentList) ArgumentList {
+	refs := d.NewEmptyRefs()
+	for _, r := range list.Refs {
+		refs = append(refs, d.CopyArgument(r))
+	}
+	return ArgumentList{Refs: refs}
+}
+
+func (d *Document) PrintArgument(ref int, w io.Writer) error {
+	_, err := w.Write(d.Input.ByteSlice(d.Arguments[ref].Name))
+	if err != nil {
+		return err
+	}
+	_, err = w.Write(literal.COLON)
+	if err != nil {
+		return err
+	}
+	_, err = w.Write(literal.SPACE)
+	if err != nil {
+		return err
+	}
+	if d.Arguments[ref].PrintBeforeValue != nil {
+		_, err = w.Write(d.Arguments[ref].PrintBeforeValue)
+		if err != nil {
+			return err
+		}
+	}
+	err = d.PrintValue(d.Arguments[ref].Value, w)
+	if err != nil {
+		return err
+	}
+	if d.Arguments[ref].PrintAfterValue != nil {
+		_, err = w.Write(d.Arguments[ref].PrintAfterValue)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (d *Document) PrintArguments(refs []int, w io.Writer) (err error) {
+	_, err = w.Write(literal.LPAREN)
+	if err != nil {
+		return
+	}
+	for i, j := range refs {
+		err = d.PrintArgument(j, w)
+		if err != nil {
+			return
+		}
+		if i != len(refs)-1 {
+			_, err = w.Write(literal.COMMA)
+			if err != nil {
+				return
+			}
+			_, err = w.Write(literal.SPACE)
+			if err != nil {
+				return
+			}
+		}
+	}
+	_, err = w.Write(literal.RPAREN)
+	return
+}
+
+func (d *Document) ArgumentNameBytes(ref int) ByteSlice {
+	return d.Input.ByteSlice(d.Arguments[ref].Name)
+}
+
+func (d *Document) ArgumentNameString(ref int) string {
+	return unsafebytes.BytesToString(d.ArgumentNameBytes(ref))
+}
+
+func (d *Document) ArgumentValue(ref int) Value {
+	return d.Arguments[ref].Value
+}
+
+func (d *Document) ArgumentsAreEqual(left, right int) bool {
+	return bytes.Equal(d.ArgumentNameBytes(left), d.ArgumentNameBytes(right)) &&
+		d.ValuesAreEqual(d.ArgumentValue(left), d.ArgumentValue(right))
+}
+
+func (d *Document) ArgumentSetsAreEquals(left, right []int) bool {
+	if len(left) != len(right) {
+		return false
+	}
+	for i := 0; i < len(left); i++ {
+		leftArgument, rightArgument := left[i], right[i]
+		if !d.ArgumentsAreEqual(leftArgument, rightArgument) {
+			return false
+		}
+	}
+	return true
+}
+
+func (d *Document) ArgumentsBefore(ancestor Node, argument int) []int {
+	switch ancestor.Kind {
+	case NodeKindField:
+		for i, j := range d.Fields[ancestor.Ref].Arguments.Refs {
+			if argument == j {
+				return d.Fields[ancestor.Ref].Arguments.Refs[:i]
+			}
+		}
+	case NodeKindDirective:
+		for i, j := range d.Directives[ancestor.Ref].Arguments.Refs {
+			if argument == j {
+				return d.Directives[ancestor.Ref].Arguments.Refs[:i]
+			}
+		}
+	}
+	return nil
+}
+
+func (d *Document) ArgumentsAfter(ancestor Node, argument int) []int {
+	switch ancestor.Kind {
+	case NodeKindField:
+		for i, j := range d.Fields[ancestor.Ref].Arguments.Refs {
+			if argument == j {
+				return d.Fields[ancestor.Ref].Arguments.Refs[i+1:]
+			}
+		}
+	case NodeKindDirective:
+		for i, j := range d.Directives[ancestor.Ref].Arguments.Refs {
+			if argument == j {
+				return d.Directives[ancestor.Ref].Arguments.Refs[i+1:]
+			}
+		}
+	}
+	return nil
+}
+
+func (d *Document) AddArgument(argument Argument) (ref int) {
+	d.Arguments = append(d.Arguments, argument)
+	return len(d.Arguments) - 1
+}
+
+func (d *Document) ImportArgument(name string, value Value) (ref int) {
+	arg := Argument{
+		Name: d.Input.AppendInputString(name),
+		Value: value,
+	}
+
+	return d.AddArgument(arg)
+}
+
+func (d *Document) ImportVariableValueArgument(argName, variableName ByteSlice) (variableValueRef, argRef int) {
+	variableValueRef = d.ImportVariableValue(variableName)
+
+	arg := Argument{
+		Name: d.Input.AppendInputBytes(argName),
+		Value: Value{
+			Kind: ValueKindVariable,
+			Ref: variableValueRef,
+		},
+	}
+
+	argRef = d.AddArgument(arg)
+
+	return
+}
diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_description.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_description.go
new file mode 100644
index 00000000000..fdd6e15da59
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_description.go
@@ -0,0 +1,92 @@
+package ast
+
+import (
+	"io"
+	"strings"
+
+	"github.com/TykTechnologies/graphql-go-tools/pkg/lexer/literal"
+	"github.com/TykTechnologies/graphql-go-tools/pkg/lexer/position"
+	"github.com/TykTechnologies/graphql-go-tools/pkg/lexer/runes"
+)
+
+type Description struct {
+	IsDefined bool
+	IsBlockString bool // true if -> """content""" ; else "content"
+	Content ByteSliceReference // literal
+	Position position.Position
+}
+
+// nolint
+func (d *Document) PrintDescription(description Description, indent []byte, depth int, writer io.Writer) (err error) {
+	for i := 0; i < depth; i++ {
+		_, err = writer.Write(indent)
+	}
+	if description.IsBlockString {
+		_, err = writer.Write(literal.QUOTE)
+		_, err = writer.Write(literal.QUOTE)
+		_, err = writer.Write(literal.QUOTE)
+		_, err = writer.Write(literal.LINETERMINATOR)
+		for i := 0; i < depth; i++ {
+			_, err = writer.Write(indent)
+		}
+	} else {
+		_, err = writer.Write(literal.QUOTE)
+	}
+
+	content := d.Input.ByteSlice(description.Content)
+	skipWhitespace := false
+	skippedWhitespace := 0.0
+	depthToSkip := float64(depth)
+	for i := range content {
+
+		if skipWhitespace && skippedWhitespace < depthToSkip {
+			switch content[i] {
+			case runes.TAB:
+				skippedWhitespace += 1
+				continue
+			case runes.SPACE:
+				skippedWhitespace += 0.5
+				continue
+			}
+		}
+
+		switch content[i] {
+		case runes.LINETERMINATOR:
+			skipWhitespace = true
+		default:
+			if skipWhitespace {
+				for j := 0; j < depth; j++ {
+					_, err = writer.Write(indent)
+				}
+
+				skipWhitespace = false
+				skippedWhitespace = 0.0
+			}
+		}
+		_, err = writer.Write(content[i : i+1])
+	}
+	if description.IsBlockString {
+		_, err = writer.Write(literal.LINETERMINATOR)
+		for i := 0; i < depth; i++ {
+			_, err = writer.Write(indent)
+		}
+		_, err = writer.Write(literal.QUOTE)
+		_, err = writer.Write(literal.QUOTE)
+		_, err = writer.Write(literal.QUOTE)
+	} else {
+		_, err = writer.Write(literal.QUOTE)
+	}
+	return nil
+}
+
+func (d *Document) ImportDescription(desc string) (description Description) {
+	if desc == "" {
+		return
+	}
+
+	return Description{
+		IsDefined: true,
+		IsBlockString: strings.Contains(desc, "\n"),
+		Content: d.Input.AppendInputString(desc),
+	}
+}
diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_directive.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_directive.go
new file mode 100644
index 00000000000..343e1616baa
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_directive.go
@@ -0,0 +1,214 @@
+package ast
+
+import (
+	"bytes"
+	"io"
+
+	"github.com/TykTechnologies/graphql-go-tools/pkg/lexer/literal"
+	"github.com/TykTechnologies/graphql-go-tools/pkg/lexer/position"
+)
+
+type DirectiveList struct {
+	Refs []int
+}
+
+type Directive struct {
+	At position.Position // @
+	Name ByteSliceReference // e.g. include
+	HasArguments bool
+	Arguments ArgumentList // e.g. (if: true)
+}
+
+func (l *DirectiveList) HasDirectiveByName(document *Document, name string) bool {
+	for i := range l.Refs {
+		if document.DirectiveNameString(l.Refs[i]) == name {
+			return true
+		}
+	}
+	return false
+}
+
+func (l *DirectiveList) RemoveDirectiveByName(document *Document, name string) {
+	for i := range l.Refs {
+		if document.DirectiveNameString(l.Refs[i]) == name {
+			if i < len(l.Refs)-1 {
+				l.Refs = append(l.Refs[:i], l.Refs[i+1:]...)
+			} else {
+				l.Refs = l.Refs[:i]
+			}
+			return
+		}
+	}
+}
+
+func (d *Document) CopyDirective(ref int) int {
+	var arguments ArgumentList
+	if d.Directives[ref].HasArguments {
+		arguments = d.CopyArgumentList(d.Directives[ref].Arguments)
+	}
+	return d.AddDirective(Directive{
+		Name: d.copyByteSliceReference(d.Directives[ref].Name),
+		HasArguments: d.Directives[ref].HasArguments,
+		Arguments: arguments,
+	})
+}
+
+func (d *Document) CopyDirectiveList(list DirectiveList) DirectiveList {
+	refs := d.NewEmptyRefs()
+	for _, r := range list.Refs {
+		refs = append(refs, d.CopyDirective(r))
+	}
+	return DirectiveList{Refs: refs}
+}
+
+func (d *Document) PrintDirective(ref int, w io.Writer) error {
+	_, err := w.Write(literal.AT)
+	if err != nil {
+		return err
+	}
+	_, err = w.Write(d.Input.ByteSlice(d.Directives[ref].Name))
+	if err != nil {
+		return err
+	}
+	if d.Directives[ref].HasArguments {
+		err = d.PrintArguments(d.Directives[ref].Arguments.Refs, w)
+	}
+	return err
+}
+
+func (d *Document) DirectiveName(ref int) ByteSliceReference {
+	return d.Directives[ref].Name
+}
+
+func (d *Document) DirectiveNameBytes(ref int) ByteSlice {
+	return d.Input.ByteSlice(d.Directives[ref].Name)
+}
+
+func (d *Document) DirectiveNameString(ref int) string {
+	return d.Input.ByteSliceString(d.Directives[ref].Name)
+}
+
+func (d *Document) DirectiveIsFirst(directive int, ancestor Node) bool {
+	directives := d.NodeDirectives(ancestor)
+	return len(directives) != 0 && directives[0] == directive
+}
+
+func (d *Document) DirectiveIsLast(directive int, ancestor Node) bool {
+	directives := d.NodeDirectives(ancestor)
+	return len(directives) != 0 && directives[len(directives)-1] == directive
+}
+
+func (d *Document) DirectiveArgumentSet(ref int) []int {
+	return d.Directives[ref].Arguments.Refs
+}
+
+func (d *Document) DirectiveArgumentValueByName(ref int, name ByteSlice) (Value, bool) {
+	for i := 0; i < len(d.Directives[ref].Arguments.Refs); i++ {
+		arg := d.Directives[ref].Arguments.Refs[i]
+		if bytes.Equal(d.ArgumentNameBytes(arg), name) {
+			return d.ArgumentValue(arg), true
+		}
+	}
+	return Value{}, false
+}
+
+func (d *Document) DirectivesAreEqual(left, right int) bool {
+	return d.Input.ByteSliceReferenceContentEquals(d.DirectiveName(left), d.DirectiveName(right)) &&
+		d.ArgumentSetsAreEquals(d.DirectiveArgumentSet(left), d.DirectiveArgumentSet(right))
+}
+
+func (d *Document) DirectiveSetsAreEqual(left, right []int) bool {
+	if len(left) != len(right) {
+		return false
+	}
+	for i := 0; i < len(left); i++ {
+		leftDirective, rightDirective := left[i], right[i]
+		if !d.DirectivesAreEqual(leftDirective, rightDirective) {
+			return false
+		}
+	}
+	return true
+}
+
+func (d *Document) AddDirective(directive Directive) (ref int) {
+	d.Directives = append(d.Directives, directive)
+	return len(d.Directives) - 1
+}
+
+func (d *Document) ImportDirective(name string, argRefs []int) (ref int) {
+	directive := Directive{
+		Name: d.Input.AppendInputString(name),
+		HasArguments: len(argRefs) > 0,
+		Arguments: ArgumentList{
+			Refs: argRefs,
+		},
+	}
+
+	return d.AddDirective(directive)
+}
+
+func (d *Document) AddDirectiveToNode(directiveRef int, node Node) bool {
+	switch node.Kind {
+	case NodeKindField:
+		d.Fields[node.Ref].Directives.Refs = append(d.Fields[node.Ref].Directives.Refs, directiveRef)
+		d.Fields[node.Ref].HasDirectives = true
+		return true
+	case NodeKindVariableDefinition:
+		d.VariableDefinitions[node.Ref].Directives.Refs = append(d.VariableDefinitions[node.Ref].Directives.Refs, directiveRef)
+		d.VariableDefinitions[node.Ref].HasDirectives = true
+		return true
+	case NodeKindOperationDefinition:
+		d.OperationDefinitions[node.Ref].Directives.Refs = append(d.OperationDefinitions[node.Ref].Directives.Refs, directiveRef)
+		d.OperationDefinitions[node.Ref].HasDirectives = true
+		return true
+	case NodeKindInputValueDefinition:
+		d.InputValueDefinitions[node.Ref].Directives.Refs = append(d.InputValueDefinitions[node.Ref].Directives.Refs, directiveRef)
+		d.InputValueDefinitions[node.Ref].HasDirectives = true
+		return true
+	case NodeKindInlineFragment:
+		d.InlineFragments[node.Ref].Directives.Refs = append(d.InlineFragments[node.Ref].Directives.Refs, directiveRef)
+		d.InlineFragments[node.Ref].HasDirectives = true
+		return true
+	case NodeKindFragmentSpread:
+		d.FragmentSpreads[node.Ref].Directives.Refs = append(d.FragmentSpreads[node.Ref].Directives.Refs, directiveRef)
+		d.FragmentSpreads[node.Ref].HasDirectives = true
+		return true
+	case NodeKindFragmentDefinition:
+		d.FragmentDefinitions[node.Ref].Directives.Refs = append(d.FragmentDefinitions[node.Ref].Directives.Refs, directiveRef)
+		d.FragmentDefinitions[node.Ref].HasDirectives = true
+		return true
+	default:
+		return false
+	}
+}
+
+func (d *Document) DirectiveIsAllowedOnNodeKind(directiveName string, kind NodeKind, operationType OperationType) bool {
+	definition, ok := d.DirectiveDefinitionByName(directiveName)
+	if !ok {
+		return false
+	}
+
+	switch kind {
+	case NodeKindOperationDefinition:
+		switch operationType {
+		case OperationTypeQuery:
+			return d.DirectiveDefinitions[definition].DirectiveLocations.Get(ExecutableDirectiveLocationQuery)
+		case OperationTypeMutation:
+			return d.DirectiveDefinitions[definition].DirectiveLocations.Get(ExecutableDirectiveLocationMutation)
+		case OperationTypeSubscription:
+			return d.DirectiveDefinitions[definition].DirectiveLocations.Get(ExecutableDirectiveLocationSubscription)
+		}
+	case NodeKindField:
+		return d.DirectiveDefinitions[definition].DirectiveLocations.Get(ExecutableDirectiveLocationField)
+	case NodeKindFragmentDefinition:
+		return d.DirectiveDefinitions[definition].DirectiveLocations.Get(ExecutableDirectiveLocationFragmentDefinition)
+	case NodeKindFragmentSpread:
+		return d.DirectiveDefinitions[definition].DirectiveLocations.Get(ExecutableDirectiveLocationFragmentSpread)
+	case NodeKindInlineFragment:
+		return d.DirectiveDefinitions[definition].DirectiveLocations.Get(ExecutableDirectiveLocationInlineFragment)
+	case NodeKindVariableDefinition:
+		return d.DirectiveDefinitions[definition].DirectiveLocations.Get(ExecutableDirectiveLocationVariableDefinition)
+	}
+
+	return false
+}
diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_directive_definition.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_directive_definition.go
new file mode 100644
index 00000000000..827607adc64
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_directive_definition.go
@@ -0,0 +1,157 @@
+package ast
+
+import (
+	"bytes"
+
+	"github.com/TykTechnologies/graphql-go-tools/internal/pkg/unsafebytes"
+	"github.com/TykTechnologies/graphql-go-tools/pkg/lexer/position"
+)
+
+// DirectiveDefinition
+// example:
+// directive @example on FIELD
+type DirectiveDefinition struct {
+	Description Description // optional, describes the directive
+	DirectiveLiteral position.Position // directive
+	At position.Position // @
+	Name ByteSliceReference // e.g. example
+	HasArgumentsDefinitions bool
+	ArgumentsDefinition InputValueDefinitionList // optional, e.g. (if: Boolean)
+	On position.Position // on
+	DirectiveLocations DirectiveLocations // e.g. FIELD
+	Repeatable Repeatable
+}
+
+type Repeatable struct {
+	IsRepeatable bool
+	Position position.Position
+}
+
+func (d *Document) DirectiveDefinitionNameBytes(ref int) ByteSlice {
+	return d.Input.ByteSlice(d.DirectiveDefinitions[ref].Name)
+}
+
+func (d *Document) DirectiveDefinitionNameString(ref int) string {
+	return unsafebytes.BytesToString(d.Input.ByteSlice(d.DirectiveDefinitions[ref].Name))
+}
+
+func (d *Document) DirectiveDefinitionDescriptionBytes(ref int) ByteSlice {
+	if !d.DirectiveDefinitions[ref].Description.IsDefined {
+		return nil
+	}
+	return d.Input.ByteSlice(d.DirectiveDefinitions[ref].Description.Content)
+}
+
+func (d *Document) DirectiveDefinitionDescriptionString(ref int) string {
+	return unsafebytes.BytesToString(d.DirectiveDefinitionDescriptionBytes(ref))
+}
+
+func (d *Document) DirectiveArgumentInputValueDefinition(directiveName ByteSlice, argumentName ByteSlice) int {
+	for i := range d.DirectiveDefinitions {
+		if bytes.Equal(directiveName, d.Input.ByteSlice(d.DirectiveDefinitions[i].Name)) {
+			for _, j := range d.DirectiveDefinitions[i].ArgumentsDefinition.Refs {
+				if bytes.Equal(argumentName, d.Input.ByteSlice(d.InputValueDefinitions[j].Name)) {
+					return j
+				}
+			}
+		}
+	}
+	return -1
+}
+
+func (d *Document) DirectiveDefinitionArgumentDefaultValueString(directiveName, argumentName string) string {
+	inputValueDefinition := d.DirectiveArgumentInputValueDefinition(unsafebytes.StringToBytes(directiveName), unsafebytes.StringToBytes(argumentName))
+	if inputValueDefinition == -1 {
+		return ""
+	}
+	defaultValue := d.InputValueDefinitionDefaultValue(inputValueDefinition)
+	if defaultValue.Kind != ValueKindString {
+		return ""
+	}
+	return d.StringValueContentString(defaultValue.Ref)
+}
+
+func (d *Document) DirectiveDefinitionArgumentDefaultValueBool(directiveName, argumentName string) bool {
+	inputValueDefinition := d.DirectiveArgumentInputValueDefinition(unsafebytes.StringToBytes(directiveName), unsafebytes.StringToBytes(argumentName))
+	if inputValueDefinition == -1 {
+		return false
+	}
+	defaultValue := d.InputValueDefinitionDefaultValue(inputValueDefinition)
+	if defaultValue.Kind != ValueKindBoolean {
+		return false
+	}
+	return bool(d.BooleanValue(defaultValue.Ref))
+}
+
+func (d *Document) DirectiveDefinitionArgumentDefaultValueInt64(directiveName, argumentName string) int64 {
+	inputValueDefinition := d.DirectiveArgumentInputValueDefinition(unsafebytes.StringToBytes(directiveName), unsafebytes.StringToBytes(argumentName))
+	if inputValueDefinition == -1 {
+		return -1
+	}
+	defaultValue := d.InputValueDefinitionDefaultValue(inputValueDefinition)
+	if defaultValue.Kind != ValueKindInteger {
+		return -1
+	}
+	return d.IntValueAsInt(defaultValue.Ref)
+}
+
+func (d *Document) DirectiveDefinitionArgumentDefaultValueFloat32(directiveName, argumentName string) float32 {
+	inputValueDefinition := d.DirectiveArgumentInputValueDefinition(unsafebytes.StringToBytes(directiveName), unsafebytes.StringToBytes(argumentName))
+	if inputValueDefinition == -1 {
+		return -1
+	}
+	defaultValue := d.InputValueDefinitionDefaultValue(inputValueDefinition)
+	if defaultValue.Kind != ValueKindFloat {
+		return -1
+	}
+	return d.FloatValueAsFloat32(defaultValue.Ref)
+}
+
+func (d *Document) AddDirectiveDefinition(directiveDefinition DirectiveDefinition) (ref int) {
+	d.DirectiveDefinitions = append(d.DirectiveDefinitions, directiveDefinition)
+	return len(d.DirectiveDefinitions) - 1
+}
+
+func (d *Document) ImportDirectiveDefinition(name, description string, argsRefs []int, locations []string) (ref int) {
+	directiveLocations := DirectiveLocations{}
+	for _, location := range locations {
+		_ = directiveLocations.SetFromRaw([]byte(location))
+	}
+
+	definition := DirectiveDefinition{
+		Description: d.ImportDescription(description),
+		Name: d.Input.AppendInputString(name),
+		HasArgumentsDefinitions: len(argsRefs) > 0,
+		ArgumentsDefinition: InputValueDefinitionList{
+			Refs: argsRefs,
+		},
+		DirectiveLocations: directiveLocations,
+	}
+
+	ref = d.AddDirectiveDefinition(definition)
+	d.ImportRootNode(ref, NodeKindDirectiveDefinition)
+
+	return
+}
+
+func (d *Document) DirectiveDefinitionByName(name string) (int, bool) {
+	for i := range d.DirectiveDefinitions {
+		if name == d.Input.ByteSliceString(d.DirectiveDefinitions[i].Name) {
+			return i, true
+		}
+	}
+	return -1, false
+}
+
+func (d *Document) DirectiveDefinitionByNameBytes(name []byte) (int, bool) {
+	for i := range d.DirectiveDefinitions {
+		if bytes.Equal(name, d.Input.ByteSlice(d.DirectiveDefinitions[i].Name)) {
+			return i, true
+		}
+	}
+	return -1, false
+}
+
+func (d *Document) DirectiveDefinitionIsRepeatable(ref int) bool {
+	return d.DirectiveDefinitions[ref].Repeatable.IsRepeatable
+}
diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_enum_type_definition.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_enum_type_definition.go
new file mode 100644
index 00000000000..430c0ff62d0
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_enum_type_definition.go
@@ -0,0 +1,91 @@
+package ast
+
+import (
+	"bytes"
+
+	"github.com/TykTechnologies/graphql-go-tools/internal/pkg/unsafebytes"
+	"github.com/TykTechnologies/graphql-go-tools/pkg/lexer/position"
+)
+
+// EnumTypeDefinition
+// example:
+// enum Direction {
+//   NORTH
+//   EAST
+//   SOUTH
+//   WEST
+// }
+type EnumTypeDefinition struct {
+	Description Description // optional, describes enum
+	EnumLiteral position.Position // enum
+	Name ByteSliceReference // e.g. Direction
+	HasDirectives bool
+	Directives DirectiveList // optional, e.g. @foo
+	HasEnumValuesDefinition bool
+	EnumValuesDefinition EnumValueDefinitionList // optional, e.g. { NORTH EAST }
+}
+
+func (d *Document) EnumTypeDefinitionNameBytes(ref int) ByteSlice {
+	return d.Input.ByteSlice(d.EnumTypeDefinitions[ref].Name)
+}
+
+func (d *Document) EnumTypeDefinitionNameString(ref int) string {
+	return unsafebytes.BytesToString(d.Input.ByteSlice(d.EnumTypeDefinitions[ref].Name))
+}
+
+func (d *Document) EnumTypeDefinitionDescriptionBytes(ref int) ByteSlice {
+	if !d.EnumTypeDefinitions[ref].Description.IsDefined {
+		return nil
+	}
+	return d.Input.ByteSlice(d.EnumTypeDefinitions[ref].Description.Content)
+}
+
+func (d *Document) EnumTypeDefinitionDescriptionString(ref int) string {
+	return unsafebytes.BytesToString(d.EnumTypeDefinitionDescriptionBytes(ref))
+}
+
+func (d *Document) EnumTypeDefinitionHasDirectives(ref int) bool {
+	return d.EnumTypeDefinitions[ref].HasDirectives
+}
+
+func (d *Document) EnumTypeDefinitionHasEnumValueDefinition(ref int) bool {
+	return d.EnumTypeDefinitions[ref].HasEnumValuesDefinition
+}
+
+func (d *Document) EnumTypeDefinitionContainsEnumValue(enumTypeDef int, valueName ByteSlice) bool {
+	for _, i := range d.EnumTypeDefinitions[enumTypeDef].EnumValuesDefinition.Refs {
+		if bytes.Equal(valueName, d.EnumValueDefinitionNameBytes(i)) {
+			return true
+		}
+	}
+	return false
+}
+
+func (d *Document) AddEnumTypeDefinition(definition EnumTypeDefinition) (ref int) {
+	d.EnumTypeDefinitions = append(d.EnumTypeDefinitions, definition)
+	return len(d.EnumTypeDefinitions) - 1
+}
+
+func (d *Document) ImportEnumTypeDefinition(name, description string, valueRefs []int) (ref int) {
+	return d.ImportEnumTypeDefinitionWithDirectives(name, description, valueRefs, nil)
+}
+
+func (d *Document) ImportEnumTypeDefinitionWithDirectives(name, description string, valueRefs []int, directiveRefs []int) (ref int) {
+	definition := EnumTypeDefinition{
+		Description: d.ImportDescription(description),
+		Name: d.Input.AppendInputString(name),
+		HasEnumValuesDefinition: len(valueRefs) > 0,
+		EnumValuesDefinition: EnumValueDefinitionList{
+			Refs: valueRefs,
+		},
+		HasDirectives: len(directiveRefs) > 0,
+		Directives: DirectiveList{
+			Refs: directiveRefs,
+		},
+	}
+
+	ref = d.AddEnumTypeDefinition(definition)
+	d.ImportRootNode(ref, NodeKindEnumTypeDefinition)
+
+	return
+}
diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_enum_type_extension.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_enum_type_extension.go
new file mode 100644
index 00000000000..a91d5ca251f
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_enum_type_extension.go
@@ -0,0 +1,62 @@
+package ast
+
+import (
+	"github.com/TykTechnologies/graphql-go-tools/internal/pkg/unsafebytes"
+	"github.com/TykTechnologies/graphql-go-tools/pkg/lexer/position"
+)
+
+type EnumTypeExtension struct {
+	ExtendLiteral position.Position
+	EnumTypeDefinition
+}
+
+func (d *Document) EnumTypeExtensionNameBytes(ref int) ByteSlice {
+	return d.Input.ByteSlice(d.EnumTypeExtensions[ref].Name)
+}
+
+func (d *Document) EnumTypeExtensionNameString(ref int) string {
+	return unsafebytes.BytesToString(d.Input.ByteSlice(d.EnumTypeExtensions[ref].Name))
+}
+
+func (d *Document) EnumTypeExtensionDescriptionBytes(ref int) ByteSlice {
+	if !d.EnumTypeExtensions[ref].Description.IsDefined {
+		return nil
+	}
+	return d.Input.ByteSlice(d.EnumTypeExtensions[ref].Description.Content)
+}
+
+func (d *Document) EnumTypeExtensionDescriptionString(ref int) string {
+	return unsafebytes.BytesToString(d.EnumTypeExtensionDescriptionBytes(ref))
+}
+
+func (d *Document) EnumTypeExtensionHasEnumValueDefinition(ref int) bool {
+	return d.EnumTypeExtensions[ref].HasEnumValuesDefinition
+}
+
+func (d *Document) EnumTypeExtensionHasDirectives(ref int) bool {
+	return d.EnumTypeExtensions[ref].HasDirectives
+}
+
+func (d *Document) ExtendEnumTypeDefinitionByEnumTypeExtension(enumTypeDefinitionRef, enumTypeExtensionRef int) {
+	if d.EnumTypeExtensionHasDirectives(enumTypeExtensionRef) {
+		d.EnumTypeDefinitions[enumTypeDefinitionRef].Directives.Refs = append(d.EnumTypeDefinitions[enumTypeDefinitionRef].Directives.Refs, d.EnumTypeExtensions[enumTypeExtensionRef].Directives.Refs...)
+		d.EnumTypeDefinitions[enumTypeDefinitionRef].HasDirectives = true
+	}
+
+	if d.EnumTypeExtensionHasEnumValueDefinition(enumTypeExtensionRef) {
+		d.EnumTypeDefinitions[enumTypeDefinitionRef].EnumValuesDefinition.Refs = append(d.EnumTypeDefinitions[enumTypeDefinitionRef].EnumValuesDefinition.Refs, d.EnumTypeExtensions[enumTypeExtensionRef].EnumValuesDefinition.Refs...)
+		d.EnumTypeDefinitions[enumTypeDefinitionRef].HasEnumValuesDefinition = true
+	}
+
+	d.Index.MergedTypeExtensions = append(d.Index.MergedTypeExtensions, Node{Ref: enumTypeExtensionRef, Kind: NodeKindEnumTypeExtension})
+}
+
+func (d *Document) ImportAndExtendEnumTypeDefinitionByEnumTypeExtension(enumTypeExtensionRef int) {
+	d.ImportEnumTypeDefinitionWithDirectives(
+		d.EnumTypeExtensionNameString(enumTypeExtensionRef),
+		d.EnumTypeExtensionDescriptionString(enumTypeExtensionRef),
+		d.EnumTypeExtensions[enumTypeExtensionRef].EnumValuesDefinition.Refs,
+		d.EnumTypeExtensions[enumTypeExtensionRef].Directives.Refs,
+	)
+	d.Index.MergedTypeExtensions = append(d.Index.MergedTypeExtensions, Node{Ref: enumTypeExtensionRef, Kind: NodeKindEnumTypeExtension})
+}
diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_enum_value_definition.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_enum_value_definition.go
new file mode 100644
index 00000000000..13f82566a21
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_enum_value_definition.go
@@ -0,0 +1,104 @@
+package ast
+
+import (
+	"bytes"
+
+	"github.com/TykTechnologies/graphql-go-tools/internal/pkg/unsafebytes"
+	"github.com/TykTechnologies/graphql-go-tools/pkg/lexer/position"
+)
+
+type EnumValueDefinitionList struct {
+	LBRACE position.Position // {
+	Refs []int // EnumValueDefinition
+	RBRACE position.Position // }
+}
+
+// EnumValueDefinition
+// example:
+// "NORTH enum value" NORTH @foo
+type EnumValueDefinition struct {
+	Description Description // optional, describes enum value
+	EnumValue ByteSliceReference // e.g. NORTH (a Name, but not true, false or null)
+	HasDirectives bool
+	Directives DirectiveList // optional, e.g. @foo
+}
+
+func (d *Document) EnumValueDefinitionNameBytes(ref int) ByteSlice {
+	return d.Input.ByteSlice(d.EnumValueDefinitions[ref].EnumValue)
+}
+
+func (d *Document) EnumValueDefinitionNameString(ref int) string {
+	return unsafebytes.BytesToString(d.Input.ByteSlice(d.EnumValueDefinitions[ref].EnumValue))
+}
+
+func (d *Document) EnumValueDefinitionDescriptionBytes(ref int) ByteSlice {
+	if !d.EnumValueDefinitions[ref].Description.IsDefined {
+		return nil
+	}
+	return d.Input.ByteSlice(d.EnumValueDefinitions[ref].Description.Content)
+}
+
+func (d *Document) EnumValueDefinitionDescriptionString(ref int) string {
+	return unsafebytes.BytesToString(d.EnumValueDefinitionDescriptionBytes(ref))
+}
+
+func (d *Document) EnumValueDefinitionHasDirectives(ref int) bool {
+	return d.EnumValueDefinitions[ref].HasDirectives
+}
+
+func (d *Document) EnumValueDefinitionDirectives(ref int) (refs []int) {
+	return d.EnumValueDefinitions[ref].Directives.Refs
+}
+
+func (d *Document) EnumValueDefinitionDirectiveByName(definitionRef int, directiveName ByteSlice) (ref int, exists bool) {
+	for _, i := range d.EnumValueDefinitions[definitionRef].Directives.Refs {
+		if bytes.Equal(directiveName, d.DirectiveNameBytes(i)) {
+			return i, true
+		}
+	}
+	return
+}
+
+func (d *Document) EnumValueDefinitionIsFirst(ref int, ancestor Node) bool {
+	switch ancestor.Kind {
+	case NodeKindEnumTypeDefinition:
+		return d.EnumTypeDefinitions[ancestor.Ref].EnumValuesDefinition.Refs != nil &&
+			d.EnumTypeDefinitions[ancestor.Ref].EnumValuesDefinition.Refs[0] == ref
+	case NodeKindEnumTypeExtension:
+		return d.EnumTypeExtensions[ancestor.Ref].EnumValuesDefinition.Refs != nil &&
+			d.EnumTypeExtensions[ancestor.Ref].EnumValuesDefinition.Refs[0] == ref
+	default:
+		return false
+	}
+}
+
+func (d *Document) EnumValueDefinitionIsLast(ref int, ancestor Node) bool {
+	switch ancestor.Kind {
+	case NodeKindEnumTypeDefinition:
+		return d.EnumTypeDefinitions[ancestor.Ref].EnumValuesDefinition.Refs != nil &&
+			d.EnumTypeDefinitions[ancestor.Ref].EnumValuesDefinition.Refs[len(d.EnumTypeDefinitions[ancestor.Ref].EnumValuesDefinition.Refs)-1] == ref
+	case NodeKindEnumTypeExtension:
+		return d.EnumTypeExtensions[ancestor.Ref].EnumValuesDefinition.Refs != nil &&
+			d.EnumTypeExtensions[ancestor.Ref].EnumValuesDefinition.Refs[len(d.EnumTypeExtensions[ancestor.Ref].EnumValuesDefinition.Refs)-1] == ref
+	default:
+		return false
+	}
+}
+
+func (d *Document) AddEnumValueDefinition(inputValueDefinition EnumValueDefinition) (ref int) {
+	d.EnumValueDefinitions = append(d.EnumValueDefinitions, inputValueDefinition)
+	return len(d.EnumValueDefinitions) - 1
+}
+
+func (d *Document) ImportEnumValueDefinition(value, description string, directiveRefs []int) (ref int) {
+	inputValueDef := EnumValueDefinition{
+		Description: d.ImportDescription(description),
+		EnumValue: d.Input.AppendInputString(value),
+		HasDirectives: len(directiveRefs) > 0,
+		Directives: DirectiveList{
+			Refs: directiveRefs,
+		},
+	}
+
+	return d.AddEnumValueDefinition(inputValueDef)
+}
diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_field.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_field.go
new file mode 100644
index 00000000000..f8d58452793
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_field.go
@@ -0,0 +1,130 @@
+package ast
+
+import (
+	"bytes"
+
+	"github.com/TykTechnologies/graphql-go-tools/internal/pkg/unsafebytes"
+	"github.com/TykTechnologies/graphql-go-tools/pkg/lexer/position"
+)
+
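+// Field
+// example (the names below are illustrative placeholders, following the
+// doc-comment convention of the sibling types in this package):
+// withAlias: fieldName(argument: "value") @directive {
+//   subfield
+// }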
+type Field struct {
+	Alias Alias // optional, e.g. renamed:
+	Name ByteSliceReference // field name, e.g. id
+	HasArguments bool
+	Arguments ArgumentList // optional
+	HasDirectives bool
+	Directives DirectiveList // optional
+	SelectionSet int // optional
+	HasSelections bool
+	Position position.Position
+}
+
+func (d *Document) CopyField(ref int) int {
+	var arguments ArgumentList
+	var directives DirectiveList
+	var selectionSet int
+	if d.Fields[ref].HasArguments {
+		arguments = d.CopyArgumentList(d.Fields[ref].Arguments)
+	}
+	if d.Fields[ref].HasDirectives {
+		directives = d.CopyDirectiveList(d.Fields[ref].Directives)
+	}
+	if d.Fields[ref].HasSelections {
+		selectionSet = d.CopySelectionSet(d.Fields[ref].SelectionSet)
+	}
+	return d.AddField(Field{
+		Name: d.copyByteSliceReference(d.Fields[ref].Name),
+		Alias: d.CopyAlias(d.Fields[ref].Alias),
+		HasArguments: d.Fields[ref].HasArguments,
+		Arguments: arguments,
+		HasDirectives: d.Fields[ref].HasDirectives,
+		Directives: directives,
+		HasSelections: d.Fields[ref].HasSelections,
+		SelectionSet: selectionSet,
+	}).Ref
+}
+
+func (d *Document) FieldNameBytes(ref int) ByteSlice {
+	return d.Input.ByteSlice(d.Fields[ref].Name)
+}
+
+// FieldNameUnsafeString - returns the field name as a string that is an unsafe pointer into the document's input content
+func (d *Document) FieldNameUnsafeString(ref int) string {
+	return unsafebytes.BytesToString(d.Input.ByteSlice(d.Fields[ref].Name))
+}
+
+// FieldNameString - returns the field name as a string value
+func (d *Document) FieldNameString(ref int) string {
+	return string(d.Input.ByteSlice(d.Fields[ref].Name))
+}
+
+func (d *Document) AddField(field Field) Node {
+	d.Fields = append(d.Fields, field)
+	return Node{
+		Kind: NodeKindField,
+		Ref: len(d.Fields) - 1,
+	}
+}
+
+func (d *Document) AddArgumentToField(fieldRef, argRef int) {
+	if !d.Fields[fieldRef].HasArguments {
+		d.Fields[fieldRef].HasArguments = true
+		d.Fields[fieldRef].Arguments.Refs = d.Refs[d.NextRefIndex()][:0]
+	}
+	d.Fields[fieldRef].Arguments.Refs = append(d.Fields[fieldRef].Arguments.Refs, argRef)
+}
+
+func (d *Document) FieldArguments(ref int) []int {
+	return d.Fields[ref].Arguments.Refs
+}
+
+func (d *Document) FieldArgument(field int, name ByteSlice) (ref int, exists bool) {
+	for _, i := range d.Fields[field].Arguments.Refs {
+		if bytes.Equal(d.ArgumentNameBytes(i), name) {
+			return i, true
+		}
+	}
+	return -1, false
+}
+
+func (d *Document) FieldDirectives(ref int) []int {
+	return d.Fields[ref].Directives.Refs
+}
+
+func (d *Document) FieldsHaveSameShape(left, right int) bool {
+	leftAliasDefined := d.FieldAliasIsDefined(left)
+	rightAliasDefined := d.FieldAliasIsDefined(right)
+
+	switch {
+	case !leftAliasDefined && !rightAliasDefined:
+		return d.Input.ByteSliceReferenceContentEquals(d.Fields[left].Name, d.Fields[right].Name)
+	case leftAliasDefined && rightAliasDefined:
+		return d.Input.ByteSliceReferenceContentEquals(d.Fields[left].Alias.Name, d.Fields[right].Alias.Name)
+	case leftAliasDefined && !rightAliasDefined:
+		return d.Input.ByteSliceReferenceContentEquals(d.Fields[left].Alias.Name, d.Fields[right].Name)
+	case !leftAliasDefined && rightAliasDefined:
+		return d.Input.ByteSliceReferenceContentEquals(d.Fields[left].Name, d.Fields[right].Alias.Name)
+	default:
+		return false
+	}
+}
+
+func (d *Document) FieldHasArguments(ref int) bool {
+	return d.Fields[ref].HasArguments
+}
+
+func (d *Document) FieldHasSelections(ref int) bool {
+	return d.Fields[ref].HasSelections
+}
+
+func (d *Document) FieldHasDirectives(ref int) bool {
+	return d.Fields[ref].HasDirectives
+}
+
+func (d *Document) FieldsAreEqualFlat(left, right int) bool {
+	return bytes.Equal(d.FieldNameBytes(left), d.FieldNameBytes(right)) && // name
+		bytes.Equal(d.FieldAliasBytes(left), d.FieldAliasBytes(right)) && // alias
+		!d.FieldHasSelections(left) && !d.FieldHasSelections(right) && // selections
+		d.ArgumentSetsAreEquals(d.FieldArguments(left), d.FieldArguments(right)) && // arguments
+		d.DirectiveSetsAreEqual(d.FieldDirectives(left), d.FieldDirectives(right)) // directives
+}
diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_field_alias.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_field_alias.go
new file mode 100644
index 00000000000..389870205fc
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_field_alias.go
@@ -0,0 +1,48 @@
+package ast
+
+import (
+	"github.com/TykTechnologies/graphql-go-tools/internal/pkg/unsafebytes"
+	"github.com/TykTechnologies/graphql-go-tools/pkg/lexer/position"
+)
+
+type Alias struct {
+	IsDefined bool
+	Name ByteSliceReference // optional, e.g. renamedField
+	Colon position.Position // :
+}
+
+func (d *Document) CopyAlias(alias Alias) Alias {
+	return Alias{
+		IsDefined: alias.IsDefined,
+		Name: d.copyByteSliceReference(alias.Name),
+	}
+}
+
+func (d *Document) FieldAliasOrNameBytes(ref int) ByteSlice {
+	if d.FieldAliasIsDefined(ref) {
+		return d.FieldAliasBytes(ref)
+	}
+	return d.FieldNameBytes(ref)
+}
+
+func (d *Document) FieldAliasOrNameString(ref int) string {
+	return unsafebytes.BytesToString(d.FieldAliasOrNameBytes(ref))
+}
+
+func (d *Document) FieldAliasBytes(ref int) ByteSlice {
+	return d.Input.ByteSlice(d.Fields[ref].Alias.Name)
+}
+
+func (d *Document) FieldAliasString(ref int) string {
+	return unsafebytes.BytesToString(d.Input.ByteSlice(d.Fields[ref].Alias.Name))
+}
+
+func (d *Document) FieldAliasIsDefined(ref int) bool {
+	return d.Fields[ref].Alias.IsDefined
+}
+
+func (d *Document) RemoveFieldAlias(ref int) {
+	d.Fields[ref].Alias.IsDefined = false
+	d.Fields[ref].Alias.Name.Start = 0
+	d.Fields[ref].Alias.Name.End = 0
+}
diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_field_definition.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_field_definition.go
new file mode 100644
index 00000000000..eff6169655a
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_field_definition.go
@@ -0,0 +1,152 @@
+package ast
+
+import (
+	"bytes"
+
+	"github.com/TykTechnologies/graphql-go-tools/internal/pkg/unsafebytes"
+	"github.com/TykTechnologies/graphql-go-tools/pkg/lexer/literal"
+	"github.com/TykTechnologies/graphql-go-tools/pkg/lexer/position"
+)
+
+type FieldDefinitionList struct {
+	LBRACE position.Position // {
+	Refs []int // FieldDefinition
+	RBRACE position.Position // }
+}
+
+type FieldDefinition struct {
+	Description Description // optional e.g. "FieldDefinition is ..."
+	Name ByteSliceReference // e.g. foo
+	HasArgumentsDefinitions bool
+	ArgumentsDefinition InputValueDefinitionList // optional
+	Colon position.Position // :
+	Type int // e.g. String
+	HasDirectives bool
+	Directives DirectiveList // e.g. @foo
+}
+
+func (d *Document) FieldDefinitionNameBytes(ref int) ByteSlice {
+	return d.Input.ByteSlice(d.FieldDefinitions[ref].Name)
+}
+
+func (d *Document) FieldDefinitionNameString(ref int) string {
+	return unsafebytes.BytesToString(d.FieldDefinitionNameBytes(ref))
+}
+
+func (d *Document) FieldDefinitionDescriptionBytes(ref int) ByteSlice {
+	if !d.FieldDefinitions[ref].Description.IsDefined {
+		return nil
+	}
+	return d.Input.ByteSlice(d.FieldDefinitions[ref].Description.Content)
+}
+
+func (d *Document) FieldDefinitionDescriptionString(ref int) string {
+	return unsafebytes.BytesToString(d.FieldDefinitionDescriptionBytes(ref))
+}
+
+func (d *Document) FieldDefinitionIsFirst(field int, ancestor Node) bool {
+	definitions := d.NodeFieldDefinitions(ancestor)
+	return len(definitions) != 0 && definitions[0] == field
+}
+
+func (d *Document) FieldDefinitionIsLast(field int, ancestor Node) bool {
+	definitions := d.NodeFieldDefinitions(ancestor)
+	return len(definitions) != 0 && definitions[len(definitions)-1] == field
+}
+
+func (d *Document) FieldDefinitionHasDirectives(ref int) bool {
+	return d.FieldDefinitions[ref].HasDirectives
+}
+
+func (d *Document) FieldDefinitionDirectives(fieldDefinition int) (refs []int) {
+	return d.FieldDefinitions[fieldDefinition].Directives.Refs
+}
+
+func (d *Document) FieldDefinitionDirectiveByName(fieldDefinition int, directiveName ByteSlice) (ref int, exists bool) {
+	for _, i := range d.FieldDefinitions[fieldDefinition].Directives.Refs {
+		if bytes.Equal(directiveName, d.DirectiveNameBytes(i)) {
+			return i, true
+		}
+	}
+	return
+}
+
+func (d *Document) FieldDefinitionHasNamedDirective(fieldDefinition int, directiveName string) bool {
+	_, exists := d.FieldDefinitionDirectiveByName(fieldDefinition, unsafebytes.StringToBytes(directiveName))
+	return exists
+}
+
+func (d *Document) FieldDefinitionResolverTypeName(enclosingType Node) ByteSlice {
+	switch enclosingType.Kind {
+	case NodeKindObjectTypeDefinition:
+		name := d.ObjectTypeDefinitionNameBytes(enclosingType.Ref)
+		switch {
+		case bytes.Equal(name, d.Index.QueryTypeName):
+			return literal.QUERY
+		case bytes.Equal(name, d.Index.MutationTypeName):
+			return literal.MUTATION
+		case bytes.Equal(name, d.Index.SubscriptionTypeName):
+			return literal.SUBSCRIPTION
+		}
+	}
+	return d.NodeNameBytes(enclosingType)
+}
+
+func (d *Document) AddFieldDefinition(fieldDefinition FieldDefinition) (ref int) {
+	d.FieldDefinitions = append(d.FieldDefinitions, fieldDefinition)
+	return len(d.FieldDefinitions) - 1
+}
+
+func (d *Document) ImportFieldDefinition(name, description string, typeRef int, argsRefs []int, directiveRefs []int) (ref int) {
+	fieldDef := FieldDefinition{
+		Name: d.Input.AppendInputString(name),
+		Type: typeRef,
+		Description: d.ImportDescription(description),
+		ArgumentsDefinition: InputValueDefinitionList{
+			Refs: argsRefs,
+		},
+		HasArgumentsDefinitions: len(argsRefs) > 0,
+		Directives: DirectiveList{
+			Refs: directiveRefs,
+		},
+		HasDirectives: len(directiveRefs) > 0,
+	}
+
+	return d.AddFieldDefinition(fieldDef)
+}
+
+func (d *Document) FieldDefinitionsContainField(definitions []int, field ByteSlice) bool {
+	for _, i := range definitions {
+		if bytes.Equal(field, d.FieldDefinitionNameBytes(i)) {
+			return true
+		}
+	}
+	return false
+}
+
+func (d *Document) FieldDefinitionHasArgumentsDefinitions(ref int) bool {
+	return d.FieldDefinitions[ref].HasArgumentsDefinitions
+}
+
+func (d *Document) FieldDefinitionArgumentsDefinitions(ref int) []int {
+	return d.FieldDefinitions[ref].ArgumentsDefinition.Refs
+}
+
+func (d *Document) FieldDefinitionType(ref int) int {
+	return d.FieldDefinitions[ref].Type
+}
+
+func (d *Document) FieldDefinitionTypeNode(ref int) Node {
+	typeName := d.ResolveTypeNameBytes(d.FieldDefinitions[ref].Type)
+	node, _ := d.Index.FirstNodeByNameBytes(typeName)
+	return node
+}
+
+func (d *Document) RemoveFieldDefinitionsFromObjectTypeDefinition(fieldDefinitionRefs []int, objectTypeDefinitionRef int) {
+	for _, fieldRef := range fieldDefinitionRefs {
+		if i, ok := indexOf(d.ObjectTypeDefinitions[objectTypeDefinitionRef].FieldsDefinition.Refs, fieldRef); ok {
+			deleteRef(&d.ObjectTypeDefinitions[objectTypeDefinitionRef].FieldsDefinition.Refs, i)
+		}
+	}
+	d.ObjectTypeDefinitions[objectTypeDefinitionRef].HasFieldDefinitions = len(d.ObjectTypeDefinitions[objectTypeDefinitionRef].FieldsDefinition.Refs) > 0
+}
diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_fragment_definition.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_fragment_definition.go
new file mode 100644
index 00000000000..b31da6034cc
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_fragment_definition.go
@@ -0,0 +1,72 @@
+package ast
+
+import (
+	"bytes"
+
+	"github.com/TykTechnologies/graphql-go-tools/internal/pkg/unsafebytes"
+	"github.com/TykTechnologies/graphql-go-tools/pkg/lexer/position"
+)
+
+// TypeCondition
+// example:
+// on User
+type TypeCondition struct {
+	On position.Position // on
+	Type int // NamedType
+}
+
+// FragmentDefinition
+// example:
+// fragment friendFields on User {
+//   id
+//   name
+//   profilePic(size: 50)
+// }
+type FragmentDefinition struct {
+	FragmentLiteral position.Position // fragment
+	Name ByteSliceReference // Name but not on, e.g. friendFields
+	TypeCondition TypeCondition // e.g. on User
+	HasDirectives bool
+	Directives DirectiveList // optional, e.g. @foo
+	SelectionSet int // e.g. { id }
+	HasSelections bool
+}
+
+func (d *Document) FragmentDefinitionRef(byName ByteSlice) (ref int, exists bool) {
+	for i := range d.FragmentDefinitions {
+		if bytes.Equal(byName, d.Input.ByteSlice(d.FragmentDefinitions[i].Name)) {
+			return i, true
+		}
+	}
+	return -1, false
+}
+
+func (d *Document) FragmentDefinitionTypeName(ref int) ByteSlice {
+	return d.ResolveTypeNameBytes(d.FragmentDefinitions[ref].TypeCondition.Type)
+}
+
+func (d *Document) FragmentDefinitionNameBytes(ref int) ByteSlice {
+	return d.Input.ByteSlice(d.FragmentDefinitions[ref].Name)
+}
+
+func (d *Document) FragmentDefinitionNameString(ref int) string {
+	return unsafebytes.BytesToString(d.Input.ByteSlice(d.FragmentDefinitions[ref].Name))
+}
+
+func (d *Document) FragmentDefinitionIsLastRootNode(ref int) bool {
+	for i := range d.RootNodes {
+		if d.RootNodes[i].Kind == NodeKindFragmentDefinition && d.RootNodes[i].Ref == ref {
+			return len(d.RootNodes)-1 == i
+		}
+	}
+	return false
+}
+
+func (d *Document) FragmentDefinitionIsUsed(name ByteSlice) bool {
+	for _, i := range d.Index.ReplacedFragmentSpreads {
+		if bytes.Equal(name, d.FragmentSpreadNameBytes(i)) {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_fragment_spread.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_fragment_spread.go
new file mode 100644
index 00000000000..5ca7973ea46
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_fragment_spread.go
@@ -0,0 +1,80 @@
+package ast
+
+import (
+	"github.com/TykTechnologies/graphql-go-tools/internal/pkg/unsafebytes"
+	"github.com/TykTechnologies/graphql-go-tools/pkg/lexer/position"
+)
+
+// FragmentSpread
+// example:
+// ...MyFragment
+type FragmentSpread struct {
+	Spread position.Position // ...
+	FragmentName ByteSliceReference // Name but not on, e.g. MyFragment
+	HasDirectives bool
+	Directives DirectiveList // optional, e.g. @foo
+}
+
+func (d *Document) CopyFragmentSpread(ref int) int {
+	var directives DirectiveList
+	if d.FragmentSpreads[ref].HasDirectives {
+		directives = d.CopyDirectiveList(d.FragmentSpreads[ref].Directives)
+	}
+	return d.AddFragmentSpread(FragmentSpread{
+		FragmentName: d.copyByteSliceReference(d.FragmentSpreads[ref].FragmentName),
+		HasDirectives: d.FragmentSpreads[ref].HasDirectives,
+		Directives: directives,
+	})
+}
+
+func (d *Document) AddFragmentSpread(spread FragmentSpread) int {
+	d.FragmentSpreads = append(d.FragmentSpreads, spread)
+	return len(d.FragmentSpreads) - 1
+}
+
+func (d *Document) FragmentSpreadNameBytes(ref int) ByteSlice {
+	return d.Input.ByteSlice(d.FragmentSpreads[ref].FragmentName)
+}
+
+func (d *Document) FragmentSpreadNameString(ref int) string {
+	return unsafebytes.BytesToString(d.FragmentSpreadNameBytes(ref))
+}
+
+// ReplaceFragmentSpread replaces a fragment spread with a given selection set.
+// Attention! This might lead to duplicate field problems, because the same field with its unique field reference might be copied into the same selection set.
+// Possible problems: changing directives or sub selections will affect both fields with the same id.
+// Simple solution: run normalization to deduplicate fields.
+// As part of the normalization flow this problem will be handled automatically;
+// just be careful in case you use this function outside of the normalization package.
+func (d *Document) ReplaceFragmentSpread(selectionSet int, spreadRef int, replaceWithSelectionSet int) {
+	for i, j := range d.SelectionSets[selectionSet].SelectionRefs {
+		if d.Selections[j].Kind == SelectionKindFragmentSpread && d.Selections[j].Ref == spreadRef {
+			d.SelectionSets[selectionSet].SelectionRefs = append(d.SelectionSets[selectionSet].SelectionRefs[:i], append(d.SelectionSets[replaceWithSelectionSet].SelectionRefs, d.SelectionSets[selectionSet].SelectionRefs[i+1:]...)...)
+			d.Index.ReplacedFragmentSpreads = append(d.Index.ReplacedFragmentSpreads, spreadRef)
+			return
+		}
+	}
+}
+
+// ReplaceFragmentSpreadWithInlineFragment replaces a given fragment spread with an inline fragment.
+// Attention! The same rules apply as for 'ReplaceFragmentSpread', see above.
+func (d *Document) ReplaceFragmentSpreadWithInlineFragment(selectionSet int, spreadRef int, replaceWithSelectionSet int, typeCondition TypeCondition) {
+	d.InlineFragments = append(d.InlineFragments, InlineFragment{
+		TypeCondition: typeCondition,
+		SelectionSet: replaceWithSelectionSet,
+		HasSelections: len(d.SelectionSets[replaceWithSelectionSet].SelectionRefs) != 0,
+	})
+	ref := len(d.InlineFragments) - 1
+	d.Selections = append(d.Selections, Selection{
+		Kind: SelectionKindInlineFragment,
+		Ref: ref,
+	})
+	selectionRef := len(d.Selections) - 1
+	for i, j := range d.SelectionSets[selectionSet].SelectionRefs {
+		if d.Selections[j].Kind == SelectionKindFragmentSpread && d.Selections[j].Ref == spreadRef {
+			d.SelectionSets[selectionSet].SelectionRefs = append(d.SelectionSets[selectionSet].SelectionRefs[:i], append([]int{selectionRef}, d.SelectionSets[selectionSet].SelectionRefs[i+1:]...)...)
+			d.Index.ReplacedFragmentSpreads = append(d.Index.ReplacedFragmentSpreads, spreadRef)
+			return
+		}
+	}
+}
diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_inline_fragment.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_inline_fragment.go
new file mode 100644
index 00000000000..79366b6c237
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_inline_fragment.go
@@ -0,0 +1,71 @@
+package ast
+
+import (
+	"github.com/TykTechnologies/graphql-go-tools/internal/pkg/unsafebytes"
+	"github.com/TykTechnologies/graphql-go-tools/pkg/lexer/position"
+)
+
+// InlineFragment
+// example:
+// ... on User {
+//   friends {
+//     count
+//   }
+// }
+type InlineFragment struct {
+	Spread position.Position // ...
+	TypeCondition TypeCondition // on NamedType, e.g. on User
+	HasDirectives bool
+	Directives DirectiveList // optional, e.g. @foo
+	SelectionSet int // optional, e.g. { nextField }
+	HasSelections bool
+}
+
+func (d *Document) CopyInlineFragment(ref int) int {
+	var directives DirectiveList
+	var selectionSet int
+	if d.InlineFragments[ref].HasDirectives {
+		directives = d.CopyDirectiveList(d.InlineFragments[ref].Directives)
+	}
+	if d.InlineFragments[ref].HasSelections {
+		selectionSet = d.CopySelectionSet(d.InlineFragments[ref].SelectionSet)
+	}
+	return d.AddInlineFragment(InlineFragment{
+		TypeCondition: d.InlineFragments[ref].TypeCondition, // Value type; doesn't need to be copied.
+		HasDirectives: d.InlineFragments[ref].HasDirectives,
+		Directives: directives,
+		SelectionSet: selectionSet,
+		HasSelections: d.InlineFragments[ref].HasSelections,
+	})
+}
+
+func (d *Document) InlineFragmentTypeConditionName(ref int) ByteSlice {
+	if d.InlineFragments[ref].TypeCondition.Type == -1 {
+		return nil
+	}
+	return d.Input.ByteSlice(d.Types[d.InlineFragments[ref].TypeCondition.Type].Name)
+}
+
+func (d *Document) InlineFragmentTypeConditionNameString(ref int) string {
+	return unsafebytes.BytesToString(d.InlineFragmentTypeConditionName(ref))
+}
+
+func (d *Document) InlineFragmentHasTypeCondition(ref int) bool {
+	return d.InlineFragments[ref].TypeCondition.Type != -1
+}
+
+func (d *Document) InlineFragmentHasDirectives(ref int) bool {
+	return len(d.InlineFragments[ref].Directives.Refs) != 0
+}
+
+func (d *Document) InlineFragmentSelections(ref int) []int {
+	if !d.InlineFragments[ref].HasSelections {
+		return nil
+	}
+	return d.SelectionSets[d.InlineFragments[ref].SelectionSet].SelectionRefs
+}
+
+func (d *Document) AddInlineFragment(fragment InlineFragment) int {
+	d.InlineFragments = append(d.InlineFragments, fragment)
+	return len(d.InlineFragments) - 1
+}
diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_input_object_type_definition.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_input_object_type_definition.go
new file mode 100644
index 00000000000..7cc7409a4a9
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_input_object_type_definition.go
@@ -0,0 +1,122 @@
+package ast
+
+import (
+	"bytes"
+
+	"github.com/TykTechnologies/graphql-go-tools/internal/pkg/unsafebytes"
+	"github.com/TykTechnologies/graphql-go-tools/pkg/lexer/position"
+)
+
+type InputObjectTypeDefinition struct {
+	Description Description // optional, describes the input type
+	InputLiteral position.Position // input
+	Name ByteSliceReference // name of the input type
+	HasDirectives bool
+	Directives DirectiveList // optional, e.g. @foo
+	HasInputFieldsDefinition bool
+	InputFieldsDefinition InputValueDefinitionList // e.g. x:Float
+}
+
+func (d *Document) InputObjectTypeDefinitionNameBytes(ref int) ByteSlice {
+	return d.Input.ByteSlice(d.InputObjectTypeDefinitions[ref].Name)
+}
+
+func (d *Document) InputObjectTypeDefinitionNameString(ref int) string {
+	return unsafebytes.BytesToString(d.Input.ByteSlice(d.InputObjectTypeDefinitions[ref].Name))
+}
+
+func (d *Document) InputObjectTypeDefinitionDescriptionBytes(ref int) ByteSlice {
+	if !d.InputObjectTypeDefinitions[ref].Description.IsDefined {
+		return nil
+	}
+	return d.Input.ByteSlice(d.InputObjectTypeDefinitions[ref].Description.Content)
+}
+
+func (d *Document) InputObjectTypeDefinitionDescriptionString(ref int) string {
+	return unsafebytes.BytesToString(d.InputObjectTypeDefinitionDescriptionBytes(ref))
+}
+
+func (d *Document) InputObjectTypeDefinitionInputValueDefinitionDefaultValueString(inputObjectTypeDefinitionName, inputValueDefinitionName string) string {
+	defaultValue := d.InputObjectTypeDefinitionInputValueDefinitionDefaultValue(inputObjectTypeDefinitionName, inputValueDefinitionName)
+	if defaultValue.Kind != ValueKindString {
+		return ""
+	}
+	return d.StringValueContentString(defaultValue.Ref)
+}
+
+func (d *Document) InputObjectTypeDefinitionInputValueDefinitionDefaultValueBool(inputObjectTypeDefinitionName, inputValueDefinitionName string) bool {
+	defaultValue := d.InputObjectTypeDefinitionInputValueDefinitionDefaultValue(inputObjectTypeDefinitionName, inputValueDefinitionName)
+	if defaultValue.Kind != ValueKindBoolean {
+		return false
+	}
+	return bool(d.BooleanValue(defaultValue.Ref))
+}
+
+func (d *Document) InputObjectTypeDefinitionInputValueDefinitionDefaultValueInt64(inputObjectTypeDefinitionName, inputValueDefinitionName string) int64 {
+	defaultValue := d.InputObjectTypeDefinitionInputValueDefinitionDefaultValue(inputObjectTypeDefinitionName, inputValueDefinitionName)
+	if defaultValue.Kind != ValueKindInteger {
+		return -1
+	}
+	return d.IntValueAsInt(defaultValue.Ref)
+}
+
+func (d *Document) InputObjectTypeDefinitionInputValueDefinitionDefaultValueFloat32(inputObjectTypeDefinitionName, inputValueDefinitionName string) float32 {
+	defaultValue := d.InputObjectTypeDefinitionInputValueDefinitionDefaultValue(inputObjectTypeDefinitionName, inputValueDefinitionName)
+	if defaultValue.Kind != ValueKindFloat {
+		return -1
+	}
+	return d.FloatValueAsFloat32(defaultValue.Ref)
+}
+
+func (d *Document) InputObjectTypeDefinitionInputValueDefinitionDefaultValue(inputObjectTypeDefinitionName, inputValueDefinitionName string) Value {
+	inputObjectTypeDefinition, exists := d.Index.FirstNodeByNameStr(inputObjectTypeDefinitionName)
+	if !exists {
+		return Value{}
+	}
+	if inputObjectTypeDefinition.Kind != NodeKindInputObjectTypeDefinition {
+		return Value{}
+	}
+	inputValueDefinition := d.InputObjectTypeDefinitionInputValueDefinitionByName(inputObjectTypeDefinition.Ref, unsafebytes.StringToBytes(inputValueDefinitionName))
+	if inputValueDefinition == -1 {
+		return Value{}
+	}
+	return d.InputValueDefinitionDefaultValue(inputValueDefinition)
+}
+
+func (d *Document) InputObjectTypeDefinitionInputValueDefinitionByName(definition int, inputValueDefinitionName ByteSlice) int {
+	for _, i := range d.InputObjectTypeDefinitions[definition].InputFieldsDefinition.Refs {
+		if bytes.Equal(inputValueDefinitionName, d.InputValueDefinitionNameBytes(i)) {
+			return i
+		}
+	}
+	return -1
+}
+
+func (d *Document) AddInputObjectTypeDefinition(definition InputObjectTypeDefinition) (ref int) {
+	d.InputObjectTypeDefinitions = append(d.InputObjectTypeDefinitions, definition)
+	return len(d.InputObjectTypeDefinitions) - 1
+}
+
+func (d *Document) ImportInputObjectTypeDefinition(name, description string, argsRefs []int) (ref int) {
+	return d.ImportInputObjectTypeDefinitionWithDirectives(name, description, argsRefs, nil)
+}
+
+func (d *Document) ImportInputObjectTypeDefinitionWithDirectives(name, description string, argsRefs []int, directiveRefs []int) (ref int) {
+	definition := InputObjectTypeDefinition{
+		Description: d.ImportDescription(description),
+		Name: d.Input.AppendInputString(name),
+		HasInputFieldsDefinition: len(argsRefs) > 0,
+		InputFieldsDefinition: InputValueDefinitionList{
+			Refs: argsRefs,
+		},
+		HasDirectives: len(directiveRefs) > 0,
+		Directives: DirectiveList{
+			Refs: directiveRefs,
+		},
+	}
+
+	ref = d.AddInputObjectTypeDefinition(definition)
+	d.ImportRootNode(ref, NodeKindInputObjectTypeDefinition)
+
+	return
+}
diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_input_object_type_extension.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_input_object_type_extension.go
new file mode 100644
index 00000000000..9daf83b81a2
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_input_object_type_extension.go
@@ -0,0 +1,62 @@
+package ast
+
+import (
+	"github.com/TykTechnologies/graphql-go-tools/internal/pkg/unsafebytes"
+	"github.com/TykTechnologies/graphql-go-tools/pkg/lexer/position"
+)
+
+type InputObjectTypeExtension struct {
+	ExtendLiteral position.Position
+	InputObjectTypeDefinition
+}
+
+func (d *Document) InputObjectTypeExtensionNameBytes(ref int) ByteSlice {
+	return d.Input.ByteSlice(d.InputObjectTypeExtensions[ref].Name)
+}
+
+func (d *Document) InputObjectTypeExtensionNameString(ref int) string {
+	return unsafebytes.BytesToString(d.Input.ByteSlice(d.InputObjectTypeExtensions[ref].Name))
+}
+
+func (d *Document) InputObjectTypeExtensionDescriptionBytes(ref int) ByteSlice {
+	if !d.InputObjectTypeExtensions[ref].Description.IsDefined {
+		return nil
+	}
+	return d.Input.ByteSlice(d.InputObjectTypeExtensions[ref].Description.Content)
+}
+
+func (d *Document) InputObjectTypeExtensionDescriptionString(ref int) string {
+	return unsafebytes.BytesToString(d.InputObjectTypeExtensionDescriptionBytes(ref))
+}
+
+func (d *Document) InputObjectTypeExtensionHasInputFieldsDefinition(ref int) bool {
+	return d.InputObjectTypeExtensions[ref].HasInputFieldsDefinition
+}
+
+func (d *Document) InputObjectTypeExtensionHasDirectives(ref int) bool {
+	return d.InputObjectTypeExtensions[ref].HasDirectives
+}
+
+func (d *Document) ExtendInputObjectTypeDefinitionByInputObjectTypeExtension(inputObjectTypeDefinitionRef, inputObjectTypeExtensionRef int) {
+	if d.InputObjectTypeExtensionHasDirectives(inputObjectTypeExtensionRef) {
+		d.InputObjectTypeDefinitions[inputObjectTypeDefinitionRef].Directives.Refs = append(d.InputObjectTypeDefinitions[inputObjectTypeDefinitionRef].Directives.Refs, d.InputObjectTypeExtensions[inputObjectTypeExtensionRef].Directives.Refs...)
+		d.InputObjectTypeDefinitions[inputObjectTypeDefinitionRef].HasDirectives = true
+	}
+
+	if d.InputObjectTypeExtensionHasInputFieldsDefinition(inputObjectTypeExtensionRef) {
+		d.InputObjectTypeDefinitions[inputObjectTypeDefinitionRef].InputFieldsDefinition.Refs = append(d.InputObjectTypeDefinitions[inputObjectTypeDefinitionRef].InputFieldsDefinition.Refs, d.InputObjectTypeExtensions[inputObjectTypeExtensionRef].InputFieldsDefinition.Refs...)
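+		// safe to set unconditionally: the guard above ensures the extension defines input fields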
+		d.InputObjectTypeDefinitions[inputObjectTypeDefinitionRef].HasInputFieldsDefinition = true
+	}
+
+	d.Index.MergedTypeExtensions = append(d.Index.MergedTypeExtensions, Node{Ref: inputObjectTypeExtensionRef, Kind: NodeKindInputObjectTypeExtension})
+}
+
+func (d *Document) ImportAndExtendInputObjectTypeDefinitionByInputObjectTypeExtension(inputObjectTypeExtensionRef int) {
+	d.ImportInputObjectTypeDefinitionWithDirectives(
+		d.InputObjectTypeExtensionNameString(inputObjectTypeExtensionRef),
+		d.InputObjectTypeExtensionDescriptionString(inputObjectTypeExtensionRef),
+		d.InputObjectTypeExtensions[inputObjectTypeExtensionRef].InputFieldsDefinition.Refs,
+		d.InputObjectTypeExtensions[inputObjectTypeExtensionRef].Directives.Refs,
+	)
+	d.Index.MergedTypeExtensions = append(d.Index.MergedTypeExtensions, Node{Ref: inputObjectTypeExtensionRef, Kind: NodeKindInputObjectTypeExtension})
+}
diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_input_value_definition.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_input_value_definition.go
new file mode 100644
index 00000000000..e578e30d633
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_input_value_definition.go
@@ -0,0 +1,95 @@
+package ast
+
+import (
+	"bytes"
+
+	"github.com/TykTechnologies/graphql-go-tools/internal/pkg/unsafebytes"
+	"github.com/TykTechnologies/graphql-go-tools/pkg/lexer/position"
+)
+
+type InputValueDefinitionList struct {
+	LPAREN position.Position // (
+	Refs []int // InputValueDefinition
+	RPAREN position.Position // )
+}
+
+type DefaultValue struct {
+	IsDefined bool
+	Equals position.Position // =
+	Value Value // e.g. "Foo"
+}
+
+type InputValueDefinition struct {
+	Description Description // optional, e.g. "input Foo is..."
+	Name ByteSliceReference // e.g. Foo
+	Colon position.Position // :
+	Type int // e.g. String
+	DefaultValue DefaultValue // e.g. = "Bar"
+	HasDirectives bool
+	Directives DirectiveList // e.g. @baz
+}
+
+func (d *Document) InputValueDefinitionNameBytes(ref int) ByteSlice {
+	return d.Input.ByteSlice(d.InputValueDefinitions[ref].Name)
+}
+
+func (d *Document) InputValueDefinitionNameString(ref int) string {
+	return unsafebytes.BytesToString(d.Input.ByteSlice(d.InputValueDefinitions[ref].Name))
+}
+
+func (d *Document) InputValueDefinitionDescriptionBytes(ref int) ByteSlice {
+	if !d.InputValueDefinitions[ref].Description.IsDefined {
+		return nil
+	}
+	return d.Input.ByteSlice(d.InputValueDefinitions[ref].Description.Content)
+}
+
+func (d *Document) InputValueDefinitionDescriptionString(ref int) string {
+	return unsafebytes.BytesToString(d.InputValueDefinitionDescriptionBytes(ref))
+}
+
+func (d *Document) InputValueDefinitionType(ref int) int {
+	return d.InputValueDefinitions[ref].Type
+}
+
+func (d *Document) InputValueDefinitionHasDefaultValue(ref int) bool {
+	return d.InputValueDefinitions[ref].DefaultValue.IsDefined
+}
+
+func (d *Document) InputValueDefinitionDefaultValue(ref int) Value {
+	return d.InputValueDefinitions[ref].DefaultValue.Value
+}
+
+func (d *Document) InputValueDefinitionArgumentIsOptional(ref int) bool {
+	nonNull := d.Types[d.InputValueDefinitions[ref].Type].TypeKind == TypeKindNonNull
+	hasDefault := d.InputValueDefinitions[ref].DefaultValue.IsDefined
+	return !nonNull || hasDefault
+}
+
+func (d *Document) InputValueDefinitionHasDirective(ref int, directiveName ByteSlice) bool {
+	if !d.InputValueDefinitions[ref].HasDirectives {
+		return false
+	}
+	for _, i := range d.InputValueDefinitions[ref].Directives.Refs {
+		if bytes.Equal(directiveName, d.DirectiveNameBytes(i)) {
+			return true
+		}
+	}
+	return false
+}
+
+func (d *Document) AddInputValueDefinition(inputValueDefinition InputValueDefinition) (ref int) {
+	d.InputValueDefinitions = append(d.InputValueDefinitions, inputValueDefinition)
+	return len(d.InputValueDefinitions) - 1
+}
+
+func (d *Document) ImportInputValueDefinition(name, description string, typeRef int, defaultValue DefaultValue) (ref int) {
+	inputValueDef := InputValueDefinition{
+		Description: d.ImportDescription(description),
+		Name: d.Input.AppendInputString(name),
+		Type: typeRef,
+		DefaultValue: defaultValue,
+	}
+
+	return d.AddInputValueDefinition(inputValueDef)
+}
diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_interface_type_definition.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_interface_type_definition.go
new file mode 100644
index 00000000000..6cb1f15145d
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_interface_type_definition.go
@@ -0,0 +1,151 @@
+package ast
+
+import (
+	"bytes"
+	"sort"
+
+	"github.com/TykTechnologies/graphql-go-tools/internal/pkg/unsafebytes"
+	"github.com/TykTechnologies/graphql-go-tools/pkg/lexer/position"
+)
+
+// InterfaceTypeDefinition
+// example:
+// interface NamedEntity {
+//   name: String
+// }
+type InterfaceTypeDefinition struct {
+	Description Description // optional, describes the interface
+	InterfaceLiteral position.Position // interface
+	Name ByteSliceReference // e.g. NamedEntity
+	ImplementsInterfaces TypeList // e.g. implements Bar & Baz
+	HasDirectives bool
+	Directives DirectiveList // optional, e.g. @foo
+	HasFieldDefinitions bool
+	FieldsDefinition FieldDefinitionList // optional, e.g. { name: String }
+}
+
+func (d *Document) InterfaceTypeDefinitionNameBytes(ref int) ByteSlice {
+	return d.Input.ByteSlice(d.InterfaceTypeDefinitions[ref].Name)
+}
+
+func (d *Document) InterfaceTypeDefinitionNameString(ref int) string {
+	return unsafebytes.BytesToString(d.Input.ByteSlice(d.InterfaceTypeDefinitions[ref].Name))
+}
+
+func (d *Document) InterfaceTypeDefinitionDescriptionBytes(ref int) ByteSlice {
+	if !d.InterfaceTypeDefinitions[ref].Description.IsDefined {
+		return nil
+	}
+	return d.Input.ByteSlice(d.InterfaceTypeDefinitions[ref].Description.Content)
+}
+
+func (d *Document) InterfaceTypeDefinitionImplementsInterface(definitionRef int, interfaceName ByteSlice) bool {
+	for _, iRef := range d.InterfaceTypeDefinitions[definitionRef].ImplementsInterfaces.Refs {
+		implements := d.ResolveTypeNameBytes(iRef)
+		if bytes.Equal(interfaceName, implements) {
+			return true
+		}
+	}
+	return false
+}
+
+func (d *Document) InterfaceTypeDefinitionDescriptionString(ref int) string {
+	return unsafebytes.BytesToString(d.InterfaceTypeDefinitionDescriptionBytes(ref))
+}
+
+// InterfaceTypeDefinitionImplementedByRootNodes will return all RootNodes that implement the given interface type (by ref)
+func (d *Document) InterfaceTypeDefinitionImplementedByRootNodes(ref int) []Node {
+	interfaceTypeName := d.InterfaceTypeDefinitionNameBytes(ref)
+	implementingRootNodes := make(map[Node]bool)
+	for i := 0; i < len(d.RootNodes); i++ {
+		if d.RootNodes[i].Kind == NodeKindInterfaceTypeDefinition && d.RootNodes[i].Ref == ref {
+			continue
+		}
+
+		var rootNodeInterfaceRefs []int
+		switch d.RootNodes[i].Kind {
+		case NodeKindObjectTypeDefinition:
+			if len(d.ObjectTypeDefinitions[d.RootNodes[i].Ref].ImplementsInterfaces.Refs) == 0 {
+				continue
+			}
+			rootNodeInterfaceRefs = d.ObjectTypeDefinitions[d.RootNodes[i].Ref].ImplementsInterfaces.Refs
+		case NodeKindInterfaceTypeDefinition:
+			if len(d.InterfaceTypeDefinitions[d.RootNodes[i].Ref].ImplementsInterfaces.Refs) == 0 {
+				continue
+			}
+			rootNodeInterfaceRefs = d.InterfaceTypeDefinitions[d.RootNodes[i].Ref].ImplementsInterfaces.Refs
+		default:
+			continue
+		}
+
+		for j := 0; j < len(rootNodeInterfaceRefs); j++ {
+			implementedInterfaceTypeName := d.TypeNameBytes(rootNodeInterfaceRefs[j])
+			if !interfaceTypeName.Equals(implementedInterfaceTypeName) {
+				continue
+			}
+
+			var typeName ByteSlice
+			switch d.RootNodes[i].Kind {
+			case NodeKindObjectTypeDefinition:
+				typeName = d.ObjectTypeDefinitionNameBytes(d.RootNodes[i].Ref)
+			case NodeKindInterfaceTypeDefinition:
+				typeName = d.InterfaceTypeDefinitionNameBytes(d.RootNodes[i].Ref)
+			}
+
+			node, exists := d.Index.FirstNodeByNameBytes(typeName)
+			if !exists {
+				continue
+			}
+
+			_, isAlreadyAdded := implementingRootNodes[node]
+			if isAlreadyAdded {
+				continue
+			}
+
+			implementingRootNodes[node] = true
+		}
+	}
+
+	var nodes []Node
+	for mapNode := range implementingRootNodes {
+		nodes = append(nodes, mapNode)
+	}
+
+	sort.Slice(nodes, func(i, j int) bool {
+		return nodes[i].Ref < nodes[j].Ref
+	})
+
+	return nodes
+}
+
+func (d *Document) AddInterfaceTypeDefinition(definition InterfaceTypeDefinition) (ref int) {
+	d.InterfaceTypeDefinitions = append(d.InterfaceTypeDefinitions, definition)
+	return len(d.InterfaceTypeDefinitions) - 1
+}
+
+func (d *Document) ImportInterfaceTypeDefinition(name, description string, fieldRefs []int) (ref int) {
+	return d.ImportInterfaceTypeDefinitionWithDirectives(name, description, fieldRefs, nil, nil)
+}
+
+func (d *Document) ImportInterfaceTypeDefinitionWithDirectives(name, description
string, fieldRefs []int, iRefs []int, directiveRefs []int) (ref int) { + definition := InterfaceTypeDefinition{ + Name: d.Input.AppendInputString(name), + Description: d.ImportDescription(description), + FieldsDefinition: FieldDefinitionList{ + Refs: fieldRefs, + }, + ImplementsInterfaces: TypeList{ + Refs: iRefs, + }, + HasFieldDefinitions: len(fieldRefs) > 0, + HasDirectives: len(directiveRefs) > 0, + Directives: DirectiveList{ + Refs: directiveRefs, + }, + } + + ref = d.AddInterfaceTypeDefinition(definition) + d.ImportRootNode(ref, NodeKindInterfaceTypeDefinition) + + return +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_interface_type_extension.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_interface_type_extension.go new file mode 100644 index 00000000000..a3bb9771f17 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_interface_type_extension.go @@ -0,0 +1,75 @@ +package ast + +import ( + "github.com/TykTechnologies/graphql-go-tools/internal/pkg/unsafebytes" + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/position" +) + +type InterfaceTypeExtension struct { + ExtendLiteral position.Position + InterfaceTypeDefinition +} + +func (d *Document) InterfaceTypeExtensionNameBytes(ref int) ByteSlice { + return d.Input.ByteSlice(d.InterfaceTypeExtensions[ref].Name) +} + +func (d *Document) InterfaceTypeExtensionNameString(ref int) string { + return unsafebytes.BytesToString(d.Input.ByteSlice(d.InterfaceTypeExtensions[ref].Name)) +} + +func (d *Document) InterfaceTypeExtensionDescriptionBytes(ref int) ByteSlice { + if !d.InterfaceTypeExtensions[ref].Description.IsDefined { + return nil + } + return d.Input.ByteSlice(d.InterfaceTypeExtensions[ref].Description.Content) +} + +func (d *Document) InterfaceTypeExtensionDescriptionString(ref int) string { + return unsafebytes.BytesToString(d.InterfaceTypeExtensionDescriptionBytes(ref)) +} + +func (d *Document) InterfaceTypeExtensionHasFieldDefinitions(ref int) bool { + return d.InterfaceTypeExtensions[ref].HasFieldDefinitions +} + +func (d *Document) InterfaceTypeExtensionHasDirectives(ref int) bool { + return d.InterfaceTypeExtensions[ref].HasDirectives +} + +func (d *Document) ExtendInterfaceTypeDefinitionByInterfaceTypeExtension(interfaceTypeDefinitionRef, interfaceTypeExtensionRef int) { + if d.InterfaceTypeExtensionHasFieldDefinitions(interfaceTypeExtensionRef) { + d.InterfaceTypeDefinitions[interfaceTypeDefinitionRef].FieldsDefinition.Refs = append(d.InterfaceTypeDefinitions[interfaceTypeDefinitionRef].FieldsDefinition.Refs, d.InterfaceTypeExtensions[interfaceTypeExtensionRef].FieldsDefinition.Refs...) + d.InterfaceTypeDefinitions[interfaceTypeDefinitionRef].HasFieldDefinitions = true + } + + if d.InterfaceTypeExtensionHasDirectives(interfaceTypeExtensionRef) { + d.InterfaceTypeDefinitions[interfaceTypeDefinitionRef].Directives.Refs = append(d.InterfaceTypeDefinitions[interfaceTypeDefinitionRef].Directives.Refs, d.InterfaceTypeExtensions[interfaceTypeExtensionRef].Directives.Refs...) 
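
For orientation, a runnable sketch exercising InterfaceTypeDefinitionImplementedByRootNodes from the file above, assuming the astparser package vendored alongside this one; the schema string and main scaffolding are illustrative:

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/TykTechnologies/graphql-go-tools/pkg/ast"
    	"github.com/TykTechnologies/graphql-go-tools/pkg/astparser"
    )

    func main() {
    	doc, report := astparser.ParseGraphqlDocumentString(`
    		interface Entity { id: ID }
    		type User implements Entity { id: ID }
    		type Product implements Entity { id: ID }
    	`)
    	if report.HasErrors() {
    		log.Fatal(report.Error())
    	}
    	for i := range doc.RootNodes {
    		if doc.RootNodes[i].Kind != ast.NodeKindInterfaceTypeDefinition {
    			continue
    		}
    		// implementing root nodes come back deduplicated and sorted by ref
    		for _, n := range doc.InterfaceTypeDefinitionImplementedByRootNodes(doc.RootNodes[i].Ref) {
    			fmt.Println(n.NameString(&doc)) // User, Product
    		}
    	}
    }
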
+ d.InterfaceTypeDefinitions[interfaceTypeDefinitionRef].HasDirectives = true + } + + if len(d.InterfaceTypeExtensions[interfaceTypeExtensionRef].ImplementsInterfaces.Refs) > 0 { + d.InterfaceTypeDefinitions[interfaceTypeDefinitionRef].ImplementsInterfaces.Refs = append( + d.InterfaceTypeDefinitions[interfaceTypeDefinitionRef].ImplementsInterfaces.Refs, + d.InterfaceTypeExtensions[interfaceTypeExtensionRef].ImplementsInterfaces.Refs..., + ) + } + + d.Index.MergedTypeExtensions = append(d.Index.MergedTypeExtensions, Node{Ref: interfaceTypeExtensionRef, Kind: NodeKindInterfaceTypeExtension}) +} + +func (d *Document) ImportAndExtendInterfaceTypeDefinitionByInterfaceTypeExtension(interfaceTypeExtensionRef int) { + d.ImportInterfaceTypeDefinitionWithDirectives( + d.InterfaceTypeExtensionNameString(interfaceTypeExtensionRef), + d.InterfaceTypeExtensionDescriptionString(interfaceTypeExtensionRef), + d.InterfaceTypeExtensions[interfaceTypeExtensionRef].FieldsDefinition.Refs, + d.InterfaceTypeExtensions[interfaceTypeExtensionRef].ImplementsInterfaces.Refs, + d.InterfaceTypeExtensions[interfaceTypeExtensionRef].Directives.Refs, + ) + d.Index.MergedTypeExtensions = append(d.Index.MergedTypeExtensions, Node{Ref: interfaceTypeExtensionRef, Kind: NodeKindInterfaceTypeExtension}) +} + +func (d *Document) AddInterfaceTypeExtension(extension InterfaceTypeExtension) (ref int) { + d.InterfaceTypeExtensions = append(d.InterfaceTypeExtensions, extension) + return len(d.InterfaceTypeExtensions) - 1 +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_node.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_node.go new file mode 100644 index 00000000000..015fdd4b1ec --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_node.go @@ -0,0 +1,542 @@ +package ast + +import ( + "bytes" + "fmt" + "log" + + "github.com/TykTechnologies/graphql-go-tools/internal/pkg/unsafebytes" +) + +type Node struct { + Kind NodeKind + Ref int +} + +var InvalidNode = Node{Kind: NodeKindUnknown, Ref: InvalidRef} + +func (n *Node) IsExtensionKind() bool { + switch n.Kind { + case NodeKindSchemaExtension, + NodeKindObjectTypeExtension, + NodeKindInputObjectTypeExtension, + NodeKindInterfaceTypeExtension, + NodeKindEnumTypeExtension, + NodeKindScalarTypeExtension, + NodeKindUnionTypeExtension: + return true + } + + return false +} + +func (d *Document) NodeNameBytes(node Node) ByteSlice { + var ref ByteSliceReference + + switch node.Kind { + case NodeKindObjectTypeDefinition: + ref = d.ObjectTypeDefinitions[node.Ref].Name + case NodeKindInterfaceTypeDefinition: + ref = d.InterfaceTypeDefinitions[node.Ref].Name + case NodeKindInputObjectTypeDefinition: + ref = d.InputObjectTypeDefinitions[node.Ref].Name + case NodeKindUnionTypeDefinition: + ref = d.UnionTypeDefinitions[node.Ref].Name + case NodeKindScalarTypeDefinition: + ref = d.ScalarTypeDefinitions[node.Ref].Name + case NodeKindDirectiveDefinition: + ref = d.DirectiveDefinitions[node.Ref].Name + case NodeKindEnumTypeDefinition: + ref = d.EnumTypeDefinitions[node.Ref].Name + case NodeKindField: + ref = d.Fields[node.Ref].Name + case NodeKindDirective: + ref = d.Directives[node.Ref].Name + case NodeKindObjectTypeExtension: + ref = d.ObjectTypeExtensions[node.Ref].Name + case NodeKindInterfaceTypeExtension: + ref = d.InterfaceTypeExtensions[node.Ref].Name + case NodeKindUnionTypeExtension: + ref = d.UnionTypeExtensions[node.Ref].Name + case NodeKindEnumTypeExtension: + ref = d.EnumTypeExtensions[node.Ref].Name + } + + 
return d.Input.ByteSlice(ref) +} + +func (n Node) NameBytes(definition *Document) []byte { + return definition.NodeNameBytes(n) +} + +func (n Node) NameString(definition *Document) string { + return unsafebytes.BytesToString(definition.NodeNameBytes(n)) +} + +func (d *Document) UpdateRootNode(ref int, newNodeRef int, newNodeKind NodeKind) { + d.RootNodes[ref].Kind = newNodeKind + d.RootNodes[ref].Ref = newNodeRef +} + +// TODO: we could use node name directly +func (d *Document) NodeNameUnsafeString(node Node) string { + return unsafebytes.BytesToString(d.NodeNameBytes(node)) +} + +func (d *Document) NodeNameString(node Node) string { + return string(d.NodeNameBytes(node)) +} + +// Node directives + +// NodeHasDirectiveByNameString returns whether the given node has a directive with the given name as string. +func (d *Document) NodeHasDirectiveByNameString(node Node, directiveName string) bool { + for _, directiveRef := range d.NodeDirectives(node) { + if d.DirectiveNameString(directiveRef) == directiveName { + return true + } + } + return false +} + +func (d *Document) NodeDirectives(node Node) []int { + switch node.Kind { + case NodeKindField: + return d.Fields[node.Ref].Directives.Refs + case NodeKindInlineFragment: + return d.InlineFragments[node.Ref].Directives.Refs + case NodeKindFragmentSpread: + return d.FragmentSpreads[node.Ref].Directives.Refs + case NodeKindSchemaDefinition: + return d.SchemaDefinitions[node.Ref].Directives.Refs + case NodeKindSchemaExtension: + return d.SchemaExtensions[node.Ref].Directives.Refs + case NodeKindObjectTypeDefinition: + return d.ObjectTypeDefinitions[node.Ref].Directives.Refs + case NodeKindObjectTypeExtension: + return d.ObjectTypeExtensions[node.Ref].Directives.Refs + case NodeKindFieldDefinition: + return d.FieldDefinitions[node.Ref].Directives.Refs + case NodeKindInterfaceTypeDefinition: + return d.InterfaceTypeDefinitions[node.Ref].Directives.Refs + case NodeKindInterfaceTypeExtension: + return d.InterfaceTypeExtensions[node.Ref].Directives.Refs + case NodeKindInputObjectTypeDefinition: + return d.InputObjectTypeDefinitions[node.Ref].Directives.Refs + case NodeKindInputObjectTypeExtension: + return d.InputObjectTypeExtensions[node.Ref].Directives.Refs + case NodeKindScalarTypeDefinition: + return d.ScalarTypeDefinitions[node.Ref].Directives.Refs + case NodeKindScalarTypeExtension: + return d.ScalarTypeExtensions[node.Ref].Directives.Refs + case NodeKindUnionTypeDefinition: + return d.UnionTypeDefinitions[node.Ref].Directives.Refs + case NodeKindUnionTypeExtension: + return d.UnionTypeExtensions[node.Ref].Directives.Refs + case NodeKindEnumTypeDefinition: + return d.EnumTypeDefinitions[node.Ref].Directives.Refs + case NodeKindEnumTypeExtension: + return d.EnumTypeExtensions[node.Ref].Directives.Refs + case NodeKindFragmentDefinition: + return d.FragmentDefinitions[node.Ref].Directives.Refs + case NodeKindInputValueDefinition: + return d.InputValueDefinitions[node.Ref].Directives.Refs + case NodeKindEnumValueDefinition: + return d.EnumValueDefinitions[node.Ref].Directives.Refs + case NodeKindVariableDefinition: + return d.VariableDefinitions[node.Ref].Directives.Refs + case NodeKindOperationDefinition: + return d.OperationDefinitions[node.Ref].Directives.Refs + default: + return nil + } +} + +func (d *Document) RemoveDirectivesFromNode(node Node, directiveRefs []int) { + for _, ref := range directiveRefs { + d.RemoveDirectiveFromNode(node, ref) + } +} + +func (d *Document) RemoveDirectiveFromNode(node Node, directiveRef int) { + switch 
node.Kind { + case NodeKindFragmentSpread: + if i, ok := indexOf(d.FragmentSpreads[node.Ref].Directives.Refs, directiveRef); ok { + deleteRef(&d.FragmentSpreads[node.Ref].Directives.Refs, i) + d.FragmentSpreads[node.Ref].HasDirectives = len(d.FragmentSpreads[node.Ref].Directives.Refs) > 0 + } + case NodeKindInlineFragment: + if i, ok := indexOf(d.InlineFragments[node.Ref].Directives.Refs, directiveRef); ok { + deleteRef(&d.InlineFragments[node.Ref].Directives.Refs, i) + d.InlineFragments[node.Ref].HasDirectives = len(d.InlineFragments[node.Ref].Directives.Refs) > 0 + } + case NodeKindField: + if i, ok := indexOf(d.Fields[node.Ref].Directives.Refs, directiveRef); ok { + deleteRef(&d.Fields[node.Ref].Directives.Refs, i) + d.Fields[node.Ref].HasDirectives = len(d.Fields[node.Ref].Directives.Refs) > 0 + } + case NodeKindFieldDefinition: + if i, ok := indexOf(d.FieldDefinitions[node.Ref].Directives.Refs, directiveRef); ok { + deleteRef(&d.FieldDefinitions[node.Ref].Directives.Refs, i) + d.FieldDefinitions[node.Ref].HasDirectives = len(d.FieldDefinitions[node.Ref].Directives.Refs) > 0 + } + case NodeKindInterfaceTypeDefinition: + if i, ok := indexOf(d.InterfaceTypeDefinitions[node.Ref].Directives.Refs, directiveRef); ok { + deleteRef(&d.InterfaceTypeDefinitions[node.Ref].Directives.Refs, i) + d.InterfaceTypeDefinitions[node.Ref].HasDirectives = len(d.InterfaceTypeDefinitions[node.Ref].Directives.Refs) > 0 + } + case NodeKindObjectTypeDefinition: + if i, ok := indexOf(d.ObjectTypeDefinitions[node.Ref].Directives.Refs, directiveRef); ok { + deleteRef(&d.ObjectTypeDefinitions[node.Ref].Directives.Refs, i) + d.ObjectTypeDefinitions[node.Ref].HasDirectives = len(d.ObjectTypeDefinitions[node.Ref].Directives.Refs) > 0 + } + default: + log.Printf("RemoveDirectiveFromNode not implemented for node kind: %s", node.Kind) + } +} + +func (d *Document) NodeDirectiveLocation(node Node) (location DirectiveLocation, err error) { + switch node.Kind { + case NodeKindSchemaDefinition: + location = TypeSystemDirectiveLocationSchema + case NodeKindSchemaExtension: + location = TypeSystemDirectiveLocationSchema + case NodeKindObjectTypeDefinition: + location = TypeSystemDirectiveLocationObject + case NodeKindObjectTypeExtension: + location = TypeSystemDirectiveLocationObject + case NodeKindInterfaceTypeDefinition: + location = TypeSystemDirectiveLocationInterface + case NodeKindInterfaceTypeExtension: + location = TypeSystemDirectiveLocationInterface + case NodeKindUnionTypeDefinition: + location = TypeSystemDirectiveLocationUnion + case NodeKindUnionTypeExtension: + location = TypeSystemDirectiveLocationUnion + case NodeKindEnumTypeDefinition: + location = TypeSystemDirectiveLocationEnum + case NodeKindEnumTypeExtension: + location = TypeSystemDirectiveLocationEnum + case NodeKindInputObjectTypeDefinition: + location = TypeSystemDirectiveLocationInputObject + case NodeKindInputObjectTypeExtension: + location = TypeSystemDirectiveLocationInputObject + case NodeKindScalarTypeDefinition: + location = TypeSystemDirectiveLocationScalar + case NodeKindOperationDefinition: + switch d.OperationDefinitions[node.Ref].OperationType { + case OperationTypeQuery: + location = ExecutableDirectiveLocationQuery + case OperationTypeMutation: + location = ExecutableDirectiveLocationMutation + case OperationTypeSubscription: + location = ExecutableDirectiveLocationSubscription + } + case NodeKindField: + location = ExecutableDirectiveLocationField + case NodeKindFragmentSpread: + location = ExecutableDirectiveLocationFragmentSpread + 
case NodeKindInlineFragment: + location = ExecutableDirectiveLocationInlineFragment + case NodeKindFragmentDefinition: + location = ExecutableDirectiveLocationFragmentDefinition + case NodeKindVariableDefinition: + location = ExecutableDirectiveLocationVariableDefinition + default: + err = fmt.Errorf("node kind: %s is not allowed to have directives", node.Kind) + } + return +} + +// Node resolvers + +// NodeResolverTypeNameBytes returns lowercase query/mutation/subscription for Query/Mutation/Subscription +// for other type definitions it returns the default type name +func (d *Document) NodeResolverTypeNameBytes(node Node, path Path) ByteSlice { + if len(path) == 1 && path[0].Kind == FieldName { + return path[0].FieldName + } + switch node.Kind { + case NodeKindObjectTypeDefinition: + return d.ObjectTypeDefinitionNameBytes(node.Ref) + case NodeKindInterfaceTypeDefinition: + return d.InterfaceTypeDefinitionNameBytes(node.Ref) + case NodeKindUnionTypeDefinition: + return d.UnionTypeDefinitionNameBytes(node.Ref) + } + return nil +} + +func (d *Document) NodeResolverTypeNameString(node Node, path Path) string { + return unsafebytes.BytesToString(d.NodeResolverTypeNameBytes(node, path)) +} + +// Node field definitions + +func (d *Document) NodeFieldDefinitions(node Node) []int { + switch node.Kind { + case NodeKindObjectTypeDefinition: + return d.ObjectTypeDefinitions[node.Ref].FieldsDefinition.Refs + case NodeKindObjectTypeExtension: + return d.ObjectTypeExtensions[node.Ref].FieldsDefinition.Refs + case NodeKindInterfaceTypeDefinition: + return d.InterfaceTypeDefinitions[node.Ref].FieldsDefinition.Refs + case NodeKindInterfaceTypeExtension: + return d.InterfaceTypeExtensions[node.Ref].FieldsDefinition.Refs + case NodeKindUnionTypeDefinition: + return d.UnionTypeDefinitions[node.Ref].FieldsDefinition.Refs + default: + return nil + } +} + +func (d *Document) NodeInputFieldDefinitions(node Node) []int { + switch node.Kind { + case NodeKindInputObjectTypeDefinition: + return d.InputObjectTypeDefinitions[node.Ref].InputFieldsDefinition.Refs + default: + return nil + } +} + +func (d *Document) NodeInputFieldDefinitionByName(node Node, name ByteSlice) (int, bool) { + switch node.Kind { + case NodeKindInputObjectTypeDefinition: + refs := d.InputObjectTypeDefinitions[node.Ref].InputFieldsDefinition.Refs + for _, ref := range refs { + if bytes.Equal(d.Input.ByteSlice(d.InputValueDefinitions[ref].Name), name) { + return ref, true + } + } + } + return 0, false +} + +func (d *Document) NodeFieldDefinitionByName(node Node, fieldName ByteSlice) (definition int, exists bool) { + for _, i := range d.NodeFieldDefinitions(node) { + if bytes.Equal(d.Input.ByteSlice(d.FieldDefinitions[i].Name), fieldName) { + return i, true + } + } + return InvalidRef, false +} + +func (d *Document) NodeFieldDefinitionArgumentDefinitionByName(node Node, fieldName, argumentName ByteSlice) int { + fieldDefinition, exists := d.NodeFieldDefinitionByName(node, fieldName) + if !exists { + return -1 + } + argumentDefinitions := d.FieldDefinitionArgumentsDefinitions(fieldDefinition) + for _, i := range argumentDefinitions { + if bytes.Equal(argumentName, d.Input.ByteSlice(d.InputValueDefinitions[i].Name)) { + return i + } + } + return -1 +} + +func (d *Document) NodeFieldDefinitionArgumentsDefinitions(node Node, fieldName ByteSlice) []int { + fieldDefinition, exists := d.NodeFieldDefinitionByName(node, fieldName) + if !exists { + return nil + } + return d.FieldDefinitionArgumentsDefinitions(fieldDefinition) +} + +// Node input value 
definitions + +func (d *Document) NodeInputValueDefinitions(node Node) []int { + switch node.Kind { + case NodeKindInputObjectTypeDefinition: + return d.InputObjectTypeDefinitions[node.Ref].InputFieldsDefinition.Refs + case NodeKindInputObjectTypeExtension: + return d.InputObjectTypeExtensions[node.Ref].InputFieldsDefinition.Refs + case NodeKindFieldDefinition: + return d.FieldDefinitions[node.Ref].ArgumentsDefinition.Refs + case NodeKindDirectiveDefinition: + return d.DirectiveDefinitions[node.Ref].ArgumentsDefinition.Refs + default: + return nil + } +} + +func (d *Document) InputValueDefinitionIsFirst(inputValue int, ancestor Node) bool { + inputValues := d.NodeInputValueDefinitions(ancestor) + return inputValues != nil && inputValues[0] == inputValue +} + +func (d *Document) InputValueDefinitionIsLast(inputValue int, ancestor Node) bool { + inputValues := d.NodeInputValueDefinitions(ancestor) + return inputValues != nil && inputValues[len(inputValues)-1] == inputValue +} + +// Node misc + +func (d *Document) NodeImplementsInterface(node Node, interfaceNode Node) bool { + nodeFields := d.NodeFieldDefinitions(node) + interfaceFields := d.NodeFieldDefinitions(interfaceNode) + + for _, i := range interfaceFields { + interfaceFieldName := d.FieldDefinitionNameBytes(i) + if !d.FieldDefinitionsContainField(nodeFields, interfaceFieldName) { + return false + } + } + + return true +} + +func (d *Document) NodeIsUnionMember(node Node, union Node) bool { + nodeTypeName := d.NodeNameBytes(node) + for _, i := range d.UnionTypeDefinitions[union.Ref].UnionMemberTypes.Refs { + memberName := d.ResolveTypeNameBytes(i) + if bytes.Equal(nodeTypeName, memberName) { + return true + } + } + return false +} + +func (d *Document) NodeIsLastRootNode(node Node) bool { + if len(d.RootNodes) == 0 { + return false + } + for i := len(d.RootNodes) - 1; i >= 0; i-- { + if d.RootNodes[i].Kind == NodeKindUnknown { + continue + } + return d.RootNodes[i] == node + } + return false +} + +func (d *Document) RemoveNodeFromNode(remove, from Node) { + switch from.Kind { + case NodeKindSelectionSet: + d.RemoveNodeFromSelectionSet(from.Ref, remove) + default: + log.Printf("RemoveNodeFromNode not implemented for from: %s", from.Kind) + } +} + +func (d *Document) RemoveNodeFromSelectionSet(set int, node Node) { + var selectionKind SelectionKind + + switch node.Kind { + case NodeKindFragmentSpread: + selectionKind = SelectionKindFragmentSpread + case NodeKindInlineFragment: + selectionKind = SelectionKindInlineFragment + case NodeKindField: + selectionKind = SelectionKindField + default: + log.Printf("RemoveNodeFromSelectionSet not implemented for node: %s", node.Kind) + return + } + + for i, j := range d.SelectionSets[set].SelectionRefs { + if d.Selections[j].Kind == selectionKind && d.Selections[j].Ref == node.Ref { + d.SelectionSets[set].SelectionRefs = append(d.SelectionSets[set].SelectionRefs[:i], d.SelectionSets[set].SelectionRefs[i+1:]...) + return + } + } +} + +// NodeInterfaceRefs returns the interfaces implemented by the given node (this is +// only applicable to object kinds). +// Returns nil if node kind is not an object kind. 
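
Before the two ref accessors defined next, a short usage fragment (doc and node are assumed to be in scope; resolving type refs to names goes through ResolveTypeNameBytes, as elsewhere in this package):

    // interfaces implemented by an object-kind node; NodeInterfaceRefs returns
    // nil for any other kind, so the loop is simply skipped then
    for _, typeRef := range doc.NodeInterfaceRefs(node) {
    	fmt.Println(string(doc.ResolveTypeNameBytes(typeRef)))
    }

    // the same pattern lists union members via NodeUnionMemberRefs
    for _, typeRef := range doc.NodeUnionMemberRefs(node) {
    	fmt.Println(string(doc.ResolveTypeNameBytes(typeRef)))
    }
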
+func (d *Document) NodeInterfaceRefs(node Node) (refs []int) {
+	switch node.Kind {
+	case NodeKindObjectTypeDefinition:
+		return d.ObjectTypeDefinitions[node.Ref].ImplementsInterfaces.Refs
+	case NodeKindObjectTypeExtension:
+		return d.ObjectTypeExtensions[node.Ref].ImplementsInterfaces.Refs
+	default:
+		return nil
+	}
+}
+
+// NodeUnionMemberRefs returns the union members of the given node (this is only
+// applicable to union kinds).
+// Returns nil if node kind is not a union kind.
+func (d *Document) NodeUnionMemberRefs(node Node) (refs []int) {
+	switch node.Kind {
+	case NodeKindUnionTypeDefinition:
+		return d.UnionTypeDefinitions[node.Ref].UnionMemberTypes.Refs
+	case NodeKindUnionTypeExtension:
+		return d.UnionTypeExtensions[node.Ref].UnionMemberTypes.Refs
+	default:
+		return nil
+	}
+}
+
+// Node fragments
+
+func (d *Document) NodeFragmentIsAllowedOnNode(fragmentNode, onNode Node) bool {
+	switch onNode.Kind {
+	case NodeKindObjectTypeDefinition:
+		return d.NodeFragmentIsAllowedOnObjectTypeDefinition(fragmentNode, onNode)
+	case NodeKindInterfaceTypeDefinition:
+		return d.NodeFragmentIsAllowedOnInterfaceTypeDefinition(fragmentNode, onNode)
+	case NodeKindUnionTypeDefinition:
+		return d.NodeFragmentIsAllowedOnUnionTypeDefinition(fragmentNode, onNode)
+	default:
+		return false
+	}
+}
+
+func (d *Document) NodeFragmentIsAllowedOnInterfaceTypeDefinition(fragmentNode, interfaceTypeNode Node) bool {
+	switch fragmentNode.Kind {
+	case NodeKindObjectTypeDefinition:
+		return d.NodeImplementsInterface(fragmentNode, interfaceTypeNode)
+	case NodeKindInterfaceTypeDefinition:
+		return bytes.Equal(d.InterfaceTypeDefinitionNameBytes(fragmentNode.Ref), d.InterfaceTypeDefinitionNameBytes(interfaceTypeNode.Ref))
+	case NodeKindUnionTypeDefinition:
+		return d.UnionNodeIntersectsInterfaceNode(fragmentNode, interfaceTypeNode)
+	}
+
+	return false
+}
+
+func (d *Document) NodeFragmentIsAllowedOnUnionTypeDefinition(fragmentNode, unionTypeNode Node) bool {
+	switch fragmentNode.Kind {
+	case NodeKindObjectTypeDefinition:
+		return d.NodeIsUnionMember(fragmentNode, unionTypeNode)
+	case NodeKindInterfaceTypeDefinition:
+		return d.UnionNodeIntersectsInterfaceNode(unionTypeNode, fragmentNode)
+	case NodeKindUnionTypeDefinition:
+		return bytes.Equal(d.UnionTypeDefinitionNameBytes(fragmentNode.Ref), d.UnionTypeDefinitionNameBytes(unionTypeNode.Ref))
+	}
+
+	return false
+}
+
+func (d *Document) NodeFragmentIsAllowedOnObjectTypeDefinition(fragmentNode, objectTypeNode Node) bool {
+	switch fragmentNode.Kind {
+	case NodeKindObjectTypeDefinition:
+		return bytes.Equal(d.ObjectTypeDefinitionNameBytes(fragmentNode.Ref), d.ObjectTypeDefinitionNameBytes(objectTypeNode.Ref))
+	case NodeKindInterfaceTypeDefinition:
+		return d.NodeImplementsInterface(objectTypeNode, fragmentNode)
+	case NodeKindUnionTypeDefinition:
+		return d.NodeIsUnionMember(objectTypeNode, fragmentNode)
+	}
+
+	return false
+}
+
+func (d *Document) UnionNodeIntersectsInterfaceNode(unionNode, interfaceNode Node) bool {
+	for _, i := range d.UnionTypeDefinitions[unionNode.Ref].UnionMemberTypes.Refs {
+		memberName := d.ResolveTypeNameBytes(i)
+		node, exists := d.Index.FirstNodeByNameBytes(memberName)
+		if !exists {
+			continue
+		}
+		if node.Kind != NodeKindObjectTypeDefinition {
+			continue
+		}
+		if d.NodeImplementsInterface(node, interfaceNode) {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_node_kind.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_node_kind.go
new file mode 100644 index 00000000000..2b8312a40b0 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_node_kind.go @@ -0,0 +1,90 @@ +package ast + +import ( + "github.com/TykTechnologies/graphql-go-tools/internal/pkg/unsafebytes" + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/literal" +) + +type NodeKind int + +const ( + NodeKindUnknown NodeKind = 22 + iota + NodeKindSchemaDefinition + NodeKindSchemaExtension + NodeKindObjectTypeDefinition + NodeKindObjectTypeExtension + NodeKindInterfaceTypeDefinition + NodeKindInterfaceTypeExtension + NodeKindUnionTypeDefinition + NodeKindUnionTypeExtension + NodeKindUnionMemberType + NodeKindEnumTypeDefinition + NodeKindEnumValueDefinition + NodeKindEnumTypeExtension + NodeKindInputObjectTypeDefinition + NodeKindInputValueDefinition + NodeKindInputObjectTypeExtension + NodeKindScalarTypeDefinition + NodeKindScalarTypeExtension + NodeKindDirectiveDefinition + NodeKindOperationDefinition + NodeKindSelectionSet + NodeKindField + NodeKindFieldDefinition + NodeKindFragmentSpread + NodeKindInlineFragment + NodeKindFragmentDefinition + NodeKindArgument + NodeKindDirective + NodeKindVariableDefinition +) + +func (n NodeKind) IsAbstractType() bool { + return n == NodeKindInterfaceTypeDefinition || n == NodeKindUnionTypeDefinition +} + +func (d *Document) NodeKindNameBytes(node Node) ByteSlice { + switch node.Kind { + case NodeKindOperationDefinition: + switch d.OperationDefinitions[node.Ref].OperationType { + case OperationTypeQuery: + return literal.LocationQuery + case OperationTypeMutation: + return literal.LocationMutation + case OperationTypeSubscription: + return literal.LocationSubscription + } + case NodeKindField: + return literal.LocationField + case NodeKindFragmentDefinition: + return literal.LocationFragmentDefinition + case NodeKindFragmentSpread: + return literal.LocationFragmentSpread + case NodeKindInlineFragment: + return literal.LocationInlineFragment + case NodeKindVariableDefinition: + return literal.LocationVariableDefinition + case NodeKindSchemaDefinition: + return literal.LocationSchema + case NodeKindScalarTypeDefinition: + return literal.LocationScalar + case NodeKindObjectTypeDefinition: + return literal.LocationObject + case NodeKindFieldDefinition: + return literal.LocationFieldDefinition + case NodeKindInterfaceTypeDefinition: + return literal.LocationInterface + case NodeKindUnionTypeDefinition: + return literal.LocationUnion + case NodeKindEnumTypeDefinition: + return literal.LocationEnum + case NodeKindEnumValueDefinition: + return literal.LocationEnumValue + case NodeKindInputObjectTypeDefinition: + return literal.LocationInputObject + case NodeKindInputValueDefinition: + return literal.LocationInputFieldDefinition + } + + return unsafebytes.StringToBytes(node.Kind.String()) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_object_field.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_object_field.go new file mode 100644 index 00000000000..eaeddc44cdc --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_object_field.go @@ -0,0 +1,75 @@ +package ast + +import ( + "bytes" + + "github.com/TykTechnologies/graphql-go-tools/internal/pkg/unsafebytes" + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/position" +) + +// ObjectField +// example: +// lon: 12.43 +type ObjectField struct { + Name ByteSliceReference // e.g. lon + Colon position.Position // : + Value Value // e.g. 
12.43 + Position position.Position +} + +func (d *Document) CopyObjectField(ref int) int { + return d.AddObjectField(ObjectField{ + Name: d.copyByteSliceReference(d.ObjectFields[ref].Name), + Value: Value{ + Kind: d.ObjectFields[ref].Value.Kind, + Ref: d.copyValueRef(d.ObjectFields[ref].Value.Kind, d.ObjectFields[ref].Value.Ref), + }, + }) +} + +func (d *Document) ObjectField(ref int) ObjectField { + return d.ObjectFields[ref] +} + +func (d *Document) ObjectFieldNameBytes(ref int) ByteSlice { + return d.Input.ByteSlice(d.ObjectFields[ref].Name) +} + +func (d *Document) ObjectFieldNameString(ref int) string { + return unsafebytes.BytesToString(d.Input.ByteSlice(d.ObjectFields[ref].Name)) +} + +func (d *Document) ObjectFieldValue(ref int) Value { + return d.ObjectFields[ref].Value +} + +func (d *Document) ObjectFieldsAreEqual(left, right int) bool { + return bytes.Equal(d.ObjectFieldNameBytes(left), d.ObjectFieldNameBytes(right)) && + d.ValuesAreEqual(d.ObjectFieldValue(left), d.ObjectFieldValue(right)) +} + +func (d *Document) ObjectValuesAreEqual(left, right int) bool { + leftFields, rightFields := d.ObjectValues[left].Refs, d.ObjectValues[right].Refs + if len(leftFields) != len(rightFields) { + return false + } + for i := 0; i < len(leftFields); i++ { + left, right = leftFields[i], rightFields[i] + if !d.ObjectFieldsAreEqual(left, right) { + return false + } + } + return true +} + +func (d *Document) AddObjectField(field ObjectField) (ref int) { + d.ObjectFields = append(d.ObjectFields, field) + return len(d.ObjectFields) - 1 +} + +func (d *Document) ImportObjectField(name ByteSlice, value Value) (ref int) { + return d.AddObjectField(ObjectField{ + Name: d.Input.AppendInputBytes(name), + Value: value, + }) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_object_type_definition.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_object_type_definition.go new file mode 100644 index 00000000000..5b93c73555a --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_object_type_definition.go @@ -0,0 +1,119 @@ +package ast + +import ( + "bytes" + + "github.com/TykTechnologies/graphql-go-tools/internal/pkg/unsafebytes" + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/position" +) + +type TypeList struct { + Refs []int // Type +} + +type ObjectTypeDefinition struct { + Description Description // optional, e.g. "type Foo is ..." + TypeLiteral position.Position // type + Name ByteSliceReference // e.g. Foo + ImplementsInterfaces TypeList // e.g implements Bar & Baz + HasDirectives bool + Directives DirectiveList // e.g. 
@foo + HasFieldDefinitions bool + FieldsDefinition FieldDefinitionList // { foo:Bar bar(baz:String) } +} + +func (d *Document) ObjectTypeDefinitionNameBytes(ref int) ByteSlice { + return d.Input.ByteSlice(d.ObjectTypeDefinitions[ref].Name) +} + +func (d *Document) ObjectTypeDefinitionNameRef(ref int) ByteSliceReference { + return d.ObjectTypeDefinitions[ref].Name +} + +func (d *Document) ObjectTypeDefinitionNameString(ref int) string { + return unsafebytes.BytesToString(d.Input.ByteSlice(d.ObjectTypeDefinitions[ref].Name)) +} + +func (d *Document) ObjectTypeDescriptionNameBytes(ref int) ByteSlice { + if !d.ObjectTypeDefinitions[ref].Description.IsDefined { + return nil + } + return d.Input.ByteSlice(d.ObjectTypeDefinitions[ref].Description.Content) +} + +func (d *Document) ObjectTypeDescriptionNameString(ref int) string { + return unsafebytes.BytesToString(d.ObjectTypeDescriptionNameBytes(ref)) +} + +func (d *Document) ObjectTypeDefinitionHasField(ref int, fieldName []byte) bool { + for _, fieldDefinitionRef := range d.ObjectTypeDefinitions[ref].FieldsDefinition.Refs { + currentFieldName := d.FieldDefinitionNameBytes(fieldDefinitionRef) + if currentFieldName.Equals(fieldName) { + return true + } + } + return false +} + +func (d *Document) ObjectTypeDefinitionImplementsInterface(definitionRef int, interfaceName ByteSlice) bool { + for _, iRef := range d.ObjectTypeDefinitions[definitionRef].ImplementsInterfaces.Refs { + implements := d.ResolveTypeNameBytes(iRef) + if bytes.Equal(interfaceName, implements) { + return true + } + } + return false +} + +func (d *Document) AddObjectTypeDefinition(definition ObjectTypeDefinition) (ref int) { + d.ObjectTypeDefinitions = append(d.ObjectTypeDefinitions, definition) + return len(d.ObjectTypeDefinitions) - 1 +} + +func (d *Document) ImportObjectTypeDefinition(name, description string, fieldRefs []int, iRefs []int) (ref int) { + return d.ImportObjectTypeDefinitionWithDirectives(name, description, fieldRefs, iRefs, nil) +} + +func (d *Document) ImportObjectTypeDefinitionWithDirectives(name, description string, fieldRefs []int, iRefs []int, directiveRefs []int) (ref int) { + definition := ObjectTypeDefinition{ + Name: d.Input.AppendInputString(name), + Description: d.ImportDescription(description), + FieldsDefinition: FieldDefinitionList{ + Refs: fieldRefs, + }, + HasFieldDefinitions: len(fieldRefs) > 0, + ImplementsInterfaces: TypeList{ + Refs: iRefs, + }, + HasDirectives: len(directiveRefs) > 0, + Directives: DirectiveList{ + Refs: directiveRefs, + }, + } + + ref = d.AddObjectTypeDefinition(definition) + d.ImportRootNode(ref, NodeKindObjectTypeDefinition) + + return +} + +func (d *Document) RemoveObjectTypeDefinition(name ByteSlice) bool { + node, ok := d.Index.FirstNodeByNameBytes(name) + if !ok { + return false + } + + if node.Kind != NodeKindObjectTypeDefinition { + return false + } + + for i := range d.RootNodes { + if d.RootNodes[i].Kind == NodeKindObjectTypeDefinition && d.RootNodes[i].Ref == node.Ref { + d.RootNodes = append(d.RootNodes[:i], d.RootNodes[i+1:]...) 
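
RemoveObjectTypeDefinition, being defined here, is driven by name; a quick sketch of a call site (doc assumed to be a *ast.Document holding a parsed schema, the type name is made up):

    // false when "Product" is unknown or names a non-object node; on success
    // both the root node entry and the index entry are removed
    if ok := doc.RemoveObjectTypeDefinition(ast.ByteSlice("Product")); !ok {
    	// "Product" was absent, or the name belongs to a non-object node
    }
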
+ break + } + } + + d.Index.RemoveNodeByName(name) + return true +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_object_type_extension.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_object_type_extension.go new file mode 100644 index 00000000000..6d25d676457 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_object_type_extension.go @@ -0,0 +1,75 @@ +package ast + +import ( + "github.com/TykTechnologies/graphql-go-tools/internal/pkg/unsafebytes" + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/position" +) + +type ObjectTypeExtension struct { + ExtendLiteral position.Position + ObjectTypeDefinition +} + +func (d *Document) ObjectTypeExtensionNameBytes(ref int) ByteSlice { + return d.Input.ByteSlice(d.ObjectTypeExtensions[ref].Name) +} + +func (d *Document) ObjectTypeExtensionNameString(ref int) string { + return unsafebytes.BytesToString(d.Input.ByteSlice(d.ObjectTypeExtensions[ref].Name)) +} + +func (d *Document) ObjectTypeExtensionDescriptionNameBytes(ref int) ByteSlice { + if !d.ObjectTypeExtensions[ref].Description.IsDefined { + return nil + } + return d.Input.ByteSlice(d.ObjectTypeExtensions[ref].Description.Content) +} + +func (d *Document) ObjectTypeExtensionDescriptionNameString(ref int) string { + return unsafebytes.BytesToString(d.ObjectTypeExtensionDescriptionNameBytes(ref)) +} + +func (d *Document) ObjectTypeExtensionHasFieldDefinitions(ref int) bool { + return d.ObjectTypeExtensions[ref].HasFieldDefinitions +} + +func (d *Document) ObjectTypeExtensionHasDirectives(ref int) bool { + return d.ObjectTypeExtensions[ref].HasDirectives +} + +func (d *Document) ExtendObjectTypeDefinitionByObjectTypeExtension(objectTypeDefinitionRef, objectTypeExtensionRef int) { + if d.ObjectTypeExtensionHasFieldDefinitions(objectTypeExtensionRef) { + d.ObjectTypeDefinitions[objectTypeDefinitionRef].FieldsDefinition.Refs = append(d.ObjectTypeDefinitions[objectTypeDefinitionRef].FieldsDefinition.Refs, d.ObjectTypeExtensions[objectTypeExtensionRef].FieldsDefinition.Refs...) + d.ObjectTypeDefinitions[objectTypeDefinitionRef].HasFieldDefinitions = true + } + + if d.ObjectTypeExtensionHasDirectives(objectTypeExtensionRef) { + d.ObjectTypeDefinitions[objectTypeDefinitionRef].Directives.Refs = append(d.ObjectTypeDefinitions[objectTypeDefinitionRef].Directives.Refs, d.ObjectTypeExtensions[objectTypeExtensionRef].Directives.Refs...) 
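
Note that every merge helper in these extension files also records the consumed extension node in the index, so merged extensions remain inspectable after normalization (a fragment, doc assumed in scope):

    // extensions already folded into their base definitions
    for _, node := range doc.Index.MergedTypeExtensions {
    	fmt.Printf("merged %s ref=%d\n", node.Kind, node.Ref) // NodeKind implements fmt.Stringer
    }
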
+ d.ObjectTypeDefinitions[objectTypeDefinitionRef].HasDirectives = true + } + + if len(d.ObjectTypeExtensions[objectTypeExtensionRef].ImplementsInterfaces.Refs) > 0 { + d.ObjectTypeDefinitions[objectTypeDefinitionRef].ImplementsInterfaces.Refs = append( + d.ObjectTypeDefinitions[objectTypeDefinitionRef].ImplementsInterfaces.Refs, + d.ObjectTypeExtensions[objectTypeExtensionRef].ImplementsInterfaces.Refs..., + ) + } + + d.Index.MergedTypeExtensions = append(d.Index.MergedTypeExtensions, Node{Ref: objectTypeExtensionRef, Kind: NodeKindObjectTypeExtension}) +} + +func (d *Document) ImportAndExtendObjectTypeDefinitionByObjectTypeExtension(objectTypeExtensionRef int) { + d.ImportObjectTypeDefinitionWithDirectives( + d.ObjectTypeExtensionNameBytes(objectTypeExtensionRef).String(), + d.ObjectTypeExtensionDescriptionNameString(objectTypeExtensionRef), + d.ObjectTypeExtensions[objectTypeExtensionRef].FieldsDefinition.Refs, + d.ObjectTypeExtensions[objectTypeExtensionRef].ImplementsInterfaces.Refs, + d.ObjectTypeExtensions[objectTypeExtensionRef].Directives.Refs, + ) + d.Index.MergedTypeExtensions = append(d.Index.MergedTypeExtensions, Node{Ref: objectTypeExtensionRef, Kind: NodeKindObjectTypeExtension}) +} + +func (d *Document) AddObjectTypeDefinitionExtension(extension ObjectTypeExtension) (ref int) { + d.ObjectTypeExtensions = append(d.ObjectTypeExtensions, extension) + return len(d.ObjectTypeExtensions) - 1 +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_operation_definition.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_operation_definition.go new file mode 100644 index 00000000000..2c9efa932c4 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_operation_definition.go @@ -0,0 +1,126 @@ +package ast + +import ( + "math" + + "github.com/TykTechnologies/graphql-go-tools/internal/pkg/unsafebytes" + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/position" +) + +type OperationType int + +const ( + OperationTypeUnknown OperationType = iota + OperationTypeQuery + OperationTypeMutation + OperationTypeSubscription +) + +type OperationDefinition struct { + OperationType OperationType // one of query, mutation, subscription + OperationTypeLiteral position.Position // position of the operation type literal, if present + Name ByteSliceReference // optional, user defined name of the operation + HasVariableDefinitions bool + VariableDefinitions VariableDefinitionList // optional, e.g. ($devicePicSize: Int) + HasDirectives bool + Directives DirectiveList // optional, e.g. @foo + SelectionSet int // e.g. 
{field} + HasSelections bool +} + +func (d *Document) OperationDefinitionHasVariableDefinition(ref int, variableName string) bool { + for _, i := range d.OperationDefinitions[ref].VariableDefinitions.Refs { + value := d.VariableDefinitions[i].VariableValue.Ref + if variableName == d.VariableValueNameString(value) { + return true + } + } + return false +} + +func (d *Document) OperationDefinitionNameBytes(ref int) ByteSlice { + return d.Input.ByteSlice(d.OperationDefinitions[ref].Name) +} + +func (d *Document) OperationDefinitionNameString(ref int) string { + return unsafebytes.BytesToString(d.Input.ByteSlice(d.OperationDefinitions[ref].Name)) +} + +func (d *Document) AddOperationDefinitionToRootNodes(definition OperationDefinition) Node { + d.OperationDefinitions = append(d.OperationDefinitions, definition) + node := Node{Kind: NodeKindOperationDefinition, Ref: len(d.OperationDefinitions) - 1} + d.RootNodes = append(d.RootNodes, node) + return node +} + +func (d *Document) AddVariableDefinitionToOperationDefinition(operationDefinitionRef, variableValueRef, typeRef int) { + if !d.OperationDefinitions[operationDefinitionRef].HasVariableDefinitions { + d.OperationDefinitions[operationDefinitionRef].HasVariableDefinitions = true + d.OperationDefinitions[operationDefinitionRef].VariableDefinitions.Refs = d.Refs[d.NextRefIndex()][:0] + } + variableDefinition := VariableDefinition{ + VariableValue: Value{ + Kind: ValueKindVariable, + Ref: variableValueRef, + }, + Type: typeRef, + } + d.VariableDefinitions = append(d.VariableDefinitions, variableDefinition) + ref := len(d.VariableDefinitions) - 1 + d.OperationDefinitions[operationDefinitionRef].VariableDefinitions.Refs = + append(d.OperationDefinitions[operationDefinitionRef].VariableDefinitions.Refs, ref) +} + +func (d *Document) AddImportedVariableDefinitionToOperationDefinition(operationDefinition, variableDefinition int) { + if !d.OperationDefinitions[operationDefinition].HasVariableDefinitions { + d.OperationDefinitions[operationDefinition].HasVariableDefinitions = true + d.OperationDefinitions[operationDefinition].VariableDefinitions.Refs = d.Refs[d.NextRefIndex()][:0] + } + d.OperationDefinitions[operationDefinition].VariableDefinitions.Refs = + append(d.OperationDefinitions[operationDefinition].VariableDefinitions.Refs, variableDefinition) +} + +func (d *Document) OperationNameExists(operationName string) bool { + for i := range d.RootNodes { + if d.RootNodes[i].Kind != NodeKindOperationDefinition { + continue + } + if d.OperationDefinitionNameString(d.RootNodes[i].Ref) == operationName { + return true + } + } + + return false +} + +func (d *Document) NumOfOperationDefinitions() (n int) { + for i := range d.RootNodes { + if d.RootNodes[i].Kind == NodeKindOperationDefinition { + n++ + } + } + return +} + +const ( + alphabet = `abcdefghijklmnopqrstuvwxyz` +) + +func (d *Document) GenerateUnusedVariableDefinitionName(operationDefinition int) []byte { + var i, k int64 + + for i = 1; i < math.MaxInt64; i++ { + out := make([]byte, i) + for j := range alphabet { + for k = 0; k < i; k++ { + out[k] = alphabet[j] + } + _, exists := d.VariableDefinitionByNameAndOperation(operationDefinition, out) + if !exists { + return out + } + } + } + + return nil +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_root_operation_type_definition.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_root_operation_type_definition.go new file mode 100644 index 00000000000..3185d61804b --- /dev/null +++ 
b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_root_operation_type_definition.go @@ -0,0 +1,148 @@ +package ast + +import ( + "bytes" + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/position" +) + +var DefaultQueryTypeName = []byte("Query") +var DefaultMutationTypeName = []byte("Mutation") +var DefaultSubscriptionTypeName = []byte("Subscription") + +type RootOperationTypeDefinitionList struct { + LBrace position.Position // { + Refs []int // RootOperationTypeDefinition + RBrace position.Position // } +} + +type RootOperationTypeDefinition struct { + OperationType OperationType // one of query, mutation, subscription + Colon position.Position // : + NamedType Type // e.g. Query +} + +func (d *Document) RootOperationTypeDefinitionNameString(ref int) string { + return d.RootOperationTypeDefinitions[ref].OperationType.String() +} + +func (d *Document) RootOperationTypeDefinitionIsFirstInSchemaDefinition(ref int, ancestor Node) bool { + switch ancestor.Kind { + case NodeKindSchemaDefinition: + if len(d.SchemaDefinitions[ancestor.Ref].RootOperationTypeDefinitions.Refs) == 0 { + return false + } + return ref == d.SchemaDefinitions[ancestor.Ref].RootOperationTypeDefinitions.Refs[0] + case NodeKindSchemaExtension: + if len(d.SchemaExtensions[ancestor.Ref].RootOperationTypeDefinitions.Refs) == 0 { + return false + } + return ref == d.SchemaExtensions[ancestor.Ref].RootOperationTypeDefinitions.Refs[0] + default: + return false + } +} + +func (d *Document) RootOperationTypeDefinitionIsLastInSchemaDefinition(ref int, ancestor Node) bool { + switch ancestor.Kind { + case NodeKindSchemaDefinition: + return d.SchemaDefinitions[ancestor.Ref].RootOperationTypeDefinitions.Refs[len(d.SchemaDefinitions[ancestor.Ref].RootOperationTypeDefinitions.Refs)-1] == ref + case NodeKindSchemaExtension: + return d.SchemaExtensions[ancestor.Ref].RootOperationTypeDefinitions.Refs[len(d.SchemaExtensions[ancestor.Ref].RootOperationTypeDefinitions.Refs)-1] == ref + default: + return false + } +} + +func (d *Document) CreateRootOperationTypeDefinition(operationType OperationType, rootNodeRef int) (ref int) { + switch operationType { + case OperationTypeQuery: + d.Index.QueryTypeName = DefaultQueryTypeName + case OperationTypeMutation: + d.Index.MutationTypeName = DefaultMutationTypeName + case OperationTypeSubscription: + d.Index.SubscriptionTypeName = DefaultSubscriptionTypeName + default: + return + } + + nameRef := d.ObjectTypeDefinitionNameRef(d.RootNodes[rootNodeRef].Ref) + return d.AddRootOperationTypeDefinition(RootOperationTypeDefinition{ + OperationType: operationType, + NamedType: Type{ + TypeKind: TypeKindNamed, + Name: nameRef, + OfType: -1, + }, + }) +} + +func (d *Document) AddRootOperationTypeDefinition(rootOperationTypeDefinition RootOperationTypeDefinition) (ref int) { + d.RootOperationTypeDefinitions = append(d.RootOperationTypeDefinitions, rootOperationTypeDefinition) + return len(d.RootOperationTypeDefinitions) - 1 +} + +func (d *Document) ImportRootOperationTypeDefinition(name string, operationType OperationType) (ref int) { + nameBytes := []byte(name) + + switch operationType { + case OperationTypeQuery: + d.Index.QueryTypeName = nameBytes + case OperationTypeMutation: + d.Index.MutationTypeName = nameBytes + case OperationTypeSubscription: + d.Index.SubscriptionTypeName = nameBytes + default: + return -1 + } + + operationTypeDefinition := RootOperationTypeDefinition{ + OperationType: operationType, + NamedType: Type{ + Name: d.Input.AppendInputBytes(nameBytes), + TypeKind: 
TypeKindNamed, + OfType: -1, + }, + } + + return d.AddRootOperationTypeDefinition(operationTypeDefinition) +} + +func (d *Document) ImportRootOperationTypeDefinitions(queryTypeName, mutationTypeName, subscriptionTypeName string) (refs []int) { + if queryTypeName != "" { + refs = append(refs, d.ImportRootOperationTypeDefinition(queryTypeName, OperationTypeQuery)) + } + if mutationTypeName != "" { + refs = append(refs, d.ImportRootOperationTypeDefinition(mutationTypeName, OperationTypeMutation)) + } + if subscriptionTypeName != "" { + refs = append(refs, d.ImportRootOperationTypeDefinition(subscriptionTypeName, OperationTypeSubscription)) + } + + return refs +} + +func (d *Document) ReplaceRootOperationTypeDefinition(name string, operationType OperationType) (ref int, ok bool) { + node, exists := d.NodeByNameStr(name) + if !exists || node.Kind != NodeKindObjectTypeDefinition { + return -1, false + } + + var rootOperationFound bool + for i := range d.RootOperationTypeDefinitions { + if d.RootOperationTypeDefinitions[i].OperationType == operationType { + d.RootOperationTypeDefinitions = append(d.RootOperationTypeDefinitions[:i], d.RootOperationTypeDefinitions[i+1:]...) + rootOperationFound = true + break + } + } + if !rootOperationFound { + return -1, false + } + + ref = d.ImportRootOperationTypeDefinition(name, operationType) + return ref, true +} + +func IsRootType(nameBytes []byte) bool { + return bytes.Equal(DefaultQueryTypeName, nameBytes) || bytes.Equal(DefaultMutationTypeName, nameBytes) || bytes.Equal(DefaultSubscriptionTypeName, nameBytes) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_scalar_type_definition.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_scalar_type_definition.go new file mode 100644 index 00000000000..2ce27a2729e --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_scalar_type_definition.go @@ -0,0 +1,65 @@ +package ast + +import ( + "github.com/TykTechnologies/graphql-go-tools/internal/pkg/unsafebytes" + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/position" +) + +// ScalarTypeDefinition +// example: +// scalar JSON +type ScalarTypeDefinition struct { + Description Description // optional, describes the scalar + ScalarLiteral position.Position // scalar + Name ByteSliceReference // e.g. JSON + HasDirectives bool + Directives DirectiveList // optional, e.g. 
@foo +} + +func (d *Document) ScalarTypeDefinitionNameBytes(ref int) ByteSlice { + return d.Input.ByteSlice(d.ScalarTypeDefinitions[ref].Name) +} + +func (d *Document) ScalarTypeDefinitionNameString(ref int) string { + return unsafebytes.BytesToString(d.Input.ByteSlice(d.ScalarTypeDefinitions[ref].Name)) +} + +func (d *Document) ScalarTypeDefinitionDescriptionBytes(ref int) ByteSlice { + if !d.ScalarTypeDefinitions[ref].Description.IsDefined { + return nil + } + return d.Input.ByteSlice(d.ScalarTypeDefinitions[ref].Description.Content) +} + +func (d *Document) ScalarTypeDefinitionDescriptionString(ref int) string { + return unsafebytes.BytesToString(d.ScalarTypeDefinitionDescriptionBytes(ref)) +} + +func (d *Document) ScalarTypeDefinitionHasDirectives(ref int) bool { + return d.ScalarTypeDefinitions[ref].HasDirectives +} + +func (d *Document) AddScalarTypeDefinition(definition ScalarTypeDefinition) (ref int) { + d.ScalarTypeDefinitions = append(d.ScalarTypeDefinitions, definition) + return len(d.ScalarTypeDefinitions) - 1 +} + +func (d *Document) ImportScalarTypeDefinition(name, description string) (ref int) { + return d.ImportScalarTypeDefinitionWithDirectives(name, description, nil) +} + +func (d *Document) ImportScalarTypeDefinitionWithDirectives(name, description string, directiveRefs []int) (ref int) { + definition := ScalarTypeDefinition{ + Description: d.ImportDescription(description), + Name: d.Input.AppendInputString(name), + HasDirectives: len(directiveRefs) > 0, + Directives: DirectiveList{ + Refs: directiveRefs, + }, + } + + ref = d.AddScalarTypeDefinition(definition) + d.ImportRootNode(ref, NodeKindScalarTypeDefinition) + + return +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_scalar_type_extension.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_scalar_type_extension.go new file mode 100644 index 00000000000..cd08fb93c07 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_scalar_type_extension.go @@ -0,0 +1,52 @@ +package ast + +import ( + "github.com/TykTechnologies/graphql-go-tools/internal/pkg/unsafebytes" + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/position" +) + +type ScalarTypeExtension struct { + ExtendLiteral position.Position + ScalarTypeDefinition +} + +func (d *Document) ScalarTypeExtensionNameBytes(ref int) ByteSlice { + return d.Input.ByteSlice(d.ScalarTypeExtensions[ref].Name) +} + +func (d *Document) ScalarTypeExtensionNameString(ref int) string { + return unsafebytes.BytesToString(d.Input.ByteSlice(d.ScalarTypeExtensions[ref].Name)) +} + +func (d *Document) ScalarTypeExtensionDescriptionBytes(ref int) ByteSlice { + if !d.ScalarTypeExtensions[ref].Description.IsDefined { + return nil + } + return d.Input.ByteSlice(d.ScalarTypeExtensions[ref].Description.Content) +} + +func (d *Document) ScalarTypeExtensionDescriptionString(ref int) string { + return unsafebytes.BytesToString(d.ScalarTypeExtensionDescriptionBytes(ref)) +} + +func (d *Document) ScalarTypeExtensionHasDirectives(ref int) bool { + return d.ScalarTypeExtensions[ref].HasDirectives +} + +func (d *Document) ExtendScalarTypeDefinitionByScalarTypeExtension(scalarTypeDefinitionRef, scalarTypeExtensionRef int) { + if d.ScalarTypeExtensionHasDirectives(scalarTypeExtensionRef) { + d.ScalarTypeDefinitions[scalarTypeDefinitionRef].Directives.Refs = append(d.ScalarTypeDefinitions[scalarTypeDefinitionRef].Directives.Refs, d.ScalarTypeExtensions[scalarTypeExtensionRef].Directives.Refs...) 
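
The scalar import helper above composes in a single call; a minimal sketch, assuming ast.NewDocument() (the package constructor, defined outside this excerpt):

    doc := ast.NewDocument()
    ref := doc.ImportScalarTypeDefinition("DateTime", "an RFC 3339 timestamp")
    // doc.ScalarTypeDefinitions[ref] now holds the definition; ImportRootNode,
    // as called above, by its name also registers it as a root node
    _ = ref
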
+ d.ScalarTypeDefinitions[scalarTypeDefinitionRef].HasDirectives = true + } + + d.Index.MergedTypeExtensions = append(d.Index.MergedTypeExtensions, Node{Ref: scalarTypeExtensionRef, Kind: NodeKindScalarTypeExtension}) +} + +func (d *Document) ImportAndExtendScalarTypeDefinitionByScalarTypeExtension(scalarTypeExtensionRef int) { + d.ImportScalarTypeDefinitionWithDirectives( + d.ScalarTypeExtensionNameString(scalarTypeExtensionRef), + d.ScalarTypeExtensionDescriptionString(scalarTypeExtensionRef), + d.ScalarTypeExtensions[scalarTypeExtensionRef].Directives.Refs, + ) + d.Index.MergedTypeExtensions = append(d.Index.MergedTypeExtensions, Node{Ref: scalarTypeExtensionRef, Kind: NodeKindScalarTypeExtension}) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_schema_definition.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_schema_definition.go new file mode 100644 index 00000000000..602ac4e0e4c --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_schema_definition.go @@ -0,0 +1,60 @@ +package ast + +import "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/position" + +type SchemaDefinition struct { + SchemaLiteral position.Position // schema + HasDirectives bool + Directives DirectiveList // optional, e.g. @foo + RootOperationTypeDefinitions RootOperationTypeDefinitionList // e.g. query: Query, mutation: Mutation, subscription: Subscription +} + +func (s *SchemaDefinition) AddRootOperationTypeDefinitionRefs(refs ...int) { + s.RootOperationTypeDefinitions.Refs = append(s.RootOperationTypeDefinitions.Refs, refs...) +} + +func (d *Document) HasSchemaDefinition() bool { + return d.SchemaDefinitionRef() != InvalidRef +} + +func (d *Document) SchemaDefinitionRef() int { + for i := range d.RootNodes { + if d.RootNodes[i].Kind == NodeKindSchemaDefinition { + return d.RootNodes[i].Ref + } + } + + return InvalidRef +} + +func (d *Document) AddSchemaDefinition(schemaDefinition SchemaDefinition) (ref int) { + d.SchemaDefinitions = append(d.SchemaDefinitions, schemaDefinition) + return len(d.SchemaDefinitions) - 1 +} + +func (d *Document) AddSchemaDefinitionRootNode(schemaDefinition SchemaDefinition) { + ref := d.AddSchemaDefinition(schemaDefinition) + schemaNode := Node{ + Kind: NodeKindSchemaDefinition, + Ref: ref, + } + d.RootNodes = append([]Node{schemaNode}, d.RootNodes...) 
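+	// Note that the schema node is prepended rather than appended, so the schema definition always stays the first root node of the document.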
+} + +func (d *Document) ImportSchemaDefinition(queryTypeName, mutationTypeName, subscriptionTypeName string) { + rootOperationTypeRefs := d.ImportRootOperationTypeDefinitions(queryTypeName, mutationTypeName, subscriptionTypeName) + + schemaDefinition := SchemaDefinition{ + RootOperationTypeDefinitions: RootOperationTypeDefinitionList{ + Refs: rootOperationTypeRefs, + }, + } + + d.AddSchemaDefinitionRootNode(schemaDefinition) +} + +func (d *Document) ReplaceRootOperationTypesOfSchemaDefinition(schemaDefinitionRef int, queryTypeName, mutationTypeName, subscriptionTypeName string) { + d.RootOperationTypeDefinitions = d.RootOperationTypeDefinitions[:0] + rootOperationTypeRefs := d.ImportRootOperationTypeDefinitions(queryTypeName, mutationTypeName, subscriptionTypeName) + d.SchemaDefinitions[schemaDefinitionRef].RootOperationTypeDefinitions.Refs = rootOperationTypeRefs +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_schema_extension.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_schema_extension.go new file mode 100644 index 00000000000..fa53425a0e2 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_schema_extension.go @@ -0,0 +1,8 @@ +package ast + +import "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/position" + +type SchemaExtension struct { + ExtendLiteral position.Position + SchemaDefinition +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_selection.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_selection.go new file mode 100644 index 00000000000..b27d108ab83 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_selection.go @@ -0,0 +1,181 @@ +package ast + +import ( + "bytes" + "fmt" + + "github.com/TykTechnologies/graphql-go-tools/internal/pkg/unsafebytes" + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/position" +) + +type SelectionKind int + +const ( + SelectionKindUnknown SelectionKind = 18 + iota + SelectionKindField + SelectionKindFragmentSpread + SelectionKindInlineFragment +) + +type SelectionSet struct { + LBrace position.Position + RBrace position.Position + SelectionRefs []int +} + +type Selection struct { + Kind SelectionKind // one of Field, FragmentSpread, InlineFragment + Ref int // reference to the actual selection +} + +func (d *Document) CopySelection(ref int) int { + innerRef := -1 + + switch d.Selections[ref].Kind { + case SelectionKindField: + innerRef = d.CopyField(d.Selections[ref].Ref) + case SelectionKindFragmentSpread: + innerRef = d.CopyFragmentSpread(d.Selections[ref].Ref) + case SelectionKindInlineFragment: + innerRef = d.CopyInlineFragment(d.Selections[ref].Ref) + } + + return d.AddSelectionToDocument(Selection{ + Kind: d.Selections[ref].Kind, + Ref: innerRef, + }) +} + +func (d *Document) CopySelectionSet(ref int) int { + refs := d.NewEmptyRefs() + for _, r := range d.SelectionSets[ref].SelectionRefs { + refs = append(refs, d.CopySelection(r)) + } + return d.AddSelectionSetToDocument(SelectionSet{ + SelectionRefs: refs, + }) +} + +func (d *Document) PrintSelections(selections []int) (out string) { + out += "[" + for i, ref := range selections { + out += fmt.Sprintf("%+v", d.Selections[ref]) + if i != len(selections)-1 { + out += "," + } + } + out += "]" + return +} + +func (d *Document) SelectionsBeforeField(field int, selectionSet Node) bool { + if selectionSet.Kind != NodeKindSelectionSet { + return false + } + + if len(d.SelectionSets[selectionSet.Ref].SelectionRefs) == 1 { + 
return false + } + + for i, j := range d.SelectionSets[selectionSet.Ref].SelectionRefs { + if d.Selections[j].Kind == SelectionKindField && d.Selections[j].Ref == field { + return i != 0 + } + } + + return false +} + +func (d *Document) SelectionsAfter(selectionKind SelectionKind, selectionRef int, selectionSet Node) bool { + if selectionSet.Kind != NodeKindSelectionSet { + return false + } + + if len(d.SelectionSets[selectionSet.Ref].SelectionRefs) == 1 { + return false + } + + for i, j := range d.SelectionSets[selectionSet.Ref].SelectionRefs { + if d.Selections[j].Kind == selectionKind && d.Selections[j].Ref == selectionRef { + return i != len(d.SelectionSets[selectionSet.Ref].SelectionRefs)-1 + } + } + + return false +} + +func (d *Document) SelectionsAfterField(field int, selectionSet Node) bool { + return d.SelectionsAfter(SelectionKindField, field, selectionSet) +} + +func (d *Document) SelectionsAfterInlineFragment(inlineFragment int, selectionSet Node) bool { + return d.SelectionsAfter(SelectionKindInlineFragment, inlineFragment, selectionSet) +} + +func (d *Document) SelectionsAfterFragmentSpread(fragmentSpread int, selectionSet Node) bool { + return d.SelectionsAfter(SelectionKindFragmentSpread, fragmentSpread, selectionSet) +} + +func (d *Document) AddSelectionSetToDocument(set SelectionSet) int { + d.SelectionSets = append(d.SelectionSets, set) + return len(d.SelectionSets) - 1 +} + +func (d *Document) AddSelectionSet() Node { + return Node{ + Kind: NodeKindSelectionSet, + Ref: d.AddSelectionSetToDocument(SelectionSet{ + SelectionRefs: d.Refs[d.NextRefIndex()][:0], + }), + } +} + +func (d *Document) AddSelectionToDocument(selection Selection) int { + d.Selections = append(d.Selections, selection) + return len(d.Selections) - 1 +} + +func (d *Document) AddSelection(set int, selection Selection) { + d.SelectionSets[set].SelectionRefs = append(d.SelectionSets[set].SelectionRefs, d.AddSelectionToDocument(selection)) +} + +func (d *Document) EmptySelectionSet(ref int) { + d.SelectionSets[ref].SelectionRefs = d.SelectionSets[ref].SelectionRefs[:0] +} + +func (d *Document) AppendSelectionSet(ref int, appendRef int) { + d.SelectionSets[ref].SelectionRefs = append(d.SelectionSets[ref].SelectionRefs, d.SelectionSets[appendRef].SelectionRefs...) +} + +func (d *Document) ReplaceSelectionOnSelectionSet(ref, replace, with int) { + d.SelectionSets[ref].SelectionRefs = append(d.SelectionSets[ref].SelectionRefs[:replace], append(d.SelectionSets[with].SelectionRefs, d.SelectionSets[ref].SelectionRefs[replace+1:]...)...) +} + +func (d *Document) RemoveFromSelectionSet(ref int, index int) { + d.SelectionSets[ref].SelectionRefs = append(d.SelectionSets[ref].SelectionRefs[:index], d.SelectionSets[ref].SelectionRefs[index+1:]...) 
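+	// The append splice above removes the selection at index in place while preserving the order of the remaining refs, e.g. d.RemoveFromSelectionSet(set, 1) drops the second selection from the set.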
+} + +func (d *Document) SelectionSetHasFieldSelectionWithNameOrAliasBytes(set int, nameOrAlias []byte) bool { + for _, i := range d.SelectionSets[set].SelectionRefs { + if d.Selections[i].Kind != SelectionKindField { + continue + } + field := d.Selections[i].Ref + fieldName := d.FieldNameBytes(field) + if bytes.Equal(fieldName, nameOrAlias) { + return true + } + if !d.FieldAliasIsDefined(field) { + continue + } + fieldAlias := d.FieldAliasBytes(field) + if bytes.Equal(fieldAlias, nameOrAlias) { + return true + } + } + return false +} + +func (d *Document) SelectionSetHasFieldSelectionWithNameOrAliasString(set int, nameOrAlias string) bool { + return d.SelectionSetHasFieldSelectionWithNameOrAliasBytes(set, unsafebytes.StringToBytes(nameOrAlias)) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_string.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_string.go new file mode 100644 index 00000000000..74d7ec4d725 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_string.go @@ -0,0 +1,160 @@ +// Code generated by "stringer -type=OperationType,ValueKind,TypeKind,SelectionKind,NodeKind,PathKind -output ast_string.go"; DO NOT EDIT. + +package ast + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[OperationTypeUnknown-0] + _ = x[OperationTypeQuery-1] + _ = x[OperationTypeMutation-2] + _ = x[OperationTypeSubscription-3] +} + +const _OperationType_name = "OperationTypeUnknownOperationTypeQueryOperationTypeMutationOperationTypeSubscription" + +var _OperationType_index = [...]uint8{0, 20, 38, 59, 84} + +func (i OperationType) String() string { + if i < 0 || i >= OperationType(len(_OperationType_index)-1) { + return "OperationType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _OperationType_name[_OperationType_index[i]:_OperationType_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[ValueKindUnknown-4] + _ = x[ValueKindString-5] + _ = x[ValueKindBoolean-6] + _ = x[ValueKindInteger-7] + _ = x[ValueKindFloat-8] + _ = x[ValueKindVariable-9] + _ = x[ValueKindNull-10] + _ = x[ValueKindList-11] + _ = x[ValueKindObject-12] + _ = x[ValueKindEnum-13] +} + +const _ValueKind_name = "ValueKindUnknownValueKindStringValueKindBooleanValueKindIntegerValueKindFloatValueKindVariableValueKindNullValueKindListValueKindObjectValueKindEnum" + +var _ValueKind_index = [...]uint8{0, 16, 31, 47, 63, 77, 94, 107, 120, 135, 148} + +func (i ValueKind) String() string { + i -= 4 + if i < 0 || i >= ValueKind(len(_ValueKind_index)-1) { + return "ValueKind(" + strconv.FormatInt(int64(i+4), 10) + ")" + } + return _ValueKind_name[_ValueKind_index[i]:_ValueKind_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[TypeKindUnknown-14] + _ = x[TypeKindNamed-15] + _ = x[TypeKindList-16] + _ = x[TypeKindNonNull-17] +} + +const _TypeKind_name = "TypeKindUnknownTypeKindNamedTypeKindListTypeKindNonNull" + +var _TypeKind_index = [...]uint8{0, 15, 28, 40, 55} + +func (i TypeKind) String() string { + i -= 14 + if i < 0 || i >= TypeKind(len(_TypeKind_index)-1) { + return "TypeKind(" + strconv.FormatInt(int64(i+14), 10) + ")" + } + return _TypeKind_name[_TypeKind_index[i]:_TypeKind_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[SelectionKindUnknown-18] + _ = x[SelectionKindField-19] + _ = x[SelectionKindFragmentSpread-20] + _ = x[SelectionKindInlineFragment-21] +} + +const _SelectionKind_name = "SelectionKindUnknownSelectionKindFieldSelectionKindFragmentSpreadSelectionKindInlineFragment" + +var _SelectionKind_index = [...]uint8{0, 20, 38, 65, 92} + +func (i SelectionKind) String() string { + i -= 18 + if i < 0 || i >= SelectionKind(len(_SelectionKind_index)-1) { + return "SelectionKind(" + strconv.FormatInt(int64(i+18), 10) + ")" + } + return _SelectionKind_name[_SelectionKind_index[i]:_SelectionKind_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[NodeKindUnknown-22] + _ = x[NodeKindSchemaDefinition-23] + _ = x[NodeKindSchemaExtension-24] + _ = x[NodeKindObjectTypeDefinition-25] + _ = x[NodeKindObjectTypeExtension-26] + _ = x[NodeKindInterfaceTypeDefinition-27] + _ = x[NodeKindInterfaceTypeExtension-28] + _ = x[NodeKindUnionTypeDefinition-29] + _ = x[NodeKindUnionTypeExtension-30] + _ = x[NodeKindUnionMemberType-31] + _ = x[NodeKindEnumTypeDefinition-32] + _ = x[NodeKindEnumValueDefinition-33] + _ = x[NodeKindEnumTypeExtension-34] + _ = x[NodeKindInputObjectTypeDefinition-35] + _ = x[NodeKindInputValueDefinition-36] + _ = x[NodeKindInputObjectTypeExtension-37] + _ = x[NodeKindScalarTypeDefinition-38] + _ = x[NodeKindScalarTypeExtension-39] + _ = x[NodeKindDirectiveDefinition-40] + _ = x[NodeKindOperationDefinition-41] + _ = x[NodeKindSelectionSet-42] + _ = x[NodeKindField-43] + _ = x[NodeKindFieldDefinition-44] + _ = x[NodeKindFragmentSpread-45] + _ = x[NodeKindInlineFragment-46] + _ = x[NodeKindFragmentDefinition-47] + _ = x[NodeKindArgument-48] + _ = x[NodeKindDirective-49] + _ = x[NodeKindVariableDefinition-50] +} + +const _NodeKind_name = "NodeKindUnknownNodeKindSchemaDefinitionNodeKindSchemaExtensionNodeKindObjectTypeDefinitionNodeKindObjectTypeExtensionNodeKindInterfaceTypeDefinitionNodeKindInterfaceTypeExtensionNodeKindUnionTypeDefinitionNodeKindUnionTypeExtensionNodeKindUnionMemberTypeNodeKindEnumTypeDefinitionNodeKindEnumValueDefinitionNodeKindEnumTypeExtensionNodeKindInputObjectTypeDefinitionNodeKindInputValueDefinitionNodeKindInputObjectTypeExtensionNodeKindScalarTypeDefinitionNodeKindScalarTypeExtensionNodeKindDirectiveDefinitionNodeKindOperationDefinitionNodeKindSelectionSetNodeKindFieldNodeKindFieldDefinitionNodeKindFragmentSpreadNodeKindInlineFragmentNodeKindFragmentDefinitionNodeKindArgumentNodeKindDirectiveNodeKindVariableDefinition" + +var _NodeKind_index = [...]uint16{0, 15, 39, 62, 90, 117, 148, 178, 205, 231, 254, 280, 307, 332, 365, 393, 425, 453, 480, 507, 534, 554, 567, 590, 612, 634, 660, 676, 693, 719} + +func (i NodeKind) String() string { + i -= 
22 + if i < 0 || i >= NodeKind(len(_NodeKind_index)-1) { + return "NodeKind(" + strconv.FormatInt(int64(i+22), 10) + ")" + } + return _NodeKind_name[_NodeKind_index[i]:_NodeKind_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[UnknownPathKind-0] + _ = x[ArrayIndex-1] + _ = x[FieldName-2] +} + +const _PathKind_name = "UnknownPathKindArrayIndexFieldName" + +var _PathKind_index = [...]uint8{0, 15, 25, 34} + +func (i PathKind) String() string { + if i < 0 || i >= PathKind(len(_PathKind_index)-1) { + return "PathKind(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _PathKind_name[_PathKind_index[i]:_PathKind_index[i+1]] +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_type.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_type.go new file mode 100644 index 00000000000..d382a60436c --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_type.go @@ -0,0 +1,249 @@ +package ast + +import ( + "bytes" + "io" + + "github.com/TykTechnologies/graphql-go-tools/internal/pkg/unsafebytes" + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/literal" + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/position" +) + +type TypeKind int + +const ( + TypeKindUnknown TypeKind = 14 + iota + TypeKindNamed + TypeKindList + TypeKindNonNull +) + +type Type struct { + TypeKind TypeKind // one of Named,List,NonNull + Name ByteSliceReference // e.g. String (only on NamedType) + Position position.Position + Open position.Position // [ (only on ListType) + Close position.Position // ] (only on ListType) + Bang position.Position // ! (only on NonNullType) + OfType int +} + +func (d *Document) TypeNameBytes(ref int) ByteSlice { + return d.Input.ByteSlice(d.Types[ref].Name) +} + +func (d *Document) TypeNameString(ref int) string { + return unsafebytes.BytesToString(d.Input.ByteSlice(d.Types[ref].Name)) +} + +func (d *Document) PrintType(ref int, w io.Writer) error { + switch d.Types[ref].TypeKind { + case TypeKindNonNull: + err := d.PrintType(d.Types[ref].OfType, w) + if err != nil { + return err + } + _, err = w.Write(literal.BANG) + return err + case TypeKindNamed: + _, err := w.Write(d.Input.ByteSlice(d.Types[ref].Name)) + return err + case TypeKindList: + _, err := w.Write(literal.LBRACK) + if err != nil { + return err + } + err = d.PrintType(d.Types[ref].OfType, w) + if err != nil { + return err + } + _, err = w.Write(literal.RBRACK) + return err + } + return nil +} + +func (d *Document) PrintTypeBytes(ref int, buf []byte) ([]byte, error) { + if buf == nil { + buf = make([]byte, 0, 24) + } + b := bytes.NewBuffer(buf) + err := d.PrintType(ref, b) + return b.Bytes(), err +} + +func (d *Document) AddType(t Type) (ref int) { + d.Types = append(d.Types, t) + return len(d.Types) - 1 +} + +func (d *Document) AddNamedTypeWithPosition(nameRef ByteSliceReference, position position.Position) (ref int) { + return d.AddType(Type{ + TypeKind: TypeKindNamed, + Name: nameRef, + OfType: -1, + Position: position, + }) +} + +func (d *Document) AddNamedType(name []byte) (ref int) { + nameRef := d.Input.AppendInputBytes(name) + return d.AddNamedTypeWithPosition(nameRef, position.Position{}) +} + +func (d *Document) AddListType(ofType int) (ref int) { + return d.AddListTypeWithPosition(ofType, position.Position{}, position.Position{}) +} + +func (d *Document) AddListTypeWithPosition(ofType int, open 
position.Position, close position.Position) (ref int) { + return d.AddType(Type{ + TypeKind: TypeKindList, + Open: open, + Close: close, + OfType: ofType, + Position: open, + }) +} + +func (d *Document) AddNonNullType(ofType int) (ref int) { + return d.AddNonNullTypeWithBangPosition(ofType, position.Position{}) +} + +func (d *Document) AddNonNullTypeWithBangPosition(ofType int, bang position.Position) (ref int) { + return d.AddType(Type{ + TypeKind: TypeKindNonNull, + Bang: bang, + OfType: ofType, + Position: d.Types[ofType].Position, + }) +} + +func (d *Document) AddNonNullNamedType(name []byte) (ref int) { + namedRef := d.AddNamedType(name) + return d.AddNonNullType(namedRef) +} + +func (d *Document) TypesAreEqualDeep(left int, right int) bool { + for { + if left == -1 || right == -1 { + return false + } + if d.Types[left].TypeKind != d.Types[right].TypeKind { + return false + } + if d.Types[left].TypeKind == TypeKindNamed { + leftName := d.TypeNameBytes(left) + rightName := d.TypeNameBytes(right) + return bytes.Equal(leftName, rightName) + } + left = d.Types[left].OfType + right = d.Types[right].OfType + } +} + +func (d *Document) TypeIsScalar(ref int, definition *Document) bool { + switch d.Types[ref].TypeKind { + case TypeKindNamed: + typeName := d.TypeNameBytes(ref) + node, _ := definition.Index.FirstNodeByNameBytes(typeName) + return node.Kind == NodeKindScalarTypeDefinition + case TypeKindNonNull: + return d.TypeIsScalar(d.Types[ref].OfType, definition) + } + return false +} + +func (d *Document) TypeIsEnum(ref int, definition *Document) bool { + switch d.Types[ref].TypeKind { + case TypeKindNamed: + typeName := d.TypeNameBytes(ref) + node, _ := definition.Index.FirstNodeByNameBytes(typeName) + return node.Kind == NodeKindEnumTypeDefinition + case TypeKindNonNull: + return d.TypeIsEnum(d.Types[ref].OfType, definition) + } + return false +} + +func (d *Document) TypeIsNonNull(ref int) bool { + return d.Types[ref].TypeKind == TypeKindNonNull +} + +func (d *Document) TypeIsList(ref int) bool { + switch d.Types[ref].TypeKind { + case TypeKindList: + return true + case TypeKindNonNull: + return d.TypeIsList(d.Types[ref].OfType) + default: + return false + } +} + +func (d *Document) TypesAreCompatibleDeep(left int, right int) bool { + for { + if left == -1 || right == -1 { + return false + } + if d.Types[left].TypeKind != d.Types[right].TypeKind { + return false + } + if d.Types[left].TypeKind == TypeKindNamed { + leftName := d.TypeNameBytes(left) + rightName := d.TypeNameBytes(right) + if bytes.Equal(leftName, rightName) { + return true + } + leftNode, _ := d.Index.FirstNodeByNameBytes(leftName) + rightNode, _ := d.Index.FirstNodeByNameBytes(rightName) + if leftNode.Kind == rightNode.Kind { + return false + } + if leftNode.Kind == NodeKindInterfaceTypeDefinition && rightNode.Kind == NodeKindObjectTypeDefinition { + return d.NodeImplementsInterface(rightNode, leftNode) + } + if leftNode.Kind == NodeKindObjectTypeDefinition && rightNode.Kind == NodeKindInterfaceTypeDefinition { + return d.NodeImplementsInterface(leftNode, rightNode) + } + if leftNode.Kind == NodeKindUnionTypeDefinition && rightNode.Kind == NodeKindObjectTypeDefinition { + return d.NodeIsUnionMember(rightNode, leftNode) + } + if leftNode.Kind == NodeKindObjectTypeDefinition && rightNode.Kind == NodeKindUnionTypeDefinition { + return d.NodeIsUnionMember(leftNode, rightNode) + } + return false + } + left = d.Types[left].OfType + right = d.Types[right].OfType + } +} + +func (d *Document) ResolveTypeNameBytes(ref int) 
ByteSlice { + resolvedTypeRef := d.ResolveUnderlyingType(ref) + return d.TypeNameBytes(resolvedTypeRef) +} + +func (d *Document) ResolveTypeNameString(ref int) string { + return unsafebytes.BytesToString(d.ResolveTypeNameBytes(ref)) +} + +func (d *Document) ResolveUnderlyingType(ref int) (typeRef int) { + typeRef = ref + graphqlType := d.Types[ref] + for graphqlType.TypeKind != TypeKindNamed { + typeRef = graphqlType.OfType + graphqlType = d.Types[typeRef] + + } + return +} + +func (d *Document) ResolveListOrNameType(ref int) (typeRef int) { + typeRef = ref + graphqlType := d.Types[ref] + for (graphqlType.TypeKind != TypeKindNamed) && (graphqlType.TypeKind != TypeKindList) { + typeRef = graphqlType.OfType + graphqlType = d.Types[typeRef] + } + return +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_union_type_definition.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_union_type_definition.go new file mode 100644 index 00000000000..eedb81463f5 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_union_type_definition.go @@ -0,0 +1,111 @@ +package ast + +import ( + "bytes" + + "github.com/TykTechnologies/graphql-go-tools/internal/pkg/unsafebytes" + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/position" +) + +// UnionTypeDefinition +// example: +// union SearchResult = Photo | Person +type UnionTypeDefinition struct { + Description Description // optional, describes union + UnionLiteral position.Position // union + Name ByteSliceReference // e.g. SearchResult + HasDirectives bool + Directives DirectiveList // optional, e.g. @foo + Equals position.Position // = + HasUnionMemberTypes bool + UnionMemberTypes TypeList // optional, e.g. Photo | Person + HasFieldDefinitions bool + FieldsDefinition FieldDefinitionList // contains a single field: { __typename: String! 
} +} + +func (d *Document) UnionTypeDefinitionNameBytes(ref int) ByteSlice { + return d.Input.ByteSlice(d.UnionTypeDefinitions[ref].Name) +} + +func (d *Document) UnionTypeDefinitionNameString(ref int) string { + return unsafebytes.BytesToString(d.Input.ByteSlice(d.UnionTypeDefinitions[ref].Name)) +} + +func (d *Document) UnionTypeDefinitionDescriptionBytes(ref int) ByteSlice { + if !d.UnionTypeDefinitions[ref].Description.IsDefined { + return nil + } + return d.Input.ByteSlice(d.UnionTypeDefinitions[ref].Description.Content) +} + +func (d *Document) UnionTypeDefinitionDescriptionString(ref int) string { + return unsafebytes.BytesToString(d.UnionTypeDefinitionDescriptionBytes(ref)) +} + +func (d *Document) UnionTypeDefinitionHasField(ref int, fieldName []byte) bool { + for _, fieldRef := range d.UnionTypeDefinitions[ref].FieldsDefinition.Refs { + if bytes.Equal(d.FieldDefinitionNameBytes(fieldRef), fieldName) { + return true + } + } + return false +} + +func (d *Document) UnionMemberTypeIsFirst(ref int, ancestor Node) bool { + switch ancestor.Kind { + case NodeKindUnionTypeDefinition: + return len(d.UnionTypeDefinitions[ancestor.Ref].UnionMemberTypes.Refs) != 0 && + d.UnionTypeDefinitions[ancestor.Ref].UnionMemberTypes.Refs[0] == ref + case NodeKindUnionTypeExtension: + return len(d.UnionTypeExtensions[ancestor.Ref].UnionMemberTypes.Refs) != 0 && + d.UnionTypeExtensions[ancestor.Ref].UnionMemberTypes.Refs[0] == ref + default: + return false + } +} + +func (d *Document) UnionMemberTypeIsLast(ref int, ancestor Node) bool { + switch ancestor.Kind { + case NodeKindUnionTypeDefinition: + return len(d.UnionTypeDefinitions[ancestor.Ref].UnionMemberTypes.Refs) != 0 && + d.UnionTypeDefinitions[ancestor.Ref].UnionMemberTypes.Refs[len(d.UnionTypeDefinitions[ancestor.Ref].UnionMemberTypes.Refs)-1] == ref + case NodeKindUnionTypeExtension: + return len(d.UnionTypeExtensions[ancestor.Ref].UnionMemberTypes.Refs) != 0 && + d.UnionTypeExtensions[ancestor.Ref].UnionMemberTypes.Refs[len(d.UnionTypeExtensions[ancestor.Ref].UnionMemberTypes.Refs)-1] == ref + default: + return false + } +} + +func (d *Document) UnionTypeDefinitionHasDirectives(ref int) bool { + return d.UnionTypeDefinitions[ref].HasDirectives +} + +func (d *Document) AddUnionTypeDefinition(definition UnionTypeDefinition) (ref int) { + d.UnionTypeDefinitions = append(d.UnionTypeDefinitions, definition) + return len(d.UnionTypeDefinitions) - 1 +} + +func (d *Document) ImportUnionTypeDefinition(name, description string, typeRefs []int) (ref int) { + return d.ImportUnionTypeDefinitionWithDirectives(name, description, typeRefs, nil) +} + +func (d *Document) ImportUnionTypeDefinitionWithDirectives(name, description string, typeRefs []int, directiveRefs []int) (ref int) { + definition := UnionTypeDefinition{ + Name: d.Input.AppendInputString(name), + Description: d.ImportDescription(description), + HasUnionMemberTypes: len(typeRefs) > 0, + UnionMemberTypes: TypeList{ + Refs: typeRefs, + }, + HasDirectives: len(directiveRefs) > 0, + Directives: DirectiveList{ + Refs: directiveRefs, + }, + } + + ref = d.AddUnionTypeDefinition(definition) + d.ImportRootNode(ref, NodeKindUnionTypeDefinition) + + return +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_union_type_extension.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_union_type_extension.go new file mode 100644 index 00000000000..8f45944eb6d --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_union_type_extension.go @@ -0,0 
+1,62 @@ +package ast + +import ( + "github.com/TykTechnologies/graphql-go-tools/internal/pkg/unsafebytes" + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/position" +) + +type UnionTypeExtension struct { + ExtendLiteral position.Position + UnionTypeDefinition +} + +func (d *Document) UnionTypeExtensionNameBytes(ref int) ByteSlice { + return d.Input.ByteSlice(d.UnionTypeExtensions[ref].Name) +} + +func (d *Document) UnionTypeExtensionNameString(ref int) string { + return unsafebytes.BytesToString(d.Input.ByteSlice(d.UnionTypeExtensions[ref].Name)) +} + +func (d *Document) UnionTypeExtensionDescriptionBytes(ref int) ByteSlice { + if !d.UnionTypeExtensions[ref].Description.IsDefined { + return nil + } + return d.Input.ByteSlice(d.UnionTypeExtensions[ref].Description.Content) +} + +func (d *Document) UnionTypeExtensionDescriptionString(ref int) string { + return unsafebytes.BytesToString(d.UnionTypeExtensionDescriptionBytes(ref)) +} + +func (d *Document) UnionTypeExtensionHasUnionMemberTypes(ref int) bool { + return d.UnionTypeExtensions[ref].HasUnionMemberTypes +} + +func (d *Document) UnionTypeExtensionHasDirectives(ref int) bool { + return d.UnionTypeExtensions[ref].HasDirectives +} + +func (d *Document) ExtendUnionTypeDefinitionByUnionTypeExtension(unionTypeDefinitionRef, unionTypeExtensionRef int) { + if d.UnionTypeExtensionHasDirectives(unionTypeExtensionRef) { + d.UnionTypeDefinitions[unionTypeDefinitionRef].Directives.Refs = append(d.UnionTypeDefinitions[unionTypeDefinitionRef].Directives.Refs, d.UnionTypeExtensions[unionTypeExtensionRef].Directives.Refs...) + d.UnionTypeDefinitions[unionTypeDefinitionRef].HasDirectives = true + } + + if d.UnionTypeExtensionHasUnionMemberTypes(unionTypeExtensionRef) { + d.UnionTypeDefinitions[unionTypeDefinitionRef].UnionMemberTypes.Refs = append(d.UnionTypeDefinitions[unionTypeDefinitionRef].UnionMemberTypes.Refs, d.UnionTypeExtensions[unionTypeExtensionRef].UnionMemberTypes.Refs...) 
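+	// The extension's member type refs have been merged above, so mark the definition as having union member types.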
+ d.UnionTypeDefinitions[unionTypeDefinitionRef].HasUnionMemberTypes = true + } + + d.Index.MergedTypeExtensions = append(d.Index.MergedTypeExtensions, Node{Ref: unionTypeExtensionRef, Kind: NodeKindUnionTypeExtension}) +} + +func (d *Document) ImportAndExtendUnionTypeDefinitionByUnionTypeExtension(unionTypeExtensionRef int) { + d.ImportUnionTypeDefinitionWithDirectives( + d.UnionTypeExtensionNameString(unionTypeExtensionRef), + d.UnionTypeExtensionDescriptionString(unionTypeExtensionRef), + d.UnionTypeExtensions[unionTypeExtensionRef].UnionMemberTypes.Refs, + d.UnionTypeExtensions[unionTypeExtensionRef].Directives.Refs, + ) + d.Index.MergedTypeExtensions = append(d.Index.MergedTypeExtensions, Node{Ref: unionTypeExtensionRef, Kind: NodeKindUnionTypeExtension}) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_val_boolean_value.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_val_boolean_value.go new file mode 100644 index 00000000000..f051a3f86bc --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_val_boolean_value.go @@ -0,0 +1,13 @@ +package ast + +// BooleanValue +// one of: true, false +type BooleanValue bool + +func (d *Document) BooleanValue(ref int) BooleanValue { + return d.BooleanValues[ref] +} + +func (d *Document) BooleanValuesAreEqual(left, right int) bool { + return d.BooleanValue(left) == d.BooleanValue(right) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_val_enum_value.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_val_enum_value.go new file mode 100644 index 00000000000..169da2d56ea --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_val_enum_value.go @@ -0,0 +1,45 @@ +package ast + +import ( + "github.com/TykTechnologies/graphql-go-tools/internal/pkg/unsafebytes" +) + +// EnumValue +// example: +// Name but not true or false or null +type EnumValue struct { + Name ByteSliceReference // e.g. 
ORIGIN +} + +func (d *Document) CopyEnumValue(ref int) int { + return d.AddEnumValue(EnumValue{ + Name: d.copyByteSliceReference(d.EnumValues[ref].Name), + }) +} + +func (d *Document) EnumValueName(ref int) ByteSliceReference { + return d.EnumValues[ref].Name +} + +func (d *Document) EnumValueNameBytes(ref int) ByteSlice { + return d.Input.ByteSlice(d.EnumValues[ref].Name) +} + +func (d *Document) EnumValueNameString(ref int) string { + return unsafebytes.BytesToString(d.Input.ByteSlice(d.EnumValues[ref].Name)) +} + +func (d *Document) EnumValuesAreEqual(left, right int) bool { + return d.Input.ByteSliceReferenceContentEquals(d.EnumValueName(left), d.EnumValueName(right)) +} + +func (d *Document) AddEnumValue(value EnumValue) (ref int) { + d.EnumValues = append(d.EnumValues, value) + return len(d.EnumValues) - 1 +} + +func (d *Document) ImportEnumValue(name ByteSlice) (ref int) { + return d.AddEnumValue(EnumValue{ + Name: d.Input.AppendInputBytes(name), + }) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_val_float_value.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_val_float_value.go new file mode 100644 index 00000000000..fbb01ebb735 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_val_float_value.go @@ -0,0 +1,58 @@ +package ast + +import ( + "bytes" + + "github.com/TykTechnologies/graphql-go-tools/internal/pkg/unsafebytes" + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/position" +) + +// FloatValue +// example: +// 13.37 / -13.37 +type FloatValue struct { + Negative bool // indicates if the value is negative + NegativeSign position.Position // optional - + Raw ByteSliceReference // e.g. 13.37 +} + +func (d *Document) CopyFloatValue(ref int) int { + return d.AddFloatValue(FloatValue{ + Negative: d.FloatValues[ref].Negative, + Raw: d.copyByteSliceReference(d.FloatValues[ref].Raw), + }) +} + +func (d *Document) FloatValueAsFloat32(ref int) (out float32) { + in := d.Input.ByteSlice(d.FloatValues[ref].Raw) + out = unsafebytes.BytesToFloat32(in) + if d.FloatValues[ref].Negative { + out = -out + } + return +} + +func (d *Document) FloatValueIsNegative(ref int) bool { + return d.FloatValues[ref].Negative +} + +func (d *Document) FloatValueRaw(ref int) ByteSlice { + return d.Input.ByteSlice(d.FloatValues[ref].Raw) +} + +func (d *Document) FloatValuesAreEqual(left, right int) bool { + return d.FloatValueIsNegative(left) == d.FloatValueIsNegative(right) && + bytes.Equal(d.FloatValueRaw(left), d.FloatValueRaw(right)) +} + +func (d *Document) AddFloatValue(value FloatValue) (ref int) { + d.FloatValues = append(d.FloatValues, value) + return len(d.FloatValues) - 1 +} + +func (d *Document) ImportFloatValue(raw ByteSlice, isNegative bool) (ref int) { + return d.AddFloatValue(FloatValue{ + Negative: isNegative, + Raw: d.Input.AppendInputBytes(raw), + }) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_val_int_value.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_val_int_value.go new file mode 100644 index 00000000000..6c99631a5db --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_val_int_value.go @@ -0,0 +1,76 @@ +package ast + +import ( + "bytes" + + "github.com/TykTechnologies/graphql-go-tools/internal/pkg/unsafebytes" + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/position" +) + +// IntValue +// example: +// 123 / -123 +type IntValue struct { + Negative bool // indicates if the value is negative + NegativeSign 
position.Position // optional - + Raw ByteSliceReference // e.g. 123 +} + +func (d *Document) CopyIntValue(ref int) int { + return d.AddIntValue(IntValue{ + Negative: d.IntValues[ref].Negative, + Raw: d.copyByteSliceReference(d.IntValues[ref].Raw), + }) +} + +func (d *Document) IntValueAsInt(ref int) (out int64) { + in := d.Input.ByteSlice(d.IntValues[ref].Raw) + out = unsafebytes.BytesToInt64(in) + if d.IntValues[ref].Negative { + out = -out + } + return +} + +func (d *Document) IntValueAsInt32(ref int) (out int32) { + in := d.Input.ByteSlice(d.IntValues[ref].Raw) + out = unsafebytes.BytesToInt32(in) + if d.IntValues[ref].Negative { + out = -out + } + return +} + +func (d *Document) IntValueValidInt32(ref int) bool { + in := d.Input.ByteSlice(d.IntValues[ref].Raw) + return unsafebytes.BytesIsValidInt32(in) +} + +func (d *Document) IntValue(ref int) IntValue { + return d.IntValues[ref] +} + +func (d *Document) IntValueIsNegative(ref int) bool { + return d.IntValues[ref].Negative +} + +func (d *Document) IntValueRaw(ref int) ByteSlice { + return d.Input.ByteSlice(d.IntValues[ref].Raw) +} + +func (d *Document) IntValuesAreEquals(left, right int) bool { + return d.IntValueIsNegative(left) == d.IntValueIsNegative(right) && + bytes.Equal(d.IntValueRaw(left), d.IntValueRaw(right)) +} + +func (d *Document) AddIntValue(value IntValue) (ref int) { + d.IntValues = append(d.IntValues, value) + return len(d.IntValues) - 1 +} + +func (d *Document) ImportIntValue(raw ByteSlice, isNegative bool) (ref int) { + return d.AddIntValue(IntValue{ + Negative: isNegative, + Raw: d.Input.AppendInputBytes(raw), + }) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_val_list_value.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_val_list_value.go new file mode 100644 index 00000000000..5a6e0476b9d --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_val_list_value.go @@ -0,0 +1,45 @@ +package ast + +import "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/position" + +type ListValue struct { + LBRACK position.Position // [ + Refs []int // Value + RBRACK position.Position // ] +} + +func (d *Document) CopyListValue(ref int) int { + refs := d.NewEmptyRefs() + for _, r := range d.ListValues[ref].Refs { + refs = append(refs, d.CopyValue(r)) + } + return d.AddListValue(ListValue{ + Refs: refs, + }) +} + +func (d *Document) ListValuesAreEqual(left, right int) bool { + leftValues, rightValues := d.ListValues[left].Refs, d.ListValues[right].Refs + if len(leftValues) != len(rightValues) { + return false + } + for i := 0; i < len(leftValues); i++ { + left, right = leftValues[i], rightValues[i] + leftValue, rightValue := d.Value(left), d.Value(right) + if !d.ValuesAreEqual(leftValue, rightValue) { + return false + } + } + return true +} + +func (d *Document) AddListValue(value ListValue) (ref int) { + d.ListValues = append(d.ListValues, value) + return len(d.ListValues) - 1 +} + +func (d *Document) ImportListValue(valueRefs []int) (ref int) { + return d.AddListValue(ListValue{ + Refs: valueRefs, + }) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_val_object_value.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_val_object_value.go new file mode 100644 index 00000000000..32bede24f70 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_val_object_value.go @@ -0,0 +1,33 @@ +package ast + +import "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/position" + +// 
ObjectValue +// example: +// { lon: 12.43, lat: -53.211 } +type ObjectValue struct { + LBRACE position.Position + Refs []int // ObjectField + RBRACE position.Position +} + +func (d *Document) CopyObjectValue(ref int) int { + refs := d.NewEmptyRefs() + for _, r := range d.ObjectValues[ref].Refs { + refs = append(refs, d.CopyObjectField(r)) + } + return d.AddObjectValue(ObjectValue{ + Refs: refs, + }) +} + +func (d *Document) AddObjectValue(value ObjectValue) (ref int) { + d.ObjectValues = append(d.ObjectValues, value) + return len(d.ObjectValues) - 1 +} + +func (d *Document) ImportObjectValue(fieldRefs []int) (ref int) { + return d.AddObjectValue(ObjectValue{ + Refs: fieldRefs, + }) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_val_string_value.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_val_string_value.go new file mode 100644 index 00000000000..b336c181caa --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_val_string_value.go @@ -0,0 +1,55 @@ +package ast + +import ( + "bytes" + + "github.com/TykTechnologies/graphql-go-tools/internal/pkg/unsafebytes" +) + +// StringValue +// example: +// "foo" +type StringValue struct { + BlockString bool // """foo""" = blockString, "foo" string + Content ByteSliceReference // e.g. foo +} + +func (d *Document) CopyStringValue(ref int) int { + return d.AddStringValue(StringValue{ + BlockString: d.StringValues[ref].BlockString, + Content: d.copyByteSliceReference(d.StringValues[ref].Content), + }) +} + +func (d *Document) StringValue(ref int) StringValue { + return d.StringValues[ref] +} + +func (d *Document) StringValueContentBytes(ref int) ByteSlice { + return d.Input.ByteSlice(d.StringValues[ref].Content) +} + +func (d *Document) StringValueContentString(ref int) string { + return unsafebytes.BytesToString(d.StringValueContentBytes(ref)) +} + +func (d *Document) StringValueIsBlockString(ref int) bool { + return d.StringValues[ref].BlockString +} + +func (d *Document) StringValuesAreEquals(left, right int) bool { + return d.StringValueIsBlockString(left) == d.StringValueIsBlockString(right) && + bytes.Equal(d.StringValueContentBytes(left), d.StringValueContentBytes(right)) +} + +func (d *Document) AddStringValue(value StringValue) (ref int) { + d.StringValues = append(d.StringValues, value) + return len(d.StringValues) - 1 +} + +func (d *Document) ImportStringValue(raw ByteSlice, isBlockString bool) (ref int) { + return d.AddStringValue(StringValue{ + BlockString: isBlockString, + Content: d.Input.AppendInputBytes(raw), + }) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_val_variable_value.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_val_variable_value.go new file mode 100644 index 00000000000..1019abc77e8 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_val_variable_value.go @@ -0,0 +1,63 @@ +package ast + +import ( + "bytes" + + "github.com/TykTechnologies/graphql-go-tools/internal/pkg/unsafebytes" + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/position" +) + +// VariableValue +// example: +// $devicePicSize +type VariableValue struct { + Dollar position.Position // $ + Name ByteSliceReference // e.g. 
devicePicSize +} + +func (d *Document) CopyVariableValue(ref int) int { + return d.AddVariableValue(VariableValue{ + Name: d.copyByteSliceReference(d.VariableValues[ref].Name), + }) +} + +func (d *Document) VariableValueNameBytes(ref int) ByteSlice { + return d.Input.ByteSlice(d.VariableValues[ref].Name) +} + +func (d *Document) VariableValueNameString(ref int) string { + return unsafebytes.BytesToString(d.Input.ByteSlice(d.VariableValues[ref].Name)) +} + +func (d *Document) VariableValuesAreEqual(left, right int) bool { + return bytes.Equal(d.VariableValueNameBytes(left), d.VariableValueNameBytes(right)) +} + +func (d *Document) AddVariableValue(value VariableValue) (ref int) { + d.VariableValues = append(d.VariableValues, value) + return len(d.VariableValues) - 1 +} + +func (d *Document) ImportVariableValue(name ByteSlice) (ref int) { + return d.AddVariableValue(VariableValue{ + Name: d.Input.AppendInputBytes(name), + }) +} + +func (d *Document) AddVariableValueArgument(argName, variableName []byte) (variableValueRef, argRef int) { + variable := VariableValue{ + Name: d.Input.AppendInputBytes(variableName), + } + d.VariableValues = append(d.VariableValues, variable) + variableValueRef = len(d.VariableValues) - 1 + arg := Argument{ + Name: d.Input.AppendInputBytes(argName), + Value: Value{ + Kind: ValueKindVariable, + Ref: variableValueRef, + }, + } + d.Arguments = append(d.Arguments, arg) + argRef = len(d.Arguments) - 1 + return +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_value.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_value.go new file mode 100644 index 00000000000..b8ecf388422 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_value.go @@ -0,0 +1,314 @@ +package ast + +import ( + "bytes" + "fmt" + "io" + + "github.com/tidwall/sjson" + + "github.com/TykTechnologies/graphql-go-tools/internal/pkg/quotes" + "github.com/TykTechnologies/graphql-go-tools/internal/pkg/unsafebytes" + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/literal" + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/position" +) + +type ValueKind int + +const ( + ValueKindUnknown ValueKind = 4 + iota + ValueKindString + ValueKindBoolean + ValueKindInteger + ValueKindFloat + ValueKindVariable + ValueKindNull + ValueKindList + ValueKindObject + ValueKindEnum +) + +type Value struct { + Kind ValueKind // e.g. 100 or "Bar" + Ref int + Position position.Position +} + +func (d *Document) CopyValue(ref int) int { + return d.AddValue(Value{ + Kind: d.Values[ref].Kind, + Ref: d.copyValueRef(d.Values[ref].Kind, d.Values[ref].Ref), + }) +} + +func (d *Document) copyValueRef(kind ValueKind, valueRef int) int { + switch kind { + case ValueKindString: + return d.CopyStringValue(valueRef) + case ValueKindBoolean: + // Nothing to copy! + return valueRef + case ValueKindInteger: + return d.CopyIntValue(valueRef) + case ValueKindFloat: + return d.CopyFloatValue(valueRef) + case ValueKindVariable: + return d.CopyVariableValue(valueRef) + case ValueKindNull: + // Nothing to copy! 
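+		// A null value carries no backing payload, so InvalidRef is returned instead of a copied ref.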
+ return InvalidRef + case ValueKindList: + return d.CopyListValue(valueRef) + case ValueKindObject: + return d.CopyObjectValue(valueRef) + case ValueKindEnum: + return d.CopyEnumValue(valueRef) + default: + return InvalidRef + } +} + +func (d *Document) ValueContentBytes(value Value) ByteSlice { + switch value.Kind { + case ValueKindEnum: + return d.EnumValueNameBytes(value.Ref) + case ValueKindString: + return d.StringValueContentBytes(value.Ref) + case ValueKindInteger: + return d.IntValueRaw(value.Ref) + case ValueKindFloat: + return d.FloatValueRaw(value.Ref) + } + panic(fmt.Errorf("ValueContentBytes not implemented for ValueKind: %s", value.Kind)) +} + +func (d *Document) ValueContentString(value Value) string { + return unsafebytes.BytesToString(d.ValueContentBytes(value)) +} + +func (d *Document) ValueContainsVariable(value Value) bool { + switch value.Kind { + case ValueKindEnum: + return false + case ValueKindBoolean: + return false + case ValueKindFloat: + return false + case ValueKindList: + for _, ref := range d.ListValues[value.Ref].Refs { + if d.ValueContainsVariable(d.Value(ref)) { + return true + } + } + return false + case ValueKindObject: + for _, ref := range d.ObjectValues[value.Ref].Refs { + if d.ValueContainsVariable(d.ObjectFields[ref].Value) { + return true + } + } + return false + case ValueKindInteger: + return false + case ValueKindNull: + return false + case ValueKindString: + return false + case ValueKindVariable: + return true + default: + return false + } +} + +func (d *Document) ValueToJSON(value Value) ([]byte, error) { + switch value.Kind { + case ValueKindNull: + return literal.NULL, nil + case ValueKindEnum: + return quotes.WrapBytes(d.EnumValueNameBytes(value.Ref)), nil + case ValueKindInteger: + intValueBytes := d.IntValueRaw(value.Ref) + if d.IntValueIsNegative(value.Ref) { + return append(literal.SUB, intValueBytes...), nil + } + return intValueBytes, nil + case ValueKindFloat: + floatValueBytes := d.FloatValueRaw(value.Ref) + if d.FloatValueIsNegative(value.Ref) { + return append(literal.SUB, floatValueBytes...), nil + } + return floatValueBytes, nil + case ValueKindBoolean: + if value.Ref == 0 { + return literal.FALSE, nil + } + return literal.TRUE, nil + case ValueKindString: + return quotes.WrapBytes(d.StringValueContentBytes(value.Ref)), nil + case ValueKindList: + out := []byte("[]") + for _, i := range d.ListValues[value.Ref].Refs { + item, err := d.ValueToJSON(d.Values[i]) + if err != nil { + return nil, err + } + out, err = sjson.SetRawBytes(out, "-1", item) + if err != nil { + return nil, err + } + } + return out, nil + case ValueKindObject: + out := []byte("{}") + for i := len(d.ObjectValues[value.Ref].Refs) - 1; i >= 0; i-- { + ref := d.ObjectValues[value.Ref].Refs[i] + fieldNameString := d.ObjectFieldNameString(ref) + fieldValueBytes, err := d.ValueToJSON(d.ObjectFieldValue(ref)) + if err != nil { + return nil, err + } + out, err = sjson.SetRawBytes(out, fieldNameString, fieldValueBytes) + if err != nil { + return nil, err + } + } + return out, nil + default: + return nil, fmt.Errorf("ValueToJSON: not implemented for kind: %s", value.Kind.String()) + } +} + +// nolint +func (d *Document) PrintValue(value Value, w io.Writer) (err error) { + switch value.Kind { + case ValueKindBoolean: + if d.BooleanValues[value.Ref] { + _, err = w.Write(literal.TRUE) + } else { + _, err = w.Write(literal.FALSE) + } + case ValueKindString: + // This code assumes string content is valid for the associated string + // type (block/non-block) according to the 
GraphQL spec. Content IS NOT + // processed to quote characters that are invalid for the associated + // type. + // + // GraphQL spec: https://spec.graphql.org/June2018/#StringValue + isBlockString := d.StringValues[value.Ref].BlockString + _, err = w.Write(literal.QUOTE) + if isBlockString { + _, err = w.Write(literal.QUOTE) + _, err = w.Write(literal.QUOTE) + } + _, err = w.Write(d.Input.ByteSlice(d.StringValues[value.Ref].Content)) + _, err = w.Write(literal.QUOTE) + if isBlockString { + _, err = w.Write(literal.QUOTE) + _, err = w.Write(literal.QUOTE) + } + case ValueKindInteger: + if d.IntValues[value.Ref].Negative { + _, err = w.Write(literal.SUB) + } + _, err = w.Write(d.Input.ByteSlice(d.IntValues[value.Ref].Raw)) + case ValueKindFloat: + if d.FloatValues[value.Ref].Negative { + _, err = w.Write(literal.SUB) + } + _, err = w.Write(d.Input.ByteSlice(d.FloatValues[value.Ref].Raw)) + case ValueKindVariable: + _, err = w.Write(literal.DOLLAR) + _, err = w.Write(d.Input.ByteSlice(d.VariableValues[value.Ref].Name)) + case ValueKindNull: + _, err = w.Write(literal.NULL) + case ValueKindList: + _, err = w.Write(literal.LBRACK) + for i, j := range d.ListValues[value.Ref].Refs { + err = d.PrintValue(d.Value(j), w) + if err != nil { + return + } + if i != len(d.ListValues[value.Ref].Refs)-1 { + _, err = w.Write(literal.COMMA) + } + } + _, err = w.Write(literal.RBRACK) + case ValueKindObject: + _, err = w.Write(literal.LBRACE) + for i, j := range d.ObjectValues[value.Ref].Refs { + _, err = w.Write(d.ObjectFieldNameBytes(j)) + if err != nil { + return + } + _, err = w.Write(literal.COLON) + if err != nil { + return + } + _, err = w.Write(literal.SPACE) + if err != nil { + return + } + err = d.PrintValue(d.ObjectFieldValue(j), w) + if err != nil { + return + } + if i != len(d.ObjectValues[value.Ref].Refs)-1 { + _, err = w.Write(literal.COMMA) + if err != nil { + return + } + } + } + _, err = w.Write(literal.RBRACE) + case ValueKindEnum: + _, err = w.Write(d.Input.ByteSlice(d.EnumValues[value.Ref].Name)) + } + return +} + +func (d *Document) PrintValueBytes(value Value, buf []byte) ([]byte, error) { + if buf == nil { + buf = make([]byte, 0, 24) + } + b := bytes.NewBuffer(buf) + err := d.PrintValue(value, b) + return b.Bytes(), err +} + +func (d *Document) Value(ref int) Value { + return d.Values[ref] +} + +func (d *Document) ValuesAreEqual(left, right Value) bool { + if left.Kind != right.Kind { + return false + } + switch left.Kind { + case ValueKindString: + return d.StringValuesAreEquals(left.Ref, right.Ref) + case ValueKindBoolean: + return d.BooleanValuesAreEqual(left.Ref, right.Ref) + case ValueKindInteger: + return d.IntValuesAreEquals(left.Ref, right.Ref) + case ValueKindFloat: + return d.FloatValuesAreEqual(left.Ref, right.Ref) + case ValueKindVariable: + return d.VariableValuesAreEqual(left.Ref, right.Ref) + case ValueKindNull: + return true + case ValueKindList: + return d.ListValuesAreEqual(left.Ref, right.Ref) + case ValueKindObject: + return d.ObjectValuesAreEqual(left.Ref, right.Ref) + case ValueKindEnum: + return d.EnumValuesAreEqual(left.Ref, right.Ref) + default: + return false + } +} + +func (d *Document) AddValue(value Value) (ref int) { + d.Values = append(d.Values, value) + return len(d.Values) - 1 +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_variable_definition.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_variable_definition.go new file mode 100644 index 00000000000..9aa05145ded --- /dev/null +++ 
b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/ast_variable_definition.go @@ -0,0 +1,77 @@ +package ast + +import ( + "bytes" + + "github.com/TykTechnologies/graphql-go-tools/internal/pkg/unsafebytes" + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/position" +) + +type VariableDefinitionList struct { + LPAREN position.Position // ( + Refs []int // VariableDefinition + RPAREN position.Position // ) +} + +// VariableDefinition +// example: +// $devicePicSize: Int = 100 @small +type VariableDefinition struct { + VariableValue Value // $ Name + Colon position.Position // : + Type int // e.g. String + DefaultValue DefaultValue // optional, e.g. = "Default" + HasDirectives bool + Directives DirectiveList // optional, e.g. @foo +} + +func (d *Document) VariableDefinitionNameBytes(ref int) ByteSlice { + return d.VariableValueNameBytes(d.VariableDefinitions[ref].VariableValue.Ref) +} + +func (d *Document) VariableDefinitionNameString(ref int) string { + return unsafebytes.BytesToString(d.VariableValueNameBytes(d.VariableDefinitions[ref].VariableValue.Ref)) +} + +func (d *Document) VariableDefinitionHasDefaultValue(ref int) bool { + return d.VariableDefinitions[ref].DefaultValue.IsDefined +} + +func (d *Document) VariableDefinitionDefaultValue(ref int) Value { + return d.VariableDefinitions[ref].DefaultValue.Value +} + +func (d *Document) VariableDefinitionByNameAndOperation(operationDefinition int, name ByteSlice) (definition int, exists bool) { + if !d.OperationDefinitions[operationDefinition].HasVariableDefinitions { + return -1, false + } + for _, i := range d.OperationDefinitions[operationDefinition].VariableDefinitions.Refs { + definitionName := d.VariableValueNameBytes(d.VariableDefinitions[i].VariableValue.Ref) + if bytes.Equal(name, definitionName) { + return i, true + } + } + return -1, false +} + +func (d *Document) VariableDefinitionsBefore(variableDefinition int) bool { + for i := range d.OperationDefinitions { + for j, k := range d.OperationDefinitions[i].VariableDefinitions.Refs { + if k == variableDefinition { + return j != 0 + } + } + } + return false +} + +func (d *Document) VariableDefinitionsAfter(variableDefinition int) bool { + for i := range d.OperationDefinitions { + for j, k := range d.OperationDefinitions[i].VariableDefinitions.Refs { + if k == variableDefinition { + return j != len(d.OperationDefinitions[i].VariableDefinitions.Refs)-1 + } + } + } + return false +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/directive_location.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/directive_location.go new file mode 100644 index 00000000000..ef8d3e12db2 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/directive_location.go @@ -0,0 +1,162 @@ +//go:generate stringer -type=DirectiveLocation -output directive_location_string.go + +package ast + +import ( + "fmt" + + "github.com/TykTechnologies/graphql-go-tools/internal/pkg/unsafebytes" + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/literal" +) + +type DirectiveLocation int + +const ( + DirectiveLocationUnknown DirectiveLocation = iota + ExecutableDirectiveLocationQuery + ExecutableDirectiveLocationMutation + ExecutableDirectiveLocationSubscription + ExecutableDirectiveLocationField + ExecutableDirectiveLocationFragmentDefinition + ExecutableDirectiveLocationFragmentSpread + ExecutableDirectiveLocationInlineFragment + ExecutableDirectiveLocationVariableDefinition + + TypeSystemDirectiveLocationSchema + 
TypeSystemDirectiveLocationScalar + TypeSystemDirectiveLocationObject + TypeSystemDirectiveLocationFieldDefinition + TypeSystemDirectiveLocationArgumentDefinition + TypeSystemDirectiveLocationInterface + TypeSystemDirectiveLocationUnion + TypeSystemDirectiveLocationEnum + TypeSystemDirectiveLocationEnumValue + TypeSystemDirectiveLocationInputObject + TypeSystemDirectiveLocationInputFieldDefinition +) + +var ( + locations = map[string]DirectiveLocation{ + "QUERY": ExecutableDirectiveLocationQuery, + "MUTATION": ExecutableDirectiveLocationMutation, + "SUBSCRIPTION": ExecutableDirectiveLocationSubscription, + "FIELD": ExecutableDirectiveLocationField, + "FRAGMENT_DEFINITION": ExecutableDirectiveLocationFragmentDefinition, + "FRAGMENT_SPREAD": ExecutableDirectiveLocationFragmentSpread, + "INLINE_FRAGMENT": ExecutableDirectiveLocationInlineFragment, + "VARIABLE_DEFINITION": ExecutableDirectiveLocationVariableDefinition, + "SCHEMA": TypeSystemDirectiveLocationSchema, + "SCALAR": TypeSystemDirectiveLocationScalar, + "OBJECT": TypeSystemDirectiveLocationObject, + "FIELD_DEFINITION": TypeSystemDirectiveLocationFieldDefinition, + "ARGUMENT_DEFINITION": TypeSystemDirectiveLocationArgumentDefinition, + "INTERFACE": TypeSystemDirectiveLocationInterface, + "UNION": TypeSystemDirectiveLocationUnion, + "ENUM": TypeSystemDirectiveLocationEnum, + "ENUM_VALUE": TypeSystemDirectiveLocationEnumValue, + "INPUT_OBJECT": TypeSystemDirectiveLocationInputObject, + "INPUT_FIELD_DEFINITION": TypeSystemDirectiveLocationInputFieldDefinition, + } +) + +type DirectiveLocations struct { + storage [20]bool +} + +func (d *DirectiveLocations) Get(location DirectiveLocation) bool { + return d.storage[location] +} + +func (d *DirectiveLocations) Set(location DirectiveLocation) { + d.storage[location] = true +} + +func (d *DirectiveLocations) Unset(location DirectiveLocation) { + d.storage[location] = false +} + +func (d *DirectiveLocations) Iterable() DirectiveLocationIterable { + return DirectiveLocationIterable{ + locations: *d, + } +} + +func (d *DirectiveLocations) SetFromRaw(bytes []byte) error { + + location, exists := locations[string(bytes)] + if !exists { + return fmt.Errorf("invalid directive location: %s", string(bytes)) + } + + d.Set(location) + + return nil +} + +type DirectiveLocationIterable struct { + locations DirectiveLocations + current DirectiveLocation +} + +func (d *DirectiveLocationIterable) Next() bool { + for i := d.current + 1; i < 20; i++ { + if d.locations.storage[i] { + d.current = i + return true + } + } + return false +} + +func (d *DirectiveLocationIterable) Value() DirectiveLocation { + return d.current +} + +func (d DirectiveLocation) LiteralBytes() ByteSlice { + switch d { + case ExecutableDirectiveLocationQuery: + return literal.LocationQuery + case ExecutableDirectiveLocationMutation: + return literal.LocationMutation + case ExecutableDirectiveLocationSubscription: + return literal.LocationSubscription + case ExecutableDirectiveLocationField: + return literal.LocationField + case ExecutableDirectiveLocationFragmentDefinition: + return literal.LocationFragmentDefinition + case ExecutableDirectiveLocationFragmentSpread: + return literal.LocationFragmentSpread + case ExecutableDirectiveLocationInlineFragment: + return literal.LocationInlineFragment + case ExecutableDirectiveLocationVariableDefinition: + return literal.LocationVariableDefinition + case TypeSystemDirectiveLocationSchema: + return literal.LocationSchema + case TypeSystemDirectiveLocationScalar: + return literal.LocationScalar + 
case TypeSystemDirectiveLocationObject: + return literal.LocationObject + case TypeSystemDirectiveLocationFieldDefinition: + return literal.LocationFieldDefinition + case TypeSystemDirectiveLocationArgumentDefinition: + return literal.LocationArgumentDefinition + case TypeSystemDirectiveLocationInterface: + return literal.LocationInterface + case TypeSystemDirectiveLocationUnion: + return literal.LocationUnion + case TypeSystemDirectiveLocationEnum: + return literal.LocationEnum + case TypeSystemDirectiveLocationEnumValue: + return literal.LocationEnumValue + case TypeSystemDirectiveLocationInputObject: + return literal.LocationInputObject + case TypeSystemDirectiveLocationInputFieldDefinition: + return literal.LocationInputFieldDefinition + default: + return nil + } +} + +func (d DirectiveLocation) LiteralString() string { + return unsafebytes.BytesToString(d.LiteralBytes()) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/directive_location_string.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/directive_location_string.go new file mode 100644 index 00000000000..505cb729df8 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/directive_location_string.go @@ -0,0 +1,42 @@ +// Code generated by "stringer -type=DirectiveLocation"; DO NOT EDIT. + +package ast + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[DirectiveLocationUnknown-0] + _ = x[ExecutableDirectiveLocationQuery-1] + _ = x[ExecutableDirectiveLocationMutation-2] + _ = x[ExecutableDirectiveLocationSubscription-3] + _ = x[ExecutableDirectiveLocationField-4] + _ = x[ExecutableDirectiveLocationFragmentDefinition-5] + _ = x[ExecutableDirectiveLocationFragmentSpread-6] + _ = x[ExecutableDirectiveLocationInlineFragment-7] + _ = x[ExecutableDirectiveLocationVariableDefinition-8] + _ = x[TypeSystemDirectiveLocationSchema-9] + _ = x[TypeSystemDirectiveLocationScalar-10] + _ = x[TypeSystemDirectiveLocationObject-11] + _ = x[TypeSystemDirectiveLocationFieldDefinition-12] + _ = x[TypeSystemDirectiveLocationArgumentDefinition-13] + _ = x[TypeSystemDirectiveLocationInterface-14] + _ = x[TypeSystemDirectiveLocationUnion-15] + _ = x[TypeSystemDirectiveLocationEnum-16] + _ = x[TypeSystemDirectiveLocationEnumValue-17] + _ = x[TypeSystemDirectiveLocationInputObject-18] + _ = x[TypeSystemDirectiveLocationInputFieldDefinition-19] +} + +const _DirectiveLocation_name = "DirectiveLocationUnknownExecutableDirectiveLocationQueryExecutableDirectiveLocationMutationExecutableDirectiveLocationSubscriptionExecutableDirectiveLocationFieldExecutableDirectiveLocationFragmentDefinitionExecutableDirectiveLocationFragmentSpreadExecutableDirectiveLocationInlineFragmentExecutableDirectiveLocationVariableDefinitionTypeSystemDirectiveLocationSchemaTypeSystemDirectiveLocationScalarTypeSystemDirectiveLocationObjectTypeSystemDirectiveLocationFieldDefinitionTypeSystemDirectiveLocationArgumentDefinitionTypeSystemDirectiveLocationInterfaceTypeSystemDirectiveLocationUnionTypeSystemDirectiveLocationEnumTypeSystemDirectiveLocationEnumValueTypeSystemDirectiveLocationInputObjectTypeSystemDirectiveLocationInputFieldDefinition" + +var _DirectiveLocation_index = [...]uint16{0, 24, 56, 91, 130, 162, 207, 248, 289, 334, 367, 400, 433, 475, 520, 556, 588, 619, 655, 693, 740} + +func (d DirectiveLocation) String() string { + if d < 0 || d >= 
DirectiveLocation(len(_DirectiveLocation_index)-1) {
+ return "DirectiveLocation(" + strconv.FormatInt(int64(d), 10) + ")"
+ }
+ return _DirectiveLocation_name[_DirectiveLocation_index[d]:_DirectiveLocation_index[d+1]]
+}
diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/helpers.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/helpers.go
new file mode 100644
index 00000000000..1121d1aee78
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/helpers.go
@@ -0,0 +1,17 @@
+package ast
+
+// indexOf returns the position of ref within the refs slice and whether it was found
+func indexOf(refs []int, ref int) (int, bool) {
+ for i, j := range refs {
+ if ref == j {
+ return i, true
+ }
+ }
+ return -1, false
+}
+
+// deleteRef removes the element at index from refs while preserving the order of the remaining items.
+// Note: it modifies the slice in place through the pointer.
+func deleteRef(refs *[]int, index int) {
+ *refs = append((*refs)[:index], (*refs)[index+1:]...)
+}
diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/index.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/index.go
new file mode 100644
index 00000000000..b9d8bb03d2b
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/index.go
@@ -0,0 +1,174 @@
+package ast
+
+import (
+ "bytes"
+
+ "github.com/cespare/xxhash/v2"
+
+ "github.com/TykTechnologies/graphql-go-tools/internal/pkg/unsafebytes"
+)
+
+// Index is a struct to easily look up objects in a document, e.g. find Nodes (type/interface/union definitions) by name
+type Index struct {
+ // QueryTypeName is the name of the query type on the schema Node
+ // schema { query: Query }
+ QueryTypeName ByteSlice
+ // MutationTypeName is the name of the mutation type on the schema Node
+ // schema { mutation: Mutation }
+ MutationTypeName ByteSlice
+ // SubscriptionTypeName is the name of the subscription type on the schema Node
+ // schema { subscription: Subscription }
+ SubscriptionTypeName ByteSlice
+ // nodes maps the xxhash of a Node name to all root nodes in the schema definition carrying that name.
+ nodes map[uint64][]Node
+ // ReplacedFragmentSpreads is a list of references (slice indices) of all FragmentSpreads that got replaced during normalization.
+ ReplacedFragmentSpreads []int
+ // MergedTypeExtensions is a list of Nodes (Node kind + reference) that got merged during type extension merging.
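+ // Tracking these merged extensions lets later normalization steps (e.g. removeMergedTypeExtensions) drop the now-redundant extension nodes.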
+ MergedTypeExtensions []Node +} + +// Reset empties the Index +func (i *Index) Reset() { + i.QueryTypeName = i.QueryTypeName[:0] + i.MutationTypeName = i.MutationTypeName[:0] + i.SubscriptionTypeName = i.SubscriptionTypeName[:0] + i.ReplacedFragmentSpreads = i.ReplacedFragmentSpreads[:0] + i.MergedTypeExtensions = i.MergedTypeExtensions[:0] + for j := range i.nodes { + delete(i.nodes, j) + } +} + +func (i *Index) AddNodeStr(name string, node Node) { + hash := xxhash.Sum64String(name) + _, exists := i.nodes[hash] + if !exists { + i.nodes[hash] = []Node{node} + return + } + i.nodes[hash] = append(i.nodes[hash], node) +} + +func (i *Index) AddNodeBytes(name []byte, node Node) { + hash := xxhash.Sum64(name) + _, exists := i.nodes[hash] + if !exists { + i.nodes[hash] = []Node{node} + return + } + i.nodes[hash] = append(i.nodes[hash], node) +} + +func (i *Index) NodesByNameStr(name string) ([]Node, bool) { + hash := xxhash.Sum64String(name) + node, exists := i.nodes[hash] + return node, exists +} + +func (i *Index) FirstNodeByNameStr(name string) (Node, bool) { + hash := xxhash.Sum64String(name) + node, exists := i.nodes[hash] + if !exists || len(node) == 0 { + return InvalidNode, false + } + return node[0], true +} + +func (i *Index) NodesByNameBytes(name []byte) ([]Node, bool) { + hash := xxhash.Sum64(name) + node, exists := i.nodes[hash] + return node, exists +} + +func (i *Index) FirstNodeByNameBytes(name []byte) (Node, bool) { + hash := xxhash.Sum64(name) + node, exists := i.nodes[hash] + if !exists || len(node) == 0 { + return InvalidNode, false + } + return node[0], true +} + +func (i *Index) FirstNonExtensionNodeByNameBytes(name []byte) (Node, bool) { + hash := xxhash.Sum64(name) + nodes, exists := i.nodes[hash] + if !exists || len(nodes) == 0 { + return InvalidNode, false + } + + for j := range nodes { + if nodes[j].IsExtensionKind() { + continue + } + + return nodes[j], true + } + + return InvalidNode, false +} + +func (i *Index) RemoveNodeByName(name []byte) { + hash := xxhash.Sum64(name) + delete(i.nodes, hash) + + if bytes.Equal(i.QueryTypeName, name) { + i.QueryTypeName = nil + } + + if bytes.Equal(i.MutationTypeName, name) { + i.MutationTypeName = nil + } + + if bytes.Equal(i.SubscriptionTypeName, name) { + i.SubscriptionTypeName = nil + } +} + +func (i *Index) ReplaceNode(name []byte, oldNode Node, newNode Node) { + nodes, ok := i.nodes[xxhash.Sum64(name)] + if !ok { + return + } + + for i := range nodes { + if nodes[i].Kind != oldNode.Kind || nodes[i].Ref != oldNode.Ref { + continue + } + + nodes[i].Kind = newNode.Kind + nodes[i].Ref = newNode.Ref + } +} + +func (i *Index) IsRootOperationTypeNameBytes(typeName []byte) bool { + if len(typeName) == 0 { + return false + } + if bytes.Equal(i.QueryTypeName, typeName) { + return true + } + if bytes.Equal(i.MutationTypeName, typeName) { + return true + } + if bytes.Equal(i.SubscriptionTypeName, typeName) { + return true + } + return false +} + +func (i *Index) IsRootOperationTypeNameString(typeName string) bool { + if typeName == "" { + return false + } + if unsafebytes.BytesToString(i.QueryTypeName) == typeName { + return true + } + if unsafebytes.BytesToString(i.MutationTypeName) == typeName { + return true + } + if unsafebytes.BytesToString(i.SubscriptionTypeName) == typeName { + return true + } + return false +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/input.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/input.go new file mode 100644 index 00000000000..c3649466989 --- /dev/null +++ 
b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/input.go
@@ -0,0 +1,155 @@
+package ast
+
+import (
+ "bytes"
+
+ "github.com/TykTechnologies/graphql-go-tools/internal/pkg/unsafebytes"
+ "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/literal"
+ "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/position"
+)
+
+// Input is a raw graphql document containing the raw input plus metadata
+type Input struct {
+ // RawBytes is the raw byte input
+ RawBytes []byte
+ // Length of RawBytes
+ Length int
+ // InputPosition is the current position in the RawBytes
+ InputPosition int
+ // TextPosition is the current position within the text (line and character information about the current Tokens)
+ TextPosition position.Position
+ // Variables are the json encoded variables of the operation
+ Variables []byte
+}
+
+// Reset empties the Input
+func (i *Input) Reset() {
+ i.RawBytes = i.RawBytes[:0]
+ i.Variables = i.Variables[:0]
+ i.InputPosition = 0
+ i.TextPosition.Reset()
+}
+
+// ResetInputBytes empties the input and sets it to the bytes argument
+func (i *Input) ResetInputBytes(bytes []byte) {
+ i.Reset()
+ i.AppendInputBytes(bytes)
+ i.Length = len(i.RawBytes)
+}
+
+// ResetInputString empties the input and sets it to the input string.
+func (i *Input) ResetInputString(input string) {
+ i.ResetInputBytes([]byte(input))
+}
+
+// AppendInputBytes appends a byte slice to the current input and returns the ByteSliceReference
+func (i *Input) AppendInputBytes(bytes []byte) (ref ByteSliceReference) {
+ ref.Start = uint32(len(i.RawBytes))
+ i.RawBytes = append(i.RawBytes, bytes...)
+ i.Length = len(i.RawBytes)
+ ref.End = uint32(len(i.RawBytes))
+ return
+}
+
+// AppendInputString appends a string to the current input and returns the ByteSliceReference
+func (i *Input) AppendInputString(input string) ByteSliceReference {
+ return i.AppendInputBytes([]byte(input))
+}
+
+// ByteSlice returns the byte slice for a given ByteSliceReference
+func (i *Input) ByteSlice(reference ByteSliceReference) ByteSlice {
+ return i.RawBytes[reference.Start:reference.End]
+}
+
+// ByteSliceString returns a string for a given ByteSliceReference
+func (i *Input) ByteSliceString(reference ByteSliceReference) string {
+ return unsafebytes.BytesToString(i.ByteSlice(reference))
+}
+
+// ByteSliceReferenceContentEquals compares the contents referenced by two ByteSliceReferences within the same Input and returns true if they are equal
+func (i *Input) ByteSliceReferenceContentEquals(left, right ByteSliceReference) bool {
+ if left.Length() != right.Length() {
+ return false
+ }
+ length := int(left.Length())
+ for k := 0; k < length; k++ {
+ if i.RawBytes[int(left.Start)+k] != i.RawBytes[int(right.Start)+k] {
+ return false
+ }
+ }
+ return true
+}
+
+// ByteSlice is a named []byte type
+type ByteSlice []byte
+
+// Equals compares a ByteSlice to another
+func (b ByteSlice) Equals(another ByteSlice) bool {
+ if len(b) != len(another) {
+ return false
+ }
+ return bytes.Equal(b, another)
+}
+
+func (b ByteSlice) String() string {
+ return unsafebytes.BytesToString(b)
+}
+
+func (b ByteSlice) MarshalJSON() ([]byte, error) {
+ return append(append(literal.QUOTE, b...), literal.QUOTE...), nil
+}
+
+type ByteSlices []ByteSlice
+
+func (b ByteSlices) String() string {
+ out := "["
+ for i := range b {
+ if i != 0 {
+ out += ","
+ }
+ out += string(b[i])
+ }
+ out += "]"
+ return out
+}
+
+type ByteSliceReference struct {
+ Start uint32
+ End uint32
+}
+
+func (b ByteSliceReference) Length() uint32 {
+ return b.End - b.Start
+}
+
+// ByteSliceEquals compares
two ByteSliceReferences from different Inputs +func ByteSliceEquals(left ByteSliceReference, leftInput Input, right ByteSliceReference, rightInput Input) bool { + if left.Length() != right.Length() { + return false + } + length := int(left.Length()) + for i := 0; i < length; i++ { + if leftInput.RawBytes[int(left.Start)+i] != rightInput.RawBytes[int(right.Start)+i] { + return false + } + } + return true +} + +type ByteSliceReferences []ByteSliceReference + +func (b ByteSliceReferences) String(input *Input) string { + out := "[" + for i := range b { + if i != 0 { + out += "," + } + if b[i].Length() == 0 { + out += "query" + } else { + out += input.ByteSliceString(b[i]) + } + } + out += "]" + return out +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/path.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/path.go new file mode 100644 index 00000000000..b97fe89bced --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/ast/path.go @@ -0,0 +1,113 @@ +package ast + +import ( + "bytes" + "fmt" + "strconv" + "unsafe" + + "github.com/TykTechnologies/graphql-go-tools/internal/pkg/unsafebytes" +) + +type PathKind int + +const ( + UnknownPathKind PathKind = iota + ArrayIndex + FieldName +) + +type PathItem struct { + Kind PathKind + ArrayIndex int + FieldName ByteSlice +} + +type Path []PathItem + +func (p Path) Equals(another Path) bool { + if len(p) != len(another) { + return false + } + for i := range p { + if p[i].Kind != another[i].Kind { + return false + } + if p[i].Kind == ArrayIndex && p[i].ArrayIndex != another[i].ArrayIndex { + return false + } else if !bytes.Equal(p[i].FieldName, another[i].FieldName) { + return false + } + } + return true +} + +func (p Path) String() string { + out := "[" + for i := range p { + if i != 0 { + out += "," + } + switch p[i].Kind { + case ArrayIndex: + out += strconv.Itoa(p[i].ArrayIndex) + case FieldName: + if len(p[i].FieldName) == 0 { + out += "query" + } else { + out += unsafebytes.BytesToString(p[i].FieldName) + } + } + } + out += "]" + return out +} + +func (p Path) DotDelimitedString() string { + out := "" + for i := range p { + if i != 0 { + out += "." 
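+ // note: the switch below renders a zero-length FieldName as "query", mirroring Path.String()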
+ } + switch p[i].Kind { + case ArrayIndex: + out += strconv.Itoa(p[i].ArrayIndex) + case FieldName: + if len(p[i].FieldName) == 0 { + out += "query" + } else { + out += unsafebytes.BytesToString(p[i].FieldName) + } + } + } + return out +} + +func (p *PathItem) UnmarshalJSON(data []byte) error { + if data == nil { + return fmt.Errorf("data must not be nil") + } + if data[0] == '"' && data[len(data)-1] == '"' { + p.Kind = FieldName + p.FieldName = data[1 : len(data)-1] + return nil + } + out, err := strconv.ParseInt(*(*string)(unsafe.Pointer(&data)), 10, 32) + if err != nil { + return err + } + p.Kind = ArrayIndex + p.ArrayIndex = int(out) + return nil +} + +func (p PathItem) MarshalJSON() ([]byte, error) { + switch p.Kind { + case ArrayIndex: + return strconv.AppendInt(nil, int64(p.ArrayIndex), 10), nil + case FieldName: + return append([]byte("\""), append(p.FieldName, []byte("\"")...)...), nil + default: + return nil, fmt.Errorf("cannot marshal unknown PathKind") + } +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astimport/astimport.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astimport/astimport.go new file mode 100644 index 00000000000..28e6dcb7cfe --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astimport/astimport.go @@ -0,0 +1,229 @@ +// Package astimport can be used to import Nodes manually into an AST. +// +// This is useful when an AST should be created manually. +package astimport + +import ( + "fmt" + + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" +) + +// Importer imports Nodes into an existing AST. +// Always use NewImporter() to create a new Importer. +type Importer struct { +} + +func (i *Importer) ImportDirective(ref int, from, to *ast.Document) int { + name := string(from.Input.ByteSlice(from.Directives[ref].Name)) + args := i.ImportArguments(from.Directives[ref].Arguments.Refs, from, to) + return to.AddDirective(ast.Directive{ + Name: to.Input.AppendInputString(name), + HasArguments: len(args) != 0, + Arguments: ast.ArgumentList{ + Refs: args, + }, + }) +} + +func (i *Importer) ImportDirectiveWithRename(ref int, renameTo string, from, to *ast.Document) int { + args := i.ImportArguments(from.Directives[ref].Arguments.Refs, from, to) + return to.AddDirective(ast.Directive{ + Name: to.Input.AppendInputString(renameTo), + HasArguments: len(args) != 0, + Arguments: ast.ArgumentList{ + Refs: args, + }, + }) +} + +func (i *Importer) ImportType(ref int, from, to *ast.Document) int { + + astType := ast.Type{ + TypeKind: from.Types[ref].TypeKind, + OfType: -1, + } + + if astType.TypeKind == ast.TypeKindNamed { + astType.Name = to.Input.AppendInputBytes(from.TypeNameBytes(ref)) + } + + if from.Types[ref].OfType != -1 { + astType.OfType = i.ImportType(from.Types[ref].OfType, from, to) + } + + to.Types = append(to.Types, astType) + return len(to.Types) - 1 +} + +func (i *Importer) ImportTypeWithRename(ref int, from, to *ast.Document, renameTo string) int { + + astType := ast.Type{ + TypeKind: from.Types[ref].TypeKind, + OfType: -1, + } + + if astType.TypeKind == ast.TypeKindNamed { + astType.Name = to.Input.AppendInputString(renameTo) + } + + if from.Types[ref].OfType != -1 { + astType.OfType = i.ImportTypeWithRename(from.Types[ref].OfType, from, to, renameTo) + } + + to.Types = append(to.Types, astType) + return len(to.Types) - 1 +} + +func (i *Importer) ImportValue(fromValue ast.Value, from, to *ast.Document) (value ast.Value) { + value.Kind = fromValue.Kind + + switch fromValue.Kind { + case 
ast.ValueKindFloat: + value.Ref = to.ImportFloatValue( + from.FloatValueRaw(fromValue.Ref), + from.FloatValueIsNegative(fromValue.Ref)) + + case ast.ValueKindInteger: + value.Ref = to.ImportIntValue( + from.IntValueRaw(fromValue.Ref), + from.IntValueIsNegative(fromValue.Ref)) + + case ast.ValueKindBoolean: + value.Ref = fromValue.Ref + + case ast.ValueKindString: + value.Ref = to.ImportStringValue( + from.StringValueContentBytes(fromValue.Ref), + from.StringValueIsBlockString(fromValue.Ref)) + + case ast.ValueKindNull: + // empty case + + case ast.ValueKindEnum: + value.Ref = to.ImportEnumValue(from.EnumValueNameBytes(fromValue.Ref)) + + case ast.ValueKindVariable: + value.Ref = to.ImportVariableValue(from.VariableValueNameBytes(fromValue.Ref)) + + case ast.ValueKindList: + value.Ref = to.ImportListValue(i.ImportListValues(fromValue.Ref, from, to)) + + case ast.ValueKindObject: + value.Ref = to.ImportObjectValue(i.ImportObjectFields(fromValue.Ref, from, to)) + + default: + value.Kind = ast.ValueKindUnknown + fmt.Printf("astimport.Importer.ImportValue: not implemented for ValueKind: %s\n", fromValue.Kind) + } + return +} + +func (i *Importer) ImportObjectFields(ref int, from, to *ast.Document) (refs []int) { + objValue := from.ObjectValues[ref] + + for _, fieldRef := range objValue.Refs { + objectField := from.ObjectFields[fieldRef] + + refs = append(refs, to.ImportObjectField( + from.ObjectFieldNameBytes(fieldRef), + i.ImportValue(objectField.Value, from, to))) + } + return +} + +func (i *Importer) ImportListValues(ref int, from, to *ast.Document) (refs []int) { + listValue := from.ListValues[ref] + + for _, valueRef := range listValue.Refs { + value := i.ImportValue(from.Values[valueRef], from, to) + refs = append(refs, to.AddValue(value)) + } + return +} + +func (i *Importer) ImportArgument(ref int, from, to *ast.Document) int { + arg := ast.Argument{ + Name: to.Input.AppendInputBytes(from.ArgumentNameBytes(ref)), + Value: i.ImportValue(from.ArgumentValue(ref), from, to), + } + to.Arguments = append(to.Arguments, arg) + return len(to.Arguments) - 1 +} + +func (i *Importer) ImportArguments(refs []int, from, to *ast.Document) []int { + args := make([]int, len(refs)) + for j, k := range refs { + args[j] = i.ImportArgument(k, from, to) + } + return args +} + +func (i *Importer) ImportVariableDefinition(ref int, from, to *ast.Document) int { + + variableDefinition := ast.VariableDefinition{ + VariableValue: i.ImportValue(from.VariableDefinitions[ref].VariableValue, from, to), + Type: i.ImportType(from.VariableDefinitions[ref].Type, from, to), + DefaultValue: ast.DefaultValue{ + IsDefined: from.VariableDefinitions[ref].DefaultValue.IsDefined, + }, + // HasDirectives: false, //TODO: implement import directives + // Directives: ast.DirectiveList{}, + } + + if from.VariableDefinitions[ref].DefaultValue.IsDefined { + variableDefinition.DefaultValue.Value = i.ImportValue(from.VariableDefinitions[ref].DefaultValue.Value, from, to) + } + + to.VariableDefinitions = append(to.VariableDefinitions, variableDefinition) + return len(to.VariableDefinitions) - 1 +} + +func (i *Importer) ImportVariableDefinitionWithRename(ref int, from, to *ast.Document, renameTo string) int { + + variableDefinition := ast.VariableDefinition{ + VariableValue: i.ImportValue(from.VariableDefinitions[ref].VariableValue, from, to), + Type: i.ImportTypeWithRename(from.VariableDefinitions[ref].Type, from, to, renameTo), + DefaultValue: ast.DefaultValue{ + IsDefined: from.VariableDefinitions[ref].DefaultValue.IsDefined, + }, + // 
HasDirectives: false, //TODO: implement import directives
+ // Directives: ast.DirectiveList{},
+ }
+
+ if from.VariableDefinitions[ref].DefaultValue.IsDefined {
+ variableDefinition.DefaultValue.Value = i.ImportValue(from.VariableDefinitions[ref].DefaultValue.Value, from, to)
+ }
+
+ to.VariableDefinitions = append(to.VariableDefinitions, variableDefinition)
+ return len(to.VariableDefinitions) - 1
+}
+
+func (i *Importer) ImportVariableDefinitions(refs []int, from, to *ast.Document) []int {
+ definitions := make([]int, len(refs))
+ for j, k := range refs {
+ definitions[j] = i.ImportVariableDefinition(k, from, to)
+ }
+ return definitions
+}
+
+func (i *Importer) ImportField(ref int, from, to *ast.Document) int {
+ field := ast.Field{
+ Alias: ast.Alias{
+ IsDefined: from.FieldAliasIsDefined(ref),
+ },
+ Name: to.Input.AppendInputBytes(from.FieldNameBytes(ref)),
+ HasArguments: from.FieldHasArguments(ref),
+ // HasDirectives: from.FieldHasDirectives(ref), // HasDirectives: false, //TODO: implement import directives
+ SelectionSet: -1,
+ HasSelections: false,
+ }
+ if field.Alias.IsDefined {
+ field.Alias.Name = to.Input.AppendInputBytes(from.FieldAliasBytes(ref))
+ }
+ if field.HasArguments {
+ field.Arguments.Refs = i.ImportArguments(from.FieldArguments(ref), from, to)
+ }
+ to.Fields = append(to.Fields, field)
+ return len(to.Fields) - 1
+}
diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/astnormalization.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/astnormalization.go
new file mode 100644
index 00000000000..ff2e1814367
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/astnormalization.go
@@ -0,0 +1,239 @@
+/*Package astnormalization helps transform parsed GraphQL ASTs into an easier-to-use structure.
+
+Example
+
+This example shows how the normalization package simplifies a GraphQL AST.
+
+Input:
+
+ subscription sub {
+ ... multipleSubscriptions
+ ... on Subscription {
+ newMessage {
+ body
+ sender
+ }
+ }
+ }
+ fragment newMessageFields on Message {
+ body: body
+ sender
+ ... on Body {
+ body
+ }
+ }
+ fragment multipleSubscriptions on Subscription {
+ newMessage {
+ body
+ sender
+ }
+ newMessage {
+ ... newMessageFields
+ }
+ newMessage {
+ body
+ body
+ sender
+ }
+ ... on Subscription {
+ newMessage {
+ body
+ sender
+ }
+ }
+ disallowedSecondRootField
+ }
+
+Output:
+
+ subscription sub {
+ newMessage {
+ body
+ sender
+ }
+ disallowedSecondRootField
+ }
+ fragment newMessageFields on Message {
+ body
+ sender
+ }
+ fragment multipleSubscriptions on Subscription {
+ newMessage {
+ body
+ sender
+ }
+ disallowedSecondRootField
+ }
+*/
+package astnormalization
+
+import (
+ "github.com/TykTechnologies/graphql-go-tools/pkg/ast"
+ "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor"
+ "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport"
+)
+
+// NormalizeOperation creates a default Normalizer and applies all rules to a given AST.
+// In case you're using OperationNormalizer in a hot path you shouldn't be using this function.
+// Create a new OperationNormalizer using NewNormalizer() instead and re-use it.
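+//
+// A minimal usage sketch (an illustration, not upstream documentation; it
+// assumes this repository's astparser package, elides error handling, and
+// schemaSDL stands in for your schema string):
+//
+//	operation, _ := astparser.ParseGraphqlDocumentString("query Q { hero { name } }")
+//	definition, _ := astparser.ParseGraphqlDocumentString(schemaSDL)
+//	report := operationreport.Report{}
+//	NormalizeOperation(&operation, &definition, &report)
+//	if report.HasErrors() {
+//		// handle the report
+//	}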
+func NormalizeOperation(operation, definition *ast.Document, report *operationreport.Report) { + normalizer := NewNormalizer(false, false) + normalizer.NormalizeOperation(operation, definition, report) +} + +func NormalizeNamedOperation(operation, definition *ast.Document, operationName []byte, report *operationreport.Report) { + normalizer := NewNormalizer(true, true) + normalizer.NormalizeNamedOperation(operation, definition, operationName, report) +} + +// OperationNormalizer walks a given AST and applies all registered rules +type OperationNormalizer struct { + operationWalkers []*astvisitor.Walker + variablesExtraction *variablesExtractionVisitor + options options + definitionNormalizer *DefinitionNormalizer +} + +// NewNormalizer creates a new OperationNormalizer and sets up all default rules +func NewNormalizer(removeFragmentDefinitions, extractVariables bool) *OperationNormalizer { + normalizer := &OperationNormalizer{ + options: options{ + removeFragmentDefinitions: removeFragmentDefinitions, + extractVariables: extractVariables, + }, + } + normalizer.setupOperationWalkers() + return normalizer +} + +// NewWithOpts creates a new OperationNormalizer with Options +func NewWithOpts(opts ...Option) *OperationNormalizer { + var options options + for _, opt := range opts { + opt(&options) + } + normalizer := &OperationNormalizer{ + options: options, + } + normalizer.setupOperationWalkers() + + if options.normalizeDefinition { + normalizer.definitionNormalizer = NewDefinitionNormalizer() + } + + return normalizer +} + +type options struct { + removeFragmentDefinitions bool + extractVariables bool + removeUnusedVariables bool + normalizeDefinition bool +} + +type Option func(options *options) + +func WithExtractVariables() Option { + return func(options *options) { + options.extractVariables = true + } +} + +func WithRemoveFragmentDefinitions() Option { + return func(options *options) { + options.removeFragmentDefinitions = true + } +} + +func WithRemoveUnusedVariables() Option { + return func(options *options) { + options.removeUnusedVariables = true + } +} + +func WithNormalizeDefinition() Option { + return func(options *options) { + options.normalizeDefinition = true + } +} + +func (o *OperationNormalizer) setupOperationWalkers() { + o.operationWalkers = make([]*astvisitor.Walker, 0, 4) + + fragmentInline := astvisitor.NewWalker(48) + fragmentSpreadInline(&fragmentInline) + directiveIncludeSkip(&fragmentInline) + o.operationWalkers = append(o.operationWalkers, &fragmentInline) + + if o.options.extractVariables { + extractVariablesWalker := astvisitor.NewWalker(48) + o.variablesExtraction = extractVariables(&extractVariablesWalker) + o.operationWalkers = append(o.operationWalkers, &extractVariablesWalker) + } + + other := astvisitor.NewWalker(48) + removeSelfAliasing(&other) + mergeInlineFragments(&other) + mergeFieldSelections(&other) + deduplicateFields(&other) + + if o.options.removeFragmentDefinitions { + removeFragmentDefinitions(&other) + } + if o.options.removeUnusedVariables { + deleteUnusedVariables(&other) + } + o.operationWalkers = append(o.operationWalkers, &other) + + if o.options.extractVariables { + variablesProcessing := astvisitor.NewWalker(48) + inputCoercionForList(&variablesProcessing) + extractVariablesDefaultValue(&variablesProcessing) + injectInputFieldDefaults(&variablesProcessing) + + o.operationWalkers = append(o.operationWalkers, &variablesProcessing) + } +} + +func (o *OperationNormalizer) prepareDefinition(definition *ast.Document, report 
*operationreport.Report) { + if o.definitionNormalizer != nil { + o.definitionNormalizer.NormalizeDefinition(definition, report) + } +} + +// NormalizeOperation applies all registered rules to the AST +func (o *OperationNormalizer) NormalizeOperation(operation, definition *ast.Document, report *operationreport.Report) { + if o.options.normalizeDefinition { + o.prepareDefinition(definition, report) + if report.HasErrors() { + return + } + } + + for i := range o.operationWalkers { + o.operationWalkers[i].Walk(operation, definition, report) + if report.HasErrors() { + return + } + } +} + +// NormalizeNamedOperation applies all registered rules to one specific named operation in the AST +func (o *OperationNormalizer) NormalizeNamedOperation(operation, definition *ast.Document, operationName []byte, report *operationreport.Report) { + if o.options.normalizeDefinition { + o.prepareDefinition(definition, report) + if report.HasErrors() { + return + } + } + + if o.variablesExtraction != nil { + o.variablesExtraction.operationName = operationName + } + for i := range o.operationWalkers { + o.operationWalkers[i].Walk(operation, definition, report) + if report.HasErrors() { + return + } + } +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/definition_normalization.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/definition_normalization.go new file mode 100644 index 00000000000..2ec543b4abf --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/definition_normalization.go @@ -0,0 +1,67 @@ +package astnormalization + +import ( + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +// NormalizeDefinition creates a default DefinitionNormalizer and applies all rules to a given AST +// In case you're using DefinitionNormalizer in a hot path you shouldn't be using this function. +// Create a new DefinitionNormalizer using NewDefinitionNormalizer() instead and re-use it. 
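+//
+// A hedged usage sketch (assumes this repository's astparser package; schemaSDL
+// is a placeholder for your SDL string, and error handling is elided):
+//
+//	definition, _ := astparser.ParseGraphqlDocumentString(schemaSDL)
+//	report := operationreport.Report{}
+//	NormalizeDefinition(&definition, &report)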
+func NormalizeDefinition(definition *ast.Document, report *operationreport.Report) { + normalizer := NewDefinitionNormalizer() + normalizer.NormalizeDefinition(definition, report) +} + +// DefinitionNormalizer walks a given AST and applies all registered rules +type DefinitionNormalizer struct { + walker *astvisitor.Walker +} + +// NewDefinitionNormalizer creates a new DefinitionNormalizer and sets up all default rules +func NewDefinitionNormalizer() *DefinitionNormalizer { + normalizer := &DefinitionNormalizer{} + normalizer.setupWalkers() + return normalizer +} + +func (o *DefinitionNormalizer) setupWalkers() { + walker := astvisitor.NewWalker(48) + + extendObjectTypeDefinition(&walker) + extendInputObjectTypeDefinition(&walker) + extendEnumTypeDefinition(&walker) + extendInterfaceTypeDefinition(&walker) + extendScalarTypeDefinition(&walker) + extendUnionTypeDefinition(&walker) + removeMergedTypeExtensions(&walker) + implicitSchemaDefinition(&walker) + + o.walker = &walker +} + +func NewSubgraphDefinitionNormalizer() *DefinitionNormalizer { + normalizer := &DefinitionNormalizer{} + normalizer.setupSubgraphWalkers() + return normalizer +} + +func (o *DefinitionNormalizer) setupSubgraphWalkers() { + walker := astvisitor.NewWalker(48) + + extendObjectTypeDefinitionKeepingOrphans(&walker) + extendInputObjectTypeDefinitionKeepingOrphans(&walker) + extendEnumTypeDefinitionKeepingOrphans(&walker) + extendInterfaceTypeDefinitionKeepingOrphans(&walker) + extendScalarTypeDefinitionKeepingOrphans(&walker) + extendUnionTypeDefinitionKeepingOrphans(&walker) + removeMergedTypeExtensions(&walker) + + o.walker = &walker +} + +// NormalizeDefinition applies all registered rules to the AST +func (o *DefinitionNormalizer) NormalizeDefinition(definition *ast.Document, report *operationreport.Report) { + o.walker.Walk(definition, nil, report) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/directive_include_skip.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/directive_include_skip.go new file mode 100644 index 00000000000..5a8e3a1b7cd --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/directive_include_skip.go @@ -0,0 +1,87 @@ +package astnormalization + +import ( + "bytes" + + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/literal" +) + +func directiveIncludeSkip(walker *astvisitor.Walker) { + visitor := directiveIncludeSkipVisitor{ + Walker: walker, + } + walker.RegisterEnterDocumentVisitor(&visitor) + walker.RegisterEnterDirectiveVisitor(&visitor) +} + +type directiveIncludeSkipVisitor struct { + *astvisitor.Walker + operation, definition *ast.Document +} + +func (d *directiveIncludeSkipVisitor) EnterDocument(operation, definition *ast.Document) { + d.operation = operation + d.definition = definition +} + +func (d *directiveIncludeSkipVisitor) EnterDirective(ref int) { + + name := d.operation.DirectiveNameBytes(ref) + + switch { + case bytes.Equal(name, literal.INCLUDE): + d.handleInclude(ref) + case bytes.Equal(name, literal.SKIP): + d.handleSkip(ref) + } +} + +func (d *directiveIncludeSkipVisitor) handleSkip(ref int) { + if len(d.operation.Directives[ref].Arguments.Refs) != 1 { + return + } + arg := d.operation.Directives[ref].Arguments.Refs[0] + if !bytes.Equal(d.operation.ArgumentNameBytes(arg), literal.IF) { + return + } + value := d.operation.ArgumentValue(arg) + 
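+ // only a constant boolean if-argument can be decided at normalization time;
+ // @skip(if: $variable) is left in place for the execution phase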
if value.Kind != ast.ValueKindBoolean { + return + } + include := d.operation.BooleanValue(value.Ref) + switch include { + case false: + d.operation.RemoveDirectiveFromNode(d.Ancestors[len(d.Ancestors)-1], ref) + case true: + if len(d.Ancestors) < 2 { + return + } + d.operation.RemoveNodeFromNode(d.Ancestors[len(d.Ancestors)-1], d.Ancestors[len(d.Ancestors)-2]) + } +} + +func (d *directiveIncludeSkipVisitor) handleInclude(ref int) { + if len(d.operation.Directives[ref].Arguments.Refs) != 1 { + return + } + arg := d.operation.Directives[ref].Arguments.Refs[0] + if !bytes.Equal(d.operation.ArgumentNameBytes(arg), literal.IF) { + return + } + value := d.operation.ArgumentValue(arg) + if value.Kind != ast.ValueKindBoolean { + return + } + include := d.operation.BooleanValue(value.Ref) + switch include { + case true: + d.operation.RemoveDirectiveFromNode(d.Ancestors[len(d.Ancestors)-1], ref) + case false: + if len(d.Ancestors) < 2 { + return + } + d.operation.RemoveNodeFromNode(d.Ancestors[len(d.Ancestors)-1], d.Ancestors[len(d.Ancestors)-2]) + } +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/enum_type_extending.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/enum_type_extending.go new file mode 100644 index 00000000000..228947b2b82 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/enum_type_extending.go @@ -0,0 +1,54 @@ +package astnormalization + +import ( + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" +) + +func extendEnumTypeDefinition(walker *astvisitor.Walker) { + visitor := extendEnumTypeDefinitionVisitor{ + Walker: walker, + } + walker.RegisterEnterDocumentVisitor(&visitor) + walker.RegisterEnterEnumTypeExtensionVisitor(&visitor) +} + +func extendEnumTypeDefinitionKeepingOrphans(walker *astvisitor.Walker) { + visitor := extendEnumTypeDefinitionVisitor{ + Walker: walker, + keepExtensionOrphans: true, + } + walker.RegisterEnterDocumentVisitor(&visitor) + walker.RegisterEnterEnumTypeExtensionVisitor(&visitor) +} + +type extendEnumTypeDefinitionVisitor struct { + *astvisitor.Walker + operation *ast.Document + keepExtensionOrphans bool +} + +func (e *extendEnumTypeDefinitionVisitor) EnterDocument(operation, _ *ast.Document) { + e.operation = operation +} + +func (e *extendEnumTypeDefinitionVisitor) EnterEnumTypeExtension(ref int) { + nodes, exists := e.operation.Index.NodesByNameBytes(e.operation.EnumTypeExtensionNameBytes(ref)) + if !exists { + return + } + + for i := range nodes { + if nodes[i].Kind != ast.NodeKindEnumTypeDefinition { + continue + } + e.operation.ExtendEnumTypeDefinitionByEnumTypeExtension(nodes[i].Ref, ref) + return + } + + if e.keepExtensionOrphans { + return + } + + e.operation.ImportAndExtendEnumTypeDefinitionByEnumTypeExtension(ref) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/extends_directive.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/extends_directive.go new file mode 100644 index 00000000000..53201bb56e5 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/extends_directive.go @@ -0,0 +1,82 @@ +package astnormalization + +import ( + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" +) + +var extendsDirectiveName = "extends" + +type extendsDirectiveVisitor struct { + document *ast.Document +} + +func 
extendsDirective(walker *astvisitor.Walker) {
+ v := &extendsDirectiveVisitor{}
+ walker.RegisterEnterDocumentVisitor(v)
+ walker.RegisterEnterObjectTypeDefinitionVisitor(v)
+ walker.RegisterEnterInterfaceTypeDefinitionVisitor(v)
+}
+
+func (v *extendsDirectiveVisitor) EnterDocument(document, _ *ast.Document) {
+ v.document = document
+}
+
+func (v *extendsDirectiveVisitor) EnterObjectTypeDefinition(ref int) {
+ if !v.document.ObjectTypeDefinitions[ref].Directives.HasDirectiveByName(v.document, extendsDirectiveName) {
+ return
+ }
+ for i := range v.document.RootNodes {
+ if v.document.RootNodes[i].Ref == ref && v.document.RootNodes[i].Kind == ast.NodeKindObjectTypeDefinition {
+ // give this node a new NodeKind of ObjectTypeExtension
+ newRef := v.document.AddObjectTypeDefinitionExtension(ast.ObjectTypeExtension{ObjectTypeDefinition: v.document.ObjectTypeDefinitions[ref]})
+ // reflect changes inside the root nodes
+ v.document.UpdateRootNode(i, newRef, ast.NodeKindObjectTypeExtension)
+ // only remove @extends if the node was updated
+ v.document.ObjectTypeExtensions[newRef].Directives.RemoveDirectiveByName(v.document, extendsDirectiveName)
+ // update index
+ oldIndexNode := ast.Node{
+ Kind: ast.NodeKindObjectTypeDefinition,
+ Ref: ref,
+ }
+
+ v.document.Index.ReplaceNode(v.document.ObjectTypeExtensionNameBytes(newRef), oldIndexNode, ast.Node{
+ Kind: ast.NodeKindObjectTypeExtension,
+ Ref: newRef,
+ })
+
+ break
+ }
+ }
+
+}
+
+func (v *extendsDirectiveVisitor) EnterInterfaceTypeDefinition(ref int) {
+ if !v.document.InterfaceTypeDefinitions[ref].Directives.HasDirectiveByName(v.document, extendsDirectiveName) {
+ return
+ }
+ for i := range v.document.RootNodes {
+ if v.document.RootNodes[i].Kind != ast.NodeKindInterfaceTypeDefinition || v.document.RootNodes[i].Ref != ref {
+ continue
+ }
+
+ newRef := v.document.AddInterfaceTypeExtension(ast.InterfaceTypeExtension{
+ InterfaceTypeDefinition: v.document.InterfaceTypeDefinitions[ref],
+ })
+
+ v.document.UpdateRootNode(i, newRef, ast.NodeKindInterfaceTypeExtension)
+ v.document.InterfaceTypeExtensions[newRef].Directives.RemoveDirectiveByName(v.document, extendsDirectiveName)
+
+ oldIndexNode := ast.Node{
+ Kind: ast.NodeKindInterfaceTypeDefinition,
+ Ref: ref,
+ }
+
+ v.document.Index.ReplaceNode(v.document.InterfaceTypeExtensionNameBytes(newRef), oldIndexNode, ast.Node{
+ Kind: ast.NodeKindInterfaceTypeExtension,
+ Ref: newRef,
+ })
+
+ return
+ }
+}
diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/field_deduplication.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/field_deduplication.go
new file mode 100644
index 00000000000..246cae62513
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/field_deduplication.go
@@ -0,0 +1,59 @@
+package astnormalization
+
+import (
+ "github.com/TykTechnologies/graphql-go-tools/pkg/ast"
+ "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor"
+)
+
+func deduplicateFields(walker *astvisitor.Walker) {
+ visitor := deduplicateFieldsVisitor{
+ Walker: walker,
+ }
+ walker.RegisterEnterDocumentVisitor(&visitor)
+ walker.RegisterEnterSelectionSetVisitor(&visitor)
+}
+
+type deduplicateFieldsVisitor struct {
+ *astvisitor.Walker
+ operation *ast.Document
+}
+
+func (d *deduplicateFieldsVisitor) EnterDocument(operation, definition *ast.Document) {
+ d.operation = operation
+}
+
+func (d *deduplicateFieldsVisitor) EnterSelectionSet(ref int) {
+ if len(d.operation.SelectionSets[ref].SelectionRefs) <
2 { + return + } + + for a, i := range d.operation.SelectionSets[ref].SelectionRefs { + if d.operation.Selections[i].Kind != ast.SelectionKindField { + continue + } + left := d.operation.Selections[i].Ref + if d.operation.Fields[left].HasSelections { + continue + } + for b, j := range d.operation.SelectionSets[ref].SelectionRefs { + if a == b { + continue + } + if a > b { + continue + } + if d.operation.Selections[j].Kind != ast.SelectionKindField { + continue + } + right := d.operation.Selections[j].Ref + if d.operation.Fields[right].HasSelections { + continue + } + if d.operation.FieldsAreEqualFlat(left, right) { + d.operation.RemoveFromSelectionSet(ref, b) + d.RevisitNode() + return + } + } + } +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/field_selection_merging.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/field_selection_merging.go new file mode 100644 index 00000000000..c2b6f5f30d1 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/field_selection_merging.go @@ -0,0 +1,93 @@ +package astnormalization + +import ( + "bytes" + + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" +) + +func mergeFieldSelections(walker *astvisitor.Walker) { + visitor := fieldSelectionMergeVisitor{ + Walker: walker, + } + walker.RegisterEnterDocumentVisitor(&visitor) + walker.RegisterEnterSelectionSetVisitor(&visitor) +} + +type fieldSelectionMergeVisitor struct { + *astvisitor.Walker + operation *ast.Document +} + +func (f *fieldSelectionMergeVisitor) EnterDocument(operation, definition *ast.Document) { + f.operation = operation +} + +func (f *fieldSelectionMergeVisitor) fieldsCanMerge(left, right int) bool { + leftName := f.operation.FieldNameBytes(left) + rightName := f.operation.FieldNameBytes(right) + leftAlias := f.operation.FieldAliasBytes(left) + rightAlias := f.operation.FieldAliasBytes(right) + + if !bytes.Equal(leftName, rightName) || !bytes.Equal(leftAlias, rightAlias) { + return false + } + + leftDirectives := f.operation.FieldDirectives(left) + rightDirectives := f.operation.FieldDirectives(right) + + return f.operation.DirectiveSetsAreEqual(leftDirectives, rightDirectives) +} + +func (f *fieldSelectionMergeVisitor) isFieldSelection(ref int) bool { + return f.operation.Selections[ref].Kind == ast.SelectionKindField +} + +func (f *fieldSelectionMergeVisitor) fieldsHaveSelections(left, right int) bool { + return f.operation.Fields[left].HasSelections && f.operation.Fields[right].HasSelections +} + +func (f *fieldSelectionMergeVisitor) removeSelection(set, i int) { + f.operation.SelectionSets[set].SelectionRefs = append(f.operation.SelectionSets[set].SelectionRefs[:i], f.operation.SelectionSets[set].SelectionRefs[i+1:]...) +} + +func (f *fieldSelectionMergeVisitor) mergeFields(left, right int) { + leftSet := f.operation.Fields[left].SelectionSet + rightSet := f.operation.Fields[right].SelectionSet + f.operation.SelectionSets[leftSet].SelectionRefs = append(f.operation.SelectionSets[leftSet].SelectionRefs, f.operation.SelectionSets[rightSet].SelectionRefs...) + f.operation.Fields[left].Directives.Refs = append(f.operation.Fields[left].Directives.Refs, f.operation.Fields[right].Directives.Refs...) 
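+ // the right-hand field has already been removed from the selection set by the
+ // caller, so only its selections and directives are folded into the left field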
+} + +func (f *fieldSelectionMergeVisitor) EnterSelectionSet(ref int) { + + if len(f.operation.SelectionSets[ref].SelectionRefs) < 2 { + return + } + + for _, leftSelection := range f.operation.SelectionSets[ref].SelectionRefs { + if !f.isFieldSelection(leftSelection) { + continue + } + leftField := f.operation.Selections[leftSelection].Ref + for i, rightSelection := range f.operation.SelectionSets[ref].SelectionRefs { + if !f.isFieldSelection(rightSelection) { + continue + } + if leftSelection == rightSelection { + continue + } + rightField := f.operation.Selections[rightSelection].Ref + if !f.fieldsHaveSelections(leftField, rightField) { + continue + } + if !f.fieldsCanMerge(leftField, rightField) { + continue + } + f.removeSelection(ref, i) + f.mergeFields(leftField, rightField) + f.RevisitNode() + return + } + } +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/fragment_definition_removal.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/fragment_definition_removal.go new file mode 100644 index 00000000000..9bd223093e6 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/fragment_definition_removal.go @@ -0,0 +1,25 @@ +package astnormalization + +import ( + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" +) + +type FragmentDefinitionRemoval struct { +} + +func removeFragmentDefinitions(walker *astvisitor.Walker) { + visitor := removeFragmentDefinitionsVisitor{} + walker.RegisterLeaveDocumentVisitor(visitor) +} + +type removeFragmentDefinitionsVisitor struct { +} + +func (r removeFragmentDefinitionsVisitor) LeaveDocument(operation, definition *ast.Document) { + for i := range operation.RootNodes { + if operation.RootNodes[i].Kind == ast.NodeKindFragmentDefinition { + operation.RootNodes[i].Kind = ast.NodeKindUnknown + } + } +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/fragment_spread_inlining.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/fragment_spread_inlining.go new file mode 100644 index 00000000000..d3b43f5e319 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/fragment_spread_inlining.go @@ -0,0 +1,115 @@ +package astnormalization + +import ( + "bytes" + "fmt" + + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/asttransform" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +func fragmentSpreadInline(walker *astvisitor.Walker) { + visitor := fragmentSpreadInlineVisitor{ + Walker: walker, + } + walker.RegisterDocumentVisitor(&visitor) + walker.RegisterEnterFragmentSpreadVisitor(&visitor) +} + +type fragmentSpreadInlineVisitor struct { + *astvisitor.Walker + operation, definition *ast.Document + transformer asttransform.Transformer + fragmentSpreadDepth FragmentSpreadDepth + depths Depths +} + +func (f *fragmentSpreadInlineVisitor) EnterDocument(operation, definition *ast.Document) { + f.transformer.Reset() + f.depths = f.depths[:0] + f.operation = operation + f.definition = definition + + f.fragmentSpreadDepth.Get(operation, definition, f.Report, &f.depths) + if f.Report.HasErrors() { + f.Stop() + } +} + +func (f *fragmentSpreadInlineVisitor) LeaveDocument(operation, definition *ast.Document) { + f.transformer.ApplyTransformations(operation) +} + +func (f 
*fragmentSpreadInlineVisitor) EnterFragmentSpread(ref int) { + parentTypeName := f.definition.NodeNameBytes(f.EnclosingTypeDefinition) + + fragmentDefinitionRef, exists := f.operation.FragmentDefinitionRef(f.operation.FragmentSpreadNameBytes(ref)) + if !exists { + fragmentName := f.operation.FragmentSpreadNameBytes(ref) + f.StopWithExternalErr(operationreport.ErrFragmentUndefined(fragmentName)) + return + } + + fragmentTypeName := f.operation.FragmentDefinitionTypeName(fragmentDefinitionRef) + fragmentNode, exists := f.definition.NodeByName(fragmentTypeName) + if !exists { + f.StopWithExternalErr(operationreport.ErrTypeUndefined(fragmentTypeName)) + return + } + + fragmentTypeEqualsParentType := bytes.Equal(parentTypeName, fragmentTypeName) + var enclosingTypeImplementsFragmentType bool + var enclosingTypeIsMemberOfFragmentUnion bool + var fragmentTypeImplementsEnclosingType bool + var fragmentTypeIsMemberOfEnclosingUnionType bool + var fragmentUnionIntersectsEnclosingInterface bool + var fragmentInterfaceIntersectsEnclosingUnion bool + + if fragmentNode.Kind == ast.NodeKindInterfaceTypeDefinition && f.EnclosingTypeDefinition.Kind == ast.NodeKindObjectTypeDefinition { + enclosingTypeImplementsFragmentType = f.definition.NodeImplementsInterface(f.EnclosingTypeDefinition, fragmentNode) + } + + if fragmentNode.Kind == ast.NodeKindUnionTypeDefinition { + enclosingTypeIsMemberOfFragmentUnion = f.definition.NodeIsUnionMember(f.EnclosingTypeDefinition, fragmentNode) + } + + if f.EnclosingTypeDefinition.Kind == ast.NodeKindInterfaceTypeDefinition { + fragmentTypeImplementsEnclosingType = f.definition.NodeImplementsInterface(fragmentNode, f.EnclosingTypeDefinition) + } + + if f.EnclosingTypeDefinition.Kind == ast.NodeKindInterfaceTypeDefinition && fragmentNode.Kind == ast.NodeKindUnionTypeDefinition { + fragmentUnionIntersectsEnclosingInterface = f.definition.UnionNodeIntersectsInterfaceNode(fragmentNode, f.EnclosingTypeDefinition) + } + + if f.EnclosingTypeDefinition.Kind == ast.NodeKindUnionTypeDefinition && fragmentNode.Kind == ast.NodeKindInterfaceTypeDefinition { + fragmentInterfaceIntersectsEnclosingUnion = f.definition.UnionNodeIntersectsInterfaceNode(f.EnclosingTypeDefinition, fragmentNode) + } + + if f.EnclosingTypeDefinition.Kind == ast.NodeKindUnionTypeDefinition { + fragmentTypeIsMemberOfEnclosingUnionType = f.definition.NodeIsUnionMember(fragmentNode, f.EnclosingTypeDefinition) + } + + nestedDepth, ok := f.depths.ByRef(ref) + if !ok { + f.StopWithInternalErr(fmt.Errorf("nested depth missing on depths for FragmentSpread: %s", f.operation.FragmentSpreadNameString(ref))) + return + } + + precedence := asttransform.Precedence{ + Depth: nestedDepth, + Order: 0, + } + + selectionSet := f.Ancestors[len(f.Ancestors)-1].Ref + replaceWith := f.operation.FragmentDefinitions[fragmentDefinitionRef].SelectionSet + typeCondition := f.operation.FragmentDefinitions[fragmentDefinitionRef].TypeCondition + + switch { + case fragmentTypeEqualsParentType || enclosingTypeImplementsFragmentType: + f.transformer.ReplaceFragmentSpread(precedence, selectionSet, ref, replaceWith) + case fragmentTypeImplementsEnclosingType || fragmentTypeIsMemberOfEnclosingUnionType || enclosingTypeIsMemberOfFragmentUnion || fragmentUnionIntersectsEnclosingInterface || fragmentInterfaceIntersectsEnclosingUnion: + f.transformer.ReplaceFragmentSpreadWithInlineFragment(precedence, selectionSet, ref, replaceWith, typeCondition) + } +} diff --git 
a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/fragmentspread_depth.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/fragmentspread_depth.go new file mode 100644 index 00000000000..a75812c9e01 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/fragmentspread_depth.go @@ -0,0 +1,105 @@ +package astnormalization + +import ( + "bytes" + + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +// FragmentSpreadDepth is a helper for nested Fragments to calculate the actual depth of a Fragment Node +type FragmentSpreadDepth struct { + walker astvisitor.Walker + visitor fragmentSpreadDepthVisitor + calc nestedDepthCalc + visitorsRegistered bool +} + +// Depth holds all necessary information to understand the Depth of a Fragment Node +type Depth struct { + SpreadRef int + Depth int + SpreadName ast.ByteSlice + isNested bool + parentFragmentName ast.ByteSlice +} + +type Depths []Depth + +func (d Depths) ByRef(ref int) (int, bool) { + for i := range d { + if d[i].SpreadRef == ref { + return d[i].Depth, true + } + } + return -1, false +} + +// Get returns all FragmentSpread Depths for a given AST +func (r *FragmentSpreadDepth) Get(operation, definition *ast.Document, report *operationreport.Report, depths *Depths) { + + if !r.visitorsRegistered { + r.walker.RegisterEnterFragmentSpreadVisitor(&r.visitor) + r.visitorsRegistered = true + } + + r.visitor.operation = operation + r.visitor.definition = definition + r.visitor.depths = depths + r.visitor.Walker = &r.walker + + r.walker.Walk(operation, definition, report) + r.calc.calculatedNestedDepths(depths) +} + +type nestedDepthCalc struct { + depths *Depths +} + +func (n *nestedDepthCalc) calculatedNestedDepths(depths *Depths) { + n.depths = depths + + for i := range *depths { + (*depths)[i].Depth = n.calculateNestedDepth(i) + } +} + +func (n *nestedDepthCalc) calculateNestedDepth(i int) int { + if !(*n.depths)[i].isNested { + return (*n.depths)[i].Depth + } + return (*n.depths)[i].Depth + n.depthForFragment((*n.depths)[i].parentFragmentName) +} + +func (n *nestedDepthCalc) depthForFragment(name ast.ByteSlice) int { + for i := range *n.depths { + if bytes.Equal(name, (*n.depths)[i].SpreadName) { + return n.calculateNestedDepth(i) + } + } + return 0 +} + +type fragmentSpreadDepthVisitor struct { + *astvisitor.Walker + operation *ast.Document + definition *ast.Document + depths *Depths +} + +func (r *fragmentSpreadDepthVisitor) EnterFragmentSpread(ref int) { + + depth := Depth{ + SpreadRef: ref, + Depth: r.Depth, + SpreadName: r.operation.FragmentSpreadNameBytes(ref), + } + + if r.Ancestors[0].Kind == ast.NodeKindFragmentDefinition { + depth.isNested = true + depth.parentFragmentName = r.operation.FragmentDefinitionNameBytes(r.Ancestors[0].Ref) + } + + *r.depths = append(*r.depths, depth) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/implicit_extend_root_operation.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/implicit_extend_root_operation.go new file mode 100644 index 00000000000..023746c36ac --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/implicit_extend_root_operation.go @@ -0,0 +1,54 @@ +package astnormalization + +import ( + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + 
"github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" +) + +/* +type Query {...} +type Mutation {...} +type Subscription {...} + +will be, + +extend type Query {...} +extend type Mutation {...} +extend type Subscription {...} + +this also works if root types are defined in schema{...} with other names. +root types are left unmodified if they have no fields, directives or implements any interface. +*/ +type implicitExtendRootOperationVisitor struct { + operation *ast.Document +} + +func implicitExtendRootOperation(walker *astvisitor.Walker) { + v := &implicitExtendRootOperationVisitor{} + walker.RegisterEnterDocumentVisitor(v) + walker.RegisterEnterObjectTypeDefinitionVisitor(v) +} + +func (v *implicitExtendRootOperationVisitor) EnterDocument(operation, _ *ast.Document) { + v.operation = operation +} + +func (v *implicitExtendRootOperationVisitor) EnterObjectTypeDefinition(ref int) { + node := v.operation.ObjectTypeDefinitions[ref] + if !(node.HasFieldDefinitions || node.HasDirectives) { + return + } + switch v.operation.ObjectTypeDefinitionNameString(ref) { + case implicitQueryTypeName, implicitMutationTypeName, implicitSubscriptionTypeName, + v.operation.Index.QueryTypeName.String(), v.operation.Index.MutationTypeName.String(), v.operation.Index.SubscriptionTypeName.String(): + for i := range v.operation.RootNodes { + if v.operation.RootNodes[i].Ref == ref && v.operation.RootNodes[i].Kind == ast.NodeKindObjectTypeDefinition { + // give this node a new NodeKind of ObjectTypeExtension + newRef := v.operation.AddObjectTypeDefinitionExtension(ast.ObjectTypeExtension{ObjectTypeDefinition: node}) + // reflect changes inside the root nodes + v.operation.UpdateRootNode(i, newRef, ast.NodeKindObjectTypeExtension) + break + } + } + } +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/implicit_schema_definition.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/implicit_schema_definition.go new file mode 100644 index 00000000000..15198891fca --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/implicit_schema_definition.go @@ -0,0 +1,58 @@ +package astnormalization + +import ( + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" +) + +const ( + implicitQueryTypeName = "Query" + implicitMutationTypeName = "Mutation" + implicitSubscriptionTypeName = "Subscription" +) + +func implicitSchemaDefinition(walker *astvisitor.Walker) { + visitor := implicitSchemaDefinitionVisitor{ + Walker: walker, + } + walker.RegisterLeaveDocumentVisitor(&visitor) +} + +type implicitSchemaDefinitionVisitor struct { + *astvisitor.Walker +} + +func (i *implicitSchemaDefinitionVisitor) LeaveDocument(operation, definition *ast.Document) { + queryNodeName := i.nodeName(implicitQueryTypeName, operation) + mutationNodeName := i.nodeName(implicitMutationTypeName, operation) + subscriptionNodeName := i.nodeName(implicitSubscriptionTypeName, operation) + + schemaDefinitionRef := operation.SchemaDefinitionRef() + if schemaDefinitionRef == ast.InvalidRef { + operation.ImportSchemaDefinition(queryNodeName, mutationNodeName, subscriptionNodeName) + return + } + + if len(operation.SchemaDefinitions[schemaDefinitionRef].RootOperationTypeDefinitions.Refs) > 0 { + return + } + + operation.ReplaceRootOperationTypesOfSchemaDefinition(schemaDefinitionRef, queryNodeName, mutationNodeName, subscriptionNodeName) +} + +func (i *implicitSchemaDefinitionVisitor) 
nodeName(operationTypeName string, operation *ast.Document) string { + nodes, ok := operation.Index.NodesByNameStr(operationTypeName) + if !ok { + return "" + } + + for i := range nodes { + if nodes[i].Kind != ast.NodeKindObjectTypeDefinition { + continue + } + + return operationTypeName + } + + return "" +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/inject_input_default_values.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/inject_input_default_values.go new file mode 100644 index 00000000000..2dcad5f1eec --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/inject_input_default_values.go @@ -0,0 +1,210 @@ +package astnormalization + +import ( + "errors" + "fmt" + "github.com/buger/jsonparser" + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" +) + +func injectInputFieldDefaults(walker *astvisitor.Walker) *inputFieldDefaultInjectionVisitor { + visitor := &inputFieldDefaultInjectionVisitor{ + Walker: walker, + jsonPath: make([]string, 0), + } + walker.RegisterEnterDocumentVisitor(visitor) + walker.RegisterVariableDefinitionVisitor(visitor) + return visitor +} + +type inputFieldDefaultInjectionVisitor struct { + *astvisitor.Walker + + operation *ast.Document + definition *ast.Document + + variableName string + jsonPath []string +} + +func (v *inputFieldDefaultInjectionVisitor) EnterDocument(operation, definition *ast.Document) { + v.operation, v.definition = operation, definition +} + +func (v *inputFieldDefaultInjectionVisitor) EnterVariableDefinition(ref int) { + v.variableName = v.operation.VariableDefinitionNameString(ref) + + variableVal, _, _, err := jsonparser.Get(v.operation.Input.Variables, v.variableName) + if err == jsonparser.KeyPathNotFoundError { + return + } + if err != nil { + v.StopWithInternalErr(err) + return + } + + typeRef := v.operation.VariableDefinitions[ref].Type + if v.isScalarTypeOrExtension(typeRef, v.operation) { + return + } + newVal, err := v.processObjectOrListInput(typeRef, variableVal, v.operation) + if err != nil { + v.StopWithInternalErr(err) + return + } + newVariables, err := jsonparser.Set(v.operation.Input.Variables, newVal, v.variableName) + if err != nil { + v.StopWithInternalErr(err) + return + } + v.operation.Input.Variables = newVariables +} + +func (v *inputFieldDefaultInjectionVisitor) recursiveInjectInputFields(inputObjectRef int, varValue []byte) ([]byte, error) { + finalVal := varValue + objectDef := v.definition.InputObjectTypeDefinitions[inputObjectRef] + if !objectDef.HasInputFieldsDefinition { + return varValue, nil + } + for _, ref := range objectDef.InputFieldsDefinition.Refs { + valDef := v.definition.InputValueDefinitions[ref] + fieldName := v.definition.InputValueDefinitionNameString(ref) + isTypeScalarOrEnum := v.isScalarTypeOrExtension(valDef.Type, v.definition) + hasDefault := valDef.DefaultValue.IsDefined + + varVal, _, _, err := jsonparser.Get(varValue, fieldName) + if err != nil && err != jsonparser.KeyPathNotFoundError { + v.StopWithInternalErr(err) + return nil, err + } + existsInVal := err != jsonparser.KeyPathNotFoundError + + if !isTypeScalarOrEnum { + var valToUse []byte + if existsInVal { + valToUse = varVal + } else if hasDefault { + defVal, err := v.definition.ValueToJSON(valDef.DefaultValue.Value) + if err != nil { + return nil, err + } + valToUse = defVal + } else { + continue + } + fieldValue, err := v.processObjectOrListInput(valDef.Type, 
valToUse, v.definition) + if err != nil { + return nil, err + } + finalVal, err = jsonparser.Set(finalVal, fieldValue, fieldName) + if err != nil { + return nil, err + } + continue + } + + if !hasDefault && isTypeScalarOrEnum { + continue + } + if existsInVal { + continue + } + defVal, err := v.definition.ValueToJSON(valDef.DefaultValue.Value) + if err != nil { + return nil, err + } + + finalVal, err = jsonparser.Set(finalVal, defVal, fieldName) + if err != nil { + return nil, err + } + } + return finalVal, nil +} + +func (v *inputFieldDefaultInjectionVisitor) isScalarTypeOrExtension(typeRef int, typeDoc *ast.Document) bool { + if typeDoc.TypeIsScalar(typeRef, v.definition) || typeDoc.TypeIsEnum(typeRef, v.definition) { + return true + } + typeName := typeDoc.TypeNameBytes(typeRef) + node, found := v.definition.Index.FirstNonExtensionNodeByNameBytes(typeName) + if !found { + return false + } + switch node.Kind { + case ast.NodeKindScalarTypeDefinition, ast.NodeKindEnumTypeDefinition: + return true + } + return false +} + +func (v *inputFieldDefaultInjectionVisitor) processObjectOrListInput(fieldType int, defaultValue []byte, typeDoc *ast.Document) ([]byte, error) { + finalVal := defaultValue + fieldIsList := typeDoc.TypeIsList(fieldType) + varVal, valType, _, err := jsonparser.Get(defaultValue) + if err != nil { + return nil, err + + } + node, found := v.definition.Index.FirstNodeByNameBytes(typeDoc.ResolveTypeNameBytes(fieldType)) + if !found { + return finalVal, nil + } + if node.Kind == ast.NodeKindScalarTypeDefinition { + return finalVal, nil + } + valIsList := valType == jsonparser.Array + if fieldIsList && valIsList { + _, err := jsonparser.ArrayEach(varVal, v.jsonWalker(typeDoc.ResolveListOrNameType(fieldType), defaultValue, &node, typeDoc, &finalVal)) + if err != nil { + return nil, err + + } + } else if !fieldIsList && !valIsList { + finalVal, err = v.recursiveInjectInputFields(node.Ref, defaultValue) + if err != nil { + return nil, err + } + } else { + return nil, errors.New("mismatched input value") + } + return finalVal, nil +} + +func (v *inputFieldDefaultInjectionVisitor) jsonWalker(fieldType int, defaultValue []byte, node *ast.Node, typeDoc *ast.Document, finalVal *[]byte) func(value []byte, dataType jsonparser.ValueType, offset int, err error) { + i := 0 + listOfList := typeDoc.TypeIsList(typeDoc.Types[fieldType].OfType) + return func(value []byte, dataType jsonparser.ValueType, offset int, err error) { + if err != nil { + return + } + if listOfList && dataType == jsonparser.Array { + newVal, err := v.processObjectOrListInput(typeDoc.Types[fieldType].OfType, value, typeDoc) + if err != nil { + return + } + *finalVal, err = jsonparser.Set(defaultValue, newVal, fmt.Sprintf("[%d]", i)) + if err != nil { + return + } + } else if !listOfList && dataType == jsonparser.Object { + newVal, err := v.recursiveInjectInputFields(node.Ref, value) + if err != nil { + return + } + *finalVal, err = jsonparser.Set(defaultValue, newVal, fmt.Sprintf("[%d]", i)) + if err != nil { + return + } + } else { + return + } + i++ + } + +} +func (v *inputFieldDefaultInjectionVisitor) LeaveVariableDefinition(ref int) { + v.variableName = "" + v.jsonPath = make([]string, 0) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/inline_fragment_merging.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/inline_fragment_merging.go new file mode 100644 index 00000000000..79448f54a79 --- /dev/null +++ 
b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/inline_fragment_merging.go @@ -0,0 +1,63 @@ +package astnormalization + +import ( + "bytes" + + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" +) + +func mergeInlineFragments(walker *astvisitor.Walker) { + visitor := mergeInlineFragmentsVisitor{ + Walker: walker, + } + walker.RegisterEnterDocumentVisitor(&visitor) + walker.RegisterEnterSelectionSetVisitor(&visitor) +} + +type mergeInlineFragmentsVisitor struct { + *astvisitor.Walker + operation, definition *ast.Document +} + +func (m *mergeInlineFragmentsVisitor) EnterDocument(operation, definition *ast.Document) { + m.operation = operation + m.definition = definition +} + +func (m *mergeInlineFragmentsVisitor) couldInline(set, inlineFragment int) bool { + if m.operation.InlineFragmentHasDirectives(inlineFragment) { + return false + } + if !m.operation.InlineFragmentHasTypeCondition(inlineFragment) { + return true + } + if bytes.Equal(m.operation.InlineFragmentTypeConditionName(inlineFragment), m.definition.NodeNameBytes(m.EnclosingTypeDefinition)) { + return true + } + + inlineFragmentTypeName := m.operation.InlineFragmentTypeConditionName(inlineFragment) + enclosingTypeName := m.definition.NodeNameBytes(m.EnclosingTypeDefinition) + + return m.definition.TypeDefinitionContainsImplementsInterface(enclosingTypeName, inlineFragmentTypeName) +} + +func (m *mergeInlineFragmentsVisitor) resolveInlineFragment(set, index, inlineFragment int) { + m.operation.ReplaceSelectionOnSelectionSet(set, index, m.operation.InlineFragments[inlineFragment].SelectionSet) +} + +func (m *mergeInlineFragmentsVisitor) EnterSelectionSet(ref int) { + + for index, selection := range m.operation.SelectionSets[ref].SelectionRefs { + if m.operation.Selections[selection].Kind != ast.SelectionKindInlineFragment { + continue + } + inlineFragment := m.operation.Selections[selection].Ref + if !m.couldInline(ref, inlineFragment) { + continue + } + m.resolveInlineFragment(ref, index, inlineFragment) + m.RevisitNode() + return + } +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/input_coercion_for_list.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/input_coercion_for_list.go new file mode 100644 index 00000000000..9cb4c8e1822 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/input_coercion_for_list.go @@ -0,0 +1,297 @@ +package astnormalization + +import ( + "strconv" + "strings" + + "github.com/buger/jsonparser" + "github.com/tidwall/sjson" + + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/literal" + "github.com/TykTechnologies/graphql-go-tools/pkg/pool" +) + +func inputCoercionForList(walker *astvisitor.Walker) { + visitor := inputCoercionForListVisitor{ + Walker: walker, + } + walker.RegisterEnterDocumentVisitor(&visitor) + walker.RegisterVariableDefinitionVisitor(&visitor) +} + +type inputCoercionForListVisitor struct { + *astvisitor.Walker + operation *ast.Document + definition *ast.Document + operationDefinitionRef int + + query []string +} + +func (i *inputCoercionForListVisitor) EnterDocument(operation, definition *ast.Document) { + i.operation, i.definition = operation, definition +} + +func (i *inputCoercionForListVisitor) EnterOperationDefinition(ref int) { + i.operationDefinitionRef = ref +} 
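+
+// A worked example of the coercion below (illustrative, derived from the code
+// and the spec table linked in processTypeKindList): given a variable
+// definition `$ids: [[Int]]` and the variables JSON `{"ids": 5}`, this visitor
+// rewrites the variables to `{"ids": [[5]]}`.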
+ +func (i *inputCoercionForListVisitor) EnterVariableDefinition(ref int) { + variableNameString := i.operation.VariableDefinitionNameString(ref) + variableDefinition, exists := i.operation.VariableDefinitionByNameAndOperation(i.operationDefinitionRef, i.operation.VariableValueNameBytes(ref)) + if !exists { + return + } + variableTypeRef := i.operation.VariableDefinitions[variableDefinition].Type + variableTypeRef = i.operation.ResolveListOrNameType(variableTypeRef) + + value, dataType, _, err := jsonparser.Get(i.operation.Input.Variables, variableNameString) + if err == jsonparser.KeyPathNotFoundError { + // If the user didn't provide a variable with that name, + // there is no need for coercion; skip this variable + return + } + if err != nil { + i.StopWithInternalErr(err) + return + } + + i.query = append(i.query, variableNameString) + + switch i.operation.Types[variableTypeRef].TypeKind { + case ast.TypeKindList: + i.processTypeKindList(i.operation, variableTypeRef, value, dataType) + case ast.TypeKindNamed: + // We build a path query used to write changes back into the original variable + // Sample path: inputs.list.1.list.nested.list.1 + i.processTypeKindNamed(i.operation, i.operation.VariableDefinitions[ref].Type, value, dataType) + } +} + +func (i *inputCoercionForListVisitor) LeaveVariableDefinition(ref int) { + i.query = i.query[:0] +} + +func (i *inputCoercionForListVisitor) makeJSONArray(nestingDepth int, value []byte, dataType jsonparser.ValueType) ([]byte, error) { + wrapValueInQuotes := dataType == jsonparser.String + + out := pool.BytesBuffer.Get() + defer pool.BytesBuffer.Put(out) + + // the value is not an array, so build one from it, nested to the required depth + for idx := 0; idx < nestingDepth; idx++ { + _, err := out.Write(literal.LBRACK) + if err != nil { + return nil, err + } + } + + if wrapValueInQuotes { + _, err := out.Write(literal.QUOTE) + if err != nil { + return nil, err + } + } + + _, err := out.Write(value) + if err != nil { + return nil, err + } + + if wrapValueInQuotes { + _, err := out.Write(literal.QUOTE) + if err != nil { + return nil, err + } + } + + for idx := 0; idx < nestingDepth; idx++ { + _, err = out.Write(literal.RBRACK) + if err != nil { + return nil, err + } + } + + // We have now built a JSON array from the given value. + + // Use a new slice before putting it into the variables. + // If we use the `out` buffer here, another pool user could re-use + // it and manipulate the variables.
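+	// At this point `out` holds the coerced array: e.g. nestingDepth=2 and
+	// value `5` yield `[[5]]` (string values are additionally quoted).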
+ data := make([]byte, out.Len()) + copy(data, out.Bytes()) + return data, nil +} + +func (i *inputCoercionForListVisitor) updateQuery(path string) { + i.query = append(i.query, path) +} + +func (i *inputCoercionForListVisitor) queryPath() (path string) { + return strings.Join(i.query, ".") +} + +func (i *inputCoercionForListVisitor) popQuery() { + if len(i.query)-1 > 0 { + i.query = i.query[:len(i.query)-1] + } +} + +func (i *inputCoercionForListVisitor) calculateNestingDepth(document *ast.Document, typeRef int) int { + var nestingDepth int + for typeRef != ast.InvalidRef { + first := document.Types[typeRef] + + typeRef = first.OfType + + switch first.TypeKind { + case ast.TypeKindList: + nestingDepth++ + default: + continue + } + } + return nestingDepth +} + +/* +We analyze the variables JSON, which comes in three variants: + +- array: find the corresponding type and visit each element +- object: find the corresponding type and do a deep field analysis +- plain value: do nothing + +Object in depth: + +an object corresponds to an input object type definition; we iterate over the object's fields and try to find the corresponding field type. + +The field type can be: +- a named type +- a list + +When it is a list, the data can be: +- a JSON array: proceed recursively +- a plain JSON value: wrap it into an array +- a JSON object: wrap it into an array and proceed recursively +*/ + +func (i *inputCoercionForListVisitor) walkJsonObject(inputObjDefTypeRef int, data []byte) { + err := jsonparser.ObjectEach(data, func(key []byte, value []byte, dataType jsonparser.ValueType, offset int) error { + i.updateQuery(string(key)) + defer i.popQuery() + + inputValueDefRef := i.definition.InputObjectTypeDefinitionInputValueDefinitionByName(inputObjDefTypeRef, key) + // if inputValueDefRef is invalid, this variable key does not exist in the input object type, so skip it + if inputValueDefRef == ast.InvalidRef { + return nil + } + typeRef := i.definition.ResolveListOrNameType(i.definition.InputValueDefinitionType(inputValueDefRef)) + + switch i.definition.Types[typeRef].TypeKind { + case ast.TypeKindList: + i.processTypeKindList(i.definition, typeRef, value, dataType) + case ast.TypeKindNamed: + // We build a path query used to write changes back into the original variable + // Sample path: inputs.list.1.list.nested.list.1 + i.processTypeKindNamed(i.definition, typeRef, value, dataType) + } + + return nil + + }) + if err != nil { + i.StopWithInternalErr(err) + } +} + +func (i *inputCoercionForListVisitor) walkJsonArray(document *ast.Document, listItemTypeRef int, data []byte) { + index := -1 + _, err := jsonparser.ArrayEach(data, func(value []byte, dataType jsonparser.ValueType, offset int, cbErr error) { + if cbErr != nil { + i.StopWithInternalErr(cbErr) + return + } + index++ + + i.updateQuery(strconv.Itoa(index)) + defer i.popQuery() + + itemTypeRef := document.ResolveListOrNameType(listItemTypeRef) + + switch document.Types[itemTypeRef].TypeKind { + case ast.TypeKindList: + i.processTypeKindList(document, itemTypeRef, value, dataType) + case ast.TypeKindNamed: + // We build a path query used to write changes back into the original variable + // Sample path: inputs.list.1.list.nested.list.1 + i.processTypeKindNamed(document, itemTypeRef, value, dataType) + } + }) + + if err != nil { + i.StopWithInternalErr(err) + } + +} + +func (i *inputCoercionForListVisitor) processTypeKindNamed(document *ast.Document, typeRef int, value []byte, dataType jsonparser.ValueType) { + if dataType != jsonparser.Object { + return + } + + typeName := document.ResolveTypeNameBytes(typeRef) + + node, 
exist := i.definition.Index.FirstNodeByNameBytes(typeName) + if !exist { + return + } + + switch node.Kind { + case ast.NodeKindInputObjectTypeDefinition: + i.walkJsonObject(node.Ref, value) + case ast.NodeKindScalarTypeDefinition: + return + } +} + +func (i *inputCoercionForListVisitor) processTypeKindList(document *ast.Document, typeRef int, value []byte, dataType jsonparser.ValueType) { + // Build arrays from scalar/object values. If the JSON value is already an array, + // walk into it; if it is null, leave it untouched. + // See the coercion table: https://spec.graphql.org/October2021/#sec-List.Input-Coercion + switch dataType { + case jsonparser.Array: + i.walkJsonArray(document, document.Types[typeRef].OfType, value) + return + case jsonparser.Null: + return + default: + } + + // Calculate the nesting depth of the variable definition + // For example: [[Int]] has nestingDepth = 2 + nestingDepth := i.calculateNestingDepth(document, typeRef) + + data, err := i.makeJSONArray(nestingDepth, value, dataType) + if err != nil { + i.StopWithInternalErr(err) + return + } + i.operation.Input.Variables, err = sjson.SetRawBytes(i.operation.Input.Variables, i.queryPath(), data) + if err != nil { + i.StopWithInternalErr(err) + return + } + + i.walkJsonArray(document, document.Types[typeRef].OfType, data) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/input_object_type_extending.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/input_object_type_extending.go new file mode 100644 index 00000000000..b5cc876d5aa --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/input_object_type_extending.go @@ -0,0 +1,54 @@ +package astnormalization + +import ( + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" +) + +func extendInputObjectTypeDefinition(walker *astvisitor.Walker) { + visitor := extendInputObjectTypeDefinitionVisitor{ + Walker: walker, + } + walker.RegisterEnterDocumentVisitor(&visitor) + walker.RegisterEnterInputObjectTypeExtensionVisitor(&visitor) +} + +func extendInputObjectTypeDefinitionKeepingOrphans(walker *astvisitor.Walker) { + visitor := extendInputObjectTypeDefinitionVisitor{ + Walker: walker, + keepExtensionOrphans: true, + } + walker.RegisterEnterDocumentVisitor(&visitor) + walker.RegisterEnterInputObjectTypeExtensionVisitor(&visitor) +} + +type extendInputObjectTypeDefinitionVisitor struct { + *astvisitor.Walker + operation *ast.Document + keepExtensionOrphans bool +} + +func (e *extendInputObjectTypeDefinitionVisitor) EnterDocument(operation, _ *ast.Document) { + e.operation = operation +} + +func (e *extendInputObjectTypeDefinitionVisitor) EnterInputObjectTypeExtension(ref int) { + nodes, exists := e.operation.Index.NodesByNameBytes(e.operation.InputObjectTypeExtensionNameBytes(ref)) + if !exists { + return + } + + for i := range nodes { + if nodes[i].Kind != ast.NodeKindInputObjectTypeDefinition { + continue + } + e.operation.ExtendInputObjectTypeDefinitionByInputObjectTypeExtension(nodes[i].Ref, ref) + return + } + + if e.keepExtensionOrphans { + return + } + + e.operation.ImportAndExtendInputObjectTypeDefinitionByInputObjectTypeExtension(ref) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/interface_type_extending.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/interface_type_extending.go new file mode 100644 index 00000000000..cd30ec908c3 --- /dev/null +++ 
b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/interface_type_extending.go @@ -0,0 +1,54 @@ +package astnormalization + +import ( + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" +) + +func extendInterfaceTypeDefinition(walker *astvisitor.Walker) { + visitor := extendInterfaceTypeDefinitionVisitor{ + Walker: walker, + } + walker.RegisterEnterDocumentVisitor(&visitor) + walker.RegisterEnterInterfaceTypeExtensionVisitor(&visitor) +} + +func extendInterfaceTypeDefinitionKeepingOrphans(walker *astvisitor.Walker) { + visitor := extendInterfaceTypeDefinitionVisitor{ + Walker: walker, + keepExtensionOrphans: true, + } + walker.RegisterEnterDocumentVisitor(&visitor) + walker.RegisterEnterInterfaceTypeExtensionVisitor(&visitor) +} + +type extendInterfaceTypeDefinitionVisitor struct { + *astvisitor.Walker + operation *ast.Document + keepExtensionOrphans bool +} + +func (e *extendInterfaceTypeDefinitionVisitor) EnterDocument(operation, _ *ast.Document) { + e.operation = operation +} + +func (e *extendInterfaceTypeDefinitionVisitor) EnterInterfaceTypeExtension(ref int) { + nodes, exists := e.operation.Index.NodesByNameBytes(e.operation.InterfaceTypeExtensionNameBytes(ref)) + if !exists { + return + } + + for i := range nodes { + if nodes[i].Kind != ast.NodeKindInterfaceTypeDefinition { + continue + } + e.operation.ExtendInterfaceTypeDefinitionByInterfaceTypeExtension(nodes[i].Ref, ref) + return + } + + if e.keepExtensionOrphans { + return + } + + e.operation.ImportAndExtendInterfaceTypeDefinitionByInterfaceTypeExtension(ref) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/object_type_extending.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/object_type_extending.go new file mode 100644 index 00000000000..34dc59c333c --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/object_type_extending.go @@ -0,0 +1,55 @@ +package astnormalization + +import ( + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" +) + +func extendObjectTypeDefinition(walker *astvisitor.Walker) { + visitor := extendObjectTypeDefinitionVisitor{ + Walker: walker, + } + walker.RegisterEnterDocumentVisitor(&visitor) + walker.RegisterEnterObjectTypeExtensionVisitor(&visitor) +} + +func extendObjectTypeDefinitionKeepingOrphans(walker *astvisitor.Walker) { + visitor := extendObjectTypeDefinitionVisitor{ + Walker: walker, + keepExtensionOrphans: true, + } + walker.RegisterEnterDocumentVisitor(&visitor) + walker.RegisterEnterObjectTypeExtensionVisitor(&visitor) +} + +type extendObjectTypeDefinitionVisitor struct { + *astvisitor.Walker + operation *ast.Document + keepExtensionOrphans bool +} + +func (e *extendObjectTypeDefinitionVisitor) EnterDocument(operation, _ *ast.Document) { + e.operation = operation +} + +func (e *extendObjectTypeDefinitionVisitor) EnterObjectTypeExtension(ref int) { + + nodes, exists := e.operation.Index.NodesByNameBytes(e.operation.ObjectTypeExtensionNameBytes(ref)) + if !exists { + return + } + + for i := range nodes { + if nodes[i].Kind != ast.NodeKindObjectTypeDefinition { + continue + } + e.operation.ExtendObjectTypeDefinitionByObjectTypeExtension(nodes[i].Ref, ref) + return + } + + if e.keepExtensionOrphans { + return + } + + e.operation.ImportAndExtendObjectTypeDefinitionByObjectTypeExtension(ref) +} diff --git 
a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/remove_self_aliasing.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/remove_self_aliasing.go new file mode 100644 index 00000000000..1751cdd2bf4 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/remove_self_aliasing.go @@ -0,0 +1,32 @@ +package astnormalization + +import ( + "bytes" + + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" +) + +func removeSelfAliasing(walker *astvisitor.Walker) { + visitor := removeSelfAliasingVisitor{} + walker.RegisterEnterDocumentVisitor(&visitor) + walker.RegisterEnterFieldVisitor(&visitor) +} + +type removeSelfAliasingVisitor struct { + operation *ast.Document +} + +func (r *removeSelfAliasingVisitor) EnterDocument(operation, definition *ast.Document) { + r.operation = operation +} + +func (r *removeSelfAliasingVisitor) EnterField(ref int) { + if !r.operation.Fields[ref].Alias.IsDefined { + return + } + if !bytes.Equal(r.operation.FieldNameBytes(ref), r.operation.FieldAliasBytes(ref)) { + return + } + r.operation.RemoveFieldAlias(ref) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/remove_type_extensions.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/remove_type_extensions.go new file mode 100644 index 00000000000..43da16ade42 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/remove_type_extensions.go @@ -0,0 +1,21 @@ +package astnormalization + +import ( + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" +) + +func removeMergedTypeExtensions(walker *astvisitor.Walker) { + visitor := removeMergedTypeExtensionsVisitor{ + Walker: walker, + } + walker.RegisterLeaveDocumentVisitor(&visitor) +} + +type removeMergedTypeExtensionsVisitor struct { + *astvisitor.Walker +} + +func (r *removeMergedTypeExtensionsVisitor) LeaveDocument(operation, definition *ast.Document) { + operation.RemoveMergedTypeExtensions() +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/scalar_type_extending.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/scalar_type_extending.go new file mode 100644 index 00000000000..206e7879e19 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/scalar_type_extending.go @@ -0,0 +1,55 @@ +package astnormalization + +import ( + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" +) + +func extendScalarTypeDefinition(walker *astvisitor.Walker) { + visitor := extendScalarTypeDefinitionVisitor{ + Walker: walker, + } + walker.RegisterEnterDocumentVisitor(&visitor) + walker.RegisterEnterScalarTypeExtensionVisitor(&visitor) +} + +func extendScalarTypeDefinitionKeepingOrphans(walker *astvisitor.Walker) { + visitor := extendScalarTypeDefinitionVisitor{ + Walker: walker, + keepExtensionOrphans: true, + } + walker.RegisterEnterDocumentVisitor(&visitor) + walker.RegisterEnterScalarTypeExtensionVisitor(&visitor) +} + +type extendScalarTypeDefinitionVisitor struct { + *astvisitor.Walker + operation *ast.Document + keepExtensionOrphans bool +} + +func (e *extendScalarTypeDefinitionVisitor) EnterDocument(operation, _ *ast.Document) { + e.operation = operation +} + +func (e *extendScalarTypeDefinitionVisitor) 
EnterScalarTypeExtension(ref int) { + + nodes, exists := e.operation.Index.NodesByNameBytes(e.operation.ScalarTypeExtensionNameBytes(ref)) + if !exists { + return + } + + for i := range nodes { + if nodes[i].Kind != ast.NodeKindScalarTypeDefinition { + continue + } + e.operation.ExtendScalarTypeDefinitionByScalarTypeExtension(nodes[i].Ref, ref) + return + } + + if e.keepExtensionOrphans { + return + } + + e.operation.ImportAndExtendScalarTypeDefinitionByScalarTypeExtension(ref) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/subgraph_sdl_normalization.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/subgraph_sdl_normalization.go new file mode 100644 index 00000000000..849f68cefdf --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/subgraph_sdl_normalization.go @@ -0,0 +1,33 @@ +package astnormalization + +import ( + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +func NormalizeSubgraphSDL(definition *ast.Document, report *operationreport.Report) { + normalizer := NewSubgraphSDLNormalizer() + normalizer.NormalizeSubgraphSDL(definition, report) +} + +type SubgraphSDLNormalizer struct { + walker *astvisitor.Walker +} + +func NewSubgraphSDLNormalizer() *SubgraphSDLNormalizer { + normalizer := &SubgraphSDLNormalizer{} + normalizer.setupWalkers() + return normalizer +} + +func (s *SubgraphSDLNormalizer) setupWalkers() { + walker := astvisitor.NewWalker(48) + implicitExtendRootOperation(&walker) + extendsDirective(&walker) + s.walker = &walker +} + +func (s *SubgraphSDLNormalizer) NormalizeSubgraphSDL(definition *ast.Document, report *operationreport.Report) { + s.walker.Walk(definition, nil, report) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/union_type_extending.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/union_type_extending.go new file mode 100644 index 00000000000..48f1652e18d --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/union_type_extending.go @@ -0,0 +1,54 @@ +package astnormalization + +import ( + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" +) + +func extendUnionTypeDefinition(walker *astvisitor.Walker) { + visitor := extendUnionTypeDefinitionVisitor{ + Walker: walker, + } + walker.RegisterEnterDocumentVisitor(&visitor) + walker.RegisterEnterUnionTypeExtensionVisitor(&visitor) +} + +func extendUnionTypeDefinitionKeepingOrphans(walker *astvisitor.Walker) { + visitor := extendUnionTypeDefinitionVisitor{ + Walker: walker, + keepExtensionOrphans: true, + } + walker.RegisterEnterDocumentVisitor(&visitor) + walker.RegisterEnterUnionTypeExtensionVisitor(&visitor) +} + +type extendUnionTypeDefinitionVisitor struct { + *astvisitor.Walker + operation *ast.Document + keepExtensionOrphans bool +} + +func (e *extendUnionTypeDefinitionVisitor) EnterDocument(operation, _ *ast.Document) { + e.operation = operation +} + +func (e *extendUnionTypeDefinitionVisitor) EnterUnionTypeExtension(ref int) { + nodes, exists := e.operation.Index.NodesByNameBytes(e.operation.UnionTypeExtensionNameBytes(ref)) + if !exists { + return + } + + for i := range nodes { + if nodes[i].Kind != ast.NodeKindUnionTypeDefinition { + continue + } + 
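+		// merge the extension into the first matching union type definition, then stop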
e.operation.ExtendUnionTypeDefinitionByUnionTypeExtension(nodes[i].Ref, ref) + return + } + + if e.keepExtensionOrphans { + return + } + + e.operation.ImportAndExtendUnionTypeDefinitionByUnionTypeExtension(ref) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/variables_default_value_extraction.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/variables_default_value_extraction.go new file mode 100644 index 00000000000..e52bfb4c1a3 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/variables_default_value_extraction.go @@ -0,0 +1,218 @@ +package astnormalization + +import ( + "bytes" + + "github.com/buger/jsonparser" + "github.com/tidwall/sjson" + + "github.com/TykTechnologies/graphql-go-tools/internal/pkg/unsafebytes" + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astimport" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" +) + +func extractVariablesDefaultValue(walker *astvisitor.Walker) *variablesDefaultValueExtractionVisitor { + visitor := &variablesDefaultValueExtractionVisitor{ + Walker: walker, + } + walker.RegisterEnterDocumentVisitor(visitor) + walker.RegisterOperationDefinitionVisitor(visitor) + walker.RegisterEnterVariableDefinitionVisitor(visitor) + walker.RegisterEnterFieldVisitor(visitor) + return visitor +} + +type variablesDefaultValueExtractionVisitor struct { + *astvisitor.Walker + operation, definition *ast.Document + importer astimport.Importer + operationName []byte + operationRef int + skip bool + nonNullableVariablesNames [][]byte + extractedVariablesRefs []int +} + +func (v *variablesDefaultValueExtractionVisitor) EnterField(ref int) { + if v.skip { + return + } + + // find field definition from document + fieldName := v.operation.FieldNameBytes(ref) + fieldDefRef, ok := v.definition.NodeFieldDefinitionByName(v.EnclosingTypeDefinition, fieldName) + if !ok { + return + } + + // skip when field has no args in the document + if !v.definition.FieldDefinitions[fieldDefRef].HasArgumentsDefinitions { + return + } + + for _, definitionInputValueDefRef := range v.definition.FieldDefinitions[fieldDefRef].ArgumentsDefinition.Refs { + operationArgRef, exists := v.operation.FieldArgument(ref, v.definition.InputValueDefinitionNameBytes(definitionInputValueDefRef)) + + if exists { + operationArgValue := v.operation.ArgumentValue(operationArgRef) + if v.operation.ValueContainsVariable(operationArgValue) { + defTypeRef := v.definition.InputValueDefinitions[definitionInputValueDefRef].Type + v.traverseValue(operationArgValue, defTypeRef) + } + } else { + v.processDefaultFieldArguments(ref, definitionInputValueDefRef) + } + } +} + +func (v *variablesDefaultValueExtractionVisitor) EnterVariableDefinition(ref int) { + if v.skip { + return + } + + // skip when we have no default value for variable + if !v.operation.VariableDefinitionHasDefaultValue(ref) { + return + } + + variableName := v.operation.VariableDefinitionNameString(ref) + + // remove variable DefaultValue from operation + v.operation.VariableDefinitions[ref].DefaultValue.IsDefined = false + + // skip when variable was provided + _, _, _, err := jsonparser.Get(v.operation.Input.Variables, variableName) + if err == nil { + return + } + + // store extracted variable ref + v.extractedVariablesRefs = append(v.extractedVariablesRefs, ref) + + valueBytes, err := v.operation.ValueToJSON(v.operation.VariableDefinitionDefaultValue(ref)) + if err != nil { + 
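+			// a default value that cannot be converted to JSON is skipped silently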
return + } + + v.operation.Input.Variables, err = sjson.SetRawBytes(v.operation.Input.Variables, variableName, valueBytes) + if err != nil { + v.StopWithInternalErr(err) + return + } +} + +func (v *variablesDefaultValueExtractionVisitor) EnterOperationDefinition(ref int) { + if len(v.operationName) == 0 { + v.skip = false + return + } + operationName := v.operation.OperationDefinitionNameBytes(ref) + v.operationRef = ref + v.skip = !bytes.Equal(operationName, v.operationName) + + v.nonNullableVariablesNames = make([][]byte, 0, len(v.operation.VariableDefinitions)) + v.extractedVariablesRefs = make([]int, 0, len(v.operation.VariableDefinitions)) +} + +func (v *variablesDefaultValueExtractionVisitor) LeaveOperationDefinition(_ int) { + if v.skip { + return + } + + // find extracted variables that need to become non-null and update their types + for j := 0; j < len(v.extractedVariablesRefs); j++ { + variableDefRef := v.extractedVariablesRefs[j] + + if v.operation.Types[v.operation.VariableDefinitions[variableDefRef].Type].TypeKind == ast.TypeKindNonNull { + // skip when the variable is already non-null + continue + } + + for i := 0; i < len(v.nonNullableVariablesNames); i++ { + if bytes.Equal(v.operation.VariableDefinitionNameBytes(variableDefRef), v.nonNullableVariablesNames[i]) { + // if the variable is nullable, make it non-null: a non-null type satisfies both non-null and nullable usages, + // which is required to keep the operation valid after variable extraction + v.operation.VariableDefinitions[variableDefRef].Type = v.operation.AddNonNullType(v.operation.VariableDefinitions[variableDefRef].Type) + } + } + } +} + +func (v *variablesDefaultValueExtractionVisitor) traverseValue(value ast.Value, defTypeRef int) { + switch value.Kind { + case ast.ValueKindVariable: + v.saveArgumentsWithTypeNotNull(value.Ref, defTypeRef) + case ast.ValueKindList: + for _, ref := range v.operation.ListValues[value.Ref].Refs { + listValue := v.operation.Value(ref) + if !v.operation.ValueContainsVariable(listValue) { + continue + } + + listTypeRef := defTypeRef + // omit non-null to get to the list itself + if v.definition.Types[listTypeRef].TypeKind == ast.TypeKindNonNull { + listTypeRef = v.definition.Types[listTypeRef].OfType + } + + listItemType := v.definition.Types[listTypeRef].OfType + v.traverseValue(listValue, listItemType) + } + case ast.ValueKindObject: + for _, ref := range v.operation.ObjectValues[value.Ref].Refs { + fieldName := v.operation.Input.ByteSlice(v.operation.ObjectFields[ref].Name) + fieldValue := v.operation.ObjectFields[ref].Value + + typeName := v.definition.ResolveTypeNameString(defTypeRef) + typeDefinitionNode, ok := v.definition.Index.FirstNodeByNameStr(typeName) + if !ok { + continue + } + objectFieldDefinitionRef, ok := v.definition.NodeInputFieldDefinitionByName(typeDefinitionNode, fieldName) + if !ok { + continue + } + + if v.operation.ValueContainsVariable(fieldValue) { + v.traverseValue(fieldValue, v.definition.InputValueDefinitions[objectFieldDefinitionRef].Type) + } + } + } +} + +func (v *variablesDefaultValueExtractionVisitor) saveArgumentsWithTypeNotNull(operationVariableValueRef, defTypeRef int) { + if v.definition.Types[defTypeRef].TypeKind != ast.TypeKindNonNull { + return + } + + v.nonNullableVariablesNames = append(v.nonNullableVariablesNames, v.operation.VariableValueNameBytes(operationVariableValueRef)) +} + +func (v *variablesDefaultValueExtractionVisitor) processDefaultFieldArguments(operationFieldRef, definitionInputValueDefRef int) { + if !v.definition.InputValueDefinitionHasDefaultValue(definitionInputValueDefRef) { + return + } + + 
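+	// The argument was omitted in the operation but has a default in the schema:
+	// synthesize a fresh variable, store the default value under that name in the
+	// variables JSON, then attach a matching argument and variable definition to
+	// the operation below.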
variableNameBytes := v.operation.GenerateUnusedVariableDefinitionName(v.Ancestors[0].Ref) + valueBytes, err := v.definition.ValueToJSON(v.definition.InputValueDefinitionDefaultValue(definitionInputValueDefRef)) + if err != nil { + return + } + v.operation.Input.Variables, err = sjson.SetRawBytes(v.operation.Input.Variables, unsafebytes.BytesToString(variableNameBytes), valueBytes) + if err != nil { + v.StopWithInternalErr(err) + return + } + + variableValueRef, argRef := v.operation.ImportVariableValueArgument(v.definition.InputValueDefinitionNameBytes(definitionInputValueDefRef), variableNameBytes) + defType := v.definition.InputValueDefinitions[definitionInputValueDefRef].Type + importedDefType := v.importer.ImportType(defType, v.definition, v.operation) + + v.operation.AddArgumentToField(operationFieldRef, argRef) + v.operation.AddVariableDefinitionToOperationDefinition(v.operationRef, variableValueRef, importedDefType) +} + +func (v *variablesDefaultValueExtractionVisitor) EnterDocument(operation, definition *ast.Document) { + v.operation, v.definition = operation, definition +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/variables_extraction.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/variables_extraction.go new file mode 100644 index 00000000000..c4744cc9c7f --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/variables_extraction.go @@ -0,0 +1,198 @@ +package astnormalization + +import ( + "bytes" + + "github.com/tidwall/sjson" + + "github.com/TykTechnologies/graphql-go-tools/internal/pkg/unsafebytes" + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astimport" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" +) + +func extractVariables(walker *astvisitor.Walker) *variablesExtractionVisitor { + visitor := &variablesExtractionVisitor{ + Walker: walker, + } + walker.RegisterEnterDocumentVisitor(visitor) + walker.RegisterEnterArgumentVisitor(visitor) + walker.RegisterEnterOperationVisitor(visitor) + return visitor +} + +type variablesExtractionVisitor struct { + *astvisitor.Walker + operation, definition *ast.Document + importer astimport.Importer + operationName []byte + skip bool +} + +func (v *variablesExtractionVisitor) EnterOperationDefinition(ref int) { + if len(v.operationName) == 0 { + v.skip = false + return + } + operationName := v.operation.OperationDefinitionNameBytes(ref) + v.skip = !bytes.Equal(operationName, v.operationName) +} + +func (v *variablesExtractionVisitor) EnterArgument(ref int) { + if v.skip { + return + } + if v.operation.Arguments[ref].Value.Kind == ast.ValueKindVariable { + return + } + if len(v.Ancestors) == 0 || v.Ancestors[0].Kind != ast.NodeKindOperationDefinition { + return + } + + for i := range v.Ancestors { + if v.Ancestors[i].Kind == ast.NodeKindDirective { + return // skip all directives in any case + } + } + + inputValueDefinition, ok := v.Walker.ArgumentInputValueDefinition(ref) + if !ok { + return + } + + containsVariable := v.operation.ValueContainsVariable(v.operation.Arguments[ref].Value) + if containsVariable { + v.traverseValue(v.operation.Arguments[ref].Value, ref, inputValueDefinition) + return + } + + variableNameBytes := v.operation.GenerateUnusedVariableDefinitionName(v.Ancestors[0].Ref) + valueBytes, err := v.operation.ValueToJSON(v.operation.Arguments[ref].Value) + if err != nil { + return + } + v.operation.Input.Variables, err = 
sjson.SetRawBytes(v.operation.Input.Variables, unsafebytes.BytesToString(variableNameBytes), valueBytes) + if err != nil { + v.StopWithInternalErr(err) + return + } + + variable := ast.VariableValue{ + Name: v.operation.Input.AppendInputBytes(variableNameBytes), + } + + v.operation.VariableValues = append(v.operation.VariableValues, variable) + + varRef := len(v.operation.VariableValues) - 1 + + v.operation.Arguments[ref].Value.Ref = varRef + v.operation.Arguments[ref].Value.Kind = ast.ValueKindVariable + + defRef, ok := v.ArgumentInputValueDefinition(ref) + if !ok { + return + } + + defType := v.definition.InputValueDefinitions[defRef].Type + + importedDefType := v.importer.ImportType(defType, v.definition, v.operation) + + v.operation.VariableDefinitions = append(v.operation.VariableDefinitions, ast.VariableDefinition{ + VariableValue: ast.Value{ + Kind: ast.ValueKindVariable, + Ref: varRef, + }, + Type: importedDefType, + }) + + newVariableRef := len(v.operation.VariableDefinitions) - 1 + + v.operation.OperationDefinitions[v.Ancestors[0].Ref].VariableDefinitions.Refs = + append(v.operation.OperationDefinitions[v.Ancestors[0].Ref].VariableDefinitions.Refs, newVariableRef) + v.operation.OperationDefinitions[v.Ancestors[0].Ref].HasVariableDefinitions = true +} + +func (v *variablesExtractionVisitor) EnterDocument(operation, definition *ast.Document) { + v.operation, v.definition = operation, definition +} + +func (v *variablesExtractionVisitor) traverseValue(value ast.Value, argRef, inputValueDefinition int) { + switch value.Kind { + case ast.ValueKindList: + for _, ref := range v.operation.ListValues[value.Ref].Refs { + listValue := v.operation.Value(ref) + v.traverseValue(listValue, argRef, inputValueDefinition) + } + case ast.ValueKindObject: + objectValueRefs := make([]int, len(v.operation.ObjectValues[value.Ref].Refs)) + copy(objectValueRefs, v.operation.ObjectValues[value.Ref].Refs) + for _, ref := range objectValueRefs { + fieldName := v.operation.Input.ByteSlice(v.operation.ObjectFields[ref].Name) + fieldValue := v.operation.ObjectFields[ref].Value + switch fieldValue.Kind { + case ast.ValueKindVariable: + continue + default: + + typeName := v.definition.ResolveTypeNameString(v.definition.InputValueDefinitions[inputValueDefinition].Type) + typeDefinitionNode, ok := v.definition.Index.FirstNodeByNameStr(typeName) + if !ok { + continue + } + objectFieldDefinition, ok := v.definition.NodeInputFieldDefinitionByName(typeDefinitionNode, fieldName) + if !ok { + continue + } + + if v.operation.ValueContainsVariable(fieldValue) { + v.traverseValue(fieldValue, argRef, objectFieldDefinition) + continue + } + v.extractObjectValue(ref, fieldValue, objectFieldDefinition) + } + } + } +} + +func (v *variablesExtractionVisitor) extractObjectValue(objectField int, fieldValue ast.Value, inputValueDefinition int) { + + variableNameBytes := v.operation.GenerateUnusedVariableDefinitionName(v.Ancestors[0].Ref) + valueBytes, err := v.operation.ValueToJSON(fieldValue) + if err != nil { + return + } + v.operation.Input.Variables, err = sjson.SetRawBytes(v.operation.Input.Variables, unsafebytes.BytesToString(variableNameBytes), valueBytes) + if err != nil { + v.StopWithInternalErr(err) + return + } + + variable := ast.VariableValue{ + Name: v.operation.Input.AppendInputBytes(variableNameBytes), + } + + v.operation.VariableValues = append(v.operation.VariableValues, variable) + + varRef := len(v.operation.VariableValues) - 1 + + v.operation.ObjectFields[objectField].Value.Kind = ast.ValueKindVariable + 
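+	// point the rewritten object field at the freshly appended variable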
v.operation.ObjectFields[objectField].Value.Ref = varRef + + defType := v.definition.InputValueDefinitions[inputValueDefinition].Type + + importedDefType := v.importer.ImportType(defType, v.definition, v.operation) + + v.operation.VariableDefinitions = append(v.operation.VariableDefinitions, ast.VariableDefinition{ + VariableValue: ast.Value{ + Kind: ast.ValueKindVariable, + Ref: varRef, + }, + Type: importedDefType, + }) + + newVariableRef := len(v.operation.VariableDefinitions) - 1 + + v.operation.OperationDefinitions[v.Ancestors[0].Ref].VariableDefinitions.Refs = + append(v.operation.OperationDefinitions[v.Ancestors[0].Ref].VariableDefinitions.Refs, newVariableRef) + v.operation.OperationDefinitions[v.Ancestors[0].Ref].HasVariableDefinitions = true +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/variables_unused_deletion.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/variables_unused_deletion.go new file mode 100644 index 00000000000..b271aad45ed --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization/variables_unused_deletion.go @@ -0,0 +1,93 @@ +package astnormalization + +import ( + "bytes" + + "github.com/buger/jsonparser" + + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" +) + +func deleteUnusedVariables(walker *astvisitor.Walker) *deleteUnusedVariablesVisitor { + visitor := &deleteUnusedVariablesVisitor{ + Walker: walker, + } + visitor.Walker.RegisterEnterDocumentVisitor(visitor) + visitor.Walker.RegisterOperationDefinitionVisitor(visitor) + visitor.Walker.RegisterEnterVariableDefinitionVisitor(visitor) + visitor.Walker.RegisterEnterArgumentVisitor(visitor) + return visitor +} + +type deleteUnusedVariablesVisitor struct { + *astvisitor.Walker + operation, definition *ast.Document + definedVariables []int + operationName []byte + skip bool +} + +func (d *deleteUnusedVariablesVisitor) LeaveOperationDefinition(ref int) { + for _, variable := range d.definedVariables { + variableName := d.operation.VariableDefinitionNameString(variable) + for i, variableDefinitionRef := range d.operation.OperationDefinitions[ref].VariableDefinitions.Refs { + if variable == variableDefinitionRef { + d.operation.OperationDefinitions[ref].VariableDefinitions.Refs = append(d.operation.OperationDefinitions[ref].VariableDefinitions.Refs[:i], d.operation.OperationDefinitions[ref].VariableDefinitions.Refs[i+1:]...) + d.operation.Input.Variables = jsonparser.Delete(d.operation.Input.Variables, variableName) + d.operation.OperationDefinitions[ref].HasVariableDefinitions = len(d.operation.OperationDefinitions[ref].VariableDefinitions.Refs) != 0 + } + } + + } + d.skip = true +} + +func (d *deleteUnusedVariablesVisitor) removeDefinedVariableWithName(name []byte) { + for i, variable := range d.definedVariables { + definedVariableNameBytes := d.operation.VariableDefinitionNameBytes(variable) + if bytes.Equal(name, definedVariableNameBytes) { + d.definedVariables = append(d.definedVariables[:i], d.definedVariables[i+1:]...) 
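+			// indices shifted after the removal, so rescan from the start; this also
+			// catches duplicate entries for the same variable name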
+ d.removeDefinedVariableWithName(name) + return + } + } +} + +func (d *deleteUnusedVariablesVisitor) traverseValue(value ast.Value) { + switch value.Kind { + case ast.ValueKindVariable: + d.removeDefinedVariableWithName(d.operation.VariableValueNameBytes(value.Ref)) + case ast.ValueKindList: + for _, ref := range d.operation.ListValues[value.Ref].Refs { + d.traverseValue(d.operation.Value(ref)) + } + case ast.ValueKindObject: + for _, ref := range d.operation.ObjectValues[value.Ref].Refs { + d.traverseValue(d.operation.ObjectField(ref).Value) + } + } +} + +func (d *deleteUnusedVariablesVisitor) EnterArgument(ref int) { + if d.skip { + return + } + d.traverseValue(d.operation.Arguments[ref].Value) +} + +func (d *deleteUnusedVariablesVisitor) EnterVariableDefinition(ref int) { + if d.skip { + return + } + d.definedVariables = append(d.definedVariables, ref) +} + +func (d *deleteUnusedVariablesVisitor) EnterOperationDefinition(ref int) { + d.definedVariables = d.definedVariables[:0] + d.skip = false +} + +func (d *deleteUnusedVariablesVisitor) EnterDocument(operation, definition *ast.Document) { + d.operation, d.definition = operation, definition +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astparser/errors.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astparser/errors.go new file mode 100644 index 00000000000..521ab426ed5 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astparser/errors.go @@ -0,0 +1,53 @@ +package astparser + +import ( + "fmt" + + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/identkeyword" + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/keyword" + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/position" +) + +type origin struct { + file string + line int + funcName string +} + +// ErrUnexpectedToken is a custom error object containing all necessary information to properly render an unexpected token error +type ErrUnexpectedToken struct { + keyword keyword.Keyword + expected []keyword.Keyword + position position.Position + literal string + origins []origin +} + +func (e ErrUnexpectedToken) Error() string { + + origins := "" + for _, origin := range e.origins { + origins = origins + fmt.Sprintf("\n\t\t%s:%d\n\t\t%s", origin.file, origin.line, origin.funcName) + } + + return fmt.Sprintf("unexpected token - keyword: '%s' literal: '%s' - expected: '%s' position: '%s'%s", e.keyword, e.literal, e.expected, e.position, origins) +} + +// ErrUnexpectedIdentKey is a custom error object to properly render an unexpected ident key error +type ErrUnexpectedIdentKey struct { + keyword identkeyword.IdentKeyword + expected []identkeyword.IdentKeyword + position position.Position + literal string + origins []origin +} + +func (e ErrUnexpectedIdentKey) Error() string { + + origins := "" + for _, origin := range e.origins { + origins = origins + fmt.Sprintf("\n\t\t%s:%d\n\t\t%s", origin.file, origin.line, origin.funcName) + } + + return fmt.Sprintf("unexpected ident - keyword: '%s' literal: '%s' - expected: '%s' position: '%s'%s", e.keyword, e.literal, e.expected, e.position, origins) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astparser/parser.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astparser/parser.go new file mode 100644 index 00000000000..fb7dbb37495 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astparser/parser.go @@ -0,0 +1,1781 @@ +// Package astparser is used to turn raw GraphQL documents into an AST. 
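+//
+// A minimal usage sketch for one-off parsing (for hot paths, re-use a Parser,
+// as the function comments below advise):
+//
+//	doc, report := ParseGraphqlDocumentString(`query { hello }`)
+//	if report.HasErrors() {
+//		// handle parse errors
+//	}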
+package astparser + +import ( + "fmt" + "runtime" + + "github.com/TykTechnologies/graphql-go-tools/internal/pkg/unsafebytes" + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/graphqlerrors" + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/identkeyword" + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/keyword" + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/position" + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/token" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +// ParseGraphqlDocumentString takes a raw GraphQL document in string format and parses it into an AST. +// This function creates a new parser as well as a new AST for every call. +// Therefore you shouldn't use this function in a hot path. +// Instead create a parser as well as AST objects and re-use them. +func ParseGraphqlDocumentString(input string) (ast.Document, operationreport.Report) { + return ParseGraphqlDocumentBytes([]byte(input)) +} + +// ParseGraphqlDocumentBytes takes a raw GraphQL document in byte slice format and parses it into an AST. +// This function creates a new parser as well as a new AST for every call. +// Therefore you shouldn't use this function in a hot path. +// Instead create a parser as well as AST objects and re-use them. +func ParseGraphqlDocumentBytes(input []byte) (ast.Document, operationreport.Report) { + parser := NewParser() + doc := *ast.NewDocument() + doc.Input.ResetInputBytes(input) + report := operationreport.Report{} + parser.Parse(&doc, &report) + return doc, report +} + +// Parser takes a raw input and turns it into an AST +// use NewParser() to create a parser +// Don't create new parsers in the hot path, re-use them. +type Parser struct { + document *ast.Document + report *operationreport.Report + tokenizer *Tokenizer + shouldIndex bool + reportInternalErrors bool +} + +// NewParser returns a new parser with all values properly initialized +func NewParser() *Parser { + return &Parser{ + tokenizer: NewTokenizer(), + shouldIndex: true, + reportInternalErrors: false, + } +} + +// PrepareImport prepares the Parser for importing new Nodes into an AST without directly parsing the content +func (p *Parser) PrepareImport(document *ast.Document, report *operationreport.Report) { + p.document = document + p.report = report + p.tokenize() +} + +// Parse parses all input in a Document.Input into the Document +func (p *Parser) Parse(document *ast.Document, report *operationreport.Report) { + p.document = document + p.report = report + p.tokenize() + p.parse() +} + +func (p *Parser) tokenize() { + p.tokenizer.Tokenize(&p.document.Input) +} + +func (p *Parser) parse() { + for { + key, literalReference := p.peekLiteral() + + switch key { + case keyword.EOF: + p.read() + return + case keyword.LBRACE: + p.parseOperationDefinition() + case keyword.STRING, keyword.BLOCKSTRING: + p.parseRootDescription() + case keyword.IDENT: + keyIdent := p.identKeywordSliceRef(literalReference) + switch keyIdent { + case identkeyword.ENUM: + p.parseEnumTypeDefinition(nil) + case identkeyword.TYPE: + p.parseObjectTypeDefinition(nil) + case identkeyword.UNION: + p.parseUnionTypeDefinition(nil) + case identkeyword.QUERY, identkeyword.MUTATION, identkeyword.SUBSCRIPTION: + p.parseOperationDefinition() + case identkeyword.INPUT: + p.parseInputObjectTypeDefinition(nil) + case identkeyword.EXTEND: + p.parseExtension() + case identkeyword.SCHEMA: + p.parseSchemaDefinition() + case identkeyword.SCALAR: + 
p.parseScalarTypeDefinition(nil) + case identkeyword.FRAGMENT: + p.parseFragmentDefinition() + case identkeyword.INTERFACE: + p.parseInterfaceTypeDefinition(nil) + case identkeyword.DIRECTIVE: + p.parseDirectiveDefinition(nil) + default: + p.errUnexpectedIdentKey(p.read(), keyIdent, identkeyword.ENUM, identkeyword.TYPE, identkeyword.UNION, identkeyword.QUERY, identkeyword.INPUT, identkeyword.EXTEND, identkeyword.SCHEMA, identkeyword.SCALAR, identkeyword.FRAGMENT, identkeyword.INTERFACE, identkeyword.DIRECTIVE) + } + default: + p.errUnexpectedToken(p.read(), keyword.EOF, keyword.LBRACE, keyword.COMMENT, keyword.STRING, keyword.BLOCKSTRING, keyword.IDENT) + } + + if p.report.HasErrors() { + return + } + } +} + +func (p *Parser) identKeywordToken(token token.Token) identkeyword.IdentKeyword { + return identkeyword.KeywordFromLiteral(p.document.Input.ByteSlice(token.Literal)) +} + +func (p *Parser) identKeywordSliceRef(ref ast.ByteSliceReference) identkeyword.IdentKeyword { + return identkeyword.KeywordFromLiteral(p.document.Input.ByteSlice(ref)) +} + +func (p *Parser) errUnexpectedIdentKey(unexpected token.Token, unexpectedKey identkeyword.IdentKeyword, expectedKeywords ...identkeyword.IdentKeyword) { + + if p.report.HasErrors() { + return + } + + p.report.AddExternalError(operationreport.ExternalError{ + Message: fmt.Sprintf("unexpected literal - got: %s want one of: %v", unexpectedKey, expectedKeywords), + Locations: []graphqlerrors.Location{ + { + Line: unexpected.TextPosition.LineStart, + Column: unexpected.TextPosition.CharStart, + }, + }, + }) + + if !p.reportInternalErrors { + return + } + + origins := make([]origin, 3) + for i := range origins { + fpcs := make([]uintptr, 1) + callers := runtime.Callers(2+i, fpcs) + + if callers == 0 { + origins = origins[:i] + break + } + + fn := runtime.FuncForPC(fpcs[0]) + file, line := fn.FileLine(fpcs[0]) + + origins[i].file = file + origins[i].line = line + origins[i].funcName = fn.Name() + } + + p.report.AddInternalError(ErrUnexpectedIdentKey{ + keyword: unexpectedKey, + position: unexpected.TextPosition, + literal: p.document.Input.ByteSliceString(unexpected.Literal), + origins: origins, + expected: expectedKeywords, + }) +} + +func (p *Parser) errUnexpectedToken(unexpected token.Token, expectedKeywords ...keyword.Keyword) { + + if p.report.HasErrors() { + return + } + + p.report.AddExternalError(operationreport.ExternalError{ + Message: fmt.Sprintf("unexpected token - got: %s want one of: %v", unexpected.Keyword, expectedKeywords), + Locations: []graphqlerrors.Location{ + { + Line: unexpected.TextPosition.LineStart, + Column: unexpected.TextPosition.CharStart, + }, + }, + }) + + if !p.reportInternalErrors { + return + } + + origins := make([]origin, 3) + for i := range origins { + fpcs := make([]uintptr, 1) + callers := runtime.Callers(2+i, fpcs) + + if callers == 0 { + origins = origins[:i] + break + } + + fn := runtime.FuncForPC(fpcs[0]) + file, line := fn.FileLine(fpcs[0]) + + origins[i].file = file + origins[i].line = line + origins[i].funcName = fn.Name() + } + + p.report.AddInternalError(ErrUnexpectedToken{ + keyword: unexpected.Keyword, + position: unexpected.TextPosition, + literal: p.document.Input.ByteSliceString(unexpected.Literal), + origins: origins, + expected: expectedKeywords, + }) +} + +func (p *Parser) parseSchemaDefinition() { + + schemaLiteral := p.read() + + schemaDefinition := ast.SchemaDefinition{ + SchemaLiteral: schemaLiteral.TextPosition, + } + + if p.peekEquals(keyword.AT) { + schemaDefinition.Directives = 
p.parseDirectiveList() + schemaDefinition.HasDirectives = len(schemaDefinition.Directives.Refs) > 0 + } + + p.parseRootOperationTypeDefinitionList(&schemaDefinition.RootOperationTypeDefinitions) + + p.document.SchemaDefinitions = append(p.document.SchemaDefinitions, schemaDefinition) + + ref := len(p.document.SchemaDefinitions) - 1 + rootNode := ast.Node{ + Kind: ast.NodeKindSchemaDefinition, + Ref: ref, + } + if p.shouldIndex { + p.indexNode(schemaLiteral.Literal, rootNode) + } + p.document.RootNodes = append(p.document.RootNodes, rootNode) +} + +func (p *Parser) parseRootOperationTypeDefinitionList(list *ast.RootOperationTypeDefinitionList) { + + curlyBracketOpen := p.mustRead(keyword.LBRACE) + + for { + next := p.peek() + switch next { + case keyword.RBRACE: + + curlyBracketClose := p.read() + list.LBrace = curlyBracketOpen.TextPosition + list.RBrace = curlyBracketClose.TextPosition + return + case keyword.IDENT: + + _, operationType := p.mustReadOneOf(identkeyword.QUERY, identkeyword.MUTATION, identkeyword.SUBSCRIPTION) + colon := p.mustRead(keyword.COLON) + namedType := p.mustRead(keyword.IDENT) + + rootOperationTypeDefinition := ast.RootOperationTypeDefinition{ + OperationType: p.operationTypeFromIdentKeyword(operationType), + Colon: colon.TextPosition, + NamedType: ast.Type{ + TypeKind: ast.TypeKindNamed, + Name: namedType.Literal, + OfType: ast.InvalidRef, + }, + } + + p.document.RootOperationTypeDefinitions = append(p.document.RootOperationTypeDefinitions, rootOperationTypeDefinition) + ref := len(p.document.RootOperationTypeDefinitions) - 1 + + if cap(list.Refs) == 0 { + list.Refs = p.document.Refs[p.document.NextRefIndex()][:0] + } + + list.Refs = append(list.Refs, ref) + + if p.shouldIndex { + p.indexRootOperationTypeDefinition(rootOperationTypeDefinition) + } + + default: + p.errUnexpectedToken(p.read()) + return + } + + if p.report.HasErrors() { + return + } + } +} + +func (p *Parser) indexRootOperationTypeDefinition(definition ast.RootOperationTypeDefinition) { + switch definition.OperationType { + case ast.OperationTypeQuery: + p.document.Index.QueryTypeName = p.document.Input.ByteSlice(definition.NamedType.Name) + case ast.OperationTypeMutation: + p.document.Index.MutationTypeName = p.document.Input.ByteSlice(definition.NamedType.Name) + case ast.OperationTypeSubscription: + p.document.Index.SubscriptionTypeName = p.document.Input.ByteSlice(definition.NamedType.Name) + } +} + +func (p *Parser) operationTypeFromIdentKeyword(key identkeyword.IdentKeyword) ast.OperationType { + switch key { + case identkeyword.QUERY: + return ast.OperationTypeQuery + case identkeyword.MUTATION: + return ast.OperationTypeMutation + case identkeyword.SUBSCRIPTION: + return ast.OperationTypeSubscription + default: + return ast.OperationTypeUnknown + } +} + +func (p *Parser) parseDirectiveList() (list ast.DirectiveList) { + + for { + + if p.peek() != keyword.AT { + break + } + + at := p.read() + name := p.mustRead(keyword.IDENT) + + directive := ast.Directive{ + At: at.TextPosition, + Name: name.Literal, + } + + if p.peekEquals(keyword.LPAREN) { + directive.Arguments = p.parseArgumentList() + directive.HasArguments = len(directive.Arguments.Refs) > 0 + } + + p.document.Directives = append(p.document.Directives, directive) + ref := len(p.document.Directives) - 1 + + if cap(list.Refs) == 0 { + list.Refs = p.document.Refs[p.document.NextRefIndex()][:0] + } + + list.Refs = append(list.Refs, ref) + + if p.report.HasErrors() { + return + } + } + + return +} + +func (p *Parser) parseArgumentList() (list 
ast.ArgumentList) { + + bracketOpen := p.mustRead(keyword.LPAREN) + +Loop: + for { + + next := p.peek() + switch next { + case keyword.IDENT: + default: + break Loop + } + + name := p.read() + colon := p.mustRead(keyword.COLON) + value := p.ParseValue() + + argument := ast.Argument{ + Name: name.Literal, + Colon: colon.TextPosition, + Value: value, + Position: name.TextPosition, + } + + p.document.Arguments = append(p.document.Arguments, argument) + ref := len(p.document.Arguments) - 1 + + if cap(list.Refs) == 0 { + list.Refs = p.document.Refs[p.document.NextRefIndex()][:0] + } + + list.Refs = append(list.Refs, ref) + + if p.report.HasErrors() { + return + } + } + + bracketClose := p.mustRead(keyword.RPAREN) + + list.LPAREN = bracketOpen.TextPosition + list.RPAREN = bracketClose.TextPosition + + return +} + +func (p *Parser) ParseValue() (value ast.Value) { + + next, literal := p.peekLiteral() + + switch next { + case keyword.STRING, keyword.BLOCKSTRING: + value.Kind = ast.ValueKindString + value.Ref, value.Position = p.parseStringValue() + case keyword.IDENT: + key := p.identKeywordSliceRef(literal) + switch key { + case identkeyword.TRUE, identkeyword.FALSE: + value.Kind = ast.ValueKindBoolean + value.Ref, value.Position = p.parseBooleanValue() + case identkeyword.NULL: + value.Kind = ast.ValueKindNull + value.Position = p.read().TextPosition + default: + value.Kind = ast.ValueKindEnum + value.Ref, value.Position = p.parseEnumValue() + } + case keyword.DOLLAR: + value.Kind = ast.ValueKindVariable + value.Ref, value.Position = p.parseVariableValue() + case keyword.INTEGER: + value.Kind = ast.ValueKindInteger + value.Ref, value.Position = p.parseIntegerValue(nil) + case keyword.FLOAT: + value.Kind = ast.ValueKindFloat + value.Ref, value.Position = p.parseFloatValue(nil) + case keyword.SUB: + value = p.parseNegativeNumberValue() + case keyword.LBRACK: + value.Kind = ast.ValueKindList + value.Ref = p.parseValueList() + case keyword.LBRACE: + value.Kind = ast.ValueKindObject + value.Ref, value.Position = p.parseObjectValue() + default: + p.errUnexpectedToken(p.read()) + } + + return +} + +func (p *Parser) parseObjectValue() (ref int, pos position.Position) { + var objectValue ast.ObjectValue + objectValue.LBRACE = p.mustRead(keyword.LBRACE).TextPosition + + for { + next := p.peek() + switch next { + case keyword.RBRACE: + objectValue.RBRACE = p.read().TextPosition + return p.document.AddObjectValue(objectValue), objectValue.LBRACE + case keyword.IDENT: + ref := p.parseObjectField() + if cap(objectValue.Refs) == 0 { + objectValue.Refs = p.document.Refs[p.document.NextRefIndex()][:0] + } + objectValue.Refs = append(objectValue.Refs, ref) + default: + p.errUnexpectedToken(p.read(), keyword.IDENT, keyword.RBRACE) + return ast.InvalidRef, position.Position{} + } + + if p.report.HasErrors() { + return ast.InvalidRef, position.Position{} + } + } +} + +func (p *Parser) parseObjectField() int { + nameToken := p.mustRead(keyword.IDENT) + + objectField := ast.ObjectField{ + Name: nameToken.Literal, + Colon: p.mustRead(keyword.COLON).TextPosition, + Value: p.ParseValue(), + Position: nameToken.TextPosition, + } + + return p.document.AddObjectField(objectField) +} + +func (p *Parser) parseValueList() int { + var list ast.ListValue + list.LBRACK = p.mustRead(keyword.LBRACK).TextPosition + + for { + next := p.peek() + switch next { + case keyword.RBRACK: + list.RBRACK = p.read().TextPosition + p.document.ListValues = append(p.document.ListValues, list) + return len(p.document.ListValues) - 1 + default: + 
value := p.ParseValue() + p.document.Values = append(p.document.Values, value) + ref := len(p.document.Values) - 1 + if cap(list.Refs) == 0 { + list.Refs = p.document.Refs[p.document.NextRefIndex()][:0] + } + list.Refs = append(list.Refs, ref) + } + + if p.report.HasErrors() { + return ast.InvalidRef + } + } +} + +func (p *Parser) parseNegativeNumberValue() (value ast.Value) { + negativeSign := p.mustRead(keyword.SUB).TextPosition + switch p.peek() { + case keyword.INTEGER: + value.Kind = ast.ValueKindInteger + value.Ref, _ = p.parseIntegerValue(&negativeSign) + value.Position = negativeSign + case keyword.FLOAT: + value.Kind = ast.ValueKindFloat + value.Ref, _ = p.parseFloatValue(&negativeSign) + value.Position = negativeSign + default: + p.errUnexpectedToken(p.read(), keyword.INTEGER, keyword.FLOAT) + } + + return +} + +func (p *Parser) parseFloatValue(negativeSign *position.Position) (ref int, pos position.Position) { + + value := p.mustRead(keyword.FLOAT) + + if negativeSign != nil && negativeSign.CharEnd != value.TextPosition.CharStart { + p.errUnexpectedToken(value) + return ast.InvalidRef, position.Position{} + } + + floatValue := ast.FloatValue{ + Raw: value.Literal, + } + if negativeSign != nil { + floatValue.Negative = true + floatValue.NegativeSign = *negativeSign + } + + return p.document.AddFloatValue(floatValue), value.TextPosition +} + +func (p *Parser) parseIntegerValue(negativeSign *position.Position) (ref int, pos position.Position) { + + value := p.mustRead(keyword.INTEGER) + + if negativeSign != nil && negativeSign.CharEnd != value.TextPosition.CharStart { + p.errUnexpectedToken(value) + return ast.InvalidRef, position.Position{} + } + + intValue := ast.IntValue{ + Raw: value.Literal, + } + if negativeSign != nil { + intValue.Negative = true + intValue.NegativeSign = *negativeSign + } + + p.document.IntValues = append(p.document.IntValues, intValue) + return len(p.document.IntValues) - 1, value.TextPosition +} + +func (p *Parser) parseVariableValue() (ref int, pos position.Position) { + dollar := p.mustRead(keyword.DOLLAR) + var value token.Token + + next := p.peek() + switch next { + case keyword.IDENT: + value = p.read() + default: + p.errUnexpectedToken(p.read(), keyword.IDENT) + return ast.InvalidRef, position.Position{} + } + + if dollar.TextPosition.CharEnd != value.TextPosition.CharStart { + p.errUnexpectedToken(p.read(), keyword.IDENT) + return ast.InvalidRef, position.Position{} + } + + variable := ast.VariableValue{ + Dollar: dollar.TextPosition, + Name: value.Literal, + } + + p.document.VariableValues = append(p.document.VariableValues, variable) + return len(p.document.VariableValues) - 1, dollar.TextPosition +} + +func (p *Parser) parseBooleanValue() (ref int, pos position.Position) { + value := p.read() + identKey := p.identKeywordToken(value) + switch identKey { + case identkeyword.FALSE: + return 0, value.TextPosition + case identkeyword.TRUE: + return 1, value.TextPosition + default: + p.errUnexpectedIdentKey(value, identKey, identkeyword.TRUE, identkeyword.FALSE) + return ast.InvalidRef, position.Position{} + } +} + +func (p *Parser) parseEnumValue() (ref int, pos position.Position) { + value := p.mustRead(keyword.IDENT) + + enum := ast.EnumValue{ + Name: value.Literal, + } + + return p.document.AddEnumValue(enum), value.TextPosition +} + +func (p *Parser) parseStringValue() (ref int, pos position.Position) { + value := p.read() + if value.Keyword != keyword.STRING && value.Keyword != keyword.BLOCKSTRING { + p.errUnexpectedToken(value, keyword.STRING, 
keyword.BLOCKSTRING) + return ast.InvalidRef, position.Position{} + } + stringValue := ast.StringValue{ + Content: value.Literal, + BlockString: value.Keyword == keyword.BLOCKSTRING, + } + + return p.document.AddStringValue(stringValue), value.TextPosition +} + +func (p *Parser) parseObjectTypeDefinition(description *ast.Description) { + var objectTypeDefinition ast.ObjectTypeDefinition + if description != nil { + objectTypeDefinition.Description = *description + } + objectTypeDefinition.TypeLiteral = p.mustReadIdentKey(identkeyword.TYPE).TextPosition + objectTypeDefinition.Name = p.mustRead(keyword.IDENT).Literal + if p.peekEqualsIdentKey(identkeyword.IMPLEMENTS) { + objectTypeDefinition.ImplementsInterfaces = p.parseImplementsInterfaces() + } + if p.peekEquals(keyword.AT) { + objectTypeDefinition.Directives = p.parseDirectiveList() + objectTypeDefinition.HasDirectives = len(objectTypeDefinition.Directives.Refs) > 0 + } + if p.peekEquals(keyword.LBRACE) { + objectTypeDefinition.FieldsDefinition = p.parseFieldDefinitionList() + objectTypeDefinition.HasFieldDefinitions = len(objectTypeDefinition.FieldsDefinition.Refs) > 0 + } + p.document.ObjectTypeDefinitions = append(p.document.ObjectTypeDefinitions, objectTypeDefinition) + ref := len(p.document.ObjectTypeDefinitions) - 1 + node := ast.Node{ + Kind: ast.NodeKindObjectTypeDefinition, + Ref: ref, + } + if p.shouldIndex { + p.indexNode(objectTypeDefinition.Name, node) + } + p.document.RootNodes = append(p.document.RootNodes, node) +} + +func (p *Parser) indexNode(key ast.ByteSliceReference, value ast.Node) { + name := p.document.Input.ByteSlice(key) + p.document.Index.AddNodeBytes(name, value) +} + +func (p *Parser) parseRootDescription() { + + description := p.parseDescription() + + key, literal := p.peekLiteral() + if key != keyword.IDENT { + p.errUnexpectedToken(p.read(), keyword.IDENT) + return + } + + next := p.identKeywordSliceRef(literal) + + switch next { + case identkeyword.TYPE: + p.parseObjectTypeDefinition(&description) + case identkeyword.INPUT: + p.parseInputObjectTypeDefinition(&description) + case identkeyword.SCALAR: + p.parseScalarTypeDefinition(&description) + case identkeyword.INTERFACE: + p.parseInterfaceTypeDefinition(&description) + case identkeyword.UNION: + p.parseUnionTypeDefinition(&description) + case identkeyword.ENUM: + p.parseEnumTypeDefinition(&description) + case identkeyword.DIRECTIVE: + p.parseDirectiveDefinition(&description) + case identkeyword.EXTEND: + p.parseExtension() + default: + p.errUnexpectedIdentKey(p.read(), next, identkeyword.TYPE, identkeyword.INPUT, identkeyword.SCALAR, identkeyword.INTERFACE, identkeyword.UNION, identkeyword.ENUM, identkeyword.DIRECTIVE) + } +} + +func (p *Parser) parseImplementsInterfaces() (list ast.TypeList) { + + p.read() // implements + + acceptIdent := true + acceptAnd := true + + for { + next := p.peek() + switch next { + case keyword.AND: + if acceptAnd { + acceptAnd = false + acceptIdent = true + p.read() + } else { + p.errUnexpectedToken(p.read()) + return + } + case keyword.IDENT: + if acceptIdent { + acceptIdent = false + acceptAnd = true + name := p.read() + ref := p.document.AddNamedTypeWithPosition(name.Literal, name.TextPosition) + if cap(list.Refs) == 0 { + list.Refs = p.document.Refs[p.document.NextRefIndex()][:0] + } + list.Refs = append(list.Refs, ref) + } else { + p.errUnexpectedToken(p.read()) + return + } + default: + if acceptIdent { + p.errUnexpectedToken(p.read()) + } + return + } + + if p.report.HasErrors() { + return + } + } +} + +func (p 
*Parser) parseFieldDefinitionList() (list ast.FieldDefinitionList) { + + list.LBRACE = p.mustRead(keyword.LBRACE).TextPosition + + refsInitialized := false + + for { + + next := p.peek() + + switch next { + case keyword.RBRACE: + list.RBRACE = p.read().TextPosition + return + case keyword.STRING, keyword.BLOCKSTRING, keyword.IDENT: + ref := p.parseFieldDefinition() + if !refsInitialized { + list.Refs = p.document.Refs[p.document.NextRefIndex()][:0] + refsInitialized = true + } + list.Refs = append(list.Refs, ref) + default: + p.errUnexpectedToken(p.read()) + return + } + + if p.report.HasErrors() { + return + } + } +} + +func (p *Parser) parseFieldDefinition() int { + + var fieldDefinition ast.FieldDefinition + + name := p.peek() + switch name { + case keyword.STRING, keyword.BLOCKSTRING: + fieldDefinition.Description = p.parseDescription() + case keyword.IDENT: + break + default: + p.errUnexpectedToken(p.read()) + return ast.InvalidRef + } + + nameToken := p.read() + if nameToken.Keyword != keyword.IDENT { + p.errUnexpectedToken(nameToken, keyword.IDENT) + return ast.InvalidRef + } + + fieldDefinition.Name = nameToken.Literal + if p.peekEquals(keyword.LPAREN) { + fieldDefinition.ArgumentsDefinition = p.parseInputValueDefinitionList(keyword.RPAREN) + fieldDefinition.HasArgumentsDefinitions = len(fieldDefinition.ArgumentsDefinition.Refs) > 0 + } + fieldDefinition.Colon = p.mustRead(keyword.COLON).TextPosition + fieldDefinition.Type = p.ParseType() + if p.peek() == keyword.AT { + fieldDefinition.Directives = p.parseDirectiveList() + fieldDefinition.HasDirectives = len(fieldDefinition.Directives.Refs) > 0 + } + + p.document.FieldDefinitions = append(p.document.FieldDefinitions, fieldDefinition) + return len(p.document.FieldDefinitions) - 1 +} + +func (p *Parser) parseNamedType() (ref int) { + ident := p.mustRead(keyword.IDENT) + + return p.document.AddNamedTypeWithPosition(ident.Literal, ident.TextPosition) +} + +func (p *Parser) ParseType() (ref int) { + + first := p.peek() + + if first == keyword.IDENT { + tok := p.read() + ref = p.document.AddNamedTypeWithPosition(tok.Literal, tok.TextPosition) + } else if first == keyword.LBRACK { + + openList := p.read() + ofType := p.ParseType() + closeList := p.mustRead(keyword.RBRACK) + + ref = p.document.AddListTypeWithPosition(ofType, openList.TextPosition, closeList.TextPosition) + } else { + p.errUnexpectedToken(p.read(), keyword.IDENT, keyword.LBRACK) + return + } + + next := p.peek() + if next == keyword.BANG { + bangPosition := p.read().TextPosition + if p.peek() == keyword.BANG { + p.errUnexpectedToken(p.read()) + return + } + + ref = p.document.AddNonNullTypeWithBangPosition(ref, bangPosition) + } + + return +} + +func (p *Parser) parseDescription() ast.Description { + tok := p.read() + return ast.Description{ + IsDefined: true, + Content: tok.Literal, + Position: tok.TextPosition, + IsBlockString: tok.Keyword == keyword.BLOCKSTRING, + } +} + +func (p *Parser) parseInputValueDefinitionList(closingKeyword keyword.Keyword) (list ast.InputValueDefinitionList) { + + list.LPAREN = p.read().TextPosition + + for { + next := p.peek() + switch next { + case closingKeyword: + list.RPAREN = p.read().TextPosition + return + case keyword.STRING, keyword.BLOCKSTRING, keyword.IDENT: + ref := p.parseInputValueDefinition() + if cap(list.Refs) == 0 { + list.Refs = p.document.Refs[p.document.NextRefIndex()][:0] + } + list.Refs = append(list.Refs, ref) + default: + p.errUnexpectedToken(p.read()) + return + } + + if p.report.HasErrors() { + return + } + } +} + 
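+// For orientation, parseInputValueDefinition below accepts entries of the
+// following shape (an illustrative sketch; the field name and the directive
+// are made up, not taken from any schema in this repository):
+//
+//	"maximum number of results"
+//	first: Int = 10 @myDirective
+//
+// i.e. an optional description, a name, a colon, a type, an optional
+// default value, and an optional directive list, in that order.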
+func (p *Parser) parseInputValueDefinition() int { + + var inputValueDefinition ast.InputValueDefinition + + name := p.peek() + switch name { + case keyword.STRING, keyword.BLOCKSTRING: + inputValueDefinition.Description = p.parseDescription() + case keyword.IDENT: + break + default: + p.errUnexpectedToken(p.read()) + return ast.InvalidRef + } + + inputValueDefinition.Name = p.read().Literal + inputValueDefinition.Colon = p.mustRead(keyword.COLON).TextPosition + inputValueDefinition.Type = p.ParseType() + if p.peekEquals(keyword.EQUALS) { + equals := p.read() + inputValueDefinition.DefaultValue.IsDefined = true + inputValueDefinition.DefaultValue.Equals = equals.TextPosition + inputValueDefinition.DefaultValue.Value = p.ParseValue() + } + if p.peekEquals(keyword.AT) { + inputValueDefinition.Directives = p.parseDirectiveList() + inputValueDefinition.HasDirectives = len(inputValueDefinition.Directives.Refs) > 0 + } + + p.document.InputValueDefinitions = append(p.document.InputValueDefinitions, inputValueDefinition) + return len(p.document.InputValueDefinitions) - 1 +} + +func (p *Parser) parseInputObjectTypeDefinition(description *ast.Description) { + var inputObjectTypeDefinition ast.InputObjectTypeDefinition + if description != nil { + inputObjectTypeDefinition.Description = *description + } + inputObjectTypeDefinition.InputLiteral = p.mustReadIdentKey(identkeyword.INPUT).TextPosition + inputObjectTypeDefinition.Name = p.mustRead(keyword.IDENT).Literal + if p.peekEquals(keyword.AT) { + inputObjectTypeDefinition.Directives = p.parseDirectiveList() + inputObjectTypeDefinition.HasDirectives = len(inputObjectTypeDefinition.Directives.Refs) > 0 + } + if p.peekEquals(keyword.LBRACE) { + inputObjectTypeDefinition.InputFieldsDefinition = p.parseInputValueDefinitionList(keyword.RBRACE) + inputObjectTypeDefinition.HasInputFieldsDefinition = len(inputObjectTypeDefinition.InputFieldsDefinition.Refs) > 0 + } + p.document.InputObjectTypeDefinitions = append(p.document.InputObjectTypeDefinitions, inputObjectTypeDefinition) + ref := len(p.document.InputObjectTypeDefinitions) - 1 + node := ast.Node{ + Kind: ast.NodeKindInputObjectTypeDefinition, + Ref: ref, + } + if p.shouldIndex { + p.indexNode(inputObjectTypeDefinition.Name, node) + } + p.document.RootNodes = append(p.document.RootNodes, node) +} + +func (p *Parser) parseScalarTypeDefinition(description *ast.Description) { + var scalarTypeDefinition ast.ScalarTypeDefinition + if description != nil { + scalarTypeDefinition.Description = *description + } + scalarTypeDefinition.ScalarLiteral = p.mustReadIdentKey(identkeyword.SCALAR).TextPosition + scalarTypeDefinition.Name = p.mustRead(keyword.IDENT).Literal + if p.peekEquals(keyword.AT) { + scalarTypeDefinition.Directives = p.parseDirectiveList() + scalarTypeDefinition.HasDirectives = len(scalarTypeDefinition.Directives.Refs) > 0 + } + p.document.ScalarTypeDefinitions = append(p.document.ScalarTypeDefinitions, scalarTypeDefinition) + ref := len(p.document.ScalarTypeDefinitions) - 1 + node := ast.Node{ + Kind: ast.NodeKindScalarTypeDefinition, + Ref: ref, + } + if p.shouldIndex { + p.indexNode(scalarTypeDefinition.Name, node) + } + p.document.RootNodes = append(p.document.RootNodes, node) +} + +func (p *Parser) parseInterfaceTypeDefinition(description *ast.Description) { + var interfaceTypeDefinition ast.InterfaceTypeDefinition + if description != nil { + interfaceTypeDefinition.Description = *description + } + interfaceTypeDefinition.InterfaceLiteral = 
p.mustReadIdentKey(identkeyword.INTERFACE).TextPosition + interfaceTypeDefinition.Name = p.mustRead(keyword.IDENT).Literal + if p.peekEqualsIdentKey(identkeyword.IMPLEMENTS) { + interfaceTypeDefinition.ImplementsInterfaces = p.parseImplementsInterfaces() + } + if p.peekEquals(keyword.AT) { + interfaceTypeDefinition.Directives = p.parseDirectiveList() + interfaceTypeDefinition.HasDirectives = len(interfaceTypeDefinition.Directives.Refs) > 0 + } + if p.peekEquals(keyword.LBRACE) { + interfaceTypeDefinition.FieldsDefinition = p.parseFieldDefinitionList() + interfaceTypeDefinition.HasFieldDefinitions = len(interfaceTypeDefinition.FieldsDefinition.Refs) > 0 + } + p.document.InterfaceTypeDefinitions = append(p.document.InterfaceTypeDefinitions, interfaceTypeDefinition) + ref := len(p.document.InterfaceTypeDefinitions) - 1 + node := ast.Node{ + Kind: ast.NodeKindInterfaceTypeDefinition, + Ref: ref, + } + if p.shouldIndex { + p.indexNode(interfaceTypeDefinition.Name, node) + } + p.document.RootNodes = append(p.document.RootNodes, node) +} + +func (p *Parser) parseUnionTypeDefinition(description *ast.Description) { + var unionTypeDefinition ast.UnionTypeDefinition + if description != nil { + unionTypeDefinition.Description = *description + } + unionTypeDefinition.UnionLiteral = p.mustReadIdentKey(identkeyword.UNION).TextPosition + unionTypeDefinition.Name = p.mustRead(keyword.IDENT).Literal + if p.peekEquals(keyword.AT) { + unionTypeDefinition.Directives = p.parseDirectiveList() + unionTypeDefinition.HasDirectives = len(unionTypeDefinition.Directives.Refs) > 0 + } + if p.peekEquals(keyword.EQUALS) { + unionTypeDefinition.Equals = p.mustRead(keyword.EQUALS).TextPosition + unionTypeDefinition.UnionMemberTypes = p.parseUnionMemberTypes() + unionTypeDefinition.HasUnionMemberTypes = len(unionTypeDefinition.UnionMemberTypes.Refs) > 0 + } + p.document.UnionTypeDefinitions = append(p.document.UnionTypeDefinitions, unionTypeDefinition) + ref := len(p.document.UnionTypeDefinitions) - 1 + node := ast.Node{ + Kind: ast.NodeKindUnionTypeDefinition, + Ref: ref, + } + if p.shouldIndex { + p.indexNode(unionTypeDefinition.Name, node) + } + p.document.RootNodes = append(p.document.RootNodes, node) +} + +func (p *Parser) parseUnionMemberTypes() (list ast.TypeList) { + + acceptPipe := true + acceptIdent := true + expectNext := true + + for { + next := p.peek() + switch next { + case keyword.PIPE: + if acceptPipe { + acceptPipe = false + acceptIdent = true + expectNext = true + p.read() + } else { + p.errUnexpectedToken(p.read()) + return + } + case keyword.IDENT: + if acceptIdent { + acceptPipe = true + acceptIdent = false + expectNext = false + + ident := p.read() + + ref := p.document.AddNamedTypeWithPosition(ident.Literal, ident.TextPosition) + + if cap(list.Refs) == 0 { + list.Refs = p.document.Refs[p.document.NextRefIndex()][:0] + } + list.Refs = append(list.Refs, ref) + } else { + return + } + default: + if expectNext { + p.errUnexpectedToken(p.read()) + } + return + } + + if p.report.HasErrors() { + return + } + } +} + +func (p *Parser) parseEnumTypeDefinition(description *ast.Description) { + var enumTypeDefinition ast.EnumTypeDefinition + if description != nil { + enumTypeDefinition.Description = *description + } + enumTypeDefinition.EnumLiteral = p.mustReadIdentKey(identkeyword.ENUM).TextPosition + enumTypeDefinition.Name = p.mustRead(keyword.IDENT).Literal + if p.peekEquals(keyword.AT) { + enumTypeDefinition.Directives = p.parseDirectiveList() + enumTypeDefinition.HasDirectives = 
len(enumTypeDefinition.Directives.Refs) > 0 + } + if p.peekEquals(keyword.LBRACE) { + enumTypeDefinition.EnumValuesDefinition = p.parseEnumValueDefinitionList() + enumTypeDefinition.HasEnumValuesDefinition = len(enumTypeDefinition.EnumValuesDefinition.Refs) > 0 + } + p.document.EnumTypeDefinitions = append(p.document.EnumTypeDefinitions, enumTypeDefinition) + ref := len(p.document.EnumTypeDefinitions) - 1 + node := ast.Node{ + Kind: ast.NodeKindEnumTypeDefinition, + Ref: ref, + } + if p.shouldIndex { + p.indexNode(enumTypeDefinition.Name, node) + } + p.document.RootNodes = append(p.document.RootNodes, node) +} + +func (p *Parser) parseEnumValueDefinitionList() (list ast.EnumValueDefinitionList) { + + list.LBRACE = p.mustRead(keyword.LBRACE).TextPosition + + for { + next := p.peek() + switch next { + case keyword.STRING, keyword.BLOCKSTRING, keyword.IDENT: + ref := p.parseEnumValueDefinition() + if cap(list.Refs) == 0 { + list.Refs = p.document.Refs[p.document.NextRefIndex()][:0] + } + list.Refs = append(list.Refs, ref) + case keyword.RBRACE: + list.RBRACE = p.read().TextPosition + return + default: + p.errUnexpectedToken(p.read()) + return + } + + if p.report.HasErrors() { + return + } + } +} + +func (p *Parser) parseEnumValueDefinition() int { + var enumValueDefinition ast.EnumValueDefinition + next := p.peek() + switch next { + case keyword.STRING, keyword.BLOCKSTRING: + enumValueDefinition.Description = p.parseDescription() + case keyword.IDENT: + break + default: + p.errUnexpectedToken(p.read()) + return ast.InvalidRef + } + + enumValueDefinition.EnumValue = p.mustRead(keyword.IDENT).Literal + if p.peekEquals(keyword.AT) { + enumValueDefinition.Directives = p.parseDirectiveList() + enumValueDefinition.HasDirectives = len(enumValueDefinition.Directives.Refs) > 0 + } + + p.document.EnumValueDefinitions = append(p.document.EnumValueDefinitions, enumValueDefinition) + return len(p.document.EnumValueDefinitions) - 1 +} + +func (p *Parser) parseDirectiveDefinition(description *ast.Description) { + var directiveDefinition ast.DirectiveDefinition + if description != nil { + directiveDefinition.Description = *description + } + directiveDefinition.DirectiveLiteral = p.mustReadIdentKey(identkeyword.DIRECTIVE).TextPosition + directiveDefinition.At = p.mustRead(keyword.AT).TextPosition + directiveDefinition.Name = p.mustRead(keyword.IDENT).Literal + if p.peekEquals(keyword.LPAREN) { + directiveDefinition.ArgumentsDefinition = p.parseInputValueDefinitionList(keyword.RPAREN) + directiveDefinition.HasArgumentsDefinitions = len(directiveDefinition.ArgumentsDefinition.Refs) > 0 + } + + if p.peekEqualsIdentKey(identkeyword.REPEATABLE) { + directiveDefinition.Repeatable.IsRepeatable = true + directiveDefinition.Repeatable.Position = p.mustReadIdentKey(identkeyword.REPEATABLE).TextPosition + } + + directiveDefinition.On = p.mustReadIdentKey(identkeyword.ON).TextPosition + p.parseDirectiveLocations(&directiveDefinition.DirectiveLocations) + p.document.DirectiveDefinitions = append(p.document.DirectiveDefinitions, directiveDefinition) + ref := len(p.document.DirectiveDefinitions) - 1 + node := ast.Node{ + Kind: ast.NodeKindDirectiveDefinition, + Ref: ref, + } + if p.shouldIndex { + p.indexNode(directiveDefinition.Name, node) + } + p.document.RootNodes = append(p.document.RootNodes, node) +} + +func (p *Parser) parseDirectiveLocations(locations *ast.DirectiveLocations) { + acceptPipe := true + acceptIdent := true + expectNext := true + for { + next := p.peek() + switch next { + case keyword.IDENT: + if 
acceptIdent { + acceptIdent = false + acceptPipe = true + expectNext = false + + ident := p.read() + raw := p.document.Input.ByteSlice(ident.Literal) + err := locations.SetFromRaw(raw) + if err != nil { + p.report.AddExternalError(operationreport.ExternalError{ + Message: fmt.Sprintf("invalid directive location: %s", unsafebytes.BytesToString(raw)), + Locations: []graphqlerrors.Location{ + { + Line: ident.TextPosition.LineStart, + Column: ident.TextPosition.CharStart, + }, + }, + }) + if p.reportInternalErrors { + p.report.AddInternalError(err) + } + return + } + + } else { + return + } + case keyword.PIPE: + if acceptPipe { + acceptPipe = false + acceptIdent = true + expectNext = true + p.read() + } else { + p.errUnexpectedToken(p.read()) + return + } + default: + if expectNext { + p.errUnexpectedToken(p.read()) + } + return + } + + if p.report.HasErrors() { + return + } + } +} + +func (p *Parser) parseSelectionSet() (int, bool) { + + var set ast.SelectionSet + + set.SelectionRefs = p.document.Refs[p.document.NextRefIndex()][:0] + lbraceToken := p.mustRead(keyword.LBRACE) + set.LBrace = lbraceToken.TextPosition + + for { + switch p.peek() { + case keyword.RBRACE: + rbraceToken := p.read() + set.RBrace = rbraceToken.TextPosition + + if len(set.SelectionRefs) == 0 { + return 0, false + } + + p.document.SelectionSets = append(p.document.SelectionSets, set) + return len(p.document.SelectionSets) - 1, true + + case keyword.IDENT, keyword.SPREAD: + if cap(set.SelectionRefs) == 0 { + set.SelectionRefs = p.document.Refs[p.document.NextRefIndex()][:0] + } + ref := p.parseSelection() + set.SelectionRefs = append(set.SelectionRefs, ref) + default: + p.errUnexpectedToken(p.read(), keyword.RBRACE, keyword.IDENT, keyword.SPREAD) + } + + if p.report.HasErrors() { + return ast.InvalidRef, false + } + } +} + +func (p *Parser) parseSelection() int { + next := p.peek() + switch next { + case keyword.IDENT: + p.document.Selections = append(p.document.Selections, ast.Selection{ + Kind: ast.SelectionKindField, + Ref: p.parseField(), + }) + return len(p.document.Selections) - 1 + case keyword.SPREAD: + spreadToken := p.read() + return p.parseFragmentSelection(spreadToken.TextPosition) + default: + nextToken := p.read() + p.errUnexpectedToken(nextToken, keyword.IDENT, keyword.SPREAD) + return ast.InvalidRef + } +} + +func (p *Parser) parseFragmentSelection(spread position.Position) int { + + var selection ast.Selection + + next, literal := p.peekLiteral() + switch next { + case keyword.LBRACE, keyword.AT: + selection.Kind = ast.SelectionKindInlineFragment + selection.Ref = p.parseInlineFragment(spread) + case keyword.IDENT: + key := p.identKeywordSliceRef(literal) + switch key { + case identkeyword.ON: + selection.Kind = ast.SelectionKindInlineFragment + selection.Ref = p.parseInlineFragment(spread) + default: + selection.Kind = ast.SelectionKindFragmentSpread + selection.Ref = p.parseFragmentSpread(spread) + } + default: + nextToken := p.read() + p.errUnexpectedToken(nextToken, keyword.IDENT) + } + + p.document.Selections = append(p.document.Selections, selection) + return len(p.document.Selections) - 1 +} + +func (p *Parser) parseField() int { + + var field ast.Field + + firstToken := p.read() + if firstToken.Keyword != keyword.IDENT { + p.errUnexpectedToken(firstToken, keyword.IDENT) + } + + if p.peek() == keyword.COLON { + field.Alias.IsDefined = true + field.Alias.Name = firstToken.Literal + colonToken := p.read() + field.Alias.Colon = colonToken.TextPosition + nameToken := p.mustRead(keyword.IDENT) + 
field.Name = nameToken.Literal + } else { + field.Name = firstToken.Literal + } + field.Position = firstToken.TextPosition + + if p.peekEquals(keyword.LPAREN) { + field.Arguments = p.parseArgumentList() + field.HasArguments = len(field.Arguments.Refs) > 0 + } + if p.peekEquals(keyword.AT) { + field.Directives = p.parseDirectiveList() + field.HasDirectives = len(field.Directives.Refs) > 0 + } + if p.peekEquals(keyword.LBRACE) { + field.SelectionSet, field.HasSelections = p.parseSelectionSet() + } + + p.document.Fields = append(p.document.Fields, field) + return len(p.document.Fields) - 1 +} + +func (p *Parser) parseFragmentSpread(spread position.Position) int { + var fragmentSpread ast.FragmentSpread + fragmentSpread.Spread = spread + fragmentSpread.FragmentName = p.mustReadExceptIdentKey(identkeyword.ON).Literal + if p.peekEquals(keyword.AT) { + fragmentSpread.Directives = p.parseDirectiveList() + fragmentSpread.HasDirectives = len(fragmentSpread.Directives.Refs) > 0 + } + p.document.FragmentSpreads = append(p.document.FragmentSpreads, fragmentSpread) + return len(p.document.FragmentSpreads) - 1 +} + +func (p *Parser) parseInlineFragment(spread position.Position) int { + fragment := ast.InlineFragment{ + TypeCondition: ast.TypeCondition{ + Type: ast.InvalidRef, + }, + } + fragment.Spread = spread + if p.peekEqualsIdentKey(identkeyword.ON) { + fragment.TypeCondition = p.parseTypeCondition() + } + if p.peekEquals(keyword.AT) { + fragment.Directives = p.parseDirectiveList() + fragment.HasDirectives = len(fragment.Directives.Refs) > 0 + } + if p.peekEquals(keyword.LBRACE) { + fragment.SelectionSet, fragment.HasSelections = p.parseSelectionSet() + } + p.document.InlineFragments = append(p.document.InlineFragments, fragment) + return len(p.document.InlineFragments) - 1 +} + +func (p *Parser) parseTypeCondition() (typeCondition ast.TypeCondition) { + typeCondition.On = p.mustReadIdentKey(identkeyword.ON).TextPosition + typeCondition.Type = p.parseNamedType() + return +} + +func (p *Parser) parseOperationDefinition() { + + var operationDefinition ast.OperationDefinition + + next, literal := p.peekLiteral() + switch next { + case keyword.IDENT: + key := p.identKeywordSliceRef(literal) + switch key { + case identkeyword.QUERY: + operationDefinition.OperationTypeLiteral = p.read().TextPosition + operationDefinition.OperationType = ast.OperationTypeQuery + case identkeyword.MUTATION: + operationDefinition.OperationTypeLiteral = p.read().TextPosition + operationDefinition.OperationType = ast.OperationTypeMutation + case identkeyword.SUBSCRIPTION: + operationDefinition.OperationTypeLiteral = p.read().TextPosition + operationDefinition.OperationType = ast.OperationTypeSubscription + default: + p.errUnexpectedIdentKey(p.read(), key, identkeyword.QUERY, identkeyword.MUTATION, identkeyword.SUBSCRIPTION) + return + } + case keyword.LBRACE: + operationDefinition.OperationType = ast.OperationTypeQuery + operationDefinition.SelectionSet, operationDefinition.HasSelections = p.parseSelectionSet() + p.document.OperationDefinitions = append(p.document.OperationDefinitions, operationDefinition) + ref := len(p.document.OperationDefinitions) - 1 + rootNode := ast.Node{ + Kind: ast.NodeKindOperationDefinition, + Ref: ref, + } + p.document.RootNodes = append(p.document.RootNodes, rootNode) + return + default: + p.errUnexpectedToken(p.read(), keyword.IDENT, keyword.LBRACE) + return + } + + if p.peekEquals(keyword.IDENT) { + operationDefinition.Name = p.read().Literal + } + if p.peekEquals(keyword.LPAREN) { + 
operationDefinition.VariableDefinitions = p.parseVariableDefinitionList()
+		operationDefinition.HasVariableDefinitions = len(operationDefinition.VariableDefinitions.Refs) > 0
+	}
+	if p.peekEquals(keyword.AT) {
+		operationDefinition.Directives = p.parseDirectiveList()
+		operationDefinition.HasDirectives = len(operationDefinition.Directives.Refs) > 0
+	}
+
+	operationDefinition.SelectionSet, operationDefinition.HasSelections = p.parseSelectionSet()
+
+	p.document.OperationDefinitions = append(p.document.OperationDefinitions, operationDefinition)
+	ref := len(p.document.OperationDefinitions) - 1
+	rootNode := ast.Node{
+		Kind: ast.NodeKindOperationDefinition,
+		Ref:  ref,
+	}
+	p.document.RootNodes = append(p.document.RootNodes, rootNode)
+}
+
+func (p *Parser) parseVariableDefinitionList() (list ast.VariableDefinitionList) {
+
+	list.LPAREN = p.mustRead(keyword.LPAREN).TextPosition
+
+	for {
+		next := p.peek()
+		switch next {
+		case keyword.RPAREN:
+			list.RPAREN = p.read().TextPosition
+			return
+		case keyword.DOLLAR:
+			ref := p.parseVariableDefinition()
+			if cap(list.Refs) == 0 {
+				list.Refs = p.document.Refs[p.document.NextRefIndex()][:0]
+			}
+			list.Refs = append(list.Refs, ref)
+		default:
+			p.errUnexpectedToken(p.read(), keyword.RPAREN, keyword.DOLLAR)
+			return
+		}
+
+		if p.report.HasErrors() {
+			return
+		}
+	}
+}
+
+func (p *Parser) parseVariableDefinition() int {
+
+	var variableDefinition ast.VariableDefinition
+
+	variableDefinition.VariableValue.Kind = ast.ValueKindVariable
+	variableDefinition.VariableValue.Ref, variableDefinition.VariableValue.Position = p.parseVariableValue()
+
+	variableDefinition.Colon = p.mustRead(keyword.COLON).TextPosition
+	variableDefinition.Type = p.ParseType()
+	if p.peekEquals(keyword.EQUALS) {
+		variableDefinition.DefaultValue = p.parseDefaultValue()
+	}
+	if p.peekEquals(keyword.AT) {
+		variableDefinition.Directives = p.parseDirectiveList()
+		variableDefinition.HasDirectives = len(variableDefinition.Directives.Refs) > 0
+	}
+	p.document.VariableDefinitions = append(p.document.VariableDefinitions, variableDefinition)
+	return len(p.document.VariableDefinitions) - 1
+}
+
+func (p *Parser) parseDefaultValue() ast.DefaultValue {
+	equals := p.mustRead(keyword.EQUALS).TextPosition
+	value := p.ParseValue()
+	return ast.DefaultValue{
+		IsDefined: true,
+		Equals:    equals,
+		Value:     value,
+	}
+}
+
+func (p *Parser) parseFragmentDefinition() {
+	var fragmentDefinition ast.FragmentDefinition
+	fragmentDefinition.FragmentLiteral = p.mustReadIdentKey(identkeyword.FRAGMENT).TextPosition
+	fragmentDefinition.Name = p.mustRead(keyword.IDENT).Literal
+	fragmentDefinition.TypeCondition = p.parseTypeCondition()
+	if p.peekEquals(keyword.AT) {
+		fragmentDefinition.Directives = p.parseDirectiveList()
+		fragmentDefinition.HasDirectives = len(fragmentDefinition.Directives.Refs) > 0
+	}
+	fragmentDefinition.SelectionSet, fragmentDefinition.HasSelections = p.parseSelectionSet()
+	p.document.FragmentDefinitions = append(p.document.FragmentDefinitions, fragmentDefinition)
+
+	ref := len(p.document.FragmentDefinitions) - 1
+	p.document.RootNodes = append(p.document.RootNodes, ast.Node{
+		Kind: ast.NodeKindFragmentDefinition,
+		Ref:  ref,
+	})
+}
+
+func (p *Parser) parseExtension() {
+	extend := p.mustReadIdentKey(identkeyword.EXTEND).TextPosition
+	next, literal := p.peekLiteral()
+
+	if next != keyword.IDENT {
+		p.errUnexpectedToken(p.read(), keyword.IDENT)
+		return
+	}
+
+	key := 
p.identKeywordSliceRef(literal) + + switch key { + case identkeyword.SCHEMA: + p.parseSchemaExtension(extend) + case identkeyword.TYPE: + p.parseObjectTypeExtension(extend) + case identkeyword.INTERFACE: + p.parseInterfaceTypeExtension(extend) + case identkeyword.SCALAR: + p.parseScalarTypeExtension(extend) + case identkeyword.UNION: + p.parseUnionTypeExtension(extend) + case identkeyword.ENUM: + p.parseEnumTypeExtension(extend) + case identkeyword.INPUT: + p.parseInputObjectTypeExtension(extend) + default: + p.errUnexpectedIdentKey(p.read(), key, identkeyword.SCHEMA, identkeyword.TYPE, identkeyword.INTERFACE, identkeyword.SCALAR, identkeyword.UNION, identkeyword.ENUM, identkeyword.INPUT, identkeyword.EXTEND) + } +} + +func (p *Parser) parseSchemaExtension(extend position.Position) { + schemaLiteral := p.read() + schemaDefinition := ast.SchemaDefinition{ + SchemaLiteral: schemaLiteral.TextPosition, + } + if p.peekEquals(keyword.AT) { + schemaDefinition.Directives = p.parseDirectiveList() + schemaDefinition.HasDirectives = len(schemaDefinition.Directives.Refs) > 0 + } + p.parseRootOperationTypeDefinitionList(&schemaDefinition.RootOperationTypeDefinitions) + + schemaExtension := ast.SchemaExtension{ + ExtendLiteral: extend, + SchemaDefinition: schemaDefinition, + } + p.document.SchemaExtensions = append(p.document.SchemaExtensions, schemaExtension) + ref := len(p.document.SchemaExtensions) - 1 + p.document.RootNodes = append(p.document.RootNodes, ast.Node{Ref: ref, Kind: ast.NodeKindSchemaExtension}) +} + +func (p *Parser) parseObjectTypeExtension(extend position.Position) { + var objectTypeDefinition ast.ObjectTypeDefinition + objectTypeDefinition.TypeLiteral = p.mustReadIdentKey(identkeyword.TYPE).TextPosition + objectTypeDefinition.Name = p.mustRead(keyword.IDENT).Literal + if p.peekEqualsIdentKey(identkeyword.IMPLEMENTS) { + objectTypeDefinition.ImplementsInterfaces = p.parseImplementsInterfaces() + } + if p.peekEquals(keyword.AT) { + objectTypeDefinition.Directives = p.parseDirectiveList() + objectTypeDefinition.HasDirectives = len(objectTypeDefinition.Directives.Refs) > 0 + } + if p.peekEquals(keyword.LBRACE) { + objectTypeDefinition.FieldsDefinition = p.parseFieldDefinitionList() + objectTypeDefinition.HasFieldDefinitions = len(objectTypeDefinition.FieldsDefinition.Refs) > 0 + } + objectTypeExtension := ast.ObjectTypeExtension{ + ExtendLiteral: extend, + ObjectTypeDefinition: objectTypeDefinition, + } + p.document.ObjectTypeExtensions = append(p.document.ObjectTypeExtensions, objectTypeExtension) + ref := len(p.document.ObjectTypeExtensions) - 1 + node := ast.Node{Ref: ref, Kind: ast.NodeKindObjectTypeExtension} + p.document.RootNodes = append(p.document.RootNodes, node) + + if p.shouldIndex { + p.indexNode(objectTypeDefinition.Name, node) + } +} + +func (p *Parser) parseInterfaceTypeExtension(extend position.Position) { + var interfaceTypeDefinition ast.InterfaceTypeDefinition + interfaceTypeDefinition.InterfaceLiteral = p.mustReadIdentKey(identkeyword.INTERFACE).TextPosition + interfaceTypeDefinition.Name = p.mustRead(keyword.IDENT).Literal + if p.peekEqualsIdentKey(identkeyword.IMPLEMENTS) { + interfaceTypeDefinition.ImplementsInterfaces = p.parseImplementsInterfaces() + } + if p.peekEquals(keyword.AT) { + interfaceTypeDefinition.Directives = p.parseDirectiveList() + interfaceTypeDefinition.HasDirectives = len(interfaceTypeDefinition.Directives.Refs) > 0 + } + if p.peekEquals(keyword.LBRACE) { + interfaceTypeDefinition.FieldsDefinition = p.parseFieldDefinitionList() + 
interfaceTypeDefinition.HasFieldDefinitions = len(interfaceTypeDefinition.FieldsDefinition.Refs) > 0 + } + interfaceTypeExtension := ast.InterfaceTypeExtension{ + ExtendLiteral: extend, + InterfaceTypeDefinition: interfaceTypeDefinition, + } + p.document.InterfaceTypeExtensions = append(p.document.InterfaceTypeExtensions, interfaceTypeExtension) + ref := len(p.document.InterfaceTypeExtensions) - 1 + node := ast.Node{Ref: ref, Kind: ast.NodeKindInterfaceTypeExtension} + p.document.RootNodes = append(p.document.RootNodes, node) + + if p.shouldIndex { + p.indexNode(interfaceTypeExtension.Name, node) + } +} + +func (p *Parser) parseScalarTypeExtension(extend position.Position) { + var scalarTypeDefinition ast.ScalarTypeDefinition + scalarTypeDefinition.ScalarLiteral = p.mustReadIdentKey(identkeyword.SCALAR).TextPosition + scalarTypeDefinition.Name = p.mustRead(keyword.IDENT).Literal + if p.peekEquals(keyword.AT) { + scalarTypeDefinition.Directives = p.parseDirectiveList() + scalarTypeDefinition.HasDirectives = len(scalarTypeDefinition.Directives.Refs) > 0 + } + scalarTypeExtension := ast.ScalarTypeExtension{ + ExtendLiteral: extend, + ScalarTypeDefinition: scalarTypeDefinition, + } + p.document.ScalarTypeExtensions = append(p.document.ScalarTypeExtensions, scalarTypeExtension) + ref := len(p.document.ScalarTypeExtensions) - 1 + node := ast.Node{Ref: ref, Kind: ast.NodeKindScalarTypeExtension} + p.document.RootNodes = append(p.document.RootNodes, node) + + if p.shouldIndex { + p.indexNode(scalarTypeExtension.Name, node) + } +} + +func (p *Parser) parseUnionTypeExtension(extend position.Position) { + var unionTypeDefinition ast.UnionTypeDefinition + unionTypeDefinition.UnionLiteral = p.mustReadIdentKey(identkeyword.UNION).TextPosition + unionTypeDefinition.Name = p.mustRead(keyword.IDENT).Literal + if p.peekEquals(keyword.AT) { + unionTypeDefinition.Directives = p.parseDirectiveList() + unionTypeDefinition.HasDirectives = len(unionTypeDefinition.Directives.Refs) > 0 + } + if p.peekEquals(keyword.EQUALS) { + unionTypeDefinition.Equals = p.mustRead(keyword.EQUALS).TextPosition + unionTypeDefinition.UnionMemberTypes = p.parseUnionMemberTypes() + unionTypeDefinition.HasUnionMemberTypes = len(unionTypeDefinition.UnionMemberTypes.Refs) > 0 + } + unionTypeExtension := ast.UnionTypeExtension{ + ExtendLiteral: extend, + UnionTypeDefinition: unionTypeDefinition, + } + p.document.UnionTypeExtensions = append(p.document.UnionTypeExtensions, unionTypeExtension) + ref := len(p.document.UnionTypeExtensions) - 1 + node := ast.Node{Ref: ref, Kind: ast.NodeKindUnionTypeExtension} + p.document.RootNodes = append(p.document.RootNodes, node) + + if p.shouldIndex { + p.indexNode(unionTypeExtension.Name, node) + } +} + +func (p *Parser) parseEnumTypeExtension(extend position.Position) { + var enumTypeDefinition ast.EnumTypeDefinition + enumTypeDefinition.EnumLiteral = p.mustReadIdentKey(identkeyword.ENUM).TextPosition + enumTypeDefinition.Name = p.mustRead(keyword.IDENT).Literal + if p.peekEquals(keyword.AT) { + enumTypeDefinition.Directives = p.parseDirectiveList() + enumTypeDefinition.HasDirectives = len(enumTypeDefinition.Directives.Refs) > 0 + } + if p.peekEquals(keyword.LBRACE) { + enumTypeDefinition.EnumValuesDefinition = p.parseEnumValueDefinitionList() + enumTypeDefinition.HasEnumValuesDefinition = len(enumTypeDefinition.EnumValuesDefinition.Refs) > 0 + } + enumTypeExtension := ast.EnumTypeExtension{ + ExtendLiteral: extend, + EnumTypeDefinition: enumTypeDefinition, + } + p.document.EnumTypeExtensions = 
append(p.document.EnumTypeExtensions, enumTypeExtension)
+	ref := len(p.document.EnumTypeExtensions) - 1
+	node := ast.Node{Ref: ref, Kind: ast.NodeKindEnumTypeExtension}
+	p.document.RootNodes = append(p.document.RootNodes, node)
+
+	if p.shouldIndex {
+		p.indexNode(enumTypeExtension.Name, node)
+	}
+}
+
+func (p *Parser) parseInputObjectTypeExtension(extend position.Position) {
+	var inputObjectTypeDefinition ast.InputObjectTypeDefinition
+	inputObjectTypeDefinition.InputLiteral = p.mustReadIdentKey(identkeyword.INPUT).TextPosition
+	inputObjectTypeDefinition.Name = p.mustRead(keyword.IDENT).Literal
+	if p.peekEquals(keyword.AT) {
+		inputObjectTypeDefinition.Directives = p.parseDirectiveList()
+		inputObjectTypeDefinition.HasDirectives = len(inputObjectTypeDefinition.Directives.Refs) > 0
+	}
+	if p.peekEquals(keyword.LBRACE) {
+		inputObjectTypeDefinition.InputFieldsDefinition = p.parseInputValueDefinitionList(keyword.RBRACE)
+		inputObjectTypeDefinition.HasInputFieldsDefinition = len(inputObjectTypeDefinition.InputFieldsDefinition.Refs) > 0
+	}
+	inputObjectTypeExtension := ast.InputObjectTypeExtension{
+		ExtendLiteral:             extend,
+		InputObjectTypeDefinition: inputObjectTypeDefinition,
+	}
+	p.document.InputObjectTypeExtensions = append(p.document.InputObjectTypeExtensions, inputObjectTypeExtension)
+	ref := len(p.document.InputObjectTypeExtensions) - 1
+	node := ast.Node{Ref: ref, Kind: ast.NodeKindInputObjectTypeExtension}
+	p.document.RootNodes = append(p.document.RootNodes, node)
+
+	if p.shouldIndex {
+		p.indexNode(inputObjectTypeExtension.Name, node)
+	}
+}
diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astparser/parser_token_helpers.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astparser/parser_token_helpers.go
new file mode 100644
index 00000000000..357ea464eb0
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astparser/parser_token_helpers.go
@@ -0,0 +1,90 @@
+package astparser
+
+import (
+	"github.com/TykTechnologies/graphql-go-tools/pkg/ast"
+	"github.com/TykTechnologies/graphql-go-tools/pkg/lexer/identkeyword"
+	"github.com/TykTechnologies/graphql-go-tools/pkg/lexer/keyword"
+	"github.com/TykTechnologies/graphql-go-tools/pkg/lexer/token"
+)
+
+// read - reads and returns the next token
+func (p *Parser) read() token.Token {
+	return p.tokenizer.Read()
+}
+
+// peek - returns the keyword of the token next to currentToken;
+// returns keyword.EOF when the end of the document is reached
+func (p *Parser) peek() keyword.Keyword {
+	tok := p.tokenizer.Peek()
+	return tok.Keyword
+}
+
+// peekLiteral - returns the keyword.Keyword and literal ast.ByteSliceReference of the token next to currentToken;
+// returns keyword.EOF when the end of the document is reached
+func (p *Parser) peekLiteral() (keyword.Keyword, ast.ByteSliceReference) {
+	tok := p.tokenizer.Peek()
+	if tok.Keyword != keyword.EOF {
+		return tok.Keyword, tok.Literal
+	}
+	return keyword.EOF, ast.ByteSliceReference{}
+}
+
+// peekEquals - reports whether the next token's keyword equals key
+func (p *Parser) peekEquals(key keyword.Keyword) bool {
+	return p.peek() == key
+}
+
+// peekEqualsIdentKey - reports whether the next token is an identifier matching identKey
+func (p *Parser) peekEqualsIdentKey(identKey identkeyword.IdentKeyword) bool {
+	key, literal := p.peekLiteral()
+	if key != keyword.IDENT {
+		return false
+	}
+	actualKey := p.identKeywordSliceRef(literal)
+	return actualKey == identKey
+}
+
+func (p *Parser) mustRead(key keyword.Keyword) (next token.Token) {
+	next = p.read()
+	if next.Keyword != key {
+		
p.errUnexpectedToken(next, key)
+	}
+	return
+}
+
+func (p *Parser) mustReadIdentKey(key identkeyword.IdentKeyword) (next token.Token) {
+	next = p.read()
+	if next.Keyword != keyword.IDENT {
+		p.errUnexpectedToken(next, keyword.IDENT)
+	}
+	identKey := p.identKeywordToken(next)
+	if identKey != key {
+		p.errUnexpectedIdentKey(next, identKey, key)
+	}
+	return
+}
+
+func (p *Parser) mustReadExceptIdentKey(key identkeyword.IdentKeyword) (next token.Token) {
+	next = p.read()
+	if next.Keyword != keyword.IDENT {
+		p.errUnexpectedToken(next, keyword.IDENT)
+	}
+	identKey := p.identKeywordToken(next)
+	if identKey == key {
+		p.errUnexpectedIdentKey(next, identKey, key)
+	}
+	return
+}
+
+func (p *Parser) mustReadOneOf(keys ...identkeyword.IdentKeyword) (token.Token, identkeyword.IdentKeyword) {
+	next := p.read()
+
+	identKey := p.identKeywordToken(next)
+	for _, expectation := range keys {
+		if identKey == expectation {
+			return next, identKey
+		}
+	}
+	p.errUnexpectedToken(next)
+	return next, identKey
+}
diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astparser/tokenizer.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astparser/tokenizer.go
new file mode 100644
index 00000000000..db375ec99c4
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astparser/tokenizer.go
@@ -0,0 +1,97 @@
+package astparser
+
+import (
+	"github.com/TykTechnologies/graphql-go-tools/pkg/ast"
+	"github.com/TykTechnologies/graphql-go-tools/pkg/lexer"
+	"github.com/TykTechnologies/graphql-go-tools/pkg/lexer/keyword"
+	"github.com/TykTechnologies/graphql-go-tools/pkg/lexer/token"
+)
+
+// Tokenizer takes a raw input and turns it into a set of tokens
+type Tokenizer struct {
+	lexer        *lexer.Lexer
+	tokens       []token.Token
+	maxTokens    int
+	currentToken int
+	skipComments bool
+}
+
+// NewTokenizer returns a new tokenizer
+func NewTokenizer() *Tokenizer {
+	return &Tokenizer{
+		tokens:       make([]token.Token, 256),
+		lexer:        &lexer.Lexer{},
+		skipComments: true,
+	}
+}
+
+func (t *Tokenizer) Tokenize(input *ast.Input) {
+	t.lexer.SetInput(input)
+	t.tokens = t.tokens[:0]
+
+	for {
+		next := t.lexer.Read()
+		if next.Keyword == keyword.EOF {
+			t.maxTokens = len(t.tokens)
+			t.currentToken = -1
+			return
+		}
+		t.tokens = append(t.tokens, next)
+	}
+}
+
+// hasNextToken - reports whether a token is left to read, i.e. EOF has not been reached
+func (t *Tokenizer) hasNextToken(skip int) bool {
+	return t.currentToken+1+skip < t.maxTokens
+}
+
+// next - increments the current token index if hasNextToken,
+// then returns the current token index
+func (t *Tokenizer) next() int {
+	if t.hasNextToken(0) {
+		t.currentToken++
+	}
+	return t.currentToken
+}
+
+// Read - increments the currentToken index and returns the token if hasNextToken,
+// otherwise returns a token with keyword.EOF
+func (t *Tokenizer) Read() token.Token {
+	tok := t.read()
+	if t.skipComments && tok.Keyword == keyword.COMMENT {
+		tok = t.read()
+	}
+
+	return tok
+}
+
+func (t *Tokenizer) read() token.Token {
+	if t.hasNextToken(0) {
+		return t.tokens[t.next()]
+	}
+
+	return token.Token{
+		Keyword: keyword.EOF,
+	}
+}
+
+// Peek - returns the token next to currentToken if hasNextToken,
+// otherwise returns a token with keyword.EOF
+func (t *Tokenizer) Peek() token.Token {
+	tok := t.peek(0)
+	if t.skipComments && tok.Keyword == keyword.COMMENT {
+		tok = t.peek(1)
+	}
+
+	return tok
+}
+
+func (t *Tokenizer) peek(skip int) token.Token {
+	if t.hasNextToken(skip) {
+		nextIndex := t.currentToken + 1 + skip
+		return t.tokens[nextIndex]
+	}
+	return token.Token{
+		Keyword: keyword.EOF,
+	}
+}
diff --git 
a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astprinter/astprinter.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astprinter/astprinter.go new file mode 100644 index 00000000000..9004c94ef45 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astprinter/astprinter.go @@ -0,0 +1,1031 @@ +// Package astprinter takes a GraphQL document and prints it as a String with optional indentation. +package astprinter + +import ( + "bytes" + "io" + + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/literal" +) + +// Print takes a document as well as a definition (optional) and prints it to the io.Writer. +// The definition is only necessary in case a GraphQL Operation should be printed. +func Print(document, definition *ast.Document, out io.Writer) error { + printer := Printer{} + return printer.Print(document, definition, out) +} + +// PrintIndent is the same as Print but accepts an additional indent parameter to set indentation. +func PrintIndent(document, definition *ast.Document, indent []byte, out io.Writer) error { + printer := Printer{ + indent: indent, + } + return printer.Print(document, definition, out) +} + +// PrintString is the same as Print but returns a string instead of writing to an io.Writer +func PrintString(document, definition *ast.Document) (string, error) { + buff := &bytes.Buffer{} + err := Print(document, definition, buff) + out := buff.String() + return out, err +} + +// PrintStringIndent is the same as PrintIndent but returns a string instead of writing to an io.Writer +func PrintStringIndent(document, definition *ast.Document, indent string) (string, error) { + buff := &bytes.Buffer{} + err := PrintIndent(document, definition, []byte(indent), buff) + out := buff.String() + return out, err +} + +// Printer walks a GraphQL document and prints it as a string +type Printer struct { + indent []byte + visitor printVisitor + walker astvisitor.SimpleWalker + registered bool +} + +// Print starts the actual AST printing +// Keep a printer and re-use it in case you'd like to print ASTs in the hot path. 
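+//
+// A minimal usage sketch (the document variables are illustrative, not part
+// of this package):
+//
+//	printer := &Printer{}
+//	buf := &bytes.Buffer{}
+//	if err := printer.Print(operationDocument, schemaDocument, buf); err != nil {
+//		// handle the print error
+//	}
+//	out := buf.String() // the printed GraphQL document
+//
+// For one-off printing, the package-level helpers PrintString and
+// PrintStringIndent wrap this without keeping a Printer around.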
+func (p *Printer) Print(document, definition *ast.Document, out io.Writer) error { + p.visitor.indent = p.indent + p.visitor.err = nil + p.visitor.document = document + p.visitor.out = out + p.visitor.SimpleWalker = &p.walker + if !p.registered { + p.walker.SetVisitor(&p.visitor) + } + return p.walker.Walk(p.visitor.document, definition) +} + +type printVisitor struct { + *astvisitor.SimpleWalker + document *ast.Document + out io.Writer + err error + + indent []byte + inputValueDefinitionOpener []byte + inputValueDefinitionCloser []byte + isFirstDirectiveLocation bool + isDirectiveRepeatable bool +} + +func (p *printVisitor) write(data []byte) { + if p.err != nil { + return + } + _, p.err = p.out.Write(data) +} + +func (p *printVisitor) indentationDepth() (depth int) { + + if len(p.Ancestors) == 0 { + return 0 + } + + switch p.Ancestors[0].Kind { + case ast.NodeKindOperationDefinition, + ast.NodeKindFragmentDefinition: + default: + return 2 + } + + for i := range p.Ancestors { + if p.Ancestors[i].Kind == ast.NodeKindSelectionSet { + depth += 2 + } + } + + return depth +} + +func (p *printVisitor) writeIndented(data []byte) { + if p.err != nil { + return + } + depth := p.indentationDepth() + for i := 0; i < depth; i++ { + _, p.err = p.out.Write(p.indent) + } + _, p.err = p.out.Write(data) +} + +func (p *printVisitor) must(err error) { + if p.err != nil { + return + } + p.err = err +} + +func (p *printVisitor) EnterDirective(ref int) { + if p.document.DirectiveIsFirst(ref, p.Ancestors[len(p.Ancestors)-1]) { + switch p.Ancestors[len(p.Ancestors)-1].Kind { + case ast.NodeKindFieldDefinition: + p.writeFieldType(p.Ancestors[len(p.Ancestors)-1].Ref) + p.write(literal.SPACE) + case ast.NodeKindEnumValueDefinition, + ast.NodeKindInputValueDefinition: + p.write(literal.SPACE) + } + } + + p.write(literal.AT) + p.write(p.document.DirectiveNameBytes(ref)) +} + +func (p *printVisitor) LeaveDirective(ref int) { + if !p.document.DirectiveIsLast(ref, p.Ancestors[len(p.Ancestors)-1]) { + p.write(literal.SPACE) + return + } + + ancestor := p.Ancestors[len(p.Ancestors)-1] + switch ancestor.Kind { + case ast.NodeKindField: + if p.document.FieldHasSelections(ancestor.Ref) { + p.write(literal.SPACE) + } else if len(p.SelectionsAfter) > 0 { + if p.indent != nil { + p.write(literal.LINETERMINATOR) + } else { + p.write(literal.SPACE) + } + } + case ast.NodeKindVariableDefinition: + if !p.document.VariableDefinitionsAfter(ancestor.Ref) { + p.write(literal.SPACE) + } + case ast.NodeKindInlineFragment: + if len(p.SelectionsAfter) > 0 { + p.write(literal.SPACE) + } + case ast.NodeKindScalarTypeDefinition, + ast.NodeKindScalarTypeExtension, + ast.NodeKindUnionTypeDefinition, + ast.NodeKindUnionTypeExtension, + ast.NodeKindEnumTypeDefinition, + ast.NodeKindEnumTypeExtension, + ast.NodeKindEnumValueDefinition, + ast.NodeKindFieldDefinition, + ast.NodeKindInputValueDefinition: + return + default: + p.write(literal.SPACE) + } +} + +func (p *printVisitor) EnterVariableDefinition(ref int) { + if !p.document.VariableDefinitionsBefore(ref) { + p.write(literal.LPAREN) + } + + p.must(p.document.PrintValue(p.document.VariableDefinitions[ref].VariableValue, p.out)) + p.write(literal.COLON) + p.write(literal.SPACE) + + p.must(p.document.PrintType(p.document.VariableDefinitions[ref].Type, p.out)) + + if p.document.VariableDefinitions[ref].DefaultValue.IsDefined { + p.write(literal.SPACE) + p.write(literal.EQUALS) + p.write(literal.SPACE) + p.must(p.document.PrintValue(p.document.VariableDefinitions[ref].DefaultValue.Value, p.out)) + 
} + + if p.document.VariableDefinitions[ref].HasDirectives { + p.write(literal.SPACE) + } +} + +func (p *printVisitor) LeaveVariableDefinition(ref int) { + if !p.document.VariableDefinitionsAfter(ref) { + p.write(literal.RPAREN) + } else { + p.write(literal.COMMA) + p.write(literal.SPACE) + } +} + +func (p *printVisitor) EnterArgument(ref int) { + if len(p.document.ArgumentsBefore(p.Ancestors[len(p.Ancestors)-1], ref)) == 0 { + p.write(literal.LPAREN) + } else { + p.write(literal.COMMA) + p.write(literal.SPACE) + } + p.must(p.document.PrintArgument(ref, p.out)) +} + +func (p *printVisitor) LeaveArgument(ref int) { + if len(p.document.ArgumentsAfter(p.Ancestors[len(p.Ancestors)-1], ref)) == 0 { + p.write(literal.RPAREN) + } +} + +func (p *printVisitor) EnterOperationDefinition(ref int) { + + hasName := p.document.OperationDefinitions[ref].Name.Length() > 0 + hasVariables := p.document.OperationDefinitions[ref].HasVariableDefinitions + + switch p.document.OperationDefinitions[ref].OperationType { + case ast.OperationTypeQuery: + if hasName || hasVariables { + p.write(literal.QUERY) + } + case ast.OperationTypeMutation: + p.write(literal.MUTATION) + case ast.OperationTypeSubscription: + p.write(literal.SUBSCRIPTION) + } + + if hasName { + p.write(literal.SPACE) + } + + if hasName { + p.write(p.document.Input.ByteSlice(p.document.OperationDefinitions[ref].Name)) + if !p.document.OperationDefinitions[ref].HasVariableDefinitions { + p.write(literal.SPACE) + } + } +} + +func (p *printVisitor) LeaveOperationDefinition(ref int) { + if !p.document.NodeIsLastRootNode(ast.Node{Kind: ast.NodeKindOperationDefinition, Ref: ref}) { + if p.indent != nil { + p.write(literal.LINETERMINATOR) + p.write(literal.LINETERMINATOR) + } else { + p.write(literal.SPACE) + } + } +} + +func (p *printVisitor) EnterSelectionSet(ref int) { + p.write(literal.LBRACE) + if p.indent != nil { + p.write(literal.LINETERMINATOR) + } +} + +func (p *printVisitor) LeaveSelectionSet(ref int) { + if p.indent != nil { + p.write(literal.LINETERMINATOR) + } + p.writeIndented(literal.RBRACE) +} + +func (p *printVisitor) EnterField(ref int) { + if p.document.Fields[ref].Alias.IsDefined { + p.writeIndented(p.document.Input.ByteSlice(p.document.Fields[ref].Alias.Name)) + p.write(literal.COLON) + p.write(literal.SPACE) + p.write(p.document.Input.ByteSlice(p.document.Fields[ref].Name)) + } else { + p.writeIndented(p.document.Input.ByteSlice(p.document.Fields[ref].Name)) + } + if !p.document.FieldHasArguments(ref) && (p.document.FieldHasSelections(ref) || p.document.FieldHasDirectives(ref)) { + p.write(literal.SPACE) + } +} + +func (p *printVisitor) LeaveField(ref int) { + if !p.document.FieldHasDirectives(ref) && len(p.SelectionsAfter) != 0 { + if p.indent != nil { + p.write(literal.LINETERMINATOR) + } else { + p.write(literal.SPACE) + } + } +} + +func (p *printVisitor) EnterFragmentSpread(ref int) { + p.writeIndented(literal.SPREAD) + p.write(p.document.Input.ByteSlice(p.document.FragmentSpreads[ref].FragmentName)) +} + +func (p *printVisitor) LeaveFragmentSpread(ref int) { + ancestor := p.Ancestors[len(p.Ancestors)-1] + if p.document.SelectionsAfterFragmentSpread(ref, ancestor) { + if p.indent != nil { + p.write(literal.LINETERMINATOR) + } else { + p.write(literal.SPACE) + } + } +} + +func (p *printVisitor) EnterInlineFragment(ref int) { + p.writeIndented(literal.SPREAD) + if p.document.InlineFragments[ref].TypeCondition.Type != -1 { + p.write(literal.SPACE) + p.write(literal.ON) + p.write(literal.SPACE) + 
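// write the type condition's named type, yielding "... on TypeName" +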
p.write(p.document.Input.ByteSlice(p.document.Types[p.document.InlineFragments[ref].TypeCondition.Type].Name)) + p.write(literal.SPACE) + } else if p.document.InlineFragments[ref].HasDirectives { + p.write(literal.SPACE) + } + +} + +func (p *printVisitor) LeaveInlineFragment(ref int) { + ancestor := p.Ancestors[len(p.Ancestors)-1] + if p.document.SelectionsAfterInlineFragment(ref, ancestor) { + if p.indent != nil { + p.write(literal.LINETERMINATOR) + } else { + p.write(literal.SPACE) + } + } +} + +func (p *printVisitor) EnterFragmentDefinition(ref int) { + p.write(literal.FRAGMENT) + p.write(literal.SPACE) + p.write(p.document.Input.ByteSlice(p.document.FragmentDefinitions[ref].Name)) + p.write(literal.SPACE) + p.write(literal.ON) + p.write(literal.SPACE) + p.write(p.document.Input.ByteSlice(p.document.Types[p.document.FragmentDefinitions[ref].TypeCondition.Type].Name)) + p.write(literal.SPACE) + +} + +func (p *printVisitor) LeaveFragmentDefinition(ref int) { + if !p.document.NodeIsLastRootNode(ast.Node{Kind: ast.NodeKindFragmentDefinition, Ref: ref}) { + if p.indent != nil { + p.write(literal.LINETERMINATOR) + p.write(literal.LINETERMINATOR) + } else { + p.write(literal.SPACE) + } + } +} + +func (p *printVisitor) EnterObjectTypeDefinition(ref int) { + + if p.document.ObjectTypeDefinitions[ref].Description.IsDefined { + p.must(p.document.PrintDescription(p.document.ObjectTypeDefinitions[ref].Description, nil, 0, p.out)) + p.write(literal.LINETERMINATOR) + } + + p.write(literal.TYPE) + p.write(literal.SPACE) + p.write(p.document.ObjectTypeDefinitionNameBytes(ref)) + p.write(literal.SPACE) + + if len(p.document.ObjectTypeDefinitions[ref].ImplementsInterfaces.Refs) != 0 { + p.write(literal.IMPLEMENTS) + p.write(literal.SPACE) + for i, j := range p.document.ObjectTypeDefinitions[ref].ImplementsInterfaces.Refs { + if i != 0 { + p.write(literal.SPACE) + p.write(literal.AND) + p.write(literal.SPACE) + } + p.must(p.document.PrintType(j, p.out)) + } + p.write(literal.SPACE) + } + + p.inputValueDefinitionOpener = literal.LPAREN + p.inputValueDefinitionCloser = literal.RPAREN +} + +func (p *printVisitor) LeaveObjectTypeDefinition(ref int) { + if !p.document.NodeIsLastRootNode(ast.Node{Kind: ast.NodeKindObjectTypeDefinition, Ref: ref}) { + if p.indent != nil { + p.write(literal.LINETERMINATOR) + p.write(literal.LINETERMINATOR) + } else { + p.write(literal.SPACE) + } + } +} + +func (p *printVisitor) EnterObjectTypeExtension(ref int) { + + if p.document.ObjectTypeExtensions[ref].Description.IsDefined { + p.must(p.document.PrintDescription(p.document.ObjectTypeExtensions[ref].Description, nil, 0, p.out)) + p.write(literal.LINETERMINATOR) + } + + p.write(literal.EXTEND) + p.write(literal.SPACE) + p.write(literal.TYPE) + p.write(literal.SPACE) + p.write(p.document.ObjectTypeExtensionNameBytes(ref)) + p.write(literal.SPACE) + + p.inputValueDefinitionOpener = literal.LPAREN + p.inputValueDefinitionCloser = literal.RPAREN +} + +func (p *printVisitor) LeaveObjectTypeExtension(ref int) { + if !p.document.NodeIsLastRootNode(ast.Node{Kind: ast.NodeKindObjectTypeExtension, Ref: ref}) { + if p.indent != nil { + p.write(literal.LINETERMINATOR) + p.write(literal.LINETERMINATOR) + } else { + p.write(literal.SPACE) + } + } +} + +func (p *printVisitor) EnterFieldDefinition(ref int) { + if p.document.FieldDefinitionIsFirst(ref, p.Ancestors[len(p.Ancestors)-1]) { + p.write(literal.LBRACE) + if p.indent != nil { + p.write(literal.LINETERMINATOR) + } + } + if p.document.FieldDefinitions[ref].Description.IsDefined { + 
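// print the description block above the field definition, indented to the field's depth +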
p.must(p.document.PrintDescription(p.document.FieldDefinitions[ref].Description, p.indent, p.indentationDepth(), p.out)) + p.write(literal.LINETERMINATOR) + } + p.writeIndented(p.document.FieldDefinitionNameBytes(ref)) +} + +func (p *printVisitor) LeaveFieldDefinition(ref int) { + if !p.document.FieldDefinitionHasDirectives(ref) { + p.writeFieldType(ref) + } + + if p.document.FieldDefinitionIsLast(ref, p.Ancestors[len(p.Ancestors)-1]) { + if p.indent != nil { + p.write(literal.LINETERMINATOR) + } + p.write(literal.RBRACE) + } else { + if p.indent != nil { + p.write(literal.LINETERMINATOR) + } else { + p.write(literal.SPACE) + } + } +} + +func (p *printVisitor) EnterInputValueDefinition(ref int) { + if p.document.InputValueDefinitionIsFirst(ref, p.Ancestors[len(p.Ancestors)-1]) { + p.write(p.inputValueDefinitionOpener) + } + if p.indent != nil { + switch p.Ancestors[len(p.Ancestors)-1].Kind { + case ast.NodeKindDirectiveDefinition, ast.NodeKindInputObjectTypeDefinition, ast.NodeKindInputObjectTypeExtension: + p.write(literal.LINETERMINATOR) + } + } + if p.document.InputValueDefinitions[ref].Description.IsDefined { + p.must(p.document.PrintDescription(p.document.InputValueDefinitions[ref].Description, p.indent, p.indentationDepth(), p.out)) + p.write(literal.LINETERMINATOR) + } + switch p.Ancestors[len(p.Ancestors)-1].Kind { + case ast.NodeKindDirectiveDefinition, ast.NodeKindInputObjectTypeDefinition, ast.NodeKindInputObjectTypeExtension: + p.writeIndented(p.document.InputValueDefinitionNameBytes(ref)) + default: + p.write(p.document.InputValueDefinitionNameBytes(ref)) + } + p.write(literal.COLON) + p.write(literal.SPACE) + p.must(p.document.PrintType(p.document.InputValueDefinitionType(ref), p.out)) + if p.document.InputValueDefinitionHasDefaultValue(ref) { + p.write(literal.SPACE) + p.write(literal.EQUALS) + p.write(literal.SPACE) + p.must(p.document.PrintValue(p.document.InputValueDefinitionDefaultValue(ref), p.out)) + } +} + +func (p *printVisitor) LeaveInputValueDefinition(ref int) { + if p.document.InputValueDefinitionIsLast(ref, p.Ancestors[len(p.Ancestors)-1]) { + if p.indent != nil { + switch p.Ancestors[len(p.Ancestors)-1].Kind { + case ast.NodeKindDirectiveDefinition, ast.NodeKindInputObjectTypeDefinition, ast.NodeKindInputObjectTypeExtension: + p.write(literal.LINETERMINATOR) + } + } + p.write(p.inputValueDefinitionCloser) + } else { + if len(p.Ancestors) > 0 { + // check enclosing type kind + if p.Ancestors[len(p.Ancestors)-1].Kind == ast.NodeKindFieldDefinition { + p.write(literal.COMMA) + p.write(literal.SPACE) + } else if len(p.indent) == 0 { + // add space between arguments when printing without indents + p.write(literal.SPACE) + } + } + } +} + +func (p *printVisitor) EnterInterfaceTypeDefinition(ref int) { + + if p.document.InterfaceTypeDefinitions[ref].Description.IsDefined { + p.must(p.document.PrintDescription(p.document.InterfaceTypeDefinitions[ref].Description, nil, 0, p.out)) + p.write(literal.LINETERMINATOR) + } + + p.write(literal.INTERFACE) + p.write(literal.SPACE) + p.write(p.document.InterfaceTypeDefinitionNameBytes(ref)) + p.write(literal.SPACE) + + p.inputValueDefinitionOpener = literal.LPAREN + p.inputValueDefinitionCloser = literal.RPAREN +} + +func (p *printVisitor) LeaveInterfaceTypeDefinition(ref int) { + if !p.document.NodeIsLastRootNode(ast.Node{Kind: ast.NodeKindInterfaceTypeDefinition, Ref: ref}) { + if p.indent != nil { + p.write(literal.LINETERMINATOR) + p.write(literal.LINETERMINATOR) + } else { + p.write(literal.SPACE) + } + } +} + +func (p 
*printVisitor) EnterInterfaceTypeExtension(ref int) { + + if p.document.InterfaceTypeExtensions[ref].Description.IsDefined { + p.must(p.document.PrintDescription(p.document.InterfaceTypeExtensions[ref].Description, nil, 0, p.out)) + p.write(literal.LINETERMINATOR) + } + + p.write(literal.EXTEND) + p.write(literal.SPACE) + p.write(literal.INTERFACE) + p.write(literal.SPACE) + p.write(p.document.InterfaceTypeExtensionNameBytes(ref)) + p.write(literal.SPACE) + + p.inputValueDefinitionOpener = literal.LPAREN + p.inputValueDefinitionCloser = literal.RPAREN +} + +func (p *printVisitor) LeaveInterfaceTypeExtension(ref int) { + if !p.document.NodeIsLastRootNode(ast.Node{Kind: ast.NodeKindInterfaceTypeExtension, Ref: ref}) { + if p.indent != nil { + p.write(literal.LINETERMINATOR) + p.write(literal.LINETERMINATOR) + } else { + p.write(literal.SPACE) + } + } +} + +func (p *printVisitor) EnterScalarTypeDefinition(ref int) { + + if p.document.ScalarTypeDefinitions[ref].Description.IsDefined { + p.must(p.document.PrintDescription(p.document.ScalarTypeDefinitions[ref].Description, nil, 0, p.out)) + p.write(literal.LINETERMINATOR) + } + + p.write(literal.SCALAR) + p.write(literal.SPACE) + p.write(p.document.ScalarTypeDefinitionNameBytes(ref)) + if p.document.ScalarTypeDefinitionHasDirectives(ref) { + p.write(literal.SPACE) + } +} + +func (p *printVisitor) LeaveScalarTypeDefinition(ref int) { + if !p.document.NodeIsLastRootNode(ast.Node{Kind: ast.NodeKindScalarTypeDefinition, Ref: ref}) { + if p.indent != nil { + p.write(literal.LINETERMINATOR) + p.write(literal.LINETERMINATOR) + } else { + p.write(literal.SPACE) + } + } +} + +func (p *printVisitor) EnterScalarTypeExtension(ref int) { + + if p.document.ScalarTypeExtensions[ref].Description.IsDefined { + p.must(p.document.PrintDescription(p.document.ScalarTypeExtensions[ref].Description, nil, 0, p.out)) + p.write(literal.LINETERMINATOR) + } + + p.write(literal.EXTEND) + p.write(literal.SPACE) + p.write(literal.SCALAR) + p.write(literal.SPACE) + p.write(p.document.ScalarTypeExtensionNameBytes(ref)) + if p.document.ScalarTypeExtensionHasDirectives(ref) { + p.write(literal.SPACE) + } +} + +func (p *printVisitor) LeaveScalarTypeExtension(ref int) { + if !p.document.NodeIsLastRootNode(ast.Node{Kind: ast.NodeKindScalarTypeExtension, Ref: ref}) { + if p.indent != nil { + p.write(literal.LINETERMINATOR) + p.write(literal.LINETERMINATOR) + } else { + p.write(literal.SPACE) + } + } +} + +func (p *printVisitor) EnterUnionTypeDefinition(ref int) { + + if p.document.UnionTypeDefinitions[ref].Description.IsDefined { + p.must(p.document.PrintDescription(p.document.UnionTypeDefinitions[ref].Description, nil, 0, p.out)) + p.write(literal.LINETERMINATOR) + } + + p.write(literal.UNION) + p.write(literal.SPACE) + p.write(p.document.UnionTypeDefinitionNameBytes(ref)) + if p.document.UnionTypeDefinitionHasDirectives(ref) { + p.write(literal.SPACE) + } +} + +func (p *printVisitor) LeaveUnionTypeDefinition(ref int) { + if !p.document.NodeIsLastRootNode(ast.Node{Kind: ast.NodeKindUnionTypeDefinition, Ref: ref}) { + if p.indent != nil { + p.write(literal.LINETERMINATOR) + p.write(literal.LINETERMINATOR) + } else { + p.write(literal.SPACE) + } + } +} + +func (p *printVisitor) EnterUnionTypeExtension(ref int) { + + if p.document.UnionTypeExtensions[ref].Description.IsDefined { + p.must(p.document.PrintDescription(p.document.UnionTypeExtensions[ref].Description, nil, 0, p.out)) + p.write(literal.LINETERMINATOR) + } + + p.write(literal.EXTEND) + p.write(literal.SPACE) + 
p.write(literal.UNION) + p.write(literal.SPACE) + p.write(p.document.UnionTypeExtensionNameBytes(ref)) + if p.document.UnionTypeExtensionHasDirectives(ref) { + p.write(literal.SPACE) + } +} + +func (p *printVisitor) LeaveUnionTypeExtension(ref int) { + if !p.document.NodeIsLastRootNode(ast.Node{Kind: ast.NodeKindUnionTypeExtension, Ref: ref}) { + if p.indent != nil { + p.write(literal.LINETERMINATOR) + p.write(literal.LINETERMINATOR) + } else { + p.write(literal.SPACE) + } + } +} + +func (p *printVisitor) EnterUnionMemberType(ref int) { + if p.document.UnionMemberTypeIsFirst(ref, p.Ancestors[len(p.Ancestors)-1]) { + p.write(literal.SPACE) + p.write(literal.EQUALS) + p.write(literal.SPACE) + } + p.write(p.document.TypeNameBytes(ref)) + if !p.document.UnionMemberTypeIsLast(ref, p.Ancestors[len(p.Ancestors)-1]) { + p.write(literal.SPACE) + p.write(literal.PIPE) + p.write(literal.SPACE) + } +} + +func (p *printVisitor) LeaveUnionMemberType(ref int) { + +} + +func (p *printVisitor) EnterEnumTypeDefinition(ref int) { + + if p.document.EnumTypeDefinitions[ref].Description.IsDefined { + p.must(p.document.PrintDescription(p.document.EnumTypeDefinitions[ref].Description, nil, 0, p.out)) + p.write(literal.LINETERMINATOR) + } + + p.write(literal.ENUM) + p.write(literal.SPACE) + p.write(p.document.EnumTypeDefinitionNameBytes(ref)) + if p.document.EnumTypeDefinitionHasDirectives(ref) { + p.write(literal.SPACE) + } +} + +func (p *printVisitor) LeaveEnumTypeDefinition(ref int) { + if !p.document.NodeIsLastRootNode(ast.Node{Kind: ast.NodeKindEnumTypeDefinition, Ref: ref}) { + if p.indent != nil { + p.write(literal.LINETERMINATOR) + p.write(literal.LINETERMINATOR) + } else { + p.write(literal.SPACE) + } + } +} + +func (p *printVisitor) EnterEnumTypeExtension(ref int) { + + if p.document.EnumTypeExtensions[ref].Description.IsDefined { + p.must(p.document.PrintDescription(p.document.EnumTypeExtensions[ref].Description, nil, 0, p.out)) + p.write(literal.LINETERMINATOR) + } + + p.write(literal.EXTEND) + p.write(literal.SPACE) + p.write(literal.ENUM) + p.write(literal.SPACE) + p.write(p.document.EnumTypeExtensionNameBytes(ref)) + if p.document.EnumTypeExtensionHasDirectives(ref) { + p.write(literal.SPACE) + } +} + +func (p *printVisitor) LeaveEnumTypeExtension(ref int) { + if !p.document.NodeIsLastRootNode(ast.Node{Kind: ast.NodeKindEnumTypeExtension, Ref: ref}) { + if p.indent != nil { + p.write(literal.LINETERMINATOR) + p.write(literal.LINETERMINATOR) + } else { + p.write(literal.SPACE) + } + } +} + +func (p *printVisitor) EnterEnumValueDefinition(ref int) { + if p.document.EnumValueDefinitionIsFirst(ref, p.Ancestors[len(p.Ancestors)-1]) { + p.write(literal.SPACE) + p.write(literal.LBRACE) + if p.indent != nil { + p.write(literal.LINETERMINATOR) + } + } + if p.document.EnumValueDefinitions[ref].Description.IsDefined { + p.must(p.document.PrintDescription(p.document.EnumValueDefinitions[ref].Description, p.indent, p.indentationDepth(), p.out)) + p.write(literal.LINETERMINATOR) + } + p.writeIndented(p.document.EnumValueDefinitionNameBytes(ref)) +} + +func (p *printVisitor) LeaveEnumValueDefinition(ref int) { + if p.document.EnumValueDefinitionIsLast(ref, p.Ancestors[len(p.Ancestors)-1]) { + if p.indent != nil { + p.write(literal.LINETERMINATOR) + } + p.write(literal.RBRACE) + } else { + if p.indent != nil { + p.write(literal.LINETERMINATOR) + } else { + p.write(literal.SPACE) + } + } +} + +func (p *printVisitor) EnterInputObjectTypeDefinition(ref int) { + + if 
p.document.InputObjectTypeDefinitions[ref].Description.IsDefined { + p.must(p.document.PrintDescription(p.document.InputObjectTypeDefinitions[ref].Description, nil, 0, p.out)) + if p.indent != nil { + p.write(literal.LINETERMINATOR) + } + } + + p.write(literal.INPUT) + p.write(literal.SPACE) + p.write(p.document.InputObjectTypeDefinitionNameBytes(ref)) + p.write(literal.SPACE) + + p.inputValueDefinitionOpener = literal.LBRACE + p.inputValueDefinitionCloser = literal.RBRACE +} + +func (p *printVisitor) LeaveInputObjectTypeDefinition(ref int) { + if !p.document.NodeIsLastRootNode(ast.Node{Kind: ast.NodeKindInputObjectTypeDefinition, Ref: ref}) { + if p.indent != nil { + p.write(literal.LINETERMINATOR) + p.write(literal.LINETERMINATOR) + } else { + p.write(literal.SPACE) + } + } +} + +func (p *printVisitor) EnterInputObjectTypeExtension(ref int) { + + if p.document.InputObjectTypeExtensions[ref].Description.IsDefined { + p.must(p.document.PrintDescription(p.document.InputObjectTypeExtensions[ref].Description, nil, 0, p.out)) + if p.indent != nil { + p.write(literal.LINETERMINATOR) + } + } + + p.write(literal.EXTEND) + p.write(literal.SPACE) + p.write(literal.INPUT) + p.write(literal.SPACE) + p.write(p.document.InputObjectTypeExtensionNameBytes(ref)) + p.write(literal.SPACE) + + p.inputValueDefinitionOpener = literal.LBRACE + p.inputValueDefinitionCloser = literal.RBRACE +} + +func (p *printVisitor) LeaveInputObjectTypeExtension(ref int) { + if !p.document.NodeIsLastRootNode(ast.Node{Kind: ast.NodeKindInputObjectTypeExtension, Ref: ref}) { + if p.indent != nil { + p.write(literal.LINETERMINATOR) + p.write(literal.LINETERMINATOR) + } else { + p.write(literal.SPACE) + } + } +} + +func (p *printVisitor) EnterDirectiveDefinition(ref int) { + if p.document.DirectiveDefinitions[ref].Description.IsDefined { + p.must(p.document.PrintDescription(p.document.DirectiveDefinitions[ref].Description, nil, 0, p.out)) + p.write(literal.LINETERMINATOR) + } + + p.write(literal.DIRECTIVE) + p.write(literal.SPACE) + p.write(literal.AT) + p.write(p.document.DirectiveDefinitionNameBytes(ref)) + p.isFirstDirectiveLocation = true + p.isDirectiveRepeatable = p.document.DirectiveDefinitionIsRepeatable(ref) + + p.inputValueDefinitionOpener = literal.LPAREN + p.inputValueDefinitionCloser = literal.RPAREN +} + +func (p *printVisitor) LeaveDirectiveDefinition(ref int) { + if !p.document.NodeIsLastRootNode(ast.Node{Kind: ast.NodeKindDirectiveDefinition, Ref: ref}) { + if p.indent != nil { + p.write(literal.LINETERMINATOR) + p.write(literal.LINETERMINATOR) + } else { + p.write(literal.SPACE) + } + } +} + +func (p *printVisitor) EnterDirectiveLocation(location ast.DirectiveLocation) { + + if p.isFirstDirectiveLocation { + if p.isDirectiveRepeatable { + p.write(literal.SPACE) + p.write(literal.REPEATABLE) + } + + p.isFirstDirectiveLocation = false + p.write(literal.SPACE) + p.write(literal.ON) + p.write(literal.SPACE) + } else { + p.write(literal.SPACE) + p.write(literal.PIPE) + p.write(literal.SPACE) + } + + switch location { + case ast.ExecutableDirectiveLocationQuery: + p.write(literal.LocationQuery) + case ast.ExecutableDirectiveLocationMutation: + p.write(literal.LocationMutation) + case ast.ExecutableDirectiveLocationSubscription: + p.write(literal.LocationSubscription) + case ast.ExecutableDirectiveLocationField: + p.write(literal.LocationField) + case ast.ExecutableDirectiveLocationFragmentDefinition: + p.write(literal.LocationFragmentDefinition) + case ast.ExecutableDirectiveLocationFragmentSpread: + 
p.write(literal.LocationFragmentSpread) + case ast.ExecutableDirectiveLocationInlineFragment: + p.write(literal.LocationInlineFragment) + case ast.ExecutableDirectiveLocationVariableDefinition: + p.write(literal.LocationVariableDefinition) + case ast.TypeSystemDirectiveLocationSchema: + p.write(literal.LocationSchema) + case ast.TypeSystemDirectiveLocationScalar: + p.write(literal.LocationScalar) + case ast.TypeSystemDirectiveLocationObject: + p.write(literal.LocationObject) + case ast.TypeSystemDirectiveLocationFieldDefinition: + p.write(literal.LocationFieldDefinition) + case ast.TypeSystemDirectiveLocationArgumentDefinition: + p.write(literal.LocationArgumentDefinition) + case ast.TypeSystemDirectiveLocationInterface: + p.write(literal.LocationInterface) + case ast.TypeSystemDirectiveLocationUnion: + p.write(literal.LocationUnion) + case ast.TypeSystemDirectiveLocationEnum: + p.write(literal.LocationEnum) + case ast.TypeSystemDirectiveLocationEnumValue: + p.write(literal.LocationEnumValue) + case ast.TypeSystemDirectiveLocationInputObject: + p.write(literal.LocationInputObject) + case ast.TypeSystemDirectiveLocationInputFieldDefinition: + p.write(literal.LocationInputFieldDefinition) + } +} + +func (p *printVisitor) LeaveDirectiveLocation(location ast.DirectiveLocation) { + +} + +func (p *printVisitor) EnterSchemaDefinition(ref int) { + p.write(literal.SCHEMA) + p.write(literal.SPACE) +} + +func (p *printVisitor) LeaveSchemaDefinition(ref int) { + if p.indent != nil { + p.write(literal.LINETERMINATOR) + } + p.write(literal.RBRACE) + if !p.document.NodeIsLastRootNode(ast.Node{Kind: ast.NodeKindSchemaDefinition, Ref: ref}) { + if p.indent != nil { + p.write(literal.LINETERMINATOR) + p.write(literal.LINETERMINATOR) + } else { + p.write(literal.SPACE) + } + } +} + +func (p *printVisitor) EnterSchemaExtension(ref int) { + p.write(literal.EXTEND) + p.write(literal.SPACE) + p.write(literal.SCHEMA) + p.write(literal.SPACE) +} + +func (p *printVisitor) LeaveSchemaExtension(ref int) { + if p.indent != nil { + p.write(literal.LINETERMINATOR) + } + p.write(literal.RBRACE) + if !p.document.NodeIsLastRootNode(ast.Node{Kind: ast.NodeKindSchemaExtension, Ref: ref}) { + if p.indent != nil { + p.write(literal.LINETERMINATOR) + p.write(literal.LINETERMINATOR) + } else { + p.write(literal.SPACE) + } + } +} + +func (p *printVisitor) EnterRootOperationTypeDefinition(ref int) { + if p.document.RootOperationTypeDefinitionIsFirstInSchemaDefinition(ref, p.Ancestors[len(p.Ancestors)-1]) { + p.write(literal.LBRACE) + if p.indent != nil { + p.write(literal.LINETERMINATOR) + } + } + switch p.document.RootOperationTypeDefinitions[ref].OperationType { + case ast.OperationTypeQuery: + p.writeIndented(literal.QUERY) + case ast.OperationTypeMutation: + p.writeIndented(literal.MUTATION) + case ast.OperationTypeSubscription: + p.writeIndented(literal.SUBSCRIPTION) + } + p.write(literal.COLON) + p.write(literal.SPACE) + p.write(p.document.Input.ByteSlice(p.document.RootOperationTypeDefinitions[ref].NamedType.Name)) +} + +func (p *printVisitor) LeaveRootOperationTypeDefinition(ref int) { + if !p.document.RootOperationTypeDefinitionIsLastInSchemaDefinition(ref, p.Ancestors[len(p.Ancestors)-1]) { + if p.indent != nil { + p.write(literal.LINETERMINATOR) + } else { + p.write(literal.SPACE) + } + } +} + +func (p *printVisitor) EnterDocument(operation, definition *ast.Document) { + +} + +func (p *printVisitor) LeaveDocument(operation, definition *ast.Document) { + +} + +func (p *printVisitor) writeFieldType(ref int) { + 
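// emit ": " followed by the field definition's printed type +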
p.write(literal.COLON)
+	p.write(literal.SPACE)
+	p.must(p.document.PrintType(p.document.FieldDefinitionType(ref), p.out))
+}
diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/asttransform/asttransform.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/asttransform/asttransform.go
new file mode 100644
index 00000000000..1d862526723
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/asttransform/asttransform.go
@@ -0,0 +1,168 @@
+// Package asttransform contains a set of helper methods to make recursive AST transformations possible.
+//
+// This is especially useful for AST normalization, e.g. nested fragment inlining.
+//
+// This package is necessary to make AST transformations possible while walking an AST recursively.
+// In order to resolve dependencies in a tree (inline fragments & fragment spreads) it's necessary to resolve them in a specific order.
+// The right order, to avoid corrupting the tree, is from the deepest level up to the root.
+// Therefore this package is used to register transformations while walking an AST, bringing all transformations into the right order.
+// Only then, when all transformations are ordered by depth, is it possible to apply them safely.
+//
+package asttransform
+
+import (
+	"sort"
+
+	"github.com/TykTechnologies/graphql-go-tools/pkg/ast"
+)
+
+type (
+	// Transformable defines the interface which needs to be implemented in order to apply Transformations.
+	// This needs to be implemented by any AST in order to be transformable.
+	Transformable interface {
+		// DeleteRootNode marks a Node for deletion
+		DeleteRootNode(node ast.Node)
+		// EmptySelectionSet marks a selectionset for emptying
+		EmptySelectionSet(ref int)
+		// AppendSelectionSet marks to append a reference to a selectionset
+		AppendSelectionSet(ref int, appendRef int)
+		// ReplaceFragmentSpread marks to replace a fragment spread with a selectionset
+		ReplaceFragmentSpread(selectionSet int, spreadRef int, replaceWithSelectionSet int)
+		// ReplaceFragmentSpreadWithInlineFragment marks a fragment spread to be replaced with an inline fragment
+		ReplaceFragmentSpreadWithInlineFragment(selectionSet int, spreadRef int, replaceWithSelectionSet int, typeCondition ast.TypeCondition)
+	}
+	transformation interface {
+		apply(transformable Transformable)
+	}
+	// Precedence defines Depth and Order of each transformation
+	Precedence struct {
+		Depth int
+		Order int
+	}
+	action struct {
+		precedence     Precedence
+		transformation transformation
+	}
+	// Transformer takes transformation registrations and applies them
+	Transformer struct {
+		actions []action
+	}
+)
+
+// Reset empties all actions
+func (t *Transformer) Reset() {
+	t.actions = t.actions[:0]
+}
+
+// ApplyTransformations applies all registered transformations to a transformable
+func (t *Transformer) ApplyTransformations(transformable Transformable) {
+
+	sort.Slice(t.actions, func(i, j int) bool {
+		if t.actions[i].precedence.Depth != t.actions[j].precedence.Depth {
+			return t.actions[i].precedence.Depth > t.actions[j].precedence.Depth
+		}
+		return t.actions[i].precedence.Order < t.actions[j].precedence.Order
+	})
+
+	for i := range t.actions {
+		t.actions[i].transformation.apply(transformable)
+	}
+}
+
+// DeleteRootNode registers an action to delete a root node
+func (t *Transformer) DeleteRootNode(precedence Precedence, node ast.Node) {
+	t.actions = append(t.actions, action{
+		precedence:     precedence,
+		transformation: deleteRootNode{node: node},
+	})
+}
+
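+// Illustrative sketch (not part of the vendored file): a walker-based visitor
+// can register rewrites keyed to its current depth, so that deeper selection
+// sets are transformed before their parents. Here depth, order and the *Ref
+// values are placeholders supplied by the caller:
+//
+//	var t Transformer
+//	t.ReplaceFragmentSpread(Precedence{Depth: depth, Order: order}, setRef, spreadRef, fragmentSetRef)
+//	t.ApplyTransformations(doc) // deepest first; equal depths apply in registration order
+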
+// EmptySelectionSet registers an action to empty a selectionset
+func (t *Transformer) EmptySelectionSet(precedence Precedence, ref int) {
+	t.actions = append(t.actions, action{
+		precedence:     precedence,
+		transformation: emptySelectionSet{ref: ref},
+	})
+}
+
+// AppendSelectionSet registers an action to append a selection to a selectionset
+func (t *Transformer) AppendSelectionSet(precedence Precedence, ref int, appendRef int) {
+	t.actions = append(t.actions, action{
+		precedence: precedence,
+		transformation: appendSelectionSet{
+			ref:       ref,
+			appendRef: appendRef,
+		},
+	})
+}
+
+// ReplaceFragmentSpread registers an action to replace a fragment spread with a selectionset
+func (t *Transformer) ReplaceFragmentSpread(precedence Precedence, selectionSet int, spreadRef int, replaceWithSelectionSet int) {
+	t.actions = append(t.actions, action{
+		precedence: precedence,
+		transformation: replaceFragmentSpread{
+			selectionSet:            selectionSet,
+			spreadRef:               spreadRef,
+			replaceWithSelectionSet: replaceWithSelectionSet,
+		},
+	})
+}
+
+// ReplaceFragmentSpreadWithInlineFragment registers an action to replace a fragment spread with an inline fragment
+func (t *Transformer) ReplaceFragmentSpreadWithInlineFragment(precedence Precedence, selectionSet int, spreadRef int, replaceWithSelectionSet int, typeCondition ast.TypeCondition) {
+	t.actions = append(t.actions, action{
+		precedence: precedence,
+		transformation: replaceFragmentSpreadWithInlineFragment{
+			selectionSet:            selectionSet,
+			spreadRef:               spreadRef,
+			replaceWithSelectionSet: replaceWithSelectionSet,
+			typeCondition:           typeCondition,
+		},
+	})
+}
+
+type replaceFragmentSpread struct {
+	selectionSet            int
+	spreadRef               int
+	replaceWithSelectionSet int
+}
+
+func (r replaceFragmentSpread) apply(transformable Transformable) {
+	transformable.ReplaceFragmentSpread(r.selectionSet, r.spreadRef, r.replaceWithSelectionSet)
+}
+
+type replaceFragmentSpreadWithInlineFragment struct {
+	selectionSet            int
+	spreadRef               int
+	replaceWithSelectionSet int
+	typeCondition           ast.TypeCondition
+}
+
+func (r replaceFragmentSpreadWithInlineFragment) apply(transformable Transformable) {
+	transformable.ReplaceFragmentSpreadWithInlineFragment(r.selectionSet, r.spreadRef, r.replaceWithSelectionSet, r.typeCondition)
+}
+
+type deleteRootNode struct {
+	node ast.Node
+}
+
+func (d deleteRootNode) apply(transformable Transformable) {
+	transformable.DeleteRootNode(d.node)
+}
+
+type emptySelectionSet struct {
+	ref int
+}
+
+func (e emptySelectionSet) apply(transformable Transformable) {
+	transformable.EmptySelectionSet(e.ref)
+}
+
+type appendSelectionSet struct {
+	ref       int
+	appendRef int
+}
+
+func (a appendSelectionSet) apply(transformable Transformable) {
+	transformable.AppendSelectionSet(a.ref, a.appendRef)
+}
diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/asttransform/baseschema.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/asttransform/baseschema.go
new file mode 100644
index 00000000000..bfad7508764
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/asttransform/baseschema.go
@@ -0,0 +1,340 @@
+package asttransform
+
+import (
+	"bytes"
+
+	"github.com/TykTechnologies/graphql-go-tools/pkg/ast"
+	"github.com/TykTechnologies/graphql-go-tools/pkg/astparser"
+	"github.com/TykTechnologies/graphql-go-tools/pkg/operationreport"
+)
+
+func MergeDefinitionWithBaseSchema(definition *ast.Document) error {
+	definition.Input.AppendInputBytes(baseSchema)
+	parser := astparser.NewParser()
+	report := 
operationreport.Report{} + parser.Parse(definition, &report) + if report.HasErrors() { + return report + } + return handleSchema(definition) +} + +func handleSchema(definition *ast.Document) error { + var queryNodeRef int + queryNode, hasQueryNode := findQueryNode(definition) + if hasQueryNode { + queryNodeRef = queryNode.Ref + } else { + queryNodeRef = definition.ImportObjectTypeDefinition("Query", "", nil, nil) + } + + addSchemaDefinition(definition) + addMissingRootOperationTypeDefinitions(definition) + addIntrospectionQueryFields(definition, queryNodeRef) + + typeNamesVisitor := NewTypeNameVisitor() + + return typeNamesVisitor.ExtendSchema(definition) +} + +func addSchemaDefinition(definition *ast.Document) { + if definition.HasSchemaDefinition() { + return + } + + schemaDefinition := ast.SchemaDefinition{} + definition.AddSchemaDefinitionRootNode(schemaDefinition) +} + +func addMissingRootOperationTypeDefinitions(definition *ast.Document) { + var rootOperationTypeRefs []int + + for i := range definition.RootNodes { + if definition.RootNodes[i].Kind == ast.NodeKindObjectTypeDefinition { + typeName := definition.ObjectTypeDefinitionNameBytes(definition.RootNodes[i].Ref) + + switch { + case bytes.Equal(typeName, ast.DefaultQueryTypeName): + rootOperationTypeRefs = createRootOperationTypeIfNotExists(definition, rootOperationTypeRefs, ast.OperationTypeQuery, i) + case bytes.Equal(typeName, ast.DefaultMutationTypeName): + rootOperationTypeRefs = createRootOperationTypeIfNotExists(definition, rootOperationTypeRefs, ast.OperationTypeMutation, i) + case bytes.Equal(typeName, ast.DefaultSubscriptionTypeName): + rootOperationTypeRefs = createRootOperationTypeIfNotExists(definition, rootOperationTypeRefs, ast.OperationTypeSubscription, i) + default: + continue + } + } + } + + definition.SchemaDefinitions[definition.SchemaDefinitionRef()].AddRootOperationTypeDefinitionRefs(rootOperationTypeRefs...) +} + +func createRootOperationTypeIfNotExists(definition *ast.Document, rootOperationTypeRefs []int, operationType ast.OperationType, nodeRef int) []int { + for i := range definition.RootOperationTypeDefinitions { + if definition.RootOperationTypeDefinitions[i].OperationType == operationType { + return rootOperationTypeRefs + } + } + + ref := definition.CreateRootOperationTypeDefinition(operationType, nodeRef) + return append(rootOperationTypeRefs, ref) +} + +func addIntrospectionQueryFields(definition *ast.Document, objectTypeDefinitionRef int) { + var fieldRefs []int + if !definition.ObjectTypeDefinitionHasField(objectTypeDefinitionRef, []byte("__schema")) { + fieldRefs = append(fieldRefs, addSchemaField(definition)) + } + + if !definition.ObjectTypeDefinitionHasField(objectTypeDefinitionRef, []byte("__type")) { + fieldRefs = append(fieldRefs, addTypeField(definition)) + } + + definition.ObjectTypeDefinitions[objectTypeDefinitionRef].FieldsDefinition.Refs = append(definition.ObjectTypeDefinitions[objectTypeDefinitionRef].FieldsDefinition.Refs, fieldRefs...) 
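+ // mark the object type as having field definitions now that the introspection fields are attached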
+ definition.ObjectTypeDefinitions[objectTypeDefinitionRef].HasFieldDefinitions = true +} + +func addSchemaField(definition *ast.Document) (ref int) { + fieldNameRef := definition.Input.AppendInputBytes([]byte("__schema")) + fieldTypeRef := definition.AddNonNullNamedType([]byte("__Schema")) + + return definition.AddFieldDefinition(ast.FieldDefinition{ + Name: fieldNameRef, + Type: fieldTypeRef, + }) +} + +func addTypeField(definition *ast.Document) (ref int) { + fieldNameRef := definition.Input.AppendInputBytes([]byte("__type")) + fieldTypeRef := definition.AddNamedType([]byte("__Type")) + + argumentNameRef := definition.Input.AppendInputBytes([]byte("name")) + argumentTypeRef := definition.AddNonNullNamedType([]byte("String")) + + argumentRef := definition.AddInputValueDefinition(ast.InputValueDefinition{ + Name: argumentNameRef, + Type: argumentTypeRef, + }) + + return definition.AddFieldDefinition(ast.FieldDefinition{ + Name: fieldNameRef, + Type: fieldTypeRef, + + HasArgumentsDefinitions: true, + ArgumentsDefinition: ast.InputValueDefinitionList{ + Refs: []int{argumentRef}, + }, + }) +} + +func findQueryNode(definition *ast.Document) (queryNode ast.Node, ok bool) { + queryNode, ok = definition.Index.FirstNodeByNameBytes(definition.Index.QueryTypeName) + if !ok { + queryNode, ok = definition.Index.FirstNodeByNameStr("Query") + } + + return queryNode, ok +} + +var baseSchema = []byte(`"The 'Int' scalar type represents non-fractional signed whole numeric values. Int can represent values between -(2^31) and 2^31 - 1." +scalar Int +"The 'Float' scalar type represents signed double-precision fractional values as specified by [IEEE 754](http://en.wikipedia.org/wiki/IEEE_floating_point)." +scalar Float +"The 'String' scalar type represents textual data, represented as UTF-8 character sequences. The String type is most often used by GraphQL to represent free-form human-readable text." +scalar String +"The 'Boolean' scalar type represents 'true' or 'false' ." +scalar Boolean +"The 'ID' scalar type represents a unique identifier, often used to refetch an object or as key for a cache. The ID type appears in a JSON response as a String; however, it is not intended to be human-readable. When expected as an input type, any string (such as '4') or integer (such as 4) input value will be accepted as an ID." +scalar ID +"Directs the executor to include this field or fragment only when the argument is true." +directive @include( + " Included when true." + if: Boolean! +) on FIELD | FRAGMENT_SPREAD | INLINE_FRAGMENT +"Directs the executor to skip this field or fragment when the argument is true." +directive @skip( + "Skipped when true." + if: Boolean! +) on FIELD | FRAGMENT_SPREAD | INLINE_FRAGMENT +"Marks an element of a GraphQL schema as no longer supported." +directive @deprecated( + """ + Explains why this element was deprecated, usually also including a suggestion + for how to access supported similar data. Formatted in + [Markdown](https://daringfireball.net/projects/markdown/). + """ + reason: String = "No longer supported" +) on FIELD_DEFINITION | ENUM_VALUE + +""" +The @removeNullVariables directive allows you to remove variables with null value from your GraphQL Query or Mutation Operations. + +A potential use-case could be that you have a graphql upstream which is not accepting null values for variables. +By enabling this directive all variables with null values will be removed from upstream query. 
+ +query ($say: String, $name: String) @removeNullVariables { + hello(say: $say, name: $name) +} + +Directive will transform variables json and remove top level null values. +{ "say": null, "name": "world" } + +So upstream will receive the following variables: + +{ "name": "world" } +""" +directive @removeNullVariables on QUERY | MUTATION + +""" +A Directive provides a way to describe alternate runtime execution and type validation behavior in a GraphQL document. +In some cases, you need to provide options to alter GraphQL's execution behavior +in ways field arguments will not suffice, such as conditionally including or +skipping a field. Directives provide this by describing additional information +to the executor. +""" +type __Directive { + name: String! + description: String + locations: [__DirectiveLocation!]! + args: [__InputValue!]! + isRepeatable: Boolean! +} + +""" +A Directive can be adjacent to many parts of the GraphQL language, a +__DirectiveLocation describes one such possible adjacencies. +""" +enum __DirectiveLocation { + "Location adjacent to a query operation." + QUERY + "Location adjacent to a mutation operation." + MUTATION + "Location adjacent to a subscription operation." + SUBSCRIPTION + "Location adjacent to a field." + FIELD + "Location adjacent to a fragment definition." + FRAGMENT_DEFINITION + "Location adjacent to a fragment spread." + FRAGMENT_SPREAD + "Location adjacent to an inline fragment." + INLINE_FRAGMENT + "Location adjacent to a schema definition." + SCHEMA + "Location adjacent to a scalar definition." + SCALAR + "Location adjacent to an object type definition." + OBJECT + "Location adjacent to a field definition." + FIELD_DEFINITION + "Location adjacent to an argument definition." + ARGUMENT_DEFINITION + "Location adjacent to an interface definition." + INTERFACE + "Location adjacent to a union definition." + UNION + "Location adjacent to an enum definition." + ENUM + "Location adjacent to an enum value definition." + ENUM_VALUE + "Location adjacent to an input object type definition." + INPUT_OBJECT + "Location adjacent to an input object field definition." + INPUT_FIELD_DEFINITION +} +""" +One possible value for a given Enum. Enum values are unique values, not a +placeholder for a string or numeric value. However an Enum value is returned in +a JSON response as a string. +""" +type __EnumValue { + name: String! + description: String + isDeprecated: Boolean! + deprecationReason: String +} + +""" +Object and Interface types are described by a list of Fields, each of which has +a name, potentially a list of arguments, and a return type. +""" +type __Field { + name: String! + description: String + args: [__InputValue!]! + type: __Type! + isDeprecated: Boolean! + deprecationReason: String +} + +"""Arguments provided to Fields or Directives and the input fields of an +InputObject are represented as Input Values which describe their type and +optionally a default value. +""" +type __InputValue { + name: String! + description: String + type: __Type! + "A GraphQL-formatted string representing the default value for this input value." + defaultValue: String +} + +""" +A GraphQL Schema defines the capabilities of a GraphQL server. It exposes all +available types and directives on the server, as well as the entry points for +query, mutation, and subscription operations. +""" +type __Schema { + "A list of all types supported by this server." + types: [__Type!]! + "The type that query operations will be rooted at." + queryType: __Type! 
+ "If this server supports mutation, the type that mutation operations will be rooted at." + mutationType: __Type + "If this server support subscription, the type that subscription operations will be rooted at." + subscriptionType: __Type + "A list of all directives supported by this server." + directives: [__Directive!]! +} + +""" +The fundamental unit of any GraphQL Schema is the type. There are many kinds of +types in GraphQL as represented by the '__TypeKind' enum. + +Depending on the kind of a type, certain fields describe information about that +type. Scalar types provide no information beyond a name and description, while +Enum types provide their values. Object and Interface types provide the fields +they describe. Abstract types, Union and Interface, provide the Object types +possible at runtime. List and NonNull types compose other types. +""" +type __Type { + kind: __TypeKind! + name: String + description: String + fields(includeDeprecated: Boolean = false): [__Field!] + interfaces: [__Type!] + possibleTypes: [__Type!] + enumValues(includeDeprecated: Boolean = false): [__EnumValue!] + inputFields: [__InputValue!] + ofType: __Type +} + +"An enum describing what kind of type a given '__Type' is." +enum __TypeKind { + "Indicates this type is a scalar." + SCALAR + "Indicates this type is an object. 'fields' and 'interfaces' are valid fields." + OBJECT + "Indicates this type is an interface. 'fields' ' and ' 'possibleTypes' are valid fields." + INTERFACE + "Indicates this type is a union. 'possibleTypes' is a valid field." + UNION + "Indicates this type is an enum. 'enumValues' is a valid field." + ENUM + "Indicates this type is an input object. 'inputFields' is a valid field." + INPUT_OBJECT + "Indicates this type is a list. 'ofType' is a valid field." + LIST + "Indicates this type is a non-null. 'ofType' is a valid field." 
+ NON_NULL +}`) diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/asttransform/typename_visitor.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/asttransform/typename_visitor.go new file mode 100644 index 00000000000..b410f630577 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/asttransform/typename_visitor.go @@ -0,0 +1,87 @@ +package asttransform + +import ( + "bytes" + + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/literal" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +const typenameFieldName = "__typename" + +type TypeNameVisitor struct { + *astvisitor.Walker + definition *ast.Document +} + +func NewTypeNameVisitor() *TypeNameVisitor { + walker := astvisitor.NewWalker(48) + + visitor := &TypeNameVisitor{ + Walker: &walker, + } + + walker.RegisterEnterDocumentVisitor(visitor) + walker.RegisterLeaveObjectTypeDefinitionVisitor(visitor) + walker.RegisterLeaveInterfaceTypeDefinitionVisitor(visitor) + walker.RegisterLeaveUnionTypeDefinitionVisitor(visitor) + + return visitor +} + +func (v *TypeNameVisitor) ExtendSchema(definition *ast.Document) error { + report := &operationreport.Report{} + + v.Walk(definition, definition, report) + + if report.HasErrors() { + return report + } + return nil +} + +func (v *TypeNameVisitor) EnterDocument(definition, _ *ast.Document) { + v.definition = definition +} + +func (v *TypeNameVisitor) LeaveInterfaceTypeDefinition(ref int) { + if v.definition.InterfaceTypeDefinitions[ref].HasFieldDefinitions && + v.definition.FieldDefinitionsContainField(v.definition.InterfaceTypeDefinitions[ref].FieldsDefinition.Refs, literal.TYPENAME) { + return + } + + v.definition.InterfaceTypeDefinitions[ref].FieldsDefinition.Refs = append(v.definition.InterfaceTypeDefinitions[ref].FieldsDefinition.Refs, v.addTypeNameField()) + v.definition.InterfaceTypeDefinitions[ref].HasFieldDefinitions = true +} + +func (v *TypeNameVisitor) LeaveObjectTypeDefinition(ref int) { + objectTypeDefName := v.definition.ObjectTypeDefinitionNameBytes(ref) + if bytes.Equal(objectTypeDefName, v.definition.Index.SubscriptionTypeName) || + bytes.Equal(objectTypeDefName, ast.DefaultSubscriptionTypeName) { + return + } + + if v.definition.ObjectTypeDefinitions[ref].HasFieldDefinitions && + v.definition.ObjectTypeDefinitionHasField(ref, literal.TYPENAME) { + return + } + + v.definition.ObjectTypeDefinitions[ref].FieldsDefinition.Refs = append(v.definition.ObjectTypeDefinitions[ref].FieldsDefinition.Refs, v.addTypeNameField()) + v.definition.ObjectTypeDefinitions[ref].HasFieldDefinitions = true +} + +func (v *TypeNameVisitor) LeaveUnionTypeDefinition(ref int) { + if v.definition.UnionTypeDefinitions[ref].HasFieldDefinitions && + v.definition.UnionTypeDefinitionHasField(ref, literal.TYPENAME) { + return // this makes the operation idempotent + } + v.definition.UnionTypeDefinitions[ref].FieldsDefinition.Refs = []int{v.addTypeNameField()} + v.definition.UnionTypeDefinitions[ref].HasFieldDefinitions = true +} + +func (v *TypeNameVisitor) addTypeNameField() (ref int) { + typeRef := v.definition.AddNonNullNamedType(literal.STRING) + return v.definition.ImportFieldDefinition(typenameFieldName, "", typeRef, nil, nil) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/definition_validation.go 
b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/definition_validation.go new file mode 100644 index 00000000000..e0e6728f813 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/definition_validation.go @@ -0,0 +1,56 @@ +package astvalidation + +import ( + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +func DefaultDefinitionValidator() *DefinitionValidator { + return NewDefinitionValidator( + PopulatedTypeBodies(), + UniqueOperationTypes(), + UniqueTypeNames(), + UniqueFieldDefinitionNames(), + UniqueEnumValueNames(), + UniqueUnionMemberTypes(), + KnownTypeNames(), + RequireDefinedTypesForExtensions(), + ImplementTransitiveInterfaces(), + ImplementingTypesAreSupersets(), + DirectivesAreUniquePerLocation(), + ) +} + +func NewDefinitionValidator(rules ...Rule) *DefinitionValidator { + validator := &DefinitionValidator{ + walker: astvisitor.NewWalker(48), + } + + for _, rule := range rules { + validator.RegisterRule(rule) + } + + return validator +} + +type DefinitionValidator struct { + walker astvisitor.Walker +} + +func (d *DefinitionValidator) RegisterRule(rule Rule) { + rule(&d.walker) +} + +func (d *DefinitionValidator) Validate(definition *ast.Document, report *operationreport.Report) ValidationState { + if report == nil { + report = &operationreport.Report{} + } + + d.walker.Walk(definition, definition, report) + + if report.HasErrors() { + return Invalid + } + return Valid +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_all_variable_uses_defined.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_all_variable_uses_defined.go new file mode 100644 index 00000000000..9b8b28fc962 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_all_variable_uses_defined.go @@ -0,0 +1,56 @@ +package astvalidation + +import ( + "bytes" + + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +// AllVariableUsesDefined validates if used variables are defined within the operation +func AllVariableUsesDefined() Rule { + return func(walker *astvisitor.Walker) { + visitor := allVariableUsesDefinedVisitor{ + Walker: walker, + } + walker.RegisterEnterDocumentVisitor(&visitor) + walker.RegisterEnterArgumentVisitor(&visitor) + } +} + +type allVariableUsesDefinedVisitor struct { + *astvisitor.Walker + operation, definition *ast.Document +} + +func (a *allVariableUsesDefinedVisitor) EnterDocument(operation, definition *ast.Document) { + a.operation = operation + a.definition = definition +} + +func (a *allVariableUsesDefinedVisitor) EnterArgument(ref int) { + + if a.operation.Arguments[ref].Value.Kind != ast.ValueKindVariable { + return // skip because no variable + } + + if a.Ancestors[0].Kind != ast.NodeKindOperationDefinition { + // skip because variable is not used in operation which happens in case normalization did not merge the fragment definition + // this happens when a fragment is defined but not used which will itself lead to another validation error + // in which case we can safely skip here + return + } + + variableName := a.operation.VariableValueNameBytes(a.operation.Arguments[ref].Value.Ref) + + for _, i := range 
a.operation.OperationDefinitions[a.Ancestors[0].Ref].VariableDefinitions.Refs { + if bytes.Equal(variableName, a.operation.VariableDefinitionNameBytes(i)) { + return // return OK because variable is defined + } + } + + // at this point we're safe to say this variable was not defined on the root operation of this argument + argumentName := a.operation.ArgumentNameBytes(ref) + a.StopWithExternalErr(operationreport.ErrVariableNotDefinedOnArgument(variableName, argumentName)) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_all_variables_used.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_all_variables_used.go new file mode 100644 index 00000000000..ca98102b086 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_all_variables_used.go @@ -0,0 +1,84 @@ +package astvalidation + +import ( + "bytes" + + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +// AllVariablesUsed validates if all defined variables are used +func AllVariablesUsed() Rule { + return func(walker *astvisitor.Walker) { + visitor := allVariablesUsedVisitor{ + Walker: walker, + } + walker.RegisterEnterDocumentVisitor(&visitor) + walker.RegisterEnterOperationVisitor(&visitor) + walker.RegisterLeaveOperationVisitor(&visitor) + walker.RegisterEnterArgumentVisitor(&visitor) + } +} + +type allVariablesUsedVisitor struct { + *astvisitor.Walker + operation, definition *ast.Document + variableDefinitions []int +} + +func (a *allVariablesUsedVisitor) EnterDocument(operation, definition *ast.Document) { + a.operation = operation + a.definition = definition + a.variableDefinitions = a.variableDefinitions[:0] +} + +func (a *allVariablesUsedVisitor) EnterOperationDefinition(ref int) { + a.variableDefinitions = append(a.variableDefinitions, a.operation.OperationDefinitions[ref].VariableDefinitions.Refs...) +} + +func (a *allVariablesUsedVisitor) LeaveOperationDefinition(ref int) { + if len(a.variableDefinitions) != 0 { + operationName := a.operation.Input.ByteSlice(a.operation.OperationDefinitions[ref].Name) + for _, i := range a.variableDefinitions { + variableName := a.operation.VariableDefinitionNameBytes(i) + a.Report.AddExternalError(operationreport.ErrVariableDefinedButNeverUsed(variableName, operationName)) + } + a.Stop() + } +} + +func (a *allVariablesUsedVisitor) EnterArgument(ref int) { + + if len(a.variableDefinitions) == 0 { + return // nothing to check, skip + } + + a.verifyValue(a.operation.Arguments[ref].Value) +} + +func (a *allVariablesUsedVisitor) verifyValue(value ast.Value) { + switch value.Kind { + case ast.ValueKindVariable: // don't skip + case ast.ValueKindObject: + for _, i := range a.operation.ObjectValues[value.Ref].Refs { + a.verifyValue(a.operation.ObjectFields[i].Value) + } + return + case ast.ValueKindList: + for _, i := range a.operation.ListValues[value.Ref].Refs { + a.verifyValue(a.operation.Values[i]) + } + return + default: + return // skip all others + } + + variableName := a.operation.VariableValueNameBytes(value.Ref) + for i, j := range a.variableDefinitions { + if bytes.Equal(variableName, a.operation.VariableDefinitionNameBytes(j)) { + a.variableDefinitions = append(a.variableDefinitions[:i], a.variableDefinitions[i+1:]...) 
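+ // this variable is used: remove its definition so only unused ones are reported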
+ return + } + } +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_argument_uniqueness.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_argument_uniqueness.go new file mode 100644 index 00000000000..39445402231 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_argument_uniqueness.go @@ -0,0 +1,42 @@ +package astvalidation + +import ( + "bytes" + + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +// ArgumentUniqueness validates if arguments are unique +func ArgumentUniqueness() Rule { + return func(walker *astvisitor.Walker) { + visitor := argumentUniquenessVisitor{ + Walker: walker, + } + walker.RegisterEnterDocumentVisitor(&visitor) + walker.RegisterEnterArgumentVisitor(&visitor) + } +} + +type argumentUniquenessVisitor struct { + *astvisitor.Walker + operation *ast.Document +} + +func (a *argumentUniquenessVisitor) EnterDocument(operation, definition *ast.Document) { + a.operation = operation +} + +func (a *argumentUniquenessVisitor) EnterArgument(ref int) { + + argumentName := a.operation.ArgumentNameBytes(ref) + argumentsAfter := a.operation.ArgumentsAfter(a.Ancestors[len(a.Ancestors)-1], ref) + + for _, i := range argumentsAfter { + if bytes.Equal(argumentName, a.operation.ArgumentNameBytes(i)) { + a.StopWithExternalErr(operationreport.ErrArgumentMustBeUnique(argumentName)) + return + } + } +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_directives_defined.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_directives_defined.go new file mode 100644 index 00000000000..9a298acddb9 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_directives_defined.go @@ -0,0 +1,39 @@ +package astvalidation + +import ( + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +// DirectivesAreDefined validates if used directives are defined +func DirectivesAreDefined() Rule { + return func(walker *astvisitor.Walker) { + visitor := directivesAreDefinedVisitor{ + Walker: walker, + } + walker.RegisterEnterDocumentVisitor(&visitor) + walker.RegisterEnterDirectiveVisitor(&visitor) + } +} + +type directivesAreDefinedVisitor struct { + *astvisitor.Walker + operation, definition *ast.Document +} + +func (d *directivesAreDefinedVisitor) EnterDocument(operation, definition *ast.Document) { + d.operation = operation + d.definition = definition +} + +func (d *directivesAreDefinedVisitor) EnterDirective(ref int) { + + directiveName := d.operation.DirectiveNameBytes(ref) + definition, exists := d.definition.Index.FirstNodeByNameBytes(directiveName) + + if !exists || definition.Kind != ast.NodeKindDirectiveDefinition { + d.StopWithExternalErr(operationreport.ErrDirectiveUndefined(directiveName)) + return + } +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_directives_in_valid_locations.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_directives_in_valid_locations.go new file mode 100644 index 00000000000..71ee0bf0613 --- /dev/null +++ 
b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_directives_in_valid_locations.go @@ -0,0 +1,56 @@ +package astvalidation + +import ( + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +// DirectivesAreInValidLocations validates if directives are used in the right place +func DirectivesAreInValidLocations() Rule { + return func(walker *astvisitor.Walker) { + visitor := directivesAreInValidLocationsVisitor{ + Walker: walker, + } + walker.RegisterEnterDocumentVisitor(&visitor) + walker.RegisterEnterDirectiveVisitor(&visitor) + } +} + +type directivesAreInValidLocationsVisitor struct { + *astvisitor.Walker + operation, definition *ast.Document +} + +func (d *directivesAreInValidLocationsVisitor) EnterDocument(operation, definition *ast.Document) { + d.operation = operation + d.definition = definition +} + +func (d *directivesAreInValidLocationsVisitor) EnterDirective(ref int) { + + directiveName := d.operation.DirectiveNameBytes(ref) + definition, exists := d.definition.Index.FirstNodeByNameBytes(directiveName) + + if !exists || definition.Kind != ast.NodeKindDirectiveDefinition { + return // not defined, skip + } + + ancestor := d.Ancestors[len(d.Ancestors)-1] + + if !d.directiveDefinitionContainsNodeLocation(definition.Ref, ancestor) { + ancestorKindName := d.operation.NodeKindNameBytes(ancestor) + d.StopWithExternalErr(operationreport.ErrDirectiveNotAllowedOnNode(directiveName, ancestorKindName)) + return + } +} + +func (d *directivesAreInValidLocationsVisitor) directiveDefinitionContainsNodeLocation(definition int, node ast.Node) bool { + + nodeDirectiveLocation, err := d.operation.NodeDirectiveLocation(node) + if err != nil { + return false + } + + return d.definition.DirectiveDefinitions[definition].DirectiveLocations.Get(nodeDirectiveLocation) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_directives_unique_per_location.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_directives_unique_per_location.go new file mode 100644 index 00000000000..806248a548a --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_directives_unique_per_location.go @@ -0,0 +1,68 @@ +package astvalidation + +import ( + "bytes" + + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +// DirectivesAreUniquePerLocation validates if directives are unique per location +func DirectivesAreUniquePerLocation() Rule { + return func(walker *astvisitor.Walker) { + visitor := directivesAreUniquePerLocationVisitor{ + Walker: walker, + } + walker.RegisterEnterDocumentVisitor(&visitor) + walker.RegisterEnterDirectiveVisitor(&visitor) + } +} + +type directivesAreUniquePerLocationVisitor struct { + *astvisitor.Walker + operation, definition *ast.Document + seenDuplicates map[int]struct{} +} + +func (d *directivesAreUniquePerLocationVisitor) EnterDocument(operation, definition *ast.Document) { + d.operation = operation + d.definition = definition + d.seenDuplicates = make(map[int]struct{}) +} + +func (d *directivesAreUniquePerLocationVisitor) EnterDirective(ref int) { + if _, seen := d.seenDuplicates[ref]; seen { + // skip directive reported as duplicate + return + } + + 
directiveName := d.operation.DirectiveNameBytes(ref) + + directiveDefRef, exists := d.definition.DirectiveDefinitionByNameBytes(directiveName) + if !exists { + // ignore unknown directives + return + } + + if d.definition.DirectiveDefinitionIsRepeatable(directiveDefRef) { + // ignore repeatable directives + return + } + + nodeDirectives := d.operation.NodeDirectives(d.Ancestors[len(d.Ancestors)-1]) + for _, j := range nodeDirectives { + if j == ref { + continue + } + if bytes.Equal(directiveName, d.operation.DirectiveNameBytes(j)) { + d.seenDuplicates[j] = struct{}{} + d.Report.AddExternalError(operationreport.ErrDirectiveMustBeUniquePerLocation( + directiveName, + d.operation.Directives[ref].At, + d.operation.Directives[j].At, + )) + return + } + } +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_document_contains_executable_operation.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_document_contains_executable_operation.go new file mode 100644 index 00000000000..dd90a4d3822 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_document_contains_executable_operation.go @@ -0,0 +1,34 @@ +package astvalidation + +import ( + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +// DocumentContainsExecutableOperation validates if the document actually contains an executable Operation +func DocumentContainsExecutableOperation() Rule { + return func(walker *astvisitor.Walker) { + visitor := &documentContainsExecutableOperation{ + Walker: walker, + } + walker.RegisterEnterDocumentVisitor(visitor) + } +} + +type documentContainsExecutableOperation struct { + *astvisitor.Walker +} + +func (d *documentContainsExecutableOperation) EnterDocument(operation, definition *ast.Document) { + if len(operation.RootNodes) == 0 { + d.StopWithExternalErr(operationreport.ErrDocumentDoesntContainExecutableOperation()) + return + } + for i := range operation.RootNodes { + if operation.RootNodes[i].Kind == ast.NodeKindOperationDefinition { + return + } + } + d.StopWithExternalErr(operationreport.ErrDocumentDoesntContainExecutableOperation()) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_field_selection_merging.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_field_selection_merging.go new file mode 100644 index 00000000000..47dc0fe7e70 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_field_selection_merging.go @@ -0,0 +1,225 @@ +package astvalidation + +import ( + "bytes" + + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/literal" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +// FieldSelectionMerging validates if field selections can be merged +func FieldSelectionMerging() Rule { + return func(walker *astvisitor.Walker) { + visitor := fieldSelectionMergingVisitor{Walker: walker} + walker.RegisterEnterDocumentVisitor(&visitor) + walker.RegisterEnterFieldVisitor(&visitor) + walker.RegisterEnterOperationVisitor(&visitor) + walker.RegisterEnterFragmentDefinitionVisitor(&visitor) + } +} + +type fieldSelectionMergingVisitor struct { + 
*astvisitor.Walker + definition, operation *ast.Document + scalarRequirements scalarRequirements + nonScalarRequirements nonScalarRequirements + refs []int + pathCache [256][32]ast.PathItem + pathCacheIndex int +} +type nonScalarRequirement struct { + path ast.Path + objectName ast.ByteSlice + fieldTypeRef int + fieldTypeDefinitionNode ast.Node +} + +type nonScalarRequirements []nonScalarRequirement + +func (f *fieldSelectionMergingVisitor) NonScalarRequirementsByPathField(path ast.Path, objectName ast.ByteSlice) []int { + f.refs = f.refs[:0] + for i := range f.nonScalarRequirements { + if f.nonScalarRequirements[i].path.Equals(path) && f.nonScalarRequirements[i].objectName.Equals(objectName) { + f.refs = append(f.refs, i) + } + } + return f.refs +} + +type scalarRequirement struct { + path ast.Path + objectName ast.ByteSlice + fieldRef int + fieldType int + enclosingTypeDefinition ast.Node + fieldTypeDefinitionNode ast.Node +} + +type scalarRequirements []scalarRequirement + +func (f *fieldSelectionMergingVisitor) ScalarRequirementsByPathField(path ast.Path, objectName ast.ByteSlice) []int { + f.refs = f.refs[:0] + for i := range f.scalarRequirements { + if f.scalarRequirements[i].path.Equals(path) && f.scalarRequirements[i].objectName.Equals(objectName) { + f.refs = append(f.refs, i) + } + } + return f.refs +} + +func (f *fieldSelectionMergingVisitor) resetRequirements() { + f.scalarRequirements = f.scalarRequirements[:0] + f.nonScalarRequirements = f.nonScalarRequirements[:0] +} + +func (f *fieldSelectionMergingVisitor) EnterDocument(operation, definition *ast.Document) { + f.operation = operation + f.definition = definition + f.pathCacheIndex = 0 +} + +func (f *fieldSelectionMergingVisitor) EnterFragmentDefinition(ref int) { + f.resetRequirements() +} + +func (f *fieldSelectionMergingVisitor) EnterOperationDefinition(ref int) { + f.resetRequirements() +} + +func (f *fieldSelectionMergingVisitor) EnterField(ref int) { + fieldName := f.operation.FieldNameBytes(ref) + if bytes.Equal(fieldName, literal.TYPENAME) { + return + } + objectName := f.operation.FieldAliasOrNameBytes(ref) + definition, ok := f.definition.NodeFieldDefinitionByName(f.EnclosingTypeDefinition, fieldName) + if !ok { + enclosingTypeName := f.definition.NodeNameBytes(f.EnclosingTypeDefinition) + f.StopWithExternalErr(operationreport.ErrFieldUndefinedOnType(fieldName, enclosingTypeName)) + return + } + + fieldType := f.definition.FieldDefinitionType(definition) + fieldDefinitionTypeNode := f.definition.FieldDefinitionTypeNode(definition) + if fieldDefinitionTypeNode.Kind != ast.NodeKindScalarTypeDefinition { + + matchedRequirements := f.NonScalarRequirementsByPathField(f.Path, objectName) + fieldDefinitionTypeKindPresentInRequirements := false + for _, i := range matchedRequirements { + + if !f.potentiallySameObject(fieldDefinitionTypeNode, f.nonScalarRequirements[i].fieldTypeDefinitionNode) { + if !objectName.Equals(f.nonScalarRequirements[i].objectName) { + f.StopWithExternalErr(operationreport.ErrResponseOfDifferingTypesMustBeOfSameShape(objectName, f.nonScalarRequirements[i].objectName)) + return + } + } else if !f.definition.TypesAreCompatibleDeep(f.nonScalarRequirements[i].fieldTypeRef, fieldType) { + left, err := f.definition.PrintTypeBytes(f.nonScalarRequirements[i].fieldTypeRef, nil) + if err != nil { + f.StopWithInternalErr(err) + return + } + right, err := f.definition.PrintTypeBytes(fieldType, nil) + if err != nil { + f.StopWithInternalErr(err) + return + } + 
f.StopWithExternalErr(operationreport.ErrTypesForFieldMismatch(objectName, left, right))
+				return
+			}
+
+			if fieldDefinitionTypeNode.Kind != f.nonScalarRequirements[i].fieldTypeDefinitionNode.Kind {
+				fieldDefinitionTypeKindPresentInRequirements = true
+			}
+		}
+
+		if len(matchedRequirements) != 0 && fieldDefinitionTypeKindPresentInRequirements {
+			return
+		}
+
+		var path ast.Path
+		if f.pathCacheIndex != len(f.pathCache)-1 {
+			path = f.pathCache[f.pathCacheIndex][:len(f.Path)]
+			f.pathCacheIndex++
+		} else {
+			path = make(ast.Path, len(f.Path))
+		}
+		copy(path, f.Path)
+
+		f.nonScalarRequirements = append(f.nonScalarRequirements, nonScalarRequirement{
+			path:                    path,
+			objectName:              objectName,
+			fieldTypeRef:            fieldType,
+			fieldTypeDefinitionNode: fieldDefinitionTypeNode,
+		})
+		return
+	}
+
+	matchedRequirements := f.ScalarRequirementsByPathField(f.Path, objectName)
+	fieldDefinitionTypeKindPresentInRequirements := false
+
+	for _, i := range matchedRequirements {
+		if f.potentiallySameObject(f.scalarRequirements[i].enclosingTypeDefinition, f.EnclosingTypeDefinition) {
+			if !f.operation.FieldsAreEqualFlat(f.scalarRequirements[i].fieldRef, ref) {
+				f.StopWithExternalErr(operationreport.ErrDifferingFieldsOnPotentiallySameType(objectName))
+				return
+			}
+		}
+		if !f.definition.TypesAreCompatibleDeep(f.scalarRequirements[i].fieldType, fieldType) {
+			left, err := f.definition.PrintTypeBytes(f.scalarRequirements[i].fieldType, nil)
+			if err != nil {
+				f.StopWithInternalErr(err)
+				return
+			}
+			right, err := f.definition.PrintTypeBytes(fieldType, nil)
+			if err != nil {
+				f.StopWithInternalErr(err)
+				return
+			}
+			f.StopWithExternalErr(operationreport.ErrFieldsConflict(objectName, left, right))
+			return
+		}
+
+		if fieldDefinitionTypeNode.Kind != f.scalarRequirements[i].fieldTypeDefinitionNode.Kind {
+			fieldDefinitionTypeKindPresentInRequirements = true
+		}
+	}
+
+	if len(matchedRequirements) != 0 && fieldDefinitionTypeKindPresentInRequirements {
+		return
+	}
+
+	var path ast.Path
+	if f.pathCacheIndex != len(f.pathCache)-1 {
+		path = f.pathCache[f.pathCacheIndex][:len(f.Path)]
+		f.pathCacheIndex++
+	} else {
+		path = make(ast.Path, len(f.Path))
+	}
+	copy(path, f.Path)
+
+	f.scalarRequirements = append(f.scalarRequirements, scalarRequirement{
+		path:                    path,
+		objectName:              objectName,
+		fieldRef:                ref,
+		fieldType:               fieldType,
+		enclosingTypeDefinition: f.EnclosingTypeDefinition,
+		fieldTypeDefinitionNode: fieldDefinitionTypeNode,
+	})
+}
+
+func (f *fieldSelectionMergingVisitor) potentiallySameObject(left, right ast.Node) bool {
+	switch {
+	case left.Kind == ast.NodeKindInterfaceTypeDefinition || right.Kind == ast.NodeKindInterfaceTypeDefinition:
+		return true
+	case left.Kind == ast.NodeKindObjectTypeDefinition && right.Kind == ast.NodeKindObjectTypeDefinition:
+		return bytes.Equal(f.definition.ObjectTypeDefinitionNameBytes(left.Ref), f.definition.ObjectTypeDefinitionNameBytes(right.Ref))
+	default:
+		return false
+	}
+}
+
+func (f *fieldSelectionMergingVisitor) EnterSelectionSet(ref int) {
+
+}
diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_fragments.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_fragments.go
new file mode 100644
index 00000000000..9bb9ad83f3e
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_fragments.go
@@ -0,0 +1,117 @@
+package astvalidation
+
+import (
+	"bytes"
+
+	"github.com/TykTechnologies/graphql-go-tools/pkg/ast"
+	"github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor"
+	"github.com/TykTechnologies/graphql-go-tools/pkg/operationreport"
+)
+
+// Fragments validates if the use of fragments in a given document is correct
func Fragments() Rule {
+	return func(walker *astvisitor.Walker) {
+		visitor := fragmentsVisitor{
+			Walker:                     walker,
+			fragmentDefinitionsVisited: make([]ast.ByteSlice, 0, 8),
+		}
+		walker.RegisterEnterDocumentVisitor(&visitor)
+		walker.RegisterLeaveDocumentVisitor(&visitor)
+		walker.RegisterEnterFragmentDefinitionVisitor(&visitor)
+		walker.RegisterEnterInlineFragmentVisitor(&visitor)
+		walker.RegisterEnterFragmentSpreadVisitor(&visitor)
+	}
+}
+
+type fragmentsVisitor struct {
+	*astvisitor.Walker
+	operation, definition      *ast.Document
+	fragmentDefinitionsVisited []ast.ByteSlice
+}
+
+func (f *fragmentsVisitor) EnterFragmentSpread(ref int) {
+	if f.Ancestors[0].Kind == ast.NodeKindOperationDefinition {
+		spreadName := f.operation.FragmentSpreadNameBytes(ref)
+		f.StopWithExternalErr(operationreport.ErrFragmentSpreadFormsCycle(spreadName))
+	}
+}
+
+func (f *fragmentsVisitor) LeaveDocument(operation, definition *ast.Document) {
+	for i := range f.fragmentDefinitionsVisited {
+		if !f.operation.FragmentDefinitionIsUsed(f.fragmentDefinitionsVisited[i]) {
+			fragmentName := f.fragmentDefinitionsVisited[i]
+			f.StopWithExternalErr(operationreport.ErrFragmentDefinedButNotUsed(fragmentName))
+			return
+		}
+	}
+}
+
+func (f *fragmentsVisitor) fragmentOnNodeIsAllowed(node ast.Node) bool {
+	switch node.Kind {
+	case ast.NodeKindObjectTypeDefinition, ast.NodeKindInterfaceTypeDefinition, ast.NodeKindUnionTypeDefinition:
+		return true
+	default:
+		return false
+	}
+}
+
+func (f *fragmentsVisitor) EnterInlineFragment(ref int) {
+
+	if !f.operation.InlineFragmentHasTypeCondition(ref) {
+		return
+	}
+
+	typeName := f.operation.InlineFragmentTypeConditionName(ref)
+
+	node, exists := f.definition.Index.FirstNonExtensionNodeByNameBytes(typeName)
+	if !exists {
+		typePosition := f.operation.Types[f.operation.InlineFragments[ref].TypeCondition.Type].Position
+		f.Report.AddExternalError(operationreport.ErrUnknownType(typeName, typePosition))
+		f.SkipNode() // skip the node: otherwise the walker cannot resolve the enclosing type and would stop with an error, even though the error has already been reported above
+		return
+	}
+
+	if !f.fragmentOnNodeIsAllowed(node) {
+		f.StopWithExternalErr(operationreport.ErrInlineFragmentOnTypeDisallowed(typeName))
+		return
+	}
+
+	if !f.definition.NodeFragmentIsAllowedOnNode(node, f.EnclosingTypeDefinition) {
+		enclosingTypeName := f.definition.NodeNameBytes(f.EnclosingTypeDefinition)
+		f.StopWithExternalErr(operationreport.ErrInlineFragmentOnTypeMismatchEnclosingType(typeName, enclosingTypeName))
+		return
+	}
+}
+
+func (f *fragmentsVisitor) EnterDocument(operation, definition *ast.Document) {
+	f.operation = operation
+	f.definition = definition
+	f.fragmentDefinitionsVisited = f.fragmentDefinitionsVisited[:0]
+}
+
+func (f *fragmentsVisitor) EnterFragmentDefinition(ref int) {
+
+	fragmentDefinitionName := f.operation.FragmentDefinitionNameBytes(ref)
+	typeName := f.operation.FragmentDefinitionTypeName(ref)
+
+	node, exists := f.definition.Index.FirstNodeByNameBytes(typeName)
+	if !exists {
+		typePosition := f.operation.Types[f.operation.FragmentDefinitions[ref].TypeCondition.Type].Position
+		f.StopWithExternalErr(operationreport.ErrUnknownType(typeName, typePosition))
+		return
+	}
+
+	if !f.fragmentOnNodeIsAllowed(node) {
+		f.StopWithExternalErr(operationreport.ErrFragmentDefinitionOnTypeDisallowed(fragmentDefinitionName, typeName))
+		return
+	}
+
+	for i := range f.fragmentDefinitionsVisited {
+		if bytes.Equal(fragmentDefinitionName, f.fragmentDefinitionsVisited[i]) {
+			f.StopWithExternalErr(operationreport.ErrFragmentDefinitionMustBeUnique(fragmentDefinitionName))
+			return
+		}
+	}
+
+	f.fragmentDefinitionsVisited = append(f.fragmentDefinitionsVisited, fragmentDefinitionName)
+}
diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_known_arguments.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_known_arguments.go
new file mode 100644
index 00000000000..b436655ff45
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_known_arguments.go
@@ -0,0 +1,62 @@
+package astvalidation
+
+import (
+	"github.com/TykTechnologies/graphql-go-tools/pkg/ast"
+	"github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor"
+	"github.com/TykTechnologies/graphql-go-tools/pkg/operationreport"
+)
+
+// KnownArguments validates if all arguments are known
+func KnownArguments() Rule {
+	return func(walker *astvisitor.Walker) {
+		visitor := knownArgumentsVisitor{
+			Walker: walker,
+		}
+		walker.RegisterEnterDocumentVisitor(&visitor)
+		walker.RegisterEnterArgumentVisitor(&visitor)
+		walker.RegisterEnterFieldVisitor(&visitor)
+	}
+}
+
+type knownArgumentsVisitor struct {
+	*astvisitor.Walker
+	operation, definition *ast.Document
+	enclosingNode         ast.Node
+}
+
+func (v *knownArgumentsVisitor) EnterField(ref int) {
+	_, exists := v.FieldDefinition(ref)
+	if !exists {
+		v.SkipNode() // ignore arguments of nonexistent fields
+		return
+	}
+
+	v.enclosingNode = v.EnclosingTypeDefinition
+}
+
+func (v *knownArgumentsVisitor) EnterDocument(operation, definition *ast.Document) {
+	v.operation = operation
+	v.definition = definition
+}
+
+func (v *knownArgumentsVisitor) EnterArgument(ref int) {
+	_, exists := v.ArgumentInputValueDefinition(ref)
+	if exists {
+		return
+	}
+
+	ancestor := v.Ancestor()
+	ancestorName := v.AncestorNameBytes()
+
+	argumentName := v.operation.ArgumentNameBytes(ref)
+	argumentPosition := v.operation.Arguments[ref].Position
+
+	switch ancestor.Kind {
+	case ast.NodeKindField:
+		objectTypeDefName := v.definition.ObjectTypeDefinitionNameBytes(v.enclosingNode.Ref)
+
+		v.Report.AddExternalError(operationreport.ErrArgumentNotDefinedOnField(argumentName, objectTypeDefName, ancestorName, argumentPosition))
+	case ast.NodeKindDirective:
+		v.Report.AddExternalError(operationreport.ErrArgumentNotDefinedOnDirective(argumentName, ancestorName, argumentPosition))
+	}
+}
diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_lone_anonymous_operation.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_lone_anonymous_operation.go
new file mode 100644
index 00000000000..b45160cb57a
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_lone_anonymous_operation.go
@@ -0,0 +1,31 @@
+package astvalidation
+
+import (
+	"github.com/TykTechnologies/graphql-go-tools/pkg/ast"
+	"github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor"
+	"github.com/TykTechnologies/graphql-go-tools/pkg/operationreport"
+)
+
+// LoneAnonymousOperation validates if anonymous operations are alone in a given document.
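+//
+// Illustrative example (added here for documentation, not taken from the
+// upstream sources): the following document is invalid, because the
+// anonymous operation is not the only operation in the document:
+//
+//	{ dog { name } }
+//	query getOwnerName { dog { owner { name } } }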
+func LoneAnonymousOperation() Rule { + return func(walker *astvisitor.Walker) { + walker.RegisterEnterDocumentVisitor(&loneAnonymousOperationVisitor{walker}) + } +} + +type loneAnonymousOperationVisitor struct { + *astvisitor.Walker +} + +func (l *loneAnonymousOperationVisitor) EnterDocument(operation, definition *ast.Document) { + if len(operation.OperationDefinitions) <= 1 { + return + } + + for i := range operation.OperationDefinitions { + if operation.OperationDefinitions[i].Name.Length() == 0 { + l.StopWithExternalErr(operationreport.ErrAnonymousOperationMustBeTheOnlyOperationInDocument()) + return + } + } +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_operation_name_uniqueness.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_operation_name_uniqueness.go new file mode 100644 index 00000000000..7be950305b0 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_operation_name_uniqueness.go @@ -0,0 +1,41 @@ +package astvalidation + +import ( + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +// OperationNameUniqueness validates if all operation names are unique +func OperationNameUniqueness() Rule { + return func(walker *astvisitor.Walker) { + walker.RegisterEnterDocumentVisitor(&operationNameUniquenessVisitor{walker}) + } +} + +type operationNameUniquenessVisitor struct { + *astvisitor.Walker +} + +func (o *operationNameUniquenessVisitor) EnterDocument(operation, definition *ast.Document) { + if len(operation.OperationDefinitions) <= 1 { + return + } + + for i := range operation.OperationDefinitions { + for k := range operation.OperationDefinitions { + if i == k || i > k { + continue + } + + left := operation.OperationDefinitions[i].Name + right := operation.OperationDefinitions[k].Name + + if ast.ByteSliceEquals(left, operation.Input, right, operation.Input) { + operationName := operation.Input.ByteSlice(operation.OperationDefinitions[i].Name) + o.StopWithExternalErr(operationreport.ErrOperationNameMustBeUnique(operationName)) + return + } + } + } +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_required_arguments.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_required_arguments.go new file mode 100644 index 00000000000..0c3cbf9bec6 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_required_arguments.go @@ -0,0 +1,53 @@ +package astvalidation + +import ( + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +// RequiredArguments validates if all required arguments are present +func RequiredArguments() Rule { + return func(walker *astvisitor.Walker) { + visitor := requiredArgumentsVisitor{ + Walker: walker, + } + walker.RegisterEnterDocumentVisitor(&visitor) + walker.RegisterEnterFieldVisitor(&visitor) + } +} + +type requiredArgumentsVisitor struct { + *astvisitor.Walker + operation, definition *ast.Document +} + +func (r *requiredArgumentsVisitor) EnterDocument(operation, definition *ast.Document) { + r.operation = operation + r.definition = definition +} + +func (r *requiredArgumentsVisitor) EnterField(ref int) { + + 
fieldName := r.operation.FieldNameBytes(ref)
+	inputValueDefinitions := r.definition.NodeFieldDefinitionArgumentsDefinitions(r.EnclosingTypeDefinition, fieldName)
+
+	for _, i := range inputValueDefinitions {
+		if r.definition.InputValueDefinitionArgumentIsOptional(i) {
+			continue
+		}
+
+		name := r.definition.InputValueDefinitionNameBytes(i)
+
+		argument, exists := r.operation.FieldArgument(ref, name)
+		if !exists {
+			r.StopWithExternalErr(operationreport.ErrArgumentRequiredOnField(name, fieldName))
+			return
+		}
+
+		if r.operation.ArgumentValue(argument).Kind == ast.ValueKindNull {
+			r.StopWithExternalErr(operationreport.ErrArgumentOnFieldMustNotBeNull(name, fieldName))
+			return
+		}
+	}
+}
diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_subscription_single_root_field.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_subscription_single_root_field.go
new file mode 100644
index 00000000000..104d688bdca
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_subscription_single_root_field.go
@@ -0,0 +1,37 @@
+package astvalidation
+
+import (
+	"github.com/TykTechnologies/graphql-go-tools/pkg/ast"
+	"github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor"
+	"github.com/TykTechnologies/graphql-go-tools/pkg/operationreport"
+)
+
+// SubscriptionSingleRootField validates if subscriptions have a single root field
+func SubscriptionSingleRootField() Rule {
+	return func(walker *astvisitor.Walker) {
+		visitor := subscriptionSingleRootFieldVisitor{walker}
+		walker.RegisterEnterDocumentVisitor(&visitor)
+	}
+}
+
+type subscriptionSingleRootFieldVisitor struct {
+	*astvisitor.Walker
+}
+
+func (s *subscriptionSingleRootFieldVisitor) EnterDocument(operation, definition *ast.Document) {
+	for i := range operation.OperationDefinitions {
+		if operation.OperationDefinitions[i].OperationType == ast.OperationTypeSubscription {
+			selections := len(operation.SelectionSets[operation.OperationDefinitions[i].SelectionSet].SelectionRefs)
+			if selections > 1 {
+				subscriptionName := operation.Input.ByteSlice(operation.OperationDefinitions[i].Name)
+				s.StopWithExternalErr(operationreport.ErrSubscriptionMustOnlyHaveOneRootSelection(subscriptionName))
+				return
+			} else if selections == 1 {
+				ref := operation.SelectionSets[operation.OperationDefinitions[i].SelectionSet].SelectionRefs[0]
+				if operation.Selections[ref].Kind == ast.SelectionKindField {
+					return
+				}
+			}
+		}
+	}
+}
diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_valid_arguments.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_valid_arguments.go
new file mode 100644
index 00000000000..03a6f03d004
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_valid_arguments.go
@@ -0,0 +1,179 @@
+package astvalidation
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/TykTechnologies/graphql-go-tools/pkg/ast"
+	"github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor"
+	"github.com/TykTechnologies/graphql-go-tools/pkg/operationreport"
+)
+
+// ValidArguments validates if arguments are valid: values and variables have compatible types
+// deep variable comparison is handled by Values
+func ValidArguments() Rule {
+	return func(walker *astvisitor.Walker) {
+		visitor := validArgumentsVisitor{
+			Walker: walker,
+		}
+		walker.RegisterEnterDocumentVisitor(&visitor)
+		walker.RegisterEnterArgumentVisitor(&visitor)
+	}
+}
+
+type validArgumentsVisitor struct {
+	*astvisitor.Walker
+	operation, definition *ast.Document
+}
+
+func (v *validArgumentsVisitor) EnterDocument(operation, definition *ast.Document) {
+	v.operation = operation
+	v.definition = definition
+}
+
+func (v *validArgumentsVisitor) EnterArgument(ref int) {
+	definitionRef, exists := v.ArgumentInputValueDefinition(ref)
+
+	if !exists {
+		return
+	}
+
+	value := v.operation.ArgumentValue(ref)
+	v.validateIfValueSatisfiesInputFieldDefinition(value, definitionRef)
+}
+
+func (v *validArgumentsVisitor) validateIfValueSatisfiesInputFieldDefinition(value ast.Value, inputValueDefinitionRef int) {
+	var (
+		satisfied             bool
+		operationTypeRef      int
+		variableDefinitionRef int
+	)
+
+	switch value.Kind {
+	case ast.ValueKindVariable:
+		satisfied, operationTypeRef, variableDefinitionRef = v.variableValueSatisfiesInputValueDefinition(value.Ref, inputValueDefinitionRef)
+	case ast.ValueKindEnum,
+		ast.ValueKindNull,
+		ast.ValueKindBoolean,
+		ast.ValueKindInteger,
+		ast.ValueKindString,
+		ast.ValueKindFloat,
+		ast.ValueKindObject,
+		ast.ValueKindList:
+		// these kinds of values are covered by Values() / valuesVisitor
+		return
+	default:
+		v.StopWithInternalErr(fmt.Errorf("validateIfValueSatisfiesInputFieldDefinition: not implemented for value.Kind: %s", value.Kind))
+		return
+	}
+
+	if satisfied {
+		return
+	}
+
+	printedValue, err := v.operation.PrintValueBytes(value, nil)
+	if v.HandleInternalErr(err) {
+		return
+	}
+
+	typeRef := v.definition.InputValueDefinitionType(inputValueDefinitionRef)
+	expectedTypeName, err := v.definition.PrintTypeBytes(typeRef, nil)
+	if v.HandleInternalErr(err) {
+		return
+	}
+
+	actualTypeName, err := v.operation.PrintTypeBytes(operationTypeRef, nil)
+	if v.HandleInternalErr(err) {
+		return
+	}
+
+	v.StopWithExternalErr(operationreport.ErrVariableTypeDoesntSatisfyInputValueDefinition(printedValue, actualTypeName, expectedTypeName, value.Position, v.operation.VariableDefinitions[variableDefinitionRef].VariableValue.Position))
+}
+
+func (v *validArgumentsVisitor) variableValueSatisfiesInputValueDefinition(variableValue, inputValueDefinition int) (satisfies bool, operationTypeRef int, variableDefRef int) {
+	variableDefinitionRef, exists := v.variableDefinition(variableValue)
+	if !exists {
+		return false, ast.InvalidRef, variableDefinitionRef
+	}
+
+	operationTypeRef = v.operation.VariableDefinitions[variableDefinitionRef].Type
+	definitionTypeRef := v.definition.InputValueDefinitions[inputValueDefinition].Type
+
+	hasDefaultValue := v.validDefaultValue(v.operation.VariableDefinitions[variableDefinitionRef].DefaultValue) ||
+		v.validDefaultValue(v.definition.InputValueDefinitions[inputValueDefinition].DefaultValue)
+
+	return v.operationTypeSatisfiesDefinitionType(operationTypeRef, definitionTypeRef, hasDefaultValue), operationTypeRef, variableDefinitionRef
+}
+
+func (v *validArgumentsVisitor) variableDefinition(variableValueRef int) (ref int, exists bool) {
+	variableName := v.operation.VariableValueNameBytes(variableValueRef)
+
+	if v.Ancestors[0].Kind == ast.NodeKindOperationDefinition {
+		return v.operation.VariableDefinitionByNameAndOperation(v.Ancestors[0].Ref, variableName)
+	}
+
+	for opDefRef := 0; opDefRef < len(v.operation.OperationDefinitions); opDefRef++ {
+		ref, exists = v.operation.VariableDefinitionByNameAndOperation(opDefRef, variableName)
+		if exists {
+			return
+		}
+	}
+
+	return ast.InvalidRef, false
+}
+
+func (v *validArgumentsVisitor) 
validDefaultValue(value ast.DefaultValue) bool { + return value.IsDefined && value.Value.Kind != ast.ValueKindNull +} + +func (v *validArgumentsVisitor) operationTypeSatisfiesDefinitionType(operationTypeRef int, definitionTypeRef int, hasDefaultValue bool) bool { + opKind := v.operation.Types[operationTypeRef].TypeKind + defKind := v.definition.Types[definitionTypeRef].TypeKind + + // A nullable op type is compatible with a non-null def type if the def has + // a default value. Strip the def non-null and continue comparing. This + // logic is only valid before any unnesting of types occurs, which is why + // it's outside the for loop below. + // + // Example: + // Op: someField(arg: Boolean): String + // Def: someField(arg: Boolean! = false): String # Boolean! -> Boolean + if opKind != ast.TypeKindNonNull && defKind == ast.TypeKindNonNull && hasDefaultValue { + definitionTypeRef = v.definition.Types[definitionTypeRef].OfType + } + + // Unnest the op and def arg types until a named type is reached, + // then compare. + for { + if operationTypeRef == -1 || definitionTypeRef == -1 { + return false + } + opKind = v.operation.Types[operationTypeRef].TypeKind + defKind = v.definition.Types[definitionTypeRef].TypeKind + + // If the op arg type is stricter than the def arg type, that's okay. + // Strip the op non-null and continue comparing. + // + // Example: + // Op: someField(arg: Boolean!): String # Boolean! -> Boolean + // Def: someField(arg: Boolean): String + if opKind == ast.TypeKindNonNull && defKind != ast.TypeKindNonNull { + operationTypeRef = v.operation.Types[operationTypeRef].OfType + continue + } + + if opKind != defKind { + return false + } + if opKind == ast.TypeKindNamed { + // defKind is also a named type because at this point both kinds + // are the same! Compare the names. + + return bytes.Equal(v.operation.Input.ByteSlice(v.operation.Types[operationTypeRef].Name), + v.definition.Input.ByteSlice(v.definition.Types[definitionTypeRef].Name)) + } + // Both types are non-null or list. Unnest and continue comparing. 
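+		//
+		// Illustrative trace (added comment, not from the upstream sources),
+		// comparing op [String!]! against def [String!]:
+		//   [String!]! vs [String!] -> strip the stricter op non-null (above)
+		//   [String!]  vs [String!] -> unnest both lists (below)
+		//   String!    vs String!   -> unnest both non-nulls
+		//   String     vs String    -> named types: compare the names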
+ operationTypeRef = v.operation.Types[operationTypeRef].OfType + definitionTypeRef = v.definition.Types[definitionTypeRef].OfType + } +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_validate_field_selections.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_validate_field_selections.go new file mode 100644 index 00000000000..7fc47e68762 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_validate_field_selections.go @@ -0,0 +1,89 @@ +package astvalidation + +import ( + "bytes" + "fmt" + + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/literal" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +// FieldSelections validates if all FieldSelections are possible and valid +func FieldSelections() Rule { + return func(walker *astvisitor.Walker) { + fieldDefined := fieldDefined{ + Walker: walker, + } + walker.RegisterEnterDocumentVisitor(&fieldDefined) + walker.RegisterEnterFieldVisitor(&fieldDefined) + } +} + +type fieldDefined struct { + *astvisitor.Walker + operation *ast.Document + definition *ast.Document +} + +func (f *fieldDefined) EnterDocument(operation, definition *ast.Document) { + f.operation = operation + f.definition = definition +} + +func (f *fieldDefined) ValidateUnionField(ref int, enclosingTypeDefinition ast.Node) { + if bytes.Equal(f.operation.FieldNameBytes(ref), literal.TYPENAME) { + return + } + fieldName := f.operation.FieldNameBytes(ref) + unionName := f.definition.NodeNameBytes(enclosingTypeDefinition) + f.StopWithExternalErr(operationreport.ErrFieldSelectionOnUnion(fieldName, unionName)) +} + +func (f *fieldDefined) ValidateInterfaceObjectTypeField(ref int, enclosingTypeDefinition ast.Node) { + fieldName := f.operation.FieldNameBytes(ref) + if bytes.Equal(fieldName, literal.TYPENAME) { + return + } + typeName := f.definition.NodeNameBytes(enclosingTypeDefinition) + hasSelections := f.operation.FieldHasSelections(ref) + definitions := f.definition.NodeFieldDefinitions(enclosingTypeDefinition) + for _, i := range definitions { + definitionName := f.definition.FieldDefinitionNameBytes(i) + if bytes.Equal(fieldName, definitionName) { + // field is defined + fieldDefinitionTypeKind := f.definition.FieldDefinitionTypeNode(i).Kind + switch { + case hasSelections && fieldDefinitionTypeKind == ast.NodeKindScalarTypeDefinition: + f.StopWithExternalErr(operationreport.ErrFieldSelectionOnScalar(fieldName, definitionName)) + case !hasSelections && (fieldDefinitionTypeKind != ast.NodeKindScalarTypeDefinition && fieldDefinitionTypeKind != ast.NodeKindEnumTypeDefinition): + f.StopWithExternalErr(operationreport.ErrMissingFieldSelectionOnNonScalar(fieldName, typeName)) + } + return + } + } + + f.StopWithExternalErr(operationreport.ErrFieldUndefinedOnType(fieldName, typeName)) +} + +func (f *fieldDefined) ValidateScalarField(ref int, enclosingTypeDefinition ast.Node) { + fieldName := f.operation.FieldNameBytes(ref) + scalarTypeName := f.operation.NodeNameBytes(enclosingTypeDefinition) + f.StopWithExternalErr(operationreport.ErrFieldSelectionOnScalar(fieldName, scalarTypeName)) +} + +func (f *fieldDefined) EnterField(ref int) { + switch f.EnclosingTypeDefinition.Kind { + case ast.NodeKindUnionTypeDefinition: + f.ValidateUnionField(ref, f.EnclosingTypeDefinition) + case 
ast.NodeKindInterfaceTypeDefinition, ast.NodeKindObjectTypeDefinition: + f.ValidateInterfaceObjectTypeField(ref, f.EnclosingTypeDefinition) + case ast.NodeKindScalarTypeDefinition: + f.ValidateScalarField(ref, f.EnclosingTypeDefinition) + default: + fieldName := f.operation.FieldNameBytes(ref) + typeName := f.operation.NodeNameBytes(f.EnclosingTypeDefinition) + f.StopWithInternalErr(fmt.Errorf("astvalidation/fieldDefined/EnterField: field: %s selection on type: %s unhandled", fieldName, typeName)) + } +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_values.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_values.go new file mode 100644 index 00000000000..0e3b6e0ad96 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_values.go @@ -0,0 +1,717 @@ +package astvalidation + +import ( + "bytes" + + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astimport" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/literal" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +// Values validates if values are used properly +func Values() Rule { + return func(walker *astvisitor.Walker) { + visitor := valuesVisitor{ + Walker: walker, + } + walker.RegisterEnterDocumentVisitor(&visitor) + walker.RegisterEnterArgumentVisitor(&visitor) + walker.RegisterEnterVariableDefinitionVisitor(&visitor) + } +} + +type valuesVisitor struct { + *astvisitor.Walker + operation, definition *ast.Document + importer astimport.Importer +} + +func (v *valuesVisitor) EnterDocument(operation, definition *ast.Document) { + v.operation = operation + v.definition = definition +} + +func (v *valuesVisitor) EnterVariableDefinition(ref int) { + if !v.operation.VariableDefinitionHasDefaultValue(ref) { + return // variable has no default value, deep type check not required + } + + v.valueSatisfiesOperationType(v.operation.VariableDefinitions[ref].DefaultValue.Value, v.operation.VariableDefinitions[ref].Type) +} + +func (v *valuesVisitor) EnterArgument(ref int) { + + definition, exists := v.ArgumentInputValueDefinition(ref) + if !exists { + return + } + + value := v.operation.ArgumentValue(ref) + if value.Kind == ast.ValueKindVariable { + variableName := v.operation.VariableValueNameBytes(value.Ref) + variableDefinition, exists := v.operation.VariableDefinitionByNameAndOperation(v.Ancestors[0].Ref, variableName) + if !exists { + operationName := v.operation.NodeNameBytes(v.Ancestors[0]) + v.StopWithExternalErr(operationreport.ErrVariableNotDefinedOnOperation(variableName, operationName)) + return + } + if !v.operation.VariableDefinitions[variableDefinition].DefaultValue.IsDefined { + return // variable has no default value, deep type check not required + } + value = v.operation.VariableDefinitions[variableDefinition].DefaultValue.Value + } + + v.valueSatisfiesInputValueDefinitionType(value, v.definition.InputValueDefinitions[definition].Type) +} + +func (v *valuesVisitor) valueSatisfiesOperationType(value ast.Value, operationTypeRef int) bool { + switch v.operation.Types[operationTypeRef].TypeKind { + case ast.TypeKindNonNull: + return v.valuesSatisfiesOperationNonNullType(value, operationTypeRef) + case ast.TypeKindNamed: + return v.valuesSatisfiesOperationNamedType(value, operationTypeRef) + case ast.TypeKindList: + return 
v.valueSatisfiesOperationListType(value, operationTypeRef, v.operation.Types[operationTypeRef].OfType)
+	default:
+		v.handleOperationTypeError(value, operationTypeRef)
+		return false
+	}
+}
+
+func (v *valuesVisitor) valuesSatisfiesOperationNonNullType(value ast.Value, operationTypeRef int) bool {
+	if value.Kind == ast.ValueKindNull {
+		v.handleOperationUnexpectedNullError(value, operationTypeRef)
+		return false
+	}
+	return v.valueSatisfiesOperationType(value, v.operation.Types[operationTypeRef].OfType)
+}
+
+func (v *valuesVisitor) valuesSatisfiesOperationNamedType(value ast.Value, operationTypeRef int) bool {
+	if value.Kind == ast.ValueKindNull {
+		// null always satisfies a nullable (non-required) type
+		return true
+	}
+
+	typeName := v.operation.ResolveTypeNameBytes(operationTypeRef)
+	node, exists := v.definition.Index.FirstNodeByNameBytes(typeName)
+	if !exists {
+		v.handleOperationTypeError(value, operationTypeRef)
+		return false
+	}
+
+	definitionTypeRef := ast.InvalidRef
+
+	for ref := 0; ref < len(v.definition.Types); ref++ {
+		if v.definition.Types[ref].TypeKind != ast.TypeKindNamed {
+			continue
+		}
+
+		if bytes.Equal(v.definition.TypeNameBytes(ref), typeName) {
+			definitionTypeRef = ref
+			break
+		}
+	}
+
+	if definitionTypeRef == ast.InvalidRef {
+		// should not happen: if no named type node was found, it has already been reported above
+		return false
+	}
+
+	return v.valueSatisfiesTypeDefinitionNode(value, definitionTypeRef, node)
+}
+
+func (v *valuesVisitor) valueSatisfiesOperationListType(value ast.Value, operationTypeRef int, listItemType int) bool {
+	if value.Kind == ast.ValueKindNull {
+		return true
+	}
+
+	if value.Kind != ast.ValueKindList {
+		return v.valueSatisfiesOperationType(value, listItemType)
+	}
+
+	if v.operation.Types[listItemType].TypeKind == ast.TypeKindNonNull {
+		if len(v.operation.ListValues[value.Ref].Refs) == 0 {
+			v.handleOperationTypeError(value, operationTypeRef)
+			return false
+		}
+		listItemType = v.operation.Types[listItemType].OfType
+	}
+
+	valid := true
+
+	for _, i := range v.operation.ListValues[value.Ref].Refs {
+		listValue := v.operation.Value(i)
+		if !v.valueSatisfiesOperationType(listValue, listItemType) {
+			valid = false
+		}
+	}
+
+	return valid
+}
+
+func (v *valuesVisitor) valueSatisfiesInputValueDefinitionType(value ast.Value, definitionTypeRef int) bool {
+	switch v.definition.Types[definitionTypeRef].TypeKind {
+	case ast.TypeKindNonNull:
+		return v.valuesSatisfiesNonNullType(value, definitionTypeRef)
+	case ast.TypeKindNamed:
+		return v.valuesSatisfiesNamedType(value, definitionTypeRef)
+	case ast.TypeKindList:
+		return v.valueSatisfiesListType(value, definitionTypeRef, v.definition.Types[definitionTypeRef].OfType)
+	default:
+		v.handleTypeError(value, definitionTypeRef)
+		return false
+	}
+}
+
+func (v *valuesVisitor) valuesSatisfiesNonNullType(value ast.Value, definitionTypeRef int) bool {
+	switch value.Kind {
+	case ast.ValueKindNull:
+		v.handleUnexpectedNullError(value, definitionTypeRef)
+		return false
+	case ast.ValueKindVariable:
+		variableDefinitionRef, variableTypeRef, _, ok := v.operationVariableType(value.Ref)
+		if !ok {
+			v.handleTypeError(value, definitionTypeRef)
+			return false
+		}
+
+		if v.operation.VariableDefinitionHasDefaultValue(variableDefinitionRef) {
+			return v.valueSatisfiesInputValueDefinitionType(v.operation.VariableDefinitions[variableDefinitionRef].DefaultValue.Value, definitionTypeRef)
+		}
+
+		importedDefinitionType := v.importer.ImportType(definitionTypeRef, v.definition, v.operation)
+		if !v.operation.TypesAreEqualDeep(importedDefinitionType, variableTypeRef) {
+			v.handleVariableHasIncompatibleTypeError(value, definitionTypeRef)
+			return false
+		}
+		return true
+	}
+	return v.valueSatisfiesInputValueDefinitionType(value, v.definition.Types[definitionTypeRef].OfType)
+}
+
+func (v *valuesVisitor) valuesSatisfiesNamedType(value ast.Value, definitionTypeRef int) bool {
+	if value.Kind == ast.ValueKindNull {
+		// null always satisfies a nullable (non-required) type
+		return true
+	}
+
+	typeName := v.definition.ResolveTypeNameBytes(definitionTypeRef)
+	node, exists := v.definition.Index.FirstNodeByNameBytes(typeName)
+	if !exists {
+		v.handleTypeError(value, definitionTypeRef)
+		return false
+	}
+
+	return v.valueSatisfiesTypeDefinitionNode(value, definitionTypeRef, node)
+}
+
+func (v *valuesVisitor) valueSatisfiesListType(value ast.Value, definitionTypeRef int, listItemType int) bool {
+
+	if value.Kind == ast.ValueKindVariable {
+		variableDefinitionRef, actualType, _, ok := v.operationVariableType(value.Ref)
+		if !ok {
+			v.handleTypeError(value, definitionTypeRef)
+			return false
+		}
+
+		if v.operation.VariableDefinitionHasDefaultValue(variableDefinitionRef) {
+			return v.valueSatisfiesInputValueDefinitionType(v.operation.VariableDefinitions[variableDefinitionRef].DefaultValue.Value, definitionTypeRef)
+		}
+
+		expectedType := v.importer.ImportType(listItemType, v.definition, v.operation)
+		if v.operation.Types[actualType].TypeKind == ast.TypeKindNonNull {
+			actualType = v.operation.Types[actualType].OfType
+		}
+		if v.operation.Types[actualType].TypeKind == ast.TypeKindList {
+			actualType = v.operation.Types[actualType].OfType
+		}
+		if !v.operation.TypesAreEqualDeep(expectedType, actualType) {
+			v.handleVariableHasIncompatibleTypeError(value, definitionTypeRef)
+			return false
+		}
+		return true
+	}
+
+	if value.Kind == ast.ValueKindNull {
+		return true
+	}
+
+	if value.Kind != ast.ValueKindList {
+		return v.valueSatisfiesInputValueDefinitionType(value, listItemType)
+	}
+
+	if v.definition.Types[listItemType].TypeKind == ast.TypeKindNonNull {
+		if len(v.operation.ListValues[value.Ref].Refs) == 0 {
+			// [] empty list is a valid input for [item!] 
lists + return true + } + listItemType = v.definition.Types[listItemType].OfType + } + + valid := true + + for _, i := range v.operation.ListValues[value.Ref].Refs { + listValue := v.operation.Value(i) + if !v.valueSatisfiesInputValueDefinitionType(listValue, listItemType) { + valid = false + } + } + + return valid +} + +func (v *valuesVisitor) valueSatisfiesTypeDefinitionNode(value ast.Value, definitionTypeRef int, node ast.Node) bool { + switch node.Kind { + case ast.NodeKindEnumTypeDefinition: + return v.valueSatisfiesEnum(value, definitionTypeRef, node) + case ast.NodeKindScalarTypeDefinition: + return v.valueSatisfiesScalar(value, definitionTypeRef, node.Ref) + case ast.NodeKindInputObjectTypeDefinition: + return v.valueSatisfiesInputObjectTypeDefinition(value, definitionTypeRef, node.Ref) + } + return false +} + +func (v *valuesVisitor) valueSatisfiesEnum(value ast.Value, definitionTypeRef int, node ast.Node) bool { + if value.Kind == ast.ValueKindVariable { + expectedTypeName := node.NameBytes(v.definition) + return v.variableValueHasMatchingTypeName(value, definitionTypeRef, expectedTypeName) + } + + if value.Kind != ast.ValueKindEnum { + v.handleUnexpectedEnumValueError(value, definitionTypeRef) + return false + } + enumValue := v.operation.EnumValueNameBytes(value.Ref) + + if !v.definition.EnumTypeDefinitionContainsEnumValue(node.Ref, enumValue) { + v.handleNotExistingEnumValueError(value, definitionTypeRef) + return false + } + + return true +} + +func (v *valuesVisitor) valueSatisfiesScalar(value ast.Value, definitionTypeRef int, scalar int) bool { + scalarName := v.definition.ScalarTypeDefinitionNameBytes(scalar) + + if value.Kind == ast.ValueKindVariable { + return v.variableValueHasMatchingTypeName(value, definitionTypeRef, scalarName) + } + + switch { + case bytes.Equal(scalarName, literal.ID): + return v.valueSatisfiesScalarID(value, definitionTypeRef) + case bytes.Equal(scalarName, literal.BOOLEAN): + return v.valueSatisfiesScalarBoolean(value, definitionTypeRef) + case bytes.Equal(scalarName, literal.INT): + return v.valueSatisfiesScalarInt(value, definitionTypeRef) + case bytes.Equal(scalarName, literal.FLOAT): + return v.valueSatisfiesScalarFloat(value, definitionTypeRef) + case bytes.Equal(scalarName, literal.STRING): + return v.valueSatisfiesScalarString(value, definitionTypeRef, true) + default: + return v.valueSatisfiesScalarString(value, definitionTypeRef, false) + } +} + +func (v *valuesVisitor) valueSatisfiesScalarID(value ast.Value, definitionTypeRef int) bool { + if value.Kind == ast.ValueKindString || value.Kind == ast.ValueKindInteger { + return true + } + + printedValue, printedType, ok := v.printValueAndUnderlyingType(value, definitionTypeRef) + if !ok { + return false + } + + v.Report.AddExternalError(operationreport.ErrValueDoesntSatisfyID(printedValue, printedType, value.Position)) + + return false +} + +func (v *valuesVisitor) valueSatisfiesScalarBoolean(value ast.Value, definitionTypeRef int) bool { + if value.Kind == ast.ValueKindBoolean { + return true + } + + printedValue, printedType, ok := v.printValueAndUnderlyingType(value, definitionTypeRef) + if !ok { + return false + } + + v.Report.AddExternalError(operationreport.ErrValueDoesntSatisfyBoolean(printedValue, printedType, value.Position)) + + return false +} + +func (v *valuesVisitor) valueSatisfiesScalarInt(value ast.Value, definitionTypeRef int) bool { + var isValidInt32 bool + isInt := value.Kind == ast.ValueKindInteger + + if isInt { + isValidInt32 = 
v.operation.IntValueValidInt32(value.Ref) + } + + if isInt && isValidInt32 { + return true + } + + printedValue, printedType, ok := v.printValueAndUnderlyingType(value, definitionTypeRef) + if !ok { + return false + } + + if !isInt { + v.Report.AddExternalError(operationreport.ErrValueDoesntSatisfyInt(printedValue, printedType, value.Position)) + return false + } + + v.Report.AddExternalError(operationreport.ErrBigIntValueDoesntSatisfyInt(printedValue, printedType, value.Position)) + return false +} + +func (v *valuesVisitor) valueSatisfiesScalarFloat(value ast.Value, definitionTypeRef int) bool { + if value.Kind == ast.ValueKindFloat || value.Kind == ast.ValueKindInteger { + return true + } + + printedValue, printedType, ok := v.printValueAndUnderlyingType(value, definitionTypeRef) + if !ok { + return false + } + + v.Report.AddExternalError(operationreport.ErrValueDoesntSatisfyFloat(printedValue, printedType, value.Position)) + + return false +} + +func (v *valuesVisitor) valueSatisfiesScalarString(value ast.Value, definitionTypeRef int, builtInStringScalar bool) bool { + if value.Kind == ast.ValueKindString { + return true + } + + printedValue, printedType, ok := v.printValueAndUnderlyingType(value, definitionTypeRef) + if !ok { + return false + } + + if builtInStringScalar { + v.Report.AddExternalError(operationreport.ErrValueDoesntSatisfyString(printedValue, printedType, value.Position)) + } else { + v.Report.AddExternalError(operationreport.ErrValueDoesntSatisfyType(printedValue, printedType, value.Position)) + } + + return false +} + +func (v *valuesVisitor) valueSatisfiesInputObjectTypeDefinition(value ast.Value, definitionTypeRef int, inputObjectTypeDefinition int) bool { + if value.Kind == ast.ValueKindVariable { + expectedTypeName := v.definition.InputObjectTypeDefinitionNameBytes(inputObjectTypeDefinition) + return v.variableValueHasMatchingTypeName(value, definitionTypeRef, expectedTypeName) + } + + if value.Kind != ast.ValueKindObject { + v.handleNotObjectTypeError(value, definitionTypeRef) + return false + } + + valid := true + + for _, i := range v.definition.InputObjectTypeDefinitions[inputObjectTypeDefinition].InputFieldsDefinition.Refs { + if !v.objectValueSatisfiesInputValueDefinition(value, inputObjectTypeDefinition, i) { + valid = false + } + } + + if !valid { + return false + } + + for _, i := range v.operation.ObjectValues[value.Ref].Refs { + if !v.objectFieldDefined(i, inputObjectTypeDefinition) { + objectFieldName := v.operation.ObjectFieldNameBytes(i) + def := v.definition.Input.ByteSlice(v.definition.InputObjectTypeDefinitions[inputObjectTypeDefinition].Name) + + v.Report.AddExternalError(operationreport.ErrUnknownFieldOfInputObject(objectFieldName, def, v.operation.ObjectField(i).Position)) + valid = false + } + } + + if !valid { + return false + } + + if v.objectValueHasDuplicateFields(value.Ref) { + return false + } + + return true +} + +func (v *valuesVisitor) objectValueHasDuplicateFields(objectValue int) bool { + hasDuplicates := false + + reportedFieldRefs := make(map[int]struct{}) + for i, j := range v.operation.ObjectValues[objectValue].Refs { + for k, l := range v.operation.ObjectValues[objectValue].Refs { + if i == k || i > k { + continue + } + + if _, ok := reportedFieldRefs[l]; ok { + continue + } + + fieldName := v.operation.ObjectFieldNameBytes(j) + otherFieldName := v.operation.ObjectFieldNameBytes(l) + + if bytes.Equal(fieldName, otherFieldName) { + v.Report.AddExternalError(operationreport.ErrDuplicatedFieldInputObject( + fieldName, + 
v.operation.ObjectField(j).Position,
+					v.operation.ObjectField(l).Position))
+				hasDuplicates = true
+				reportedFieldRefs[l] = struct{}{}
+			}
+		}
+	}
+
+	return hasDuplicates
+}
+
+func (v *valuesVisitor) objectFieldDefined(objectField, inputObjectTypeDefinition int) bool {
+	name := v.operation.ObjectFieldNameBytes(objectField)
+	for _, i := range v.definition.InputObjectTypeDefinitions[inputObjectTypeDefinition].InputFieldsDefinition.Refs {
+		if bytes.Equal(name, v.definition.InputValueDefinitionNameBytes(i)) {
+			return true
+		}
+	}
+	return false
+}
+
+func (v *valuesVisitor) objectValueSatisfiesInputValueDefinition(objectValue ast.Value, inputObjectDefinition, inputValueDefinition int) bool {
+
+	name := v.definition.InputValueDefinitionNameBytes(inputValueDefinition)
+	definitionTypeRef := v.definition.InputValueDefinitionType(inputValueDefinition)
+
+	for _, i := range v.operation.ObjectValues[objectValue.Ref].Refs {
+		if bytes.Equal(name, v.operation.ObjectFieldNameBytes(i)) {
+			value := v.operation.ObjectFieldValue(i)
+			return v.valueSatisfiesInputValueDefinitionType(value, definitionTypeRef)
+		}
+	}
+
+	// the argument is not present on the object value; that is still fine if the argument is optional, otherwise it is not satisfied
+	if !v.definition.InputValueDefinitionArgumentIsOptional(inputValueDefinition) {
+		v.handleMissingRequiredFieldOfInputObjectError(objectValue, name, inputObjectDefinition, inputValueDefinition)
+		return false
+	}
+
+	return true
+}
+
+func (v *valuesVisitor) variableValueHasMatchingTypeName(value ast.Value, definitionTypeRef int, expectedTypeName []byte) bool {
+	variableDefinitionRef, _, actualTypeName, ok := v.operationVariableType(value.Ref)
+	if !ok {
+		v.handleVariableHasIncompatibleTypeError(value, definitionTypeRef)
+		return false
+	}
+
+	if v.operation.VariableDefinitionHasDefaultValue(variableDefinitionRef) {
+		return v.valueSatisfiesInputValueDefinitionType(v.operation.VariableDefinitions[variableDefinitionRef].DefaultValue.Value, definitionTypeRef)
+	}
+
+	if !bytes.Equal(actualTypeName, expectedTypeName) {
+		v.handleVariableHasIncompatibleTypeError(value, definitionTypeRef)
+		return false
+	}
+
+	return true
+}
+
+func (v *valuesVisitor) handleTypeError(value ast.Value, definitionTypeRef int) {
+	printedValue, printedType, ok := v.printValueAndUnderlyingType(value, definitionTypeRef)
+	if !ok {
+		return
+	}
+
+	v.Report.AddExternalError(operationreport.ErrValueDoesntSatisfyType(printedValue, printedType, value.Position))
+}
+
+func (v *valuesVisitor) handleNotObjectTypeError(value ast.Value, definitionTypeRef int) {
+	printedValue, printedType, ok := v.printValueAndUnderlyingType(value, definitionTypeRef)
+	if !ok {
+		return
+	}
+
+	v.Report.AddExternalError(operationreport.ErrValueIsNotAnInputObjectType(printedValue, printedType, value.Position))
+}
+
+func (v *valuesVisitor) handleUnexpectedNullError(value ast.Value, definitionTypeRef int) {
+	printedType, err := v.definition.PrintTypeBytes(definitionTypeRef, nil)
+	if v.HandleInternalErr(err) {
+		return
+	}
+
+	v.Report.AddExternalError(operationreport.ErrNullValueDoesntSatisfyInputValueDefinition(printedType, value.Position))
+}
+
+func (v *valuesVisitor) handleUnexpectedEnumValueError(value ast.Value, definitionTypeRef int) {
+	printedValue, printedType, ok := v.printValueAndUnderlyingType(value, definitionTypeRef)
+	if !ok {
+		return
+	}
+
+	v.Report.AddExternalError(operationreport.ErrValueDoesntSatisfyEnum(printedValue, printedType, value.Position))
+}
+
+func (v *valuesVisitor) 
handleNotExistingEnumValueError(value ast.Value, definitionTypeRef int) { + printedValue, printedType, ok := v.printValueAndUnderlyingType(value, definitionTypeRef) + if !ok { + return + } + + v.Report.AddExternalError(operationreport.ErrValueDoesntExistsInEnum(printedValue, printedType, value.Position)) +} + +func (v *valuesVisitor) handleVariableHasIncompatibleTypeError(value ast.Value, definitionTypeRef int) { + printedValue, ok := v.printOperationValue(value) + if !ok { + return + } + + expectedTypeName, err := v.definition.PrintTypeBytes(definitionTypeRef, nil) + if v.HandleInternalErr(err) { + return + } + + variableDefinitionRef, _, actualTypeName, ok := v.operationVariableType(value.Ref) + if !ok { + return + } + + v.Report.AddExternalError(operationreport.ErrVariableTypeDoesntSatisfyInputValueDefinition( + printedValue, + actualTypeName, + expectedTypeName, + value.Position, + v.operation.VariableDefinitions[variableDefinitionRef].VariableValue.Position, + )) +} + +func (v *valuesVisitor) handleMissingRequiredFieldOfInputObjectError(value ast.Value, fieldName ast.ByteSlice, inputObjectDefinition, inputValueDefinition int) { + printedType, err := v.definition.PrintTypeBytes(v.definition.InputValueDefinitions[inputValueDefinition].Type, nil) + if v.HandleInternalErr(err) { + return + } + + v.Report.AddExternalError(operationreport.ErrMissingRequiredFieldOfInputObject( + v.definition.InputObjectTypeDefinitionNameBytes(inputObjectDefinition), + fieldName, + printedType, + value.Position, + )) +} + +func (v *valuesVisitor) handleOperationTypeError(value ast.Value, operationTypeRef int) { + printedValue, printedType, ok := v.printOperationValueAndUnderlyingType(value, operationTypeRef) + if !ok { + return + } + + v.Report.AddExternalError(operationreport.ErrValueDoesntSatisfyType(printedValue, printedType, value.Position)) +} + +func (v *valuesVisitor) handleOperationUnexpectedNullError(value ast.Value, operationTypeRef int) { + printedType, err := v.operation.PrintTypeBytes(operationTypeRef, nil) + if v.HandleInternalErr(err) { + return + } + + v.Report.AddExternalError(operationreport.ErrNullValueDoesntSatisfyInputValueDefinition(printedType, value.Position)) +} + +func (v *valuesVisitor) printValueAndUnderlyingType(value ast.Value, definitionTypeRef int) (printedValue, printedType []byte, ok bool) { + var err error + + printedValue, ok = v.printOperationValue(value) + if !ok { + return nil, nil, false + } + + underlyingType := v.definition.ResolveUnderlyingType(definitionTypeRef) + printedType, err = v.definition.PrintTypeBytes(underlyingType, nil) + if v.HandleInternalErr(err) { + return nil, nil, false + } + + return printedValue, printedType, true +} + +func (v *valuesVisitor) printOperationValueAndUnderlyingType(value ast.Value, operationTypeRef int) (printedValue, printedType []byte, ok bool) { + printedValue, ok = v.printOperationValue(value) + if !ok { + return nil, nil, false + } + + printedType, ok = v.printUnderlyingOperationType(operationTypeRef) + if !ok { + return nil, nil, false + } + + return printedValue, printedType, true +} + +func (v *valuesVisitor) printUnderlyingOperationType(operationTypeRef int) (printedType []byte, ok bool) { + var err error + + underlyingType := v.operation.ResolveUnderlyingType(operationTypeRef) + printedType, err = v.operation.PrintTypeBytes(underlyingType, nil) + if v.HandleInternalErr(err) { + return nil, false + } + + return printedType, true +} + +func (v *valuesVisitor) printOperationValue(value ast.Value) (printedValue []byte, ok 
bool) { + var err error + printedValue, err = v.operation.PrintValueBytes(value, nil) + if v.HandleInternalErr(err) { + return nil, false + } + + return printedValue, true +} + +func (v *valuesVisitor) operationVariableDefinition(variableValueRef int) (ref int, exists bool) { + variableName := v.operation.VariableValueNameBytes(variableValueRef) + + if v.Ancestors[0].Kind == ast.NodeKindOperationDefinition { + return v.operation.VariableDefinitionByNameAndOperation(v.Ancestors[0].Ref, variableName) + } + + for opDefRef := 0; opDefRef < len(v.operation.OperationDefinitions); opDefRef++ { + ref, exists = v.operation.VariableDefinitionByNameAndOperation(opDefRef, variableName) + if exists { + return + } + } + + return ast.InvalidRef, false +} + +func (v *valuesVisitor) operationVariableType(variableValueRef int) (variableDefinitionRef int, variableTypeRef int, typeName ast.ByteSlice, ok bool) { + variableDefRef, exists := v.operationVariableDefinition(variableValueRef) + if !exists { + return ast.InvalidRef, ast.InvalidRef, nil, false + } + + variableTypeRef = v.operation.VariableDefinitions[variableDefRef].Type + typeName = v.operation.ResolveTypeNameBytes(variableTypeRef) + + return variableDefRef, variableTypeRef, typeName, true +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_variable_uniqueness.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_variable_uniqueness.go new file mode 100644 index 00000000000..c82870b68ce --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_variable_uniqueness.go @@ -0,0 +1,57 @@ +package astvalidation + +import ( + "bytes" + "fmt" + + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +// VariableUniqueness validates if variables are unique in a given document +func VariableUniqueness() Rule { + return func(walker *astvisitor.Walker) { + visitor := variableUniquenessVisitor{ + Walker: walker, + } + walker.RegisterEnterDocumentVisitor(&visitor) + walker.RegisterEnterVariableDefinitionVisitor(&visitor) + } +} + +type variableUniquenessVisitor struct { + *astvisitor.Walker + operation, definition *ast.Document +} + +func (v *variableUniquenessVisitor) EnterDocument(operation, definition *ast.Document) { + v.operation = operation + v.definition = definition +} + +func (v *variableUniquenessVisitor) EnterVariableDefinition(ref int) { + + name := v.operation.VariableDefinitionNameBytes(ref) + + if v.Ancestors[0].Kind != ast.NodeKindOperationDefinition { + return + } + + variableDefinitions := v.operation.OperationDefinitions[v.Ancestors[0].Ref].VariableDefinitions.Refs + + for _, i := range variableDefinitions { + if i == ref { + continue + } + if bytes.Equal(name, v.operation.VariableDefinitionNameBytes(i)) { + if v.Ancestors[0].Kind != ast.NodeKindOperationDefinition { + v.StopWithInternalErr(fmt.Errorf("variable definition must have Operation ObjectDefinition as root ancestor, got: %s", v.Ancestors[0].Kind)) + return + } + operationName := v.operation.Input.ByteSlice(v.operation.OperationDefinitions[v.Ancestors[0].Ref].Name) + v.StopWithExternalErr(operationreport.ErrVariableMustBeUnique(name, operationName)) + return + } + } +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_variables_are_input_types.go 
b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_variables_are_input_types.go new file mode 100644 index 00000000000..bfd1d0ee01f --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_rule_variables_are_input_types.go @@ -0,0 +1,54 @@ +package astvalidation + +import ( + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +// VariablesAreInputTypes validates if variables are correct input types +func VariablesAreInputTypes() Rule { + return func(walker *astvisitor.Walker) { + visitor := variablesAreInputTypesVisitor{ + Walker: walker, + } + walker.RegisterEnterDocumentVisitor(&visitor) + walker.RegisterEnterVariableDefinitionVisitor(&visitor) + } +} + +type variablesAreInputTypesVisitor struct { + *astvisitor.Walker + operation, definition *ast.Document +} + +func (v *variablesAreInputTypesVisitor) EnterDocument(operation, definition *ast.Document) { + v.operation = operation + v.definition = definition +} + +func (v *variablesAreInputTypesVisitor) EnterVariableDefinition(ref int) { + + typeName := v.operation.ResolveTypeNameBytes(v.operation.VariableDefinitions[ref].Type) + typeDefinitionNode, ok := v.definition.Index.FirstNodeByNameBytes(typeName) + if !ok { + v.Report.AddExternalError(operationreport.ErrUnknownType(typeName, v.operation.Types[v.operation.VariableDefinitions[ref].Type].Position)) + return + } + + switch typeDefinitionNode.Kind { + case ast.NodeKindInputObjectTypeDefinition, ast.NodeKindScalarTypeDefinition, ast.NodeKindEnumTypeDefinition: + return + default: + variableName := v.operation.VariableDefinitionNameBytes(ref) + variableTypePos := v.operation.Types[v.operation.VariableDefinitions[ref].Type].Position + + printedType, err := v.operation.PrintTypeBytes(v.operation.VariableDefinitions[ref].Type, nil) + if v.HandleInternalErr(err) { + return + } + + v.Report.AddExternalError(operationreport.ErrVariableOfTypeIsNoValidInputValue(variableName, printedType, variableTypePos)) + return + } +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_validation.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_validation.go new file mode 100644 index 00000000000..fb5afbe7f0a --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/operation_validation.go @@ -0,0 +1,75 @@ +// Package astvalidation implements the validation rules specified in the GraphQL specification. 
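+//
+// Every rule in this package is a plain function that registers visitor
+// callbacks on an astvisitor.Walker, so operation rules and schema definition
+// rules are wired up the same way; OperationValidator bundles the default
+// operation rule set.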
+package astvalidation + +import ( + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +// DefaultOperationValidator returns a fully initialized OperationValidator with all default rules registered +func DefaultOperationValidator() *OperationValidator { + + validator := OperationValidator{ + walker: astvisitor.NewWalker(48), + } + + validator.RegisterRule(DocumentContainsExecutableOperation()) + validator.RegisterRule(OperationNameUniqueness()) + validator.RegisterRule(LoneAnonymousOperation()) + validator.RegisterRule(SubscriptionSingleRootField()) + validator.RegisterRule(FieldSelections()) + validator.RegisterRule(FieldSelectionMerging()) + validator.RegisterRule(KnownArguments()) + validator.RegisterRule(ValidArguments()) + validator.RegisterRule(Values()) + validator.RegisterRule(ArgumentUniqueness()) + validator.RegisterRule(RequiredArguments()) + validator.RegisterRule(Fragments()) + validator.RegisterRule(DirectivesAreDefined()) + validator.RegisterRule(DirectivesAreInValidLocations()) + validator.RegisterRule(VariableUniqueness()) + validator.RegisterRule(DirectivesAreUniquePerLocation()) + validator.RegisterRule(VariablesAreInputTypes()) + validator.RegisterRule(AllVariableUsesDefined()) + validator.RegisterRule(AllVariablesUsed()) + + return &validator +} + +func NewOperationValidator(rules []Rule) *OperationValidator { + validator := OperationValidator{ + walker: astvisitor.NewWalker(48), + } + + for _, rule := range rules { + validator.RegisterRule(rule) + } + + return &validator +} + +// OperationValidator orchestrates the validation process of Operations +type OperationValidator struct { + walker astvisitor.Walker +} + +// RegisterRule registers a rule to the OperationValidator +func (o *OperationValidator) RegisterRule(rule Rule) { + rule(&o.walker) +} + +// Validate validates the operation against the definition using the registered ruleset. 
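+//
+// A minimal usage sketch, assuming operation and definition are already
+// parsed *ast.Document values (the variable names are illustrative):
+//
+//	validator := DefaultOperationValidator()
+//	var report operationreport.Report
+//	if validator.Validate(&operation, &definition, &report) == Invalid {
+//		// inspect report for the external validation errors
+//	}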
+func (o *OperationValidator) Validate(operation, definition *ast.Document, report *operationreport.Report) ValidationState {
+
+	if report == nil {
+		report = &operationreport.Report{}
+	}
+
+	o.walker.Walk(operation, definition, report)
+
+	if report.HasErrors() {
+		return Invalid
+	}
+	return Valid
+}
diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/rule.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/rule.go
new file mode 100644
index 00000000000..8a199eb2704
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/rule.go
@@ -0,0 +1,10 @@
+package astvalidation
+
+import (
+	"github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor"
+)
+
+var reservedFieldPrefix = []byte("__")
+
+// Rule is a hook to register callback functions on the Walker
+type Rule func(walker *astvisitor.Walker)
diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/rule_implement_transitive_interfaces.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/rule_implement_transitive_interfaces.go
new file mode 100644
index 00000000000..24200299559
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/rule_implement_transitive_interfaces.go
@@ -0,0 +1,137 @@
+package astvalidation
+
+import (
+	"github.com/TykTechnologies/graphql-go-tools/pkg/ast"
+	"github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor"
+	"github.com/TykTechnologies/graphql-go-tools/pkg/operationreport"
+)
+
+func ImplementTransitiveInterfaces() Rule {
+	return func(walker *astvisitor.Walker) {
+		visitor := &implementTransitiveInterfacesVisitor{
+			Walker: walker,
+		}
+
+		walker.RegisterDocumentVisitor(visitor)
+		walker.RegisterEnterInterfaceTypeDefinitionVisitor(visitor)
+		walker.RegisterEnterInterfaceTypeExtensionVisitor(visitor)
+		walker.RegisterEnterObjectTypeDefinitionVisitor(visitor)
+		walker.RegisterEnterObjectTypeExtensionVisitor(visitor)
+	}
+}
+
+type implementTransitiveInterfacesVisitor struct {
+	*astvisitor.Walker
+	definition                  *ast.Document
+	typesImplementingInterfaces map[string][]string
+}
+
+func (v *implementTransitiveInterfacesVisitor) EnterDocument(operation, definition *ast.Document) {
+	v.definition = operation
+	v.typesImplementingInterfaces = map[string][]string{}
+}
+
+// LeaveDocument will iterate over the types implementing interfaces lookup map
+// and check if a type with interfaces has all the transitive interfaces in its slice.
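+// In SDL terms (a hypothetical schema), the invalid case below corresponds to
+// "type typeName implements interfaceOne" where interfaceOne itself declares
+// "implements interfaceBase": typeName must then list interfaceBase as well.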
+// +// Valid (typeName contains interfaceBase from interfaceOne): +// typeName -> [interfaceOne, interfaceBase] +// interfaceOne -> [interfaceBase] +// Invalid (typeName does not contain interfaceBase from interfaceOne): +// typeName -> [interfaceOne] +// interfaceOne -> [interfaceBase] +func (v *implementTransitiveInterfacesVisitor) LeaveDocument(operation, definition *ast.Document) { + for typeName, interfaceNames := range v.typesImplementingInterfaces { + interfaceNamesLookupList := map[string]bool{} + for i := 0; i < len(interfaceNames); i++ { + interfaceNamesLookupList[interfaceNames[i]] = true + } + + for i := 0; i < len(interfaceNames); i++ { + implementedInterfaceName := interfaceNames[i] + if _, ok := v.typesImplementingInterfaces[implementedInterfaceName]; !ok { + continue + } + + for j := 0; j < len(v.typesImplementingInterfaces[implementedInterfaceName]); j++ { + transitiveInterfaceName := v.typesImplementingInterfaces[implementedInterfaceName][j] + if _, ok := interfaceNamesLookupList[transitiveInterfaceName]; !ok { + v.Report.AddExternalError(operationreport.ErrTransitiveInterfaceNotImplemented([]byte(typeName), []byte(transitiveInterfaceName))) + } + } + } + } +} + +func (v *implementTransitiveInterfacesVisitor) EnterInterfaceTypeDefinition(ref int) { + implementsInterfaces := len(v.definition.InterfaceTypeDefinitions[ref].ImplementsInterfaces.Refs) > 0 + if !implementsInterfaces { + return + } + + interfaceName := v.definition.InterfaceTypeDefinitionNameString(ref) + v.collectImplementedInterfaces(interfaceName, v.definition.InterfaceTypeDefinitions[ref].ImplementsInterfaces.Refs) +} + +func (v *implementTransitiveInterfacesVisitor) EnterInterfaceTypeExtension(ref int) { + implementsInterfaces := len(v.definition.InterfaceTypeExtensions[ref].ImplementsInterfaces.Refs) > 0 + if !implementsInterfaces { + return + } + + interfaceName := v.definition.InterfaceTypeExtensionNameString(ref) + fieldDefinitionRefs := v.definition.InterfaceTypeExtensions[ref].FieldsDefinition.Refs + if len(fieldDefinitionRefs) == 0 { + v.Report.AddExternalError(operationreport.ErrTransitiveInterfaceExtensionImplementingWithoutBody([]byte(interfaceName))) + } + v.collectImplementedInterfaces(interfaceName, v.definition.InterfaceTypeExtensions[ref].ImplementsInterfaces.Refs) +} + +func (v *implementTransitiveInterfacesVisitor) EnterObjectTypeDefinition(ref int) { + implementsInterfaces := len(v.definition.ObjectTypeDefinitions[ref].ImplementsInterfaces.Refs) > 0 + if !implementsInterfaces { + return + } + + objectTypeName := v.definition.ObjectTypeDefinitionNameString(ref) + v.collectImplementedInterfaces(objectTypeName, v.definition.ObjectTypeDefinitions[ref].ImplementsInterfaces.Refs) +} + +func (v *implementTransitiveInterfacesVisitor) EnterObjectTypeExtension(ref int) { + implementsInterfaces := len(v.definition.ObjectTypeExtensions[ref].ImplementsInterfaces.Refs) > 0 + if !implementsInterfaces { + return + } + + objectTypeName := v.definition.ObjectTypeExtensionNameString(ref) + v.collectImplementedInterfaces(objectTypeName, v.definition.ObjectTypeExtensions[ref].ImplementsInterfaces.Refs) +} + +// collectImplementedInterfaces iterates over all implemented interfaces over a given type so that the +// names can be saved into the lookup map on the visitor +// +// Result: +// typeName -> [interfaceOne, interfaceBase] +// interfaceOne -> [interfaceBase] +func (v *implementTransitiveInterfacesVisitor) collectImplementedInterfaces(typeName string, implementedInterfacesRefs []int) { + for i := 0; i < 
len(implementedInterfacesRefs); i++ { + implementedInterfaceRef := implementedInterfacesRefs[i] + implementedInterfaceName := v.definition.TypeNameString(implementedInterfaceRef) + + if _, ok := v.typesImplementingInterfaces[typeName]; !ok { + v.typesImplementingInterfaces[typeName] = []string{implementedInterfaceName} + } + + skipInterface := false + for j := 0; j < len(v.typesImplementingInterfaces[typeName]); j++ { + if v.typesImplementingInterfaces[typeName][j] == implementedInterfaceName { + skipInterface = true + break + } + } + + if !skipInterface { + v.typesImplementingInterfaces[typeName] = append(v.typesImplementingInterfaces[typeName], implementedInterfaceName) + } + } +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/rule_implementing_types_are_supersets.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/rule_implementing_types_are_supersets.go new file mode 100644 index 00000000000..fd1fc3a3f85 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/rule_implementing_types_are_supersets.go @@ -0,0 +1,242 @@ +package astvalidation + +import ( + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +func ImplementingTypesAreSupersets() Rule { + return func(walker *astvisitor.Walker) { + visitor := &implementingTypesAreSupersetsVisitor{ + Walker: walker, + } + + walker.RegisterDocumentVisitor(visitor) + walker.RegisterEnterInterfaceTypeDefinitionVisitor(visitor) + walker.RegisterEnterInterfaceTypeExtensionVisitor(visitor) + walker.RegisterEnterObjectTypeDefinitionVisitor(visitor) + walker.RegisterEnterObjectTypeExtensionVisitor(visitor) + } +} + +type implementingTypesAreSupersetsVisitor struct { + *astvisitor.Walker + definition *ast.Document + implementingTypesWithFields map[string][]string + implementingTypesWithInterfacesNames map[string][]string +} + +func (v *implementingTypesAreSupersetsVisitor) EnterDocument(operation, definition *ast.Document) { + v.definition = operation + v.implementingTypesWithFields = make(map[string][]string) + v.implementingTypesWithInterfacesNames = make(map[string][]string) +} + +// LeaveDocument will iterate over all types which implement an interface by using the interface name. If a +// field does exist on the implemented interface but not on the implementing type, then the rule will consider it +// as invalid. 
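+// For example (hypothetical SDL), "type objectType implements interfaceOne"
+// is only valid if objectType defines every field declared on interfaceOne,
+// including the fields interfaceOne inherits from its own interfaces.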
+// +// Valid: +// ( interfaceBase -> [fieldA] ) +// interfaceOneImplementingInterfaceBase -> [fieldA, fieldB] +// objectTypeImplementingInterfaceOne -> [fieldA, fieldB, fieldC] +// +// Invalid: +// ( interfaceBase -> [fieldA] ) +// interfaceOneImplementingInterfaceBase -> [fieldA, fieldB] +// objectTypeImplementingInterfaceOne -> [fieldA, fieldC] +func (v *implementingTypesAreSupersetsVisitor) LeaveDocument(operation, definition *ast.Document) { + for typeName, interfacesNames := range v.implementingTypesWithInterfacesNames { + typeNameHasFields := true + typeNameFields, exists := v.implementingTypesWithFields[typeName] + if !exists || len(typeNameFields) == 0 { + typeNameHasFields = false + } + + typeNameFieldsLookupMap := map[string]bool{} + for i := 0; i < len(typeNameFields); i++ { + typeNameFieldsLookupMap[typeNameFields[i]] = true + } + + for i := 0; i < len(interfacesNames); i++ { + nodes, exists := v.definition.Index.NodesByNameStr(interfacesNames[i]) + if !exists { + continue + } + + var interfaceFieldRefs []int + for j := 0; j < len(nodes); j++ { + switch nodes[j].Kind { + case ast.NodeKindInterfaceTypeDefinition: + interfaceFieldRefs = append(interfaceFieldRefs, v.definition.InterfaceTypeDefinitions[nodes[j].Ref].FieldsDefinition.Refs...) + case ast.NodeKindInterfaceTypeExtension: + interfaceFieldRefs = append(interfaceFieldRefs, v.definition.InterfaceTypeExtensions[nodes[j].Ref].FieldsDefinition.Refs...) + default: + continue + } + } + + if !typeNameHasFields && len(interfaceFieldRefs) > 0 { + v.Report.AddExternalError(operationreport.ErrImplementingTypeDoesNotHaveFields([]byte(typeName))) + continue + } + + for j := 0; j < len(interfaceFieldRefs); j++ { + interfaceFieldName := v.definition.FieldDefinitionNameString(interfaceFieldRefs[j]) + if existsOnType := typeNameFieldsLookupMap[interfaceFieldName]; !existsOnType { + v.Report.AddExternalError(operationreport.ErrTypeDoesNotImplementFieldFromInterface( + []byte(typeName), + []byte(interfacesNames[i]), + []byte(interfaceFieldName), + )) + } + } + } + } +} + +func (v *implementingTypesAreSupersetsVisitor) EnterInterfaceTypeDefinition(ref int) { + interfacesRefs := v.definition.InterfaceTypeDefinitions[ref].ImplementsInterfaces.Refs + if len(interfacesRefs) == 0 { + return + } + + typeName := v.definition.InterfaceTypeDefinitionNameString(ref) + fieldDefinitionRefs := v.definition.InterfaceTypeDefinitions[ref].FieldsDefinition.Refs + v.collectFieldsForTypeName(typeName, fieldDefinitionRefs) + v.collectInterfaceNamesForImplementedInterfacesByTypeName(typeName, interfacesRefs) +} + +func (v *implementingTypesAreSupersetsVisitor) EnterInterfaceTypeExtension(ref int) { + interfacesRefs := v.definition.InterfaceTypeExtensions[ref].ImplementsInterfaces.Refs + if len(interfacesRefs) == 0 { + return + } + + typeName := v.definition.InterfaceTypeExtensionNameString(ref) + fieldDefinitionRefs := v.definition.InterfaceTypeExtensions[ref].FieldsDefinition.Refs + + nodesWithTypeName, exists := v.definition.Index.NodesByNameStr(typeName) + if !exists { + return // if exists is false then something is really wrong + } + + for i := 0; i < len(nodesWithTypeName); i++ { + switch nodesWithTypeName[i].Kind { + case ast.NodeKindInterfaceTypeDefinition: + baseInterfaceRef := nodesWithTypeName[i].Ref + baseInterfaceTypeFieldRefs := v.definition.InterfaceTypeDefinitions[baseInterfaceRef].FieldsDefinition.Refs + for j := 0; j < len(baseInterfaceTypeFieldRefs); j++ { + fieldDefinitionRefs = append(fieldDefinitionRefs, baseInterfaceTypeFieldRefs[j]) + } 
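+		// Nodes of any other kind cannot contribute inherited fields to this
+		// interface extension, so they are skipped.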
+ default: + continue + } + } + + v.collectFieldsForTypeName(typeName, fieldDefinitionRefs) + v.collectInterfaceNamesForImplementedInterfacesByTypeName(typeName, interfacesRefs) +} + +func (v *implementingTypesAreSupersetsVisitor) EnterObjectTypeDefinition(ref int) { + interfacesRefs := v.definition.ObjectTypeDefinitions[ref].ImplementsInterfaces.Refs + if len(interfacesRefs) == 0 { + return + } + + typeName := v.definition.ObjectTypeDefinitionNameString(ref) + fieldDefinitionRefs := v.definition.ObjectTypeDefinitions[ref].FieldsDefinition.Refs + v.collectFieldsForTypeName(typeName, fieldDefinitionRefs) + v.collectInterfaceNamesForImplementedInterfacesByTypeName(typeName, interfacesRefs) +} + +func (v *implementingTypesAreSupersetsVisitor) EnterObjectTypeExtension(ref int) { + interfacesRefs := v.definition.ObjectTypeExtensions[ref].ImplementsInterfaces.Refs + if len(interfacesRefs) == 0 { + return + } + + typeName := v.definition.ObjectTypeExtensionNameString(ref) + fieldDefinitionRefs := v.definition.ObjectTypeExtensions[ref].FieldsDefinition.Refs + + nodesWithTypeName, exists := v.definition.Index.NodesByNameStr(typeName) + if !exists { + return // if exists is false then something is really wrong + } + + for i := 0; i < len(nodesWithTypeName); i++ { + switch nodesWithTypeName[i].Kind { + case ast.NodeKindObjectTypeDefinition: + baseObjectTypeRef := nodesWithTypeName[i].Ref + baseObjectTypeInterfaceRefs := v.definition.ObjectTypeDefinitions[baseObjectTypeRef].FieldsDefinition.Refs + for j := 0; j < len(baseObjectTypeInterfaceRefs); j++ { + fieldDefinitionRefs = append(fieldDefinitionRefs, baseObjectTypeInterfaceRefs[j]) + } + default: + continue + } + } + + v.collectFieldsForTypeName(typeName, fieldDefinitionRefs) + v.collectInterfaceNamesForImplementedInterfacesByTypeName(typeName, interfacesRefs) +} + +// collectFieldsForTypeName will add all field names of a type which implements an interface to a slice in a +// map entry, so that it can be used as a lookup table later on. +// +// Example: +// interfaceOne -> [fieldA, fieldB] +// objectType -> [fieldA, fieldB, fieldC] +func (v *implementingTypesAreSupersetsVisitor) collectFieldsForTypeName(typeName string, fieldDefinitionRefs []int) { + if _, ok := v.implementingTypesWithFields[typeName]; !ok { + v.implementingTypesWithFields[typeName] = []string{} + } + + for i := 0; i < len(fieldDefinitionRefs); i++ { + fieldName := v.definition.FieldDefinitionNameString(fieldDefinitionRefs[i]) + + skipFieldName := false + for j := 0; j < len(v.implementingTypesWithFields[typeName]); j++ { + if fieldName == v.implementingTypesWithFields[typeName][j] { + skipFieldName = true + break + } + } + + if skipFieldName { + continue + } + + v.implementingTypesWithFields[typeName] = append(v.implementingTypesWithFields[typeName], fieldName) + } +} + +// collectInterfaceNamesForImplementedInterfacesByTypeName will add all interface names implemented by the given type, +// so it can be used to iterate over them when leaving the document. 
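+// Duplicate interface names are skipped, so each interface name appears at
+// most once per type.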
+// +// Example: +// interfaceOne -> [interfaceBase] +// objectType -> [interfaceOne, interfaceBase] +func (v *implementingTypesAreSupersetsVisitor) collectInterfaceNamesForImplementedInterfacesByTypeName(typeName string, typeRefs []int) { + if _, ok := v.implementingTypesWithInterfacesNames[typeName]; !ok { + v.implementingTypesWithInterfacesNames[typeName] = []string{} + } + + for i := 0; i < len(typeRefs); i++ { + interfaceName := v.definition.TypeNameString(typeRefs[i]) + skipInterfaceName := false + for j := 0; j < len(v.implementingTypesWithInterfacesNames[typeName]); j++ { + if interfaceName == v.implementingTypesWithInterfacesNames[typeName][j] { + skipInterfaceName = true + break + } + } + + if skipInterfaceName { + continue + } + + v.implementingTypesWithInterfacesNames[typeName] = append(v.implementingTypesWithInterfacesNames[typeName], interfaceName) + } +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/rule_known_type_names.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/rule_known_type_names.go new file mode 100644 index 00000000000..9d1e59f8263 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/rule_known_type_names.go @@ -0,0 +1,121 @@ +package astvalidation + +import ( + "github.com/cespare/xxhash/v2" + + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +func KnownTypeNames() Rule { + return func(walker *astvisitor.Walker) { + visitor := &knownTypeNamesVisitor{ + Walker: walker, + } + + walker.RegisterDocumentVisitor(visitor) + walker.RegisterEnterRootOperationTypeDefinitionVisitor(visitor) + walker.RegisterEnterFieldDefinitionVisitor(visitor) + walker.RegisterEnterUnionMemberTypeVisitor(visitor) + walker.RegisterEnterInputValueDefinitionVisitor(visitor) + walker.RegisterEnterObjectTypeDefinitionVisitor(visitor) + walker.RegisterEnterObjectTypeExtensionVisitor(visitor) + walker.RegisterEnterInterfaceTypeDefinitionVisitor(visitor) + walker.RegisterEnterScalarTypeDefinitionVisitor(visitor) + walker.RegisterEnterUnionTypeDefinitionVisitor(visitor) + walker.RegisterEnterInputObjectTypeDefinitionVisitor(visitor) + walker.RegisterEnterEnumTypeDefinitionVisitor(visitor) + } +} + +type knownTypeNamesVisitor struct { + *astvisitor.Walker + definition *ast.Document + definedTypeNameHashs map[uint64]bool + referencedTypeNames map[uint64][]byte +} + +func (u *knownTypeNamesVisitor) EnterDocument(operation, _ *ast.Document) { + u.definition = operation + u.definedTypeNameHashs = make(map[uint64]bool) + u.referencedTypeNames = make(map[uint64][]byte) +} + +func (u *knownTypeNamesVisitor) LeaveDocument(_, _ *ast.Document) { + for referencedTypeNameHash, referencedTypeName := range u.referencedTypeNames { + if !u.definedTypeNameHashs[referencedTypeNameHash] { + u.Report.AddExternalError(operationreport.ErrTypeUndefined(referencedTypeName)) + continue + } + } + +} + +func (u *knownTypeNamesVisitor) EnterRootOperationTypeDefinition(ref int) { + referencedTypeName := u.definition.Input.ByteSlice(u.definition.RootOperationTypeDefinitions[ref].NamedType.Name) + u.saveReferencedTypeName(referencedTypeName) +} + +func (u *knownTypeNamesVisitor) EnterFieldDefinition(ref int) { + referencedTypeRef := u.definition.ResolveUnderlyingType(u.definition.FieldDefinitions[ref].Type) + referencedTypeName := u.definition.TypeNameBytes(referencedTypeRef) + 
u.saveReferencedTypeName(referencedTypeName) +} + +func (u *knownTypeNamesVisitor) EnterUnionMemberType(ref int) { + referencedTypeName := u.definition.TypeNameBytes(ref) + u.saveReferencedTypeName(referencedTypeName) +} + +func (u *knownTypeNamesVisitor) EnterInputValueDefinition(ref int) { + referencedTypeRef := u.definition.InputValueDefinitions[ref].Type + referencedTypeName := u.definition.TypeNameBytes(referencedTypeRef) + u.saveReferencedTypeName(referencedTypeName) +} + +func (u *knownTypeNamesVisitor) EnterObjectTypeDefinition(ref int) { + typeName := u.definition.ObjectTypeDefinitionNameBytes(ref) + u.saveTypeName(typeName) +} + +func (u *knownTypeNamesVisitor) EnterObjectTypeExtension(ref int) { + typeName := u.definition.ObjectTypeExtensionNameBytes(ref) + u.saveTypeName(typeName) +} + +func (u *knownTypeNamesVisitor) EnterInterfaceTypeDefinition(ref int) { + typeName := u.definition.InterfaceTypeDefinitionNameBytes(ref) + u.saveTypeName(typeName) +} + +func (u *knownTypeNamesVisitor) EnterScalarTypeDefinition(ref int) { + typeName := u.definition.ScalarTypeDefinitionNameBytes(ref) + u.saveTypeName(typeName) +} + +func (u *knownTypeNamesVisitor) EnterUnionTypeDefinition(ref int) { + typeName := u.definition.UnionTypeDefinitionNameBytes(ref) + u.saveTypeName(typeName) +} + +func (u *knownTypeNamesVisitor) EnterInputObjectTypeDefinition(ref int) { + typeName := u.definition.InputObjectTypeDefinitionNameBytes(ref) + u.saveTypeName(typeName) +} + +func (u *knownTypeNamesVisitor) EnterEnumTypeDefinition(ref int) { + typeName := u.definition.EnumTypeDefinitionNameBytes(ref) + u.saveTypeName(typeName) +} + +func (u *knownTypeNamesVisitor) saveTypeName(typeName ast.ByteSlice) { + u.definedTypeNameHashs[xxhash.Sum64(typeName)] = true +} + +func (u *knownTypeNamesVisitor) saveReferencedTypeName(referencedTypeName ast.ByteSlice) { + if len(referencedTypeName) == 0 { + return + } + u.referencedTypeNames[xxhash.Sum64(referencedTypeName)] = referencedTypeName +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/rule_populated_type_bodies.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/rule_populated_type_bodies.go new file mode 100644 index 00000000000..9763d8a9c97 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/rule_populated_type_bodies.go @@ -0,0 +1,116 @@ +package astvalidation + +import ( + "bytes" + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +type populatedTypeBodiesVisitor struct { + *astvisitor.Walker + definition *ast.Document +} + +func PopulatedTypeBodies() Rule { + return func(walker *astvisitor.Walker) { + visitor := &populatedTypeBodiesVisitor{ + Walker: walker, + definition: nil, + } + + walker.RegisterEnterDocumentVisitor(visitor) + walker.RegisterEnterEnumTypeDefinitionVisitor(visitor) + walker.RegisterEnterEnumTypeExtensionVisitor(visitor) + walker.RegisterEnterInputObjectTypeDefinitionVisitor(visitor) + walker.RegisterEnterInputObjectTypeExtensionVisitor(visitor) + walker.RegisterEnterInterfaceTypeDefinitionVisitor(visitor) + walker.RegisterEnterInterfaceTypeExtensionVisitor(visitor) + walker.RegisterEnterObjectTypeDefinitionVisitor(visitor) + walker.RegisterEnterObjectTypeExtensionVisitor(visitor) + } +} + +func (p *populatedTypeBodiesVisitor) EnterDocument(operation, _ *ast.Document) { + p.definition = operation +} + +func (p 
populatedTypeBodiesVisitor) EnterEnumTypeDefinition(ref int) { + if !p.definition.EnumTypeDefinitions[ref].HasEnumValuesDefinition { + p.Report.AddExternalError(operationreport.ErrTypeBodyMustNotBeEmpty("enum", p.definition.EnumTypeDefinitionNameString(ref))) + return + } +} + +func (p *populatedTypeBodiesVisitor) EnterEnumTypeExtension(ref int) { + if !p.definition.EnumTypeExtensions[ref].HasEnumValuesDefinition { + p.Report.AddExternalError(operationreport.ErrTypeBodyMustNotBeEmpty("enum extension", p.definition.EnumTypeExtensionNameString(ref))) + return + } +} + +func (p populatedTypeBodiesVisitor) EnterInputObjectTypeDefinition(ref int) { + if !p.definition.InputObjectTypeDefinitions[ref].HasInputFieldsDefinition { + p.Report.AddExternalError(operationreport.ErrTypeBodyMustNotBeEmpty("input", p.definition.InputObjectTypeDefinitionNameString(ref))) + return + } +} + +func (p *populatedTypeBodiesVisitor) EnterInputObjectTypeExtension(ref int) { + if !p.definition.InputObjectTypeExtensions[ref].HasInputFieldsDefinition { + p.Report.AddExternalError(operationreport.ErrTypeBodyMustNotBeEmpty("input extension", p.definition.InputObjectTypeExtensionNameString(ref))) + return + } +} + +func (p populatedTypeBodiesVisitor) EnterInterfaceTypeDefinition(ref int) { + switch p.definition.InterfaceTypeDefinitions[ref].HasFieldDefinitions { + case true: + if !p.doesTypeOnlyContainReservedFields(p.definition.InterfaceTypeDefinitions[ref].FieldsDefinition.Refs) { + return + } + fallthrough + case false: + p.Report.AddExternalError(operationreport.ErrTypeBodyMustNotBeEmpty("interface", p.definition.InterfaceTypeDefinitionNameString(ref))) + return + } +} + +func (p *populatedTypeBodiesVisitor) EnterInterfaceTypeExtension(ref int) { + if !p.definition.InterfaceTypeExtensions[ref].HasFieldDefinitions { + p.Report.AddExternalError(operationreport.ErrTypeBodyMustNotBeEmpty("interface extension", p.definition.InterfaceTypeExtensionNameString(ref))) + return + } +} + +func (p populatedTypeBodiesVisitor) EnterObjectTypeDefinition(ref int) { + nameBytes := p.definition.ObjectTypeDefinitionNameBytes(ref) + object := p.definition.ObjectTypeDefinitions[ref] + switch object.HasFieldDefinitions { + case true: + if ast.IsRootType(nameBytes) || !p.doesTypeOnlyContainReservedFields(p.definition.ObjectTypeDefinitions[ref].FieldsDefinition.Refs) { + return + } + fallthrough + case false: + p.Report.AddExternalError(operationreport.ErrTypeBodyMustNotBeEmpty("object", string(nameBytes))) + return + } +} + +func (p *populatedTypeBodiesVisitor) EnterObjectTypeExtension(ref int) { + if !p.definition.ObjectTypeExtensions[ref].HasFieldDefinitions { + p.Report.AddExternalError(operationreport.ErrTypeBodyMustNotBeEmpty("object extension", p.definition.ObjectTypeExtensionNameString(ref))) + return + } +} + +func (p *populatedTypeBodiesVisitor) doesTypeOnlyContainReservedFields(refs []int) bool { + for _, fieldRef := range refs { + fieldNameBytes := p.definition.FieldDefinitionNameBytes(fieldRef) + if len(fieldNameBytes) < 2 || !bytes.HasPrefix(fieldNameBytes, reservedFieldPrefix) { + return false + } + } + return true +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/rule_require_defined_types_for_extensions.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/rule_require_defined_types_for_extensions.go new file mode 100644 index 00000000000..f8a1a96018c --- /dev/null +++ 
b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/rule_require_defined_types_for_extensions.go @@ -0,0 +1,89 @@ +package astvalidation + +import ( + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +func RequireDefinedTypesForExtensions() Rule { + return func(walker *astvisitor.Walker) { + visitor := &requireDefinedTypesForExtensionsVisitor{ + Walker: walker, + } + + walker.RegisterEnterDocumentVisitor(visitor) + walker.RegisterEnterScalarTypeExtensionVisitor(visitor) + walker.RegisterEnterObjectTypeExtensionVisitor(visitor) + walker.RegisterEnterInterfaceTypeExtensionVisitor(visitor) + walker.RegisterEnterUnionTypeExtensionVisitor(visitor) + walker.RegisterEnterEnumTypeExtensionVisitor(visitor) + walker.RegisterEnterInputObjectTypeExtensionVisitor(visitor) + } +} + +type requireDefinedTypesForExtensionsVisitor struct { + *astvisitor.Walker + definition *ast.Document +} + +func (r *requireDefinedTypesForExtensionsVisitor) EnterDocument(operation, definition *ast.Document) { + r.definition = operation +} + +func (r *requireDefinedTypesForExtensionsVisitor) EnterScalarTypeExtension(ref int) { + name := r.definition.ScalarTypeExtensionNameBytes(ref) + if !r.extensionIsValidForNodeKind(name, ast.NodeKindScalarTypeDefinition) { + r.Report.AddExternalError(operationreport.ErrScalarTypeUndefined(name)) + } +} + +func (r *requireDefinedTypesForExtensionsVisitor) EnterObjectTypeExtension(ref int) { + name := r.definition.ObjectTypeExtensionNameBytes(ref) + if !r.extensionIsValidForNodeKind(name, ast.NodeKindObjectTypeDefinition) { + r.Report.AddExternalError(operationreport.ErrTypeUndefined(name)) + } +} + +func (r *requireDefinedTypesForExtensionsVisitor) EnterInterfaceTypeExtension(ref int) { + name := r.definition.InterfaceTypeExtensionNameBytes(ref) + if !r.extensionIsValidForNodeKind(name, ast.NodeKindInterfaceTypeDefinition) { + r.Report.AddExternalError(operationreport.ErrInterfaceTypeUndefined(name)) + } +} + +func (r *requireDefinedTypesForExtensionsVisitor) EnterUnionTypeExtension(ref int) { + name := r.definition.UnionTypeExtensionNameBytes(ref) + if !r.extensionIsValidForNodeKind(name, ast.NodeKindUnionTypeDefinition) { + r.Report.AddExternalError(operationreport.ErrUnionTypeUndefined(name)) + } +} + +func (r *requireDefinedTypesForExtensionsVisitor) EnterEnumTypeExtension(ref int) { + name := r.definition.EnumTypeExtensionNameBytes(ref) + if !r.extensionIsValidForNodeKind(name, ast.NodeKindEnumTypeDefinition) { + r.Report.AddExternalError(operationreport.ErrEnumTypeUndefined(name)) + } +} + +func (r *requireDefinedTypesForExtensionsVisitor) EnterInputObjectTypeExtension(ref int) { + name := r.definition.InputObjectTypeExtensionNameBytes(ref) + if !r.extensionIsValidForNodeKind(name, ast.NodeKindInputObjectTypeDefinition) { + r.Report.AddExternalError(operationreport.ErrInputObjectTypeUndefined(name)) + } +} + +func (r *requireDefinedTypesForExtensionsVisitor) extensionIsValidForNodeKind(name ast.ByteSlice, definitionNodeKind ast.NodeKind) bool { + nodes, exists := r.definition.Index.NodesByNameBytes(name) + if !exists { + return true + } + + for i := 0; i < len(nodes); i++ { + if nodes[i].Kind == definitionNodeKind { + return true + } + } + + return false +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/rule_unique_enum_value_names.go 
b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/rule_unique_enum_value_names.go new file mode 100644 index 00000000000..9b408053749 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/rule_unique_enum_value_names.go @@ -0,0 +1,92 @@ +package astvalidation + +import ( + "github.com/cespare/xxhash/v2" + + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +type hashedEnumValueNames map[uint64]bool + +func UniqueEnumValueNames() Rule { + return func(walker *astvisitor.Walker) { + visitor := &uniqueEnumValueNamesVisitor{ + Walker: walker, + } + + walker.RegisterEnterDocumentVisitor(visitor) + walker.RegisterEnterEnumValueDefinitionVisitor(visitor) + walker.RegisterEnumTypeDefinitionVisitor(visitor) + walker.RegisterEnumTypeExtensionVisitor(visitor) + } +} + +type uniqueEnumValueNamesVisitor struct { + *astvisitor.Walker + definition *ast.Document + currentEnumName ast.ByteSlice + currentEnumHash uint64 + usedEnumValues map[uint64]hashedEnumValueNames +} + +func (u *uniqueEnumValueNamesVisitor) EnterDocument(operation, definition *ast.Document) { + u.definition = operation + u.currentEnumName = u.currentEnumName[:0] + u.currentEnumHash = 0 + u.usedEnumValues = make(map[uint64]hashedEnumValueNames) +} + +func (u *uniqueEnumValueNamesVisitor) EnterEnumValueDefinition(ref int) { + enumValueName := u.definition.EnumValueDefinitionNameBytes(ref) + u.checkEnumValueName(enumValueName) +} + +func (u *uniqueEnumValueNamesVisitor) EnterEnumTypeDefinition(ref int) { + enumName := u.definition.EnumTypeDefinitionNameBytes(ref) + u.setCurrentEnum(enumName) +} + +func (u *uniqueEnumValueNamesVisitor) LeaveEnumTypeDefinition(ref int) { + u.unsetCurrentEnum() +} + +func (u *uniqueEnumValueNamesVisitor) EnterEnumTypeExtension(ref int) { + enumName := u.definition.EnumTypeExtensionNameBytes(ref) + u.setCurrentEnum(enumName) +} + +func (u *uniqueEnumValueNamesVisitor) LeaveEnumTypeExtension(ref int) { + u.unsetCurrentEnum() +} + +func (u *uniqueEnumValueNamesVisitor) setCurrentEnum(enumName ast.ByteSlice) { + u.currentEnumName = enumName + u.currentEnumHash = xxhash.Sum64(enumName) +} + +func (u *uniqueEnumValueNamesVisitor) unsetCurrentEnum() { + u.currentEnumName = u.currentEnumName[:0] + u.currentEnumHash = 0 +} + +func (u *uniqueEnumValueNamesVisitor) checkEnumValueName(enumValueName ast.ByteSlice) { + if len(u.currentEnumName) == 0 || u.currentEnumHash == 0 { + return + } + + enumValueNameHash := xxhash.Sum64(enumValueName) + enumValueNames, ok := u.usedEnumValues[u.currentEnumHash] + if !ok { + enumValueNames = make(hashedEnumValueNames) + } + + if enumValueNames[enumValueNameHash] { + u.Report.AddExternalError(operationreport.ErrEnumValueNameMustBeUnique(u.currentEnumName, enumValueName)) + return + } + + enumValueNames[enumValueNameHash] = true + u.usedEnumValues[u.currentEnumHash] = enumValueNames +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/rule_unique_field_definition_names.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/rule_unique_field_definition_names.go new file mode 100644 index 00000000000..a77f411703c --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/rule_unique_field_definition_names.go @@ -0,0 +1,155 @@ +package astvalidation + +import ( + "bytes" + + "github.com/cespare/xxhash/v2" + + 
"github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +type hashedFieldNames map[uint64]bool + +func UniqueFieldDefinitionNames() Rule { + return func(walker *astvisitor.Walker) { + visitor := &uniqueFieldDefinitionNamesVisitor{ + Walker: walker, + } + + walker.RegisterEnterDocumentVisitor(visitor) + walker.RegisterEnterFieldDefinitionVisitor(visitor) + walker.RegisterEnterInputValueDefinitionVisitor(visitor) + walker.RegisterObjectTypeDefinitionVisitor(visitor) + walker.RegisterObjectTypeExtensionVisitor(visitor) + walker.RegisterInterfaceTypeDefinitionVisitor(visitor) + walker.RegisterInterfaceTypeExtensionVisitor(visitor) + walker.RegisterInputObjectTypeDefinitionVisitor(visitor) + walker.RegisterInputObjectTypeExtensionVisitor(visitor) + } +} + +type uniqueFieldDefinitionNamesVisitor struct { + *astvisitor.Walker + definition *ast.Document + currentTypeName ast.ByteSlice + currentTypeNameHash uint64 + currentTypeKind ast.NodeKind + usedFieldNames map[uint64]hashedFieldNames // map of hashed type names containing a map of hashed field names +} + +func (u *uniqueFieldDefinitionNamesVisitor) EnterDocument(operation, _ *ast.Document) { + u.definition = operation + u.currentTypeName = u.currentTypeName[:0] + u.currentTypeNameHash = 0 + u.currentTypeKind = ast.NodeKindUnknown + u.usedFieldNames = make(map[uint64]hashedFieldNames) +} + +func (u *uniqueFieldDefinitionNamesVisitor) EnterFieldDefinition(ref int) { + fieldName := u.definition.FieldDefinitionNameBytes(ref) + u.checkField(fieldName) +} + +func (u *uniqueFieldDefinitionNamesVisitor) EnterInputValueDefinition(ref int) { + if u.currentTypeKind != ast.NodeKindInputObjectTypeDefinition && u.currentTypeKind != ast.NodeKindInputObjectTypeExtension { + return + } + + name := u.definition.InputValueDefinitionNameBytes(ref) + u.checkField(name) +} + +func (u *uniqueFieldDefinitionNamesVisitor) EnterObjectTypeDefinition(ref int) { + typeName := u.definition.ObjectTypeDefinitionNameBytes(ref) + u.setCurrentTypeName(typeName, ast.NodeKindObjectTypeDefinition) +} + +func (u *uniqueFieldDefinitionNamesVisitor) LeaveObjectTypeDefinition(_ int) { + u.unsetCurrentTypeName() +} + +func (u *uniqueFieldDefinitionNamesVisitor) EnterObjectTypeExtension(ref int) { + typeName := u.definition.ObjectTypeExtensionNameBytes(ref) + u.setCurrentTypeName(typeName, ast.NodeKindObjectTypeExtension) +} + +func (u *uniqueFieldDefinitionNamesVisitor) LeaveObjectTypeExtension(_ int) { + u.unsetCurrentTypeName() +} + +func (u *uniqueFieldDefinitionNamesVisitor) EnterInterfaceTypeDefinition(ref int) { + typeName := u.definition.InterfaceTypeDefinitionNameBytes(ref) + u.setCurrentTypeName(typeName, ast.NodeKindInterfaceTypeDefinition) +} + +func (u *uniqueFieldDefinitionNamesVisitor) LeaveInterfaceTypeDefinition(_ int) { + u.unsetCurrentTypeName() +} + +func (u *uniqueFieldDefinitionNamesVisitor) EnterInterfaceTypeExtension(ref int) { + typeName := u.definition.InterfaceTypeExtensionNameBytes(ref) + u.setCurrentTypeName(typeName, ast.NodeKindInterfaceTypeExtension) +} + +func (u *uniqueFieldDefinitionNamesVisitor) LeaveInterfaceTypeExtension(_ int) { + u.unsetCurrentTypeName() +} + +func (u *uniqueFieldDefinitionNamesVisitor) EnterInputObjectTypeDefinition(ref int) { + typeName := u.definition.InputObjectTypeDefinitionNameBytes(ref) + u.setCurrentTypeName(typeName, ast.NodeKindInputObjectTypeDefinition) +} + +func (u 
*uniqueFieldDefinitionNamesVisitor) LeaveInputObjectTypeDefinition(_ int) { + u.unsetCurrentTypeName() +} + +func (u *uniqueFieldDefinitionNamesVisitor) EnterInputObjectTypeExtension(ref int) { + typeName := u.definition.InputObjectTypeExtensionNameBytes(ref) + u.setCurrentTypeName(typeName, ast.NodeKindInputObjectTypeExtension) +} + +func (u *uniqueFieldDefinitionNamesVisitor) LeaveInputObjectTypeExtension(_ int) { + u.unsetCurrentTypeName() +} + +func (u *uniqueFieldDefinitionNamesVisitor) setCurrentTypeName(typeName ast.ByteSlice, kind ast.NodeKind) { + if bytes.HasPrefix(typeName, []byte("__")) { // ignore graphql reserved types + return + } + + u.currentTypeName = typeName + u.currentTypeNameHash = xxhash.Sum64(typeName) + u.currentTypeKind = kind +} + +func (u *uniqueFieldDefinitionNamesVisitor) unsetCurrentTypeName() { + u.currentTypeName = u.currentTypeName[:0] + u.currentTypeNameHash = 0 + u.currentTypeKind = ast.NodeKindUnknown +} + +func (u *uniqueFieldDefinitionNamesVisitor) checkField(fieldName ast.ByteSlice) { + if bytes.HasPrefix(fieldName, reservedFieldPrefix) { // don't validate graphql reserved fields + return + } + + if len(u.currentTypeName) == 0 || u.currentTypeNameHash == 0 || u.currentTypeKind == ast.NodeKindUnknown { + return + } + + fieldNames, ok := u.usedFieldNames[u.currentTypeNameHash] + if !ok { + fieldNames = make(hashedFieldNames) + } + + if fieldNames[xxhash.Sum64(fieldName)] { + u.Report.AddExternalError(operationreport.ErrFieldNameMustBeUniqueOnType(fieldName, u.currentTypeName)) + return + } + + fieldNames[xxhash.Sum64(fieldName)] = true + u.usedFieldNames[u.currentTypeNameHash] = fieldNames +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/rule_unique_operation_types.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/rule_unique_operation_types.go new file mode 100644 index 00000000000..6f29e2c357e --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/rule_unique_operation_types.go @@ -0,0 +1,54 @@ +package astvalidation + +import ( + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +func UniqueOperationTypes() Rule { + return func(walker *astvisitor.Walker) { + visitor := &uniqueOperationTypesVisitor{ + Walker: walker, + } + + walker.RegisterEnterDocumentVisitor(visitor) + walker.RegisterEnterRootOperationTypeDefinitionVisitor(visitor) + } +} + +type uniqueOperationTypesVisitor struct { + *astvisitor.Walker + definition *ast.Document + queryIsDefined bool + mutationIsDefined bool + subscriptionIsDefined bool +} + +func (u *uniqueOperationTypesVisitor) EnterDocument(operation, definition *ast.Document) { + u.definition = operation + u.queryIsDefined = false + u.mutationIsDefined = false + u.subscriptionIsDefined = false +} + +func (u *uniqueOperationTypesVisitor) EnterRootOperationTypeDefinition(ref int) { + operationType := u.definition.RootOperationTypeDefinitions[ref].OperationType + switch operationType { + case ast.OperationTypeQuery: + if u.queryIsDefined { + u.Report.AddExternalError(operationreport.ErrOnlyOneQueryTypeAllowed()) + } + u.queryIsDefined = true + case ast.OperationTypeMutation: + if u.mutationIsDefined { + u.Report.AddExternalError(operationreport.ErrOnlyOneMutationTypeAllowed()) + } + u.mutationIsDefined = true + case ast.OperationTypeSubscription: + if u.subscriptionIsDefined { + 
u.Report.AddExternalError(operationreport.ErrOnlyOneSubscriptionTypeAllowed()) + } + u.subscriptionIsDefined = true + } +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/rule_unique_type_names.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/rule_unique_type_names.go new file mode 100644 index 00000000000..e7cb2d44c1a --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/rule_unique_type_names.go @@ -0,0 +1,75 @@ +package astvalidation + +import ( + "github.com/cespare/xxhash/v2" + + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +func UniqueTypeNames() Rule { + return func(walker *astvisitor.Walker) { + visitor := &uniqueTypeNamesVisitor{ + Walker: walker, + } + + walker.RegisterEnterDocumentVisitor(visitor) + walker.RegisterEnterObjectTypeDefinitionVisitor(visitor) + walker.RegisterEnterScalarTypeDefinitionVisitor(visitor) + walker.RegisterEnterInterfaceTypeDefinitionVisitor(visitor) + walker.RegisterEnterUnionTypeDefinitionVisitor(visitor) + walker.RegisterEnterEnumTypeDefinitionVisitor(visitor) + walker.RegisterEnterInputObjectTypeDefinitionVisitor(visitor) + } +} + +type uniqueTypeNamesVisitor struct { + *astvisitor.Walker + definition *ast.Document + usedTypeNamesAsHash map[uint64]bool +} + +func (u *uniqueTypeNamesVisitor) EnterDocument(operation, definition *ast.Document) { + u.definition = operation + u.usedTypeNamesAsHash = make(map[uint64]bool) +} + +func (u *uniqueTypeNamesVisitor) EnterObjectTypeDefinition(ref int) { + typeName := u.definition.ObjectTypeDefinitionNameBytes(ref) + u.checkTypeName(typeName) +} + +func (u *uniqueTypeNamesVisitor) EnterScalarTypeDefinition(ref int) { + typeName := u.definition.ScalarTypeDefinitionNameBytes(ref) + u.checkTypeName(typeName) +} + +func (u *uniqueTypeNamesVisitor) EnterInterfaceTypeDefinition(ref int) { + typeName := u.definition.InterfaceTypeDefinitionNameBytes(ref) + u.checkTypeName(typeName) +} + +func (u *uniqueTypeNamesVisitor) EnterUnionTypeDefinition(ref int) { + typeName := u.definition.UnionTypeDefinitionNameBytes(ref) + u.checkTypeName(typeName) +} + +func (u *uniqueTypeNamesVisitor) EnterEnumTypeDefinition(ref int) { + typeName := u.definition.EnumTypeDefinitionNameBytes(ref) + u.checkTypeName(typeName) +} + +func (u *uniqueTypeNamesVisitor) EnterInputObjectTypeDefinition(ref int) { + typeName := u.definition.InputObjectTypeDefinitionNameBytes(ref) + u.checkTypeName(typeName) +} + +func (u *uniqueTypeNamesVisitor) checkTypeName(typeName ast.ByteSlice) { + hashedTypeName := xxhash.Sum64(typeName) + if u.usedTypeNamesAsHash[hashedTypeName] { + u.Report.AddExternalError(operationreport.ErrTypeNameMustBeUnique(typeName)) + return + } + u.usedTypeNamesAsHash[hashedTypeName] = true +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/rule_unique_union_member_types.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/rule_unique_union_member_types.go new file mode 100644 index 00000000000..32e76907049 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/rule_unique_union_member_types.go @@ -0,0 +1,93 @@ +package astvalidation + +import ( + "github.com/cespare/xxhash/v2" + + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" + 
"github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +type hashedMembers map[uint64]bool + +type uniqueUnionMemberTypesVisitor struct { + *astvisitor.Walker + definition *ast.Document + currentUnionName ast.ByteSlice + currentUnionHash uint64 + presentMembers map[uint64]hashedMembers +} + +func UniqueUnionMemberTypes() Rule { + return func(walker *astvisitor.Walker) { + visitor := &uniqueUnionMemberTypesVisitor{ + Walker: walker, + } + + walker.RegisterEnterDocumentVisitor(visitor) + walker.RegisterEnterUnionTypeDefinitionVisitor(visitor) + walker.RegisterEnterUnionMemberTypeVisitor(visitor) + walker.RegisterUnionTypeDefinitionVisitor(visitor) + walker.RegisterUnionTypeExtensionVisitor(visitor) + } +} + +func (u *uniqueUnionMemberTypesVisitor) EnterDocument(operation, _ *ast.Document) { + u.definition = operation + u.currentUnionName = u.currentUnionName[:0] + u.currentUnionHash = 0 + u.presentMembers = make(map[uint64]hashedMembers) +} + +func (u *uniqueUnionMemberTypesVisitor) EnterUnionTypeDefinition(ref int) { + unionName := u.definition.UnionTypeDefinitionNameBytes(ref) + u.setCurrentUnion(unionName) +} + +func (u *uniqueUnionMemberTypesVisitor) LeaveUnionTypeDefinition(_ int) { + u.unsetCurrentUnion() +} + +func (u *uniqueUnionMemberTypesVisitor) EnterUnionMemberType(ref int) { + memberName := u.definition.TypeNameBytes(ref) + u.checkMemberName(memberName) +} + +func (u *uniqueUnionMemberTypesVisitor) EnterUnionTypeExtension(ref int) { + unionName := u.definition.UnionTypeExtensionNameBytes(ref) + u.setCurrentUnion(unionName) +} + +func (u *uniqueUnionMemberTypesVisitor) LeaveUnionTypeExtension(_ int) { + u.unsetCurrentUnion() +} + +func (u *uniqueUnionMemberTypesVisitor) setCurrentUnion(unionName ast.ByteSlice) { + u.currentUnionName = unionName + u.currentUnionHash = xxhash.Sum64(unionName) +} + +func (u *uniqueUnionMemberTypesVisitor) unsetCurrentUnion() { + u.currentUnionName = u.currentUnionName[:0] + u.currentUnionHash = 0 +} + +func (u *uniqueUnionMemberTypesVisitor) checkMemberName(memberName ast.ByteSlice) { + if len(u.currentUnionName) == 0 || u.currentUnionHash == 0 { + return + } + + memberNameHash := xxhash.Sum64(memberName) + memberNames, ok := u.presentMembers[u.currentUnionHash] + if !ok { + memberNames = make(hashedMembers) + } + + if memberNames[memberNameHash] { + u.Report.AddExternalError(operationreport.ErrUnionMembersMustBeUnique(u.currentUnionName, memberName)) + return + } + + memberNames[memberNameHash] = true + u.presentMembers[u.currentUnionHash] = memberNames +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/validation_state.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/validation_state.go new file mode 100644 index 00000000000..00f7eb7160b --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/validation_state.go @@ -0,0 +1,12 @@ +//go:generate stringer -type=ValidationState -output validation_state_string.go + +package astvalidation + +// ValidationState is the outcome of a validation +type ValidationState int + +const ( + UnknownState ValidationState = iota + Valid + Invalid +) diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/validation_state_string.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/validation_state_string.go new file mode 100644 index 00000000000..686555d0129 --- /dev/null +++ 
b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation/validation_state_string.go
@@ -0,0 +1,25 @@
+// Code generated by "stringer -type=ValidationState -output validation_state_string.go"; DO NOT EDIT.
+
+package astvalidation
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[UnknownState-0]
+	_ = x[Valid-1]
+	_ = x[Invalid-2]
+}
+
+const _ValidationState_name = "UnknownStateValidInvalid"
+
+var _ValidationState_index = [...]uint8{0, 12, 17, 24}
+
+func (i ValidationState) String() string {
+	if i < 0 || i >= ValidationState(len(_ValidationState_index)-1) {
+		return "ValidationState(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _ValidationState_name[_ValidationState_index[i]:_ValidationState_index[i+1]]
+}
diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor/astvisitor.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor/astvisitor.go
new file mode 100644
index 00000000000..49c522c6173
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor/astvisitor.go
@@ -0,0 +1,8 @@
+// Package astvisitor enables efficient and powerful traversal of GraphQL document ASTs.
+//
+// Walker has more options to configure the behaviour and offers more metadata than SimpleWalker.
+// SimpleWalker, on the other hand, is more performant.
+//
+// If all Nodes should be visited and not much metadata is needed, go with SimpleWalker.
+// If you only need to visit a subset of Nodes or want specific metadata, e.g. TypeDefinitions, you should go with Walker.
+package astvisitor
diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor/simplevisitor.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor/simplevisitor.go
new file mode 100644
index 00000000000..b1b6cec36d9
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor/simplevisitor.go
@@ -0,0 +1,787 @@
+package astvisitor
+
+import (
+	"fmt"
+
+	"github.com/TykTechnologies/graphql-go-tools/pkg/ast"
+)
+
+type SimpleWalker struct {
+	err              error
+	document         *ast.Document
+	Depth            int
+	Ancestors        []ast.Node
+	visitor          AllNodesVisitor
+	SelectionsBefore []int
+	SelectionsAfter  []int
+}
+
+func NewSimpleWalker(ancestorSize int) SimpleWalker {
+	return SimpleWalker{
+		Ancestors: make([]ast.Node, 0, ancestorSize),
+	}
+}
+
+func (w *SimpleWalker) SetVisitor(visitor AllNodesVisitor) {
+	w.visitor = visitor
+}
+
+func (w *SimpleWalker) Walk(document, definition *ast.Document) error {
+	return w.WalkDocument(document)
+}
+
+func (w *SimpleWalker) WalkDocument(document *ast.Document) error {
+
+	if w.visitor == nil {
+		return fmt.Errorf("visitor must not be nil, use SetVisitor()")
+	}
+
+	w.err = nil
+	w.Ancestors = w.Ancestors[:0]
+	w.document = document
+	w.Depth = 0
+	w.walk()
+	return w.err
+}
+
+func (w *SimpleWalker) appendAncestor(ref int, kind ast.NodeKind) {
+	w.Ancestors = append(w.Ancestors, ast.Node{
+		Kind: kind,
+		Ref:  ref,
+	})
+}
+
+func (w *SimpleWalker) removeLastAncestor() {
+	w.Ancestors = w.Ancestors[:len(w.Ancestors)-1]
+}
+
+func (w *SimpleWalker) increaseDepth() {
+	w.Depth++
+}
+
+func (w *SimpleWalker) decreaseDepth() {
+	w.Depth--
+}
+
+func (w *SimpleWalker) walk() {
+
+	if w.document == nil {
+		w.err = fmt.Errorf("document must not be nil")
+		return
+	}
+
+	w.visitor.EnterDocument(w.document, nil)
+
+	for i := range
w.document.RootNodes { + isLast := i == len(w.document.RootNodes)-1 + switch w.document.RootNodes[i].Kind { + case ast.NodeKindOperationDefinition: + w.walkOperationDefinition(w.document.RootNodes[i].Ref, isLast) + case ast.NodeKindFragmentDefinition: + w.walkFragmentDefinition(w.document.RootNodes[i].Ref) + case ast.NodeKindSchemaDefinition: + w.walkSchemaDefinition(w.document.RootNodes[i].Ref) + case ast.NodeKindSchemaExtension: + w.walkSchemaExtension(w.document.RootNodes[i].Ref) + case ast.NodeKindDirectiveDefinition: + w.walkDirectiveDefinition(w.document.RootNodes[i].Ref) + case ast.NodeKindObjectTypeDefinition: + w.walkObjectTypeDefinition(w.document.RootNodes[i].Ref) + case ast.NodeKindObjectTypeExtension: + w.walkObjectTypeExtension(w.document.RootNodes[i].Ref) + case ast.NodeKindInterfaceTypeDefinition: + w.walkInterfaceTypeDefinition(w.document.RootNodes[i].Ref) + case ast.NodeKindInterfaceTypeExtension: + w.walkInterfaceTypeExtension(w.document.RootNodes[i].Ref) + case ast.NodeKindScalarTypeDefinition: + w.walkScalarTypeDefinition(w.document.RootNodes[i].Ref) + case ast.NodeKindScalarTypeExtension: + w.walkScalarTypeExtension(w.document.RootNodes[i].Ref) + case ast.NodeKindUnionTypeDefinition: + w.walkUnionTypeDefinition(w.document.RootNodes[i].Ref) + case ast.NodeKindUnionTypeExtension: + w.walkUnionTypeExtension(w.document.RootNodes[i].Ref) + case ast.NodeKindEnumTypeDefinition: + w.walkEnumTypeDefinition(w.document.RootNodes[i].Ref) + case ast.NodeKindEnumTypeExtension: + w.walkEnumTypeExtension(w.document.RootNodes[i].Ref) + case ast.NodeKindInputObjectTypeDefinition: + w.walkInputObjectTypeDefinition(w.document.RootNodes[i].Ref) + case ast.NodeKindInputObjectTypeExtension: + w.walkInputObjectTypeExtension(w.document.RootNodes[i].Ref) + } + } + + w.visitor.LeaveDocument(w.document, nil) +} + +func (w *SimpleWalker) walkOperationDefinition(ref int, isLastRootNode bool) { + w.increaseDepth() + + w.visitor.EnterOperationDefinition(ref) + + w.appendAncestor(ref, ast.NodeKindOperationDefinition) + + if w.document.OperationDefinitions[ref].HasVariableDefinitions { + for _, i := range w.document.OperationDefinitions[ref].VariableDefinitions.Refs { + w.walkVariableDefinition(i) + } + } + + if w.document.OperationDefinitions[ref].HasDirectives { + for _, i := range w.document.OperationDefinitions[ref].Directives.Refs { + w.walkDirective(i) + } + } + + if w.document.OperationDefinitions[ref].HasSelections { + w.walkSelectionSet(w.document.OperationDefinitions[ref].SelectionSet) + } + + w.removeLastAncestor() + + w.visitor.LeaveOperationDefinition(ref) + + w.decreaseDepth() +} + +func (w *SimpleWalker) walkVariableDefinition(ref int) { + w.increaseDepth() + + w.visitor.EnterVariableDefinition(ref) + + w.appendAncestor(ref, ast.NodeKindVariableDefinition) + + if w.document.VariableDefinitions[ref].HasDirectives { + for _, i := range w.document.VariableDefinitions[ref].Directives.Refs { + w.walkDirective(i) + } + } + + w.removeLastAncestor() + + w.visitor.LeaveVariableDefinition(ref) + + w.decreaseDepth() +} + +func (w *SimpleWalker) walkSelectionSet(ref int) { + w.increaseDepth() + + w.visitor.EnterSelectionSet(ref) + + w.appendAncestor(ref, ast.NodeKindSelectionSet) + + for i, j := range w.document.SelectionSets[ref].SelectionRefs { + + w.SelectionsBefore = w.document.SelectionSets[ref].SelectionRefs[:i] + w.SelectionsAfter = w.document.SelectionSets[ref].SelectionRefs[i+1:] + + switch w.document.Selections[j].Kind { + case ast.SelectionKindField: + 
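+			// At this point SelectionsBefore and SelectionsAfter hold the
+			// sibling selection refs of the selection being walked: for the
+			// selection set { a b c }, visiting b sees SelectionsBefore with
+			// the ref of a and SelectionsAfter with the ref of c.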
w.walkField(w.document.Selections[j].Ref) + case ast.SelectionKindFragmentSpread: + w.walkFragmentSpread(w.document.Selections[j].Ref) + case ast.SelectionKindInlineFragment: + w.walkInlineFragment(w.document.Selections[j].Ref) + } + } + + w.removeLastAncestor() + + w.visitor.LeaveSelectionSet(ref) + + w.decreaseDepth() +} + +func (w *SimpleWalker) walkField(ref int) { + w.increaseDepth() + + selectionsBefore := w.SelectionsBefore + selectionsAfter := w.SelectionsAfter + w.visitor.EnterField(ref) + + w.appendAncestor(ref, ast.NodeKindField) + + if len(w.document.Fields[ref].Arguments.Refs) != 0 { + for _, i := range w.document.Fields[ref].Arguments.Refs { + w.walkArgument(i) + } + } + + if w.document.Fields[ref].HasDirectives { + for _, i := range w.document.Fields[ref].Directives.Refs { + w.walkDirective(i) + } + } + + if w.document.Fields[ref].HasSelections { + w.walkSelectionSet(w.document.Fields[ref].SelectionSet) + } + + w.removeLastAncestor() + + w.SelectionsBefore = selectionsBefore + w.SelectionsAfter = selectionsAfter + w.visitor.LeaveField(ref) + + w.decreaseDepth() +} + +func (w *SimpleWalker) walkDirective(ref int) { + w.increaseDepth() + + w.visitor.EnterDirective(ref) + + w.appendAncestor(ref, ast.NodeKindDirective) + + if w.document.Directives[ref].HasArguments { + for _, i := range w.document.Directives[ref].Arguments.Refs { + w.walkArgument(i) + } + } + + w.removeLastAncestor() + + w.visitor.LeaveDirective(ref) + + w.decreaseDepth() +} + +func (w *SimpleWalker) walkArgument(ref int) { + w.increaseDepth() + + w.visitor.EnterArgument(ref) + w.visitor.LeaveArgument(ref) + + w.decreaseDepth() +} + +func (w *SimpleWalker) walkFragmentSpread(ref int) { + w.increaseDepth() + + w.visitor.EnterFragmentSpread(ref) + w.visitor.LeaveFragmentSpread(ref) + + w.decreaseDepth() +} + +func (w *SimpleWalker) walkInlineFragment(ref int) { + w.increaseDepth() + + selectionsBefore := w.SelectionsBefore + selectionsAfter := w.SelectionsAfter + w.visitor.EnterInlineFragment(ref) + + w.appendAncestor(ref, ast.NodeKindInlineFragment) + + if w.document.InlineFragments[ref].HasDirectives { + for _, i := range w.document.InlineFragments[ref].Directives.Refs { + w.walkDirective(i) + } + } + + if w.document.InlineFragments[ref].HasSelections { + w.walkSelectionSet(w.document.InlineFragments[ref].SelectionSet) + } + + w.removeLastAncestor() + + w.SelectionsBefore = selectionsBefore + w.SelectionsAfter = selectionsAfter + w.visitor.LeaveInlineFragment(ref) + + w.decreaseDepth() +} + +func (w *SimpleWalker) walkFragmentDefinition(ref int) { + w.increaseDepth() + + w.visitor.EnterFragmentDefinition(ref) + + w.appendAncestor(ref, ast.NodeKindFragmentDefinition) + + if w.document.FragmentDefinitions[ref].HasDirectives { + for _, i := range w.document.FragmentDefinitions[ref].Directives.Refs { + w.walkDirective(i) + } + } + + if w.document.FragmentDefinitions[ref].HasSelections { + w.walkSelectionSet(w.document.FragmentDefinitions[ref].SelectionSet) + } + + w.removeLastAncestor() + + w.visitor.LeaveFragmentDefinition(ref) + + w.decreaseDepth() +} + +func (w *SimpleWalker) walkObjectTypeDefinition(ref int) { + w.increaseDepth() + + w.visitor.EnterObjectTypeDefinition(ref) + + w.appendAncestor(ref, ast.NodeKindObjectTypeDefinition) + + if w.document.ObjectTypeDefinitions[ref].HasDirectives { + for _, i := range w.document.ObjectTypeDefinitions[ref].Directives.Refs { + w.walkDirective(i) + } + } + + if w.document.ObjectTypeDefinitions[ref].HasFieldDefinitions { + for _, i := range 
w.document.ObjectTypeDefinitions[ref].FieldsDefinition.Refs { + w.walkFieldDefinition(i) + } + } + + w.removeLastAncestor() + + w.visitor.LeaveObjectTypeDefinition(ref) + + w.decreaseDepth() +} + +func (w *SimpleWalker) walkObjectTypeExtension(ref int) { + w.increaseDepth() + + w.visitor.EnterObjectTypeExtension(ref) + w.appendAncestor(ref, ast.NodeKindObjectTypeExtension) + + if w.document.ObjectTypeExtensions[ref].HasDirectives { + for _, i := range w.document.ObjectTypeExtensions[ref].Directives.Refs { + w.walkDirective(i) + } + } + + if w.document.ObjectTypeExtensions[ref].HasFieldDefinitions { + for _, i := range w.document.ObjectTypeExtensions[ref].FieldsDefinition.Refs { + w.walkFieldDefinition(i) + } + } + + w.removeLastAncestor() + + w.visitor.LeaveObjectTypeExtension(ref) + + w.decreaseDepth() +} + +func (w *SimpleWalker) walkFieldDefinition(ref int) { + w.increaseDepth() + + w.visitor.EnterFieldDefinition(ref) + + w.appendAncestor(ref, ast.NodeKindFieldDefinition) + + if w.document.FieldDefinitions[ref].HasArgumentsDefinitions { + for _, i := range w.document.FieldDefinitions[ref].ArgumentsDefinition.Refs { + w.walkInputValueDefinition(i) + } + } + + if w.document.FieldDefinitions[ref].HasDirectives { + for _, i := range w.document.FieldDefinitions[ref].Directives.Refs { + w.walkDirective(i) + } + } + + w.removeLastAncestor() + + w.visitor.LeaveFieldDefinition(ref) + + w.decreaseDepth() +} + +func (w *SimpleWalker) walkInputValueDefinition(ref int) { + w.increaseDepth() + + w.visitor.EnterInputValueDefinition(ref) + + w.appendAncestor(ref, ast.NodeKindInputValueDefinition) + + if w.document.InputValueDefinitions[ref].HasDirectives { + for _, i := range w.document.InputValueDefinitions[ref].Directives.Refs { + w.walkDirective(i) + } + } + + w.removeLastAncestor() + + w.visitor.LeaveInputValueDefinition(ref) + + w.decreaseDepth() +} + +func (w *SimpleWalker) walkInterfaceTypeDefinition(ref int) { + w.increaseDepth() + + w.visitor.EnterInterfaceTypeDefinition(ref) + + w.appendAncestor(ref, ast.NodeKindInterfaceTypeDefinition) + + if w.document.InterfaceTypeDefinitions[ref].HasDirectives { + for _, i := range w.document.InterfaceTypeDefinitions[ref].Directives.Refs { + w.walkDirective(i) + } + } + + if w.document.InterfaceTypeDefinitions[ref].HasFieldDefinitions { + for _, i := range w.document.InterfaceTypeDefinitions[ref].FieldsDefinition.Refs { + w.walkFieldDefinition(i) + } + } + + w.removeLastAncestor() + + w.visitor.LeaveInterfaceTypeDefinition(ref) + + w.decreaseDepth() +} + +func (w *SimpleWalker) walkInterfaceTypeExtension(ref int) { + w.increaseDepth() + + w.visitor.EnterInterfaceTypeExtension(ref) + + w.appendAncestor(ref, ast.NodeKindInterfaceTypeExtension) + + if w.document.InterfaceTypeExtensions[ref].HasDirectives { + for _, i := range w.document.InterfaceTypeExtensions[ref].Directives.Refs { + w.walkDirective(i) + } + } + + if w.document.InterfaceTypeExtensions[ref].HasFieldDefinitions { + for _, i := range w.document.InterfaceTypeExtensions[ref].FieldsDefinition.Refs { + w.walkFieldDefinition(i) + } + } + + w.removeLastAncestor() + + w.visitor.LeaveInterfaceTypeExtension(ref) + + w.decreaseDepth() +} + +func (w *SimpleWalker) walkScalarTypeDefinition(ref int) { + w.increaseDepth() + + w.visitor.EnterScalarTypeDefinition(ref) + + w.appendAncestor(ref, ast.NodeKindScalarTypeDefinition) + + if w.document.ScalarTypeDefinitions[ref].HasDirectives { + for _, i := range w.document.ScalarTypeDefinitions[ref].Directives.Refs { + w.walkDirective(i) + } + } + + 
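+	// As in every other walk method, the ancestor pushed by appendAncestor
+	// above is popped again before the Leave callback fires, so visitors
+	// always observe a consistent Ancestors stack.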
w.removeLastAncestor() + + w.visitor.LeaveScalarTypeDefinition(ref) + + w.decreaseDepth() +} + +func (w *SimpleWalker) walkScalarTypeExtension(ref int) { + w.increaseDepth() + + w.visitor.EnterScalarTypeExtension(ref) + + w.appendAncestor(ref, ast.NodeKindScalarTypeExtension) + + if w.document.ScalarTypeExtensions[ref].HasDirectives { + for _, i := range w.document.ScalarTypeExtensions[ref].Directives.Refs { + w.walkDirective(i) + } + } + + w.removeLastAncestor() + + w.visitor.LeaveScalarTypeExtension(ref) + + w.decreaseDepth() +} + +func (w *SimpleWalker) walkUnionTypeDefinition(ref int) { + w.increaseDepth() + + w.visitor.EnterUnionTypeDefinition(ref) + + w.appendAncestor(ref, ast.NodeKindUnionTypeDefinition) + + if w.document.UnionTypeDefinitions[ref].HasDirectives { + for _, i := range w.document.UnionTypeDefinitions[ref].Directives.Refs { + w.walkDirective(i) + } + } + + if w.document.UnionTypeDefinitions[ref].HasUnionMemberTypes { + for _, i := range w.document.UnionTypeDefinitions[ref].UnionMemberTypes.Refs { + w.walkUnionMemberType(i) + } + } + + w.removeLastAncestor() + + w.visitor.LeaveUnionTypeDefinition(ref) + + w.decreaseDepth() +} + +func (w *SimpleWalker) walkUnionTypeExtension(ref int) { + w.increaseDepth() + + w.visitor.EnterUnionTypeExtension(ref) + + w.appendAncestor(ref, ast.NodeKindUnionTypeExtension) + + if w.document.UnionTypeExtensions[ref].HasDirectives { + for _, i := range w.document.UnionTypeExtensions[ref].Directives.Refs { + w.walkDirective(i) + } + } + + if w.document.UnionTypeExtensions[ref].HasUnionMemberTypes { + for _, i := range w.document.UnionTypeExtensions[ref].UnionMemberTypes.Refs { + w.walkUnionMemberType(i) + } + } + + w.removeLastAncestor() + + w.visitor.LeaveUnionTypeExtension(ref) + + w.decreaseDepth() +} + +func (w *SimpleWalker) walkUnionMemberType(ref int) { + w.increaseDepth() + + w.visitor.EnterUnionMemberType(ref) + + w.visitor.LeaveUnionMemberType(ref) + + w.decreaseDepth() +} + +func (w *SimpleWalker) walkEnumTypeDefinition(ref int) { + w.increaseDepth() + + w.visitor.EnterEnumTypeDefinition(ref) + + w.appendAncestor(ref, ast.NodeKindEnumTypeDefinition) + + if w.document.EnumTypeDefinitions[ref].HasDirectives { + for _, i := range w.document.EnumTypeDefinitions[ref].Directives.Refs { + w.walkDirective(i) + } + } + + if w.document.EnumTypeDefinitions[ref].HasEnumValuesDefinition { + for _, i := range w.document.EnumTypeDefinitions[ref].EnumValuesDefinition.Refs { + w.walkEnumValueDefinition(i) + } + } + + w.removeLastAncestor() + + w.visitor.LeaveEnumTypeDefinition(ref) + + w.decreaseDepth() +} + +func (w *SimpleWalker) walkEnumTypeExtension(ref int) { + w.increaseDepth() + + w.visitor.EnterEnumTypeExtension(ref) + + w.appendAncestor(ref, ast.NodeKindEnumTypeExtension) + + if w.document.EnumTypeExtensions[ref].HasDirectives { + for _, i := range w.document.EnumTypeExtensions[ref].Directives.Refs { + w.walkDirective(i) + } + } + + if w.document.EnumTypeExtensions[ref].HasEnumValuesDefinition { + for _, i := range w.document.EnumTypeExtensions[ref].EnumValuesDefinition.Refs { + w.walkEnumValueDefinition(i) + } + } + + w.removeLastAncestor() + + w.visitor.LeaveEnumTypeExtension(ref) + + w.decreaseDepth() +} + +func (w *SimpleWalker) walkEnumValueDefinition(ref int) { + w.increaseDepth() + + w.visitor.EnterEnumValueDefinition(ref) + + w.appendAncestor(ref, ast.NodeKindEnumValueDefinition) + + if w.document.EnumValueDefinitions[ref].HasDirectives { + for _, i := range w.document.EnumValueDefinitions[ref].Directives.Refs { + w.walkDirective(i) 
+ } + } + + w.removeLastAncestor() + + w.visitor.LeaveEnumValueDefinition(ref) + + w.decreaseDepth() +} + +func (w *SimpleWalker) walkInputObjectTypeDefinition(ref int) { + w.increaseDepth() + + w.visitor.EnterInputObjectTypeDefinition(ref) + + w.appendAncestor(ref, ast.NodeKindInputObjectTypeDefinition) + + if w.document.InputObjectTypeDefinitions[ref].HasDirectives { + for _, i := range w.document.InputObjectTypeDefinitions[ref].Directives.Refs { + w.walkDirective(i) + } + } + + if w.document.InputObjectTypeDefinitions[ref].HasInputFieldsDefinition { + for _, i := range w.document.InputObjectTypeDefinitions[ref].InputFieldsDefinition.Refs { + w.walkInputValueDefinition(i) + } + } + + w.removeLastAncestor() + + w.visitor.LeaveInputObjectTypeDefinition(ref) + + w.decreaseDepth() +} + +func (w *SimpleWalker) walkInputObjectTypeExtension(ref int) { + w.increaseDepth() + + w.visitor.EnterInputObjectTypeExtension(ref) + + w.appendAncestor(ref, ast.NodeKindInputObjectTypeExtension) + + if w.document.InputObjectTypeExtensions[ref].HasDirectives { + for _, i := range w.document.InputObjectTypeExtensions[ref].Directives.Refs { + w.walkDirective(i) + } + } + + if w.document.InputObjectTypeExtensions[ref].HasInputFieldsDefinition { + for _, i := range w.document.InputObjectTypeExtensions[ref].InputFieldsDefinition.Refs { + w.walkInputValueDefinition(i) + } + } + + w.removeLastAncestor() + + w.visitor.LeaveInputObjectTypeExtension(ref) + + w.decreaseDepth() +} + +func (w *SimpleWalker) walkDirectiveDefinition(ref int) { + w.increaseDepth() + + w.visitor.EnterDirectiveDefinition(ref) + + w.appendAncestor(ref, ast.NodeKindDirectiveDefinition) + + if w.document.DirectiveDefinitions[ref].HasArgumentsDefinitions { + for _, i := range w.document.DirectiveDefinitions[ref].ArgumentsDefinition.Refs { + w.walkInputValueDefinition(i) + } + } + + iter := w.document.DirectiveDefinitions[ref].DirectiveLocations.Iterable() + for iter.Next() { + w.walkDirectiveLocation(iter.Value()) + } + + w.removeLastAncestor() + + w.visitor.LeaveDirectiveDefinition(ref) + + w.decreaseDepth() +} + +func (w *SimpleWalker) walkDirectiveLocation(location ast.DirectiveLocation) { + w.increaseDepth() + + w.visitor.EnterDirectiveLocation(location) + + w.visitor.LeaveDirectiveLocation(location) + + w.decreaseDepth() +} + +func (w *SimpleWalker) walkSchemaDefinition(ref int) { + w.increaseDepth() + + w.visitor.EnterSchemaDefinition(ref) + + w.appendAncestor(ref, ast.NodeKindSchemaDefinition) + + if w.document.SchemaDefinitions[ref].HasDirectives { + for _, i := range w.document.SchemaDefinitions[ref].Directives.Refs { + w.walkDirective(i) + } + } + + for _, i := range w.document.SchemaDefinitions[ref].RootOperationTypeDefinitions.Refs { + w.walkRootOperationTypeDefinition(i) + } + + w.removeLastAncestor() + + w.visitor.LeaveSchemaDefinition(ref) + + w.decreaseDepth() +} + +func (w *SimpleWalker) walkSchemaExtension(ref int) { + w.increaseDepth() + + w.visitor.EnterSchemaExtension(ref) + + w.appendAncestor(ref, ast.NodeKindSchemaExtension) + + if w.document.SchemaExtensions[ref].HasDirectives { + for _, i := range w.document.SchemaExtensions[ref].Directives.Refs { + w.walkDirective(i) + } + } + + for _, i := range w.document.SchemaExtensions[ref].RootOperationTypeDefinitions.Refs { + w.walkRootOperationTypeDefinition(i) + } + + w.removeLastAncestor() + + w.visitor.LeaveSchemaExtension(ref) + + w.decreaseDepth() +} + +func (w *SimpleWalker) walkRootOperationTypeDefinition(ref int) { + w.increaseDepth() + + 
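+	// Leaf nodes such as root operation type definitions, arguments, fragment
+	// spreads and union member types have no children to descend into, so
+	// they are entered and left without pushing an ancestor.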
+	w.visitor.EnterRootOperationTypeDefinition(ref)
+
+	w.visitor.LeaveRootOperationTypeDefinition(ref)
+
+	w.decreaseDepth()
+}
diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor/visitor.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor/visitor.go
new file mode 100644
index 00000000000..2e747d00392
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor/visitor.go
@@ -0,0 +1,3648 @@
+package astvisitor
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/TykTechnologies/graphql-go-tools/pkg/ast"
+	"github.com/TykTechnologies/graphql-go-tools/pkg/lexer/literal"
+	"github.com/TykTechnologies/graphql-go-tools/pkg/operationreport"
+)
+
+var (
+	ErrDocumentMustNotBeNil   = fmt.Errorf("document must not be nil")
+	ErrDefinitionMustNotBeNil = fmt.Errorf("definition must not be nil when walking operations")
+)
+
+// Walker orchestrates the process of walking an AST and calling all registered callbacks
+// Always use NewWalker to instantiate a new Walker
+type Walker struct {
+	// Ancestors is the slice of Nodes leading to the current Node in a callback
+	// don't keep a reference to this slice, always copy it if you want to work with it after the callback returned
+	Ancestors []ast.Node
+	// Path is the slice of PathItems leading to the current Node
+	// don't keep a reference to this slice, always copy it if you want to work with it after the callback returned
+	Path ast.Path
+	// EnclosingTypeDefinition is the TypeDefinition Node of the parent object of the current callback
+	// e.g. if the current callback is a Field the EnclosingTypeDefinition will be the TypeDefinition of the parent object of such Field
+	EnclosingTypeDefinition ast.Node
+	// SelectionsBefore is the slice of references to selections before the current selection
+	// This is only valid when inside a SelectionSet
+	SelectionsBefore []int
+	// SelectionsAfter is the slice of references to selections after the current selection
+	// This is only valid when inside a SelectionSet
+	SelectionsAfter []int
+	// Report is the object to collect errors when walking the AST
+	Report          *operationreport.Report
+	CurrentRef      int
+	CurrentKind     ast.NodeKind
+	document        *ast.Document
+	definition      *ast.Document
+	visitors        visitors
+	Depth           int
+	typeDefinitions []ast.Node
+	stop            bool
+	skip            bool
+	revisit         bool
+	filter          VisitorFilter
+	deferred        []func()
+}
+
+// NewWalker returns a fully initialized Walker
+func NewWalker(ancestorSize int) Walker {
+	return Walker{
+		Ancestors:       make([]ast.Node, 0, ancestorSize),
+		Path:            make([]ast.PathItem, 0, ancestorSize),
+		typeDefinitions: make([]ast.Node, 0, ancestorSize),
+		deferred:        make([]func(), 0, 8),
+	}
+}
+
+type (
+	// EnterOperationDefinitionVisitor is the callback when the walker enters an operation definition
+	EnterOperationDefinitionVisitor interface {
+		// EnterOperationDefinition gets called when the walker enters an operation definition
+		// ref is the reference to the operation definition on the AST
+		EnterOperationDefinition(ref int)
+	}
+	// LeaveOperationDefinitionVisitor is the callback when the walker leaves an operation definition
+	LeaveOperationDefinitionVisitor interface {
+		// LeaveOperationDefinition gets called when the walker leaves an operation definition
+		// ref is the reference to the operation definition on the AST
+		LeaveOperationDefinition(ref int)
+	}
+	// OperationDefinitionVisitor is the callback when the walker enters or leaves an operation
+	OperationDefinitionVisitor interface {
+		EnterOperationDefinitionVisitor
+		LeaveOperationDefinitionVisitor
+	}
+	// EnterSelectionSetVisitor is the callback when the walker enters a selection set
+	EnterSelectionSetVisitor interface {
+		// EnterSelectionSet gets called when the walker enters a selection set
+		// ref is the reference to the selection set on the AST
+		EnterSelectionSet(ref int)
+	}
+	// LeaveSelectionSetVisitor is the callback when the walker leaves a selection set
+	LeaveSelectionSetVisitor interface {
+		// LeaveSelectionSet gets called when the walker leaves a selection set
+		// ref is the reference to the selection set on the AST
+		LeaveSelectionSet(ref int)
+	}
+	// SelectionSetVisitor is the callback when the walker enters or leaves a selection set
+	SelectionSetVisitor interface {
+		EnterSelectionSetVisitor
+		LeaveSelectionSetVisitor
+	}
+	// EnterFieldVisitor is the callback when the walker enters a field
+	EnterFieldVisitor interface {
+		// EnterField gets called when the walker enters a field
+		// ref is the reference to the field on the AST
+		EnterField(ref int)
+	}
+	// LeaveFieldVisitor is the callback when the walker leaves a field
+	LeaveFieldVisitor interface {
+		// LeaveField gets called when the walker leaves a field
+		// ref is the reference to the field on the AST
+		LeaveField(ref int)
+	}
+	// FieldVisitor is the callback when the walker enters or leaves a field
+	FieldVisitor interface {
+		EnterFieldVisitor
+		LeaveFieldVisitor
+	}
+	// EnterArgumentVisitor is the callback when the walker enters an argument
+	EnterArgumentVisitor interface {
+		// EnterArgument gets called when the walker enters an argument
+		// ref is the reference to the argument on the AST
+		EnterArgument(ref int)
+	}
+	// LeaveArgumentVisitor is the callback when the walker leaves an argument
+	LeaveArgumentVisitor interface {
+		// LeaveArgument gets called when the walker leaves an argument
+		// ref is the reference to the argument on the AST
+		LeaveArgument(ref int)
+	}
+	// ArgumentVisitor is the callback when the walker enters or leaves an argument
+	ArgumentVisitor interface {
+		EnterArgumentVisitor
+		LeaveArgumentVisitor
+	}
+	// EnterFragmentSpreadVisitor is the callback when the walker enters a fragment spread
+	EnterFragmentSpreadVisitor interface {
+		// EnterFragmentSpread gets called when the walker enters a fragment spread
+		// ref is the reference to the fragment spread on the AST
+		EnterFragmentSpread(ref int)
+	}
+	// LeaveFragmentSpreadVisitor is the callback when the walker leaves a fragment spread
+	LeaveFragmentSpreadVisitor interface {
+		// LeaveFragmentSpread gets called when the walker leaves a fragment spread
+		// ref is the reference to the fragment spread on the AST
+		LeaveFragmentSpread(ref int)
+	}
+	// FragmentSpreadVisitor is the callback when the walker enters or leaves a fragment spread
+	FragmentSpreadVisitor interface {
+		EnterFragmentSpreadVisitor
+		LeaveFragmentSpreadVisitor
+	}
+	// EnterInlineFragmentVisitor is the callback when the walker enters an inline fragment
+	EnterInlineFragmentVisitor interface {
+		// EnterInlineFragment gets called when the walker enters an inline fragment
+		// ref is the reference to the inline fragment on the AST
+		EnterInlineFragment(ref int)
+	}
+	// LeaveInlineFragmentVisitor is the callback when the walker leaves an inline fragment
+	LeaveInlineFragmentVisitor interface {
+		// LeaveInlineFragment gets called when the walker leaves an inline fragment
+		// ref is the reference to the inline fragment on the AST
+		LeaveInlineFragment(ref int)
+	}
+	// InlineFragmentVisitor is the callback when the walker enters or leaves an inline fragment
+	InlineFragmentVisitor interface {
+		EnterInlineFragmentVisitor
+		LeaveInlineFragmentVisitor
+	}
+	// EnterFragmentDefinitionVisitor is the callback when the walker enters a fragment definition
+	EnterFragmentDefinitionVisitor interface {
+		// EnterFragmentDefinition gets called when the walker enters a fragment definition
+		// ref is the reference to the fragment definition on the AST
+		EnterFragmentDefinition(ref int)
+	}
+	// LeaveFragmentDefinitionVisitor is the callback when the walker leaves a fragment definition
+	LeaveFragmentDefinitionVisitor interface {
+		// LeaveFragmentDefinition gets called when the walker leaves a fragment definition
+		// ref is the reference to the fragment definition on the AST
+		LeaveFragmentDefinition(ref int)
+	}
+	// FragmentDefinitionVisitor is the callback when the walker enters or leaves a fragment definition
+	FragmentDefinitionVisitor interface {
+		EnterFragmentDefinitionVisitor
+		LeaveFragmentDefinitionVisitor
+	}
+	// EnterVariableDefinitionVisitor is the callback when the walker enters a variable definition
+	EnterVariableDefinitionVisitor interface {
+		// EnterVariableDefinition gets called when the walker enters a variable definition
+		// ref is the reference to the variable definition on the AST
+		EnterVariableDefinition(ref int)
+	}
+	// LeaveVariableDefinitionVisitor is the callback when the walker leaves a variable definition
+	LeaveVariableDefinitionVisitor interface {
+		// LeaveVariableDefinition gets called when the walker leaves a variable definition
+		// ref is the reference to the variable definition on the AST
+		LeaveVariableDefinition(ref int)
+	}
+	// VariableDefinitionVisitor is the callback when the walker enters or leaves a variable definition
+	VariableDefinitionVisitor interface {
+		EnterVariableDefinitionVisitor
+		LeaveVariableDefinitionVisitor
+	}
+	// EnterDirectiveVisitor is the callback when the walker enters a directive
+	EnterDirectiveVisitor interface {
+		// EnterDirective gets called when the walker enters a directive
+		// ref is the reference to the directive on the AST
+		EnterDirective(ref int)
+	}
+	// LeaveDirectiveVisitor is the callback when the walker leaves a directive
+	LeaveDirectiveVisitor interface {
+		// LeaveDirective gets called when the walker leaves a directive
+		// ref is the reference to the directive on the AST
+		LeaveDirective(ref int)
+	}
+	// DirectiveVisitor is the callback when the walker enters or leaves a directive
+	DirectiveVisitor interface {
+		EnterDirectiveVisitor
+		LeaveDirectiveVisitor
+	}
+	// EnterObjectTypeDefinitionVisitor is the callback when the walker enters an object type definition
+	EnterObjectTypeDefinitionVisitor interface {
+		// EnterObjectTypeDefinition gets called when the walker enters an object type definition
+		// ref is the reference to the object type definition on the AST
+		EnterObjectTypeDefinition(ref int)
+	}
+	// LeaveObjectTypeDefinitionVisitor is the callback when the walker leaves an object type definition
+	LeaveObjectTypeDefinitionVisitor interface {
+		// LeaveObjectTypeDefinition gets called when the walker leaves an object type definition
+		// ref is the reference to the object type definition on the AST
+		LeaveObjectTypeDefinition(ref int)
+	}
+	// ObjectTypeDefinitionVisitor is the callback when the walker enters or leaves an object type definition
+	ObjectTypeDefinitionVisitor interface {
+		EnterObjectTypeDefinitionVisitor
+		LeaveObjectTypeDefinitionVisitor
+	}
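+	// A minimal visitor sketch (illustrative, not part of this package):
+	// embedding *Walker gives the callbacks access to Ancestors, Path and
+	// Report, which is the same pattern the astvalidation rules use.
+	//
+	//	type objectTypeCollector struct {
+	//		*Walker
+	//		refs []int
+	//	}
+	//
+	//	func (c *objectTypeCollector) EnterObjectTypeDefinition(ref int) {
+	//		c.refs = append(c.refs, ref) // ref indexes document.ObjectTypeDefinitions
+	//	}
+	//
+	//	func (c *objectTypeCollector) LeaveObjectTypeDefinition(ref int) {}
+	//
+	//	walker := NewWalker(48)
+	//	collector := &objectTypeCollector{Walker: &walker}
+	//	walker.RegisterObjectTypeDefinitionVisitor(collector)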
+	// EnterObjectTypeExtensionVisitor is the callback when the walker enters an object type extension
+	EnterObjectTypeExtensionVisitor interface {
+		// EnterObjectTypeExtension gets called when the walker enters an object type extension
+		// ref is the reference to the object type extension on the AST
+		EnterObjectTypeExtension(ref int)
+	}
+	// LeaveObjectTypeExtensionVisitor is the callback when the walker leaves an object type extension
+	LeaveObjectTypeExtensionVisitor interface {
+		// LeaveObjectTypeExtension gets called when the walker leaves an object type extension
+		// ref is the reference to the object type extension on the AST
+		LeaveObjectTypeExtension(ref int)
+	}
+	// ObjectTypeExtensionVisitor is the callback when the walker enters or leaves an object type extension
+	ObjectTypeExtensionVisitor interface {
+		EnterObjectTypeExtensionVisitor
+		LeaveObjectTypeExtensionVisitor
+	}
+	// EnterFieldDefinitionVisitor is the callback when the walker enters a field definition
+	EnterFieldDefinitionVisitor interface {
+		// EnterFieldDefinition gets called when the walker enters a field definition
+		// ref is the reference to the field definition on the AST
+		EnterFieldDefinition(ref int)
+	}
+	// LeaveFieldDefinitionVisitor is the callback when the walker leaves a field definition
+	LeaveFieldDefinitionVisitor interface {
+		// LeaveFieldDefinition gets called when the walker leaves a field definition
+		// ref is the reference to the field definition on the AST
+		LeaveFieldDefinition(ref int)
+	}
+	// FieldDefinitionVisitor is the callback when the walker enters or leaves a field definition
+	FieldDefinitionVisitor interface {
+		EnterFieldDefinitionVisitor
+		LeaveFieldDefinitionVisitor
+	}
+	// EnterInputValueDefinitionVisitor is the callback when the walker enters an input value definition
+	EnterInputValueDefinitionVisitor interface {
+		// EnterInputValueDefinition gets called when the walker enters an input value definition
+		// ref is the reference to the input value definition on the AST
+		EnterInputValueDefinition(ref int)
+	}
+	// LeaveInputValueDefinitionVisitor is the callback when the walker leaves an input value definition
+	LeaveInputValueDefinitionVisitor interface {
+		// LeaveInputValueDefinition gets called when the walker leaves an input value definition
+		// ref is the reference to the input value definition on the AST
+		LeaveInputValueDefinition(ref int)
+	}
+	// InputValueDefinitionVisitor is the callback when the walker enters or leaves an input value definition
+	InputValueDefinitionVisitor interface {
+		EnterInputValueDefinitionVisitor
+		LeaveInputValueDefinitionVisitor
+	}
+	// EnterInterfaceTypeDefinitionVisitor is the callback when the walker enters an interface type definition
+	EnterInterfaceTypeDefinitionVisitor interface {
+		// EnterInterfaceTypeDefinition gets called when the walker enters an interface type definition
+		// ref is the reference to the interface type definition on the AST
+		EnterInterfaceTypeDefinition(ref int)
+	}
+	// LeaveInterfaceTypeDefinitionVisitor is the callback when the walker leaves an interface type definition
+	LeaveInterfaceTypeDefinitionVisitor interface {
+		// LeaveInterfaceTypeDefinition gets called when the walker leaves an interface type definition
+		// ref is the reference to the interface type definition on the AST
+		LeaveInterfaceTypeDefinition(ref int)
+	}
+	// InterfaceTypeDefinitionVisitor is the callback when the walker enters or leaves an interface type definition
+	InterfaceTypeDefinitionVisitor interface {
+		EnterInterfaceTypeDefinitionVisitor
+		LeaveInterfaceTypeDefinitionVisitor
+	}
+	// EnterInterfaceTypeExtensionVisitor is the callback when the walker enters an interface type extension
+	EnterInterfaceTypeExtensionVisitor interface {
+		// EnterInterfaceTypeExtension gets called when the walker enters an interface type extension
+		// ref is the reference to the interface type extension on the AST
+		EnterInterfaceTypeExtension(ref int)
+	}
+	// LeaveInterfaceTypeExtensionVisitor is the callback when the walker leaves an interface type extension
+	LeaveInterfaceTypeExtensionVisitor interface {
+		// LeaveInterfaceTypeExtension gets called when the walker leaves an interface type extension
+		// ref is the reference to the interface type extension on the AST
+		LeaveInterfaceTypeExtension(ref int)
+	}
+	// InterfaceTypeExtensionVisitor is the callback when the walker enters or leaves an interface type extension
+	InterfaceTypeExtensionVisitor interface {
+		EnterInterfaceTypeExtensionVisitor
+		LeaveInterfaceTypeExtensionVisitor
+	}
+	// EnterScalarTypeDefinitionVisitor is the callback when the walker enters a scalar type definition
+	EnterScalarTypeDefinitionVisitor interface {
+		// EnterScalarTypeDefinition gets called when the walker enters a scalar type definition
+		// ref is the reference to the scalar type definition on the AST
+		EnterScalarTypeDefinition(ref int)
+	}
+	// LeaveScalarTypeDefinitionVisitor is the callback when the walker leaves a scalar type definition
+	LeaveScalarTypeDefinitionVisitor interface {
+		// LeaveScalarTypeDefinition gets called when the walker leaves a scalar type definition
+		// ref is the reference to the scalar type definition on the AST
+		LeaveScalarTypeDefinition(ref int)
+	}
+	// ScalarTypeDefinitionVisitor is the callback when the walker enters or leaves a scalar type definition
+	ScalarTypeDefinitionVisitor interface {
+		EnterScalarTypeDefinitionVisitor
+		LeaveScalarTypeDefinitionVisitor
+	}
+	// EnterScalarTypeExtensionVisitor is the callback when the walker enters a scalar type extension
+	EnterScalarTypeExtensionVisitor interface {
+		// EnterScalarTypeExtension gets called when the walker enters a scalar type extension
+		// ref is the reference to the scalar type extension on the AST
+		EnterScalarTypeExtension(ref int)
+	}
+	// LeaveScalarTypeExtensionVisitor is the callback when the walker leaves a scalar type extension
+	LeaveScalarTypeExtensionVisitor interface {
+		// LeaveScalarTypeExtension gets called when the walker leaves a scalar type extension
+		// ref is the reference to the scalar type extension on the AST
+		LeaveScalarTypeExtension(ref int)
+	}
+	// ScalarTypeExtensionVisitor is the callback when the walker enters or leaves a scalar type extension
+	ScalarTypeExtensionVisitor interface {
+		EnterScalarTypeExtensionVisitor
+		LeaveScalarTypeExtensionVisitor
+	}
+	// EnterUnionTypeDefinitionVisitor is the callback when the walker enters a union type definition
+	EnterUnionTypeDefinitionVisitor interface {
+		// EnterUnionTypeDefinition gets called when the walker enters a union type definition
+		// ref is the reference to the union type definition on the AST
+		EnterUnionTypeDefinition(ref int)
+	}
+	// LeaveUnionTypeDefinitionVisitor is the callback when the walker leaves a union type definition
+	LeaveUnionTypeDefinitionVisitor interface {
+		// LeaveUnionTypeDefinition gets called when the walker leaves a union type definition
+		// ref is the reference to the union type definition on the AST
+		LeaveUnionTypeDefinition(ref int)
+	}
+	// UnionTypeDefinitionVisitor is the callback when the walker enters or leaves a union type definition
+	UnionTypeDefinitionVisitor interface {
+		EnterUnionTypeDefinitionVisitor
+		LeaveUnionTypeDefinitionVisitor
+	}
+	// EnterUnionTypeExtensionVisitor is the callback when the walker enters a union type extension
+	EnterUnionTypeExtensionVisitor interface {
+		// EnterUnionTypeExtension gets called when the walker enters a union type extension
+		// ref is the reference to the union type extension on the AST
+		EnterUnionTypeExtension(ref int)
+	}
+	// LeaveUnionTypeExtensionVisitor is the callback when the walker leaves a union type extension
+	LeaveUnionTypeExtensionVisitor interface {
+		// LeaveUnionTypeExtension gets called when the walker leaves a union type extension
+		// ref is the reference to the union type extension on the AST
+		LeaveUnionTypeExtension(ref int)
+	}
+	// UnionTypeExtensionVisitor is the callback when the walker enters or leaves a union type extension
+	UnionTypeExtensionVisitor interface {
+		EnterUnionTypeExtensionVisitor
+		LeaveUnionTypeExtensionVisitor
+	}
+	// EnterUnionMemberTypeVisitor is the callback when the walker enters a union member type
+	EnterUnionMemberTypeVisitor interface {
+		// EnterUnionMemberType gets called when the walker enters a union member type
+		// ref is the reference to the union member type on the AST
+		EnterUnionMemberType(ref int)
+	}
+	// LeaveUnionMemberTypeVisitor is the callback when the walker leaves a union member type
+	LeaveUnionMemberTypeVisitor interface {
+		// LeaveUnionMemberType gets called when the walker leaves a union member type
+		// ref is the reference to the union member type on the AST
+		LeaveUnionMemberType(ref int)
+	}
+	// UnionMemberTypeVisitor is the callback when the walker enters or leaves a union member type
+	UnionMemberTypeVisitor interface {
+		EnterUnionMemberTypeVisitor
+		LeaveUnionMemberTypeVisitor
+	}
+	// EnterEnumTypeDefinitionVisitor is the callback when the walker enters an enum type definition
+	EnterEnumTypeDefinitionVisitor interface {
+		// EnterEnumTypeDefinition gets called when the walker enters an enum type definition
+		// ref is the reference to the enum type definition on the AST
+		EnterEnumTypeDefinition(ref int)
+	}
+	// LeaveEnumTypeDefinitionVisitor is the callback when the walker leaves an enum type definition
+	LeaveEnumTypeDefinitionVisitor interface {
+		// LeaveEnumTypeDefinition gets called when the walker leaves an enum type definition
+		// ref is the reference to the enum type definition on the AST
+		LeaveEnumTypeDefinition(ref int)
+	}
+	// EnumTypeDefinitionVisitor is the callback when the walker enters or leaves an enum type definition
+	EnumTypeDefinitionVisitor interface {
+		EnterEnumTypeDefinitionVisitor
+		LeaveEnumTypeDefinitionVisitor
+	}
+	// EnterEnumTypeExtensionVisitor is the callback when the walker enters an enum type extension
+	EnterEnumTypeExtensionVisitor interface {
+		// EnterEnumTypeExtension gets called when the walker enters an enum type extension
+		// ref is the reference to the enum type extension on the AST
+		EnterEnumTypeExtension(ref int)
+	}
+	// LeaveEnumTypeExtensionVisitor is the callback when the walker leaves an enum type extension
+	LeaveEnumTypeExtensionVisitor interface {
+		// LeaveEnumTypeExtension gets called when the walker leaves an enum type extension
+		// ref is the reference to the enum type extension on the AST
+		LeaveEnumTypeExtension(ref int)
+	}
+	// EnumTypeExtensionVisitor is the callback when the walker enters or leaves an enum type extension
+	EnumTypeExtensionVisitor interface {
+		EnterEnumTypeExtensionVisitor
+		LeaveEnumTypeExtensionVisitor
+	}
+	// EnterEnumValueDefinitionVisitor is the callback when the walker enters an enum value definition
+	EnterEnumValueDefinitionVisitor interface {
+		// EnterEnumValueDefinition gets called when the walker enters an enum value definition
+		// ref is the reference to the enum value definition on the AST
+		EnterEnumValueDefinition(ref int)
+	}
+	// LeaveEnumValueDefinitionVisitor is the callback when the walker leaves an enum value definition
+	LeaveEnumValueDefinitionVisitor interface {
+		// LeaveEnumValueDefinition gets called when the walker leaves an enum value definition
+		// ref is the reference to the enum value definition on the AST
+		LeaveEnumValueDefinition(ref int)
+	}
+	// EnumValueDefinitionVisitor is the callback when the walker enters or leaves an enum value definition
+	EnumValueDefinitionVisitor interface {
+		EnterEnumValueDefinitionVisitor
+		LeaveEnumValueDefinitionVisitor
+	}
+	// EnterInputObjectTypeDefinitionVisitor is the callback when the walker enters an input object type definition
+	EnterInputObjectTypeDefinitionVisitor interface {
+		// EnterInputObjectTypeDefinition gets called when the walker enters an input object type definition
+		// ref is the reference to the input object type definition on the AST
+		EnterInputObjectTypeDefinition(ref int)
+	}
+	// LeaveInputObjectTypeDefinitionVisitor is the callback when the walker leaves an input object type definition
+	LeaveInputObjectTypeDefinitionVisitor interface {
+		// LeaveInputObjectTypeDefinition gets called when the walker leaves an input object type definition
+		// ref is the reference to the input object type definition on the AST
+		LeaveInputObjectTypeDefinition(ref int)
+	}
+	// InputObjectTypeDefinitionVisitor is the callback when the walker enters or leaves an input object type definition
+	InputObjectTypeDefinitionVisitor interface {
+		EnterInputObjectTypeDefinitionVisitor
+		LeaveInputObjectTypeDefinitionVisitor
+	}
+	// EnterInputObjectTypeExtensionVisitor is the callback when the walker enters an input object type extension
+	EnterInputObjectTypeExtensionVisitor interface {
+		// EnterInputObjectTypeExtension gets called when the walker enters an input object type extension
+		// ref is the reference to the input object type extension on the AST
+		EnterInputObjectTypeExtension(ref int)
+	}
+	// LeaveInputObjectTypeExtensionVisitor is the callback when the walker leaves an input object type extension
+	LeaveInputObjectTypeExtensionVisitor interface {
+		// LeaveInputObjectTypeExtension gets called when the walker leaves an input object type extension
+		// ref is the reference to the input object type extension on the AST
+		LeaveInputObjectTypeExtension(ref int)
+	}
+	// InputObjectTypeExtensionVisitor is the callback when the walker enters or leaves an input object type extension
+	InputObjectTypeExtensionVisitor interface {
+		EnterInputObjectTypeExtensionVisitor
+		LeaveInputObjectTypeExtensionVisitor
+	}
+	// EnterDirectiveDefinitionVisitor is the callback when the walker enters a directive definition
+	EnterDirectiveDefinitionVisitor interface {
+		// EnterDirectiveDefinition gets called when the walker enters a directive definition
+		// ref is the reference to the directive definition on the AST
+		EnterDirectiveDefinition(ref int)
+	}
+	// LeaveDirectiveDefinitionVisitor is the callback when the walker leaves a directive definition
+	LeaveDirectiveDefinitionVisitor interface {
+		// LeaveDirectiveDefinition gets called when the walker leaves a directive definition
+		// ref is the reference to the directive definition on the AST
+		LeaveDirectiveDefinition(ref int)
+	}
+	// DirectiveDefinitionVisitor is the callback when the walker enters or leaves a directive definition
+	DirectiveDefinitionVisitor interface {
+		EnterDirectiveDefinitionVisitor
+		LeaveDirectiveDefinitionVisitor
+	}
+	// EnterDirectiveLocationVisitor is the callback when the walker enters a directive location
+	EnterDirectiveLocationVisitor interface {
+		// EnterDirectiveLocation gets called when the walker enters a directive location
+		// location is the directive location being visited
+		EnterDirectiveLocation(location ast.DirectiveLocation)
+	}
+	// LeaveDirectiveLocationVisitor is the callback when the walker leaves a directive location
+	LeaveDirectiveLocationVisitor interface {
+		// LeaveDirectiveLocation gets called when the walker leaves a directive location
+		// location is the directive location being visited
+		LeaveDirectiveLocation(location ast.DirectiveLocation)
+	}
+	// DirectiveLocationVisitor is the callback when the walker enters or leaves a directive location
+	DirectiveLocationVisitor interface {
+		EnterDirectiveLocationVisitor
+		LeaveDirectiveLocationVisitor
+	}
+	// EnterSchemaDefinitionVisitor is the callback when the walker enters a schema definition
+	EnterSchemaDefinitionVisitor interface {
+		// EnterSchemaDefinition gets called when the walker enters a schema definition
+		// ref is the reference to the schema definition on the AST
+		EnterSchemaDefinition(ref int)
+	}
+	// LeaveSchemaDefinitionVisitor is the callback when the walker leaves a schema definition
+	LeaveSchemaDefinitionVisitor interface {
+		// LeaveSchemaDefinition gets called when the walker leaves a schema definition
+		// ref is the reference to the schema definition on the AST
+		LeaveSchemaDefinition(ref int)
+	}
+	// SchemaDefinitionVisitor is the callback when the walker enters or leaves a schema definition
+	SchemaDefinitionVisitor interface {
+		EnterSchemaDefinitionVisitor
+		LeaveSchemaDefinitionVisitor
+	}
+	// EnterSchemaExtensionVisitor is the callback when the walker enters a schema extension
+	EnterSchemaExtensionVisitor interface {
+		// EnterSchemaExtension gets called when the walker enters a schema extension
+		// ref is the reference to the schema extension on the AST
+		EnterSchemaExtension(ref int)
+	}
+	// LeaveSchemaExtensionVisitor is the callback when the walker leaves a schema extension
+	LeaveSchemaExtensionVisitor interface {
+		// LeaveSchemaExtension gets called when the walker leaves a schema extension
+		// ref is the reference to the schema extension on the AST
+		LeaveSchemaExtension(ref int)
+	}
+	// SchemaExtensionVisitor is the callback when the walker enters or leaves a schema extension
+	SchemaExtensionVisitor interface {
+		EnterSchemaExtensionVisitor
+		LeaveSchemaExtensionVisitor
+	}
+	// EnterRootOperationTypeDefinitionVisitor is the callback when the walker enters a root operation type definition
+	EnterRootOperationTypeDefinitionVisitor interface {
+		// EnterRootOperationTypeDefinition gets called when the walker enters a root operation type definition
+		// ref is the reference to the root operation type definition on the AST
+		EnterRootOperationTypeDefinition(ref int)
+	}
+	// LeaveRootOperationTypeDefinitionVisitor is the callback when the walker leaves a root operation type definition
+	LeaveRootOperationTypeDefinitionVisitor interface {
+		// LeaveRootOperationTypeDefinition gets called when the walker leaves a root operation type definition
+		// ref is the reference to the root operation type definition on the AST
+		LeaveRootOperationTypeDefinition(ref int)
+	}
+	// RootOperationTypeDefinitionVisitor is the callback when the walker
enters or leaves a root operation type definition + RootOperationTypeDefinitionVisitor interface { + EnterRootOperationTypeDefinitionVisitor + LeaveRootOperationTypeDefinitionVisitor + } + // TypeSystemVisitor is the callback when the walker enters or leaves any of the type definitions + TypeSystemVisitor interface { + ObjectTypeDefinitionVisitor + ObjectTypeExtensionVisitor + FieldDefinitionVisitor + InputValueDefinitionVisitor + InterfaceTypeDefinitionVisitor + InterfaceTypeExtensionVisitor + ScalarTypeDefinitionVisitor + ScalarTypeExtensionVisitor + UnionTypeDefinitionVisitor + UnionTypeExtensionVisitor + UnionMemberTypeVisitor + EnumTypeDefinitionVisitor + EnumTypeExtensionVisitor + EnumValueDefinitionVisitor + InputObjectTypeDefinitionVisitor + InputObjectTypeExtensionVisitor + DirectiveDefinitionVisitor + DirectiveLocationVisitor + SchemaDefinitionVisitor + SchemaExtensionVisitor + RootOperationTypeDefinitionVisitor + } + // ExecutableVisitor is the callback when the walker enters or leaves any of the executable definitions + ExecutableVisitor interface { + OperationDefinitionVisitor + SelectionSetVisitor + FieldVisitor + ArgumentVisitor + FragmentSpreadVisitor + InlineFragmentVisitor + FragmentDefinitionVisitor + VariableDefinitionVisitor + DirectiveVisitor + } + // EnterDocumentVisitor is the callback when the walker enters a document + EnterDocumentVisitor interface { + // EnterDocument gets called when the walker enters a document + EnterDocument(operation, definition *ast.Document) + } + LeaveDocumentVisitor interface { + // LeaveDocument gets called when the walker leaves a document + LeaveDocument(operation, definition *ast.Document) + } + // DocumentVisitor is the callback when the walker enters or leaves a document + DocumentVisitor interface { + EnterDocumentVisitor + LeaveDocumentVisitor + } + // AllNodesVisitor is the callback when the walker enters or leaves any Node + AllNodesVisitor interface { + DocumentVisitor + TypeSystemVisitor + ExecutableVisitor + } + // VisitorFilter can be defined to prevent specific visitors from getting invoked + VisitorFilter interface { + AllowVisitor(kind VisitorKind, ref int, visitor interface{}) bool + } +) + +type VisitorKind int + +const ( + EnterOperation VisitorKind = iota + 1 + LeaveOperation + EnterSelectionSet + LeaveSelectionSet + EnterField + LeaveField + EnterArgument + LeaveArgument + EnterFragmentSpread + LeaveFragmentSpread + EnterInlineFragment + LeaveInlineFragment + EnterFragmentDefinition + LeaveFragmentDefinition + EnterDocument + LeaveDocument + EnterVariableDefinition + LeaveVariableDefinition + EnterDirective + LeaveDirective + EnterObjectTypeDefinition + LeaveObjectTypeDefinition + EnterFieldDefinition + LeaveFieldDefinition + EnterInputValueDefinition + LeaveInputValueDefinition + EnterInterfaceTypeDefinition + LeaveInterfaceTypeDefinition + EnterInterfaceTypeExtension + LeaveInterfaceTypeExtension + EnterObjectTypeExtension + LeaveObjectTypeExtension + EnterScalarTypeDefinition + LeaveScalarTypeDefinition + EnterScalarTypeExtension + LeaveScalarTypeExtension + EnterUnionTypeDefinition + LeaveUnionTypeDefinition + EnterUnionTypeExtension + LeaveUnionTypeExtension + EnterUnionMemberType + LeaveUnionMemberType + EnterEnumTypeDefinition + LeaveEnumTypeDefinition + EnterEnumTypeExtension + LeaveEnumTypeExtension + EnterEnumValueDefinition + LeaveEnumValueDefinition + EnterInputObjectTypeDefinition + LeaveInputObjectTypeDefinition + EnterInputObjectTypeExtension + LeaveInputObjectTypeExtension + EnterDirectiveDefinition 
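+	// Each VisitorKind value identifies one callback; a VisitorFilter is
+	// consulted via AllowVisitor before the corresponding callback is invoked
+	// and can veto it. A filter sketch (illustrative, not part of this
+	// package) that suppresses all LeaveField callbacks:
+	//
+	//	type skipLeaveFieldFilter struct{}
+	//
+	//	func (skipLeaveFieldFilter) AllowVisitor(kind VisitorKind, ref int, visitor interface{}) bool {
+	//		return kind != LeaveField
+	//	}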
+ LeaveDirectiveDefinition + EnterDirectiveLocation + LeaveDirectiveLocation + EnterSchemaDefinition + LeaveSchemaDefinition + EnterSchemaExtension + LeaveSchemaExtension + EnterRootOperationTypeDefinition + LeaveRootOperationTypeDefinition +) + +type visitors struct { + enterOperation []EnterOperationDefinitionVisitor + leaveOperation []LeaveOperationDefinitionVisitor + enterSelectionSet []EnterSelectionSetVisitor + leaveSelectionSet []LeaveSelectionSetVisitor + enterField []EnterFieldVisitor + leaveField []LeaveFieldVisitor + enterArgument []EnterArgumentVisitor + leaveArgument []LeaveArgumentVisitor + enterFragmentSpread []EnterFragmentSpreadVisitor + leaveFragmentSpread []LeaveFragmentSpreadVisitor + enterInlineFragment []EnterInlineFragmentVisitor + leaveInlineFragment []LeaveInlineFragmentVisitor + enterFragmentDefinition []EnterFragmentDefinitionVisitor + leaveFragmentDefinition []LeaveFragmentDefinitionVisitor + enterDocument []EnterDocumentVisitor + leaveDocument []LeaveDocumentVisitor + enterVariableDefinition []EnterVariableDefinitionVisitor + leaveVariableDefinition []LeaveVariableDefinitionVisitor + enterDirective []EnterDirectiveVisitor + leaveDirective []LeaveDirectiveVisitor + enterObjectTypeDefinition []EnterObjectTypeDefinitionVisitor + leaveObjectTypeDefinition []LeaveObjectTypeDefinitionVisitor + enterFieldDefinition []EnterFieldDefinitionVisitor + leaveFieldDefinition []LeaveFieldDefinitionVisitor + enterInputValueDefinition []EnterInputValueDefinitionVisitor + leaveInputValueDefinition []LeaveInputValueDefinitionVisitor + enterInterfaceTypeDefinition []EnterInterfaceTypeDefinitionVisitor + leaveInterfaceTypeDefinition []LeaveInterfaceTypeDefinitionVisitor + enterInterfaceTypeExtension []EnterInterfaceTypeExtensionVisitor + leaveInterfaceTypeExtension []LeaveInterfaceTypeExtensionVisitor + enterObjectTypeExtension []EnterObjectTypeExtensionVisitor + leaveObjectTypeExtension []LeaveObjectTypeExtensionVisitor + enterScalarTypeDefinition []EnterScalarTypeDefinitionVisitor + leaveScalarTypeDefinition []LeaveScalarTypeDefinitionVisitor + enterScalarTypeExtension []EnterScalarTypeExtensionVisitor + leaveScalarTypeExtension []LeaveScalarTypeExtensionVisitor + enterUnionTypeDefinition []EnterUnionTypeDefinitionVisitor + leaveUnionTypeDefinition []LeaveUnionTypeDefinitionVisitor + enterUnionTypeExtension []EnterUnionTypeExtensionVisitor + leaveUnionTypeExtension []LeaveUnionTypeExtensionVisitor + enterUnionMemberType []EnterUnionMemberTypeVisitor + leaveUnionMemberType []LeaveUnionMemberTypeVisitor + enterEnumTypeDefinition []EnterEnumTypeDefinitionVisitor + leaveEnumTypeDefinition []LeaveEnumTypeDefinitionVisitor + enterEnumTypeExtension []EnterEnumTypeExtensionVisitor + leaveEnumTypeExtension []LeaveEnumTypeExtensionVisitor + enterEnumValueDefinition []EnterEnumValueDefinitionVisitor + leaveEnumValueDefinition []LeaveEnumValueDefinitionVisitor + enterInputObjectTypeDefinition []EnterInputObjectTypeDefinitionVisitor + leaveInputObjectTypeDefinition []LeaveInputObjectTypeDefinitionVisitor + enterInputObjectTypeExtension []EnterInputObjectTypeExtensionVisitor + leaveInputObjectTypeExtension []LeaveInputObjectTypeExtensionVisitor + enterDirectiveDefinition []EnterDirectiveDefinitionVisitor + leaveDirectiveDefinition []LeaveDirectiveDefinitionVisitor + enterDirectiveLocation []EnterDirectiveLocationVisitor + leaveDirectiveLocation []LeaveDirectiveLocationVisitor + enterSchemaDefinition []EnterSchemaDefinitionVisitor + leaveSchemaDefinition []LeaveSchemaDefinitionVisitor + 
enterSchemaExtension []EnterSchemaExtensionVisitor + leaveSchemaExtension []LeaveSchemaExtensionVisitor + enterRootOperationTypeDefinition []EnterRootOperationTypeDefinitionVisitor + leaveRootOperationTypeDefinition []LeaveRootOperationTypeDefinitionVisitor +} + +// ResetVisitors empties all registered visitors / unregisters all callbacks +func (w *Walker) ResetVisitors() { + w.visitors.enterOperation = w.visitors.enterOperation[:0] + w.visitors.leaveOperation = w.visitors.leaveOperation[:0] + w.visitors.enterSelectionSet = w.visitors.enterSelectionSet[:0] + w.visitors.leaveSelectionSet = w.visitors.leaveSelectionSet[:0] + w.visitors.enterField = w.visitors.enterField[:0] + w.visitors.leaveField = w.visitors.leaveField[:0] + w.visitors.enterArgument = w.visitors.enterArgument[:0] + w.visitors.leaveArgument = w.visitors.leaveArgument[:0] + w.visitors.enterFragmentSpread = w.visitors.enterFragmentSpread[:0] + w.visitors.leaveFragmentSpread = w.visitors.leaveFragmentSpread[:0] + w.visitors.enterInlineFragment = w.visitors.enterInlineFragment[:0] + w.visitors.leaveInlineFragment = w.visitors.leaveInlineFragment[:0] + w.visitors.enterFragmentDefinition = w.visitors.enterFragmentDefinition[:0] + w.visitors.leaveFragmentDefinition = w.visitors.leaveFragmentDefinition[:0] + w.visitors.enterDocument = w.visitors.enterDocument[:0] + w.visitors.leaveDocument = w.visitors.leaveDocument[:0] + w.visitors.enterVariableDefinition = w.visitors.enterVariableDefinition[:0] + w.visitors.leaveVariableDefinition = w.visitors.leaveVariableDefinition[:0] + w.visitors.enterDirective = w.visitors.enterDirective[:0] + w.visitors.leaveDirective = w.visitors.leaveDirective[:0] + w.visitors.enterObjectTypeDefinition = w.visitors.enterObjectTypeDefinition[:0] + w.visitors.leaveObjectTypeDefinition = w.visitors.leaveObjectTypeDefinition[:0] + w.visitors.enterFieldDefinition = w.visitors.enterFieldDefinition[:0] + w.visitors.leaveFieldDefinition = w.visitors.leaveFieldDefinition[:0] + w.visitors.enterInputValueDefinition = w.visitors.enterInputValueDefinition[:0] + w.visitors.leaveInputValueDefinition = w.visitors.leaveInputValueDefinition[:0] + w.visitors.enterInterfaceTypeDefinition = w.visitors.enterInterfaceTypeDefinition[:0] + w.visitors.leaveInterfaceTypeDefinition = w.visitors.leaveInterfaceTypeDefinition[:0] + w.visitors.enterInterfaceTypeExtension = w.visitors.enterInterfaceTypeExtension[:0] + w.visitors.leaveInterfaceTypeExtension = w.visitors.leaveInterfaceTypeExtension[:0] + w.visitors.enterObjectTypeExtension = w.visitors.enterObjectTypeExtension[:0] + w.visitors.leaveObjectTypeExtension = w.visitors.leaveObjectTypeExtension[:0] + w.visitors.enterScalarTypeDefinition = w.visitors.enterScalarTypeDefinition[:0] + w.visitors.leaveScalarTypeDefinition = w.visitors.leaveScalarTypeDefinition[:0] + w.visitors.enterScalarTypeExtension = w.visitors.enterScalarTypeExtension[:0] + w.visitors.leaveScalarTypeExtension = w.visitors.leaveScalarTypeExtension[:0] + w.visitors.enterUnionTypeDefinition = w.visitors.enterUnionTypeDefinition[:0] + w.visitors.leaveUnionTypeDefinition = w.visitors.leaveUnionTypeDefinition[:0] + w.visitors.enterUnionTypeExtension = w.visitors.enterUnionTypeExtension[:0] + w.visitors.leaveUnionTypeExtension = w.visitors.leaveUnionTypeExtension[:0] + w.visitors.enterUnionMemberType = w.visitors.enterUnionMemberType[:0] + w.visitors.leaveUnionMemberType = w.visitors.leaveUnionMemberType[:0] + w.visitors.enterEnumTypeDefinition = w.visitors.enterEnumTypeDefinition[:0] + 
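+	// Truncating each slice with [:0] rather than reallocating keeps the
+	// backing arrays alive, so a single Walker can be reset and re-registered
+	// across many validation runs without extra allocations.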
+func (w *Walker) setCurrent(kind ast.NodeKind, ref int) {
+	w.CurrentKind = kind
+	w.CurrentRef = ref
+}
+
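+// RegisterExecutableVisitor registers the visitor for every event emitted while
+// walking executable definitions: operations, selection sets, fields, arguments,
+// fragments, variable definitions and directives.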
+func (w *Walker) RegisterExecutableVisitor(visitor ExecutableVisitor) {
+	w.RegisterOperationDefinitionVisitor(visitor)
+	w.RegisterSelectionSetVisitor(visitor)
+	w.RegisterFieldVisitor(visitor)
+	w.RegisterArgumentVisitor(visitor)
+	w.RegisterFragmentSpreadVisitor(visitor)
+	w.RegisterInlineFragmentVisitor(visitor)
+	w.RegisterFragmentDefinitionVisitor(visitor)
+	w.RegisterVariableDefinitionVisitor(visitor)
+	w.RegisterDirectiveVisitor(visitor)
+}
+
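+// RegisterTypeSystemVisitor registers the visitor for every event emitted while
+// walking type system definitions and extensions.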
+func (w *Walker) RegisterTypeSystemVisitor(visitor TypeSystemVisitor) {
+	w.RegisterObjectTypeDefinitionVisitor(visitor)
+	w.RegisterObjectTypeExtensionVisitor(visitor)
+	w.RegisterFieldDefinitionVisitor(visitor)
+	w.RegisterInputValueDefinitionVisitor(visitor)
+	w.RegisterInterfaceTypeDefinitionVisitor(visitor)
+	w.RegisterInterfaceTypeExtensionVisitor(visitor)
+	w.RegisterScalarTypeDefinitionVisitor(visitor)
+	w.RegisterScalarTypeExtensionVisitor(visitor)
+	w.RegisterUnionTypeDefinitionVisitor(visitor)
+	w.RegisterUnionTypeExtensionVisitor(visitor)
+	w.RegisterUnionMemberTypeVisitor(visitor)
+	w.RegisterEnumTypeDefinitionVisitor(visitor)
+	w.RegisterEnumTypeExtensionVisitor(visitor)
+	w.RegisterEnumValueDefinitionVisitor(visitor)
+	w.RegisterInputObjectTypeDefinitionVisitor(visitor)
+	w.RegisterInputObjectTypeExtensionVisitor(visitor)
+	w.RegisterDirectiveDefinitionVisitor(visitor)
+	w.RegisterDirectiveLocationVisitor(visitor)
+	w.RegisterSchemaDefinitionVisitor(visitor)
+	w.RegisterSchemaExtensionVisitor(visitor)
+	w.RegisterRootOperationTypeDefinitionVisitor(visitor)
+}
+
+func (w *Walker) RegisterEnterRootOperationTypeDefinitionVisitor(visitor EnterRootOperationTypeDefinitionVisitor) {
+	w.visitors.enterRootOperationTypeDefinition = append(w.visitors.enterRootOperationTypeDefinition, visitor)
+}
+
+func (w *Walker) RegisterLeaveRootOperationTypeDefinitionVisitor(visitor LeaveRootOperationTypeDefinitionVisitor) {
+	w.visitors.leaveRootOperationTypeDefinition = append(w.visitors.leaveRootOperationTypeDefinition, visitor)
+}
+
+func (w *Walker) RegisterRootOperationTypeDefinitionVisitor(visitor RootOperationTypeDefinitionVisitor) {
+	w.RegisterEnterRootOperationTypeDefinitionVisitor(visitor)
+	w.RegisterLeaveRootOperationTypeDefinitionVisitor(visitor)
+}
+
+func (w *Walker) RegisterEnterSchemaDefinitionVisitor(visitor EnterSchemaDefinitionVisitor) {
+	w.visitors.enterSchemaDefinition = append(w.visitors.enterSchemaDefinition, visitor)
+}
+
+func (w *Walker) RegisterLeaveSchemaDefinitionVisitor(visitor LeaveSchemaDefinitionVisitor) {
+	w.visitors.leaveSchemaDefinition = append(w.visitors.leaveSchemaDefinition, visitor)
+}
+
+func (w *Walker) RegisterSchemaDefinitionVisitor(visitor SchemaDefinitionVisitor) {
+	w.RegisterEnterSchemaDefinitionVisitor(visitor)
+	w.RegisterLeaveSchemaDefinitionVisitor(visitor)
+}
+
+func (w *Walker) RegisterEnterSchemaExtensionVisitor(visitor EnterSchemaExtensionVisitor) {
+	w.visitors.enterSchemaExtension = append(w.visitors.enterSchemaExtension, visitor)
+}
+
+func (w *Walker) RegisterLeaveSchemaExtensionVisitor(visitor LeaveSchemaExtensionVisitor) {
+	w.visitors.leaveSchemaExtension = append(w.visitors.leaveSchemaExtension, visitor)
+}
+
+func (w *Walker) RegisterSchemaExtensionVisitor(visitor SchemaExtensionVisitor) {
+	w.RegisterEnterSchemaExtensionVisitor(visitor)
+	w.RegisterLeaveSchemaExtensionVisitor(visitor)
+}
+
+func (w *Walker) RegisterEnterDirectiveLocationVisitor(visitor EnterDirectiveLocationVisitor) {
+	w.visitors.enterDirectiveLocation = append(w.visitors.enterDirectiveLocation, visitor)
+}
+
+func (w *Walker) RegisterLeaveDirectiveLocationVisitor(visitor LeaveDirectiveLocationVisitor) {
+	w.visitors.leaveDirectiveLocation = append(w.visitors.leaveDirectiveLocation, visitor)
+}
+
+func (w *Walker) RegisterDirectiveLocationVisitor(visitor DirectiveLocationVisitor) {
+	w.RegisterEnterDirectiveLocationVisitor(visitor)
+	w.RegisterLeaveDirectiveLocationVisitor(visitor)
+}
+
+func (w *Walker) RegisterEnterDirectiveDefinitionVisitor(visitor EnterDirectiveDefinitionVisitor) {
+	w.visitors.enterDirectiveDefinition = append(w.visitors.enterDirectiveDefinition, visitor)
+}
+
+func (w *Walker) RegisterLeaveDirectiveDefinitionVisitor(visitor LeaveDirectiveDefinitionVisitor) {
+	w.visitors.leaveDirectiveDefinition = append(w.visitors.leaveDirectiveDefinition, visitor)
+}
+
+func (w *Walker) RegisterDirectiveDefinitionVisitor(visitor DirectiveDefinitionVisitor) {
+	w.RegisterEnterDirectiveDefinitionVisitor(visitor)
+	w.RegisterLeaveDirectiveDefinitionVisitor(visitor)
+}
+
+func (w *Walker) RegisterEnterUnionMemberTypeVisitor(visitor EnterUnionMemberTypeVisitor) {
+	w.visitors.enterUnionMemberType = append(w.visitors.enterUnionMemberType, visitor)
+}
+
+func (w *Walker) RegisterLeaveUnionMemberTypeVisitor(visitor LeaveUnionMemberTypeVisitor) {
+	w.visitors.leaveUnionMemberType = append(w.visitors.leaveUnionMemberType, visitor)
+}
+
+func (w *Walker) RegisterUnionMemberTypeVisitor(visitor UnionMemberTypeVisitor) {
+	w.RegisterEnterUnionMemberTypeVisitor(visitor)
+	w.RegisterLeaveUnionMemberTypeVisitor(visitor)
+}
+
+func (w *Walker) RegisterEnterInputObjectTypeDefinitionVisitor(visitor EnterInputObjectTypeDefinitionVisitor) {
+	w.visitors.enterInputObjectTypeDefinition = append(w.visitors.enterInputObjectTypeDefinition, visitor)
+}
+
+func (w *Walker) RegisterLeaveInputObjectTypeDefinitionVisitor(visitor LeaveInputObjectTypeDefinitionVisitor) {
+	w.visitors.leaveInputObjectTypeDefinition = append(w.visitors.leaveInputObjectTypeDefinition, visitor)
+}
+
+func (w *Walker) RegisterInputObjectTypeDefinitionVisitor(visitor InputObjectTypeDefinitionVisitor) {
+	w.RegisterEnterInputObjectTypeDefinitionVisitor(visitor)
+	w.RegisterLeaveInputObjectTypeDefinitionVisitor(visitor)
+}
+
+func (w *Walker) RegisterEnterInputObjectTypeExtensionVisitor(visitor EnterInputObjectTypeExtensionVisitor) {
+	w.visitors.enterInputObjectTypeExtension = append(w.visitors.enterInputObjectTypeExtension, visitor)
+}
+
+func (w *Walker) RegisterLeaveInputObjectTypeExtensionVisitor(visitor LeaveInputObjectTypeExtensionVisitor) {
+	w.visitors.leaveInputObjectTypeExtension = append(w.visitors.leaveInputObjectTypeExtension, visitor)
+}
+
+func (w *Walker) RegisterInputObjectTypeExtensionVisitor(visitor InputObjectTypeExtensionVisitor) {
+	w.RegisterEnterInputObjectTypeExtensionVisitor(visitor)
+	w.RegisterLeaveInputObjectTypeExtensionVisitor(visitor)
+}
+
+func (w *Walker) RegisterEnterEnumTypeDefinitionVisitor(visitor EnterEnumTypeDefinitionVisitor) {
+	w.visitors.enterEnumTypeDefinition = append(w.visitors.enterEnumTypeDefinition, visitor)
+}
+
+func (w *Walker) RegisterLeaveEnumTypeDefinitionVisitor(visitor LeaveEnumTypeDefinitionVisitor) {
+	w.visitors.leaveEnumTypeDefinition = append(w.visitors.leaveEnumTypeDefinition, visitor)
+}
+
+func (w *Walker) RegisterEnumTypeDefinitionVisitor(visitor EnumTypeDefinitionVisitor) {
+	w.RegisterEnterEnumTypeDefinitionVisitor(visitor)
+	w.RegisterLeaveEnumTypeDefinitionVisitor(visitor)
+}
+
+func (w *Walker) RegisterEnterEnumTypeExtensionVisitor(visitor EnterEnumTypeExtensionVisitor) {
+	w.visitors.enterEnumTypeExtension = append(w.visitors.enterEnumTypeExtension, visitor)
+}
+
+func (w *Walker) RegisterLeaveEnumTypeExtensionVisitor(visitor LeaveEnumTypeExtensionVisitor) {
+	w.visitors.leaveEnumTypeExtension = append(w.visitors.leaveEnumTypeExtension, visitor)
+}
+
+func (w *Walker) RegisterEnumTypeExtensionVisitor(visitor EnumTypeExtensionVisitor) {
+	w.RegisterEnterEnumTypeExtensionVisitor(visitor)
+	w.RegisterLeaveEnumTypeExtensionVisitor(visitor)
+}
+
+func (w *Walker) RegisterEnterEnumValueDefinitionVisitor(visitor EnterEnumValueDefinitionVisitor) {
+	w.visitors.enterEnumValueDefinition = append(w.visitors.enterEnumValueDefinition, visitor)
+}
+
+func (w *Walker) RegisterLeaveEnumValueDefinitionVisitor(visitor LeaveEnumValueDefinitionVisitor) {
+	w.visitors.leaveEnumValueDefinition = append(w.visitors.leaveEnumValueDefinition, visitor)
+}
+
+func (w *Walker) RegisterEnumValueDefinitionVisitor(visitor EnumValueDefinitionVisitor) {
+	w.RegisterEnterEnumValueDefinitionVisitor(visitor)
+	w.RegisterLeaveEnumValueDefinitionVisitor(visitor)
+}
+
+func (w *Walker) RegisterEnterUnionTypeDefinitionVisitor(visitor EnterUnionTypeDefinitionVisitor) {
+	w.visitors.enterUnionTypeDefinition = append(w.visitors.enterUnionTypeDefinition, visitor)
+}
+
+func (w *Walker) RegisterLeaveUnionTypeDefinitionVisitor(visitor LeaveUnionTypeDefinitionVisitor) {
+	w.visitors.leaveUnionTypeDefinition = append(w.visitors.leaveUnionTypeDefinition, visitor)
+}
+
+func (w *Walker) RegisterUnionTypeDefinitionVisitor(visitor UnionTypeDefinitionVisitor) {
+	w.RegisterEnterUnionTypeDefinitionVisitor(visitor)
+	w.RegisterLeaveUnionTypeDefinitionVisitor(visitor)
+}
+
+func (w *Walker) RegisterEnterUnionTypeExtensionVisitor(visitor EnterUnionTypeExtensionVisitor) {
+	w.visitors.enterUnionTypeExtension = append(w.visitors.enterUnionTypeExtension, visitor)
+}
+
+func (w *Walker) RegisterLeaveUnionTypeExtensionVisitor(visitor LeaveUnionTypeExtensionVisitor) {
+	w.visitors.leaveUnionTypeExtension = append(w.visitors.leaveUnionTypeExtension, visitor)
+}
+
+func (w *Walker) RegisterUnionTypeExtensionVisitor(visitor UnionTypeExtensionVisitor) {
+	w.RegisterEnterUnionTypeExtensionVisitor(visitor)
+	w.RegisterLeaveUnionTypeExtensionVisitor(visitor)
+}
+
+func (w *Walker) RegisterEnterScalarTypeExtensionVisitor(visitor EnterScalarTypeExtensionVisitor) {
+	w.visitors.enterScalarTypeExtension = append(w.visitors.enterScalarTypeExtension, visitor)
+}
+
+func (w *Walker) RegisterLeaveScalarTypeExtensionVisitor(visitor LeaveScalarTypeExtensionVisitor) {
+	w.visitors.leaveScalarTypeExtension = append(w.visitors.leaveScalarTypeExtension, visitor)
+}
+
+func (w *Walker) RegisterScalarTypeExtensionVisitor(visitor ScalarTypeExtensionVisitor) {
+	w.RegisterEnterScalarTypeExtensionVisitor(visitor)
+	w.RegisterLeaveScalarTypeExtensionVisitor(visitor)
+}
+
+func (w *Walker) RegisterEnterScalarTypeDefinitionVisitor(visitor EnterScalarTypeDefinitionVisitor) {
+	w.visitors.enterScalarTypeDefinition = append(w.visitors.enterScalarTypeDefinition, visitor)
+}
+
+func (w *Walker) RegisterLeaveScalarTypeDefinitionVisitor(visitor LeaveScalarTypeDefinitionVisitor) {
+	w.visitors.leaveScalarTypeDefinition = append(w.visitors.leaveScalarTypeDefinition, visitor)
+}
+
+func (w *Walker) RegisterScalarTypeDefinitionVisitor(visitor ScalarTypeDefinitionVisitor) {
+	w.RegisterEnterScalarTypeDefinitionVisitor(visitor)
+	w.RegisterLeaveScalarTypeDefinitionVisitor(visitor)
+}
+
+func (w *Walker) RegisterEnterInterfaceTypeExtensionVisitor(visitor EnterInterfaceTypeExtensionVisitor) {
+	w.visitors.enterInterfaceTypeExtension = append(w.visitors.enterInterfaceTypeExtension, visitor)
+}
+
+func (w *Walker) RegisterLeaveInterfaceTypeExtensionVisitor(visitor LeaveInterfaceTypeExtensionVisitor) {
+	w.visitors.leaveInterfaceTypeExtension = append(w.visitors.leaveInterfaceTypeExtension, visitor)
+}
+
+func (w *Walker) RegisterInterfaceTypeExtensionVisitor(visitor InterfaceTypeExtensionVisitor) {
+	w.RegisterEnterInterfaceTypeExtensionVisitor(visitor)
+	w.RegisterLeaveInterfaceTypeExtensionVisitor(visitor)
+}
+
+func (w *Walker) RegisterEnterInterfaceTypeDefinitionVisitor(visitor EnterInterfaceTypeDefinitionVisitor) {
+	w.visitors.enterInterfaceTypeDefinition = append(w.visitors.enterInterfaceTypeDefinition, visitor)
+}
+
+func (w *Walker) RegisterLeaveInterfaceTypeDefinitionVisitor(visitor LeaveInterfaceTypeDefinitionVisitor) {
+	w.visitors.leaveInterfaceTypeDefinition = append(w.visitors.leaveInterfaceTypeDefinition, visitor)
+}
+
+func (w *Walker) RegisterInterfaceTypeDefinitionVisitor(visitor InterfaceTypeDefinitionVisitor) {
+	w.RegisterEnterInterfaceTypeDefinitionVisitor(visitor)
+	w.RegisterLeaveInterfaceTypeDefinitionVisitor(visitor)
+}
+
+func (w *Walker) RegisterEnterInputValueDefinitionVisitor(visitor EnterInputValueDefinitionVisitor) {
+	w.visitors.enterInputValueDefinition = append(w.visitors.enterInputValueDefinition, visitor)
+}
+
+func (w *Walker) RegisterLeaveInputValueDefinitionVisitor(visitor LeaveInputValueDefinitionVisitor) {
+	w.visitors.leaveInputValueDefinition = append(w.visitors.leaveInputValueDefinition, visitor)
+}
+
+func (w *Walker) RegisterInputValueDefinitionVisitor(visitor InputValueDefinitionVisitor) {
+	w.RegisterEnterInputValueDefinitionVisitor(visitor)
+	w.RegisterLeaveInputValueDefinitionVisitor(visitor)
+}
+
+func (w *Walker) RegisterEnterFieldDefinitionVisitor(visitor EnterFieldDefinitionVisitor) {
+	w.visitors.enterFieldDefinition = append(w.visitors.enterFieldDefinition, visitor)
+}
+
+func (w *Walker) RegisterLeaveFieldDefinitionVisitor(visitor LeaveFieldDefinitionVisitor) {
+	w.visitors.leaveFieldDefinition = append(w.visitors.leaveFieldDefinition, visitor)
+}
+
+func (w *Walker) RegisterFieldDefinitionVisitor(visitor FieldDefinitionVisitor) {
+	w.RegisterEnterFieldDefinitionVisitor(visitor)
+	w.RegisterLeaveFieldDefinitionVisitor(visitor)
+}
+
+func (w *Walker) RegisterEnterObjectTypeExtensionVisitor(visitor EnterObjectTypeExtensionVisitor) {
+	w.visitors.enterObjectTypeExtension = append(w.visitors.enterObjectTypeExtension, visitor)
+}
+
+func (w *Walker) RegisterLeaveObjectTypeExtensionVisitor(visitor LeaveObjectTypeExtensionVisitor) {
+	w.visitors.leaveObjectTypeExtension = append(w.visitors.leaveObjectTypeExtension, visitor)
+}
+
+func (w *Walker) RegisterObjectTypeExtensionVisitor(visitor ObjectTypeExtensionVisitor) {
+	w.RegisterEnterObjectTypeExtensionVisitor(visitor)
+	w.RegisterLeaveObjectTypeExtensionVisitor(visitor)
+}
+
+func (w *Walker) RegisterEnterObjectTypeDefinitionVisitor(visitor EnterObjectTypeDefinitionVisitor) {
+	w.visitors.enterObjectTypeDefinition = append(w.visitors.enterObjectTypeDefinition, visitor)
+}
+
+func (w *Walker) RegisterLeaveObjectTypeDefinitionVisitor(visitor LeaveObjectTypeDefinitionVisitor) {
+	w.visitors.leaveObjectTypeDefinition = append(w.visitors.leaveObjectTypeDefinition, visitor)
+}
+
+func (w *Walker) RegisterObjectTypeDefinitionVisitor(visitor ObjectTypeDefinitionVisitor) {
+	w.RegisterEnterObjectTypeDefinitionVisitor(visitor)
+	w.RegisterLeaveObjectTypeDefinitionVisitor(visitor)
+}
+
+func (w *Walker) RegisterEnterFieldVisitor(visitor EnterFieldVisitor) {
+	w.visitors.enterField = append(w.visitors.enterField, visitor)
+}
+
+func (w *Walker) RegisterLeaveFieldVisitor(visitor LeaveFieldVisitor) {
+	w.visitors.leaveField = append(w.visitors.leaveField, visitor)
+}
+
+func (w *Walker) RegisterFieldVisitor(visitor FieldVisitor) {
+	w.RegisterEnterFieldVisitor(visitor)
+	w.RegisterLeaveFieldVisitor(visitor)
+}
+
+func (w *Walker) RegisterEnterSelectionSetVisitor(visitor EnterSelectionSetVisitor) {
+	w.visitors.enterSelectionSet = append(w.visitors.enterSelectionSet, visitor)
+}
+
+func (w *Walker) RegisterLeaveSelectionSetVisitor(visitor LeaveSelectionSetVisitor) {
+	w.visitors.leaveSelectionSet = append(w.visitors.leaveSelectionSet, visitor)
+}
+
+func (w *Walker) RegisterSelectionSetVisitor(visitor SelectionSetVisitor) {
+	w.RegisterEnterSelectionSetVisitor(visitor)
+	w.RegisterLeaveSelectionSetVisitor(visitor)
+}
+
+func (w *Walker) RegisterEnterArgumentVisitor(visitor EnterArgumentVisitor) {
+	w.visitors.enterArgument = append(w.visitors.enterArgument, visitor)
+}
+
+func (w *Walker) RegisterLeaveArgumentVisitor(visitor LeaveArgumentVisitor) {
+	w.visitors.leaveArgument = append(w.visitors.leaveArgument, visitor)
+}
+
+func (w *Walker) RegisterArgumentVisitor(visitor ArgumentVisitor) {
+	w.RegisterEnterArgumentVisitor(visitor)
+	w.RegisterLeaveArgumentVisitor(visitor)
+}
+
+func (w *Walker) RegisterEnterFragmentSpreadVisitor(visitor EnterFragmentSpreadVisitor) {
+	w.visitors.enterFragmentSpread = append(w.visitors.enterFragmentSpread, visitor)
+}
+
+func (w *Walker) RegisterLeaveFragmentSpreadVisitor(visitor LeaveFragmentSpreadVisitor) {
+	w.visitors.leaveFragmentSpread = append(w.visitors.leaveFragmentSpread, visitor)
+}
+
+func (w *Walker) RegisterFragmentSpreadVisitor(visitor FragmentSpreadVisitor) {
+	w.RegisterEnterFragmentSpreadVisitor(visitor)
+	w.RegisterLeaveFragmentSpreadVisitor(visitor)
+}
+
+func (w *Walker) RegisterEnterInlineFragmentVisitor(visitor EnterInlineFragmentVisitor) {
+	w.visitors.enterInlineFragment = append(w.visitors.enterInlineFragment, visitor)
+}
+
+func (w *Walker) RegisterLeaveInlineFragmentVisitor(visitor LeaveInlineFragmentVisitor) {
+	w.visitors.leaveInlineFragment = append(w.visitors.leaveInlineFragment, visitor)
+}
+
+func (w *Walker) RegisterInlineFragmentVisitor(visitor InlineFragmentVisitor) {
+	w.RegisterEnterInlineFragmentVisitor(visitor)
+	w.RegisterLeaveInlineFragmentVisitor(visitor)
+}
+
+func (w *Walker) RegisterEnterFragmentDefinitionVisitor(visitor EnterFragmentDefinitionVisitor) {
+	w.visitors.enterFragmentDefinition = append(w.visitors.enterFragmentDefinition, visitor)
+}
+
+func (w *Walker) RegisterLeaveFragmentDefinitionVisitor(visitor LeaveFragmentDefinitionVisitor) {
+	w.visitors.leaveFragmentDefinition = append(w.visitors.leaveFragmentDefinition, visitor)
+}
+
+func (w *Walker) RegisterFragmentDefinitionVisitor(visitor FragmentDefinitionVisitor) {
+	w.RegisterEnterFragmentDefinitionVisitor(visitor)
+	w.RegisterLeaveFragmentDefinitionVisitor(visitor)
+}
+
+func (w *Walker) RegisterEnterVariableDefinitionVisitor(visitor EnterVariableDefinitionVisitor) {
+	w.visitors.enterVariableDefinition = append(w.visitors.enterVariableDefinition, visitor)
+}
+
+func (w *Walker) RegisterLeaveVariableDefinitionVisitor(visitor LeaveVariableDefinitionVisitor) {
+	w.visitors.leaveVariableDefinition = append(w.visitors.leaveVariableDefinition, visitor)
+}
+
+func (w *Walker) RegisterVariableDefinitionVisitor(visitor VariableDefinitionVisitor) {
+	w.RegisterEnterVariableDefinitionVisitor(visitor)
+	w.RegisterLeaveVariableDefinitionVisitor(visitor)
+}
+
+func (w *Walker) RegisterEnterOperationVisitor(visitor EnterOperationDefinitionVisitor) {
+	w.visitors.enterOperation = append(w.visitors.enterOperation, visitor)
+}
+
+func (w *Walker) RegisterLeaveOperationVisitor(visitor LeaveOperationDefinitionVisitor) {
+	w.visitors.leaveOperation = append(w.visitors.leaveOperation, visitor)
+}
+
+func (w *Walker) RegisterOperationDefinitionVisitor(visitor OperationDefinitionVisitor) {
+	w.RegisterEnterOperationVisitor(visitor)
+	w.RegisterLeaveOperationVisitor(visitor)
+}
+
+func (w *Walker) RegisterEnterDirectiveVisitor(visitor EnterDirectiveVisitor) {
+	w.visitors.enterDirective = append(w.visitors.enterDirective, visitor)
+}
+
+func (w *Walker) RegisterLeaveDirectiveVisitor(visitor LeaveDirectiveVisitor) {
+	w.visitors.leaveDirective = append(w.visitors.leaveDirective, visitor)
+}
+
+func (w *Walker) RegisterDirectiveVisitor(visitor DirectiveVisitor) {
+	w.RegisterEnterDirectiveVisitor(visitor)
+	w.RegisterLeaveDirectiveVisitor(visitor)
+}
+
+func (w *Walker) RegisterAllNodesVisitor(visitor AllNodesVisitor) {
+	w.RegisterDocumentVisitor(visitor)
+	w.RegisterExecutableVisitor(visitor)
+	w.RegisterTypeSystemVisitor(visitor)
+}
+
+func (w *Walker) RegisterEnterDocumentVisitor(visitor EnterDocumentVisitor) {
+	w.visitors.enterDocument = append(w.visitors.enterDocument, visitor)
+}
+
+func (w *Walker) RegisterLeaveDocumentVisitor(visitor LeaveDocumentVisitor) {
+	w.visitors.leaveDocument = append(w.visitors.leaveDocument, visitor)
+}
+
+func (w *Walker) RegisterDocumentVisitor(visitor DocumentVisitor) {
+	w.RegisterEnterDocumentVisitor(visitor)
+	w.RegisterLeaveDocumentVisitor(visitor)
+}
+
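+// SetVisitorFilter installs a filter that is consulted before every callback;
+// a visitor is only invoked when the filter allows it.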
+func (w *Walker) SetVisitorFilter(filter VisitorFilter) {
+	w.filter = filter
+}
+
+// Walk resets the walker state and walks the AST, starting from the document's root nodes.
+func (w *Walker) Walk(document, definition *ast.Document, report *operationreport.Report) {
+	if report == nil {
+		w.Report = &operationreport.Report{}
+	} else {
+		w.Report = report
+	}
+	w.Ancestors = w.Ancestors[:0]
+	w.Path = w.Path[:0]
+	w.typeDefinitions = w.typeDefinitions[:0]
+	w.document = document
+	w.definition = definition
+	w.Depth = 0
+	w.stop = false
+	w.walk()
+}
+
+// Defer runs the provided func after the current batch of visitors has finished.
+// This makes it possible to run code that should execute only once all visitors
+// of the current batch, e.g. all EnterField visitors, have been invoked.
+func (w *Walker) Defer(fn func()) {
+	w.deferred = append(w.deferred, fn)
+}
+
+func (w *Walker) runDeferred() {
+	if len(w.deferred) == 0 {
+		return
+	}
+	for i := range w.deferred {
+		w.deferred[i]()
+	}
+	w.deferred = w.deferred[:0]
+}
+
+func (w *Walker) appendAncestor(ref int, kind ast.NodeKind) {
+	w.Ancestors = append(w.Ancestors, ast.Node{
+		Kind: kind,
+		Ref:  ref,
+	})
+
+	var typeName ast.ByteSlice
+
+	switch kind {
+	case ast.NodeKindOperationDefinition:
+		switch w.document.OperationDefinitions[ref].OperationType {
+		case ast.OperationTypeQuery:
+			typeName = w.definition.Index.QueryTypeName
+			w.Path = append(w.Path, ast.PathItem{
+				Kind:      ast.FieldName,
+				FieldName: literal.QUERY,
+			})
+		case ast.OperationTypeMutation:
+			typeName = w.definition.Index.MutationTypeName
+			w.Path = append(w.Path, ast.PathItem{
+				Kind:      ast.FieldName,
+				FieldName: literal.MUTATION,
+			})
+		case ast.OperationTypeSubscription:
+			typeName = w.definition.Index.SubscriptionTypeName
+			w.Path = append(w.Path, ast.PathItem{
+				Kind:      ast.FieldName,
+				FieldName: literal.SUBSCRIPTION,
+			})
+		default:
+			return
+		}
+	case ast.NodeKindInlineFragment:
+		if !w.document.InlineFragmentHasTypeCondition(ref) {
+			return
+		}
+		typeName = w.document.InlineFragmentTypeConditionName(ref)
+	case ast.NodeKindFragmentDefinition:
+		typeName = w.document.FragmentDefinitionTypeName(ref)
+		w.Path = append(w.Path, ast.PathItem{
+			Kind:       ast.FieldName,
+			ArrayIndex: 0,
+			FieldName:  typeName,
+		})
+	case ast.NodeKindField:
+		fieldName := w.document.FieldNameBytes(ref)
+		w.Path = append(w.Path, ast.PathItem{
+			Kind:       ast.FieldName,
+			ArrayIndex: 0,
+			FieldName:  w.document.FieldAliasOrNameBytes(ref),
+		})
+		if bytes.Equal(fieldName, literal.TYPENAME) {
+			typeName = literal.STRING
+		}
+		fields := w.definition.NodeFieldDefinitions(w.typeDefinitions[len(w.typeDefinitions)-1])
+		for _, i := range fields {
+			if bytes.Equal(fieldName, w.definition.FieldDefinitionNameBytes(i)) {
+				typeName = w.definition.ResolveTypeNameBytes(w.definition.FieldDefinitionType(i))
+				break
+			}
+		}
+		if typeName == nil {
+			typeName := w.definition.NodeNameBytes(w.typeDefinitions[len(w.typeDefinitions)-1])
+			w.StopWithExternalErr(operationreport.ErrFieldUndefinedOnType(fieldName, typeName))
+			return
+		}
+	case ast.NodeKindObjectTypeDefinition, ast.NodeKindInterfaceTypeDefinition, ast.NodeKindUnionTypeDefinition:
+		w.EnclosingTypeDefinition = ast.Node{
+			Kind: kind,
+			Ref:  ref,
+		}
+		return
+	default:
+		return
+	}
+
+	var exists bool
+	w.EnclosingTypeDefinition, exists = w.definition.Index.FirstNonExtensionNodeByNameBytes(typeName)
+	if !exists {
+		w.StopWithExternalErr(operationreport.ErrTypeUndefined(typeName))
+		return
+	}
+
+	w.typeDefinitions = append(w.typeDefinitions, w.EnclosingTypeDefinition)
+}
+
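+// removeLastAncestor pops the most recently appended ancestor and unwinds
+// Path, typeDefinitions and EnclosingTypeDefinition according to its kind.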
+func (w *Walker) removeLastAncestor() {
+
+	ancestor := w.Ancestors[len(w.Ancestors)-1]
+	w.Ancestors = w.Ancestors[:len(w.Ancestors)-1]
+
+	switch ancestor.Kind {
+	case ast.NodeKindOperationDefinition, ast.NodeKindFragmentDefinition:
+		w.Path = w.Path[:len(w.Path)-1]
+		w.typeDefinitions = w.typeDefinitions[:len(w.typeDefinitions)-1]
+		w.EnclosingTypeDefinition.Kind = ast.NodeKindUnknown
+		w.EnclosingTypeDefinition.Ref = -1
+	case ast.NodeKindInlineFragment:
+		if w.document.InlineFragmentHasTypeCondition(ancestor.Ref) {
+			w.typeDefinitions = w.typeDefinitions[:len(w.typeDefinitions)-1]
+			w.EnclosingTypeDefinition = w.typeDefinitions[len(w.typeDefinitions)-1]
+		}
+	case ast.NodeKindField:
+		w.Path = w.Path[:len(w.Path)-1]
+		w.typeDefinitions = w.typeDefinitions[:len(w.typeDefinitions)-1]
+		w.EnclosingTypeDefinition = w.typeDefinitions[len(w.typeDefinitions)-1]
+	case ast.NodeKindObjectTypeDefinition, ast.NodeKindInterfaceTypeDefinition:
+		w.EnclosingTypeDefinition.Ref = -1
+		w.EnclosingTypeDefinition.Kind = ast.NodeKindUnknown
+	default:
+		return
+	}
+}
+
+func (w *Walker) increaseDepth() {
+	w.Depth++
+}
+
+func (w *Walker) decreaseDepth() {
+	w.Depth--
+}
+
+func (w *Walker) walk() {
+
+	if w.document == nil {
+		w.Report.AddInternalError(ErrDocumentMustNotBeNil)
+		return
+	}
+
+	for i := 0; i < len(w.visitors.enterDocument); {
+		if w.filter == nil || w.filter.AllowVisitor(EnterDocument, 0, w.visitors.enterDocument[i]) {
+			w.visitors.enterDocument[i].EnterDocument(w.document, w.definition)
+		}
+		if w.revisit {
+			w.revisit = false
+			continue
+		}
+		if w.stop {
+			return
+		}
+		if w.skip {
+			w.skip = false
+			return
+		}
+		i++
+	}
+
+	for i := range w.document.RootNodes {
+		switch w.document.RootNodes[i].Kind {
+		case ast.NodeKindOperationDefinition:
+			if w.definition == nil {
+				w.Report.AddInternalError(ErrDefinitionMustNotBeNil)
+				return
+			}
+			w.walkOperationDefinition(w.document.RootNodes[i].Ref)
+		case ast.NodeKindFragmentDefinition:
+			if w.definition == nil {
+				w.Report.AddInternalError(ErrDefinitionMustNotBeNil)
+				return
+			}
+			w.walkFragmentDefinition(w.document.RootNodes[i].Ref)
+		case ast.NodeKindSchemaDefinition:
+			w.walkSchemaDefinition(w.document.RootNodes[i].Ref)
+		case ast.NodeKindSchemaExtension:
+			w.walkSchemaExtension(w.document.RootNodes[i].Ref)
+		case ast.NodeKindDirectiveDefinition:
+			w.walkDirectiveDefinition(w.document.RootNodes[i].Ref)
+		case ast.NodeKindObjectTypeDefinition:
+			w.walkObjectTypeDefinition(w.document.RootNodes[i].Ref)
+		case ast.NodeKindObjectTypeExtension:
+			w.walkObjectTypeExtension(w.document.RootNodes[i].Ref)
+		case ast.NodeKindInterfaceTypeDefinition:
+			w.walkInterfaceTypeDefinition(w.document.RootNodes[i].Ref)
+		case ast.NodeKindInterfaceTypeExtension:
+			w.walkInterfaceTypeExtension(w.document.RootNodes[i].Ref)
+		case ast.NodeKindScalarTypeDefinition:
+			w.walkScalarTypeDefinition(w.document.RootNodes[i].Ref)
+		case ast.NodeKindScalarTypeExtension:
+			w.walkScalarTypeExtension(w.document.RootNodes[i].Ref)
+		case ast.NodeKindUnionTypeDefinition:
+			w.walkUnionTypeDefinition(w.document.RootNodes[i].Ref)
+		case ast.NodeKindUnionTypeExtension:
+			w.walkUnionTypeExtension(w.document.RootNodes[i].Ref)
+		case ast.NodeKindEnumTypeDefinition:
+			w.walkEnumTypeDefinition(w.document.RootNodes[i].Ref)
+		case ast.NodeKindEnumTypeExtension:
+			w.walkEnumTypeExtension(w.document.RootNodes[i].Ref)
+		case ast.NodeKindInputObjectTypeDefinition:
+			w.walkInputObjectTypeDefinition(w.document.RootNodes[i].Ref)
+		case ast.NodeKindInputObjectTypeExtension:
+			w.walkInputObjectTypeExtension(w.document.RootNodes[i].Ref)
+		}
+
+		if w.stop {
+			return
+		}
+		if w.skip {
+			w.skip = false
+			return
+		}
+	}
+
+	for i := 0; i < len(w.visitors.leaveDocument); {
+		if w.filter == nil || w.filter.AllowVisitor(LeaveDocument, 0, w.visitors.leaveDocument[i]) {
+			w.visitors.leaveDocument[i].LeaveDocument(w.document, w.definition)
+		}
+		if w.revisit {
+			w.revisit = false
+			continue
+		}
+		if w.stop {
+			return
+		}
+		if w.skip {
+			w.skip = false
+			return
+		}
+		i++
+	}
+}
+
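+// Every walk function below follows the same protocol: run the enter visitors,
+// descend into child nodes, then run the leave visitors. Within a visitor loop,
+// revisit re-runs the current visitor, stop aborts the walk entirely, and skip
+// returns early, skipping the remaining visitors of the batch (and, on enter,
+// the node's children).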
+func (w *Walker) walkOperationDefinition(ref int) {
+	w.increaseDepth()
+
+	for i := 0; i < len(w.visitors.enterOperation); {
+		if w.filter == nil || w.filter.AllowVisitor(EnterOperation, ref, w.visitors.enterOperation[i]) {
+			w.visitors.enterOperation[i].EnterOperationDefinition(ref)
+		}
+		if w.revisit {
+			w.revisit = false
+			continue
+		}
+		if w.stop {
+			return
+		}
+		if w.skip {
+			w.skip = false
+			w.decreaseDepth()
+			return
+		}
+		i++
+	}
+
+	w.appendAncestor(ref, ast.NodeKindOperationDefinition)
+	if w.stop {
+		return
+	}
+
+	if w.document.OperationDefinitions[ref].HasVariableDefinitions {
+		for _, i := range w.document.OperationDefinitions[ref].VariableDefinitions.Refs {
+			w.walkVariableDefinition(i)
+			if w.stop {
+				return
+			}
+		}
+	}
+
+	if w.document.OperationDefinitions[ref].HasDirectives {
+		for _, i := range w.document.OperationDefinitions[ref].Directives.Refs {
+			w.walkDirective(i)
+			if w.stop {
+				return
+			}
+		}
+	}
+
+	if w.document.OperationDefinitions[ref].HasSelections {
+		w.walkSelectionSet(w.document.OperationDefinitions[ref].SelectionSet)
+		if w.stop {
+			return
+		}
+	}
+
+	w.removeLastAncestor()
+
+	for i := 0; i < len(w.visitors.leaveOperation); {
+		if w.filter == nil || w.filter.AllowVisitor(LeaveOperation, ref, w.visitors.leaveOperation[i]) {
+			w.visitors.leaveOperation[i].LeaveOperationDefinition(ref)
+		}
+		if w.revisit {
+			w.revisit = false
+			continue
+		}
+		if w.stop {
+			return
+		}
+		if w.skip {
+			w.skip = false
+			w.decreaseDepth()
+			return
+		}
+		i++
+	}
+
+	w.decreaseDepth()
+}
+
+func (w *Walker) walkVariableDefinition(ref int) {
+	w.increaseDepth()
+
+	for i := 0; i < len(w.visitors.enterVariableDefinition); {
+		if w.filter == nil || w.filter.AllowVisitor(EnterVariableDefinition, ref, w.visitors.enterVariableDefinition[i]) {
+			w.visitors.enterVariableDefinition[i].EnterVariableDefinition(ref)
+		}
+		if w.revisit {
+			w.revisit = false
+			continue
+		}
+		if w.stop {
+			return
+		}
+		if w.skip {
+			w.skip = false
+			w.decreaseDepth()
+			return
+		}
+		i++
+	}
+
+	w.appendAncestor(ref, ast.NodeKindVariableDefinition)
+	if w.stop {
+		return
+	}
+
+	if w.document.VariableDefinitions[ref].HasDirectives {
+		for _, i := range w.document.VariableDefinitions[ref].Directives.Refs {
+			w.walkDirective(i)
+			if w.stop {
+				return
+			}
+		}
+	}
+
+	w.removeLastAncestor()
+
+	for i := 0; i < len(w.visitors.leaveVariableDefinition); {
+		if w.filter == nil || w.filter.AllowVisitor(LeaveVariableDefinition, ref, w.visitors.leaveVariableDefinition[i]) {
+			w.visitors.leaveVariableDefinition[i].LeaveVariableDefinition(ref)
+		}
+		if w.revisit {
+			w.revisit = false
+			continue
+		}
+		if w.stop {
+			return
+		}
+		if w.skip {
+			w.skip = false
+			w.decreaseDepth()
+			return
+		}
+		i++
+	}
+
+	w.decreaseDepth()
+}
+
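+// walkSelectionSet re-reads the selection refs after every child visit: if a
+// visitor mutated the selection set, the RefsChanged loop restarts iteration
+// over the fresh refs.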
+func (w *Walker) walkSelectionSet(ref int) {
+	w.increaseDepth()
+
+	for i := 0; i < len(w.visitors.enterSelectionSet); {
+		if w.filter == nil || w.filter.AllowVisitor(EnterSelectionSet, ref, w.visitors.enterSelectionSet[i]) {
+			w.visitors.enterSelectionSet[i].EnterSelectionSet(ref)
+		}
+		if w.revisit {
+			w.revisit = false
+			continue
+		}
+		if w.stop {
+			return
+		}
+		if w.skip {
+			w.skip = false
+			w.decreaseDepth()
+			return
+		}
+		i++
+	}
+
+	w.appendAncestor(ref, ast.NodeKindSelectionSet)
+	if w.stop {
+		return
+	}
+
+RefsChanged:
+	for {
+		refs := w.document.SelectionSets[ref].SelectionRefs
+		for i, j := range refs {
+
+			w.SelectionsBefore = refs[:i]
+			w.SelectionsAfter = refs[i+1:]
+
+			switch w.document.Selections[j].Kind {
+			case ast.SelectionKindField:
+				w.walkField(w.document.Selections[j].Ref)
+			case ast.SelectionKindFragmentSpread:
+				w.walkFragmentSpread(w.document.Selections[j].Ref)
+			case ast.SelectionKindInlineFragment:
+				w.walkInlineFragment(w.document.Selections[j].Ref)
+			}
+
+			if w.stop {
+				return
+			}
+			if !w.refsEqual(refs, w.document.SelectionSets[ref].SelectionRefs) {
+				continue RefsChanged
+			}
+		}
+		break
+	}
+
+	w.removeLastAncestor()
+
+	for i := 0; i < len(w.visitors.leaveSelectionSet); {
+		if w.filter == nil || w.filter.AllowVisitor(LeaveSelectionSet, ref, w.visitors.leaveSelectionSet[i]) {
+			w.visitors.leaveSelectionSet[i].LeaveSelectionSet(ref)
+		}
+		if w.revisit {
+			w.revisit = false
+			continue
+		}
+		if w.stop {
+			return
+		}
+		if w.skip {
+			w.skip = false
+			w.decreaseDepth()
+			return
+		}
+		i++
+	}
+
+	w.decreaseDepth()
+}
+
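+// walkField runs the deferred funcs (registered via Defer) after all EnterField
+// visitors have been invoked and before descending into the field's arguments,
+// directives and selection set.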
+func (w *Walker) walkField(ref int) {
+	w.increaseDepth()
+
+	selectionsBefore := w.SelectionsBefore
+	selectionsAfter := w.SelectionsAfter
+
+	w.setCurrent(ast.NodeKindField, ref)
+
+	for i := 0; i < len(w.visitors.enterField); {
+		if w.filter == nil || w.filter.AllowVisitor(EnterField, ref, w.visitors.enterField[i]) {
+			w.visitors.enterField[i].EnterField(ref)
+		}
+		if w.revisit {
+			w.revisit = false
+			continue
+		}
+		if w.stop {
+			return
+		}
+		if w.skip {
+			w.skip = false
+			w.decreaseDepth()
+			return
+		}
+		i++
+	}
+
+	w.runDeferred()
+
+	w.appendAncestor(ref, ast.NodeKindField)
+	if w.stop {
+		return
+	}
+
+	if len(w.document.Fields[ref].Arguments.Refs) != 0 {
+		for _, i := range w.document.Fields[ref].Arguments.Refs {
+			w.walkArgument(i)
+			if w.stop {
+				return
+			}
+		}
+	}
+
+	if w.document.Fields[ref].HasDirectives {
+		for _, i := range w.document.Fields[ref].Directives.Refs {
+			w.walkDirective(i)
+			if w.stop {
+				return
+			}
+		}
+	}
+
+	if w.document.Fields[ref].HasSelections {
+		w.walkSelectionSet(w.document.Fields[ref].SelectionSet)
+	}
+
+	w.removeLastAncestor()
+
+	w.SelectionsBefore = selectionsBefore
+	w.SelectionsAfter = selectionsAfter
+
+	w.setCurrent(ast.NodeKindField, ref)
+
+	for i := 0; i < len(w.visitors.leaveField); {
+		if w.filter == nil || w.filter.AllowVisitor(LeaveField, ref, w.visitors.leaveField[i]) {
+			w.visitors.leaveField[i].LeaveField(ref)
+		}
+		if w.revisit {
+			w.revisit = false
+			continue
+		}
+		if w.stop {
+			return
+		}
+		if w.skip {
+			w.skip = false
+			w.decreaseDepth()
+			return
+		}
+		i++
+	}
+
+	w.decreaseDepth()
+}
+
+func (w *Walker) walkDirective(ref int) {
+	w.increaseDepth()
+
+	w.setCurrent(ast.NodeKindDirective, ref)
+
+	for i := 0; i < len(w.visitors.enterDirective); {
+		if w.filter == nil || w.filter.AllowVisitor(EnterDirective, ref, w.visitors.enterDirective[i]) {
+			w.visitors.enterDirective[i].EnterDirective(ref)
+		}
+		if w.revisit {
+			w.revisit = false
+			continue
+		}
+		if w.stop {
+			return
+		}
+		if w.skip {
+			w.skip = false
+			w.decreaseDepth()
+			return
+		}
+		i++
+	}
+
+	w.appendAncestor(ref, ast.NodeKindDirective)
+	if w.stop {
+		return
+	}
+
+	if w.document.Directives[ref].HasArguments {
+		for _, i := range w.document.Directives[ref].Arguments.Refs {
+			w.walkArgument(i)
+		}
+	}
+
+	w.removeLastAncestor()
+
+	w.setCurrent(ast.NodeKindDirective, ref)
+
+	for i := 0; i < len(w.visitors.leaveDirective); {
+		if w.filter == nil || w.filter.AllowVisitor(LeaveDirective, ref, w.visitors.leaveDirective[i]) {
+			w.visitors.leaveDirective[i].LeaveDirective(ref)
+		}
+		if w.revisit {
+			w.revisit = false
+			continue
+		}
+		if w.stop {
+			return
+		}
+		if w.skip {
+			w.skip = false
+			w.decreaseDepth()
+			return
+		}
+		i++
+	}
+
+	w.decreaseDepth()
+}
+
+func (w *Walker) walkArgument(ref int) {
+	w.increaseDepth()
+
+	w.setCurrent(ast.NodeKindArgument, ref)
+
+	for i := 0; i < len(w.visitors.enterArgument); {
+		if w.filter == nil || w.filter.AllowVisitor(EnterArgument, ref, w.visitors.enterArgument[i]) {
+			w.visitors.enterArgument[i].EnterArgument(ref)
+		}
+		if w.revisit {
+			w.revisit = false
+			continue
+		}
+		if w.stop {
+			return
+		}
+		if w.skip {
+			w.skip = false
+			w.decreaseDepth()
+			return
+		}
+		i++
+	}
+
+	for i := 0; i < len(w.visitors.leaveArgument); {
+		if w.filter == nil || w.filter.AllowVisitor(LeaveArgument, ref, w.visitors.leaveArgument[i]) {
+			w.visitors.leaveArgument[i].LeaveArgument(ref)
+		}
+		if w.revisit {
+			w.revisit = false
+			continue
+		}
+		if w.stop {
+			return
+		}
+		if w.skip {
+			w.skip = false
+			w.decreaseDepth()
+			return
+		}
+		i++
+	}
+
+	w.decreaseDepth()
+}
+
+func (w *Walker) walkFragmentSpread(ref int) {
+	w.increaseDepth()
+
+	w.setCurrent(ast.NodeKindFragmentSpread, ref)
+
+	for i := 0; i < len(w.visitors.enterFragmentSpread); {
+		if w.filter == nil || w.filter.AllowVisitor(EnterFragmentSpread, ref, w.visitors.enterFragmentSpread[i]) {
+			w.visitors.enterFragmentSpread[i].EnterFragmentSpread(ref)
+		}
+		if w.revisit {
+			w.revisit = false
+			continue
+		}
+		if w.stop {
+			return
+		}
+		if w.skip {
+			w.skip = false
+			w.decreaseDepth()
+			return
+		}
+		i++
+	}
+
+	for i := 0; i < len(w.visitors.leaveFragmentSpread); {
+		if w.filter == nil || w.filter.AllowVisitor(LeaveFragmentSpread, ref, w.visitors.leaveFragmentSpread[i]) {
+			w.visitors.leaveFragmentSpread[i].LeaveFragmentSpread(ref)
+		}
+		if w.revisit {
+			w.revisit = false
+			continue
+		}
+		if w.stop {
+			return
+		}
+		if w.skip {
+			w.skip = false
+			w.decreaseDepth()
+			return
+		}
+		i++
+	}
+
+	w.decreaseDepth()
+}
+
+func (w *Walker) walkInlineFragment(ref int) {
+	w.increaseDepth()
+
+	selectionsBefore := w.SelectionsBefore
+	selectionsAfter := w.SelectionsAfter
+
+	w.setCurrent(ast.NodeKindInlineFragment, ref)
+
+	for i := 0; i < len(w.visitors.enterInlineFragment); {
+		if w.filter == nil || w.filter.AllowVisitor(EnterInlineFragment, ref, w.visitors.enterInlineFragment[i]) {
+			w.visitors.enterInlineFragment[i].EnterInlineFragment(ref)
+		}
+		if w.revisit {
+			w.revisit = false
+			continue
+		}
+		if w.stop {
+			return
+		}
+		if w.skip {
+			w.skip = false
+			w.decreaseDepth()
+			return
+		}
+		i++
+	}
+
+	w.appendAncestor(ref, ast.NodeKindInlineFragment)
+	if w.stop {
+		return
+	}
+
+	if w.document.InlineFragments[ref].HasDirectives {
+		for _, i := range w.document.InlineFragments[ref].Directives.Refs {
+			w.walkDirective(i)
+		}
+	}
+
+	if w.document.InlineFragments[ref].HasSelections {
+		w.walkSelectionSet(w.document.InlineFragments[ref].SelectionSet)
+		if w.stop {
+			return
+		}
+	}
+
+	w.removeLastAncestor()
+
+	w.SelectionsBefore = selectionsBefore
+	w.SelectionsAfter = selectionsAfter
+
+	w.setCurrent(ast.NodeKindInlineFragment, ref)
+
+	for i := 0; i < len(w.visitors.leaveInlineFragment); {
+		if w.filter == nil || w.filter.AllowVisitor(LeaveInlineFragment, ref, w.visitors.leaveInlineFragment[i]) {
+			w.visitors.leaveInlineFragment[i].LeaveInlineFragment(ref)
+		}
+		if w.revisit {
+			w.revisit = false
+			continue
+		}
+		if w.stop {
+			return
+		}
+		if w.skip {
+			w.skip = false
+			w.decreaseDepth()
+			return
+		}
+		i++
+	}
+
+	w.decreaseDepth()
+}
+
+func (w *Walker) walkFragmentDefinition(ref int) {
+	w.increaseDepth()
+
+	w.setCurrent(ast.NodeKindFragmentDefinition, ref)
+
+	for i := 0; i < len(w.visitors.enterFragmentDefinition); {
+		if w.filter == nil || w.filter.AllowVisitor(EnterFragmentDefinition, ref, w.visitors.enterFragmentDefinition[i]) {
+			w.visitors.enterFragmentDefinition[i].EnterFragmentDefinition(ref)
+		}
+		if w.revisit {
+			w.revisit = false
+			continue
+		}
+		if w.stop {
+			return
+		}
+		if w.skip {
+			w.skip = false
+			w.decreaseDepth()
+			return
+		}
+		i++
+	}
+
+	w.appendAncestor(ref, ast.NodeKindFragmentDefinition)
+	if w.stop {
+		return
+	}
+
+	if w.document.FragmentDefinitions[ref].HasDirectives {
+		for _, i := range w.document.FragmentDefinitions[ref].Directives.Refs {
+			w.walkDirective(i)
+		}
+	}
+
+	if w.document.FragmentDefinitions[ref].HasSelections {
+		w.walkSelectionSet(w.document.FragmentDefinitions[ref].SelectionSet)
+		if w.stop {
+			return
+		}
+	}
+
+	w.removeLastAncestor()
+
+	w.setCurrent(ast.NodeKindFragmentDefinition, ref)
+
+	for i := 0; i < len(w.visitors.leaveFragmentDefinition); {
+		if w.filter == nil || w.filter.AllowVisitor(LeaveFragmentDefinition, ref, w.visitors.leaveFragmentDefinition[i]) {
+			w.visitors.leaveFragmentDefinition[i].LeaveFragmentDefinition(ref)
+		}
+		if w.revisit {
+			w.revisit = false
+			continue
+		}
+		if w.stop {
+			return
+		}
+		if w.skip {
+			w.skip = false
+			w.decreaseDepth()
+			return
+		}
+		i++
+	}
+
+	w.decreaseDepth()
+}
+
+func (w *Walker) walkObjectTypeDefinition(ref int) {
+	w.increaseDepth()
+
+	w.setCurrent(ast.NodeKindObjectTypeDefinition, ref)
+
+	for i := 0; i < len(w.visitors.enterObjectTypeDefinition); {
+		if w.filter == nil || w.filter.AllowVisitor(EnterObjectTypeDefinition, ref, w.visitors.enterObjectTypeDefinition[i]) {
+			w.visitors.enterObjectTypeDefinition[i].EnterObjectTypeDefinition(ref)
+		}
+		if w.revisit {
+			w.revisit = false
+			continue
+		}
+		if w.stop {
+			return
+		}
+		if w.skip {
+			w.skip = false
+			w.decreaseDepth()
+			return
+		}
+		i++
+	}
+
+	w.appendAncestor(ref, ast.NodeKindObjectTypeDefinition)
+	if w.stop {
+		return
+	}
+
+	if w.document.ObjectTypeDefinitions[ref].HasDirectives {
+		for _, i := range w.document.ObjectTypeDefinitions[ref].Directives.Refs {
+			w.walkDirective(i)
+			if w.stop {
+				return
+			}
+		}
+	}
+
+	if w.document.ObjectTypeDefinitions[ref].HasFieldDefinitions {
+		for _, i := range w.document.ObjectTypeDefinitions[ref].FieldsDefinition.Refs {
+			w.walkFieldDefinition(i)
+			if w.stop {
+				return
+			}
+		}
+	}
+
+	w.removeLastAncestor()
+
+	w.setCurrent(ast.NodeKindObjectTypeDefinition, ref)
+
+	for i := 0; i < len(w.visitors.leaveObjectTypeDefinition); {
+		if w.filter == nil || w.filter.AllowVisitor(LeaveObjectTypeDefinition, ref, w.visitors.leaveObjectTypeDefinition[i]) {
+			w.visitors.leaveObjectTypeDefinition[i].LeaveObjectTypeDefinition(ref)
+		}
+		if w.revisit {
+			w.revisit = false
+			continue
+		}
+		if w.stop {
+			return
+		}
+		if w.skip {
+			w.skip = false
+			w.decreaseDepth()
+			return
+		}
+		i++
+	}
+
+	w.decreaseDepth()
+}
+
+func (w *Walker) walkObjectTypeExtension(ref int) {
+	w.increaseDepth()
+
+	w.setCurrent(ast.NodeKindObjectTypeExtension, ref)
+
+	for i := 0; i < len(w.visitors.enterObjectTypeExtension); {
+		if w.filter == nil || w.filter.AllowVisitor(EnterObjectTypeExtension, ref, w.visitors.enterObjectTypeExtension[i]) {
+			w.visitors.enterObjectTypeExtension[i].EnterObjectTypeExtension(ref)
+		}
+		if w.revisit {
+			w.revisit = false
+			continue
+		}
+		if w.stop {
+			return
+		}
+		if w.skip {
+			w.skip = false
+			w.decreaseDepth()
+			return
+		}
+		i++
+	}
+
+	w.appendAncestor(ref, ast.NodeKindObjectTypeExtension)
+	if w.stop {
+		return
+	}
+
+	if w.document.ObjectTypeExtensions[ref].HasDirectives {
+		for _, i := range w.document.ObjectTypeExtensions[ref].Directives.Refs {
+			w.walkDirective(i)
+			if w.stop {
+				return
+			}
+		}
+	}
+
+	if w.document.ObjectTypeExtensions[ref].HasFieldDefinitions {
+		for _, i := range w.document.ObjectTypeExtensions[ref].FieldsDefinition.Refs {
+			w.walkFieldDefinition(i)
+			if w.stop {
+				return
+			}
+		}
+	}
+
+	w.removeLastAncestor()
+
+	w.setCurrent(ast.NodeKindObjectTypeExtension, ref)
+
+	for i := 0; i < len(w.visitors.leaveObjectTypeExtension); {
+		if w.filter == nil || w.filter.AllowVisitor(LeaveObjectTypeExtension, ref, w.visitors.leaveObjectTypeExtension[i]) {
+			w.visitors.leaveObjectTypeExtension[i].LeaveObjectTypeExtension(ref)
+		}
+		if w.revisit {
+			w.revisit = false
+			continue
+		}
+		if w.stop {
+			return
+		}
+		if w.skip {
+			w.skip = false
+			w.decreaseDepth()
+			return
+		}
+		i++
+	}
+
+	w.decreaseDepth()
+}
+
+func (w *Walker) walkFieldDefinition(ref int) {
+	w.increaseDepth()
+
+	w.setCurrent(ast.NodeKindFieldDefinition, ref)
+
+	for i := 0; i < len(w.visitors.enterFieldDefinition); {
+		if w.filter == nil || w.filter.AllowVisitor(EnterFieldDefinition, ref, w.visitors.enterFieldDefinition[i]) {
+			w.visitors.enterFieldDefinition[i].EnterFieldDefinition(ref)
+		}
+		if w.revisit {
+			w.revisit = false
+			continue
+		}
+		if w.stop {
+			return
+		}
+		if w.skip {
+			w.skip = false
+			w.decreaseDepth()
+			return
+		}
+		i++
+	}
+
+	w.appendAncestor(ref, ast.NodeKindFieldDefinition)
+	if w.stop {
+		return
+	}
+
+	if w.document.FieldDefinitions[ref].HasArgumentsDefinitions {
+		for _, i := range w.document.FieldDefinitions[ref].ArgumentsDefinition.Refs {
+			w.walkInputValueDefinition(i)
+			if w.stop {
+				return
+			}
+		}
+	}
+
+	if w.document.FieldDefinitions[ref].HasDirectives {
+		for _, i := range w.document.FieldDefinitions[ref].Directives.Refs {
+			w.walkDirective(i)
+			if w.stop {
+				return
+			}
+		}
+	}
+
+	w.removeLastAncestor()
+
+	w.setCurrent(ast.NodeKindFieldDefinition, ref)
+
+	for i := 0; i < len(w.visitors.leaveFieldDefinition); {
+		if w.filter == nil || w.filter.AllowVisitor(LeaveFieldDefinition, ref, w.visitors.leaveFieldDefinition[i]) {
+			w.visitors.leaveFieldDefinition[i].LeaveFieldDefinition(ref)
+		}
+		if w.revisit {
+			w.revisit = false
+			continue
+		}
+		if w.stop {
+			return
+		}
+		if w.skip {
+			w.skip = false
+			w.decreaseDepth()
+			return
+		}
+		i++
+	}
+
+	w.decreaseDepth()
+}
+
+func (w *Walker) walkInputValueDefinition(ref int) {
+	w.increaseDepth()
+
+	w.setCurrent(ast.NodeKindInputValueDefinition, ref)
+
+	for i := 0; i < len(w.visitors.enterInputValueDefinition); {
+		if w.filter == nil || w.filter.AllowVisitor(EnterInputValueDefinition, ref, w.visitors.enterInputValueDefinition[i]) {
+			w.visitors.enterInputValueDefinition[i].EnterInputValueDefinition(ref)
+		}
+		if w.revisit {
+			w.revisit = false
+			continue
+		}
+		if w.stop {
+			return
+		}
+		if w.skip {
+			w.skip = false
+			w.decreaseDepth()
+			return
+		}
+		i++
+	}
+
+	w.appendAncestor(ref, ast.NodeKindInputValueDefinition)
+	if w.stop {
+		return
+	}
+
+	if w.document.InputValueDefinitions[ref].HasDirectives {
+		for _, i := range w.document.InputValueDefinitions[ref].Directives.Refs {
+			w.walkDirective(i)
+			if w.stop {
+				return
+			}
+		}
+	}
+
+	w.removeLastAncestor()
+
+	w.setCurrent(ast.NodeKindInputValueDefinition, ref)
+
+	for i := 0; i < len(w.visitors.leaveInputValueDefinition); {
+		if w.filter == nil || w.filter.AllowVisitor(LeaveInputValueDefinition, ref, w.visitors.leaveInputValueDefinition[i]) {
+			w.visitors.leaveInputValueDefinition[i].LeaveInputValueDefinition(ref)
+		}
+		if w.revisit {
+			w.revisit = false
+			continue
+		}
+		if w.stop {
+			return
+		}
+		if w.skip {
+			w.skip = false
+			w.decreaseDepth()
+			return
+		}
+		i++
+	}
+
+	w.decreaseDepth()
+}
+
+func (w *Walker) walkInterfaceTypeDefinition(ref int) {
+	w.increaseDepth()
+
+	w.setCurrent(ast.NodeKindInterfaceTypeDefinition, ref)
+
+	for i := 0; i < len(w.visitors.enterInterfaceTypeDefinition); {
+		if w.filter == nil || w.filter.AllowVisitor(EnterInterfaceTypeDefinition, ref, w.visitors.enterInterfaceTypeDefinition[i]) {
+			w.visitors.enterInterfaceTypeDefinition[i].EnterInterfaceTypeDefinition(ref)
+		}
+		if w.revisit {
+			w.revisit = false
+			continue
+		}
+		if w.stop {
+			return
+		}
+		if w.skip {
+			w.skip = false
+			w.decreaseDepth()
+			return
+		}
+		i++
+	}
+
+	w.appendAncestor(ref, ast.NodeKindInterfaceTypeDefinition)
+	if w.stop {
+		return
+	}
+
+	if w.document.InterfaceTypeDefinitions[ref].HasDirectives {
+		for _, i := range w.document.InterfaceTypeDefinitions[ref].Directives.Refs {
+			w.walkDirective(i)
+			if w.stop {
+				return
+			}
+		}
+	}
+
+	if w.document.InterfaceTypeDefinitions[ref].HasFieldDefinitions {
+		for _, i := range w.document.InterfaceTypeDefinitions[ref].FieldsDefinition.Refs {
+			w.walkFieldDefinition(i)
+			if w.stop {
+				return
+			}
+		}
+	}
+
+	w.removeLastAncestor()
+
+	w.setCurrent(ast.NodeKindInterfaceTypeDefinition, ref)
+
+	for i := 0; i < len(w.visitors.leaveInterfaceTypeDefinition); {
+		if w.filter == nil || w.filter.AllowVisitor(LeaveInterfaceTypeDefinition, ref, w.visitors.leaveInterfaceTypeDefinition[i]) {
+			w.visitors.leaveInterfaceTypeDefinition[i].LeaveInterfaceTypeDefinition(ref)
+		}
+		if w.revisit {
+			w.revisit = false
+			continue
+		}
+		if w.stop {
+			return
+		}
+		if w.skip {
+			w.skip = false
+			w.decreaseDepth()
+			return
+		}
+		i++
+	}
+
+	w.decreaseDepth()
+}
+
+func (w *Walker) walkInterfaceTypeExtension(ref int) {
+	w.increaseDepth()
+
+	w.setCurrent(ast.NodeKindInterfaceTypeExtension, ref)
+
+	for i := 0; i < len(w.visitors.enterInterfaceTypeExtension); {
+		if w.filter == nil || w.filter.AllowVisitor(EnterInterfaceTypeExtension, ref, w.visitors.enterInterfaceTypeExtension[i]) {
+			w.visitors.enterInterfaceTypeExtension[i].EnterInterfaceTypeExtension(ref)
+		}
+		if w.revisit {
+			w.revisit = false
+			continue
+		}
+		if w.stop {
+			return
+		}
+		if w.skip {
+			w.skip = false
+			w.decreaseDepth()
+			return
+		}
+		i++
+	}
+
+	w.appendAncestor(ref, ast.NodeKindInterfaceTypeExtension)
+	if w.stop {
+		return
+	}
+
+	if w.document.InterfaceTypeExtensions[ref].HasDirectives {
+		for _, i := range w.document.InterfaceTypeExtensions[ref].Directives.Refs {
+			w.walkDirective(i)
+			if w.stop {
+				return
+			}
+		}
+	}
+
+	if w.document.InterfaceTypeExtensions[ref].HasFieldDefinitions {
+		for _, i := range w.document.InterfaceTypeExtensions[ref].FieldsDefinition.Refs {
+			w.walkFieldDefinition(i)
+			if w.stop {
+				return
+			}
+		}
+	}
+
+	w.removeLastAncestor()
+
+	w.setCurrent(ast.NodeKindInterfaceTypeExtension, ref)
+
+	for i := 0; i < len(w.visitors.leaveInterfaceTypeExtension); {
+		if w.filter == nil || w.filter.AllowVisitor(LeaveInterfaceTypeExtension, ref, w.visitors.leaveInterfaceTypeExtension[i]) {
+			w.visitors.leaveInterfaceTypeExtension[i].LeaveInterfaceTypeExtension(ref)
+		}
+		if w.revisit {
+			w.revisit = false
+			continue
+		}
+		if w.stop {
+			return
+		}
+		if w.skip {
+			w.skip = false
+			w.decreaseDepth()
+			return
+		}
+		i++
+	}
+
+	w.decreaseDepth()
+}
+
+func (w *Walker) walkScalarTypeDefinition(ref int) {
+	w.increaseDepth()
+
+	w.setCurrent(ast.NodeKindScalarTypeDefinition, ref)
+
+	for i := 0; i < len(w.visitors.enterScalarTypeDefinition); {
+		if w.filter == nil || w.filter.AllowVisitor(EnterScalarTypeDefinition, ref, w.visitors.enterScalarTypeDefinition[i]) {
+			w.visitors.enterScalarTypeDefinition[i].EnterScalarTypeDefinition(ref)
+		}
+		if w.revisit {
+			w.revisit = false
+			continue
+		}
+		if w.stop {
+			return
+		}
+		if w.skip {
+			w.skip = false
+			w.decreaseDepth()
+			return
+		}
+		i++
+	}
+
+	w.appendAncestor(ref, ast.NodeKindScalarTypeDefinition)
+	if w.stop {
+		return
+	}
+
+	if w.document.ScalarTypeDefinitions[ref].HasDirectives {
+		for _, i := range w.document.ScalarTypeDefinitions[ref].Directives.Refs {
+			w.walkDirective(i)
+			if w.stop {
+				return
+			}
+		}
+	}
+
+	w.removeLastAncestor()
+
+	w.setCurrent(ast.NodeKindScalarTypeDefinition, ref)
+
+	for i := 0; i < len(w.visitors.leaveScalarTypeDefinition); {
+		if w.filter == nil || w.filter.AllowVisitor(LeaveScalarTypeDefinition, ref, w.visitors.leaveScalarTypeDefinition[i]) {
+			w.visitors.leaveScalarTypeDefinition[i].LeaveScalarTypeDefinition(ref)
+		}
+		if w.revisit {
+			w.revisit = false
+			continue
+		}
+		if w.stop {
+			return
+		}
+		if w.skip {
+			w.skip = false
+			w.decreaseDepth()
+			return
+		}
+		i++
+	}
+
+	w.decreaseDepth()
+}
+
+func (w *Walker) walkScalarTypeExtension(ref int) {
+	w.increaseDepth()
+
+	w.setCurrent(ast.NodeKindScalarTypeExtension, ref)
+
+	for i := 0; i < len(w.visitors.enterScalarTypeExtension); {
+		if w.filter == nil || w.filter.AllowVisitor(EnterScalarTypeExtension, ref, w.visitors.enterScalarTypeExtension[i]) {
+			w.visitors.enterScalarTypeExtension[i].EnterScalarTypeExtension(ref)
+		}
+		if w.revisit {
+			w.revisit = false
+			continue
+		}
+		if w.stop {
+			return
+		}
+		if w.skip {
+			w.skip = false
+			w.decreaseDepth()
+			return
+		}
+		i++
+	}
+
+	w.appendAncestor(ref, ast.NodeKindScalarTypeExtension)
+	if w.stop {
+		return
+	}
+
+	if w.document.ScalarTypeExtensions[ref].HasDirectives {
+		for _, i := range w.document.ScalarTypeExtensions[ref].Directives.Refs {
+			w.walkDirective(i)
+			if w.stop {
+				return
+			}
+		}
+	}
+
+	w.removeLastAncestor()
+
+	w.setCurrent(ast.NodeKindScalarTypeExtension, ref)
+
+	for i := 0; i < len(w.visitors.leaveScalarTypeExtension); {
+		if w.filter == nil || w.filter.AllowVisitor(LeaveScalarTypeExtension, ref, w.visitors.leaveScalarTypeExtension[i]) {
+			w.visitors.leaveScalarTypeExtension[i].LeaveScalarTypeExtension(ref)
+		}
+		if w.revisit {
+			w.revisit = false
+			continue
+		}
+		if w.stop {
+			return
+		}
+		if w.skip {
+			w.skip = false
+			w.decreaseDepth()
+			return
+		}
+		i++
+	}
+
+	w.decreaseDepth()
+}
+
+func (w *Walker) walkUnionTypeDefinition(ref int) {
+	w.increaseDepth()
+
+	w.setCurrent(ast.NodeKindUnionTypeDefinition, ref)
+
+	for i := 0; i < len(w.visitors.enterUnionTypeDefinition); {
+		if w.filter == nil || w.filter.AllowVisitor(EnterUnionTypeDefinition, ref, w.visitors.enterUnionTypeDefinition[i]) {
+			w.visitors.enterUnionTypeDefinition[i].EnterUnionTypeDefinition(ref)
+		}
+		if w.revisit {
+			w.revisit = false
+			continue
+		}
+		if w.stop {
+			return
+		}
+		if w.skip {
+			w.skip = false
+			w.decreaseDepth()
+			return
+		}
+		i++
+	}
+
+	w.appendAncestor(ref, ast.NodeKindUnionTypeDefinition)
+	if w.stop {
+		return
+	}
+
+	if w.document.UnionTypeDefinitions[ref].HasDirectives {
+		for _, i := range w.document.UnionTypeDefinitions[ref].Directives.Refs {
+			w.walkDirective(i)
+			if w.stop {
+				return
+			}
+		}
+	}
+
+	if w.document.UnionTypeDefinitions[ref].HasUnionMemberTypes {
+		for _, i := range w.document.UnionTypeDefinitions[ref].UnionMemberTypes.Refs {
+			w.walkUnionMemberType(i)
+			if w.stop {
+				return
+			}
+		}
+	}
+
+	w.removeLastAncestor()
+
+	w.setCurrent(ast.NodeKindUnionTypeDefinition, ref)
+
+	for i := 0; i < len(w.visitors.leaveUnionTypeDefinition); {
+		if w.filter == nil || w.filter.AllowVisitor(LeaveUnionTypeDefinition, ref, w.visitors.leaveUnionTypeDefinition[i]) {
+			w.visitors.leaveUnionTypeDefinition[i].LeaveUnionTypeDefinition(ref)
+		}
+		if w.revisit {
+			w.revisit = false
+			continue
+		}
+		if w.stop {
+			return
+		}
+		if w.skip {
+			w.skip = false
+			w.decreaseDepth()
+			return
+		}
+		i++
+	}
+
+	w.decreaseDepth()
+}
+
+func (w *Walker) walkUnionTypeExtension(ref int) {
+	w.increaseDepth()
+
+	w.setCurrent(ast.NodeKindUnionTypeExtension, ref)
+
+	for i := 0; i < len(w.visitors.enterUnionTypeExtension); {
+		if w.filter == nil || w.filter.AllowVisitor(EnterUnionTypeExtension, ref, w.visitors.enterUnionTypeExtension[i]) {
+			w.visitors.enterUnionTypeExtension[i].EnterUnionTypeExtension(ref)
+		}
+		if w.revisit {
+			w.revisit = false
+			continue
+		}
+		if w.stop {
+			return
+		}
+		if w.skip {
+			w.skip = false
+			w.decreaseDepth()
+			return
+		}
+		i++
+	}
+
+	w.appendAncestor(ref, ast.NodeKindUnionTypeExtension)
+	if w.stop {
+		return
+	}
+
+	if w.document.UnionTypeExtensions[ref].HasDirectives {
+		for _, i := range w.document.UnionTypeExtensions[ref].Directives.Refs {
+			w.walkDirective(i)
+			if w.stop {
+				return
+			}
+		}
+	}
+
+	if w.document.UnionTypeExtensions[ref].HasUnionMemberTypes {
+		for _, i := range w.document.UnionTypeExtensions[ref].UnionMemberTypes.Refs {
+			w.walkUnionMemberType(i)
+			if w.stop {
+				return
+			}
+		}
+	}
+
+	w.removeLastAncestor()
+
+	w.setCurrent(ast.NodeKindUnionTypeExtension, ref)
+
+	for i := 0; i < len(w.visitors.leaveUnionTypeExtension); {
+		if w.filter == nil || w.filter.AllowVisitor(LeaveUnionTypeExtension, ref, w.visitors.leaveUnionTypeExtension[i]) {
+			w.visitors.leaveUnionTypeExtension[i].LeaveUnionTypeExtension(ref)
+		}
+		if w.revisit {
+			w.revisit = false
+			continue
+		}
+		if w.stop {
+			return
+		}
+		if w.skip {
+			w.skip = false
+			w.decreaseDepth()
+			return
+		}
+		i++
+	}
+
+	w.decreaseDepth()
+}
+
+func (w *Walker) walkUnionMemberType(ref int) {
+	w.increaseDepth()
+
+	w.setCurrent(ast.NodeKindUnionMemberType, ref)
+
+	for i := 0; i < len(w.visitors.enterUnionMemberType); {
+		if w.filter == nil || w.filter.AllowVisitor(EnterUnionMemberType, ref, w.visitors.enterUnionMemberType[i]) {
+			w.visitors.enterUnionMemberType[i].EnterUnionMemberType(ref)
+		}
+		if w.revisit {
+			w.revisit = false
+			continue
+		}
+		if w.stop {
+			return
+		}
+		if w.skip {
+			w.skip = false
+			w.decreaseDepth()
+			return
+		}
+		i++
+	}
+
+	for i := 0; i < len(w.visitors.leaveUnionMemberType); {
+		if w.filter == nil || w.filter.AllowVisitor(LeaveUnionMemberType, ref, w.visitors.leaveUnionMemberType[i]) {
+			w.visitors.leaveUnionMemberType[i].LeaveUnionMemberType(ref)
+		}
+		if w.revisit {
+			w.revisit = false
+			continue
+		}
+		if w.stop {
+			return
+		}
+		if w.skip {
+			w.skip = false
+			w.decreaseDepth()
+			return
+		}
+		i++
+	}
+
+	w.decreaseDepth()
+}
+
+func (w *Walker) walkEnumTypeDefinition(ref int) {
+	w.increaseDepth()
+
+	w.setCurrent(ast.NodeKindEnumTypeDefinition, ref)
+
+	for i := 0; i < len(w.visitors.enterEnumTypeDefinition); {
+		if w.filter == nil || w.filter.AllowVisitor(EnterEnumTypeDefinition, ref, w.visitors.enterEnumTypeDefinition[i]) {
+			w.visitors.enterEnumTypeDefinition[i].EnterEnumTypeDefinition(ref)
+		}
+		if w.revisit {
+			w.revisit = false
+			continue
+		}
+		if w.stop {
+			return
+		}
+		if w.skip {
+			w.skip = false
+			w.decreaseDepth()
+			return
+		}
+		i++
+	}
+
+	w.appendAncestor(ref, ast.NodeKindEnumTypeDefinition)
+	if w.stop {
+		return
+	}
+
+	if w.document.EnumTypeDefinitions[ref].HasDirectives {
+		for _, i := range w.document.EnumTypeDefinitions[ref].Directives.Refs {
+			w.walkDirective(i)
+			if w.stop {
+				return
+			}
+		}
+	}
+
+	if w.document.EnumTypeDefinitions[ref].HasEnumValuesDefinition {
+		for _, i := range w.document.EnumTypeDefinitions[ref].EnumValuesDefinition.Refs {
+			w.walkEnumValueDefinition(i)
+			if w.stop {
+				return
+			}
+		}
+	}
+
+	w.removeLastAncestor()
+
+	w.setCurrent(ast.NodeKindEnumTypeDefinition, ref)
+
+	for i := 0; i < len(w.visitors.leaveEnumTypeDefinition); {
+		if w.filter == nil || w.filter.AllowVisitor(LeaveEnumTypeDefinition, ref, w.visitors.leaveEnumTypeDefinition[i]) {
+			w.visitors.leaveEnumTypeDefinition[i].LeaveEnumTypeDefinition(ref)
+		}
+		if w.revisit {
+			w.revisit = false
+			continue
+		}
+		if w.stop {
+			return
+		}
+		if w.skip {
+			w.skip = false
+			w.decreaseDepth()
+			return
+		}
+		i++
+	}
+
+	w.decreaseDepth()
+}
+
+func (w *Walker) walkEnumTypeExtension(ref int) {
+	w.increaseDepth()
+
+	w.setCurrent(ast.NodeKindEnumTypeExtension, ref)
+
+	for i := 0; i < len(w.visitors.enterEnumTypeExtension); {
+		if w.filter == nil || w.filter.AllowVisitor(EnterEnumTypeExtension, ref, w.visitors.enterEnumTypeExtension[i]) {
+			w.visitors.enterEnumTypeExtension[i].EnterEnumTypeExtension(ref)
+		}
+		if w.revisit {
+			w.revisit = false
+			continue
+		}
+		if w.stop {
+			return
+		}
+		if w.skip {
+			w.skip = false
+			w.decreaseDepth()
+			return
+		}
+		i++
+	}
+
+	w.appendAncestor(ref, ast.NodeKindEnumTypeExtension)
+	if w.stop {
+		return
+	}
+
+	if w.document.EnumTypeExtensions[ref].HasDirectives {
+		for _, i := range w.document.EnumTypeExtensions[ref].Directives.Refs {
+			w.walkDirective(i)
+			if w.stop {
+				return
+			}
+		}
+	}
+
+	if w.document.EnumTypeExtensions[ref].HasEnumValuesDefinition {
+		for _, i := range w.document.EnumTypeExtensions[ref].EnumValuesDefinition.Refs {
+			w.walkEnumValueDefinition(i)
+			if w.stop {
+				return
+			}
+		}
+	}
+
+	w.removeLastAncestor()
+
+	w.setCurrent(ast.NodeKindEnumTypeExtension, ref)
+
+	for i := 0; i < len(w.visitors.leaveEnumTypeExtension); {
+		if w.filter == nil || w.filter.AllowVisitor(LeaveEnumTypeExtension, ref, w.visitors.leaveEnumTypeExtension[i]) {
+			w.visitors.leaveEnumTypeExtension[i].LeaveEnumTypeExtension(ref)
+		}
+		if w.revisit {
+			w.revisit = false
+			continue
+		}
+		if w.stop {
+			return
+		}
+		if w.skip {
+			w.skip = false
+			w.decreaseDepth()
+			return
+		}
+		i++
+	}
+
+	w.decreaseDepth()
+}
+
+func (w *Walker) walkEnumValueDefinition(ref int) {
+	w.increaseDepth()
+
+	w.setCurrent(ast.NodeKindEnumValueDefinition, ref)
+
+	for i := 0; i < len(w.visitors.enterEnumValueDefinition); {
+		if w.filter == nil || w.filter.AllowVisitor(EnterEnumValueDefinition, ref, w.visitors.enterEnumValueDefinition[i]) {
+			w.visitors.enterEnumValueDefinition[i].EnterEnumValueDefinition(ref)
+		}
+		if w.revisit {
+			w.revisit = false
+			continue
+		}
+		if w.stop {
+			return
+		}
+		if w.skip {
+			w.skip = false
+			w.decreaseDepth()
+			return
+		}
+		i++
+	}
+
+	w.appendAncestor(ref, ast.NodeKindEnumValueDefinition)
+	if w.stop {
+		return
+	}
+
+	if w.document.EnumValueDefinitions[ref].HasDirectives {
+		for _, i := range w.document.EnumValueDefinitions[ref].Directives.Refs {
+			w.walkDirective(i)
+			if w.stop {
+				return
+			}
+		}
+	}
+
+	w.removeLastAncestor()
+
+	w.setCurrent(ast.NodeKindEnumValueDefinition, ref)
+
+	for i := 0; i < len(w.visitors.leaveEnumValueDefinition); {
+		if w.filter == nil || w.filter.AllowVisitor(LeaveEnumValueDefinition, ref, w.visitors.leaveEnumValueDefinition[i]) {
+			w.visitors.leaveEnumValueDefinition[i].LeaveEnumValueDefinition(ref)
+		}
+		if w.revisit {
+			w.revisit = false
+			continue
+		}
+		if w.stop {
+			return
+		}
+		if w.skip {
+			w.skip = false
+			w.decreaseDepth()
+			return
+		}
+		i++
+	}
+
+	w.decreaseDepth()
+}
+
+func (w *Walker) walkInputObjectTypeDefinition(ref int) {
+	w.increaseDepth()
+
+	w.setCurrent(ast.NodeKindInputObjectTypeDefinition, ref)
+
+	for i := 0; i < len(w.visitors.enterInputObjectTypeDefinition); {
+		if w.filter == nil || w.filter.AllowVisitor(EnterInputObjectTypeDefinition, ref, w.visitors.enterInputObjectTypeDefinition[i]) {
+			w.visitors.enterInputObjectTypeDefinition[i].EnterInputObjectTypeDefinition(ref)
+		}
+		if w.revisit {
+			w.revisit = false
+			continue
+		}
+		if w.stop {
+			return
+		}
+		if w.skip {
+			w.skip = false
+			w.decreaseDepth()
+			return
+		}
+		i++
+	}
+
+	w.appendAncestor(ref, ast.NodeKindInputObjectTypeDefinition)
+	if w.stop {
+		return
+	}
+
+	if w.document.InputObjectTypeDefinitions[ref].HasDirectives {
+		for _, i := range w.document.InputObjectTypeDefinitions[ref].Directives.Refs {
+			w.walkDirective(i)
+			if w.stop {
+				return
+			}
+		}
+	}
+
+	if w.document.InputObjectTypeDefinitions[ref].HasInputFieldsDefinition {
+		for _, i := range w.document.InputObjectTypeDefinitions[ref].InputFieldsDefinition.Refs {
+			w.walkInputValueDefinition(i)
+			if w.stop {
+				return
+			}
+		}
+	}
+
+	w.removeLastAncestor()
+
+	w.setCurrent(ast.NodeKindInputObjectTypeDefinition, ref)
+
+	for i := 0; i < len(w.visitors.leaveInputObjectTypeDefinition); {
+		if w.filter == nil || w.filter.AllowVisitor(LeaveInputObjectTypeDefinition, ref, w.visitors.leaveInputObjectTypeDefinition[i]) {
+			w.visitors.leaveInputObjectTypeDefinition[i].LeaveInputObjectTypeDefinition(ref)
+		}
+		if w.revisit {
+			w.revisit = false
+			continue
+		}
+		if w.stop {
+			return
+		}
+		if w.skip {
+			w.skip = false
+			w.decreaseDepth()
+			return
+		}
+		i++
+	}
+
+	w.decreaseDepth()
+}
+
+func (w *Walker) walkInputObjectTypeExtension(ref int) {
+	w.increaseDepth()
+
+	w.setCurrent(ast.NodeKindInputObjectTypeExtension, ref)
+
+	for i := 0; i < len(w.visitors.enterInputObjectTypeExtension); {
+		if w.filter == nil || w.filter.AllowVisitor(EnterInputObjectTypeExtension, ref, w.visitors.enterInputObjectTypeExtension[i]) {
+			w.visitors.enterInputObjectTypeExtension[i].EnterInputObjectTypeExtension(ref)
+		}
+		if w.revisit {
+			w.revisit = false
+			continue
+		}
+		if w.stop {
+			return
+		}
+		if w.skip {
+			w.skip = false
+			w.decreaseDepth()
+			return
+		}
+		i++
+	}
+
+	w.appendAncestor(ref, ast.NodeKindInputObjectTypeExtension)
+	if w.stop {
+		return
+	}
+
+	if w.document.InputObjectTypeExtensions[ref].HasDirectives {
+		for _, i := range w.document.InputObjectTypeExtensions[ref].Directives.Refs {
+			w.walkDirective(i)
+			if w.stop {
+				return
+			}
+		}
+	}
+
+	if w.document.InputObjectTypeExtensions[ref].HasInputFieldsDefinition {
+		for _, i := range w.document.InputObjectTypeExtensions[ref].InputFieldsDefinition.Refs {
+			w.walkInputValueDefinition(i)
+			if w.stop {
+				return
+			}
+		}
+	}
+
+	w.removeLastAncestor()
+
+	w.setCurrent(ast.NodeKindInputObjectTypeExtension, ref)
+
+	for i := 0; i < len(w.visitors.leaveInputObjectTypeExtension); {
+		if w.filter == nil || w.filter.AllowVisitor(LeaveInputObjectTypeExtension, ref, w.visitors.leaveInputObjectTypeExtension[i]) {
+			w.visitors.leaveInputObjectTypeExtension[i].LeaveInputObjectTypeExtension(ref)
+		}
+		if w.revisit {
+			w.revisit = false
+			continue
+		}
+		if w.stop {
+			return
+		}
+		if w.skip {
+			w.skip = false
+			w.decreaseDepth()
+			return
+		}
+		i++
+	}
+
+	w.decreaseDepth()
+}
+
w.visitors.leaveInputObjectTypeExtension[i]) { + w.visitors.leaveInputObjectTypeExtension[i].LeaveInputObjectTypeExtension(ref) + } + if w.revisit { + w.revisit = false + continue + } + if w.stop { + return + } + if w.skip { + w.skip = false + w.decreaseDepth() + return + } + i++ + } + + w.decreaseDepth() +} + +func (w *Walker) walkDirectiveDefinition(ref int) { + w.increaseDepth() + + w.setCurrent(ast.NodeKindDirectiveDefinition, ref) + + for i := 0; i < len(w.visitors.enterDirectiveDefinition); { + if w.filter == nil || w.filter.AllowVisitor(EnterDirectiveDefinition, ref, w.visitors.enterDirectiveDefinition[i]) { + w.visitors.enterDirectiveDefinition[i].EnterDirectiveDefinition(ref) + } + if w.revisit { + w.revisit = false + continue + } + if w.stop { + return + } + if w.skip { + w.skip = false + w.decreaseDepth() + return + } + i++ + } + + w.appendAncestor(ref, ast.NodeKindDirectiveDefinition) + if w.stop { + return + } + + if w.document.DirectiveDefinitions[ref].HasArgumentsDefinitions { + for _, i := range w.document.DirectiveDefinitions[ref].ArgumentsDefinition.Refs { + w.walkInputValueDefinition(i) + if w.stop { + return + } + } + } + + iter := w.document.DirectiveDefinitions[ref].DirectiveLocations.Iterable() + for iter.Next() { + w.walkDirectiveLocation(iter.Value()) + } + + w.removeLastAncestor() + + w.setCurrent(ast.NodeKindDirectiveDefinition, ref) + + for i := 0; i < len(w.visitors.leaveDirectiveDefinition); { + if w.filter == nil || w.filter.AllowVisitor(LeaveDirectiveDefinition, ref, w.visitors.leaveDirectiveDefinition[i]) { + w.visitors.leaveDirectiveDefinition[i].LeaveDirectiveDefinition(ref) + } + if w.revisit { + w.revisit = false + continue + } + if w.stop { + return + } + if w.skip { + w.skip = false + w.decreaseDepth() + return + } + i++ + } + + w.decreaseDepth() +} + +func (w *Walker) walkDirectiveLocation(location ast.DirectiveLocation) { + w.increaseDepth() + + for i := 0; i < len(w.visitors.enterDirectiveLocation); { + if w.filter == nil || w.filter.AllowVisitor(EnterDirectiveLocation, 0, w.visitors.enterDirectiveLocation[i]) { + w.visitors.enterDirectiveLocation[i].EnterDirectiveLocation(location) + } + if w.revisit { + w.revisit = false + continue + } + if w.stop { + return + } + if w.skip { + w.skip = false + w.decreaseDepth() + return + } + i++ + } + + for i := 0; i < len(w.visitors.leaveDirectiveLocation); { + if w.filter == nil || w.filter.AllowVisitor(LeaveDirectiveLocation, 0, w.visitors.leaveDirectiveLocation[i]) { + w.visitors.leaveDirectiveLocation[i].LeaveDirectiveLocation(location) + } + if w.revisit { + w.revisit = false + continue + } + if w.stop { + return + } + if w.skip { + w.skip = false + w.decreaseDepth() + return + } + i++ + } + + w.decreaseDepth() +} + +func (w *Walker) walkSchemaDefinition(ref int) { + w.increaseDepth() + + w.setCurrent(ast.NodeKindSchemaDefinition, ref) + + for i := 0; i < len(w.visitors.enterSchemaDefinition); { + if w.filter == nil || w.filter.AllowVisitor(EnterSchemaDefinition, ref, w.visitors.enterSchemaDefinition[i]) { + w.visitors.enterSchemaDefinition[i].EnterSchemaDefinition(ref) + } + if w.revisit { + w.revisit = false + continue + } + if w.stop { + return + } + if w.skip { + w.skip = false + w.decreaseDepth() + return + } + i++ + } + + w.appendAncestor(ref, ast.NodeKindSchemaDefinition) + if w.stop { + return + } + + if w.document.SchemaDefinitions[ref].HasDirectives { + for _, i := range w.document.SchemaDefinitions[ref].Directives.Refs { + w.walkDirective(i) + if w.stop { + return + } + } + } + + for _, i := 
range w.document.SchemaDefinitions[ref].RootOperationTypeDefinitions.Refs { + w.walkRootOperationTypeDefinition(i) + if w.stop { + return + } + } + + w.removeLastAncestor() + + w.setCurrent(ast.NodeKindSchemaDefinition, ref) + + for i := 0; i < len(w.visitors.leaveSchemaDefinition); { + if w.filter == nil || w.filter.AllowVisitor(LeaveSchemaDefinition, ref, w.visitors.leaveSchemaDefinition[i]) { + w.visitors.leaveSchemaDefinition[i].LeaveSchemaDefinition(ref) + } + if w.revisit { + w.revisit = false + continue + } + if w.stop { + return + } + if w.skip { + w.skip = false + w.decreaseDepth() + return + } + i++ + } + + w.decreaseDepth() +} + +func (w *Walker) walkSchemaExtension(ref int) { + w.increaseDepth() + + w.setCurrent(ast.NodeKindSchemaExtension, ref) + + for i := 0; i < len(w.visitors.enterSchemaExtension); { + if w.filter == nil || w.filter.AllowVisitor(EnterSchemaExtension, ref, w.visitors.enterSchemaExtension[i]) { + w.visitors.enterSchemaExtension[i].EnterSchemaExtension(ref) + } + if w.revisit { + w.revisit = false + continue + } + if w.stop { + return + } + if w.skip { + w.skip = false + w.decreaseDepth() + return + } + i++ + } + + w.appendAncestor(ref, ast.NodeKindSchemaExtension) + if w.stop { + return + } + + if w.document.SchemaExtensions[ref].HasDirectives { + for _, i := range w.document.SchemaExtensions[ref].Directives.Refs { + w.walkDirective(i) + if w.stop { + return + } + } + } + + for _, i := range w.document.SchemaExtensions[ref].RootOperationTypeDefinitions.Refs { + w.walkRootOperationTypeDefinition(i) + if w.stop { + return + } + } + + w.removeLastAncestor() + + w.setCurrent(ast.NodeKindSchemaExtension, ref) + + for i := 0; i < len(w.visitors.leaveSchemaExtension); { + if w.filter == nil || w.filter.AllowVisitor(LeaveSchemaExtension, ref, w.visitors.leaveSchemaExtension[i]) { + w.visitors.leaveSchemaExtension[i].LeaveSchemaExtension(ref) + } + if w.revisit { + w.revisit = false + continue + } + if w.stop { + return + } + if w.skip { + w.skip = false + w.decreaseDepth() + return + } + i++ + } + + w.decreaseDepth() +} + +func (w *Walker) walkRootOperationTypeDefinition(ref int) { + w.increaseDepth() + + for i := 0; i < len(w.visitors.enterRootOperationTypeDefinition); { + if w.filter == nil || w.filter.AllowVisitor(EnterRootOperationTypeDefinition, ref, w.visitors.enterRootOperationTypeDefinition[i]) { + w.visitors.enterRootOperationTypeDefinition[i].EnterRootOperationTypeDefinition(ref) + } + if w.revisit { + w.revisit = false + continue + } + if w.stop { + return + } + if w.skip { + w.skip = false + w.decreaseDepth() + return + } + i++ + } + + for i := 0; i < len(w.visitors.leaveRootOperationTypeDefinition); { + if w.filter == nil || w.filter.AllowVisitor(LeaveRootOperationTypeDefinition, ref, w.visitors.leaveRootOperationTypeDefinition[i]) { + w.visitors.leaveRootOperationTypeDefinition[i].LeaveRootOperationTypeDefinition(ref) + } + if w.revisit { + w.revisit = false + continue + } + if w.stop { + return + } + if w.skip { + w.skip = false + w.decreaseDepth() + return + } + i++ + } + + w.decreaseDepth() +} + +func (w *Walker) refsEqual(left, right []int) bool { + if len(left) != len(right) { + return false + } + for i := range left { + if left[i] != right[i] { + return false + } + } + return true +} + +func (w *Walker) SkipNode() { + w.skip = true +} + +func (w *Walker) Stop() { + w.stop = true +} + +func (w *Walker) RevisitNode() { + w.revisit = true +} + +func (w *Walker) StopWithInternalErr(err error) { + w.stop = true + w.Report.AddInternalError(err) +} + 
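+// Illustrative sketch (an editor's addition, not part of the upstream file):
+// a minimal visitor that uses the control methods above from inside a
+// callback. It relies only on SkipNode and Stop, defined above, and on the
+// EnterEnumTypeDefinition hook invoked by walkEnumTypeDefinition.
+type exampleEnumSkipper struct {
+	walker *Walker
+	count  int
+}
+
+func (e *exampleEnumSkipper) EnterEnumTypeDefinition(ref int) {
+	e.count++
+	if e.count > 10 {
+		// Abort the entire walk after ten enum definitions.
+		e.walker.Stop()
+		return
+	}
+	// Skip this enum's directives and values, but keep walking siblings.
+	e.walker.SkipNode()
+}
+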
+func (w *Walker) HandleInternalErr(err error) bool {
+	if err != nil {
+		w.StopWithInternalErr(err)
+		return true
+	}
+	return false
+}
+
+func (w *Walker) StopWithExternalErr(err operationreport.ExternalError) {
+	w.stop = true
+	err.Path = w.Path
+	w.Report.AddExternalError(err)
+}
+
+func (w *Walker) StopWithErr(internal error, external operationreport.ExternalError) {
+	w.stop = true
+	external.Path = w.Path
+	w.Report.AddInternalError(internal)
+	w.Report.AddExternalError(external)
+}
+
+func (w *Walker) ArgumentInputValueDefinition(argument int) (definition int, exists bool) {
+	argumentName := w.document.ArgumentNameBytes(argument)
+	ancestor := w.Ancestors[len(w.Ancestors)-1]
+	switch ancestor.Kind {
+	case ast.NodeKindField:
+		fieldName := w.document.FieldNameBytes(ancestor.Ref)
+		fieldTypeDef := w.typeDefinitions[len(w.typeDefinitions)-2]
+		definition = w.definition.NodeFieldDefinitionArgumentDefinitionByName(fieldTypeDef, fieldName, argumentName)
+		exists = definition != -1
+	case ast.NodeKindDirective:
+		directiveName := w.document.DirectiveNameBytes(ancestor.Ref)
+		definition = w.definition.DirectiveArgumentInputValueDefinition(directiveName, argumentName)
+		exists = definition != -1
+	}
+	return
+}
+
+// FieldDefinitionWithExists
+// Deprecated: use FieldDefinition
+func (w *Walker) FieldDefinitionWithExists(field int) (definition int, exists bool) {
+	return w.FieldDefinition(field)
+}
+
+// FieldDefinition returns the field definition ref from the schema definition.
+// field - ref to a field from the operation
+// returns:
+// definition - ref to a field definition from the schema definition document if it exists, otherwise ast.InvalidRef
+// exists - true if the field exists
+func (w *Walker) FieldDefinition(field int) (definition int, exists bool) {
+	fieldName := w.document.FieldNameBytes(field)
+	return w.definition.NodeFieldDefinitionByName(w.EnclosingTypeDefinition, fieldName)
+}
+
+func (w *Walker) Ancestor() ast.Node {
+	if len(w.Ancestors) == 0 {
+		return ast.InvalidNode
+	}
+	return w.Ancestors[len(w.Ancestors)-1]
+}
+
+func (w *Walker) AncestorNameBytes() ast.ByteSlice {
+	if len(w.Ancestors) == 0 {
+		return nil
+	}
+	return w.document.NodeNameBytes(w.Ancestors[len(w.Ancestors)-1])
+}
+
+func (w *Walker) FieldDefinitionDirectiveArgumentValueByName(field int, directiveName, argumentName ast.ByteSlice) (ast.Value, bool) {
+	definition, exists := w.FieldDefinition(field)
+	if !exists {
+		return ast.Value{}, false
+	}
+
+	directive, exists := w.definition.FieldDefinitionDirectiveByName(definition, directiveName)
+	if !exists {
+		return ast.Value{}, false
+	}
+
+	return w.definition.DirectiveArgumentValueByName(directive, argumentName)
+}
diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/asyncapi/asyncapi.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/asyncapi/asyncapi.go
new file mode 100644
index 00000000000..8595ae6048a
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/asyncapi/asyncapi.go
@@ -0,0 +1,655 @@
+package asyncapi
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"strconv"
+
+	"github.com/asyncapi/parser-go/pkg/parser"
+	"github.com/buger/jsonparser"
+	"github.com/iancoleman/strcase"
+)
+
+type ChannelItemKind string
+
+const (
+	ChannelItemKindSubscription ChannelItemKind = "subscription"
+	// ChannelItemKindPublish ChannelItemKind = "publish"
+)
+
+const (
+	ChannelsKey   = "channels"
+	SubscribeKey  = "subscribe"
+	MessageKey    = "message"
+	PayloadKey    = "payload"
+	PropertiesKey = "properties"
+	EnumKey       = "enum"
ServersKey = "servers" + URLKey = "url" + ProtocolKey = "protocol" + ProtocolVersionKey = "protocolVersion" + DescriptionKey = "description" + NameKey = "name" + TitleKey = "title" + SummaryKey = "summary" + TypeKey = "type" + FormatKey = "format" + MinimumKey = "minimum" + MaximumKey = "maximum" + OperationIDKey = "operationId" + SecurityKey = "security" + BindingsKey = "bindings" + KafkaKey = "kafka" + TraitsKey = "traits" + ParametersKey = "parameters" + SchemaKey = "schema" + InfoKey = "info" + VersionKey = "version" +) + +type AsyncAPI struct { + Info *Info + Channels map[string]*ChannelItem + Servers map[string]*Server +} + +type Info struct { + Title string + Version string +} + +type SecurityRequirement struct { + Requirements map[string][]string +} + +type Binding struct { + Value []byte + ValueType jsonparser.ValueType +} + +// Server object is defined here: +// https://www.asyncapi.com/docs/reference/specification/v2.4.0#serverObject +type Server struct { + URL string + Protocol string + ProtocolVersion string + Description string + Security []*SecurityRequirement + Bindings map[string]map[string]*Binding +} + +// OperationTrait object is defined here: +// https://www.asyncapi.com/docs/reference/specification/v2.4.0#operationTraitObject +type OperationTrait struct { + Bindings map[string]map[string]*Binding +} + +// ChannelItem object is defined here: +// https://www.asyncapi.com/docs/reference/specification/v2.4.0#channelItemObject +type ChannelItem struct { + Kind ChannelItemKind + Message *Message + Parameters map[string]string + OperationID string + Traits []*OperationTrait + Servers []string +} + +type Enum struct { + Value []byte + ValueType jsonparser.ValueType +} + +// Property object is derived from Schema object. +// https://www.asyncapi.com/docs/reference/specification/v2.4.0#schemaObject +type Property struct { + Description string + Minimum int + Maximum int + Type string + Format string + Enum []*Enum +} + +// Payload is definition of the message payload. It can be of any type but defaults to Schema object. +// It must match the schema format, including encoding type - e.g Avro should be inlined as +// either a YAML or JSON object NOT a string to be parsed as YAML or JSON. 
+type Payload struct { + Type string + Properties map[string]*Property +} + +// Message object is defined here: +// https://www.asyncapi.com/docs/reference/specification/v2.4.0#messageObject +type Message struct { + Name string + Summary string + Title string + Description string + Payload *Payload +} + +type walker struct { + document *bytes.Buffer + asyncapi *AsyncAPI +} + +func extractStringArray(key string, data []byte) ([]string, error) { + var result []string + _, err := jsonparser.ArrayEach(data, func(value []byte, dataType jsonparser.ValueType, _ int, _ error) { + result = append(result, string(value)) + }, key) + if err != nil { + return nil, err + } + return result, nil +} + +func extractString(key string, data []byte) (string, error) { + value, dataType, _, err := jsonparser.Get(data, key) + if err != nil { + return "", err + } + if dataType != jsonparser.String { + return "", fmt.Errorf("key: %s has to be a string", key) + } + return string(value), nil +} + +func extractInteger(key string, data []byte) (int, error) { + value, dataType, _, err := jsonparser.Get(data, key) + if err != nil { + return 0, err + } + if dataType != jsonparser.Number { + return 0, fmt.Errorf("key: %s has to be a number", key) + } + return strconv.Atoi(string(value)) +} + +func (w *walker) enterPropertyObject(channelName, key string, data []byte) error { + property := &Property{} + // Not mandatory + description, err := extractString(DescriptionKey, data) + if err == nil { + property.Description = description + } + + // Not mandatory + format, err := extractString(FormatKey, data) + if err == nil { + property.Format = format + } + + // Mandatory + tpe, err := extractString(TypeKey, data) + if errors.Is(err, jsonparser.KeyPathNotFoundError) { + return fmt.Errorf("property: %s is required in %s, channel: %s", TypeKey, key, channelName) + } + if err != nil { + return err + } + property.Type = tpe + + // Not mandatory + minimum, err := extractInteger(MinimumKey, data) + if err == nil { + property.Minimum = minimum + } + + // Not mandatory + maximum, err := extractInteger(MaximumKey, data) + if err == nil { + property.Maximum = maximum + } + + // Not mandatory + _, err = jsonparser.ArrayEach(data, func(enumValue []byte, dataType jsonparser.ValueType, _ int, err error) { + property.Enum = append(property.Enum, &Enum{ + Value: enumValue, + ValueType: dataType, + }) + }, EnumKey) + if errors.Is(err, jsonparser.KeyPathNotFoundError) { + err = nil + } + if err != nil { + return err + } + + channelItem, ok := w.asyncapi.Channels[channelName] + if !ok { + return fmt.Errorf("channel: %s is missing", channelName) + } + // Field names should use camelCase. Many GraphQL clients are written in JavaScript, Java, Kotlin, or Swift, + // all of which recommend camelCase for variable names. 
+	channelItem.Message.Payload.Properties[strcase.ToLowerCamel(string(key))] = property
+	return nil
+}
+
+func (w *walker) enterPropertiesObject(channelName string, data []byte) error {
+	propertiesValue, dataType, _, err := jsonparser.Get(data, PropertiesKey)
+	if errors.Is(err, jsonparser.KeyPathNotFoundError) {
+		return fmt.Errorf("key: %s is missing", PropertiesKey)
+	}
+	if dataType != jsonparser.Object {
+		return fmt.Errorf("key: %s has to be a JSON object", PropertiesKey)
+	}
+
+	return jsonparser.ObjectEach(propertiesValue, func(key []byte, value []byte, dataType jsonparser.ValueType, _ int) error {
+		return w.enterPropertyObject(channelName, string(key), value)
+	})
+}
+
+func (w *walker) enterPayloadObject(channelName string, data []byte) error {
+	payload, dataType, _, err := jsonparser.Get(data, PayloadKey)
+	if errors.Is(err, jsonparser.KeyPathNotFoundError) {
+		return fmt.Errorf("key: %s is missing", PayloadKey)
+	}
+	if dataType != jsonparser.Object {
+		return fmt.Errorf("key: %s has to be a JSON object", PayloadKey)
+	}
+
+	p := &Payload{Properties: make(map[string]*Property)}
+	typeValue, err := extractString(TypeKey, payload)
+	if err == nil {
+		p.Type = typeValue
+	}
+
+	channel, ok := w.asyncapi.Channels[channelName]
+	if !ok {
+		return fmt.Errorf("channel: %s is missing", channelName)
+	}
+	channel.Message.Payload = p
+	return w.enterPropertiesObject(channelName, payload)
+}
+
+func (w *walker) enterMessageObject(channelName string, data []byte) error {
+	msg := &Message{}
+	name, err := extractString(NameKey, data)
+	if errors.Is(err, jsonparser.KeyPathNotFoundError) {
+		name = channelName
+		err = nil
+	}
+	if err != nil {
+		return err
+	}
+	msg.Name = name
+
+	summary, err := extractString(SummaryKey, data)
+	if err == nil {
+		msg.Summary = summary
+	}
+
+	title, err := extractString(TitleKey, data)
+	if err == nil {
+		msg.Title = title
+	}
+
+	description, err := extractString(DescriptionKey, data)
+	if err == nil {
+		msg.Description = description
+	}
+	channel, ok := w.asyncapi.Channels[channelName]
+	if !ok {
+		return fmt.Errorf("channel: %s is missing", channelName)
+	}
+	channel.Message = msg
+	return w.enterPayloadObject(channelName, data)
+}
+
+func (w *walker) enterOperationTraitsObject(channelName string, data []byte) error {
+	// Not mandatory
+	traitsValue, dataType, _, err := jsonparser.Get(data, TraitsKey)
+	if errors.Is(err, jsonparser.KeyPathNotFoundError) {
+		return nil
+	}
+	if dataType != jsonparser.Array {
+		return errors.New("traits has to be an array")
+	}
+	opt := &OperationTrait{
+		Bindings: make(map[string]map[string]*Binding),
+	}
+
+	var bindingValues [][]byte
+	_, err = jsonparser.ArrayEach(traitsValue, func(bindingValue []byte, dataType jsonparser.ValueType, offset int, err error) {
+		bindingValues = append(bindingValues, bindingValue)
+	})
+	if err != nil {
+		return err
+	}
+
+	for _, bindingValue := range bindingValues {
+		kafkaValue, _, _, err := jsonparser.Get(bindingValue, BindingsKey, KafkaKey)
+		if errors.Is(err, jsonparser.KeyPathNotFoundError) {
+			return nil
+		}
+
+		err = jsonparser.ObjectEach(kafkaValue, func(key []byte, kafkaBindingItemValue []byte, dataType jsonparser.ValueType, _ int) error {
+			if dataType != jsonparser.String {
+				// Currently, we only support String values.
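+				// For example, an illustrative trait (the field name is
+				// hypothetical): {"bindings": {"kafka": {"groupId": "my-group"}}}
+				// keeps "my-group" verbatim; object or numeric binding
+				// values are dropped here.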
+ return nil + } + b := &Binding{ + Value: kafkaBindingItemValue, + ValueType: dataType, + } + _, ok := opt.Bindings[KafkaKey] + if !ok { + opt.Bindings[KafkaKey] = make(map[string]*Binding) + } + opt.Bindings[KafkaKey][string(key)] = b + return nil + }) + if err != nil { + return err + } + } + + channel, ok := w.asyncapi.Channels[channelName] + if !ok { + return fmt.Errorf("channel: %s is missing", channelName) + } + channel.Traits = append(channel.Traits, opt) + return nil +} + +func (w *walker) enterParametersObject(channelItem *ChannelItem, data []byte) error { + // Not mandatory + parametersValue, _, _, err := jsonparser.Get(data, ParametersKey) + if errors.Is(err, jsonparser.KeyPathNotFoundError) { + return nil + } + if err != nil { + return err + } + return jsonparser.ObjectEach(parametersValue, func(parameterName []byte, parameterValue []byte, _ jsonparser.ValueType, _ int) error { + parameterType, _, _, perr := jsonparser.Get(parameterValue, SchemaKey, TypeKey) + if perr != nil { + return perr + } + channelItem.Parameters[string(parameterName)] = string(parameterType) + return nil + }) +} + +func (w *walker) enterChannelItemObject(channelName string, data []byte) error { + subscribeValue, dataType, _, err := jsonparser.Get(data, SubscribeKey) + if errors.Is(err, jsonparser.KeyPathNotFoundError) { + return nil + } + if err != nil { + return err + } + + if dataType != jsonparser.Object { + return fmt.Errorf("%s has to be a JSON object", SubscribeKey) + } + + messageValue, dataType, _, err := jsonparser.Get(subscribeValue, MessageKey) + if errors.Is(err, jsonparser.KeyPathNotFoundError) { + return fmt.Errorf("message item is missing in channel: %s", channelName) + } + if err != nil { + return err + } + + if dataType != jsonparser.Object { + return fmt.Errorf("%s has to be a JSON object", MessageKey) + } + + operationID, err := extractString(OperationIDKey, subscribeValue) + if errors.Is(err, jsonparser.KeyPathNotFoundError) { + return fmt.Errorf("key: %s is required in channel: %s", OperationIDKey, channelName) + } + if err != nil { + return err + } + + // Not mandatory + servers, err := extractStringArray(ServersKey, data) + if errors.Is(err, jsonparser.KeyPathNotFoundError) { + err = nil + } + if err != nil { + return err + } + + channelItem := &ChannelItem{ + Kind: ChannelItemKindSubscription, + OperationID: operationID, + Servers: servers, + Parameters: make(map[string]string), + } + + err = w.enterParametersObject(channelItem, data) + if err != nil { + return err + } + + w.asyncapi.Channels[channelName] = channelItem + + err = w.enterOperationTraitsObject(channelName, subscribeValue) + if err != nil { + return err + } + + return w.enterMessageObject(channelName, messageValue) +} + +func (w *walker) enterChannelObject() error { + value, dataType, _, err := jsonparser.Get(w.document.Bytes(), ChannelsKey) + if errors.Is(err, jsonparser.KeyPathNotFoundError) { + return fmt.Errorf("key: %s is missing", ChannelsKey) + } + if err != nil { + return err + } + + if dataType != jsonparser.Object { + return fmt.Errorf("%s has to be a JSON object", ChannelsKey) + } + + return jsonparser.ObjectEach(value, func(key []byte, value []byte, dataType jsonparser.ValueType, offset int) error { + if dataType != jsonparser.Object { + return fmt.Errorf("%s has to be a JSON object", key) + } + err = w.enterChannelItemObject(string(key), value) + if err != nil { + return err + } + return nil + }) +} + +func (w *walker) enterSecurityRequirementObject(key string, data []byte, s *Server) error { + sr := 
&SecurityRequirement{Requirements: make(map[string][]string)} + + _, err := jsonparser.ArrayEach(data, func(securityRequirement []byte, _ jsonparser.ValueType, _ int, _ error) { + sr.Requirements[key] = append(sr.Requirements[key], string(securityRequirement)) + }) + if err != nil { + return err + } + + if len(sr.Requirements) > 0 { + s.Security = append(s.Security, sr) + } + return nil +} + +func (w *walker) enterSecurityObject(s *Server, data []byte) error { + // Not mandatory + var securityObjectItems [][]byte + _, err := jsonparser.ArrayEach(data, func(securityObjectItem []byte, dataType jsonparser.ValueType, _ int, err error) { + securityObjectItems = append(securityObjectItems, securityObjectItem) + }, SecurityKey) + if errors.Is(err, jsonparser.KeyPathNotFoundError) { + return nil + } + if err != nil { + return err + } + + for _, securityObjectItem := range securityObjectItems { + err = jsonparser.ObjectEach(securityObjectItem, func(key []byte, value []byte, _ jsonparser.ValueType, _ int) error { + return w.enterSecurityRequirementObject(string(key), value, s) + }) + if err != nil { + return err + } + } + return nil +} + +func (w *walker) enterServerBindingsObject(s *Server, data []byte) error { + // Not mandatory + kafkaValue, _, _, err := jsonparser.Get(data, BindingsKey, KafkaKey) + if errors.Is(err, jsonparser.KeyPathNotFoundError) { + return nil + } + if err != nil { + return err + } + + return jsonparser.ObjectEach(kafkaValue, func(key []byte, kafkaBindingItemValue []byte, dataType jsonparser.ValueType, _ int) error { + if dataType != jsonparser.String { + return nil + } + b := &Binding{ + Value: kafkaBindingItemValue, + ValueType: dataType, + } + _, ok := s.Bindings[KafkaKey] + if !ok { + s.Bindings[KafkaKey] = make(map[string]*Binding) + } + s.Bindings[KafkaKey][string(key)] = b + return nil + }) +} + +func (w *walker) enterServerObject(key string, data []byte) error { + s := &Server{ + Bindings: map[string]map[string]*Binding{}, + } + + // Mandatory + urlValue, err := extractString(URLKey, data) + if err != nil { + return err + } + s.URL = urlValue + + protocolValue, err := extractString(ProtocolKey, data) + if err != nil { + return err + } + s.Protocol = protocolValue + + // Not mandatory + protocolVersionValue, err := extractString(ProtocolVersionKey, data) + if err == nil { + s.ProtocolVersion = protocolVersionValue + } + descriptionValue, err := extractString(DescriptionKey, data) + if err == nil { + s.Description = descriptionValue + } + + err = w.enterSecurityObject(s, data) + if err != nil { + return err + } + + err = w.enterServerBindingsObject(s, data) + if err != nil { + return err + } + + w.asyncapi.Servers[key] = s + return nil +} + +func (w *walker) enterServersObject() error { + // Not Mandatory + serverValue, dataType, _, err := jsonparser.Get(w.document.Bytes(), ServersKey) + if errors.Is(err, jsonparser.KeyPathNotFoundError) { + return nil + } + if err != nil { + return err + } + if dataType != jsonparser.Object { + return fmt.Errorf("%s has to be a JSON object", ServersKey) + } + return jsonparser.ObjectEach(serverValue, func(key []byte, value []byte, dataType jsonparser.ValueType, offset int) error { + return w.enterServerObject(string(key), value) + }) +} + +func (w *walker) enterInfoObject() error { + infoValue, dataType, _, err := jsonparser.Get(w.document.Bytes(), InfoKey) + if errors.Is(err, jsonparser.KeyPathNotFoundError) { + return fmt.Errorf("key: %s is missing", InfoKey) + } + if err != nil { + return err + } + if dataType != jsonparser.Object { 
+ return fmt.Errorf("%s has to be a JSON object", InfoKey) + } + title, err := extractString(TitleKey, infoValue) + if errors.Is(err, jsonparser.KeyPathNotFoundError) { + return fmt.Errorf("field: %s is missing in %s", TitleKey, InfoKey) + } + w.asyncapi.Info.Title = title + + version, err := extractString(VersionKey, infoValue) + if errors.Is(err, jsonparser.KeyPathNotFoundError) { + return fmt.Errorf("field: %s is missing in %s", VersionKey, InfoKey) + } + w.asyncapi.Info.Version = version + return nil +} + +func ParseAsyncAPIDocument(input []byte) (*AsyncAPI, error) { + r := bytes.NewBuffer(input) + asyncAPIParser, err := parser.New() + if err != nil { + return nil, err + } + + buf := bytes.NewBuffer(nil) + err = asyncAPIParser(r, buf) + if err != nil { + return nil, err + } + + w := &walker{ + document: buf, + asyncapi: &AsyncAPI{ + Info: &Info{}, + Channels: make(map[string]*ChannelItem), + Servers: make(map[string]*Server), + }, + } + + err = w.enterInfoObject() + if err != nil { + return nil, err + } + + err = w.enterChannelObject() + if err != nil { + return nil, err + } + + err = w.enterServersObject() + if err != nil { + return nil, err + } + + return w.asyncapi, nil +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/asyncapi/converter.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/asyncapi/converter.go new file mode 100644 index 00000000000..2a4d47ca146 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/asyncapi/converter.go @@ -0,0 +1,282 @@ +package asyncapi + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "sort" + "strings" + + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/introspection" + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/literal" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" + "github.com/buger/jsonparser" + "github.com/iancoleman/strcase" +) + +type converter struct { + asyncapi *AsyncAPI + knownEnums map[string]struct{} + knownTypes map[string]struct{} +} + +// __TypeKind of introspection is an unexported type. In order to overcome the problem, +// this function creates and returns a TypeRef for a given kind. kind is a AsyncAPI type. 
+func getTypeRef(kind string) (introspection.TypeRef, error) {
+	// See introspection_enum.go
+	switch kind {
+	case "string", "integer", "number", "boolean":
+		return introspection.TypeRef{Kind: 0}, nil
+	case "object":
+		return introspection.TypeRef{Kind: 3}, nil
+	case "array":
+		return introspection.TypeRef{Kind: 1}, nil
+	}
+	return introspection.TypeRef{}, errors.New("unknown type")
+}
+
+func asyncAPITypeToGQLType(asyncAPIType string) (string, error) {
+	// See https://www.asyncapi.com/docs/reference/specification/v2.4.0#dataTypeFormat
+	switch asyncAPIType {
+	case "string":
+		return string(literal.STRING), nil
+	case "integer":
+		return string(literal.INT), nil
+	case "number":
+		return string(literal.FLOAT), nil
+	case "boolean":
+		return string(literal.BOOLEAN), nil
+	default:
+		return "", fmt.Errorf("unknown type: %s", asyncAPIType)
+	}
+}
+
+func (c *converter) importEnumType(name string, enums []*Enum) *introspection.FullType {
+	enumName := strcase.ToCamel(name)
+	_, ok := c.knownEnums[enumName]
+	if ok {
+		return nil
+	}
+
+	enumType := &introspection.FullType{
+		Kind: introspection.ENUM,
+		Name: enumName,
+	}
+	for _, enum := range enums {
+		if enum.ValueType == jsonparser.String {
+			enumType.EnumValues = append(enumType.EnumValues, introspection.EnumValue{
+				Name: strings.ToUpper(strcase.ToSnake(string(enum.Value))),
+			})
+		}
+	}
+	// Record the camel-cased name that is checked above, so deduplication
+	// works even when the raw property name differs from the type name.
+	c.knownEnums[enumName] = struct{}{}
+	return enumType
+}
+
+func (c *converter) importFullTypes() ([]introspection.FullType, error) {
+	fullTypes := make([]introspection.FullType, 0)
+	for _, channelItem := range c.asyncapi.Channels {
+		if channelItem.Kind != ChannelItemKindSubscription {
+			return nil, fmt.Errorf("channel item type: %s is not supported", channelItem.Kind)
+		}
+		msg := channelItem.Message
+
+		fullTypeName := strcase.ToCamel(msg.Name)
+		if _, ok := c.knownTypes[fullTypeName]; ok {
+			continue
+		}
+
+		var sb = strings.Builder{}
+		sb.WriteString(msg.Title)
+		sb.WriteString("\n")
+		sb.WriteString(msg.Summary)
+		sb.WriteString("\n")
+		sb.WriteString(msg.Description)
+		ft := introspection.FullType{
+			Kind:        introspection.OBJECT,
+			Name:        fullTypeName,
+			Description: strings.TrimSpace(sb.String()),
+		}
+
+		for name, prop := range msg.Payload.Properties {
+			var f introspection.Field
+			if prop.Enum == nil {
+				gqlType, err := asyncAPITypeToGQLType(prop.Type)
+				if err != nil {
+					return nil, err
+				}
+				typeRef, err := getTypeRef(prop.Type)
+				if err != nil {
+					return nil, err
+				}
+				typeRef.Name = &gqlType
+				f = introspection.Field{
+					Name:        name,
+					Description: prop.Description,
+					Type:        typeRef,
+				}
+			} else {
+				// ENUM type and its fields.
+				enumType := c.importEnumType(name, prop.Enum)
+				if enumType != nil {
+					fullTypes = append(fullTypes, *enumType)
+				}
+				enumTypeName := strcase.ToCamel(name)
+				typeRef, err := getTypeRef(prop.Type)
+				if err != nil {
+					return nil, err
+				}
+				typeRef.Name = &enumTypeName
+				f = introspection.Field{
+					Name:        name,
+					Description: prop.Description,
+					Type:        typeRef,
+				}
+			}
+			ft.Fields = append(ft.Fields, f)
+			sort.Slice(ft.Fields, func(i, j int) bool {
+				return ft.Fields[i].Name < ft.Fields[j].Name
+			})
+		}
+
+		c.knownTypes[fullTypeName] = struct{}{}
+		fullTypes = append(fullTypes, ft)
+		sort.Slice(fullTypes, func(i, j int) bool {
+			return fullTypes[i].Name < fullTypes[j].Name
+		})
+	}
+	return fullTypes, nil
+}
+
+func (c *converter) importSubscriptionType() (*introspection.FullType, error) {
+	subscriptionType := &introspection.FullType{
+		Kind: introspection.OBJECT,
+		Name: "Subscription",
+	}
+	for _, channelItem := range c.asyncapi.Channels {
+		typeName := strcase.ToCamel(channelItem.Message.Name)
+		typeRef, err := getTypeRef("object")
+		if err != nil {
+			return nil, err
+		}
+		typeRef.Name = &typeName
+		f := introspection.Field{
+			Name: strcase.ToLowerCamel(channelItem.OperationID),
+			Type: typeRef,
+		}
+		for paramName, paramType := range channelItem.Parameters {
+			gqlType, err := asyncAPITypeToGQLType(paramType)
+			if err != nil {
+				return nil, err
+			}
+
+			paramTypeRef, err := getTypeRef(paramType)
+			if err != nil {
+				return nil, err
+			}
+			paramTypeRef.Name = &gqlType
+
+			iv := introspection.InputValue{
+				Name: paramName,
+				Type: paramTypeRef,
+			}
+			f.Args = append(f.Args, iv)
+			sort.Slice(f.Args, func(i, j int) bool {
+				return f.Args[i].Name < f.Args[j].Name
+			})
+		}
+
+		subscriptionType.Fields = append(subscriptionType.Fields, f)
+		sort.Slice(subscriptionType.Fields, func(i, j int) bool {
+			return subscriptionType.Fields[i].Name < subscriptionType.Fields[j].Name
+		})
+	}
+	return subscriptionType, nil
+}
+
+func (c *converter) importQueryType() (*introspection.FullType, error) {
+	// A Query root type must be provided. We add an empty Query type with a dummy field.
+	//
+	// type Query {
+	//     _: Boolean
+	// }
+	queryType := &introspection.FullType{
+		Kind: introspection.OBJECT,
+		Name: "Query",
+	}
+	typeName := string(literal.BOOLEAN)
+	queryType.Fields = append(queryType.Fields, introspection.Field{
+		Name: "_",
+		Type: introspection.TypeRef{Kind: 0, Name: &typeName},
+	})
+	return queryType, nil
+}
+
+func ImportParsedAsyncAPIDocument(parsed *AsyncAPI, report *operationreport.Report) *ast.Document {
+	// A parsed AsyncAPI document may include the same enum type name more than once.
+	// To prevent duplicated types in the resulting schema, we save the names.
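+	// For example (hypothetical), two channels whose payloads both declare an
+	// enum property named "status" should yield a single "Status" ENUM type in
+	// the output document rather than two clashing definitions.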
+	c := &converter{
+		asyncapi:   parsed,
+		knownEnums: make(map[string]struct{}),
+		knownTypes: make(map[string]struct{}),
+	}
+	data := introspection.Data{}
+
+	data.Schema.QueryType = &introspection.TypeName{
+		Name: "Query",
+	}
+	queryType, err := c.importQueryType()
+	if err != nil {
+		report.AddInternalError(err)
+		return nil
+	}
+	data.Schema.Types = append(data.Schema.Types, *queryType)
+
+	data.Schema.SubscriptionType = &introspection.TypeName{
+		Name: "Subscription",
+	}
+	subscriptionType, err := c.importSubscriptionType()
+	if err != nil {
+		report.AddInternalError(err)
+		return nil
+	}
+	data.Schema.Types = append(data.Schema.Types, *subscriptionType)
+
+	fullTypes, err := c.importFullTypes()
+	if err != nil {
+		report.AddInternalError(err)
+		return nil
+	}
+	data.Schema.Types = append(data.Schema.Types, fullTypes...)
+
+	outputPretty, err := json.MarshalIndent(data, "", " ")
+	if err != nil {
+		report.AddInternalError(err)
+		return nil
+	}
+
+	jc := introspection.JsonConverter{}
+	buf := bytes.NewBuffer(outputPretty)
+	doc, err := jc.GraphQLDocument(buf)
+	if err != nil {
+		report.AddInternalError(err)
+		return nil
+	}
+	return doc
+}
+
+func ImportAsyncAPIDocumentByte(input []byte) (*ast.Document, operationreport.Report) {
+	report := operationreport.Report{}
+	asyncapi, err := ParseAsyncAPIDocument(input)
+	if err != nil {
+		report.AddInternalError(err)
+		return nil, report
+	}
+	return ImportParsedAsyncAPIDocument(asyncapi, &report), report
+}
+
+func ImportAsyncAPIDocumentString(input string) (*ast.Document, operationreport.Report) {
+	return ImportAsyncAPIDocumentByte([]byte(input))
+}
diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/graphql_datasource/batch.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/graphql_datasource/batch.go
new file mode 100644
index 00000000000..845996fa761
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/graphql_datasource/batch.go
@@ -0,0 +1,196 @@
+package graphql_datasource
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/buger/jsonparser"
+
+	"github.com/TykTechnologies/graphql-go-tools/pkg/engine/resolve"
+	"github.com/TykTechnologies/graphql-go-tools/pkg/fastbuffer"
+	"github.com/TykTechnologies/graphql-go-tools/pkg/lexer/literal"
+	"github.com/TykTechnologies/graphql-go-tools/pkg/pool"
+)
+
+var representationPath = []string{"body", "variables", "representations"}
+
+type Batch struct {
+	resultedInput    *fastbuffer.FastBuffer
+	responseMappings []inputResponseBufferMappings
+	batchSize        int
+}
+
+// inputResponseBufferMappings defines the relationship between an input containing an _entities query
+// and the output buffers that the response needs to be mapped to.
+type inputResponseBufferMappings struct {
+	// responseIndex is the array position of the response
+	responseIndex int
+	// originalInput is the original input of a response to allow comparing and deduplication
+	originalInput []byte
+	// assignedBufferIndices are the buffers to which the response needs to be assigned
+	assignedBufferIndices []int
+
+	skip bool
+}
+
+func NewBatchFactory() *BatchFactory {
+	return &BatchFactory{}
+}
+
+type BatchFactory struct{}
+
+func (b *BatchFactory) CreateBatch(inputs [][]byte) (resolve.DataSourceBatch, error) {
+	if len(inputs) == 0 {
+		return nil, nil
+	}
+
+	resultedInput := pool.FastBuffer.Get()
+
+	responseMappings, batchSize, err := b.multiplexBatch(resultedInput, inputs)
+	if err != nil {
+		// Return the pooled buffer before propagating the error.
+		pool.FastBuffer.Put(resultedInput)
+		return nil, err
+	}
+
+	return &Batch{
+		resultedInput:    resultedInput,
+		responseMappings: responseMappings,
+		batchSize:        batchSize,
+	}, nil
+}
+
+func (b *Batch) Input() *fastbuffer.FastBuffer {
+	return b.resultedInput
+}
+
+func (b *Batch) Demultiplex(responseBufPair *resolve.BufPair, bufPairs []*resolve.BufPair) (err error) {
+	defer pool.FastBuffer.Put(b.resultedInput)
+
+	if b.batchSize != len(bufPairs) {
+		return fmt.Errorf("expected %d buf pairs", b.batchSize)
+	}
+
+	if err = b.demultiplexBatch(responseBufPair, b.responseMappings, bufPairs); err != nil {
+		return err
+	}
+
+	return
+}
+
+func (b *BatchFactory) multiplexBatch(out *fastbuffer.FastBuffer, inputs [][]byte) (responseMappings []inputResponseBufferMappings, batchSize int, err error) {
+	if len(inputs) == 0 {
+		return nil, 0, nil
+	}
+
+	variablesBuf := pool.FastBuffer.Get()
+	defer pool.FastBuffer.Put(variablesBuf)
+
+	variablesBuf.WriteBytes(literal.LBRACK)
+
+	var (
+		variablesIdx              int
+		skippedInputs             int
+		firstRepresentationsStart int
+		firstRepresentationsEnd   int
+	)
+
+	for i := range inputs {
+		if bytes.Equal(inputs[i], literal.NULL) {
+			responseMappings = append(responseMappings, inputResponseBufferMappings{
+				responseIndex:         i,
+				originalInput:         inputs[i],
+				assignedBufferIndices: []int{i},
+				skip:                  true,
+			})
+			variablesIdx++
+			skippedInputs++
+			continue
+		}
+		inputVariables, _, representationsOffset, err := jsonparser.Get(inputs[i], representationPath...)
+		if err != nil {
+			return nil, 0, err
+		}
+
+		if i == 0 {
+			firstRepresentationsStart = representationsOffset - len(inputVariables)
+			firstRepresentationsEnd = representationsOffset
+		}
+
+		_, err = jsonparser.ArrayEach(inputVariables, func(value []byte, dataType jsonparser.ValueType, offset int, err error) {
+			for j := range responseMappings {
+				existing := responseMappings[j].originalInput
+				if bytes.Equal(existing, value) {
+					responseMappings[j].assignedBufferIndices = append(responseMappings[j].assignedBufferIndices, i)
+					return
+				}
+			}
+
+			if variablesBuf.Len() != 1 {
+				variablesBuf.WriteBytes(literal.COMMA)
+			}
+			variablesBuf.WriteBytes(value)
+
+			responseMappings = append(responseMappings, inputResponseBufferMappings{
+				responseIndex:         variablesIdx,
+				originalInput:         value,
+				assignedBufferIndices: []int{i},
+			})
+
+			variablesIdx++
+		})
+		if err != nil {
+			return nil, 0, err
+		}
+	}
+
+	variablesBuf.WriteBytes(literal.RBRACK)
+
+	representationJson := variablesBuf.Bytes()
+	representationJsonCopy := make([]byte, len(representationJson))
+	copy(representationJsonCopy, representationJson)
+
+	header := inputs[0][0:firstRepresentationsStart]
+	trailer := inputs[0][firstRepresentationsEnd:]
+
+	out.WriteBytes(header)
+	out.WriteBytes(representationJsonCopy)
+	out.WriteBytes(trailer)
+
+	return responseMappings, len(inputs), nil
+}
+
+func (b *Batch) demultiplexBatch(responsePair *resolve.BufPair, responseMappings []inputResponseBufferMappings, resultBufPairs []*resolve.BufPair) (err error) {
+	var outPosition int
+
+	if responsePair.HasData() {
+		_, err = jsonparser.ArrayEach(responsePair.Data.Bytes(), func(value []byte, dataType jsonparser.ValueType, offset int, err error) {
+			// Stop once every mapping has been consumed.
+			if outPosition >= len(responseMappings) {
+				return
+			}
+
+			mapping := responseMappings[outPosition]
+			for mapping.skip {
+				resultBufPairs[outPosition].Data.WriteBytes(literal.NULL)
+				outPosition++
+				if outPosition >= len(responseMappings) {
+					return
+				}
+				mapping = responseMappings[outPosition]
+			}
+
+			for _, index := range mapping.assignedBufferIndices {
+				if resultBufPairs[index].Data.Len() != 0 {
+					resultBufPairs[index].Data.WriteBytes(literal.COMMA)
+				}
+
resultBufPairs[index].Data.WriteBytes(value) + } + + outPosition++ + }) + if err != nil { + return err + } + } + + if responsePair.HasErrors() { + resultBufPairs[0].Errors.WriteBytes(responsePair.Errors.Bytes()) + } + + return +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/graphql_datasource/graphql_datasource.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/graphql_datasource/graphql_datasource.go new file mode 100644 index 00000000000..26052b23510 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/graphql_datasource/graphql_datasource.go @@ -0,0 +1,1463 @@ +package graphql_datasource + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + + "github.com/buger/jsonparser" + "github.com/tidwall/sjson" + + "github.com/TykTechnologies/graphql-go-tools/internal/pkg/unsafebytes" + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization" + "github.com/TykTechnologies/graphql-go-tools/pkg/astparser" + "github.com/TykTechnologies/graphql-go-tools/pkg/astprinter" + "github.com/TykTechnologies/graphql-go-tools/pkg/asttransform" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation" + "github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/httpclient" + "github.com/TykTechnologies/graphql-go-tools/pkg/engine/plan" + "github.com/TykTechnologies/graphql-go-tools/pkg/engine/resolve" + "github.com/TykTechnologies/graphql-go-tools/pkg/federation" + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/literal" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +const removeNullVariablesDirectiveName = "removeNullVariables" + +type Planner struct { + visitor *plan.Visitor + dataSourceConfig plan.DataSourceConfiguration + config Configuration + upstreamOperation *ast.Document + upstreamVariables []byte + representationsJson []byte + nodes []ast.Node + variables resolve.Variables + lastFieldEnclosingTypeName string + disallowSingleFlight bool + hasFederationRoot bool + // federationDepth is the depth in the response tree where the federation root is located. + // this field allows us to dismiss all federated fields that belong to a different subgraph easily + federationDepth int + extractEntities bool + fetchClient *http.Client + subscriptionClient GraphQLSubscriptionClient + isNested bool // isNested - flags that datasource is nested e.g. 
field with datasource is not on a query type + rootTypeName string // rootTypeName - holds name of top level type + rootFieldName string // rootFieldName - holds name of root type field + rootFieldRef int // rootFieldRef - holds ref of root type field + argTypeRef int // argTypeRef - holds current argument type ref from the definition + batchFactory resolve.DataSourceBatchFactory + upstreamDefinition *ast.Document + currentVariableDefinition int + addDirectivesToVariableDefinitions map[int][]int + + insideCustomScalarField bool + customScalarFieldRef int + unnulVariables bool + + parentTypeNodes []ast.Node +} + +func (p *Planner) parentNodeIsAbstract() bool { + if len(p.parentTypeNodes) < 2 { + return false + } + parentTypeNode := p.parentTypeNodes[len(p.parentTypeNodes)-2] + return parentTypeNode.Kind.IsAbstractType() +} + +func (p *Planner) EnterVariableDefinition(ref int) { + p.currentVariableDefinition = ref +} + +func (p *Planner) LeaveVariableDefinition(_ int) { + p.currentVariableDefinition = -1 +} + +func (p *Planner) EnterDirective(ref int) { + parent := p.nodes[len(p.nodes)-1] + if parent.Kind == ast.NodeKindOperationDefinition && p.currentVariableDefinition != -1 { + p.addDirectivesToVariableDefinitions[p.currentVariableDefinition] = append(p.addDirectivesToVariableDefinitions[p.currentVariableDefinition], ref) + return + } + p.addDirectiveToNode(ref, parent) +} + +func (p *Planner) addDirectiveToNode(directiveRef int, node ast.Node) { + directiveName := p.visitor.Operation.DirectiveNameString(directiveRef) + operationType := ast.OperationTypeQuery + if !p.isNested { + operationType = p.visitor.Operation.OperationDefinitions[p.visitor.Walker.Ancestors[0].Ref].OperationType + } + if !p.visitor.Definition.DirectiveIsAllowedOnNodeKind(directiveName, node.Kind, operationType) { + return + } + upstreamDirectiveName := p.dataSourceConfig.Directives.RenameTypeNameOnMatchStr(directiveName) + if p.upstreamDefinition != nil && !p.upstreamDefinition.DirectiveIsAllowedOnNodeKind(upstreamDirectiveName, node.Kind, operationType) { + return + } + upstreamDirective := p.visitor.Importer.ImportDirectiveWithRename(directiveRef, upstreamDirectiveName, p.visitor.Operation, p.upstreamOperation) + p.upstreamOperation.AddDirectiveToNode(upstreamDirective, node) + + // The directive is allowed on the node, so we know it exists. + directive := p.visitor.Operation.Directives[directiveRef] + + var variables []ast.Value + + // Collect all the variable arguments. + if directive.HasArguments { + for _, argument := range directive.Arguments.Refs { + value := p.visitor.Operation.ArgumentValue(argument) + // TODO: also handle literal values that CONTAIN variables + if value.Kind == ast.ValueKindVariable { + variables = append(variables, value) + } + } + } + + // Process each variable, adding it to the upstream operation and + // variables, if it hasn't already been added. Note: instead of looking + // up the type of the corresponding argument on the directive definition, + // this code assumes the type of the variable as defined in the operation + // is correct and uses the same (possibly mapped) type for the upstream + // operation. + for _, value := range variables { + variableName := p.visitor.Operation.VariableValueNameBytes(value.Ref) + + for _, i := range p.visitor.Operation.OperationDefinitions[p.visitor.Walker.Ancestors[0].Ref].VariableDefinitions.Refs { + // Find the variable declaration in the downstream operation. 
+ ref := p.visitor.Operation.VariableDefinitions[i].VariableValue.Ref + if !p.visitor.Operation.VariableValueNameBytes(ref).Equals(variableName) { + continue + } + + // Look up the variable type. + variableType := p.visitor.Operation.VariableDefinitions[i].Type + typeName := p.visitor.Operation.ResolveTypeNameString(variableType) + + renderer, err := resolve.NewJSONVariableRendererWithValidationFromTypeRef(p.visitor.Operation, p.visitor.Definition, variableType) + if err != nil { + continue + } + + contextVariable := &resolve.ContextVariable{ + Path: []string{string(variableName)}, + Renderer: renderer, + } + + // Try to add the variable to the set of upstream variables. + contextVariableName, exists := p.variables.AddVariable(contextVariable) + + // If the variable already exists, it also already exists in the + // upstream operation; there's nothing to add! + if exists { + continue + } + + // Add the variable to the upstream operation. Be sure to map the + // downstream type to the upstream type, if needed. + upstreamVariable := p.upstreamOperation.ImportVariableValue(variableName) + upstreamTypeName := p.visitor.Config.Types.RenameTypeNameOnMatchStr(typeName) + importedType := p.visitor.Importer.ImportTypeWithRename(p.visitor.Operation.VariableDefinitions[i].Type, p.visitor.Operation, p.upstreamOperation, upstreamTypeName) + p.upstreamOperation.AddVariableDefinitionToOperationDefinition(p.nodes[0].Ref, upstreamVariable, importedType) + + // Also copy any variable directives in the downstream operation to + // the upstream operation. + if add, ok := p.addDirectivesToVariableDefinitions[i]; ok { + for _, directive := range add { + p.addDirectiveToNode(directive, ast.Node{Kind: ast.NodeKindVariableDefinition, Ref: i}) + } + } + + // And finally add the variable to the upstream variables JSON. + p.upstreamVariables, _ = sjson.SetRawBytes(p.upstreamVariables, string(variableName), []byte(contextVariableName)) + } + } +} + +func (p *Planner) DownstreamResponseFieldAlias(downstreamFieldRef int) (alias string, exists bool) { + // If there's no alias but the downstream Query re-uses the same path on different root fields, + // we rewrite the downstream Query using an alias so that we can have an aliased Query to the upstream + // while keeping a non aliased Query to the downstream but with a path rewrite on an existing root field. 
+ + fieldName := p.visitor.Operation.FieldNameUnsafeString(downstreamFieldRef) + + if p.visitor.Operation.FieldAliasIsDefined(downstreamFieldRef) { + return "", false + } + + typeName := p.visitor.Walker.EnclosingTypeDefinition.NameString(p.visitor.Definition) + for i := range p.visitor.Config.Fields { + if p.visitor.Config.Fields[i].TypeName == typeName && + p.visitor.Config.Fields[i].FieldName == fieldName && + len(p.visitor.Config.Fields[i].Path) == 1 { + + if p.visitor.Config.Fields[i].Path[0] != fieldName { + aliasBytes := p.visitor.Operation.FieldNameBytes(downstreamFieldRef) + return string(aliasBytes), true + } + break + } + } + return "", false +} + +func (p *Planner) DataSourcePlanningBehavior() plan.DataSourcePlanningBehavior { + return plan.DataSourcePlanningBehavior{ + MergeAliasedRootNodes: true, + OverrideFieldPathFromAlias: true, + IncludeTypeNameFields: true, + } +} + +type Configuration struct { + Fetch FetchConfiguration + Subscription SubscriptionConfiguration + Federation FederationConfiguration + UpstreamSchema string + CustomScalarTypeFields []SingleTypeField +} + +type SingleTypeField struct { + TypeName string + FieldName string +} + +func ConfigJson(config Configuration) json.RawMessage { + out, _ := json.Marshal(config) + return out +} + +type FederationConfiguration struct { + Enabled bool + ServiceSDL string +} + +type SubscriptionConfiguration struct { + URL string + UseSSE bool + SSEMethodPost bool +} + +type FetchConfiguration struct { + URL string + Method string + Header http.Header +} + +func (c *Configuration) ApplyDefaults() { + if c.Fetch.Method == "" { + c.Fetch.Method = "POST" + } +} + +func (p *Planner) Register(visitor *plan.Visitor, configuration plan.DataSourceConfiguration, isNested bool) error { + p.visitor = visitor + p.visitor.Walker.RegisterDocumentVisitor(p) + p.visitor.Walker.RegisterFieldVisitor(p) + p.visitor.Walker.RegisterOperationDefinitionVisitor(p) + p.visitor.Walker.RegisterSelectionSetVisitor(p) + p.visitor.Walker.RegisterEnterArgumentVisitor(p) + p.visitor.Walker.RegisterInlineFragmentVisitor(p) + p.visitor.Walker.RegisterEnterDirectiveVisitor(p) + p.visitor.Walker.RegisterVariableDefinitionVisitor(p) + + p.dataSourceConfig = configuration + err := json.Unmarshal(configuration.Custom, &p.config) + if err != nil { + return err + } + + p.config.ApplyDefaults() + p.isNested = isNested + + return nil +} + +func (p *Planner) ConfigureFetch() plan.FetchConfiguration { + var input []byte + input = httpclient.SetInputBodyWithPath(input, p.upstreamVariables, "variables") + input = httpclient.SetInputBodyWithPath(input, p.printOperation(), "query") + + if p.unnulVariables { + input = httpclient.SetInputFlag(input, httpclient.UNNULLVARIABLES) + } + + header, err := json.Marshal(p.config.Fetch.Header) + if err == nil && len(header) != 0 && !bytes.Equal(header, literal.NULL) { + input = httpclient.SetInputHeader(input, header) + } + + input = httpclient.SetInputURL(input, []byte(p.config.Fetch.URL)) + input = httpclient.SetInputMethod(input, []byte(p.config.Fetch.Method)) + + var batchConfig plan.BatchConfig + // Allow batch query for fetching entities. + if p.extractEntities && p.batchFactory != nil { + batchConfig = plan.BatchConfig{ + AllowBatch: p.extractEntities, // Allow batch query for fetching entities. 
+ BatchFactory: p.batchFactory, + } + } + + return plan.FetchConfiguration{ + Input: string(input), + DataSource: &Source{ + httpClient: p.fetchClient, + }, + Variables: p.variables, + DisallowSingleFlight: p.disallowSingleFlight, + ProcessResponseConfig: resolve.ProcessResponseConfig{ + ExtractGraphqlResponse: true, + ExtractFederationEntities: p.extractEntities, + }, + BatchConfig: batchConfig, + SetTemplateOutputToNullOnVariableNull: batchConfig.AllowBatch, + } +} + +func (p *Planner) ConfigureSubscription() plan.SubscriptionConfiguration { + input := httpclient.SetInputBodyWithPath(nil, p.upstreamVariables, "variables") + input = httpclient.SetInputBodyWithPath(input, p.printOperation(), "query") + input = httpclient.SetInputURL(input, []byte(p.config.Subscription.URL)) + if p.config.Subscription.UseSSE { + input = httpclient.SetInputFlag(input, httpclient.USESSE) + if p.config.Subscription.SSEMethodPost { + input = httpclient.SetInputFlag(input, httpclient.SSEMETHODPOST) + } + } + + header, err := json.Marshal(p.config.Fetch.Header) + if err == nil && len(header) != 0 && !bytes.Equal(header, literal.NULL) { + input = httpclient.SetInputHeader(input, header) + } + + return plan.SubscriptionConfiguration{ + Input: string(input), + DataSource: &SubscriptionSource{ + client: p.subscriptionClient, + }, + Variables: p.variables, + } +} + +func (p *Planner) EnterOperationDefinition(ref int) { + if p.visitor.Operation.OperationDefinitions[ref].HasDirectives && + p.visitor.Operation.OperationDefinitions[ref].Directives.HasDirectiveByName(p.visitor.Operation, removeNullVariablesDirectiveName) { + p.unnulVariables = true + p.visitor.Operation.OperationDefinitions[ref].Directives.RemoveDirectiveByName(p.visitor.Operation, removeNullVariablesDirectiveName) + } + + operationType := p.visitor.Operation.OperationDefinitions[ref].OperationType + if p.isNested { + operationType = ast.OperationTypeQuery + } + definition := p.upstreamOperation.AddOperationDefinitionToRootNodes(ast.OperationDefinition{ + OperationType: operationType, + }) + p.disallowSingleFlight = operationType == ast.OperationTypeMutation + p.nodes = append(p.nodes, definition) +} + +func (p *Planner) LeaveOperationDefinition(_ int) { + p.nodes = p.nodes[:len(p.nodes)-1] +} + +func (p *Planner) EnterSelectionSet(ref int) { + p.parentTypeNodes = append(p.parentTypeNodes, p.visitor.Walker.EnclosingTypeDefinition) + if p.insideCustomScalarField { + return + } + + parent := p.nodes[len(p.nodes)-1] + set := p.upstreamOperation.AddSelectionSet() + switch parent.Kind { + case ast.NodeKindSelectionSet: + // this happens when we're inside the root of a nested abstract federated query + // we want to walk into and out of the selection set because the root field is abstract + // this allows us to walk out of the inline fragment in the root + // however, as a nested operation always starts with an Operation Definition and a Selection Set + // we don't want to add the selection set to the root nodes + return + case ast.NodeKindOperationDefinition: + p.upstreamOperation.OperationDefinitions[parent.Ref].HasSelections = true + p.upstreamOperation.OperationDefinitions[parent.Ref].SelectionSet = set.Ref + case ast.NodeKindField: + p.upstreamOperation.Fields[parent.Ref].HasSelections = true + p.upstreamOperation.Fields[parent.Ref].SelectionSet = set.Ref + case ast.NodeKindInlineFragment: + p.upstreamOperation.InlineFragments[parent.Ref].HasSelections = true + p.upstreamOperation.InlineFragments[parent.Ref].SelectionSet = set.Ref + } + p.nodes = 
append(p.nodes, set) + // Abstract meaning interface or union + if p.visitor.Walker.EnclosingTypeDefinition.Kind.IsAbstractType() { + // Always include __typename in abstract type selection sets. This is + // done because child fields may be federated and __typename will be + // needed for representations. While it would be possible to determine + // exactly when __typename is needed, there's no harm in just always + // including it. + p.addTypenameToSelectionSet(set.Ref) + return + } + + for _, selectionRef := range p.visitor.Operation.SelectionSets[ref].SelectionRefs { + if p.visitor.Operation.Selections[selectionRef].Kind == ast.SelectionKindField { + if p.visitor.Operation.FieldNameUnsafeString(p.visitor.Operation.Selections[selectionRef].Ref) == "__typename" { + p.addTypenameToSelectionSet(set.Ref) + } + } + } +} + +func (p *Planner) addTypenameToSelectionSet(selectionSet int) { + field := p.upstreamOperation.AddField(ast.Field{ + Name: p.upstreamOperation.Input.AppendInputString("__typename"), + }) + p.upstreamOperation.AddSelection(selectionSet, ast.Selection{ + Ref: field.Ref, + Kind: ast.SelectionKindField, + }) +} + +func (p *Planner) LeaveSelectionSet(_ int) { + p.parentTypeNodes = p.parentTypeNodes[:len(p.parentTypeNodes)-1] + if p.insideCustomScalarField { + return + } + + lastIndex := len(p.nodes) - 1 + if p.nodes[lastIndex].Kind == ast.NodeKindSelectionSet { + p.nodes = p.nodes[:lastIndex] + } +} + +func (p *Planner) EnterInlineFragment(ref int) { + if p.insideCustomScalarField { + return + } + + if p.config.Federation.Enabled && !p.hasFederationRoot && p.isNestedRequest() { + // if we're inside the nested root of a federated abstract query, + // we're walking into the inline fragment as the root + // however, as we're already handling the inline fragment when we walk into the root field, + // we can skip this one + return + } + + typeCondition := p.visitor.Operation.InlineFragmentTypeConditionName(ref) + if typeCondition == nil && !p.visitor.Operation.InlineFragments[ref].HasDirectives { + return + } + + fragmentType := -1 + if typeCondition != nil { + fragmentType = p.upstreamOperation.AddNamedType(p.visitor.Config.Types.RenameTypeNameOnMatchBytes(typeCondition)) + } + + inlineFragment := p.upstreamOperation.AddInlineFragment(ast.InlineFragment{ + TypeCondition: ast.TypeCondition{ + Type: fragmentType, + }, + }) + + selection := ast.Selection{ + Kind: ast.SelectionKindInlineFragment, + Ref: inlineFragment, + } + + if typeCondition != nil { + // add __typename field to selection set which contains typeCondition + // so that the resolver can distinguish between the response types + p.addTypenameToSelectionSet(p.nodes[len(p.nodes)-1].Ref) + } + + p.upstreamOperation.AddSelection(p.nodes[len(p.nodes)-1].Ref, selection) + p.nodes = append(p.nodes, ast.Node{Kind: ast.NodeKindInlineFragment, Ref: inlineFragment}) +} + +func (p *Planner) LeaveInlineFragment(_ int) { + if p.insideCustomScalarField { + return + } + + lastIndex := len(p.nodes) - 1 + if p.nodes[lastIndex].Kind == ast.NodeKindInlineFragment { + p.nodes = p.nodes[:lastIndex] + } +} + +func (p *Planner) EnterField(ref int) { + if p.insideCustomScalarField { + return + } + + fieldName := p.visitor.Operation.FieldNameString(ref) + enclosingTypeName := p.visitor.Walker.EnclosingTypeDefinition.NameString(p.visitor.Definition) + + for i := range p.config.CustomScalarTypeFields { + if p.config.CustomScalarTypeFields[i].TypeName == enclosingTypeName && p.config.CustomScalarTypeFields[i].FieldName == fieldName { + 
p.insideCustomScalarField = true + p.customScalarFieldRef = ref + p.addCustomField(ref) + return + } + } + + p.lastFieldEnclosingTypeName = enclosingTypeName + + // store root field name and ref + if p.rootFieldName == "" { + p.rootFieldName = fieldName + p.rootFieldRef = ref + } + // store root type name + if p.rootTypeName == "" { + p.rootTypeName = enclosingTypeName + } + + fieldConfiguration := p.visitor.Config.Fields.ForTypeField(enclosingTypeName, fieldName) + if fieldConfiguration == nil && fieldName != "__typename" { + p.addField(ref) + return + } + + // Note: federated fields always have a field configuration because at + // least the federation key for the type the field lives on is required + // (and required fields are specified in the configuration). + p.handleFederation(fieldConfiguration) + p.addField(ref) + + upstreamFieldRef := p.nodes[len(p.nodes)-1].Ref + + if fieldConfiguration == nil { + return + } + + for i := range fieldConfiguration.Arguments { + argumentConfiguration := fieldConfiguration.Arguments[i] + p.configureArgument(upstreamFieldRef, ref, *fieldConfiguration, argumentConfiguration) + } +} + +func (p *Planner) addCustomField(ref int) { + fieldName := p.visitor.Operation.FieldNameString(ref) + field := p.upstreamOperation.AddField(ast.Field{ + Name: p.upstreamOperation.Input.AppendInputString(fieldName), + }) + selection := ast.Selection{ + Kind: ast.SelectionKindField, + Ref: field.Ref, + } + p.upstreamOperation.AddSelection(p.nodes[len(p.nodes)-1].Ref, selection) +} + +func (p *Planner) LeaveField(ref int) { + if p.insideCustomScalarField { + if p.customScalarFieldRef == ref { + p.insideCustomScalarField = false + p.customScalarFieldRef = 0 + } + return + } + + p.nodes = p.nodes[:len(p.nodes)-1] +} + +func (p *Planner) EnterArgument(_ int) { + if p.insideCustomScalarField { + return + } +} + +func (p *Planner) EnterDocument(_, _ *ast.Document) { + if p.upstreamOperation == nil { + p.upstreamOperation = ast.NewDocument() + } else { + p.upstreamOperation.Reset() + } + p.nodes = p.nodes[:0] + p.parentTypeNodes = p.parentTypeNodes[:0] + p.upstreamVariables = nil + p.variables = p.variables[:0] + p.representationsJson = p.representationsJson[:0] + p.disallowSingleFlight = false + p.hasFederationRoot = false + p.extractEntities = false + + // reset information about root type + p.rootTypeName = "" + p.rootFieldName = "" + p.rootFieldRef = -1 + + // reset info about arg type + p.argTypeRef = -1 + + p.addDirectivesToVariableDefinitions = map[int][]int{} + + p.upstreamDefinition = nil + if p.config.UpstreamSchema != "" { + p.upstreamDefinition = ast.NewDocument() + p.upstreamDefinition.Input.ResetInputString(p.config.UpstreamSchema) + parser := astparser.NewParser() + var report operationreport.Report + parser.Parse(p.upstreamDefinition, &report) + if report.HasErrors() { + p.visitor.Walker.StopWithInternalErr(report) + return + } + err := asttransform.MergeDefinitionWithBaseSchema(p.upstreamDefinition) + if err != nil { + p.visitor.Walker.StopWithInternalErr(err) + return + } + } +} + +func (p *Planner) LeaveDocument(_, _ *ast.Document) { +} + +func (p *Planner) handleFederation(fieldConfig *plan.FieldConfiguration) { + if !p.config.Federation.Enabled { // federation must be enabled + return + } + // If there's no federation root and this isn't a nested request, this + // isn't a federated field and there's nothing to do. 
+	if !p.hasFederationRoot && !p.isNestedRequest() {
+		return
+	}
+	// If a federated root is already present, the representations variable has
+	// already been added. Update it to include information for the additional
+	// field. NOTE: only the first federated field has isNestedRequest set to
+	// true. Subsequent fields use hasFederationRoot to determine federation
+	// status.
+	if p.hasFederationRoot {
+		// Ideally the "representations" variable could be set once in
+		// LeaveDocument, but ConfigureFetch is called before this visitor's
+		// LeaveDocument is called. (Updating the visitor logic to call
+		// LeaveDocument in reverse registration order would fix this issue.)
+		p.updateRepresentationsVariable(fieldConfig)
+		return
+	}
+	p.hasFederationRoot = true
+	p.federationDepth = p.visitor.Walker.Depth
+	// query($representations: [_Any!]!){_entities(representations: $representations){... on Product
+	p.addRepresentationsVariableDefinition() // $representations: [_Any!]!
+	p.addEntitiesSelectionSet()              // {_entities(representations: $representations)
+	p.addOnTypeInlineFragment()              // ... on Product
+	p.updateRepresentationsVariable(fieldConfig) // "variables\":{\"representations\":[{\"upc\":\"$$0$$\",\"__typename\":\"Product\"}]}}
+}
+
+func (p *Planner) updateRepresentationsVariable(fieldConfig *plan.FieldConfiguration) {
+	if p.visitor.Walker.Depth != p.federationDepth {
+		// This field sits at a different depth than the federation root, so it
+		// belongs to a different federated request and is skipped here: only
+		// federated fields that are part of the "current" federated request are
+		// handled. The field may be another federated subfield that the current
+		// subgraph could also resolve, but its required fields must not be added
+		// to the variables of this fetch because the context differs.
+		return
+	}
+
+	// "variables\":{\"representations\":[{\"upc\":\"$$0$$\",\"__typename\":\"Product\"}]}}
+	parser := astparser.NewParser()
+	doc := ast.NewDocument()
+	doc.Input.ResetInputString(p.config.Federation.ServiceSDL)
+	report := &operationreport.Report{}
+	parser.Parse(doc, report)
+	if report.HasErrors() {
+		p.visitor.Walker.StopWithInternalErr(fmt.Errorf("GraphQL Planner: failed parsing Federation SDL"))
+		return
+	}
+
+	// RequiresFields includes `@requires` fields as well as federation keys
+	// for the type containing the field currently being visited.
+	fields := fieldConfig.RequiresFields
+	if len(fields) == 0 {
+		return
+	}
+
+	if len(p.representationsJson) == 0 {
+		// If the parent is an abstract type, i.e., an interface or union,
+		// the representation typename must come from a parent fetch response.
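+		// The typename is then rendered at resolve time from an object variable
+		// with path ["__typename"], yielding e.g. {"__typename":"Product"}.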
+ if p.parentNodeIsAbstract() { + objectVariable := &resolve.ObjectVariable{ + Path: []string{"__typename"}, + } + objectVariable.Renderer = resolve.NewJSONVariableRendererWithValidation(`{"type":"string"}`) + if variable, exists := p.variables.AddVariable(objectVariable); !exists { + p.representationsJson, _ = sjson.SetRawBytes(p.representationsJson, "__typename", []byte(variable)) + } + } else { // otherwise use the concrete typename + onTypeName := p.visitor.Config.Types.RenameTypeNameOnMatchStr(p.lastFieldEnclosingTypeName) + p.representationsJson, _ = sjson.SetRawBytes(nil, "__typename", []byte("\""+onTypeName+"\"")) + } + } + + for i := range fields { + objectVariable := &resolve.ObjectVariable{ + Path: []string{fields[i]}, + } + fieldDef := p.fieldDefinition(fields[i], p.lastFieldEnclosingTypeName) + if fieldDef == nil { + continue + } + renderer, err := resolve.NewJSONVariableRendererWithValidationFromTypeRef(p.visitor.Definition, p.visitor.Definition, fieldDef.Type) + if err != nil { + continue + } + objectVariable.Renderer = renderer + variable, exists := p.variables.AddVariable(objectVariable) + if exists { + continue + } + p.representationsJson, _ = sjson.SetRawBytes(p.representationsJson, fields[i], []byte(variable)) + } + representationsJson := append([]byte("["), append(p.representationsJson, []byte("]")...)...) + p.upstreamVariables, _ = sjson.SetRawBytes(p.upstreamVariables, "representations", representationsJson) + p.extractEntities = true +} + +func (p *Planner) fieldDefinition(fieldName, typeName string) *ast.FieldDefinition { + node, ok := p.visitor.Definition.Index.FirstNodeByNameStr(typeName) + if !ok { + return nil + } + definition, ok := p.visitor.Definition.NodeFieldDefinitionByName(node, []byte(fieldName)) + if !ok { + return nil + } + return &p.visitor.Definition.FieldDefinitions[definition] +} + +func (p *Planner) addOnTypeInlineFragment() { + selectionSet := p.upstreamOperation.AddSelectionSet() + p.addTypenameToSelectionSet(p.nodes[len(p.nodes)-1].Ref) + onTypeName := p.visitor.Config.Types.RenameTypeNameOnMatchBytes([]byte(p.lastFieldEnclosingTypeName)) + typeRef := p.upstreamOperation.AddNamedType(onTypeName) + inlineFragment := p.upstreamOperation.AddInlineFragment(ast.InlineFragment{ + HasSelections: true, + SelectionSet: selectionSet.Ref, + TypeCondition: ast.TypeCondition{ + Type: typeRef, + }, + }) + p.upstreamOperation.AddSelection(p.nodes[len(p.nodes)-1].Ref, ast.Selection{ + Kind: ast.SelectionKindInlineFragment, + Ref: inlineFragment, + }) + p.nodes = append(p.nodes, selectionSet) +} + +func (p *Planner) addEntitiesSelectionSet() { + // $representations + representationsLiteral := p.upstreamOperation.Input.AppendInputString("representations") + representationsVariable := p.upstreamOperation.AddVariableValue(ast.VariableValue{ + Name: representationsLiteral, + }) + representationsArgument := p.upstreamOperation.AddArgument(ast.Argument{ + Name: representationsLiteral, + Value: ast.Value{ + Kind: ast.ValueKindVariable, + Ref: representationsVariable, + }, + }) + + // _entities + entitiesSelectionSet := p.upstreamOperation.AddSelectionSet() + entitiesField := p.upstreamOperation.AddField(ast.Field{ + Name: p.upstreamOperation.Input.AppendInputString("_entities"), + HasSelections: true, + HasArguments: true, + Arguments: ast.ArgumentList{ + Refs: []int{representationsArgument}, + }, + SelectionSet: entitiesSelectionSet.Ref, + }) + p.upstreamOperation.AddSelection(p.nodes[len(p.nodes)-1].Ref, ast.Selection{ + Kind: ast.SelectionKindField, + Ref: 
entitiesField.Ref, + }) + p.nodes = append(p.nodes, entitiesField, entitiesSelectionSet) +} + +func (p *Planner) addRepresentationsVariableDefinition() { + anyType := p.upstreamOperation.AddNamedType([]byte("_Any")) + nonNullAnyType := p.upstreamOperation.AddNonNullType(anyType) + listOfNonNullAnyType := p.upstreamOperation.AddListType(nonNullAnyType) + nonNullListOfNonNullAnyType := p.upstreamOperation.AddNonNullType(listOfNonNullAnyType) + + representationsVariable := p.upstreamOperation.ImportVariableValue([]byte("representations")) + p.upstreamOperation.AddVariableDefinitionToOperationDefinition(p.nodes[0].Ref, representationsVariable, nonNullListOfNonNullAnyType) +} + +func (p *Planner) isNestedRequest() bool { + for i := range p.nodes { + if p.nodes[i].Kind == ast.NodeKindField { + return false + } + } + selectionSetAncestors := 0 + for i := range p.visitor.Walker.Ancestors { + if p.visitor.Walker.Ancestors[i].Kind == ast.NodeKindSelectionSet { + selectionSetAncestors++ + if selectionSetAncestors == 2 { + return true + } + } + } + return false +} + +func (p *Planner) storeArgType(typeName, fieldName, argName string) { + typeNode, _ := p.visitor.Definition.Index.FirstNodeByNameStr(typeName) + + for _, fieldDefRef := range p.visitor.Definition.ObjectTypeDefinitions[typeNode.Ref].FieldsDefinition.Refs { + if bytes.Equal(p.visitor.Definition.FieldDefinitionNameBytes(fieldDefRef), []byte(fieldName)) { + for _, argDefRef := range p.visitor.Definition.FieldDefinitions[fieldDefRef].ArgumentsDefinition.Refs { + if bytes.Equal(p.visitor.Definition.InputValueDefinitionNameBytes(argDefRef), []byte(argName)) { + p.argTypeRef = p.visitor.Definition.ResolveListOrNameType(p.visitor.Definition.InputValueDefinitions[argDefRef].Type) + return + } + } + } + } +} + +func (p *Planner) configureArgument(upstreamFieldRef, downstreamFieldRef int, fieldConfig plan.FieldConfiguration, argumentConfiguration plan.ArgumentConfiguration) { + p.storeArgType(fieldConfig.TypeName, fieldConfig.FieldName, argumentConfiguration.Name) + + switch argumentConfiguration.SourceType { + case plan.FieldArgumentSource: + p.configureFieldArgumentSource(upstreamFieldRef, downstreamFieldRef, argumentConfiguration) + case plan.ObjectFieldSource: + p.configureObjectFieldSource(upstreamFieldRef, downstreamFieldRef, fieldConfig, argumentConfiguration) + } + + p.argTypeRef = -1 +} + +// configureFieldArgumentSource - creates variables for a plain argument types, in case object or list types goes deep and calls applyInlineFieldArgument +func (p *Planner) configureFieldArgumentSource(upstreamFieldRef, downstreamFieldRef int, argumentConfiguration plan.ArgumentConfiguration) { + fieldArgument, ok := p.visitor.Operation.FieldArgument(downstreamFieldRef, []byte(argumentConfiguration.Name)) + if !ok { + return + } + value := p.visitor.Operation.ArgumentValue(fieldArgument) + if value.Kind != ast.ValueKindVariable { + p.applyInlineFieldArgument(upstreamFieldRef, downstreamFieldRef, argumentConfiguration.Name, argumentConfiguration.SourcePath) + return + } + variableName := p.visitor.Operation.VariableValueNameBytes(value.Ref) + variableNameStr := p.visitor.Operation.VariableValueNameString(value.Ref) + + fieldName := p.visitor.Operation.FieldNameBytes(downstreamFieldRef) + argumentDefinition := p.visitor.Definition.NodeFieldDefinitionArgumentDefinitionByName(p.visitor.Walker.EnclosingTypeDefinition, fieldName, []byte(argumentConfiguration.Name)) + + if argumentDefinition == -1 { + return + } + + argumentType := 
p.visitor.Definition.InputValueDefinitionType(argumentDefinition) + renderer, err := resolve.NewJSONVariableRendererWithValidationFromTypeRef(p.visitor.Definition, p.visitor.Definition, argumentType) + if err != nil { + return + } + + contextVariable := &resolve.ContextVariable{ + Path: []string{variableNameStr}, + Renderer: renderer, + } + + contextVariableName, exists := p.variables.AddVariable(contextVariable) + variableValueRef, argRef := p.upstreamOperation.AddVariableValueArgument([]byte(argumentConfiguration.Name), variableName) // add the argument to the field, but don't redefine it + p.upstreamOperation.AddArgumentToField(upstreamFieldRef, argRef) + + if exists { // if the variable exists we don't have to put it onto the variables declaration again, skip + return + } + + for _, i := range p.visitor.Operation.OperationDefinitions[p.visitor.Walker.Ancestors[0].Ref].VariableDefinitions.Refs { + ref := p.visitor.Operation.VariableDefinitions[i].VariableValue.Ref + if !p.visitor.Operation.VariableValueNameBytes(ref).Equals(variableName) { + continue + } + typeName := p.visitor.Operation.ResolveTypeNameString(p.visitor.Operation.VariableDefinitions[i].Type) + typeName = p.visitor.Config.Types.RenameTypeNameOnMatchStr(typeName) + if argumentConfiguration.RenameTypeTo != "" { + typeName = argumentConfiguration.RenameTypeTo + } + importedType := p.visitor.Importer.ImportTypeWithRename(p.visitor.Operation.VariableDefinitions[i].Type, p.visitor.Operation, p.upstreamOperation, typeName) + p.upstreamOperation.AddVariableDefinitionToOperationDefinition(p.nodes[0].Ref, variableValueRef, importedType) + + if add, ok := p.addDirectivesToVariableDefinitions[i]; ok { + for _, directive := range add { + p.addDirectiveToNode(directive, ast.Node{Kind: ast.NodeKindVariableDefinition, Ref: i}) + } + } + } + + p.upstreamVariables, _ = sjson.SetRawBytes(p.upstreamVariables, variableNameStr, []byte(contextVariableName)) +} + +// applyInlineFieldArgument - configures arguments for a complex argument of a list or input object type +func (p *Planner) applyInlineFieldArgument(upstreamField, downstreamField int, argumentName string, sourcePath []string) { + fieldArgument, ok := p.visitor.Operation.FieldArgument(downstreamField, []byte(argumentName)) + if !ok { + return + } + value := p.visitor.Operation.ArgumentValue(fieldArgument) + importedValue := p.visitor.Importer.ImportValue(value, p.visitor.Operation, p.upstreamOperation) + argRef := p.upstreamOperation.AddArgument(ast.Argument{ + Name: p.upstreamOperation.Input.AppendInputString(argumentName), + Value: importedValue, + }) + p.upstreamOperation.AddArgumentToField(upstreamField, argRef) + + p.addVariableDefinitionsRecursively(value, sourcePath, nil) +} + +// resolveNestedArgumentType - extracts type of nested field or array element of argument +// fieldName - exists only for ast.ValueKindObject type of argument +func (p *Planner) resolveNestedArgumentType(fieldName []byte) (fieldTypeRef int) { + if fieldName == nil { + return p.visitor.Definition.ResolveListOrNameType(p.argTypeRef) + } + + argTypeName := p.visitor.Definition.ResolveTypeNameString(p.argTypeRef) + argTypeNode, _ := p.visitor.Definition.Index.FirstNodeByNameStr(argTypeName) + + for _, inputFieldDefRef := range p.visitor.Definition.InputObjectTypeDefinitions[argTypeNode.Ref].InputFieldsDefinition.Refs { + if bytes.Equal(p.visitor.Definition.InputValueDefinitionNameBytes(inputFieldDefRef), fieldName) { + return p.visitor.Definition.InputValueDefinitions[inputFieldDefRef].Type + } + } + + return 
-1 +} + +// addVariableDefinitionsRecursively - recursively configures variables inside a list or an input type +func (p *Planner) addVariableDefinitionsRecursively(value ast.Value, sourcePath []string, fieldName []byte) { + switch value.Kind { + case ast.ValueKindObject: + prevArgTypeRef := p.argTypeRef + p.argTypeRef = p.resolveNestedArgumentType(fieldName) + for _, objectFieldRef := range p.visitor.Operation.ObjectValues[value.Ref].Refs { + p.addVariableDefinitionsRecursively(p.visitor.Operation.ObjectFields[objectFieldRef].Value, sourcePath, p.visitor.Operation.ObjectFieldNameBytes(objectFieldRef)) + } + p.argTypeRef = prevArgTypeRef + return + case ast.ValueKindList: + for _, i := range p.visitor.Operation.ListValues[value.Ref].Refs { + p.addVariableDefinitionsRecursively(p.visitor.Operation.Values[i], sourcePath, nil) + } + return + case ast.ValueKindVariable: + // continue after switch + default: + return + } + + variableName := p.visitor.Operation.VariableValueNameBytes(value.Ref) + variableNameStr := p.visitor.Operation.VariableValueNameString(value.Ref) + variableDefinition, exists := p.visitor.Operation.VariableDefinitionByNameAndOperation(p.visitor.Walker.Ancestors[0].Ref, variableName) + if !exists { + return + } + + variableDefinitionTypeRef := p.visitor.Operation.VariableDefinitions[variableDefinition].Type + variableDefinitionTypeName := p.visitor.Operation.ResolveTypeNameString(variableDefinitionTypeRef) + variableDefinitionTypeName = p.visitor.Config.Types.RenameTypeNameOnMatchStr(variableDefinitionTypeName) + + contextVariable := &resolve.ContextVariable{ + Path: append(sourcePath, variableNameStr), + } + renderer, err := resolve.NewJSONVariableRendererWithValidationFromTypeRef(p.visitor.Operation, p.visitor.Definition, variableDefinitionTypeRef) + if err != nil { + return + } + contextVariable.Renderer = renderer + contextVariableName, variableExists := p.variables.AddVariable(contextVariable) + if variableExists { + return + } + + importedVariableDefinition := p.visitor.Importer.ImportVariableDefinitionWithRename(variableDefinition, p.visitor.Operation, p.upstreamOperation, variableDefinitionTypeName) + p.upstreamOperation.AddImportedVariableDefinitionToOperationDefinition(p.nodes[0].Ref, importedVariableDefinition) + + p.upstreamVariables, _ = sjson.SetRawBytes(p.upstreamVariables, variableNameStr, []byte(contextVariableName)) +} + +// configureObjectFieldSource - configures source of a field when it has variables coming from current object +func (p *Planner) configureObjectFieldSource(upstreamFieldRef, downstreamFieldRef int, fieldConfiguration plan.FieldConfiguration, argumentConfiguration plan.ArgumentConfiguration) { + if len(argumentConfiguration.SourcePath) < 1 { + return + } + + fieldName := p.visitor.Operation.FieldNameUnsafeString(downstreamFieldRef) + + if len(fieldConfiguration.Path) == 1 { + fieldName = fieldConfiguration.Path[0] + } + + queryTypeDefinition, exists := p.visitor.Definition.Index.FirstNodeByNameBytes(p.visitor.Definition.Index.QueryTypeName) + if !exists { + return + } + argumentDefinition := p.visitor.Definition.NodeFieldDefinitionArgumentDefinitionByName(queryTypeDefinition, []byte(fieldName), []byte(argumentConfiguration.Name)) + if argumentDefinition == -1 { + return + } + + argumentType := p.visitor.Definition.InputValueDefinitionType(argumentDefinition) + variableName := p.upstreamOperation.GenerateUnusedVariableDefinitionName(p.nodes[0].Ref) + variableValue, argument := 
p.upstreamOperation.AddVariableValueArgument([]byte(argumentConfiguration.Name), variableName) + p.upstreamOperation.AddArgumentToField(upstreamFieldRef, argument) + + typeName := p.visitor.Operation.ResolveTypeNameString(argumentType) + typeName = p.visitor.Config.Types.RenameTypeNameOnMatchStr(typeName) + if argumentConfiguration.RenameTypeTo != "" { + typeName = argumentConfiguration.RenameTypeTo + } + + importedType := p.visitor.Importer.ImportTypeWithRename(argumentType, p.visitor.Definition, p.upstreamOperation, typeName) + p.upstreamOperation.AddVariableDefinitionToOperationDefinition(p.nodes[0].Ref, variableValue, importedType) + + renderer, err := resolve.NewJSONVariableRendererWithValidationFromTypeRef(p.visitor.Definition, p.visitor.Definition, argumentType) + if err != nil { + return + } + + variable := &resolve.ObjectVariable{ + Path: argumentConfiguration.SourcePath, + Renderer: renderer, + } + + objectVariableName, exists := p.variables.AddVariable(variable) + if !exists { + p.upstreamVariables, _ = sjson.SetRawBytes(p.upstreamVariables, string(variableName), []byte(objectVariableName)) + } +} + +const ( + normalizationFailedErrMsg = "printOperation: normalization failed" + parseDocumentFailedErrMsg = "printOperation: parse %s failed" +) + +// printOperation - prints normalized upstream operation +func (p *Planner) printOperation() []byte { + buf := &bytes.Buffer{} + + err := astprinter.Print(p.upstreamOperation, nil, buf) + if err != nil { + return nil + } + + rawQuery := buf.Bytes() + + // create empty operation and definition documents + operation := ast.NewDocument() + definition := ast.NewDocument() + report := &operationreport.Report{} + operationParser := astparser.NewParser() + definitionParser := astparser.NewParser() + + operation.Input.ResetInputBytes(rawQuery) + operationParser.Parse(operation, report) + if report.HasErrors() { + p.stopWithError(parseDocumentFailedErrMsg, "operation") + return nil + } + + if p.config.UpstreamSchema == "" { + p.config.UpstreamSchema, err = astprinter.PrintString(p.visitor.Definition, nil) + if err != nil { + p.visitor.Walker.StopWithInternalErr(err) + return nil + } + } + + if p.config.Federation.Enabled { + federationSchema, err := federation.BuildFederationSchema(p.config.UpstreamSchema, p.config.Federation.ServiceSDL) + if err != nil { + p.visitor.Walker.StopWithInternalErr(err) + return nil + } + definition.Input.ResetInputString(federationSchema) + definitionParser.Parse(definition, report) + if report.HasErrors() { + p.stopWithError(parseDocumentFailedErrMsg, "definition") + return nil + } + } else { + definition.Input.ResetInputString(p.config.UpstreamSchema) + definitionParser.Parse(definition, report) + if report.HasErrors() { + p.stopWithError("unable to parse upstream schema") + return nil + } + } + + if err := asttransform.MergeDefinitionWithBaseSchema(definition); err != nil { + p.stopWithError("unable to merge upstream schema with base schema") + return nil + } + + // When datasource is nested and definition query type do not contain operation field + // we have to replace a query type with a current root type + p.replaceQueryType(definition) + + // normalize upstream operation + if !p.normalizeOperation(operation, definition, report) { + p.stopWithError(normalizationFailedErrMsg) + return nil + } + + validator := astvalidation.DefaultOperationValidator() + validator.Validate(operation, definition, report) + if report.HasErrors() { + p.stopWithError("validation failed: %s", report.Error()) + return nil + } + + 
buf.Reset() + + // print upstream operation + err = astprinter.Print(operation, p.visitor.Definition, buf) + if err != nil { + p.stopWithError(normalizationFailedErrMsg) + return nil + } + + return buf.Bytes() +} + +func (p *Planner) stopWithError(msg string, args ...interface{}) { + p.visitor.Walker.StopWithInternalErr(fmt.Errorf(msg, args...)) +} + +/* +replaceQueryType - sets definition query type to a current root type. +Helps to do a normalization of the upstream query for a nested datasource. +Skips replace when: +1. datasource is not nested; +2. federation is enabled; +3. query type contains an operation field; + +Example transformation: +Original schema definition: + + type Query { + serviceOne(serviceOneArg: String): ServiceOneResponse + serviceTwo(serviceTwoArg: Boolean): ServiceTwoResponse + } + + type ServiceOneResponse { + fieldOne: String! + countries: [Country!]! # nested datasource without explicit field path + } + + type ServiceTwoResponse { + fieldTwo: String + serviceOneField: String + serviceOneResponse: ServiceOneResponse # nested datasource with implicit field path "serviceOne" + } + + type Country { + name: String! + } + +`serviceOneResponse` field of a `ServiceTwoResponse` is nested but has a field path that exists on the Query type +- In this case definition will not be modified + +`countries` field of a `ServiceOneResponse` is nested and not present on the Query type +- In this case query type of definition will be replaced with a `ServiceOneResponse` + +Modified schema definition: + + schema { + query: ServiceOneResponse + } + + type ServiceOneResponse { + fieldOne: String! + countries: [Country!]! + } + + type ServiceTwoResponse { + fieldTwo: String + serviceOneField: String + serviceOneResponse: ServiceOneResponse + } + + type Country { + name: String! + } + +Refer to pkg/engine/datasource/graphql_datasource/graphql_datasource_test.go:632 +Case name: TestGraphQLDataSource/nested_graphql_engines + +If we didn't do this transformation, the normalization would fail because it's not possible +to traverse the AST as there's a mismatch between the upstream Operation and the schema. + +If the nested Query can be rewritten so that it's a valid Query against the existing schema, fine. +However, when rewriting the nested Query onto the schema's Query type, +it might be the case that no FieldDefinition exists for the rewritten root field. +In that case, we transform the schema so that normalization and printing of the upstream Query succeeds. +*/ +func (p *Planner) replaceQueryType(definition *ast.Document) { + if !p.isNested || p.config.Federation.Enabled { + return + } + + queryTypeName := definition.Index.QueryTypeName + queryNode, exists := definition.Index.FirstNodeByNameBytes(queryTypeName) + if !exists || queryNode.Kind != ast.NodeKindObjectTypeDefinition { + return + } + + // check that query type has rootFieldName within its fields + hasField := definition.FieldDefinitionsContainField(definition.ObjectTypeDefinitions[queryNode.Ref].FieldsDefinition.Refs, []byte(p.rootFieldName)) + if hasField { + return + } + + definition.RemoveObjectTypeDefinition(definition.Index.QueryTypeName) + definition.ReplaceRootOperationTypeDefinition(p.rootTypeName, ast.OperationTypeQuery) +} + +// normalizeOperation - normalizes operation against definition. 
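+// Normalization extracts variables, removes fragment definitions, and drops
+// unused variables; ok is false when the normalizer reports errors.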
+func (p *Planner) normalizeOperation(operation, definition *ast.Document, report *operationreport.Report) (ok bool) { + report.Reset() + normalizer := astnormalization.NewWithOpts( + astnormalization.WithExtractVariables(), + astnormalization.WithRemoveFragmentDefinitions(), + astnormalization.WithRemoveUnusedVariables(), + ) + normalizer.NormalizeOperation(operation, definition, report) + + return !report.HasErrors() +} + +// addField - add a field to an upstream operation +func (p *Planner) addField(ref int) { + fieldName := p.visitor.Operation.FieldNameString(ref) + + alias := ast.Alias{ + IsDefined: p.visitor.Operation.FieldAliasIsDefined(ref), + } + + if alias.IsDefined { + aliasBytes := p.visitor.Operation.FieldAliasBytes(ref) + alias.Name = p.upstreamOperation.Input.AppendInputBytes(aliasBytes) + } + + typeName := p.visitor.Walker.EnclosingTypeDefinition.NameString(p.visitor.Definition) + for i := range p.visitor.Config.Fields { + isDesiredField := p.visitor.Config.Fields[i].TypeName == typeName && + p.visitor.Config.Fields[i].FieldName == fieldName + + // check that we are on a desired field and field path contains a single element - mapping is plain + if isDesiredField && len(p.visitor.Config.Fields[i].Path) == 1 { + // define alias when mapping path differs from fieldName and no alias has been defined + if p.visitor.Config.Fields[i].Path[0] != fieldName && !alias.IsDefined { + alias.IsDefined = true + aliasBytes := p.visitor.Operation.FieldNameBytes(ref) + alias.Name = p.upstreamOperation.Input.AppendInputBytes(aliasBytes) + } + + // override fieldName with mapping path value + fieldName = p.visitor.Config.Fields[i].Path[0] + + // when provided field is a root type field save new field name + if ref == p.rootFieldRef { + p.rootFieldName = fieldName + } + + break + } + } + + field := p.upstreamOperation.AddField(ast.Field{ + Name: p.upstreamOperation.Input.AppendInputString(fieldName), + Alias: alias, + }) + + selection := ast.Selection{ + Kind: ast.SelectionKindField, + Ref: field.Ref, + } + + p.upstreamOperation.AddSelection(p.nodes[len(p.nodes)-1].Ref, selection) + p.nodes = append(p.nodes, field) +} + +type OnWsConnectionInitCallback func(ctx context.Context, url string, header http.Header) (json.RawMessage, error) + +type Factory struct { + BatchFactory resolve.DataSourceBatchFactory + HTTPClient *http.Client + StreamingClient *http.Client + OnWsConnectionInitCallback *OnWsConnectionInitCallback + SubscriptionClient *SubscriptionClient +} + +func (f *Factory) Planner(ctx context.Context) plan.DataSourcePlanner { + if f.SubscriptionClient == nil { + opts := make([]Options, 0) + if f.OnWsConnectionInitCallback != nil { + opts = append(opts, WithOnWsConnectionInitCallback(f.OnWsConnectionInitCallback)) + } + + f.SubscriptionClient = NewGraphQLSubscriptionClient(f.HTTPClient, f.StreamingClient, ctx, opts...) 
+ } else if f.SubscriptionClient.engineCtx == nil { + f.SubscriptionClient.engineCtx = ctx + } + return &Planner{ + batchFactory: f.BatchFactory, + fetchClient: f.HTTPClient, + subscriptionClient: f.SubscriptionClient, + } +} + +type Source struct { + httpClient *http.Client +} + +func (s *Source) compactAndUnNullVariables(input []byte, undefinedVariables []string) []byte { + variables, _, _, err := jsonparser.Get(input, "body", "variables") + if err != nil { + return input + } + if bytes.Equal(variables, []byte("null")) || bytes.Equal(variables, []byte("{}")) { + return input + } + if bytes.ContainsAny(variables, " \t\n\r") { + buf := bytes.NewBuffer(make([]byte, 0, len(variables))) + _ = json.Compact(buf, variables) + variables = buf.Bytes() + } + + removeNullVariables := httpclient.IsInputFlagSet(input, httpclient.UNNULLVARIABLES) + variables = s.cleanupVariables(variables, removeNullVariables, undefinedVariables) + + input, _ = jsonparser.Set(input, variables, "body", "variables") + return input +} + +// cleanupVariables removes null variables and empty objects from the input if removeNullVariables is true +// otherwise returns the input as is +func (s *Source) cleanupVariables(variables []byte, removeNullVariables bool, undefinedVariables []string) []byte { + cp := make([]byte, len(variables)) + copy(cp, variables) + + // remove null variables from JSON: {"a":null,"b":1} -> {"b":1} + err := jsonparser.ObjectEach(variables, func(key []byte, value []byte, dataType jsonparser.ValueType, offset int) error { + if dataType == jsonparser.Null { + stringKey := unsafebytes.BytesToString(key) + // original code uses: slices.Contains (not supported with go 1.16) + // if removeNullVariables || slices.Contains(undefinedVariables, stringKey) { + // cp = jsonparser.Delete(cp, stringKey) + // } + containsStringKey := false + if !removeNullVariables { + for i := 0; i < len(undefinedVariables); i++ { + if undefinedVariables[i] == stringKey { + containsStringKey = true + break + } + } + } + if removeNullVariables || containsStringKey { + cp = jsonparser.Delete(cp, stringKey) + } + } + return nil + }) + if err != nil { + return variables + } + + // remove empty objects + if removeNullVariables { + cp = s.removeEmptyObjects(cp) + } + + return cp +} + +// removeEmptyObjects removes empty objects from JSON: {"b": "b", "c": {}} -> {"b": "b"} +func (s *Source) removeEmptyObjects(variables []byte) []byte { + var changed bool + for { + variables, changed = s.replaceEmptyObject(variables) + if !changed { + break + } + } + return variables +} + +func (s *Source) replaceEmptyObject(variables []byte) ([]byte, bool) { + if i := bytes.Index(variables, []byte(":{}")); i != -1 { + end := i + 3 + hasTrailingComma := false + if variables[end] == ',' { + end++ + hasTrailingComma = true + } + startQuote := bytes.LastIndex(variables[:i-2], []byte("\"")) + if !hasTrailingComma && variables[startQuote-1] == ',' { + startQuote-- + } + return append(variables[:startQuote], variables[end:]...), true + } + + return variables, false +} + +func (s *Source) Load(ctx context.Context, input []byte, writer io.Writer) (err error) { + undefinedVariables := httpclient.CtxGetUndefinedVariables(ctx) + + input = s.compactAndUnNullVariables(input, undefinedVariables) + return httpclient.Do(s.httpClient, ctx, input, writer) +} + +type GraphQLSubscriptionClient interface { + Subscribe(ctx context.Context, options GraphQLSubscriptionOptions, next chan<- []byte) error +} + +type GraphQLSubscriptionOptions struct { + URL string `json:"url"` + Body 
GraphQLBody `json:"body"` + Header http.Header `json:"header"` + UseSSE bool `json:"use_sse"` + SSEMethodPost bool `json:"sse_method_post"` +} + +type GraphQLBody struct { + Query string `json:"query,omitempty"` + OperationName string `json:"operationName,omitempty"` + Variables json.RawMessage `json:"variables,omitempty"` + Extensions json.RawMessage `json:"extensions,omitempty"` +} + +type SubscriptionSource struct { + client GraphQLSubscriptionClient +} + +func (s *SubscriptionSource) Start(ctx context.Context, input []byte, next chan<- []byte) error { + var options GraphQLSubscriptionOptions + err := json.Unmarshal(input, &options) + if err != nil { + return err + } + if options.Body.Query == "" { + return resolve.ErrUnableToResolve + } + return s.client.Subscribe(ctx, options, next) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/graphql_datasource/graphql_sse_handler.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/graphql_datasource/graphql_sse_handler.go new file mode 100644 index 00000000000..9782280a524 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/graphql_datasource/graphql_sse_handler.go @@ -0,0 +1,281 @@ +package graphql_datasource + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "math" + "net/http" + + "github.com/buger/jsonparser" + log "github.com/jensneuse/abstractlogger" + "github.com/r3labs/sse/v2" +) + +var ( + headerData = []byte("data:") + headerEvent = []byte("event:") + + eventTypeComplete = []byte("complete") + eventTypeNext = []byte("next") +) + +type gqlSSEConnectionHandler struct { + conn *http.Client + ctx context.Context + log log.Logger + options GraphQLSubscriptionOptions +} + +func newSSEConnectionHandler(ctx context.Context, conn *http.Client, opts GraphQLSubscriptionOptions, l log.Logger) *gqlSSEConnectionHandler { + return &gqlSSEConnectionHandler{ + conn: conn, + ctx: ctx, + log: l, + options: opts, + } +} + +func (h *gqlSSEConnectionHandler) StartBlocking(sub Subscription) { + reqCtx := sub.ctx + + dataCh := make(chan []byte) + errCh := make(chan []byte) + defer func() { + close(dataCh) + close(errCh) + close(sub.next) + }() + + go h.subscribe(reqCtx, sub, dataCh, errCh) + + for { + select { + case data := <-dataCh: + sub.next <- data + case err := <-errCh: + sub.next <- err + return + case <-reqCtx.Done(): + return + } + } +} + +func (h *gqlSSEConnectionHandler) subscribe(ctx context.Context, sub Subscription, dataCh, errCh chan []byte) { + resp, err := h.performSubscriptionRequest(ctx) + if err != nil { + h.log.Error("failed to perform subscription request", log.Error(err)) + + if ctx.Err() != nil { + // request context was canceled do not send an error as channel will be closed + return + } + + sub.next <- []byte(internalError) + + return + } + defer func() { + _ = resp.Body.Close() + }() + + reader := sse.NewEventStreamReader(resp.Body, math.MaxInt32) + + for { + if ctx.Err() != nil { + return + } + + msg, err := reader.ReadEvent() + if err != nil { + if err == io.EOF { + return + } + + h.log.Error("failed to read event", log.Error(err)) + + errCh <- []byte(internalError) + return + } + + if len(msg) == 0 { + continue + } + + // normalize the crlf to lf to make it easier to split the lines. + // split the line by "\n" or "\r", per the spec. 
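+		// For example, a single "next" event from a graphql-sse upstream might
+		// arrive as (illustrative payload):
+		//
+		//	event: next
+		//	data: {"data":{"counter":1}}
+		//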
+ lines := bytes.FieldsFunc(msg, func(r rune) bool { return r == '\n' || r == '\r' }) + for _, line := range lines { + switch { + case bytes.HasPrefix(line, headerData): + data := trim(line[len(headerData):]) + + if len(data) == 0 { + continue + } + + dataCh <- data + case bytes.HasPrefix(line, headerEvent): + event := trim(line[len(headerEvent):]) + + switch { + case bytes.Equal(event, eventTypeComplete): + return + case bytes.Equal(event, eventTypeNext): + continue + } + case bytes.HasPrefix(msg, []byte(":")): + // according to the spec, we ignore messages starting with a colon + continue + default: + // ideally we should not get here, or if we do, we should ignore it + // but some providers send a json object with the error messages, without the event header + + // check for errors which came without event header + data := trim(line) + + val, valueType, _, err := jsonparser.Get(data, "errors") + switch err { + case jsonparser.KeyPathNotFoundError: + continue + case jsonparser.MalformedJsonError: + // ignore garbage + continue + case nil: + if valueType == jsonparser.Array { + response := []byte(`{}`) + response, err = jsonparser.Set(response, val, "errors") + if err != nil { + h.log.Error("failed to set errors", log.Error(err)) + + errCh <- []byte(internalError) + return + } + + errCh <- response + return + } else if valueType == jsonparser.Object { + response := []byte(`{"errors":[]}`) + response, err = jsonparser.Set(response, val, "errors", "[0]") + if err != nil { + h.log.Error("failed to set errors", log.Error(err)) + + errCh <- []byte(internalError) + return + } + + errCh <- response + return + } + + default: + h.log.Error("failed to parse errors", log.Error(err)) + errCh <- []byte(internalError) + return + } + } + } + } +} + +func trim(data []byte) []byte { + // remove the leading space + data = bytes.TrimLeft(data, " \t") + + // remove the trailing new line + data = bytes.TrimRight(data, "\n") + + return data +} + +func (h *gqlSSEConnectionHandler) performSubscriptionRequest(ctx context.Context) (*http.Response, error) { + + var req *http.Request + var err error + + // default to GET requests when SSEMethodPost is not enabled in the SubscriptionConfiguration + if h.options.SSEMethodPost { + req, err = h.buildPOSTRequest(ctx) + } else { + req, err = h.buildGETRequest(ctx) + } + + if err != nil { + return nil, err + } + + resp, err := h.conn.Do(req) + if err != nil { + return nil, err + } + + switch resp.StatusCode { + case http.StatusOK: + return resp, nil + default: + return nil, fmt.Errorf("failed to connect to stream unexpected resp status code: %d", resp.StatusCode) + } +} + +func (h *gqlSSEConnectionHandler) buildGETRequest(ctx context.Context) (*http.Request, error) { + req, err := http.NewRequestWithContext(ctx, "GET", h.options.URL, nil) + if err != nil { + return nil, err + } + + if h.options.Header != nil { + req.Header = h.options.Header + } + + req.Header.Set("Accept", "text/event-stream") + req.Header.Set("Connection", "keep-alive") + req.Header.Set("Cache-Control", "no-cache") + + query := req.URL.Query() + query.Add("query", h.options.Body.Query) + + if h.options.Body.Variables != nil { + variables, _ := h.options.Body.Variables.MarshalJSON() + + query.Add("variables", string(variables)) + } + + if h.options.Body.OperationName != "" { + query.Add("operationName", h.options.Body.OperationName) + } + + if h.options.Body.Extensions != nil { + extensions, _ := h.options.Body.Extensions.MarshalJSON() + + query.Add("extensions", string(extensions)) + } + + 
req.URL.RawQuery = query.Encode()
+
+	return req, nil
+}
+
+func (h *gqlSSEConnectionHandler) buildPOSTRequest(ctx context.Context) (*http.Request, error) {
+	body, err := json.Marshal(h.options.Body)
+	if err != nil {
+		return nil, err
+	}
+
+	req, err := http.NewRequestWithContext(ctx, "POST", h.options.URL, bytes.NewBuffer(body))
+	if err != nil {
+		return nil, err
+	}
+
+	if h.options.Header != nil {
+		req.Header = h.options.Header
+	}
+
+	req.Header.Set("Accept", "text/event-stream")
+	req.Header.Set("Connection", "keep-alive")
+	req.Header.Set("Cache-Control", "no-cache")
+
+	return req, nil
+}
diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/graphql_datasource/graphql_subscription_client.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/graphql_datasource/graphql_subscription_client.go
new file mode 100644
index 00000000000..2b8a955f23d
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/graphql_datasource/graphql_subscription_client.go
@@ -0,0 +1,328 @@
+package graphql_datasource
+
+import (
+	"context"
+	"fmt"
+	"math"
+	"net/http"
+	"sync"
+	"time"
+
+	"github.com/buger/jsonparser"
+	"github.com/cespare/xxhash/v2"
+	"github.com/jensneuse/abstractlogger"
+	"nhooyr.io/websocket"
+)
+
+const ackWaitTimeout = 30 * time.Second
+
+// SubscriptionClient allows running multiple subscriptions over the same WebSocket or SSE connection.
+// It takes care of de-duplicating connections to the same origin under certain circumstances:
+// if Hash(URL,Body,Headers) yields the same result, an existing connection is re-used.
+type SubscriptionClient struct {
+	streamingClient            *http.Client
+	httpClient                 *http.Client
+	engineCtx                  context.Context
+	log                        abstractlogger.Logger
+	hashPool                   sync.Pool
+	handlers                   map[uint64]ConnectionHandler
+	handlersMu                 sync.Mutex
+	wsSubProtocol              string
+	onWsConnectionInitCallback *OnWsConnectionInitCallback
+
+	readTimeout time.Duration
+}
+
+type Options func(options *opts)
+
+func WithLogger(log abstractlogger.Logger) Options {
+	return func(options *opts) {
+		options.log = log
+	}
+}
+
+func WithReadTimeout(timeout time.Duration) Options {
+	return func(options *opts) {
+		options.readTimeout = timeout
+	}
+}
+
+func WithWSSubProtocol(protocol string) Options {
+	return func(options *opts) {
+		options.wsSubProtocol = protocol
+	}
+}
+
+func WithOnWsConnectionInitCallback(callback *OnWsConnectionInitCallback) Options {
+	return func(options *opts) {
+		options.onWsConnectionInitCallback = callback
+	}
+}
+
+type opts struct {
+	readTimeout                time.Duration
+	log                        abstractlogger.Logger
+	wsSubProtocol              string
+	onWsConnectionInitCallback *OnWsConnectionInitCallback
+}
+
+// GraphQLSubscriptionClientFactory abstracts the way of creating a new GraphQLSubscriptionClient.
+// This can be very handy for testing purposes.
+type GraphQLSubscriptionClientFactory interface {
+	NewSubscriptionClient(httpClient, streamingClient *http.Client, engineCtx context.Context, options ...Options) GraphQLSubscriptionClient
+}
+
+type DefaultSubscriptionClientFactory struct{}
+
+func (d *DefaultSubscriptionClientFactory) NewSubscriptionClient(httpClient, streamingClient *http.Client, engineCtx context.Context, options ...Options) GraphQLSubscriptionClient {
+	return NewGraphQLSubscriptionClient(httpClient, streamingClient, engineCtx, options...)
+} + +func NewGraphQLSubscriptionClient(httpClient, streamingClient *http.Client, engineCtx context.Context, options ...Options) *SubscriptionClient { + op := &opts{ + readTimeout: time.Second, + log: abstractlogger.NoopLogger, + } + for _, option := range options { + option(op) + } + return &SubscriptionClient{ + httpClient: httpClient, + streamingClient: streamingClient, + engineCtx: engineCtx, + handlers: make(map[uint64]ConnectionHandler), + log: op.log, + readTimeout: op.readTimeout, + hashPool: sync.Pool{ + New: func() interface{} { + return xxhash.New() + }, + }, + wsSubProtocol: op.wsSubProtocol, + onWsConnectionInitCallback: op.onWsConnectionInitCallback, + } +} + +// Subscribe initiates a new GraphQL Subscription with the origin +// If an existing WS connection with the same ID (Hash) exists, it is being re-used +// If connection protocol is SSE, a new connection is always created +// If no connection exists, the client initiates a new one +func (c *SubscriptionClient) Subscribe(reqCtx context.Context, options GraphQLSubscriptionOptions, next chan<- []byte) error { + if options.UseSSE { + return c.subscribeSSE(reqCtx, options, next) + } + + return c.subscribeWS(reqCtx, options, next) +} + +func (c *SubscriptionClient) subscribeSSE(reqCtx context.Context, options GraphQLSubscriptionOptions, next chan<- []byte) error { + if c.streamingClient == nil { + return fmt.Errorf("streaming http client is nil") + } + + sub := Subscription{ + ctx: reqCtx, + options: options, + next: next, + } + + handler := newSSEConnectionHandler(reqCtx, c.streamingClient, options, c.log) + + go func() { + handler.StartBlocking(sub) + }() + + return nil +} + +func (c *SubscriptionClient) subscribeWS(reqCtx context.Context, options GraphQLSubscriptionOptions, next chan<- []byte) error { + if c.httpClient == nil { + return fmt.Errorf("http client is nil") + } + + sub := Subscription{ + ctx: reqCtx, + options: options, + next: next, + } + + // each WS connection to an origin is uniquely identified by the Hash(URL,Headers,Body) + handlerID, err := c.generateHandlerIDHash(options) + if err != nil { + return err + } + + c.handlersMu.Lock() + defer c.handlersMu.Unlock() + handler, exists := c.handlers[handlerID] + if exists { + select { + case handler.SubscribeCH() <- sub: + case <-reqCtx.Done(): + } + return nil + } + + handler, err = c.newWSConnectionHandler(reqCtx, options) + if err != nil { + return err + } + + c.handlers[handlerID] = handler + + go func(handlerID uint64) { + handler.StartBlocking(sub) + c.handlersMu.Lock() + delete(c.handlers, handlerID) + c.handlersMu.Unlock() + }(handlerID) + + return nil +} + +// generateHandlerIDHash generates a Hash based on: URL and Headers to uniquely identify Upgrade Requests +func (c *SubscriptionClient) generateHandlerIDHash(options GraphQLSubscriptionOptions) (uint64, error) { + var ( + err error + ) + xxh := c.hashPool.Get().(*xxhash.Digest) + defer c.hashPool.Put(xxh) + xxh.Reset() + + _, err = xxh.WriteString(options.URL) + if err != nil { + return 0, err + } + err = options.Header.Write(xxh) + if err != nil { + return 0, err + } + + return xxh.Sum64(), nil +} + +func (c *SubscriptionClient) newWSConnectionHandler(reqCtx context.Context, options GraphQLSubscriptionOptions) (ConnectionHandler, error) { + subProtocols := []string{ProtocolGraphQLWS, ProtocolGraphQLTWS} + if c.wsSubProtocol != "" { + subProtocols = []string{c.wsSubProtocol} + } + + conn, upgradeResponse, err := websocket.Dial(reqCtx, options.URL, &websocket.DialOptions{ + HTTPClient: c.httpClient, + 
HTTPHeader: options.Header, + CompressionMode: websocket.CompressionDisabled, + Subprotocols: subProtocols, + }) + if err != nil { + return nil, err + } + // Disable the maximum message size limit. Don't use MaxInt64 since + // the nhooyr.io/websocket doesn't handle it correctly on 32 bit systems. + conn.SetReadLimit(math.MaxInt32) + if upgradeResponse.StatusCode != http.StatusSwitchingProtocols { + return nil, fmt.Errorf("upgrade unsuccessful") + } + + connectionInitMessage, err := c.getConnectionInitMessage(reqCtx, options.URL, options.Header) + if err != nil { + return nil, err + } + + // init + ack + err = conn.Write(reqCtx, websocket.MessageText, connectionInitMessage) + if err != nil { + return nil, err + } + + if c.wsSubProtocol == "" { + c.wsSubProtocol = conn.Subprotocol() + } + + if err := waitForAck(reqCtx, conn); err != nil { + return nil, err + } + + switch c.wsSubProtocol { + case ProtocolGraphQLWS: + return newGQLWSConnectionHandler(c.engineCtx, conn, c.readTimeout, c.log), nil + case ProtocolGraphQLTWS: + return newGQLTWSConnectionHandler(c.engineCtx, conn, c.readTimeout, c.log), nil + default: + return nil, fmt.Errorf("unknown protocol %s", conn.Subprotocol()) + } +} + +func (c *SubscriptionClient) getConnectionInitMessage(ctx context.Context, url string, header http.Header) ([]byte, error) { + if c.onWsConnectionInitCallback == nil { + return connectionInitMessage, nil + } + + callback := *c.onWsConnectionInitCallback + + payload, err := callback(ctx, url, header) + if err != nil { + return nil, err + } + + if len(payload) == 0 { + return connectionInitMessage, nil + } + + msg, err := jsonparser.Set(connectionInitMessage, payload, "payload") + if err != nil { + return nil, err + } + + return msg, nil +} + +type ConnectionHandler interface { + StartBlocking(sub Subscription) + SubscribeCH() chan<- Subscription +} + +type Subscription struct { + ctx context.Context + options GraphQLSubscriptionOptions + next chan<- []byte +} + +func waitForAck(ctx context.Context, conn *websocket.Conn) error { + timer := time.NewTimer(ackWaitTimeout) + for { + select { + case <-timer.C: + return fmt.Errorf("timeout while waiting for connection_ack") + default: + } + + msgType, msg, err := conn.Read(ctx) + if err != nil { + return err + } + if msgType != websocket.MessageText { + return fmt.Errorf("unexpected message type") + } + + respType, err := jsonparser.GetString(msg, "type") + if err != nil { + return err + } + + switch respType { + case messageTypeConnectionKeepAlive: + continue + case messageTypePing: + err := conn.Write(ctx, websocket.MessageText, []byte(pongMessage)) + if err != nil { + return fmt.Errorf("failed to send pong message: %w", err) + } + + continue + case messageTypeConnectionAck: + return nil + default: + return fmt.Errorf("expected connection_ack or ka, got %s", respType) + } + } +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/graphql_datasource/graphql_tws_handler.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/graphql_datasource/graphql_tws_handler.go new file mode 100644 index 00000000000..d4883a9941a --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/graphql_datasource/graphql_tws_handler.go @@ -0,0 +1,275 @@ +package graphql_datasource + +import ( + "context" + "encoding/json" + "fmt" + "strconv" + "time" + + "github.com/buger/jsonparser" + log "github.com/jensneuse/abstractlogger" + "nhooyr.io/websocket" +) + +// gqlTWSConnectionHandler is 
// responsible for handling a connection to an origin.
+// It manages all subscriptions running over the underlying WebSocket connection;
+// if all subscriptions are complete or cancelled/unsubscribed, the handler terminates.
+type gqlTWSConnectionHandler struct {
+	conn               *websocket.Conn
+	ctx                context.Context
+	log                log.Logger
+	subscribeCh        chan Subscription
+	nextSubscriptionID int
+	subscriptions      map[string]Subscription
+	readTimeout        time.Duration
+}
+
+func newGQLTWSConnectionHandler(ctx context.Context, conn *websocket.Conn, rt time.Duration, l log.Logger) *gqlTWSConnectionHandler {
+	return &gqlTWSConnectionHandler{
+		conn:               conn,
+		ctx:                ctx,
+		log:                l,
+		subscribeCh:        make(chan Subscription),
+		nextSubscriptionID: 0,
+		subscriptions:      map[string]Subscription{},
+		readTimeout:        rt,
+	}
+}
+
+func (h *gqlTWSConnectionHandler) SubscribeCH() chan<- Subscription {
+	return h.subscribeCh
+}
+
+func (h *gqlTWSConnectionHandler) StartBlocking(sub Subscription) {
+	readCtx, cancel := context.WithCancel(h.ctx)
+	defer func() {
+		h.unsubscribeAllAndCloseConn()
+		cancel()
+	}()
+
+	h.subscribe(sub)
+	dataCh := make(chan []byte)
+	errCh := make(chan error)
+	go h.readBlocking(readCtx, dataCh, errCh)
+
+	for {
+		if h.ctx.Err() != nil || !h.hasActiveSubscriptions() {
+			return
+		}
+
+		select {
+		case <-time.After(h.readTimeout):
+			continue
+		case sub = <-h.subscribeCh:
+			h.subscribe(sub)
+		case err := <-errCh:
+			h.log.Error("gqlTWSConnectionHandler.StartBlocking", log.Error(err))
+			h.broadcastErrorMessage(err)
+			return
+		case data := <-dataCh:
+			messageType, err := jsonparser.GetString(data, "type")
+			if err != nil {
+				continue
+			}
+
+			switch messageType {
+			case messageTypePing:
+				h.handleMessageTypePing()
+			case messageTypeNext:
+				h.handleMessageTypeNext(data)
+			case messageTypeComplete:
+				h.handleMessageTypeComplete(data)
+			case messageTypeError:
+				h.handleMessageTypeError(data)
+				continue
+			default:
+				h.log.Error("unknown message type", log.String("type", messageType))
+				continue
+			}
+		}
+	}
+}
+
+func (h *gqlTWSConnectionHandler) unsubscribeAllAndCloseConn() {
+	for id := range h.subscriptions {
+		h.unsubscribe(id)
+	}
+	_ = h.conn.Close(websocket.StatusNormalClosure, "")
+}
+
+func (h *gqlTWSConnectionHandler) unsubscribe(subscriptionID string) {
+	sub, ok := h.subscriptions[subscriptionID]
+	if !ok {
+		return
+	}
+	close(sub.next)
+	delete(h.subscriptions, subscriptionID)
+
+	req := fmt.Sprintf(completeMessage, subscriptionID)
+	err := h.conn.Write(h.ctx, websocket.MessageText, []byte(req))
+	if err != nil {
+		h.log.Error("failed to write complete message", log.Error(err))
+	}
+}
+
+// subscribe adds a new Subscription to the gqlTWSConnectionHandler and sends the subscribeMessage to the origin
+func (h *gqlTWSConnectionHandler) subscribe(sub Subscription) {
+	graphQLBody, err := json.Marshal(sub.options.Body)
+	if err != nil {
+		h.log.Error("failed to marshal GraphQL body", log.Error(err))
+		return
+	}
+
+	h.nextSubscriptionID++
+
+	subscriptionID := strconv.Itoa(h.nextSubscriptionID)
+
+	subscribeRequest := fmt.Sprintf(subscribeMessage, subscriptionID, string(graphQLBody))
+	err = h.conn.Write(h.ctx, websocket.MessageText, []byte(subscribeRequest))
+	if err != nil {
+		h.log.Error("failed to write subscribe message", log.Error(err))
+		return
+	}
+
+	h.subscriptions[subscriptionID] = sub
+}
+
+func (h *gqlTWSConnectionHandler) broadcastErrorMessage(err error) {
+	errMsg := fmt.Sprintf(errorMessageTemplate, err)
+	for _, sub := range h.subscriptions {
+		ctx, cancel :=
context.WithTimeout(h.ctx, time.Second*5)
+		select {
+		case sub.next <- []byte(errMsg):
+			cancel()
+			continue
+		case <-ctx.Done():
+			cancel()
+			continue
+		}
+	}
+}
+
+func (h *gqlTWSConnectionHandler) handleMessageTypeComplete(data []byte) {
+	id, err := jsonparser.GetString(data, "id")
+	if err != nil {
+		return
+	}
+	sub, ok := h.subscriptions[id]
+	if !ok {
+		return
+	}
+	close(sub.next)
+	delete(h.subscriptions, id)
+}
+
+func (h *gqlTWSConnectionHandler) handleMessageTypeError(data []byte) {
+	id, err := jsonparser.GetString(data, "id")
+	if err != nil {
+		return
+	}
+	sub, ok := h.subscriptions[id]
+	if !ok {
+		return
+	}
+
+	value, valueType, _, err := jsonparser.Get(data, "payload")
+	if err != nil {
+		h.log.Error(
+			"failed to get payload from error message",
+			log.Error(err),
+			log.ByteString("raw message", data),
+		)
+		sub.next <- []byte(internalError)
+		return
+	}
+
+	switch valueType {
+	case jsonparser.Array:
+		response := []byte(`{}`)
+		response, err = jsonparser.Set(response, value, "errors")
+		if err != nil {
+			h.log.Error(
+				"failed to set errors response",
+				log.Error(err),
+				log.ByteString("raw message", value),
+			)
+			sub.next <- []byte(internalError)
+			return
+		}
+		sub.next <- response
+	default:
+		sub.next <- []byte(internalError)
+	}
+}
+
+func (h *gqlTWSConnectionHandler) handleMessageTypePing() {
+	err := h.conn.Write(h.ctx, websocket.MessageText, []byte(pongMessage))
+	if err != nil {
+		h.log.Error("failed to write pong message", log.Error(err))
+	}
+}
+
+func (h *gqlTWSConnectionHandler) handleMessageTypeNext(data []byte) {
+	id, err := jsonparser.GetString(data, "id")
+	if err != nil {
+		return
+	}
+	sub, ok := h.subscriptions[id]
+	if !ok {
+		return
+	}
+
+	value, _, _, err := jsonparser.Get(data, "payload")
+	if err != nil {
+		h.log.Error(
+			"failed to get payload from next message",
+			log.Error(err),
+		)
+		sub.next <- []byte(internalError)
+		return
+	}
+
+	ctx, cancel := context.WithTimeout(h.ctx, time.Second*5)
+	defer cancel()
+
+	select {
+	case <-ctx.Done():
+	case sub.next <- value:
+	case <-sub.ctx.Done():
+	}
+}
+
+// readBlocking is a dedicated loop running in a separate goroutine,
+// because the nhooyr.io/websocket library doesn't allow reading with a context with a timeout.
+// It blocks on reading until the context of the gqlTWSConnectionHandler is done.
+func (h *gqlTWSConnectionHandler) readBlocking(ctx context.Context, dataCh chan []byte, errCh chan error) {
+	for {
+		msgType, data, err := h.conn.Read(ctx)
+		if ctx.Err() != nil {
+			errCh <- err
+			return
+		}
+		if err != nil {
+			errCh <- err
+			return
+		}
+		if msgType != websocket.MessageText {
+			continue
+		}
+		select {
+		case dataCh <- data:
+		case <-ctx.Done():
+			return
+		}
+	}
+}
+
+func (h *gqlTWSConnectionHandler) hasActiveSubscriptions() (hasActiveSubscriptions bool) {
+	for id, sub := range h.subscriptions {
+		if sub.ctx.Err() != nil {
+			h.unsubscribe(id)
+		}
+	}
+	return len(h.subscriptions) != 0
+}
diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/graphql_datasource/graphql_ws_handler.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/graphql_datasource/graphql_ws_handler.go
new file mode 100644
index 00000000000..6f8cdae6ee6
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/graphql_datasource/graphql_ws_handler.go
@@ -0,0 +1,270 @@
+package graphql_datasource
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"strconv"
+	"time"
+
+	"github.com/buger/jsonparser"
+	"github.com/jensneuse/abstractlogger"
+	"nhooyr.io/websocket"
+)
+
+// gqlWSConnectionHandler is responsible for handling a connection to an origin.
+// It manages all subscriptions over the underlying WebSocket connection; once all
+// subscriptions are completed or cancelled/unsubscribed, the handler terminates.
+type gqlWSConnectionHandler struct {
+	conn               *websocket.Conn
+	ctx                context.Context
+	log                abstractlogger.Logger
+	subscribeCh        chan Subscription
+	nextSubscriptionID int
+	subscriptions      map[string]Subscription
+	readTimeout        time.Duration
+}
+
+func newGQLWSConnectionHandler(ctx context.Context, conn *websocket.Conn, readTimeout time.Duration, log abstractlogger.Logger) *gqlWSConnectionHandler {
+	return &gqlWSConnectionHandler{
+		conn:               conn,
+		ctx:                ctx,
+		log:                log,
+		subscribeCh:        make(chan Subscription),
+		nextSubscriptionID: 0,
+		subscriptions:      map[string]Subscription{},
+		readTimeout:        readTimeout,
+	}
+}
+
+func (h *gqlWSConnectionHandler) SubscribeCH() chan<- Subscription {
+	return h.subscribeCh
+}
+
+// StartBlocking starts the single-threaded event loop of the handler.
+// It stops once the global context is done or the websocket connection is terminated.
+func (h *gqlWSConnectionHandler) StartBlocking(sub Subscription) {
+	readCtx, cancel := context.WithCancel(h.ctx)
+	defer func() {
+		h.unsubscribeAllAndCloseConn()
+		cancel()
+	}()
+	h.subscribe(sub)
+	dataCh := make(chan []byte)
+	errCh := make(chan error)
+	go h.readBlocking(readCtx, dataCh, errCh)
+	for {
+		err := h.ctx.Err()
+		if err != nil {
+			h.log.Error("gqlWSConnectionHandler.StartBlocking", abstractlogger.Error(err))
+			h.broadcastErrorMessage(err)
+			return
+		}
+		hasActiveSubscriptions := h.checkActiveSubscriptions()
+		if !hasActiveSubscriptions {
+			return
+		}
+		select {
+		case <-time.After(h.readTimeout):
+			continue
+		case sub = <-h.subscribeCh:
+			h.subscribe(sub)
+		case err = <-errCh:
+			h.log.Error("gqlWSConnectionHandler.StartBlocking", abstractlogger.Error(err))
+			h.broadcastErrorMessage(err)
+			return
+		case data := <-dataCh:
+			messageType, err := jsonparser.GetString(data, "type")
+			if err != nil {
+				continue
+			}
+			switch messageType {
+			case messageTypeData:
+				h.handleMessageTypeData(data)
+			case messageTypeComplete:
+				h.handleMessageTypeComplete(data)
+			case messageTypeConnectionError:
+				h.handleMessageTypeConnectionError()
+				return
+			case messageTypeError:
+				h.handleMessageTypeError(data)
+				continue
+			default:
+				continue
+			}
+		}
+	}
+}
+
+// readBlocking is a dedicated loop running in a separate goroutine,
+// because the nhooyr.io/websocket library doesn't allow reading with a context with a timeout.
+// It blocks on reading until the context of the gqlWSConnectionHandler is done.
+func (h *gqlWSConnectionHandler) readBlocking(ctx context.Context, dataCh chan []byte, errCh chan error) {
+	for {
+		msgType, data, err := h.conn.Read(ctx)
+		if ctx.Err() != nil {
+			errCh <- ctx.Err()
+			return
+		}
+		if err != nil {
+			errCh <- err
+			return
+		}
+		if msgType != websocket.MessageText {
+			continue
+		}
+		select {
+		case dataCh <- data:
+		case <-ctx.Done():
+			return
+		}
+	}
+}
+
+func (h *gqlWSConnectionHandler) unsubscribeAllAndCloseConn() {
+	for id := range h.subscriptions {
+		h.unsubscribe(id)
+	}
+	_ = h.conn.Close(websocket.StatusNormalClosure, "")
+}
+
+// subscribe adds a new Subscription to the gqlWSConnectionHandler and sends the startMessage to the origin
+func (h *gqlWSConnectionHandler) subscribe(sub Subscription) {
+	graphQLBody, err := json.Marshal(sub.options.Body)
+	if err != nil
{ + return + } + + h.nextSubscriptionID++ + + subscriptionID := strconv.Itoa(h.nextSubscriptionID) + + startRequest := fmt.Sprintf(startMessage, subscriptionID, string(graphQLBody)) + err = h.conn.Write(h.ctx, websocket.MessageText, []byte(startRequest)) + if err != nil { + return + } + + h.subscriptions[subscriptionID] = sub +} + +func (h *gqlWSConnectionHandler) handleMessageTypeData(data []byte) { + id, err := jsonparser.GetString(data, "id") + if err != nil { + return + } + sub, ok := h.subscriptions[id] + if !ok { + return + } + payload, _, _, err := jsonparser.Get(data, "payload") + if err != nil { + return + } + ctx, cancel := context.WithTimeout(h.ctx, time.Second*5) + defer cancel() + + select { + case <-ctx.Done(): + case sub.next <- payload: + case <-sub.ctx.Done(): + } +} + +func (h *gqlWSConnectionHandler) handleMessageTypeConnectionError() { + for _, sub := range h.subscriptions { + ctx, cancel := context.WithTimeout(h.ctx, time.Second*5) + select { + case sub.next <- []byte(connectionError): + cancel() + continue + case <-ctx.Done(): + cancel() + continue + } + } +} + +func (h *gqlWSConnectionHandler) broadcastErrorMessage(err error) { + errMsg := fmt.Sprintf(errorMessageTemplate, err) + for _, sub := range h.subscriptions { + ctx, cancel := context.WithTimeout(h.ctx, time.Second*5) + select { + case sub.next <- []byte(errMsg): + cancel() + continue + case <-ctx.Done(): + cancel() + continue + } + } +} + +func (h *gqlWSConnectionHandler) handleMessageTypeComplete(data []byte) { + id, err := jsonparser.GetString(data, "id") + if err != nil { + return + } + sub, ok := h.subscriptions[id] + if !ok { + return + } + close(sub.next) + delete(h.subscriptions, id) +} + +func (h *gqlWSConnectionHandler) handleMessageTypeError(data []byte) { + id, err := jsonparser.GetString(data, "id") + if err != nil { + return + } + sub, ok := h.subscriptions[id] + if !ok { + return + } + value, valueType, _, err := jsonparser.Get(data, "payload") + if err != nil { + sub.next <- []byte(internalError) + return + } + switch valueType { + case jsonparser.Array: + response := []byte(`{}`) + response, err = jsonparser.Set(response, value, "errors") + if err != nil { + sub.next <- []byte(internalError) + return + } + sub.next <- response + case jsonparser.Object: + response := []byte(`{"errors":[]}`) + response, err = jsonparser.Set(response, value, "errors", "[0]") + if err != nil { + sub.next <- []byte(internalError) + return + } + sub.next <- response + default: + sub.next <- []byte(internalError) + } +} + +func (h *gqlWSConnectionHandler) unsubscribe(subscriptionID string) { + sub, ok := h.subscriptions[subscriptionID] + if !ok { + return + } + close(sub.next) + delete(h.subscriptions, subscriptionID) + stopRequest := fmt.Sprintf(stopMessage, subscriptionID) + _ = h.conn.Write(h.ctx, websocket.MessageText, []byte(stopRequest)) +} + +func (h *gqlWSConnectionHandler) checkActiveSubscriptions() (hasActiveSubscriptions bool) { + for id, sub := range h.subscriptions { + if sub.ctx.Err() != nil { + h.unsubscribe(id) + } + } + return len(h.subscriptions) != 0 +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/graphql_datasource/graphql_ws_proto_types.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/graphql_datasource/graphql_ws_proto_types.go new file mode 100644 index 00000000000..2569d583832 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/graphql_datasource/graphql_ws_proto_types.go @@ -0,0 
+1,45 @@ +package graphql_datasource + +// common +var ( + connectionInitMessage = []byte(`{"type":"connection_init"}`) +) + +const ( + messageTypeConnectionAck = "connection_ack" + messageTypeComplete = "complete" + messageTypeError = "error" +) + +// websocket sub-protocol: +// https://github.com/apollographql/subscriptions-transport-ws/blob/master/PROTOCOL.md +const ( + ProtocolGraphQLWS = "graphql-ws" + + startMessage = `{"type":"start","id":"%s","payload":%s}` + stopMessage = `{"type":"stop","id":"%s"}` + + messageTypeConnectionKeepAlive = "ka" + messageTypeData = "data" + messageTypeConnectionError = "connection_error" +) + +// websocket sub-protocol: +// https://github.com/enisdenjo/graphql-ws/blob/master/PROTOCOL.md +const ( + ProtocolGraphQLTWS = "graphql-transport-ws" + + subscribeMessage = `{"id":"%s","type":"subscribe","payload":%s}` + pongMessage = `{"type":"pong"}` + completeMessage = `{"id":"%s","type":"complete"}` + + messageTypePing = "ping" + messageTypeNext = "next" +) + +// internal +const ( + internalError = `{"errors":[{"message":"internal error"}]}` + connectionError = `{"errors":[{"message":"connection error"}]}` + errorMessageTemplate = `{"errors":[{"message":"%s"}]}` +) diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/httpclient/httpclient.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/httpclient/httpclient.go new file mode 100644 index 00000000000..cb7ce85467d --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/httpclient/httpclient.go @@ -0,0 +1,235 @@ +package httpclient + +import ( + "bytes" + "context" + "encoding/json" + "io" + + "github.com/buger/jsonparser" + bytetemplate "github.com/jensneuse/byte-template" + "github.com/tidwall/gjson" + "github.com/tidwall/sjson" + + "github.com/TykTechnologies/graphql-go-tools/internal/pkg/quotes" + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/literal" +) + +type ctxKey string + +const ( + PATH = "path" + URL = "url" + URLENCODEBODY = "url_encode_body" + BASEURL = "base_url" + METHOD = "method" + BODY = "body" + HEADER = "header" + QUERYPARAMS = "query_params" + USESSE = "use_sse" + SSEMETHODPOST = "sse_method_post" + SCHEME = "scheme" + HOST = "host" + UNNULLVARIABLES = "unnull_variables" + + removeUndefinedVariables ctxKey = "remove_undefined_variables" +) + +var ( + inputPaths = [][]string{ + {URL}, + {METHOD}, + {BODY}, + {HEADER}, + {QUERYPARAMS}, + } + subscriptionInputPaths = [][]string{ + {URL}, + {HEADER}, + {BODY}, + } +) + +func CtxSetUndefinedVariables(ctx context.Context, undefinedVariables []string) context.Context { + return context.WithValue(ctx, removeUndefinedVariables, undefinedVariables) +} + +func CtxGetUndefinedVariables(ctx context.Context) []string { + undefinedVariables, _ := ctx.Value(removeUndefinedVariables).([]string) + return undefinedVariables +} + +func wrapQuotesIfString(b []byte) []byte { + + if bytes.HasPrefix(b, []byte("$$")) && bytes.HasSuffix(b, []byte("$$")) { + return b + } + + if bytes.HasPrefix(b, []byte("{{")) && bytes.HasSuffix(b, []byte("}}")) { + return b + } + + inType := gjson.ParseBytes(b).Type + switch inType { + case gjson.Number, gjson.String: + return b + case gjson.JSON: + var value interface{} + withoutTemplate := bytes.ReplaceAll(b, []byte("$$"), nil) + + buf := &bytes.Buffer{} + tmpl := bytetemplate.New() + _, _ = tmpl.Execute(buf, withoutTemplate, func(w io.Writer, path []byte) (n int, err error) { + return w.Write([]byte("0")) + }) + + 
withoutTemplate = buf.Bytes() + + err := json.Unmarshal(withoutTemplate, &value) + if err == nil { + return b + } + case gjson.False: + if bytes.Equal(b, literal.FALSE) { + return b + } + case gjson.True: + if bytes.Equal(b, literal.TRUE) { + return b + } + case gjson.Null: + if bytes.Equal(b, literal.NULL) { + return b + } + } + return quotes.WrapBytes(b) +} + +func SetInputURL(input, url []byte) []byte { + if len(url) == 0 { + return input + } + out, _ := sjson.SetRawBytes(input, URL, wrapQuotesIfString(url)) + return out +} + +func SetInputURLEncodeBody(input []byte, urlEncodeBody bool) []byte { + if !urlEncodeBody { + return input + } + out, _ := sjson.SetRawBytes(input, URLENCODEBODY, []byte("true")) + return out +} + +func SetInputFlag(input []byte, flagName string) []byte { + out, _ := sjson.SetRawBytes(input, flagName, []byte("true")) + return out +} + +func IsInputFlagSet(input []byte, flagName string) bool { + value, dataType, _, err := jsonparser.Get(input, flagName) + if err != nil { + return false + } + if dataType != jsonparser.Boolean { + return false + } + return bytes.Equal(value, literal.TRUE) +} + +func SetInputMethod(input, method []byte) []byte { + if len(method) == 0 { + return input + } + out, _ := sjson.SetRawBytes(input, METHOD, wrapQuotesIfString(method)) + return out +} + +func SetInputBody(input, body []byte) []byte { + return SetInputBodyWithPath(input, body, "") +} + +func SetInputBodyWithPath(input, body []byte, path string) []byte { + if len(body) == 0 { + return input + } + if path != "" { + path = BODY + "." + path + } else { + path = BODY + } + out, _ := sjson.SetRawBytes(input, path, wrapQuotesIfString(body)) + return out +} + +func SetInputHeader(input, headers []byte) []byte { + if len(headers) == 0 { + return input + } + out, _ := sjson.SetRawBytes(input, HEADER, wrapQuotesIfString(headers)) + return out +} + +func SetInputQueryParams(input, queryParams []byte) []byte { + if len(queryParams) == 0 { + return input + } + out, _ := sjson.SetRawBytes(input, QUERYPARAMS, wrapQuotesIfString(queryParams)) + return out +} + +func SetInputScheme(input, scheme []byte) []byte { + if len(scheme) == 0 { + return input + } + out, _ := sjson.SetRawBytes(input, SCHEME, wrapQuotesIfString(scheme)) + return out +} + +func SetInputHost(input, host []byte) []byte { + if len(host) == 0 { + return input + } + out, _ := sjson.SetRawBytes(input, HOST, wrapQuotesIfString(host)) + return out +} + +func SetInputPath(input, path []byte) []byte { + if len(path) == 0 { + return input + } + out, _ := sjson.SetRawBytes(input, PATH, wrapQuotesIfString(path)) + return out +} + +func requestInputParams(input []byte) (url, method, body, headers, queryParams []byte) { + jsonparser.EachKey(input, func(i int, bytes []byte, valueType jsonparser.ValueType, err error) { + switch i { + case 0: + url = bytes + case 1: + method = bytes + case 2: + body = bytes + case 3: + headers = bytes + case 4: + queryParams = bytes + } + }, inputPaths...) + return +} + +func GetSubscriptionInput(input []byte) (url, header, body []byte) { + jsonparser.EachKey(input, func(i int, bytes []byte, valueType jsonparser.ValueType, err error) { + switch i { + case 0: + url = bytes + case 1: + header = bytes + case 2: + body = bytes + } + }, subscriptionInputPaths...) 
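+	// The single EachKey pass above fills url, header and body; for
+	// illustration (assumed input shape, following subscriptionInputPaths),
+	// an input such as
+	//   {"url":"wss://example.com/graphql","header":{"Authorization":["Bearer ..."]},"body":{"query":"subscription { time }"}}
+	// yields those three byte slices, with the callback index i matching the
+	// order of subscriptionInputPaths.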
+ return +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/httpclient/nethttpclient.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/httpclient/nethttpclient.go new file mode 100644 index 00000000000..20f17a8d6af --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/httpclient/nethttpclient.go @@ -0,0 +1,124 @@ +package httpclient + +import ( + "bytes" + "compress/flate" + "compress/gzip" + "context" + "io" + "net/http" + "time" + + "github.com/buger/jsonparser" + + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/literal" +) + +const ( + ContentEncodingHeader = "Content-Encoding" + AcceptEncodingHeader = "Accept-Encoding" +) + +var ( + DefaultNetHttpClient = &http.Client{ + Timeout: time.Second * 10, + Transport: &http.Transport{ + MaxIdleConnsPerHost: 1024, + TLSHandshakeTimeout: 0 * time.Second, + }, + } + queryParamsKeys = [][]string{ + {"name"}, + {"value"}, + } +) + +func Do(client *http.Client, ctx context.Context, requestInput []byte, out io.Writer) (err error) { + + url, method, body, headers, queryParams := requestInputParams(requestInput) + + request, err := http.NewRequestWithContext(ctx, string(method), string(url), bytes.NewReader(body)) + if err != nil { + return err + } + + if headers != nil { + err = jsonparser.ObjectEach(headers, func(key []byte, value []byte, dataType jsonparser.ValueType, offset int) error { + _, err := jsonparser.ArrayEach(value, func(value []byte, dataType jsonparser.ValueType, offset int, err error) { + if err != nil { + return + } + if len(value) == 0 { + return + } + request.Header.Add(string(key), string(value)) + }) + return err + }) + if err != nil { + return err + } + } + + if queryParams != nil { + query := request.URL.Query() + _, err = jsonparser.ArrayEach(queryParams, func(value []byte, dataType jsonparser.ValueType, offset int, err error) { + var ( + parameterName, parameterValue []byte + ) + jsonparser.EachKey(value, func(i int, bytes []byte, valueType jsonparser.ValueType, err error) { + switch i { + case 0: + parameterName = bytes + case 1: + parameterValue = bytes + } + }, queryParamsKeys...) 
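+			// For illustration (assumed parameter shape): query_params such as
+			//   [{"name":"id","value":"1"},{"name":"tags","value":"[\"a\",\"b\"]"}]
+			// encode to ?id=1&tags=a&tags=b; an array-valued "value" is
+			// flattened into one repeated query parameter per element below.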
+ if len(parameterName) != 0 && len(parameterValue) != 0 { + if bytes.Equal(parameterValue[:1], literal.LBRACK) { + _, _ = jsonparser.ArrayEach(parameterValue, func(value []byte, dataType jsonparser.ValueType, offset int, err error) { + query.Add(string(parameterName), string(value)) + }) + } else { + query.Add(string(parameterName), string(parameterValue)) + } + } + }) + if err != nil { + return err + } + request.URL.RawQuery = query.Encode() + } + + request.Header.Add("accept", "application/json") + request.Header.Add("content-type", "application/json") + + response, err := client.Do(request) + if err != nil { + return err + } + defer response.Body.Close() + + respReader, err := respBodyReader(request, response) + if err != nil { + return err + } + + _, err = io.Copy(out, respReader) + return +} + +func respBodyReader(req *http.Request, resp *http.Response) (io.ReadCloser, error) { + if req.Header.Get(AcceptEncodingHeader) == "" { + return resp.Body, nil + } + + switch resp.Header.Get(ContentEncodingHeader) { + case "gzip": + return gzip.NewReader(resp.Body) + case "deflate": + return flate.NewReader(resp.Body), nil + } + + return resp.Body, nil +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/introspection_datasource/config_factory.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/introspection_datasource/config_factory.go new file mode 100644 index 00000000000..42b2609472e --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/introspection_datasource/config_factory.go @@ -0,0 +1,70 @@ +package introspection_datasource + +import ( + "encoding/json" + + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/engine/plan" + "github.com/TykTechnologies/graphql-go-tools/pkg/introspection" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +type IntrospectionConfigFactory struct { + introspectionData *introspection.Data +} + +func NewIntrospectionConfigFactory(schema *ast.Document) (*IntrospectionConfigFactory, error) { + var ( + data introspection.Data + report operationreport.Report + ) + gen := introspection.NewGenerator() + gen.Generate(schema, &report, &data) + if report.HasErrors() { + return nil, report + } + + return &IntrospectionConfigFactory{introspectionData: &data}, nil +} + +func (f *IntrospectionConfigFactory) BuildFieldConfigurations() (planFields plan.FieldConfigurations) { + return plan.FieldConfigurations{ + { + TypeName: f.introspectionData.Schema.QueryType.Name, + FieldName: "__schema", + DisableDefaultMapping: true, + }, + { + TypeName: f.introspectionData.Schema.QueryType.Name, + FieldName: "__type", + DisableDefaultMapping: true, + }, + { + TypeName: "__Type", + FieldName: "fields", + DisableDefaultMapping: true, + }, + { + TypeName: "__Type", + FieldName: "enumValues", + DisableDefaultMapping: true, + }, + } +} + +func (f *IntrospectionConfigFactory) BuildDataSourceConfiguration() plan.DataSourceConfiguration { + return plan.DataSourceConfiguration{ + RootNodes: []plan.TypeField{ + { + TypeName: "Query", + FieldNames: []string{"__schema", "__type"}, + }, + { + TypeName: "__Type", + FieldNames: []string{"fields", "enumValues"}, + }, + }, + Factory: NewFactory(f.introspectionData), + Custom: json.RawMessage{}, + } +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/introspection_datasource/factory.go 
b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/introspection_datasource/factory.go new file mode 100644 index 00000000000..944aa3cf3a4 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/introspection_datasource/factory.go @@ -0,0 +1,20 @@ +package introspection_datasource + +import ( + "context" + + "github.com/TykTechnologies/graphql-go-tools/pkg/engine/plan" + "github.com/TykTechnologies/graphql-go-tools/pkg/introspection" +) + +type Factory struct { + introspectionData *introspection.Data +} + +func NewFactory(introspectionData *introspection.Data) *Factory { + return &Factory{introspectionData: introspectionData} +} + +func (f *Factory) Planner(_ context.Context) plan.DataSourcePlanner { + return &Planner{introspectionData: f.introspectionData} +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/introspection_datasource/input.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/introspection_datasource/input.go new file mode 100644 index 00000000000..c73aab77249 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/introspection_datasource/input.go @@ -0,0 +1,75 @@ +package introspection_datasource + +import ( + "bytes" + "strconv" +) + +type requestType int + +const ( + SchemaRequestType requestType = iota + 1 + TypeRequestType + TypeFieldsRequestType + TypeEnumValuesRequestType +) + +const ( + schemaFieldName = "__schema" + typeFieldName = "__type" + fieldsFieldName = "fields" + enumValuesFieldName = "enumValues" +) + +type introspectionInput struct { + RequestType requestType `json:"request_type"` + OnTypeName *string `json:"on_type_name"` + TypeName *string `json:"type_name"` + IncludeDeprecated bool `json:"include_deprecated"` +} + +var ( + lBrace = []byte("{") + rBrace = []byte("}") + comma = []byte(",") + requestTypeField = []byte(`"request_type":`) + onTypeField = []byte(`"on_type_name":"{{ .object.name }}"`) + typeNameField = []byte(`"type_name":"{{ .arguments.name }}"`) + includeDeprecatedField = []byte(`"include_deprecated":{{ .arguments.includeDeprecated }}`) +) + +func buildInput(fieldName string) string { + buf := &bytes.Buffer{} + buf.Write(lBrace) + + switch fieldName { + case typeFieldName: + writeRequestTypeField(buf, TypeRequestType) + buf.Write(comma) + buf.Write(typeNameField) + case fieldsFieldName: + writeRequestTypeField(buf, TypeFieldsRequestType) + writeOnTypeFields(buf) + case enumValuesFieldName: + writeRequestTypeField(buf, TypeEnumValuesRequestType) + writeOnTypeFields(buf) + default: + writeRequestTypeField(buf, SchemaRequestType) + } + + buf.Write(rBrace) + + return buf.String() +} + +func writeRequestTypeField(buf *bytes.Buffer, inputType requestType) { + buf.Write(requestTypeField) + buf.Write([]byte(strconv.Itoa(int(inputType)))) +} + +func writeOnTypeFields(buf *bytes.Buffer) { + buf.Write(comma) + buf.Write(onTypeField) + buf.Write(comma) + buf.Write(includeDeprecatedField) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/introspection_datasource/planner.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/introspection_datasource/planner.go new file mode 100644 index 00000000000..fb38d610ec0 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/introspection_datasource/planner.go @@ -0,0 +1,54 @@ +package introspection_datasource + +import ( + 
"github.com/TykTechnologies/graphql-go-tools/pkg/engine/plan" + "github.com/TykTechnologies/graphql-go-tools/pkg/introspection" +) + +type Planner struct { + introspectionData *introspection.Data + v *plan.Visitor + rootField int +} + +func (p *Planner) Register(visitor *plan.Visitor, _ plan.DataSourceConfiguration, _ bool) error { + p.v = visitor + visitor.Walker.RegisterEnterFieldVisitor(p) + return nil +} + +func (p *Planner) DownstreamResponseFieldAlias(_ int) (alias string, exists bool) { + // the Introspection DataSourcePlanner doesn't rewrite upstream fields: skip + return +} + +func (p *Planner) DataSourcePlanningBehavior() plan.DataSourcePlanningBehavior { + return plan.DataSourcePlanningBehavior{ + MergeAliasedRootNodes: false, + OverrideFieldPathFromAlias: false, + } +} + +func (p *Planner) EnterField(ref int) { + p.rootField = ref +} + +func (p *Planner) configureInput() string { + fieldName := p.v.Operation.FieldNameString(p.rootField) + + return buildInput(fieldName) +} + +func (p *Planner) ConfigureFetch() plan.FetchConfiguration { + return plan.FetchConfiguration{ + Input: p.configureInput(), + DataSource: &Source{ + introspectionData: p.introspectionData, + }, + } +} + +func (p *Planner) ConfigureSubscription() plan.SubscriptionConfiguration { + // the Introspection DataSourcePlanner doesn't have subscription + return plan.SubscriptionConfiguration{} +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/introspection_datasource/source.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/introspection_datasource/source.go new file mode 100644 index 00000000000..2cf96fc4ade --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/introspection_datasource/source.go @@ -0,0 +1,102 @@ +package introspection_datasource + +import ( + "context" + "encoding/json" + "io" + + "github.com/TykTechnologies/graphql-go-tools/pkg/introspection" +) + +var ( + null = []byte("null") +) + +type Source struct { + introspectionData *introspection.Data +} + +func (s *Source) Load(ctx context.Context, input []byte, w io.Writer) (err error) { + var req introspectionInput + if err := json.Unmarshal(input, &req); err != nil { + return err + } + + switch req.RequestType { + case TypeRequestType: + return s.singleType(w, req.TypeName) + case TypeEnumValuesRequestType: + return s.enumValuesForType(w, req.OnTypeName, req.IncludeDeprecated) + case TypeFieldsRequestType: + return s.fieldsForType(w, req.OnTypeName, req.IncludeDeprecated) + } + + return json.NewEncoder(w).Encode(s.introspectionData.Schema) +} + +func (s *Source) typeInfo(typeName *string) *introspection.FullType { + if typeName == nil { + return nil + } + + for _, fullType := range s.introspectionData.Schema.Types { + if fullType.Name == *typeName { + return &fullType + } + } + return nil +} + +func (s *Source) writeNull(w io.Writer) error { + _, err := w.Write(null) + return err +} + +func (s *Source) singleType(w io.Writer, typeName *string) error { + typeInfo := s.typeInfo(typeName) + if typeInfo == nil { + return s.writeNull(w) + } + + return json.NewEncoder(w).Encode(typeInfo) +} + +func (s *Source) fieldsForType(w io.Writer, typeName *string, includeDeprecated bool) error { + typeInfo := s.typeInfo(typeName) + if typeInfo == nil { + return s.writeNull(w) + } + + if includeDeprecated { + return json.NewEncoder(w).Encode(typeInfo.Fields) + } + + fields := make([]introspection.Field, 0, len(typeInfo.Fields)) + for _, field := range 
typeInfo.Fields { + if !field.IsDeprecated { + fields = append(fields, field) + } + } + + return json.NewEncoder(w).Encode(fields) +} + +func (s *Source) enumValuesForType(w io.Writer, typeName *string, includeDeprecated bool) error { + typeInfo := s.typeInfo(typeName) + if typeInfo == nil { + return s.writeNull(w) + } + + if includeDeprecated { + return json.NewEncoder(w).Encode(typeInfo.EnumValues) + } + + enumValues := make([]introspection.EnumValue, 0, len(typeInfo.EnumValues)) + for _, enumValue := range typeInfo.EnumValues { + if !enumValue.IsDeprecated { + enumValues = append(enumValues, enumValue) + } + } + + return json.NewEncoder(w).Encode(enumValues) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/kafka_datasource/config.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/kafka_datasource/config.go new file mode 100644 index 00000000000..180a5531815 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/kafka_datasource/config.go @@ -0,0 +1,141 @@ +package kafka_datasource + +import ( + "fmt" + + "github.com/Shopify/sarama" +) + +const ( + IsolationLevelReadUncommitted = "ReadUncommitted" + IsolationLevelReadCommitted = "ReadCommitted" +) + +const DefaultIsolationLevel = IsolationLevelReadUncommitted + +const ( + BalanceStrategyRange = "BalanceStrategyRange" + BalanceStrategySticky = "BalanceStrategySticky" + BalanceStrategyRoundRobin = "BalanceStrategyRoundRobin" +) + +const DefaultBalanceStrategy = BalanceStrategyRange + +var ( + DefaultKafkaVersion = "V1_0_0_0" + SaramaSupportedKafkaVersions = map[string]sarama.KafkaVersion{ + "V0_10_2_0": sarama.V0_10_2_0, + "V0_10_2_1": sarama.V0_10_2_1, + "V0_11_0_0": sarama.V0_11_0_0, + "V0_11_0_1": sarama.V0_11_0_1, + "V0_11_0_2": sarama.V0_11_0_2, + "V1_0_0_0": sarama.V1_0_0_0, + "V1_1_0_0": sarama.V1_1_0_0, + "V1_1_1_0": sarama.V1_1_1_0, + "V2_0_0_0": sarama.V2_0_0_0, + "V2_0_1_0": sarama.V2_0_1_0, + "V2_1_0_0": sarama.V2_1_0_0, + "V2_2_0_0": sarama.V2_2_0_0, + "V2_3_0_0": sarama.V2_3_0_0, + "V2_4_0_0": sarama.V2_4_0_0, + "V2_5_0_0": sarama.V2_5_0_0, + "V2_6_0_0": sarama.V2_6_0_0, + "V2_7_0_0": sarama.V2_7_0_0, + "V2_8_0_0": sarama.V2_8_0_0, + } +) + +type SASL struct { + // Whether or not to use SASL authentication when connecting to the broker + // (defaults to false). 
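+	// An illustrative JSON snippet (values are placeholders):
+	//   "sasl": {"enable": true, "user": "alice", "password": "s3cret"}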
+ Enable bool `json:"enable"` + // User is the authentication identity (authcid) to present for + // SASL/PLAIN or SASL/SCRAM authentication + User string `json:"user"` + // Password for SASL/PLAIN authentication + Password string `json:"password"` +} + +type GraphQLSubscriptionOptions struct { + BrokerAddresses []string `json:"broker_addresses"` + Topics []string `json:"topics"` + GroupID string `json:"group_id"` + ClientID string `json:"client_id"` + KafkaVersion string `json:"kafka_version"` + StartConsumingLatest bool `json:"start_consuming_latest"` + BalanceStrategy string `json:"balance_strategy"` + IsolationLevel string `json:"isolation_level"` + SASL SASL `json:"sasl"` + startedCallback func() +} + +func (g *GraphQLSubscriptionOptions) Sanitize() { + if g.KafkaVersion == "" { + g.KafkaVersion = DefaultKafkaVersion + } + + // Strategy for allocating topic partitions to members (default BalanceStrategyRange) + if g.BalanceStrategy == "" { + g.BalanceStrategy = DefaultBalanceStrategy + } + + if g.IsolationLevel == "" { + g.IsolationLevel = DefaultIsolationLevel + } +} + +func (g *GraphQLSubscriptionOptions) Validate() error { + switch { + case len(g.BrokerAddresses) == 0: + return fmt.Errorf("broker_addresses cannot be empty") + case len(g.Topics) == 0: + return fmt.Errorf("topics cannot be empty") + case g.GroupID == "": + return fmt.Errorf("group_id cannot be empty") + case g.ClientID == "": + return fmt.Errorf("client_id cannot be empty") + } + + if _, ok := SaramaSupportedKafkaVersions[g.KafkaVersion]; !ok { + return fmt.Errorf("kafka_version is invalid: %s", g.KafkaVersion) + } + + switch g.BalanceStrategy { + case BalanceStrategyRange, BalanceStrategySticky, BalanceStrategyRoundRobin: + default: + return fmt.Errorf("balance_strategy is invalid: %s", g.BalanceStrategy) + } + + switch g.IsolationLevel { + case IsolationLevelReadUncommitted, IsolationLevelReadCommitted: + default: + return fmt.Errorf("isolation_level is invalid: %s", g.IsolationLevel) + } + + if g.SASL.Enable { + switch { + case g.SASL.User == "": + return fmt.Errorf("sasl.user cannot be empty") + case g.SASL.Password == "": + return fmt.Errorf("sasl.password cannot be empty") + } + } + + return nil +} + +type SubscriptionConfiguration struct { + BrokerAddresses []string `json:"broker_addresses"` + Topics []string `json:"topics"` + GroupID string `json:"group_id"` + ClientID string `json:"client_id"` + KafkaVersion string `json:"kafka_version"` + StartConsumingLatest bool `json:"start_consuming_latest"` + BalanceStrategy string `json:"balance_strategy"` + IsolationLevel string `json:"isolation_level"` + SASL SASL `json:"sasl"` +} + +type Configuration struct { + Subscription SubscriptionConfiguration +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/kafka_datasource/kafka_consumer_group.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/kafka_datasource/kafka_consumer_group.go new file mode 100644 index 00000000000..30ee6c0df04 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/kafka_datasource/kafka_consumer_group.go @@ -0,0 +1,323 @@ +package kafka_datasource + +import ( + "context" + "sync" + "time" + + "github.com/Shopify/sarama" + "github.com/buger/jsonparser" + log "github.com/jensneuse/abstractlogger" +) + +const consumerGroupRetryInterval = time.Second + +type KafkaConsumerGroupBridge struct { + log log.Logger + ctx context.Context +} + +type KafkaConsumerGroup struct { + consumerGroup 
sarama.ConsumerGroup
+	options         *GraphQLSubscriptionOptions
+	log             log.Logger
+	startedCallback func()
+	wg              sync.WaitGroup
+	ctx             context.Context
+	cancel          context.CancelFunc
+}
+
+type kafkaConsumerGroupHandler struct {
+	log             log.Logger
+	startedCallback func()
+	options         *GraphQLSubscriptionOptions
+	messages        chan *sarama.ConsumerMessage
+	ctx             context.Context
+}
+
+// Setup is run at the beginning of a new session, before ConsumeClaim.
+func (k *kafkaConsumerGroupHandler) Setup(_ sarama.ConsumerGroupSession) error {
+	k.log.Debug("kafkaConsumerGroupHandler.Setup",
+		log.Strings("topics", k.options.Topics),
+		log.String("groupID", k.options.GroupID),
+		log.String("clientID", k.options.ClientID),
+	)
+	return nil
+}
+
+// Cleanup is run at the end of a session, once all ConsumeClaim goroutines have exited
+// but before the offsets are committed for the very last time.
+func (k *kafkaConsumerGroupHandler) Cleanup(_ sarama.ConsumerGroupSession) error {
+	k.log.Debug("kafkaConsumerGroupHandler.Cleanup",
+		log.Strings("topics", k.options.Topics),
+		log.String("groupID", k.options.GroupID),
+		log.String("clientID", k.options.ClientID),
+	)
+	return nil
+}
+
+// ConsumeClaim must start a consumer loop of ConsumerGroupClaim's Messages().
+// Once the Messages() channel is closed, the Handler must finish its processing
+// loop and exit.
+func (k *kafkaConsumerGroupHandler) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
+	if k.options.StartConsumingLatest {
+		// Reset the offset before consuming starts and don't commit the
+		// consumed messages. This way, only the latest messages are read.
+		session.ResetOffset(claim.Topic(), claim.Partition(), sarama.OffsetNewest, "")
+	}
+
+	if k.startedCallback != nil {
+		k.startedCallback()
+	}
+
+	for msg := range claim.Messages() {
+		ctx, cancel := context.WithTimeout(k.ctx, time.Second*5)
+		select {
+		case k.messages <- msg:
+			cancel()
+			// If the client only wants the most recent messages, don't commit
+			// the offset; it has already been reset to sarama.OffsetNewest above.
+			if !k.options.StartConsumingLatest {
+				session.MarkMessage(msg, "") // Commit the message and advance the offset.
+			}
+		case <-ctx.Done():
+			cancel()
+			return nil
+		}
+	}
+	k.log.Debug("kafkaConsumerGroupHandler.ConsumeClaim is gone",
+		log.Strings("topics", k.options.Topics),
+		log.String("groupID", k.options.GroupID),
+		log.String("clientID", k.options.ClientID))
+	return nil
+}
+
+// NewKafkaConsumerGroup creates a new sarama.ConsumerGroup and returns a new
+// *KafkaConsumerGroup instance.
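+//
+// A minimal usage sketch (assuming a prepared *sarama.Config, validated
+// options and an abstractlogger.Logger):
+//
+//	cg, err := NewKafkaConsumerGroup(logger, saramaConfig, &options)
+//	if err != nil {
+//		// handle error
+//	}
+//	messages := make(chan *sarama.ConsumerMessage)
+//	cg.StartConsuming(messages)
+//	defer cg.Close()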
+func NewKafkaConsumerGroup(log log.Logger, saramaConfig *sarama.Config, options *GraphQLSubscriptionOptions) (*KafkaConsumerGroup, error) {
+	cg, err := sarama.NewConsumerGroup(options.BrokerAddresses, options.GroupID, saramaConfig)
+	if err != nil {
+		return nil, err
+	}
+
+	ctx, cancel := context.WithCancel(context.Background())
+	return &KafkaConsumerGroup{
+		consumerGroup:   cg,
+		startedCallback: options.startedCallback,
+		log:             log,
+		options:         options,
+		ctx:             ctx,
+		cancel:          cancel,
+	}, nil
+}
+
+func (k *KafkaConsumerGroup) startConsuming(handler sarama.ConsumerGroupHandler) {
+	defer k.wg.Done()
+
+	defer func() {
+		if err := k.consumerGroup.Close(); err != nil {
+			k.log.Error("KafkaConsumerGroup.Close returned an error",
+				log.Strings("topics", k.options.Topics),
+				log.String("groupID", k.options.GroupID),
+				log.String("clientID", k.options.ClientID),
+				log.Error(err))
+		}
+	}()
+
+	k.wg.Add(1)
+	go func() {
+		defer k.wg.Done()
+
+		// Errors returns a read channel of errors that occurred during the consumer life-cycle.
+		// By default, errors are logged and not returned over this channel.
+		// If you want to implement any custom error handling, set your config's
+		// Consumer.Return.Errors setting to true, and read from this channel.
+		for err := range k.consumerGroup.Errors() {
+			k.log.Error("KafkaConsumerGroup.Consumer",
+				log.Strings("topics", k.options.Topics),
+				log.String("groupID", k.options.GroupID),
+				log.String("clientID", k.options.ClientID),
+				log.Error(err))
+		}
+	}()
+
+	// From the Sarama documentation:
+	//
+	// This method should be called inside an infinite loop; when a
+	// server-side rebalance happens, the consumer session will need to be
+	// recreated to get the new claims.
+	for {
+		select {
+		case <-k.ctx.Done():
+			return
+		default:
+		}
+
+		k.log.Info("KafkaConsumerGroup.consumerGroup.Consume has been called",
+			log.Strings("topics", k.options.Topics),
+			log.String("groupID", k.options.GroupID),
+			log.String("clientID", k.options.ClientID))
+
+		// Blocking call
+		err := k.consumerGroup.Consume(k.ctx, k.options.Topics, handler)
+		if err != nil {
+			k.log.Error("KafkaConsumerGroup.startConsuming",
+				log.Strings("topics", k.options.Topics),
+				log.String("groupID", k.options.GroupID),
+				log.String("clientID", k.options.ClientID),
+				log.Error(err))
+		}
+		// A rebalance or node restart takes time, and retrying Consume
+		// immediately would spin the CPU. Wait briefly to prevent a tight
+		// retry loop.
+		<-time.After(consumerGroupRetryInterval)
+	}
+}
+
+// StartConsuming initializes a new consumer group handler and starts consuming
+// in the background.
+func (k *KafkaConsumerGroup) StartConsuming(messages chan *sarama.ConsumerMessage) {
+	handler := &kafkaConsumerGroupHandler{
+		log:             k.log,
+		startedCallback: k.options.startedCallback,
+		options:         k.options,
+		messages:        messages,
+		ctx:             k.ctx,
+	}
+
+	k.wg.Add(1)
+	go k.startConsuming(handler)
+}
+
+// Close stops background goroutines and closes the underlying ConsumerGroup instance.
+func (k *KafkaConsumerGroup) Close() error {
+	select {
+	case <-k.ctx.Done():
+		// Already closed
+		return nil
+	default:
+	}
+
+	k.cancel()
+	return k.consumerGroup.Close()
+}
+
+// WaitUntilConsumerStop waits until the ConsumerGroup.Consume function stops.
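+// Callers that need a graceful shutdown can call Close first and then
+// WaitUntilConsumerStop to block until the background Consume loop has exited.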
+func (k *KafkaConsumerGroup) WaitUntilConsumerStop() {
+	k.wg.Wait()
+}
+
+func NewKafkaConsumerGroupBridge(ctx context.Context, logger log.Logger) *KafkaConsumerGroupBridge {
+	if logger == nil {
+		logger = log.NoopLogger
+	}
+	return &KafkaConsumerGroupBridge{
+		ctx: ctx,
+		log: logger,
+	}
+}
+
+func (c *KafkaConsumerGroupBridge) prepareSaramaConfig(options *GraphQLSubscriptionOptions) (*sarama.Config, error) {
+	sc := sarama.NewConfig()
+	sc.Version = SaramaSupportedKafkaVersions[options.KafkaVersion]
+	sc.ClientID = options.ClientID
+	sc.Consumer.Return.Errors = true
+
+	// Strategy for allocating topic partitions to members (default BalanceStrategyRange).
+	// See: https://chrzaszcz.dev/2021/09/kafka-assignors/
+	// The Sanitize function doesn't allow an empty BalanceStrategy parameter.
+	switch options.BalanceStrategy {
+	case BalanceStrategyRange:
+		sc.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategyRange
+	case BalanceStrategySticky:
+		sc.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategySticky
+	case BalanceStrategyRoundRobin:
+		sc.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategyRoundRobin
+	}
+
+	if options.StartConsumingLatest {
+		// Start consuming from the latest offset after a client restart
+		sc.Consumer.Offsets.Initial = sarama.OffsetNewest
+	}
+
+	// IsolationLevel supports two modes:
+	// - use `ReadUncommitted` (default) to consume and return all messages in the message channel
+	// - use `ReadCommitted` to hide messages that are part of an aborted transaction
+	switch options.IsolationLevel {
+	case IsolationLevelReadCommitted:
+		sc.Consumer.IsolationLevel = sarama.ReadCommitted
+	case IsolationLevelReadUncommitted:
+		sc.Consumer.IsolationLevel = sarama.ReadUncommitted
+	}
+
+	// SASL-based authentication with the broker. While there are multiple SASL
+	// authentication methods, the current implementation is limited to
+	// plaintext (SASL/PLAIN) authentication.
+	if options.SASL.Enable {
+		sc.Net.SASL.Enable = true
+		sc.Net.SASL.User = options.SASL.User
+		sc.Net.SASL.Password = options.SASL.Password
+	}
+
+	return sc, nil
+}
+
+// Subscribe creates a new consumer group with the given config and streams
+// messages via the next channel.
+func (c *KafkaConsumerGroupBridge) Subscribe(ctx context.Context, options GraphQLSubscriptionOptions, next chan<- []byte) error {
+	options.Sanitize()
+	if err := options.Validate(); err != nil {
+		return err
+	}
+
+	saramaConfig, err := c.prepareSaramaConfig(&options)
+	if err != nil {
+		return err
+	}
+
+	cg, err := NewKafkaConsumerGroup(c.log, saramaConfig, &options)
+	if err != nil {
+		return err
+	}
+
+	messages := make(chan *sarama.ConsumerMessage)
+	cg.StartConsuming(messages)
+
+	// Wait for messages.
+	go func() {
+		defer func() {
+			if err := cg.Close(); err != nil {
+				c.log.Error("KafkaConsumerGroup.Close returned an error",
+					log.Strings("topics", options.Topics),
+					log.String("groupID", options.GroupID),
+					log.String("clientID", options.ClientID),
+					log.Error(err),
+				)
+			}
+			close(next)
+		}()
+
+		for {
+			select {
+			case <-c.ctx.Done():
+				// Gateway context
+				return
+			case <-ctx.Done():
+				// Request context
+				return
+			case msg, ok := <-messages:
+				if !ok {
+					return
+				}
+				// The "data" field contains the result of your GraphQL request.
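+				// For example, a Kafka message value of {"userCreated":{"id":"1"}}
+				// (an assumed payload) is forwarded to the subscriber as
+				// {"data":{"userCreated":{"id":"1"}}}.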
+ result, err := jsonparser.Set([]byte(`{}`), msg.Value, "data") + if err != nil { + return + } + next <- result + } + } + }() + + return nil +} + +var _ sarama.ConsumerGroupHandler = (*kafkaConsumerGroupHandler)(nil) diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/kafka_datasource/kafka_datasource.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/kafka_datasource/kafka_datasource.go new file mode 100644 index 00000000000..d6fb0b29376 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/kafka_datasource/kafka_datasource.go @@ -0,0 +1,74 @@ +package kafka_datasource + +import ( + "context" + "encoding/json" + + "github.com/TykTechnologies/graphql-go-tools/pkg/engine/plan" + "github.com/jensneuse/abstractlogger" +) + +type Planner struct { + ctx context.Context + config Configuration +} + +func (p *Planner) Register(_ *plan.Visitor, configuration plan.DataSourceConfiguration, _ bool) error { + return json.Unmarshal(configuration.Custom, &p.config) +} + +func (p *Planner) ConfigureFetch() plan.FetchConfiguration { + return plan.FetchConfiguration{} +} + +func (p *Planner) ConfigureSubscription() plan.SubscriptionConfiguration { + input, _ := json.Marshal(p.config.Subscription) + return plan.SubscriptionConfiguration{ + Input: string(input), + DataSource: &SubscriptionSource{ + client: NewKafkaConsumerGroupBridge(p.ctx, abstractlogger.NoopLogger), + }, + } +} + +func (p *Planner) DataSourcePlanningBehavior() plan.DataSourcePlanningBehavior { + return plan.DataSourcePlanningBehavior{ + MergeAliasedRootNodes: false, + OverrideFieldPathFromAlias: false, + } +} + +func (p *Planner) DownstreamResponseFieldAlias(_ int) (alias string, exists bool) { return } + +type Factory struct{} + +func (f *Factory) Planner(ctx context.Context) plan.DataSourcePlanner { + return &Planner{ + ctx: ctx, + } +} + +func ConfigJSON(config Configuration) json.RawMessage { + out, _ := json.Marshal(config) + return out +} + +type GraphQLSubscriptionClient interface { + Subscribe(ctx context.Context, options GraphQLSubscriptionOptions, next chan<- []byte) error +} + +type SubscriptionSource struct { + client GraphQLSubscriptionClient +} + +func (s *SubscriptionSource) Start(ctx context.Context, input []byte, next chan<- []byte) error { + var options GraphQLSubscriptionOptions + err := json.Unmarshal(input, &options) + if err != nil { + return err + } + return s.client.Subscribe(ctx, options, next) +} + +var _ plan.PlannerFactory = (*Factory)(nil) +var _ plan.DataSourcePlanner = (*Planner)(nil) diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/rest_datasource/rest_datasource.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/rest_datasource/rest_datasource.go new file mode 100644 index 00000000000..6c886881020 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/rest_datasource/rest_datasource.go @@ -0,0 +1,172 @@ +package rest_datasource + +import ( + "bytes" + "context" + "encoding/json" + "io" + "net/http" + "regexp" + "strings" + + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/httpclient" + "github.com/TykTechnologies/graphql-go-tools/pkg/engine/plan" + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/literal" +) + +type Planner struct { + client *http.Client + v *plan.Visitor + config Configuration + rootField int + 
operationDefinition int +} + +func (p *Planner) DownstreamResponseFieldAlias(_ int) (alias string, exists bool) { + // the REST DataSourcePlanner doesn't rewrite upstream fields: skip + return +} + +func (p *Planner) DataSourcePlanningBehavior() plan.DataSourcePlanningBehavior { + return plan.DataSourcePlanningBehavior{ + MergeAliasedRootNodes: false, + OverrideFieldPathFromAlias: false, + } +} + +func (p *Planner) EnterOperationDefinition(ref int) { + p.operationDefinition = ref +} + +type Factory struct { + Client *http.Client +} + +func (f *Factory) Planner(ctx context.Context) plan.DataSourcePlanner { + return &Planner{ + client: f.Client, + } +} + +type Configuration struct { + Fetch FetchConfiguration + Subscription SubscriptionConfiguration +} + +func ConfigJSON(config Configuration) json.RawMessage { + out, _ := json.Marshal(config) + return out +} + +type SubscriptionConfiguration struct { + PollingIntervalMillis int64 + SkipPublishSameResponse bool +} + +type FetchConfiguration struct { + URL string + Method string + Header http.Header + Query []QueryConfiguration + Body string +} + +type QueryConfiguration struct { + Name string `json:"name"` + Value string `json:"value"` +} + +func (p *Planner) Register(visitor *plan.Visitor, configuration plan.DataSourceConfiguration, isNested bool) error { + p.v = visitor + visitor.Walker.RegisterEnterFieldVisitor(p) + visitor.Walker.RegisterEnterOperationVisitor(p) + return json.Unmarshal(configuration.Custom, &p.config) +} + +func (p *Planner) EnterField(ref int) { + p.rootField = ref +} + +func (p *Planner) configureInput() []byte { + + input := httpclient.SetInputURL(nil, []byte(p.config.Fetch.URL)) + input = httpclient.SetInputMethod(input, []byte(p.config.Fetch.Method)) + input = httpclient.SetInputBody(input, []byte(p.config.Fetch.Body)) + + header, err := json.Marshal(p.config.Fetch.Header) + if err == nil && len(header) != 0 && !bytes.Equal(header, literal.NULL) { + input = httpclient.SetInputHeader(input, header) + } + + preparedQuery := p.prepareQueryParams(p.rootField, p.config.Fetch.Query) + query, err := json.Marshal(preparedQuery) + if err == nil && len(preparedQuery) != 0 { + input = httpclient.SetInputQueryParams(input, query) + } + return input +} + +func (p *Planner) ConfigureFetch() plan.FetchConfiguration { + input := p.configureInput() + return plan.FetchConfiguration{ + Input: string(input), + DataSource: &Source{ + client: p.client, + }, + DisallowSingleFlight: p.config.Fetch.Method != "GET", + DisableDataLoader: true, + } +} + +func (p *Planner) ConfigureSubscription() plan.SubscriptionConfiguration { + return plan.SubscriptionConfiguration{} +} + +var ( + selectorRegex = regexp.MustCompile(`{{\s(.*?)\s}}`) +) + +func (p *Planner) prepareQueryParams(field int, query []QueryConfiguration) []QueryConfiguration { + out := make([]QueryConfiguration, 0, len(query)) +Next: + for i := range query { + matches := selectorRegex.FindAllStringSubmatch(query[i].Value, -1) + for j := range matches { + if len(matches[j]) == 2 { + path := matches[j][1] + path = strings.TrimPrefix(path, ".") + elements := strings.Split(path, ".") + if len(elements) < 2 { + continue + } + if elements[0] != "arguments" { + continue + } + argumentName := elements[1] + arg, ok := p.v.Operation.FieldArgument(field, []byte(argumentName)) + if !ok { + continue Next + } + value := p.v.Operation.Arguments[arg].Value + if value.Kind != ast.ValueKindVariable { + continue Next + } + variableName := p.v.Operation.VariableValueNameString(value.Ref) + if 
!p.v.Operation.OperationDefinitionHasVariableDefinition(p.operationDefinition, variableName) { + continue Next + } + } + } + out = append(out, query[i]) + } + return out +} + +type Source struct { + client *http.Client +} + +func (s *Source) Load(ctx context.Context, input []byte, w io.Writer) (err error) { + return httpclient.Do(s.client, ctx, input, w) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/plan/analyze_plan_kind.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/plan/analyze_plan_kind.go new file mode 100644 index 00000000000..2b843192d98 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/plan/analyze_plan_kind.go @@ -0,0 +1,65 @@ +package plan + +import ( + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +func AnalyzePlanKind(operation, definition *ast.Document, operationName string) (subscription, streaming bool, error error) { + walker := astvisitor.NewWalker(48) + visitor := &planKindVisitor{ + Walker: &walker, + operationName: operationName, + } + + walker.RegisterEnterDocumentVisitor(visitor) + walker.RegisterEnterOperationVisitor(visitor) + walker.RegisterEnterDirectiveVisitor(visitor) + + var report operationreport.Report + walker.Walk(operation, definition, &report) + if report.HasErrors() { + return false, false, report + } + subscription = visitor.isSubscription + streaming = visitor.hasDeferDirective || visitor.hasStreamDirective + return +} + +type planKindVisitor struct { + *astvisitor.Walker + operation, definition *ast.Document + operationName string + isSubscription, hasStreamDirective, hasDeferDirective bool +} + +func (p *planKindVisitor) EnterDirective(ref int) { + directiveName := p.operation.DirectiveNameString(ref) + ancestor := p.Ancestors[len(p.Ancestors)-1] + switch ancestor.Kind { + case ast.NodeKindField: + switch directiveName { + case "defer": + p.hasDeferDirective = true + case "stream": + p.hasStreamDirective = true + } + } +} + +func (p *planKindVisitor) EnterOperationDefinition(ref int) { + name := p.operation.OperationDefinitionNameString(ref) + if p.operationName != name { + p.SkipNode() + return + } + switch p.operation.OperationDefinitions[ref].OperationType { + case ast.OperationTypeSubscription: + p.isSubscription = true + } +} + +func (p *planKindVisitor) EnterDocument(operation, definition *ast.Document) { + p.operation, p.definition = operation, definition +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/plan/local_type_field_extractor.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/plan/local_type_field_extractor.go new file mode 100644 index 00000000000..33be3a251c9 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/plan/local_type_field_extractor.go @@ -0,0 +1,349 @@ +package plan + +import ( + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" +) + +const FederationKeyDirectiveName = "key" + +const ( + federationRequireDirectiveName = "requires" + federationExternalDirectiveName = "external" +) + +// LocalTypeFieldExtractor takes an ast.Document as input and generates the +// TypeField configuration for both root and child nodes. 
Root nodes are the
+// root operation types (usually Query, Mutation and Subscription--though these
+// types can be configured via the schema keyword) plus "entities" as defined
+// by the Apollo federation specification. In short, entities are types with a
+// @key directive. Child nodes are field types recursively accessible via a
+// root node. Nodes are either object or interface definitions or extensions.
+// Root nodes only include "local" fields; they don't include fields that have
+// the @external directive.
+type LocalTypeFieldExtractor struct {
+	document               *ast.Document
+	queryTypeName          string
+	mutationTypeName       string
+	subscriptionTypeName   string
+	nodeInfoMap            map[string]*nodeInformation
+	possibleInterfaceTypes map[string][]string
+	rootNodeNames          *rootNodeNamesMap
+	childrenSeen           map[string]struct{}
+	childrenToProcess      []string
+	rootNodes              []TypeField
+	childNodes             []TypeField
+}
+
+func NewLocalTypeFieldExtractor(document *ast.Document) *LocalTypeFieldExtractor {
+	return &LocalTypeFieldExtractor{
+		document:             document,
+		queryTypeName:        "Query",
+		mutationTypeName:     "Mutation",
+		subscriptionTypeName: "Subscription",
+		rootNodes:            make([]TypeField, 0),
+		childNodes:           make([]TypeField, 0),
+	}
+}
+
+type nodeInformation struct {
+	typeName          string
+	hasKeyDirective   bool
+	isInterface       bool
+	isRoot            bool
+	concreteTypeNames []string
+	localFieldRefs    []int
+	externalFieldRefs []int
+	requiredFields    map[string]struct{}
+}
+
+type rootNodeNamesMap struct {
+	index int
+	names map[string]int
+}
+
+func newRootNodeNamesMap() *rootNodeNamesMap {
+	return &rootNodeNamesMap{
+		index: 0,
+		names: map[string]int{},
+	}
+}
+
+func (r *rootNodeNamesMap) append(name string) {
+	if _, ok := r.names[name]; ok {
+		return
+	}
+
+	r.names[name] = r.index
+	r.index++
+}
+
+func (r *rootNodeNamesMap) asSlice() []string {
+	s := make([]string, len(r.names))
+	for name, i := range r.names {
+		s[i] = name
+	}
+
+	return s
+}
+
+// GetAllNodes returns all root and child nodes in the document associated with
+// the LocalTypeFieldExtractor. See LocalTypeFieldExtractor for a detailed
+// explanation of what root and child nodes are.
+func (e *LocalTypeFieldExtractor) GetAllNodes() ([]TypeField, []TypeField) {
+	// The strategy for the extractor is as follows:
+	//
+	// 1. Loop over each node in the document and collect information into
+	//    "node info" structs. All document nodes are processed before creating
+	//    the final "root" and "child" plan nodes because multiple document
+	//    nodes may correspond to a single "node info" struct. For example,
+	//    `type User { ... }` and `extend type User { ... }` nodes will
+	//    correspond to a single User struct.
+	//
+	// 2. Build root nodes for each node info struct identified as a root node.
+	//
+	// 3. Push the root node info structs into a queue and construct a child
+	//    node for each info struct in the queue. After constructing a child
+	//    node, loop over the fields of the child type and add any object or
+	//    abstract type to the queue if the type hasn't yet been processed. An
+	//    abstract type is either an interface or union. When processing
+	//    abstract types, also add the corresponding concrete types to the
+	//    queue (i.e. all the types that implement an interface and union
+	//    members). Note that child nodes aren't created for union types--only
+	//    union members--since it ISN'T possible to select directly from a
+	//    union; union selection sets MUST contain fragments.
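+	//
+	// For illustration, given the hypothetical federated schema
+	//
+	//	type Query { me: User }
+	//	type User @key(fields: "id") { id: ID! reviews: [Review] }
+	//	type Review { body: String! }
+	//
+	// Query and User yield root nodes (User because of its @key directive),
+	// while Review is reachable only through User.reviews and is picked up
+	// as a child node.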
+ + e.nodeInfoMap = make(map[string]*nodeInformation, len(e.document.RootNodes)) + e.possibleInterfaceTypes = map[string][]string{} + e.rootNodeNames = newRootNodeNamesMap() + e.overrideRootOperationTypeNames() + + // 1. Loop over each node in the document (see description above). + e.collectNodeInformation() + + // Record the concrete types for each interface. + e.assignConcreteTypesToInterfaces() + + // Make sure that root and child node slices are cleared + e.resetRootAndChildNodes() + + // 2. Create the root nodes. Also, loop over the fields to find additional + // child nodes to process. + e.createRootNodes() + + // 3. Process the child node queue to create child nodes. When processing + // child nodes, loop over the fields of the child to find additional + // children to process. + e.createChildNodes() + + return e.rootNodes, e.childNodes +} + +func (e *LocalTypeFieldExtractor) overrideRootOperationTypeNames() { + indexedQueryTypeName := string(e.document.Index.QueryTypeName) + if indexedQueryTypeName != "" && indexedQueryTypeName != e.queryTypeName { + e.queryTypeName = indexedQueryTypeName + } + + indexedMutationTypeName := string(e.document.Index.MutationTypeName) + if indexedMutationTypeName != "" && indexedMutationTypeName != e.mutationTypeName { + e.mutationTypeName = indexedMutationTypeName + } + + indexedSubscriptionTypeName := string(e.document.Index.SubscriptionTypeName) + if indexedSubscriptionTypeName != "" && indexedSubscriptionTypeName != e.subscriptionTypeName { + e.subscriptionTypeName = indexedSubscriptionTypeName + } +} + +func (e *LocalTypeFieldExtractor) collectNodeInformation() { + for _, astNode := range e.document.RootNodes { + nodeInfo := e.getNodeInfo(astNode) + + switch astNode.Kind { + case ast.NodeKindObjectTypeDefinition, ast.NodeKindObjectTypeExtension: + for _, ref := range e.document.NodeInterfaceRefs(astNode) { + interfaceName := e.document.ResolveTypeNameString(ref) + // The document doesn't provide a way to directly look up the + // types that implement an interface, so instead we track the + // interfaces implemented for each type and after all nodes + // have been processed record the concrete types for each + // interface. + e.possibleInterfaceTypes[interfaceName] = append( + e.possibleInterfaceTypes[interfaceName], nodeInfo.typeName) + } + case ast.NodeKindInterfaceTypeDefinition, ast.NodeKindInterfaceTypeExtension: + nodeInfo.isInterface = true + case ast.NodeKindUnionTypeDefinition, ast.NodeKindUnionTypeExtension: + for _, ref := range e.document.NodeUnionMemberRefs(astNode) { + // Local union extensions are disjoint. For details, see the GraphQL + // spec: https://spec.graphql.org/October2021/#sec-Union-Extensions + memberName := e.document.ResolveTypeNameString(ref) + nodeInfo.concreteTypeNames = append(nodeInfo.concreteTypeNames, memberName) + } + default: + continue + } + + nodeInfo.isRoot = nodeInfo.isRoot || e.isRootNode(nodeInfo) + if nodeInfo.isRoot { + e.rootNodeNames.append(nodeInfo.typeName) + } + + // Record the local, external, and required fields separately for later + // processing. Root nodes only include local fields, while child nodes + // include all three fields. 
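+ //
+ // For example (a hypothetical subgraph, not from the upstream tests):
+ //
+ //   extend type User @key(fields: "id") {
+ //     id: ID! @external
+ //     weight: Float @external
+ //     shippingEstimate: Int @requires(fields: "weight")
+ //   }
+ //
+ // shippingEstimate lands in localFieldRefs, id and weight land in
+ // externalFieldRefs, and "weight" is added to requiredFields because
+ // of the @requires directive.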
+ e.collectFieldDefinitions(astNode, nodeInfo) + } +} + +func (e *LocalTypeFieldExtractor) getNodeInfo(node ast.Node) *nodeInformation { + typeName := e.document.NodeNameString(node) + nodeInfo, ok := e.nodeInfoMap[typeName] + if ok { + // if this node has the key directive, we need to add it to the node information + nodeInfo.hasKeyDirective = nodeInfo.hasKeyDirective || e.document.NodeHasDirectiveByNameString(node, FederationKeyDirectiveName) + return nodeInfo + } + + nodeInfo = &nodeInformation{ + typeName: typeName, + hasKeyDirective: e.document.NodeHasDirectiveByNameString(node, FederationKeyDirectiveName), + requiredFields: make(map[string]struct{}), + } + + e.nodeInfoMap[typeName] = nodeInfo + return nodeInfo +} + +func (e *LocalTypeFieldExtractor) isRootNode(nodeInfo *nodeInformation) bool { + isFederationEntity := nodeInfo.hasKeyDirective && !nodeInfo.isInterface + return nodeInfo.typeName == e.queryTypeName || + nodeInfo.typeName == e.mutationTypeName || + nodeInfo.typeName == e.subscriptionTypeName || + isFederationEntity +} + +func (e *LocalTypeFieldExtractor) collectFieldDefinitions(node ast.Node, nodeInfo *nodeInformation) { + for _, ref := range e.document.NodeFieldDefinitions(node) { + isExternal := e.document.FieldDefinitionHasNamedDirective(ref, + federationExternalDirectiveName) + + if isExternal { + nodeInfo.externalFieldRefs = append(nodeInfo.externalFieldRefs, ref) + } else { + nodeInfo.localFieldRefs = append(nodeInfo.localFieldRefs, ref) + } + + requiredFields := requiredFieldsByRequiresDirective(e.document, ref) + for _, field := range requiredFields { + nodeInfo.requiredFields[field] = struct{}{} + } + } +} + +func (e *LocalTypeFieldExtractor) assignConcreteTypesToInterfaces() { + for interfaceName, concreteTypeNames := range e.possibleInterfaceTypes { + if nodeInfo, ok := e.nodeInfoMap[interfaceName]; ok { + nodeInfo.concreteTypeNames = concreteTypeNames + } + } +} + +// pushChildIfNotAlreadyProcessed pushes a child type onto the queue if it +// hasn't already been processed. Only types with node info are pushed onto +// the queue. Recall that node info is limited to object types, interfaces +// and union members above. +func (e *LocalTypeFieldExtractor) pushChildIfNotAlreadyProcessed(typeName string) { + if _, ok := e.childrenSeen[typeName]; !ok { + if _, ok := e.nodeInfoMap[typeName]; ok { + e.childrenToProcess = append(e.childrenToProcess, typeName) + } + e.childrenSeen[typeName] = struct{}{} + } +} + +// processFieldRef pushes node info for the field's type as well as--in the +// case of abstract types--node info for each concrete type. +func (e *LocalTypeFieldExtractor) processFieldRef(ref int) string { + fieldType := e.document.FieldDefinitionType(ref) + fieldTypeName := e.document.ResolveTypeNameString(fieldType) + e.pushChildIfNotAlreadyProcessed(fieldTypeName) + if nodeInfo, ok := e.nodeInfoMap[fieldTypeName]; ok { + for _, name := range nodeInfo.concreteTypeNames { + e.pushChildIfNotAlreadyProcessed(name) + } + } + return e.document.FieldDefinitionNameString(ref) +} + +func (e *LocalTypeFieldExtractor) resetRootAndChildNodes() { + e.rootNodes = e.rootNodes[:0] + e.childNodes = e.childNodes[:0] + + // This is the queue used in step 3, child node construction. 
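+ // childrenSeen doubles as the "already processed" marker for that
+ // queue, so each type is enqueued at most once even when several
+ // fields reference it.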
+ e.childrenSeen = make(map[string]struct{}, len(e.nodeInfoMap))
+ e.childrenToProcess = make([]string, 0, len(e.nodeInfoMap))
+}
+
+func (e *LocalTypeFieldExtractor) createRootNodes() {
+ for _, typeName := range e.rootNodeNames.asSlice() {
+ nodeInfo := e.nodeInfoMap[typeName]
+ numFields := len(nodeInfo.localFieldRefs)
+ if numFields == 0 {
+ continue
+ }
+ fieldNames := make([]string, numFields)
+ for i, ref := range nodeInfo.localFieldRefs {
+ fieldNames[i] = e.processFieldRef(ref)
+ }
+ e.rootNodes = append(e.rootNodes, TypeField{
+ TypeName: typeName,
+ FieldNames: fieldNames,
+ })
+ }
+}
+
+func (e *LocalTypeFieldExtractor) createChildNodes() {
+ for len(e.childrenToProcess) > 0 {
+ typeName := e.childrenToProcess[len(e.childrenToProcess)-1]
+ e.childrenToProcess = e.childrenToProcess[:len(e.childrenToProcess)-1]
+ nodeInfo, ok := e.nodeInfoMap[typeName]
+ if !ok {
+ continue
+ }
+ numFields := len(nodeInfo.localFieldRefs) + len(nodeInfo.externalFieldRefs)
+ if numFields == 0 {
+ continue
+ }
+ fieldNames := make([]string, 0, numFields)
+ for _, ref := range nodeInfo.localFieldRefs {
+ fieldNames = append(fieldNames, e.processFieldRef(ref))
+ }
+ for _, ref := range nodeInfo.externalFieldRefs {
+ // We assume that a field is marked @external for only three
+ // reasons:
+ // 1) the enclosing type is using it as a @key field
+ // 2) another field in this datasource @provides it
+ // 3) another field in the enclosing type @requires it
+ // In the first two cases, that means that this datasource
+ // knows the value of the field, and thus we want to include
+ // the field in our ChildNodes. In the last case, this
+ // datasource does *not* know the value of the field, so
+ // we don't include it.
+ // (Note it's legal for someone to add an `@external`
+ // field to their extended type just for the heck of it,
+ // and never use that field for anything. The code below
+ // will wrongly say that this datasource can provide its
+ // value. Hopefully people don't actually do that.)
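+ //
+ // Continuing the hypothetical User example from above: id is kept as
+ // a child-node field because it is an @external @key field, while
+ // weight is dropped because it only appears in an @requires clause.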
+ fieldName := e.processFieldRef(ref) + _, isRequired := nodeInfo.requiredFields[fieldName] + if !isRequired { + fieldNames = append(fieldNames, fieldName) + } + } + e.childNodes = append(e.childNodes, TypeField{ + TypeName: typeName, + FieldNames: fieldNames, + }) + } +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/plan/plan.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/plan/plan.go new file mode 100644 index 00000000000..fe2b0d83881 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/plan/plan.go @@ -0,0 +1,1731 @@ +package plan + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "reflect" + "regexp" + "strings" + + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astimport" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" + "github.com/TykTechnologies/graphql-go-tools/pkg/engine/resolve" + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/literal" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +type Planner struct { + config Configuration + configurationWalker *astvisitor.Walker + configurationVisitor *configurationVisitor + planningWalker *astvisitor.Walker + planningVisitor *Visitor + requiredFieldsWalker *astvisitor.Walker + requiredFieldsVisitor *requiredFieldsVisitor +} + +type Configuration struct { + DefaultFlushIntervalMillis int64 + DataSources []DataSourceConfiguration + Fields FieldConfigurations + Types TypeConfigurations + // DisableResolveFieldPositions should be set to true for testing purposes + // This setting removes position information from all fields + // In production, this should be set to false so that error messages are easier to understand + DisableResolveFieldPositions bool +} + +type DirectiveConfigurations []DirectiveConfiguration + +func (d *DirectiveConfigurations) RenameTypeNameOnMatchStr(directiveName string) string { + for i := range *d { + if (*d)[i].DirectiveName == directiveName { + return (*d)[i].RenameTo + } + } + return directiveName +} + +func (d *DirectiveConfigurations) RenameTypeNameOnMatchBytes(directiveName []byte) []byte { + str := string(directiveName) + for i := range *d { + if (*d)[i].DirectiveName == str { + return []byte((*d)[i].RenameTo) + } + } + return directiveName +} + +type DirectiveConfiguration struct { + DirectiveName string + RenameTo string +} + +type TypeConfigurations []TypeConfiguration + +func (t *TypeConfigurations) RenameTypeNameOnMatchStr(typeName string) string { + for i := range *t { + if (*t)[i].TypeName == typeName { + return (*t)[i].RenameTo + } + } + return typeName +} + +func (t *TypeConfigurations) RenameTypeNameOnMatchBytes(typeName []byte) []byte { + str := string(typeName) + for i := range *t { + if (*t)[i].TypeName == str { + return []byte((*t)[i].RenameTo) + } + } + return typeName +} + +type TypeConfiguration struct { + TypeName string + // RenameTo modifies the TypeName + // so that a downstream Operation can contain a different TypeName than the upstream Schema + // e.g. if the downstream Operation contains { ... on Human_api { height } } + // the upstream Operation can be rewritten to { ... 
on Human { height }}
+ // by setting RenameTo to Human
+ // This way, Types can be suffixed / renamed in downstream Schemas while keeping the contract with the upstream intact
+ RenameTo string
+}
+
+type FieldConfigurations []FieldConfiguration
+
+func (f FieldConfigurations) ForTypeField(typeName, fieldName string) *FieldConfiguration {
+ for i := range f {
+ if f[i].TypeName == typeName && f[i].FieldName == fieldName {
+ return &f[i]
+ }
+ }
+ return nil
+}
+
+type FieldConfiguration struct {
+ TypeName string
+ FieldName string
+ // DisableDefaultMapping - instructs the planner to skip the default path mapping coming from the Path field
+ DisableDefaultMapping bool
+ // Path - a JSON path used to look up the field value in the response JSON
+ Path []string
+ Arguments ArgumentsConfigurations
+ RequiresFields []string
+ // UnescapeResponseJson set to true will allow fields (String,List,Object)
+ // to be resolved from an escaped JSON string
+ // e.g. {"response":"{\"foo\":\"bar\"}"} will be returned as {"foo":"bar"} when path is "response"
+ // This way, it is possible to resolve a JSON string as part of the response without extra String encoding of the JSON
+ UnescapeResponseJson bool
+}
+
+type ArgumentsConfigurations []ArgumentConfiguration
+
+func (a ArgumentsConfigurations) ForName(argName string) *ArgumentConfiguration {
+ for i := range a {
+ if a[i].Name == argName {
+ return &a[i]
+ }
+ }
+ return nil
+}
+
+type SourceType string
+type ArgumentRenderConfig string
+
+const (
+ ObjectFieldSource SourceType = "object_field"
+ FieldArgumentSource SourceType = "field_argument"
+ RenderArgumentDefault ArgumentRenderConfig = ""
+ RenderArgumentAsArrayCSV ArgumentRenderConfig = "render_argument_as_array_csv"
+ RenderArgumentAsGraphQLValue ArgumentRenderConfig = "render_argument_as_graphql_value"
+ RenderArgumentAsJSONValue ArgumentRenderConfig = "render_argument_as_json_value"
+)
+
+type ArgumentConfiguration struct {
+ Name string
+ SourceType SourceType
+ SourcePath []string
+ RenderConfig ArgumentRenderConfig
+ RenameTypeTo string
+}
+
+type DataSourceConfiguration struct {
+ // RootNodes - defines the nodes where the responsibility of the DataSource begins
+ // When the walker enters a field that is a root node of a data source (rather than one of its child nodes), a new planning stage starts
+ RootNodes []TypeField
+ // ChildNodes - describes additional fields that will be requested along with the fields that have a data source
+ // They are always required for GraphQL data sources because each field could have its own data source
+ // For any single-response data source like HTTP/REST or GRPC we cannot request fewer fields, as we always get a full response
+ ChildNodes []TypeField
+ Directives DirectiveConfigurations
+ Factory PlannerFactory
+ Custom json.RawMessage
+}
+
+func (d *DataSourceConfiguration) HasRootNode(typeName, fieldName string) bool {
+ for i := range d.RootNodes {
+ if typeName != d.RootNodes[i].TypeName {
+ continue
+ }
+ for j := range d.RootNodes[i].FieldNames {
+ if fieldName == d.RootNodes[i].FieldNames[j] {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+type PlannerFactory interface {
+ // Planner should return the DataSourcePlanner
+ // closer is the closing channel for all stateful DataSources
+ // At runtime, the Execution Engine will be instantiated with one global resolve.Closer.
+ // Once the Closer gets closed, all stateful DataSources must close their connections and clean up after themselves.
+ // They can do so by starting a goroutine at instantiation time that does a blocking read on the resolve.Closer.
+ // Once the Closer emits the close event, they have to terminate (e.g. close database connections).
+ Planner(ctx context.Context) DataSourcePlanner
+}
+
+type TypeField struct {
+ TypeName string
+ FieldNames []string
+}
+
+type FieldMapping struct {
+ TypeName string
+ FieldName string
+ DisableDefaultMapping bool
+ Path []string
+}
+
+// NewPlanner creates a new Planner from the Configuration and a ctx object
+// The context.Context object is used to determine the lifecycle of stateful DataSources
+// It's important to note that stateful DataSources must be closed when they are no longer being used
+// Stateful DataSources could be those that initiate a WebSocket connection to an origin, a database client, a streaming client, etc.
+// All DataSources are initiated with the same context.Context object provided to the Planner.
+// To ensure that there are no memory leaks, it's therefore important to add a cancel function or timeout to the context.Context
+// At the time when the resolver and all operations should be garbage collected, ensure to first cancel or time out the ctx object
+// If you don't cancel the context.Context, the goroutines will run indefinitely and there's no reference left to stop them
+func NewPlanner(ctx context.Context, config Configuration) *Planner {
+
+ // required fields pre-processing
+
+ requiredFieldsWalker := astvisitor.NewWalker(48)
+ requiredFieldsV := &requiredFieldsVisitor{
+ walker: &requiredFieldsWalker,
+ }
+
+ requiredFieldsWalker.RegisterEnterDocumentVisitor(requiredFieldsV)
+ requiredFieldsWalker.RegisterEnterOperationVisitor(requiredFieldsV)
+ requiredFieldsWalker.RegisterEnterFieldVisitor(requiredFieldsV)
+
+ // configuration
+
+ configurationWalker := astvisitor.NewWalker(48)
+ configVisitor := &configurationVisitor{
+ walker: &configurationWalker,
+ ctx: ctx,
+ }
+
+ configurationWalker.RegisterEnterDocumentVisitor(configVisitor)
+ configurationWalker.RegisterFieldVisitor(configVisitor)
+ configurationWalker.RegisterEnterOperationVisitor(configVisitor)
+ configurationWalker.RegisterSelectionSetVisitor(configVisitor)
+
+ // planning
+
+ planningWalker := astvisitor.NewWalker(48)
+ planningVisitor := &Visitor{
+ Walker: &planningWalker,
+ fieldConfigs: map[int]*FieldConfiguration{},
+ disableResolveFieldPositions: config.DisableResolveFieldPositions,
+ }
+
+ p := &Planner{
+ config: config,
+ configurationWalker: &configurationWalker,
+ configurationVisitor: configVisitor,
+ planningWalker: &planningWalker,
+ planningVisitor: planningVisitor,
+ requiredFieldsWalker: &requiredFieldsWalker,
+ requiredFieldsVisitor: requiredFieldsV,
+ }
+
+ return p
+}
+
+func (p *Planner) SetConfig(config Configuration) {
+ p.config = config
+}
+
+func (p *Planner) Plan(operation, definition *ast.Document, operationName string, report *operationreport.Report) (plan Plan) {
+
+ // make a copy of the config as the pre-processor modifies it
+
+ config := p.config
+
+ // select operation
+
+ p.selectOperation(operation, operationName, report)
+ if report.HasErrors() {
+ return
+ }
+
+ // pre-process required fields
+
+ p.preProcessRequiredFields(&config, operation, definition, report)
+
+ // find planning paths
+
+ p.configurationVisitor.config = config
+ p.configurationWalker.Walk(operation, definition, report)
+
+ // configure planning visitor
+
+ p.planningVisitor.planners = p.configurationVisitor.planners
+ p.planningVisitor.Config = config
+ 
p.planningVisitor.fetchConfigurations = p.configurationVisitor.fetches + p.planningVisitor.fieldBuffers = p.configurationVisitor.fieldBuffers + p.planningVisitor.skipFieldPaths = p.requiredFieldsVisitor.skipFieldPaths + + p.planningWalker.ResetVisitors() + p.planningWalker.SetVisitorFilter(p.planningVisitor) + p.planningWalker.RegisterDocumentVisitor(p.planningVisitor) + p.planningWalker.RegisterEnterOperationVisitor(p.planningVisitor) + p.planningWalker.RegisterFieldVisitor(p.planningVisitor) + p.planningWalker.RegisterSelectionSetVisitor(p.planningVisitor) + p.planningWalker.RegisterEnterDirectiveVisitor(p.planningVisitor) + p.planningWalker.RegisterInlineFragmentVisitor(p.planningVisitor) + + for key := range p.planningVisitor.planners { + config := p.planningVisitor.planners[key].dataSourceConfiguration + isNested := p.planningVisitor.planners[key].isNestedPlanner() + err := p.planningVisitor.planners[key].planner.Register(p.planningVisitor, config, isNested) + if err != nil { + p.planningWalker.StopWithInternalErr(err) + } + } + + // process the plan + + p.planningWalker.Walk(operation, definition, report) + + return p.planningVisitor.plan +} + +func (p *Planner) selectOperation(operation *ast.Document, operationName string, report *operationreport.Report) { + + numOfOperations := operation.NumOfOperationDefinitions() + operationName = strings.TrimSpace(operationName) + if len(operationName) == 0 && numOfOperations > 1 { + report.AddExternalError(operationreport.ErrRequiredOperationNameIsMissing()) + return + } + + if len(operationName) == 0 && numOfOperations == 1 { + operationName = operation.OperationDefinitionNameString(0) + } + + if !operation.OperationNameExists(operationName) { + report.AddExternalError(operationreport.ErrOperationWithProvidedOperationNameNotFound(operationName)) + return + } + + p.requiredFieldsVisitor.operationName = operationName + p.configurationVisitor.operationName = operationName + p.planningVisitor.OperationName = operationName +} + +func (p *Planner) preProcessRequiredFields(config *Configuration, operation, definition *ast.Document, report *operationreport.Report) { + if !p.hasRequiredFields(config) { + return + } + + p.requiredFieldsVisitor.config = config + p.requiredFieldsVisitor.operation = operation + p.requiredFieldsVisitor.definition = definition + p.requiredFieldsWalker.Walk(operation, definition, report) +} + +func (p *Planner) hasRequiredFields(config *Configuration) bool { + for i := range config.Fields { + if len(config.Fields[i].RequiresFields) != 0 { + return true + } + } + return false +} + +type Visitor struct { + Operation, Definition *ast.Document + Walker *astvisitor.Walker + Importer astimport.Importer + Config Configuration + plan Plan + OperationName string + operationDefinition int + objects []*resolve.Object + currentFields []objectFields + currentField *resolve.Field + planners []plannerConfiguration + fetchConfigurations []objectFetchConfiguration + fieldBuffers map[int]int + skipFieldPaths []string + fieldConfigs map[int]*FieldConfiguration + exportedVariables map[string]struct{} + skipIncludeFields map[int]skipIncludeField + disableResolveFieldPositions bool +} + +type skipIncludeField struct { + skip bool + skipVariableName string + include bool + includeVariableName string +} + +type objectFields struct { + popOnField int + fields *[]*resolve.Field +} + +type objectFetchConfiguration struct { + object *resolve.Object + trigger *resolve.GraphQLSubscriptionTrigger + planner DataSourcePlanner + bufferID int + isSubscription 
bool + fieldRef int + fieldDefinitionRef int +} + +func (v *Visitor) AllowVisitor(kind astvisitor.VisitorKind, ref int, visitor interface{}) bool { + if visitor == v { + return true + } + path := v.Walker.Path.DotDelimitedString() + switch kind { + case astvisitor.EnterField, astvisitor.LeaveField: + fieldAliasOrName := v.Operation.FieldAliasOrNameString(ref) + path = path + "." + fieldAliasOrName + } + if !strings.Contains(path, ".") { + return true + } + for _, config := range v.planners { + if config.planner == visitor && config.hasPath(path) { + switch kind { + case astvisitor.EnterField, astvisitor.LeaveField: + return config.shouldWalkFieldsOnPath(path) + case astvisitor.EnterSelectionSet, astvisitor.LeaveSelectionSet: + return !config.isExitPath(path) + default: + return true + } + } + } + return false +} + +func (v *Visitor) currentFullPath() string { + path := v.Walker.Path.DotDelimitedString() + if v.Walker.CurrentKind == ast.NodeKindField { + fieldAliasOrName := v.Operation.FieldAliasOrNameString(v.Walker.CurrentRef) + path = path + "." + fieldAliasOrName + } + return path +} + +func (v *Visitor) EnterDirective(ref int) { + directiveName := v.Operation.DirectiveNameString(ref) + ancestor := v.Walker.Ancestors[len(v.Walker.Ancestors)-1] + switch ancestor.Kind { + case ast.NodeKindOperationDefinition: + switch directiveName { + case "flushInterval": + if value, ok := v.Operation.DirectiveArgumentValueByName(ref, literal.MILLISECONDS); ok { + if value.Kind == ast.ValueKindInteger { + v.plan.SetFlushInterval(v.Operation.IntValueAsInt(value.Ref)) + } + } + } + case ast.NodeKindField: + switch directiveName { + case "stream": + initialBatchSize := 0 + if value, ok := v.Operation.DirectiveArgumentValueByName(ref, literal.INITIAL_BATCH_SIZE); ok { + if value.Kind == ast.ValueKindInteger { + initialBatchSize = int(v.Operation.IntValueAsInt32(value.Ref)) + } + } + v.currentField.Stream = &resolve.StreamField{ + InitialBatchSize: initialBatchSize, + } + case "defer": + v.currentField.Defer = &resolve.DeferField{} + } + } +} + +func (v *Visitor) EnterInlineFragment(ref int) { + directives := v.Operation.InlineFragments[ref].Directives.Refs + skip, skipVariableName := v.resolveSkip(directives) + include, includeVariableName := v.resolveInclude(directives) + set := v.Operation.InlineFragments[ref].SelectionSet + if set == -1 { + return + } + for _, selection := range v.Operation.SelectionSets[set].SelectionRefs { + switch v.Operation.Selections[selection].Kind { + case ast.SelectionKindField: + ref := v.Operation.Selections[selection].Ref + if skip || include { + v.skipIncludeFields[ref] = skipIncludeField{ + skip: skip, + skipVariableName: skipVariableName, + include: include, + includeVariableName: includeVariableName, + } + } + } + } +} + +func (v *Visitor) LeaveInlineFragment(_ int) { + +} + +func (v *Visitor) EnterSelectionSet(_ int) { + +} + +func (v *Visitor) LeaveSelectionSet(_ int) { + +} + +func (v *Visitor) EnterField(ref int) { + + if v.skipField(ref) { + return + } + + skip, skipVariableName := v.resolveSkipForField(ref) + include, includeVariableName := v.resolveIncludeForField(ref) + + fieldName := v.Operation.FieldNameBytes(ref) + fieldAliasOrName := v.Operation.FieldAliasOrNameBytes(ref) + if bytes.Equal(fieldName, literal.TYPENAME) { + v.currentField = &resolve.Field{ + Name: fieldAliasOrName, + Value: &resolve.String{ + Nullable: false, + Path: []string{"__typename"}, + IsTypeName: true, + }, + OnTypeName: v.resolveOnTypeName(), + TypeName: 
v.Walker.EnclosingTypeDefinition.NameString(v.Definition), + Position: v.resolveFieldPosition(ref), + SkipDirectiveDefined: skip, + SkipVariableName: skipVariableName, + IncludeDirectiveDefined: include, + IncludeVariableName: includeVariableName, + } + *v.currentFields[len(v.currentFields)-1].fields = append(*v.currentFields[len(v.currentFields)-1].fields, v.currentField) + return + } + + fieldDefinition, ok := v.Walker.FieldDefinition(ref) + if !ok { + return + } + + var ( + hasFetchConfig bool + i int + ) + for i = range v.fetchConfigurations { + if ref == v.fetchConfigurations[i].fieldRef { + hasFetchConfig = true + break + } + } + if hasFetchConfig { + if v.fetchConfigurations[i].isSubscription { + plan, ok := v.plan.(*SubscriptionResponsePlan) + if ok { + v.fetchConfigurations[i].trigger = &plan.Response.Trigger + } + } else { + v.fetchConfigurations[i].object = v.objects[len(v.objects)-1] + } + } + + path := v.resolveFieldPath(ref) + fieldDefinitionType := v.Definition.FieldDefinitionType(fieldDefinition) + bufferID, hasBuffer := v.fieldBuffers[ref] + + v.currentField = &resolve.Field{ + Name: fieldAliasOrName, + Value: v.resolveFieldValue(ref, fieldDefinitionType, true, path), + HasBuffer: hasBuffer, + BufferID: bufferID, + OnTypeName: v.resolveOnTypeName(), + Position: v.resolveFieldPosition(ref), + SkipDirectiveDefined: skip, + SkipVariableName: skipVariableName, + IncludeDirectiveDefined: include, + IncludeVariableName: includeVariableName, + } + + *v.currentFields[len(v.currentFields)-1].fields = append(*v.currentFields[len(v.currentFields)-1].fields, v.currentField) + + typeName := v.Walker.EnclosingTypeDefinition.NameString(v.Definition) + fieldNameStr := v.Operation.FieldNameString(ref) + fieldConfig := v.Config.Fields.ForTypeField(typeName, fieldNameStr) + if fieldConfig == nil { + return + } + v.fieldConfigs[ref] = fieldConfig +} + +func (v *Visitor) resolveFieldPosition(ref int) resolve.Position { + if v.disableResolveFieldPositions { + return resolve.Position{} + } + return resolve.Position{ + Line: v.Operation.Fields[ref].Position.LineStart, + Column: v.Operation.Fields[ref].Position.CharStart, + } +} + +func (v *Visitor) resolveSkipForField(ref int) (bool, string) { + skipInclude, ok := v.skipIncludeFields[ref] + if ok { + return skipInclude.skip, skipInclude.skipVariableName + } + return v.resolveSkip(v.Operation.Fields[ref].Directives.Refs) +} + +func (v *Visitor) resolveIncludeForField(ref int) (bool, string) { + skipInclude, ok := v.skipIncludeFields[ref] + if ok { + return skipInclude.include, skipInclude.includeVariableName + } + return v.resolveInclude(v.Operation.Fields[ref].Directives.Refs) +} + +func (v *Visitor) resolveSkip(directiveRefs []int) (bool, string) { + for _, i := range directiveRefs { + if v.Operation.DirectiveNameString(i) != "skip" { + continue + } + if value, ok := v.Operation.DirectiveArgumentValueByName(i, literal.IF); ok { + if value.Kind == ast.ValueKindVariable { + return true, v.Operation.VariableValueNameString(value.Ref) + } + } + } + return false, "" +} + +func (v *Visitor) resolveInclude(directiveRefs []int) (bool, string) { + for _, i := range directiveRefs { + if v.Operation.DirectiveNameString(i) != "include" { + continue + } + if value, ok := v.Operation.DirectiveArgumentValueByName(i, literal.IF); ok { + if value.Kind == ast.ValueKindVariable { + return true, v.Operation.VariableValueNameString(value.Ref) + } + } + } + return false, "" +} + +func (v *Visitor) resolveOnTypeName() []byte { + if len(v.Walker.Ancestors) < 2 { + 
return nil + } + inlineFragment := v.Walker.Ancestors[len(v.Walker.Ancestors)-2] + if inlineFragment.Kind != ast.NodeKindInlineFragment { + return nil + } + typeName := v.Operation.InlineFragmentTypeConditionName(inlineFragment.Ref) + return v.Config.Types.RenameTypeNameOnMatchBytes(typeName) +} + +func (v *Visitor) LeaveField(ref int) { + if v.currentFields[len(v.currentFields)-1].popOnField == ref { + v.currentFields = v.currentFields[:len(v.currentFields)-1] + } + fieldDefinition, ok := v.Walker.FieldDefinition(ref) + if !ok { + return + } + fieldDefinitionTypeNode := v.Definition.FieldDefinitionTypeNode(fieldDefinition) + switch fieldDefinitionTypeNode.Kind { + case ast.NodeKindObjectTypeDefinition, ast.NodeKindInterfaceTypeDefinition, ast.NodeKindUnionTypeDefinition: + v.objects = v.objects[:len(v.objects)-1] + } +} + +func (v *Visitor) skipField(ref int) bool { + fullPath := v.Walker.Path.DotDelimitedString() + "." + v.Operation.FieldAliasOrNameString(ref) + for i := range v.skipFieldPaths { + if v.skipFieldPaths[i] == fullPath { + return true + } + } + return false +} + +func (v *Visitor) resolveFieldValue(fieldRef, typeRef int, nullable bool, path []string) resolve.Node { + ofType := v.Definition.Types[typeRef].OfType + + fieldName := v.Operation.FieldNameString(fieldRef) + enclosingTypeName := v.Walker.EnclosingTypeDefinition.NameString(v.Definition) + fieldConfig := v.Config.Fields.ForTypeField(enclosingTypeName, fieldName) + unescapeResponseJson := false + if fieldConfig != nil { + unescapeResponseJson = fieldConfig.UnescapeResponseJson + } + + switch v.Definition.Types[typeRef].TypeKind { + case ast.TypeKindNonNull: + return v.resolveFieldValue(fieldRef, ofType, false, path) + case ast.TypeKindList: + listItem := v.resolveFieldValue(fieldRef, ofType, true, nil) + return &resolve.Array{ + Nullable: nullable, + Path: path, + Item: listItem, + } + case ast.TypeKindNamed: + typeName := v.Definition.ResolveTypeNameString(typeRef) + typeDefinitionNode, ok := v.Definition.Index.FirstNodeByNameStr(typeName) + if !ok { + return &resolve.Null{} + } + switch typeDefinitionNode.Kind { + case ast.NodeKindScalarTypeDefinition: + fieldExport := v.resolveFieldExport(fieldRef) + switch typeName { + case "String": + return &resolve.String{ + Path: path, + Nullable: nullable, + Export: fieldExport, + UnescapeResponseJson: unescapeResponseJson, + } + case "Boolean": + return &resolve.Boolean{ + Path: path, + Nullable: nullable, + Export: fieldExport, + } + case "Int": + return &resolve.Integer{ + Path: path, + Nullable: nullable, + Export: fieldExport, + } + case "Float": + return &resolve.Float{ + Path: path, + Nullable: nullable, + Export: fieldExport, + } + default: + return &resolve.String{ + Path: path, + Nullable: nullable, + Export: fieldExport, + UnescapeResponseJson: unescapeResponseJson, + } + } + case ast.NodeKindEnumTypeDefinition: + return &resolve.String{ + Path: path, + Nullable: nullable, + UnescapeResponseJson: unescapeResponseJson, + } + case ast.NodeKindObjectTypeDefinition, ast.NodeKindInterfaceTypeDefinition, ast.NodeKindUnionTypeDefinition: + object := &resolve.Object{ + Nullable: nullable, + Path: path, + Fields: []*resolve.Field{}, + UnescapeResponseJson: unescapeResponseJson, + } + v.objects = append(v.objects, object) + v.Walker.Defer(func() { + v.currentFields = append(v.currentFields, objectFields{ + popOnField: fieldRef, + fields: &object.Fields, + }) + }) + return object + default: + return &resolve.Null{} + } + default: + return &resolve.Null{} + } +} + +func (v 
*Visitor) resolveFieldExport(fieldRef int) *resolve.FieldExport { + if !v.Operation.Fields[fieldRef].HasDirectives { + return nil + } + exportAs := "" + for _, ref := range v.Operation.Fields[fieldRef].Directives.Refs { + if v.Operation.Input.ByteSliceString(v.Operation.Directives[ref].Name) == "export" { + value, ok := v.Operation.DirectiveArgumentValueByName(ref, []byte("as")) + if !ok { + continue + } + if value.Kind != ast.ValueKindString { + continue + } + exportAs = v.Operation.StringValueContentString(value.Ref) + } + } + if exportAs == "" { + return nil + } + variableDefinition, ok := v.Operation.VariableDefinitionByNameAndOperation(v.Walker.Ancestors[0].Ref, []byte(exportAs)) + if !ok { + return nil + } + v.exportedVariables[exportAs] = struct{}{} + + typeName := v.Operation.ResolveTypeNameString(v.Operation.VariableDefinitions[variableDefinition].Type) + switch typeName { + case "Int", "Float", "Boolean": + return &resolve.FieldExport{ + Path: []string{exportAs}, + } + default: + return &resolve.FieldExport{ + Path: []string{exportAs}, + AsString: true, + } + } +} + +func (v *Visitor) fieldRequiresExportedVariable(fieldRef int) bool { + for _, arg := range v.Operation.Fields[fieldRef].Arguments.Refs { + if v.valueRequiresExportedVariable(v.Operation.Arguments[arg].Value) { + return true + } + } + return false +} + +func (v *Visitor) valueRequiresExportedVariable(value ast.Value) bool { + switch value.Kind { + case ast.ValueKindVariable: + name := v.Operation.VariableValueNameString(value.Ref) + if _, ok := v.exportedVariables[name]; ok { + return true + } + return false + case ast.ValueKindList: + for _, ref := range v.Operation.ListValues[value.Ref].Refs { + if v.valueRequiresExportedVariable(v.Operation.Values[ref]) { + return true + } + } + return false + case ast.ValueKindObject: + for _, ref := range v.Operation.ObjectValues[value.Ref].Refs { + if v.valueRequiresExportedVariable(v.Operation.ObjectFieldValue(ref)) { + return true + } + } + return false + default: + return false + } +} + +func (v *Visitor) EnterOperationDefinition(ref int) { + operationName := v.Operation.OperationDefinitionNameString(ref) + if v.OperationName != operationName { + v.Walker.SkipNode() + return + } + + v.operationDefinition = ref + + rootObject := &resolve.Object{ + Fields: []*resolve.Field{}, + } + + v.objects = append(v.objects, rootObject) + v.currentFields = append(v.currentFields, objectFields{ + fields: &rootObject.Fields, + popOnField: -1, + }) + + isSubscription, _, err := AnalyzePlanKind(v.Operation, v.Definition, v.OperationName) + if err != nil { + v.Walker.StopWithInternalErr(err) + return + } + + graphQLResponse := &resolve.GraphQLResponse{ + Data: rootObject, + } + + if isSubscription { + v.plan = &SubscriptionResponsePlan{ + FlushInterval: v.Config.DefaultFlushIntervalMillis, + Response: &resolve.GraphQLSubscription{ + Response: graphQLResponse, + }, + } + return + } + + /*if isStreaming { + + }*/ + + v.plan = &SynchronousResponsePlan{ + Response: graphQLResponse, + } +} + +func (v *Visitor) resolveFieldPath(ref int) []string { + typeName := v.Walker.EnclosingTypeDefinition.NameString(v.Definition) + fieldName := v.Operation.FieldNameUnsafeString(ref) + config := v.currentOrParentPlannerConfiguration() + aliasOverride := false + if config.planner != nil { + aliasOverride = config.planner.DataSourcePlanningBehavior().OverrideFieldPathFromAlias + } + + for i := range v.Config.Fields { + if v.Config.Fields[i].TypeName == typeName && v.Config.Fields[i].FieldName == fieldName { + if 
aliasOverride { + override, exists := config.planner.DownstreamResponseFieldAlias(ref) + if exists { + return []string{override} + } + } + if aliasOverride && v.Operation.FieldAliasIsDefined(ref) { + return []string{v.Operation.FieldAliasString(ref)} + } + if v.Config.Fields[i].DisableDefaultMapping { + return nil + } + if len(v.Config.Fields[i].Path) != 0 { + return v.Config.Fields[i].Path + } + return []string{fieldName} + } + } + + if aliasOverride { + return []string{v.Operation.FieldAliasOrNameString(ref)} + } + + return []string{fieldName} +} + +func (v *Visitor) EnterDocument(operation, definition *ast.Document) { + v.Operation, v.Definition = operation, definition + v.fieldConfigs = map[int]*FieldConfiguration{} + v.exportedVariables = map[string]struct{}{} + v.skipIncludeFields = map[int]skipIncludeField{} +} + +func (v *Visitor) LeaveDocument(_, _ *ast.Document) { + for _, config := range v.fetchConfigurations { + if config.isSubscription { + v.configureSubscription(config) + } else { + v.configureObjectFetch(config) + } + } +} + +var ( + templateRegex = regexp.MustCompile(`{{.*?}}`) + selectorRegex = regexp.MustCompile(`{{\s*\.(.*?)\s*}}`) +) + +func (v *Visitor) currentOrParentPlannerConfiguration() plannerConfiguration { + const none = -1 + currentPath := v.currentFullPath() + plannerIndex := none + plannerPathDeepness := none + + for i := range v.planners { + for _, plannerPath := range v.planners[i].paths { + if v.isCurrentOrParentPath(currentPath, plannerPath.path) { + currentPlannerPathDeepness := v.pathDeepness(plannerPath.path) + if currentPlannerPathDeepness > plannerPathDeepness { + plannerPathDeepness = currentPlannerPathDeepness + plannerIndex = i + break + } + } + } + } + + if plannerIndex != none { + return v.planners[plannerIndex] + } + + return plannerConfiguration{} +} + +func (v *Visitor) isCurrentOrParentPath(currentPath string, parentPath string) bool { + return strings.HasPrefix(currentPath, parentPath) +} + +func (v *Visitor) pathDeepness(path string) int { + return strings.Count(path, ".") +} + +func (v *Visitor) resolveInputTemplates(config objectFetchConfiguration, input *string, variables *resolve.Variables) { + *input = templateRegex.ReplaceAllStringFunc(*input, func(s string) string { + selectors := selectorRegex.FindStringSubmatch(s) + if len(selectors) != 2 { + return s + } + selector := strings.TrimPrefix(selectors[1], ".") + parts := strings.Split(selector, ".") + if len(parts) < 2 { + return s + } + path := parts[1:] + var ( + variableName string + ) + switch parts[0] { + case "object": + variable := &resolve.ObjectVariable{ + Path: path, + Renderer: resolve.NewPlainVariableRenderer(), + } + variableName, _ = variables.AddVariable(variable) + case "arguments": + argumentName := path[0] + arg, ok := v.Operation.FieldArgument(config.fieldRef, []byte(argumentName)) + if !ok { + break + } + value := v.Operation.ArgumentValue(arg) + if value.Kind != ast.ValueKindVariable { + inputValueDefinition := -1 + for _, ref := range v.Definition.FieldDefinitions[config.fieldDefinitionRef].ArgumentsDefinition.Refs { + inputFieldName := v.Definition.Input.ByteSliceString(v.Definition.InputValueDefinitions[ref].Name) + if inputFieldName == argumentName { + inputValueDefinition = ref + break + } + } + if inputValueDefinition == -1 { + return "null" + } + return v.renderJSONValueTemplate(value, variables, inputValueDefinition) + } + variableValue := v.Operation.VariableValueNameString(value.Ref) + if 
!v.Operation.OperationDefinitionHasVariableDefinition(v.operationDefinition, variableValue) { + break // omit optional argument when variable is not defined + } + variableDefinition, exists := v.Operation.VariableDefinitionByNameAndOperation(v.operationDefinition, v.Operation.VariableValueNameBytes(value.Ref)) + if !exists { + break + } + variableTypeRef := v.Operation.VariableDefinitions[variableDefinition].Type + typeName := v.Operation.ResolveTypeNameBytes(v.Operation.VariableDefinitions[variableDefinition].Type) + node, exists := v.Definition.Index.FirstNodeByNameBytes(typeName) + if !exists { + break + } + + var variablePath []string + if len(parts) > 2 && node.Kind == ast.NodeKindInputObjectTypeDefinition { + variablePath = append(variablePath, path...) + } else { + variablePath = append(variablePath, variableValue) + } + + variable := &resolve.ContextVariable{ + Path: variablePath, + } + + if fieldConfig, ok := v.fieldConfigs[config.fieldRef]; ok { + if argumentConfig := fieldConfig.Arguments.ForName(argumentName); argumentConfig != nil { + switch argumentConfig.RenderConfig { + case RenderArgumentAsArrayCSV: + variable.Renderer = resolve.NewCSVVariableRendererFromTypeRef(v.Operation, v.Definition, variableTypeRef) + case RenderArgumentDefault: + renderer, err := resolve.NewPlainVariableRendererWithValidationFromTypeRef(v.Operation, v.Definition, variableTypeRef, variablePath...) + if err != nil { + break + } + variable.Renderer = renderer + case RenderArgumentAsGraphQLValue: + renderer, err := resolve.NewGraphQLVariableRendererFromTypeRef(v.Operation, v.Definition, variableTypeRef) + if err != nil { + break + } + variable.Renderer = renderer + case RenderArgumentAsJSONValue: + renderer, err := resolve.NewJSONVariableRendererWithValidationFromTypeRef(v.Operation, v.Definition, variableTypeRef) + if err != nil { + break + } + variable.Renderer = renderer + } + } + } + + if variable.Renderer == nil { + renderer, err := resolve.NewPlainVariableRendererWithValidationFromTypeRef(v.Operation, v.Definition, variableTypeRef, variablePath...) 
+ if err != nil {
+ break
+ }
+ variable.Renderer = renderer
+ }
+
+ variableName, _ = variables.AddVariable(variable)
+ case "request":
+ if len(path) != 2 {
+ break
+ }
+ switch path[0] {
+ case "headers":
+ key := path[1]
+ variableName, _ = variables.AddVariable(&resolve.HeaderVariable{
+ Path: []string{key},
+ })
+ }
+ }
+ return variableName
+ })
+}
+
+func (v *Visitor) renderJSONValueTemplate(value ast.Value, variables *resolve.Variables, inputValueDefinition int) (out string) {
+ switch value.Kind {
+ case ast.ValueKindList:
+ out += "["
+ addComma := false
+ for _, ref := range v.Operation.ListValues[value.Ref].Refs {
+ if addComma {
+ out += ","
+ } else {
+ addComma = true
+ }
+ listValue := v.Operation.Values[ref]
+ out += v.renderJSONValueTemplate(listValue, variables, inputValueDefinition)
+ }
+ out += "]"
+ case ast.ValueKindObject:
+ out += "{"
+ addComma := false
+ for _, ref := range v.Operation.ObjectValues[value.Ref].Refs {
+ fieldName := v.Operation.Input.ByteSlice(v.Operation.ObjectFields[ref].Name)
+ fieldValue := v.Operation.ObjectFields[ref].Value
+ typeName := v.Definition.ResolveTypeNameString(v.Definition.InputValueDefinitions[inputValueDefinition].Type)
+ typeDefinitionNode, ok := v.Definition.Index.FirstNodeByNameStr(typeName)
+ if !ok {
+ continue
+ }
+ objectFieldDefinition, ok := v.Definition.NodeInputFieldDefinitionByName(typeDefinitionNode, fieldName)
+ if !ok {
+ continue
+ }
+ if addComma {
+ out += ","
+ } else {
+ addComma = true
+ }
+ out += fmt.Sprintf("\"%s\":", string(fieldName))
+ out += v.renderJSONValueTemplate(fieldValue, variables, objectFieldDefinition)
+ }
+ out += "}"
+ case ast.ValueKindVariable:
+ variablePath := v.Operation.VariableValueNameString(value.Ref)
+ inputType := v.Definition.InputValueDefinitions[inputValueDefinition].Type
+ renderer, err := resolve.NewJSONVariableRendererWithValidationFromTypeRef(v.Definition, v.Definition, inputType)
+ if err != nil {
+ renderer = resolve.NewJSONVariableRenderer()
+ }
+ variableName, _ := variables.AddVariable(&resolve.ContextVariable{
+ Path: []string{variablePath},
+ Renderer: renderer,
+ })
+ out += variableName
+ }
+ return
+}
+
+func (v *Visitor) configureSubscription(config objectFetchConfiguration) {
+ subscription := config.planner.ConfigureSubscription()
+ config.trigger.Variables = subscription.Variables
+ config.trigger.Source = subscription.DataSource
+ v.resolveInputTemplates(config, &subscription.Input, &config.trigger.Variables)
+ config.trigger.Input = []byte(subscription.Input)
+}
+
+func (v *Visitor) configureObjectFetch(config objectFetchConfiguration) {
+ if config.object == nil {
+ return
+ }
+ fetchConfig := config.planner.ConfigureFetch()
+ fetch := v.configureFetch(config, fetchConfig)
+
+ switch f := fetch.(type) {
+ case *resolve.SingleFetch:
+ v.resolveInputTemplates(config, &f.Input, &f.Variables)
+ case *resolve.BatchFetch:
+ v.resolveInputTemplates(config, &f.Fetch.Input, &f.Fetch.Variables)
+ }
+ if config.object.Fetch == nil {
+ config.object.Fetch = fetch
+ return
+ }
+ switch existing := config.object.Fetch.(type) {
+ case *resolve.SingleFetch:
+ copyOfExisting := *existing
+ parallel := &resolve.ParallelFetch{
+ Fetches: []resolve.Fetch{&copyOfExisting, fetch},
+ }
+ config.object.Fetch = parallel
+ case *resolve.BatchFetch:
+ copyOfExisting := *existing
+ parallel := &resolve.ParallelFetch{
+ Fetches: []resolve.Fetch{&copyOfExisting, fetch},
+ }
+ config.object.Fetch = parallel
+ case *resolve.ParallelFetch:
+ existing.Fetches = append(existing.Fetches, fetch)
+ 
} +} + +func (v *Visitor) configureFetch(internal objectFetchConfiguration, external FetchConfiguration) resolve.Fetch { + dataSourceType := reflect.TypeOf(external.DataSource).String() + dataSourceType = strings.TrimPrefix(dataSourceType, "*") + + singleFetch := &resolve.SingleFetch{ + BufferId: internal.bufferID, + Input: external.Input, + DataSource: external.DataSource, + Variables: external.Variables, + DisallowSingleFlight: external.DisallowSingleFlight, + DataSourceIdentifier: []byte(dataSourceType), + ProcessResponseConfig: external.ProcessResponseConfig, + DisableDataLoader: external.DisableDataLoader, + SetTemplateOutputToNullOnVariableNull: external.SetTemplateOutputToNullOnVariableNull, + } + + // if a field depends on an exported variable, data loader needs to be disabled + // this is because the data loader will render all input templates before all fields are evaluated + // exporting field values into a variable depends on the field being evaluated first + // for that reason, if a field depends on an exported variable, data loader needs to be disabled + disableDataLoader := v.fieldRequiresExportedVariable(internal.fieldRef) + if disableDataLoader { + singleFetch.DisableDataLoader = true + } + + if !external.BatchConfig.AllowBatch { + return singleFetch + } + + return &resolve.BatchFetch{ + Fetch: singleFetch, + BatchFactory: external.BatchConfig.BatchFactory, + } +} + +type Kind int + +const ( + SynchronousResponseKind Kind = iota + 1 + StreamingResponseKind + SubscriptionResponseKind +) + +type Plan interface { + PlanKind() Kind + SetFlushInterval(interval int64) +} + +type SynchronousResponsePlan struct { + Response *resolve.GraphQLResponse + FlushInterval int64 +} + +func (s *SynchronousResponsePlan) SetFlushInterval(interval int64) { + s.FlushInterval = interval +} + +func (_ *SynchronousResponsePlan) PlanKind() Kind { + return SynchronousResponseKind +} + +type StreamingResponsePlan struct { + Response *resolve.GraphQLStreamingResponse + FlushInterval int64 +} + +func (s *StreamingResponsePlan) SetFlushInterval(interval int64) { + s.FlushInterval = interval +} + +func (_ *StreamingResponsePlan) PlanKind() Kind { + return StreamingResponseKind +} + +type SubscriptionResponsePlan struct { + Response *resolve.GraphQLSubscription + FlushInterval int64 +} + +func (s *SubscriptionResponsePlan) SetFlushInterval(interval int64) { + s.FlushInterval = interval +} + +func (_ *SubscriptionResponsePlan) PlanKind() Kind { + return SubscriptionResponseKind +} + +type DataSourcePlanningBehavior struct { + // MergeAliasedRootNodes will reuse a data source for multiple root fields with aliases if true. + // Example: + // { + // rootField + // alias: rootField + // } + // On dynamic data sources (e.g. GraphQL, SQL, ...) this should return true and for + // static data sources (e.g. REST, static, GRPC...) it should be false. + MergeAliasedRootNodes bool + // OverrideFieldPathFromAlias will let the planner know if the response path should also be aliased (= true) + // or not (= false) + // Example: + // { + // rootField + // alias: original + // } + // When true expected response will be { "rootField": ..., "alias": ... } + // When false expected response will be { "rootField": ..., "original": ... 
} + OverrideFieldPathFromAlias bool + // IncludeTypeNameFields should be set to true if the planner wants to get EnterField & LeaveField events + // for __typename fields + IncludeTypeNameFields bool +} + +type DataSourcePlanner interface { + Register(visitor *Visitor, configuration DataSourceConfiguration, isNested bool) error + ConfigureFetch() FetchConfiguration + ConfigureSubscription() SubscriptionConfiguration + DataSourcePlanningBehavior() DataSourcePlanningBehavior + // DownstreamResponseFieldAlias allows the DataSourcePlanner to overwrite the response path with an alias + // It's required to set OverrideFieldPathFromAlias to true + // This function is useful in the following scenario + // 1. The downstream Query doesn't contain an alias + // 2. The path configuration rewrites the field to an existing field + // 3. The DataSourcePlanner is using an alias to the upstream + // Example: + // + // type Query { + // country: Country + // countryAlias: Country + // } + // + // Both, country and countryAlias have a path in the FieldConfiguration of "country" + // In theory, they would be treated as the same field + // However, by using DownstreamResponseFieldAlias, it's possible for the DataSourcePlanner to use an alias for countryAlias. + // In this case, the response would contain both, country and countryAlias fields in the response. + // At the same time, the downstream Query would only expect the response on the path "country", + // as both country and countryAlias have a mapping to the path "country". + // The DataSourcePlanner could keep track that it rewrites the upstream query and use DownstreamResponseFieldAlias + // to indicate to the Planner to expect the response for countryAlias on the path "countryAlias" instead of "country". + DownstreamResponseFieldAlias(downstreamFieldRef int) (alias string, exists bool) +} + +type SubscriptionConfiguration struct { + Input string + Variables resolve.Variables + DataSource resolve.SubscriptionDataSource +} + +type FetchConfiguration struct { + Input string + Variables resolve.Variables + DataSource resolve.DataSource + DisallowSingleFlight bool + // DisableDataLoader will configure the Resolver to not use DataLoader + // If this is set to false, the planner might still decide to override it, + // e.g. if a field depends on an exported variable which doesn't work with DataLoader + DisableDataLoader bool + ProcessResponseConfig resolve.ProcessResponseConfig + BatchConfig BatchConfig + // SetTemplateOutputToNullOnVariableNull will safely return "null" if one of the template variables renders to null + // This is the case, e.g. 
when using batching and one sibling is null, resulting in a null value for one batch item + // Returning null in this case tells the batch implementation to skip this item + SetTemplateOutputToNullOnVariableNull bool +} + +type BatchConfig struct { + AllowBatch bool + BatchFactory resolve.DataSourceBatchFactory +} + +type configurationVisitor struct { + operationName string + operation, definition *ast.Document + walker *astvisitor.Walker + config Configuration + planners []plannerConfiguration + fetches []objectFetchConfiguration + currentBufferId int + fieldBuffers map[int]int + + parentTypeNodes []ast.Node + + ctx context.Context +} + +func (c *configurationVisitor) EnterSelectionSet(_ int) { + c.parentTypeNodes = append(c.parentTypeNodes, c.walker.EnclosingTypeDefinition) +} + +func (c *configurationVisitor) LeaveSelectionSet(_ int) { + c.parentTypeNodes = c.parentTypeNodes[:len(c.parentTypeNodes)-1] +} + +type plannerConfiguration struct { + parentPath string + planner DataSourcePlanner + paths []pathConfiguration + dataSourceConfiguration DataSourceConfiguration + bufferID int +} + +// isNestedPlanner returns true in case the planner is not directly attached to the Operation root +// a nested planner should always build a Query +func (p *plannerConfiguration) isNestedPlanner() bool { + for i := range p.paths { + pathElements := strings.Count(p.paths[i].path, ".") + 1 + if pathElements == 2 { + return false + } + } + return true +} + +func (c *configurationVisitor) nextBufferID() int { + c.currentBufferId++ + return c.currentBufferId +} + +func (p *plannerConfiguration) hasPath(path string) bool { + for i := range p.paths { + if p.paths[i].path == path { + return true + } + } + return false +} + +func (p *plannerConfiguration) isExitPath(path string) bool { + for i := range p.paths { + if p.paths[i].path == path { + return p.paths[i].exitPlannerOnNode + } + } + return false +} + +func (p *plannerConfiguration) shouldWalkFieldsOnPath(path string) bool { + for i := range p.paths { + if p.paths[i].path == path { + return p.paths[i].shouldWalkFields + } + } + return false +} + +func (p *plannerConfiguration) setPathExit(path string) { + for i := range p.paths { + if p.paths[i].path == path { + p.paths[i].exitPlannerOnNode = true + return + } + } +} + +func (p *plannerConfiguration) hasPathPrefix(prefix string) bool { + for i := range p.paths { + if p.paths[i].path == prefix { + continue + } + if strings.HasPrefix(p.paths[i].path, prefix) { + return true + } + } + return false +} + +func (p *plannerConfiguration) hasParent(parent string) bool { + return p.parentPath == parent +} + +func (p *plannerConfiguration) hasChildNode(typeName, fieldName string) bool { + for i := range p.dataSourceConfiguration.ChildNodes { + if typeName != p.dataSourceConfiguration.ChildNodes[i].TypeName { + continue + } + for j := range p.dataSourceConfiguration.ChildNodes[i].FieldNames { + if fieldName == p.dataSourceConfiguration.ChildNodes[i].FieldNames[j] { + return true + } + } + } + return false +} + +func (p *plannerConfiguration) hasRootNode(typeName, fieldName string) bool { + for i := range p.dataSourceConfiguration.RootNodes { + if typeName != p.dataSourceConfiguration.RootNodes[i].TypeName { + continue + } + for j := range p.dataSourceConfiguration.RootNodes[i].FieldNames { + if fieldName == p.dataSourceConfiguration.RootNodes[i].FieldNames[j] { + return true + } + } + } + return false +} + +type pathConfiguration struct { + path string + exitPlannerOnNode bool + // shouldWalkFields indicates whether 
the planner is allowed to walk into fields + // this is needed in case we're dealing with a nested federated abstract query + // we need to be able to walk into the inline fragments and selection sets in the root + // however, we want to skip the fields at this level + // so, by setting shouldWalkFields to false, we can walk into non fields only + shouldWalkFields bool +} + +func (c *configurationVisitor) EnterOperationDefinition(ref int) { + operationName := c.operation.OperationDefinitionNameString(ref) + if c.operationName != operationName { + c.walker.SkipNode() + return + } +} + +func (c *configurationVisitor) EnterField(ref int) { + fieldName := c.operation.FieldNameUnsafeString(ref) + fieldAliasOrName := c.operation.FieldAliasOrNameString(ref) + typeName := c.walker.EnclosingTypeDefinition.NameString(c.definition) + parent := c.walker.Path.DotDelimitedString() + current := parent + "." + fieldAliasOrName + root := c.walker.Ancestors[0] + if root.Kind != ast.NodeKindOperationDefinition { + return + } + isSubscription := c.isSubscription(root.Ref, current) + for i, plannerConfig := range c.planners { + planningBehaviour := plannerConfig.planner.DataSourcePlanningBehavior() + if plannerConfig.hasParent(parent) && plannerConfig.hasRootNode(typeName, fieldName) && planningBehaviour.MergeAliasedRootNodes { + // same parent + root node = root sibling + c.planners[i].paths = append(c.planners[i].paths, pathConfiguration{path: current, shouldWalkFields: true}) + c.fieldBuffers[ref] = plannerConfig.bufferID + return + } + if plannerConfig.hasPath(parent) && plannerConfig.hasChildNode(typeName, fieldName) { + // has parent path + has child node = child + c.planners[i].paths = append(c.planners[i].paths, pathConfiguration{path: current, shouldWalkFields: true}) + return + } + if fieldAliasOrName == "__typename" && planningBehaviour.IncludeTypeNameFields { + c.planners[i].paths = append(c.planners[i].paths, pathConfiguration{path: current, shouldWalkFields: true}) + return + } + } + for i, config := range c.config.DataSources { + if config.HasRootNode(typeName, fieldName) { + var ( + bufferID int + ) + if !isSubscription { + bufferID = c.nextBufferID() + c.fieldBuffers[ref] = bufferID + } + planner := c.config.DataSources[i].Factory.Planner(c.ctx) + isParentAbstract := c.isParentTypeNodeAbstractType() + paths := []pathConfiguration{ + { + path: current, + shouldWalkFields: true, + }, + } + if isParentAbstract { + // if the parent is abstract, we add the parent path as well + // this will ensure that we're walking into and out of the root inline fragments + // otherwise, we'd only walk into the fields inside the inline fragments in the root, + // so we'd miss the selection sets and inline fragments in the root + paths = append([]pathConfiguration{ + { + path: parent, + shouldWalkFields: false, + }, + }, paths...) 
+ } + c.planners = append(c.planners, plannerConfiguration{ + bufferID: bufferID, + parentPath: parent, + planner: planner, + paths: paths, + dataSourceConfiguration: config, + }) + fieldDefinition, ok := c.walker.FieldDefinition(ref) + if !ok { + continue + } + c.fetches = append(c.fetches, objectFetchConfiguration{ + bufferID: bufferID, + planner: planner, + isSubscription: isSubscription, + fieldRef: ref, + fieldDefinitionRef: fieldDefinition, + }) + return + } + } +} + +func (c *configurationVisitor) isParentTypeNodeAbstractType() bool { + if len(c.parentTypeNodes) < 2 { + return false + } + parentTypeNode := c.parentTypeNodes[len(c.parentTypeNodes)-2] + return parentTypeNode.Kind.IsAbstractType() +} + +func (c *configurationVisitor) LeaveField(ref int) { + fieldAliasOrName := c.operation.FieldAliasOrNameString(ref) + parent := c.walker.Path.DotDelimitedString() + current := parent + "." + fieldAliasOrName + for i, planner := range c.planners { + if planner.hasPath(current) && !planner.hasPathPrefix(current) { + c.planners[i].setPathExit(current) + return + } + } +} + +func (c *configurationVisitor) EnterDocument(operation, definition *ast.Document) { + c.operation, c.definition = operation, definition + c.currentBufferId = -1 + c.parentTypeNodes = c.parentTypeNodes[:0] + if c.planners == nil { + c.planners = make([]plannerConfiguration, 0, 8) + } else { + c.planners = c.planners[:0] + } + if c.fetches == nil { + c.fetches = []objectFetchConfiguration{} + } else { + c.fetches = c.fetches[:0] + } + if c.fieldBuffers == nil { + c.fieldBuffers = map[int]int{} + } else { + for i := range c.fieldBuffers { + delete(c.fieldBuffers, i) + } + } +} + +func (c *configurationVisitor) isSubscription(root int, path string) bool { + rootOperationType := c.operation.OperationDefinitions[root].OperationType + if rootOperationType != ast.OperationTypeSubscription { + return false + } + return strings.Count(path, ".") == 1 +} + +type requiredFieldsVisitor struct { + operation, definition *ast.Document + walker *astvisitor.Walker + config *Configuration + operationName string + skipFieldPaths []string +} + +func (r *requiredFieldsVisitor) EnterDocument(_, _ *ast.Document) { + r.skipFieldPaths = r.skipFieldPaths[:0] +} + +func (r *requiredFieldsVisitor) EnterField(ref int) { + typeName := r.walker.EnclosingTypeDefinition.NameString(r.definition) + fieldName := r.operation.FieldNameUnsafeString(ref) + fieldConfig := r.config.Fields.ForTypeField(typeName, fieldName) + if fieldConfig == nil { + return + } + if len(fieldConfig.RequiresFields) == 0 { + return + } + selectionSet := r.walker.Ancestors[len(r.walker.Ancestors)-1] + if selectionSet.Kind != ast.NodeKindSelectionSet { + return + } + for i := range fieldConfig.RequiresFields { + r.handleRequiredField(selectionSet.Ref, fieldConfig.RequiresFields[i]) + } +} + +func (r *requiredFieldsVisitor) handleRequiredField(selectionSet int, requiredFieldName string) { + for _, ref := range r.operation.SelectionSets[selectionSet].SelectionRefs { + selection := r.operation.Selections[ref] + if selection.Kind != ast.SelectionKindField { + continue + } + name := r.operation.FieldAliasOrNameString(selection.Ref) + if name == requiredFieldName { + // already exists + return + } + } + r.addRequiredField(requiredFieldName, selectionSet) +} + +func (r *requiredFieldsVisitor) addRequiredField(fieldName string, selectionSet int) { + field := ast.Field{ + Name: r.operation.Input.AppendInputString(fieldName), + } + addedField := r.operation.AddField(field) + selection := 
ast.Selection{ + Kind: ast.SelectionKindField, + Ref: addedField.Ref, + } + r.operation.AddSelection(selectionSet, selection) + addedFieldPath := r.walker.Path.DotDelimitedString() + "." + fieldName + r.skipFieldPaths = append(r.skipFieldPaths, addedFieldPath) +} + +func (r *requiredFieldsVisitor) EnterOperationDefinition(ref int) { + operationName := r.operation.OperationDefinitionNameString(ref) + if r.operationName != operationName { + r.walker.SkipNode() + return + } +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/plan/required_field_extractor.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/plan/required_field_extractor.go new file mode 100644 index 00000000000..3d8b4701d2a --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/plan/required_field_extractor.go @@ -0,0 +1,140 @@ +package plan + +import ( + "strings" + + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" +) + +var fieldsArgumentNameBytes = []byte("fields") + +// RequiredFieldExtractor extracts all required fields from an ast.Document +// containing a parsed federation subgraph SDL +// by visiting the directives specified in the federation specification +// and extracting the required meta data +type RequiredFieldExtractor struct { + document *ast.Document +} + +func NewRequiredFieldExtractor(document *ast.Document) *RequiredFieldExtractor { + return &RequiredFieldExtractor{ + document: document, + } +} + +func (f *RequiredFieldExtractor) GetAllRequiredFields() FieldConfigurations { + var fieldRequires FieldConfigurations + + f.addFieldsForObjectExtensionDefinitions(&fieldRequires) + f.addFieldsForObjectDefinitions(&fieldRequires) + + return fieldRequires +} + +func (f *RequiredFieldExtractor) addFieldsForObjectExtensionDefinitions(fieldRequires *FieldConfigurations) { + for _, objectTypeExt := range f.document.ObjectTypeExtensions { + objectType := objectTypeExt.ObjectTypeDefinition + typeName := f.document.Input.ByteSliceString(objectType.Name) + + primaryKeys, exists := f.primaryKeyFieldsIfObjectTypeIsEntity(objectType) + if !exists { + continue + } + + for _, fieldDefinitionRef := range objectType.FieldsDefinition.Refs { + if f.document.FieldDefinitionHasNamedDirective(fieldDefinitionRef, federationExternalDirectiveName) { + continue + } + + fieldName := f.document.FieldDefinitionNameString(fieldDefinitionRef) + + requiredFields := make([]string, len(primaryKeys)) + copy(requiredFields, primaryKeys) + + requiredFieldsByRequiresDirective := requiredFieldsByRequiresDirective(f.document, fieldDefinitionRef) + requiredFields = append(requiredFields, requiredFieldsByRequiresDirective...) 
+ + *fieldRequires = append(*fieldRequires, FieldConfiguration{ + TypeName: typeName, + FieldName: fieldName, + RequiresFields: requiredFields, + }) + } + } +} + +func (f *RequiredFieldExtractor) addFieldsForObjectDefinitions(fieldRequires *FieldConfigurations) { + for _, objectType := range f.document.ObjectTypeDefinitions { + typeName := f.document.Input.ByteSliceString(objectType.Name) + + primaryKeys, exists := f.primaryKeyFieldsIfObjectTypeIsEntity(objectType) + if !exists { + continue + } + + primaryKeysSet := make(map[string]struct{}, len(primaryKeys)) + for _, val := range primaryKeys { + primaryKeysSet[val] = struct{}{} + } + + for _, fieldRef := range objectType.FieldsDefinition.Refs { + fieldName := f.document.FieldDefinitionNameString(fieldRef) + if _, exists := primaryKeysSet[fieldName]; exists { // Field is part of primary key, it couldn't have any required fields + continue + } + + requiredFields := make([]string, len(primaryKeys)) + copy(requiredFields, primaryKeys) + + *fieldRequires = append(*fieldRequires, FieldConfiguration{ + TypeName: typeName, + FieldName: fieldName, + RequiresFields: requiredFields, + }) + } + } +} + +func requiredFieldsByRequiresDirective(document *ast.Document, fieldDefinitionRef int) []string { + for _, directiveRef := range document.FieldDefinitions[fieldDefinitionRef].Directives.Refs { + if directiveName := document.DirectiveNameString(directiveRef); directiveName != federationRequireDirectiveName { + continue + } + + value, exists := document.DirectiveArgumentValueByName(directiveRef, fieldsArgumentNameBytes) + if !exists { + continue + } + if value.Kind != ast.ValueKindString { + continue + } + + fieldsStr := document.StringValueContentString(value.Ref) + + return strings.Split(fieldsStr, " ") + } + + return nil +} + +func (f *RequiredFieldExtractor) primaryKeyFieldsIfObjectTypeIsEntity(objectType ast.ObjectTypeDefinition) (keyFields []string, ok bool) { + for _, directiveRef := range objectType.Directives.Refs { + if directiveName := f.document.DirectiveNameString(directiveRef); directiveName != FederationKeyDirectiveName { + continue + } + + value, exists := f.document.DirectiveArgumentValueByName(directiveRef, fieldsArgumentNameBytes) + if !exists { + continue + } + if value.Kind != ast.ValueKindString { + continue + } + + fieldsStr := f.document.StringValueContentString(value.Ref) + + return strings.Split(fieldsStr, " "), true + } + + return nil, false +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/resolve/dataloader.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/resolve/dataloader.go new file mode 100644 index 00000000000..ae69bdb6f6f --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/resolve/dataloader.go @@ -0,0 +1,517 @@ +package resolve + +import ( + "fmt" + "sync" + + "github.com/buger/jsonparser" + + "github.com/TykTechnologies/graphql-go-tools/pkg/fastbuffer" +) + +const ( + initialValueID = -1 + arrayElementKey = "@" +) + +// dataLoaderFactory is responsible for creating dataloader and provides different pools (e.g, bufPair, +// bufPairSlice, waitGroup pools). 
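+//
+// A minimal usage sketch (illustrative only; all of these helpers exist in this
+// file, and resolve.go owns the factory):
+//
+//	factory := newDataloaderFactory(fetcher) // one factory per Resolver
+//	loader := factory.newDataLoader(nil)     // nil: no initial subscription payload
+//	defer factory.freeDataLoader(loader)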
+type dataLoaderFactory struct { + dataloaderPool sync.Pool + muPool sync.Pool + waitGroupPool sync.Pool + bufPairPool sync.Pool + bufPairSlicePool sync.Pool + + fetcher *Fetcher +} + +func (df *dataLoaderFactory) getWaitGroup() *sync.WaitGroup { + return df.waitGroupPool.Get().(*sync.WaitGroup) +} + +func (df *dataLoaderFactory) freeWaitGroup(wg *sync.WaitGroup) { + df.waitGroupPool.Put(wg) +} + +func (df *dataLoaderFactory) getBufPairSlicePool() *[]*BufPair { + return df.bufPairSlicePool.Get().(*[]*BufPair) +} + +func (df *dataLoaderFactory) freeBufPairSlice(slice *[]*BufPair) { + for i := range *slice { + df.freeBufPair((*slice)[i]) + } + *slice = (*slice)[:0] + df.bufPairSlicePool.Put(slice) +} + +func (df *dataLoaderFactory) getBufPair() *BufPair { + return df.bufPairPool.Get().(*BufPair) +} + +func (df *dataLoaderFactory) freeBufPair(pair *BufPair) { + pair.Data.Reset() + pair.Errors.Reset() + df.bufPairPool.Put(pair) +} + +func (df *dataLoaderFactory) getMutex() *sync.Mutex { + return df.muPool.Get().(*sync.Mutex) +} + +func (df *dataLoaderFactory) freeMutex(mu *sync.Mutex) { + df.muPool.Put(mu) +} + +func newDataloaderFactory(fetcher *Fetcher) *dataLoaderFactory { + return &dataLoaderFactory{ + muPool: sync.Pool{ + New: func() interface{} { + return &sync.Mutex{} + }, + }, + waitGroupPool: sync.Pool{ + New: func() interface{} { + return &sync.WaitGroup{} + }, + }, + bufPairPool: sync.Pool{ + New: func() interface{} { + pair := BufPair{ + Data: fastbuffer.New(), + Errors: fastbuffer.New(), + } + return &pair + }, + }, + bufPairSlicePool: sync.Pool{ + New: func() interface{} { + slice := make([]*BufPair, 0, 24) + return &slice + }, + }, + dataloaderPool: sync.Pool{ + New: func() interface{} { + return &dataLoader{ + fetches: make(map[int]fetchState), + inUseBufPair: make([]*BufPair, 0, 8), + } + }, + }, + fetcher: fetcher, + } +} + +// newDataLoader returns new instance of dataLoader. +// initialValue represents data from subscription, initialValue will be saved with initialValueID id and could be used +// for further fetches. +func (df *dataLoaderFactory) newDataLoader(initialValue []byte) *dataLoader { + dataloader := df.dataloaderPool.Get().(*dataLoader) + + dataloader.mu = df.getMutex() + dataloader.resourceProvider = df + dataloader.fetcher = df.fetcher + + if initialValue != nil { + + buf := dataloader.getResultBufPair() + buf.Data.WriteBytes(initialValue) + + dataloader.fetches[initialValueID] = &batchFetchState{ + nextIdx: 0, + fetchError: nil, + results: []*BufPair{buf}, + } + } + + return dataloader +} + +func (df *dataLoaderFactory) freeDataLoader(d *dataLoader) { + for _, pair := range d.inUseBufPair { + d.resourceProvider.freeBufPair(pair) + } + + d.resourceProvider.freeMutex(d.mu) + + d.inUseBufPair = d.inUseBufPair[:0] + d.fetches = nil +} + +// dataLoader +type dataLoader struct { + fetches map[int]fetchState + mu *sync.Mutex + fetcher *Fetcher + resourceProvider *dataLoaderFactory + + inUseBufPair []*BufPair +} + +// Load fetches concurrently data for all siblings. 
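+//
+// An illustrative call-pattern sketch (the resolver invokes this internally while
+// resolving a field; `singleFetch` stands for a planned *SingleFetch):
+//
+//	pair := NewBufPair()
+//	if err := ctx.dataLoader.Load(ctx, singleFetch, pair); err != nil {
+//		// the fetch for this sibling group failed
+//	}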
+func (d *dataLoader) Load(ctx *Context, fetch *SingleFetch, responsePair *BufPair) (err error) { + var fetchResult fetchState + var resultPair *BufPair + + fetchResult, ok := d.getFetchState(fetch.BufferId) + if ok { + resultPair, err = fetchResult.next(ctx) + copyBufPair(responsePair, resultPair) + return + } + + fetchResult = &batchFetchState{} + + parentResult, ok := d.getFetchState(ctx.lastFetchID) + + if !ok { // it must be root query without subscription data + buf := d.resourceProvider.getBufPair() + defer d.resourceProvider.freeBufPair(buf) + + if err := fetch.InputTemplate.Render(ctx, nil, buf.Data); err != nil { + return err + } + + pair := d.getResultBufPair() + err = d.fetcher.Fetch(ctx, fetch, buf.Data, pair) + fetchResult = &singleFetchState{ + fetchErrors: []error{err}, + results: []*BufPair{pair}, + } + + d.setFetchState(fetchResult, fetch.BufferId) + + resultPair, err = fetchResult.next(ctx) + copyBufPair(responsePair, resultPair) + return + } + + fetchParams, err := d.selectedDataForFetch(parentResult.data(), ctx.responseElements...) + if err != nil { + return err + } + + if fetchResult, err = d.resolveSingleFetch(ctx, fetch, fetchParams); err != nil { + return err + } + + d.setFetchState(fetchResult, fetch.BufferId) + + resultPair, err = fetchResult.next(ctx) + copyBufPair(responsePair, resultPair) + + return +} + +// LoadBatch builds and resolve batch request for all siblings. +func (d *dataLoader) LoadBatch(ctx *Context, batchFetch *BatchFetch, responsePair *BufPair) (err error) { + var fetchResult fetchState + var resultPair *BufPair + fetchResult, ok := d.getFetchState(batchFetch.Fetch.BufferId) + if ok { + resultPair, err = fetchResult.next(ctx) + copyBufPair(responsePair, resultPair) + return + } + + fetchResult = &batchFetchState{} + + parentResult, ok := d.getFetchState(ctx.lastFetchID) + if !ok { + return fmt.Errorf("has not got fetch for %d", ctx.lastFetchID) + } + + fetchParams, err := d.selectedDataForFetch(parentResult.data(), ctx.responseElements...) 
+ if err != nil { + return err + } + + if fetchResult, err = d.resolveBatchFetch(ctx, batchFetch, fetchParams); err != nil { + return err + } + + d.setFetchState(fetchResult, batchFetch.Fetch.BufferId) + + resultPair, err = fetchResult.next(ctx) + copyBufPair(responsePair, resultPair) + return +} + +func (d *dataLoader) resolveBatchFetch(ctx *Context, batchFetch *BatchFetch, fetchParams [][]byte) (fetchState *batchFetchState, err error) { + inputBufs := make([]*fastbuffer.FastBuffer, 0, len(fetchParams)) + + bufSlice := d.resourceProvider.getBufPairSlicePool() + defer d.resourceProvider.freeBufPairSlice(bufSlice) + + for i := range fetchParams { + bufPair := d.resourceProvider.getBufPair() + *bufSlice = append(*bufSlice, bufPair) + if err := batchFetch.Fetch.InputTemplate.Render(ctx, fetchParams[i], bufPair.Data); err != nil { + return nil, err + } + + inputBufs = append(inputBufs, bufPair.Data) + } + + outBuf := d.resourceProvider.getBufPair() + *bufSlice = append(*bufSlice, outBuf) + + results := make([]*BufPair, len(inputBufs)) + for i := range inputBufs { + results[i] = d.getResultBufPair() + } + + fetchState = &batchFetchState{} + + if err = d.fetcher.FetchBatch(ctx, batchFetch, inputBufs, results); err != nil { + fetchState.fetchError = err + return fetchState, nil + } + + fetchState.results = results + + return fetchState, nil +} + +func (d *dataLoader) resolveSingleFetch(ctx *Context, fetch *SingleFetch, fetchParams [][]byte) (fetchState *singleFetchState, err error) { + wg := d.resourceProvider.getWaitGroup() + defer d.resourceProvider.freeWaitGroup(wg) + + wg.Add(len(fetchParams)) + + type fetchResult struct { + result *BufPair + err error + pos int + } + + resultCh := make(chan fetchResult, len(fetchParams)) + + bufSlice := d.resourceProvider.getBufPairSlicePool() + defer d.resourceProvider.freeBufPairSlice(bufSlice) + + for i, val := range fetchParams { + bufPair := d.resourceProvider.getBufPair() + *bufSlice = append(*bufSlice, bufPair) + if err := fetch.InputTemplate.Render(ctx, val, bufPair.Data); err != nil { + return nil, err + } + + pair := d.getResultBufPair() + + go func(pos int, pair *BufPair) { + err := d.fetcher.Fetch(ctx, fetch, bufPair.Data, pair) + resultCh <- fetchResult{result: pair, err: err, pos: pos} + wg.Done() + }(i, pair) + } + + go func() { + wg.Wait() + close(resultCh) + }() + + fetchState = &singleFetchState{ + fetchErrors: make([]error, len(fetchParams)), + results: make([]*BufPair, len(fetchParams)), + } + + for res := range resultCh { + fetchState.fetchErrors[res.pos] = res.err + fetchState.results[res.pos] = res.result + } + + return fetchState, err +} + +func (d *dataLoader) getFetchState(fetchID int) (batchState fetchState, ok bool) { + d.mu.Lock() + defer d.mu.Unlock() + + batchState, ok = d.fetches[fetchID] + return +} + +func (d *dataLoader) setFetchState(batchState fetchState, fetchID int) { + d.mu.Lock() + defer d.mu.Unlock() + + d.fetches[fetchID] = batchState +} + +func (d *dataLoader) selectedDataForFetch(input [][]byte, path ...string) ([][]byte, error) { + if len(path) == 0 { + return input, nil + } + + current, rest := path[0], path[1:] + + if current == arrayElementKey { + return flatMap(input, func(val []byte) ([][]byte, error) { + var vals [][]byte + _, err := jsonparser.ArrayEach(val, func(value []byte, dataType jsonparser.ValueType, offset int, err error) { + vals = append(vals, value) + }) + if err != nil { // In case if array is null + return nil, nil + } + + return d.selectedDataForFetch(vals, rest...) 
+ }) + } + + temp := make([][]byte, 0, len(input)) + + for i := range input { + el, dataType, _, err := jsonparser.Get(input[i], current) + if dataType == jsonparser.NotExist { + // The input has an object that doesn't contain the path component. + // This can happen in the following situation. Consider the + // following query: + // + // { + // someArrayWithInterfaceItem { + // ... on A { + // aField { + // id + // fieldFromAnotherService # <- this is federated + // } + // } + // ... on B { + // someOtherField + // } + // } + // } + // + // The result after fetching someArrayWithInterfaceItem might be: + // { + // "data": { + // "someArrayWithInterfaceItem": [ + // {"__typename": "A", "aField": {"id": 1}}, + // {"__typename": "B", "someOtherField": "hello"}, + // {"__typename": "A", "aField": {"id": 2}} + // ] + // } + // + // When resolving the fieldFromAnotherService field, we should + // only look at the type "A" inputs, since those are the only + // objects the someArrayWithInterfaceItem fetch applies to. In + // other words, inputs without "aField" are skipped. + continue + } + if err != nil { + return nil, err + } + temp = append(temp, el) + } + + return d.selectedDataForFetch(temp, rest...) +} + +func (d *dataLoader) getResultBufPair() (pair *BufPair) { + d.mu.Lock() + defer d.mu.Unlock() + + pair = d.resourceProvider.bufPairPool.Get().(*BufPair) + d.inUseBufPair = append(d.inUseBufPair, pair) + + return +} + +type fetchState interface { + data() [][]byte + next(ctx *Context) (*BufPair, error) +} + +type batchFetchState struct { + nextIdx int + + fetchError error + results []*BufPair +} + +func (b *batchFetchState) data() [][]byte { + dataSlice := make([][]byte, len(b.results)) + + for i := range b.results { + if b.results[i] != nil && b.results[i].HasData() { + dataSlice[i] = b.results[i].Data.Bytes() + } + } + + return dataSlice +} + +// next works correctly only with synchronous resolve strategy +// In case of asynchronous resolve strategy it's required to compute response position based on values from ctx (current path) +// But there is no reason for asynchronous resolve strategy, it's not useful, as all IO operations (fetching data) is be done by dataloader +func (b *batchFetchState) next(ctx *Context) (*BufPair, error) { + if b.fetchError != nil { + return nil, b.fetchError + } + + res := b.results[b.nextIdx] + + b.nextIdx++ + + return res, nil +} + +type singleFetchState struct { + nextIdx int + + fetchErrors []error + results []*BufPair +} + +func (b *singleFetchState) data() [][]byte { + dataSlice := make([][]byte, len(b.results)) + + for i := range b.results { + if b.results[i] != nil && b.results[i].HasData() { + dataSlice[i] = b.results[i].Data.Bytes() + } + } + + return dataSlice +} + +// next works correctly only with synchronous resolve strategy +// In case of asynchronous resolve strategy it's required to compute response position based on values from ctx (current path) +// But there is no reason for asynchronous resolve strategy, it's not useful, as all IO operations (fetching data) is be done by dataloader +func (b *singleFetchState) next(ctx *Context) (*BufPair, error) { + if b.fetchErrors[b.nextIdx] != nil { + return nil, b.fetchErrors[b.nextIdx] + } + + res := b.results[b.nextIdx] + + b.nextIdx++ + + return res, nil +} + +func flatMap(input [][]byte, f func(val []byte) ([][]byte, error)) ([][]byte, error) { + var result [][]byte + + for i := range input { + mapRes, err := f(input[i]) + if err != nil { + return nil, err + } + + result = append(result, mapRes...) 
+	}
+
+	return result, nil
+}
+
+func copyBufPair(to, from *BufPair) {
+	if to == nil || from == nil {
+		return
+	}
+
+	to.Data.WriteBytes(from.Data.Bytes())
+	to.Errors.WriteBytes(from.Errors.Bytes())
+}
diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/resolve/dataloader.md b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/resolve/dataloader.md
new file mode 100644
index 00000000000..36a482ff3df
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/resolve/dataloader.md
@@ -0,0 +1,201 @@
+# This document describes the current batching solution
+
+## Why do we need dataLoader?
+
+DataLoader solves the following problems:
+
+### 1. Batching
+
+Batching greatly decreases the number of network requests to data sources.
+
+The resolver tries to resolve an array of Products ```[Product1, Product2, Product3]```:
+
+**Before:**
+
+_fetch 1:_
+```json
+{
+  "method":"POST",
+  "url":"http://localhost:4003",
+  "body":{
+    "query":"query($representations: [_Any!]!){_entities(representations: $representations){... on Product {name}}}",
+    "variables":{"representations":[{"upc":"top-1","__typename":"Product"}]}
+  }
+}
+```
+
+_fetch 2:_
+```json
+{
+  "method":"POST",
+  "url":"http://localhost:4003",
+  "body":{
+    "query":"query($representations: [_Any!]!){_entities(representations: $representations){... on Product {name}}}",
+    "variables":{"representations":[{"upc":"top-2","__typename":"Product"}]}
+  }
+}
+```
+
+_fetch 3:_
+```json
+{
+  "method":"POST",
+  "url":"http://localhost:4003",
+  "body":{
+    "query":"query($representations: [_Any!]!){_entities(representations: $representations){... on Product {name}}}",
+    "variables":{"representations":[{"upc":"top-3","__typename":"Product"}]}
+  }
+}
+```
+
+**After:**
+
+_fetch 1:_
+```json
+{
+  "method":"POST",
+  "url":"http://localhost:4003",
+  "body":{
+    "query":"query($representations: [_Any!]!){_entities(representations: $representations){... on Product {name}}}",
+    "variables":{
+      "representations":[
+        {"upc":"top-1","__typename":"Product"},
+        {"upc":"top-2","__typename":"Product"},
+        {"upc":"top-3","__typename":"Product"}
+      ]
+    }
+  }
+}
+```
+
+### 2. Request deduplication
+
+It allows requesting data only for unique argument sets.
+
+The resolver tries to resolve an array of Products ```[Product1, Product2, Product1, Product3, Product2]```:
+
+**Batch without deduplication:**
+
+_fetch:_
+```json
+{
+  "method":"POST",
+  "url":"http://localhost:4003",
+  "body":{
+    "query":"query($representations: [_Any!]!){_entities(representations: $representations){... on Product {name}}}",
+    "variables": {
+      "representations":[
+        {"upc":"top-1","__typename":"Product"},
+        {"upc":"top-2","__typename":"Product"},
+        {"upc":"top-1","__typename":"Product"},
+        {"upc":"top-3","__typename":"Product"},
+        {"upc":"top-2","__typename":"Product"}
+      ]
+    }
+  }
+}
+```
+
+**Batch with deduplication:**
+
+_fetch:_
+```json
+{
+  "method":"POST",
+  "url":"http://localhost:4003",
+  "body":{
+    "query":"query($representations: [_Any!]!){_entities(representations: $representations){... on Product {name}}}",
+    "variables": {
+      "representations":[
+        {"upc":"top-1","__typename":"Product"},
+        {"upc":"top-2","__typename":"Product"},
+        {"upc":"top-3","__typename":"Product"}
+      ]
+    }
+  }
+}
+```
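+
+To make deduplication concrete, here is a minimal, illustrative Go sketch (not the
+actual implementation; the real batch assembly lives in the data source's batch
+factory) that collapses repeated representations before the batch request is built:
+
+```go
+package main
+
+import "fmt"
+
+// representation mirrors one `_entities` representation from the examples above.
+type representation struct {
+	Typename string
+	UPC      string
+}
+
+// dedupe keeps only the first occurrence of each argument set.
+func dedupe(reps []representation) []representation {
+	seen := make(map[representation]struct{}, len(reps))
+	out := make([]representation, 0, len(reps))
+	for _, r := range reps {
+		if _, ok := seen[r]; ok {
+			continue // duplicate argument set: skip
+		}
+		seen[r] = struct{}{}
+		out = append(out, r)
+	}
+	return out
+}
+
+func main() {
+	reps := []representation{
+		{"Product", "top-1"}, {"Product", "top-2"},
+		{"Product", "top-1"}, {"Product", "top-3"}, {"Product", "top-2"},
+	}
+	fmt.Println(dedupe(reps)) // [{Product top-1} {Product top-2} {Product top-3}]
+}
+```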
+
+## How does dataLoader work?
+
+- dataLoader is a request-scoped object: every new GraphQL request (and every new subscription message) requires a new dataLoader.
+- resolve.Context keeps the dataLoader for the current request.
+- resolve.Context keeps the last (parent) `lastFetchID` for the current request.
+- resolve.Context keeps `responsePath`, an array of all object.Path/Array.Path segments since `lastFetchID`;
+  when a node is an Array, the special symbol `@` is additionally appended to `responsePath` (e.g., [topProducts, @]).
+- the current dataLoader implementation is based on a synchronous resolve strategy.
+- when the Resolver tries to resolve a fetch (SingleFetch/BatchFetch) for a `fetchID`, the dataLoader resolves the fetch with that `fetchID` for all siblings.
+- for a SingleFetch, the dataLoader (Load) resolves the fetch for all siblings concurrently.
+- for a BatchFetch, the dataLoader (LoadBatch) creates a single batch request that resolves the fetches for all siblings.
+- to create the fetch input, the dataLoader selects data from the `lastFetchID` response by `responsePath`.
+
+**Example:**
+
+_Query:_
+```
+query {
+    topProducts {
+        reviews {
+            body
+            author {
+                username
+            }
+        }
+    }
+}
+```
+
+```
+                                 |topProducts|                                           FetchID=0
+                                       |
+                                       |
+        |----------------------|Array of Products|----------------------|
+        |                              |                                 |
+        |                              |                                 |
+   |Product A|                    |Product B|                       |Product C|
+        |                              |                                 |
+        |                              |                                 |
+ |--|Array of Reviews|--|       |Array of Reviews|        |----|Array of Reviews|----|   FetchID=1;LastFetchID=0;responsePath=[topProducts @]
+ |          |           |              |                       |                     |
+ |          |           |              |                       |                     |
+ |Review A1| |Review A2| |Review A3|   |Review B1|             |Review C1| |Review C2|
+     |           |           |             |                       |           |
+     |           |           |             |                       |           |
+ |Author 1|  |Author 2|  |Author 3|    |Author 4|              |Author 5|  |Author 6|    FetchID=2;LastFetchID=1;responsePath=[reviews @ author]
+```
+
+ 1. creates the dataLoader
+ 1. resolves `topProducts`; a fetch with `FetchID=0` is required and returns an array of products
+    * set `lastFetchID` to `0`
+    * add `topProducts` to `responsePath`
+    * save the response with `FetchID=0`
+ 1. enters the `Array of Products` node and adds `@` to the `responsePath`; no need to fetch
+ 1. enters the `Product A` node; no need to fetch
+ 1. enters the `Array of Reviews` node; a fetch with `FetchID=1` is required
+    * the dataLoader gets the response `{"topProducts": [{"upc": "top1", ...}, {"upc": "top2", ...}, {"upc": "top3", ...}]}` for `lastFetchID` (it was saved in step 2)
+      and builds the fetch input for all `Array of Reviews` siblings (the `selectedDataForFetch` method is responsible for finding all siblings)
+    * resolves all fetches from the previous step (for a SingleFetch: send N concurrent requests; for a BatchFetch: compose all fetches into a single batch);
+      the Planner is responsible for choosing which type of Fetch to use (e.g., for the graphql data source it makes sense to use BatchFetch for resolving an Entity)
+    * save the response with `FetchID=1`
+    * reset `responsePath`, then add `reviews` and `@` to it (`responsePath` = `["reviews", "@"]`)
+    * set `lastFetchID` to `1`
+ 1. enters the `Review A1` node; no need to fetch
+    * add `author` to `responsePath` (`responsePath` = `["reviews", "@", "author"]`)
+ 1. enters the `Author 1` node; a fetch with `FetchID=2` is required (this is actually the last step that leads to fetching)
+    * the dataLoader gets the response `[{"reviews":[{review A1}, {review A2}, {review A3}]}, {"reviews": [{review B1}]}, {"reviews": [{review C1}, {review C2}]}]` for `lastFetchID` (it was saved in step 5)
+      and builds the fetch input for all `Author 1` siblings
+    * resolves all fetches from the previous step (for a SingleFetch: send N concurrent requests; for a BatchFetch: compose all fetches into a single batch)
+    * save the response with `FetchID=2`
+    * reset `responsePath`
+    * set `lastFetchID` to `2`
+ 1. enters the `Review A2` node; no need to fetch
+ 1. enters the `Author 2` node; a fetch with `FetchID=2` is required
+    * the dataLoader has already requested the required data (it is saved under `FetchID=2`), so it just takes the second element from the response for `FetchID=2`
+
+ 1. enters the second `Array of Reviews` node; a fetch with `FetchID=1` is required
+    * the dataLoader has already requested the required data (it is saved under `FetchID=1`), so it just takes the second element from the response for `FetchID=1`
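+
+The sibling-selection step above is implemented by `selectedDataForFetch` in
+dataloader.go. The following is a trimmed-down, illustrative re-implementation
+(assuming the `github.com/buger/jsonparser` dependency the real code uses),
+showing how `responsePath` with the `@` array marker fans out over parent data:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/buger/jsonparser"
+)
+
+// selectForFetch walks path over every input document; "@" fans out over
+// array elements, any other segment descends into that object key.
+func selectForFetch(input [][]byte, path ...string) [][]byte {
+	if len(path) == 0 {
+		return input
+	}
+	current, rest := path[0], path[1:]
+	if current == "@" {
+		var flattened [][]byte
+		for _, doc := range input {
+			_, _ = jsonparser.ArrayEach(doc, func(value []byte, _ jsonparser.ValueType, _ int, _ error) {
+				flattened = append(flattened, value)
+			})
+		}
+		return selectForFetch(flattened, rest...)
+	}
+	next := make([][]byte, 0, len(input))
+	for _, doc := range input {
+		el, dt, _, err := jsonparser.Get(doc, current)
+		if err != nil || dt == jsonparser.NotExist {
+			continue // this parent object does not contain the path component
+		}
+		next = append(next, el)
+	}
+	return selectForFetch(next, rest...)
+}
+
+func main() {
+	parent := []byte(`{"topProducts":[{"upc":"top-1"},{"upc":"top-2"},{"upc":"top-3"}]}`)
+	for _, sibling := range selectForFetch([][]byte{parent}, "topProducts", "@") {
+		fmt.Printf("%s\n", sibling) // one fetch input per product sibling
+	}
+}
+```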
missing field on parent object) +- return number of nodes in response + +## implemented in execution + +- resolves operations containing unions & interfaces +- resolves flat queries/mutations +- can define __typename for individual objects returned by data sources (users should be able to set the __typename using a middleware/plugin or predefined rules) + +## implemented in planning + +query: +resolve() -> client + +subscription: +for { + resolve() -> client +} + +stream: +resolveUser() -> client +for i := range user.friends { + resolveFriend(i) -> client + resolvePet(friend) -> client +} + + +```go +package resolving +type Resolver interface { + Resolve(ctx context.Context,userID string, config, input []byte) (output []byte, err error) +} +``` + +QueryPlan: + ResolveOneUser() + ResolveUserFriends() + ResolveManyPets() \ No newline at end of file diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/resolve/fetcher.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/resolve/fetcher.go new file mode 100644 index 00000000000..f10cfbb9f2a --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/resolve/fetcher.go @@ -0,0 +1,196 @@ +package resolve + +import ( + "hash" + "sync" + + "github.com/cespare/xxhash/v2" + + "github.com/TykTechnologies/graphql-go-tools/pkg/fastbuffer" + "github.com/TykTechnologies/graphql-go-tools/pkg/pool" +) + +type Fetcher struct { + EnableSingleFlightLoader bool + hash64Pool sync.Pool + inflightFetchPool sync.Pool + bufPairPool sync.Pool + inflightFetchMu *sync.Mutex + inflightFetches map[uint64]*inflightFetch +} + +func NewFetcher(enableSingleFlightLoader bool) *Fetcher { + return &Fetcher{ + EnableSingleFlightLoader: enableSingleFlightLoader, + hash64Pool: sync.Pool{ + New: func() interface{} { + return xxhash.New() + }, + }, + inflightFetchPool: sync.Pool{ + New: func() interface{} { + return &inflightFetch{ + bufPair: BufPair{ + Data: fastbuffer.New(), + Errors: fastbuffer.New(), + }, + } + }, + }, + bufPairPool: sync.Pool{ + New: func() interface{} { + return NewBufPair() + }, + }, + inflightFetchMu: &sync.Mutex{}, + inflightFetches: map[uint64]*inflightFetch{}, + } +} + +func (f *Fetcher) Fetch(ctx *Context, fetch *SingleFetch, preparedInput *fastbuffer.FastBuffer, buf *BufPair) (err error) { + dataBuf := pool.BytesBuffer.Get() + defer pool.BytesBuffer.Put(dataBuf) + + if ctx.beforeFetchHook != nil { + ctx.beforeFetchHook.OnBeforeFetch(f.hookCtx(ctx), preparedInput.Bytes()) + } + + if !f.EnableSingleFlightLoader || fetch.DisallowSingleFlight { + err = fetch.DataSource.Load(ctx.Context, preparedInput.Bytes(), dataBuf) + extractResponse(dataBuf.Bytes(), buf, fetch.ProcessResponseConfig) + + if ctx.afterFetchHook != nil { + if buf.HasData() { + ctx.afterFetchHook.OnData(f.hookCtx(ctx), buf.Data.Bytes(), false) + } + if buf.HasErrors() { + ctx.afterFetchHook.OnError(f.hookCtx(ctx), buf.Errors.Bytes(), false) + } + } + return + } + + hash64 := f.getHash64() + _, _ = hash64.Write(preparedInput.Bytes()) + fetchID := hash64.Sum64() + f.putHash64(hash64) + + f.inflightFetchMu.Lock() + inflight, ok := f.inflightFetches[fetchID] + if ok { + inflight.waitFree.Add(1) + defer inflight.waitFree.Done() + f.inflightFetchMu.Unlock() + inflight.waitLoad.Wait() + if inflight.bufPair.HasData() { + if ctx.afterFetchHook != nil { + ctx.afterFetchHook.OnData(f.hookCtx(ctx), inflight.bufPair.Data.Bytes(), true) + } + buf.Data.WriteBytes(inflight.bufPair.Data.Bytes()) + } + if inflight.bufPair.HasErrors() { + if 
ctx.afterFetchHook != nil { + ctx.afterFetchHook.OnError(f.hookCtx(ctx), inflight.bufPair.Errors.Bytes(), true) + } + buf.Errors.WriteBytes(inflight.bufPair.Errors.Bytes()) + } + return inflight.err + } + + inflight = f.getInflightFetch() + inflight.waitLoad.Add(1) + f.inflightFetches[fetchID] = inflight + + f.inflightFetchMu.Unlock() + + err = fetch.DataSource.Load(ctx.Context, preparedInput.Bytes(), dataBuf) + extractResponse(dataBuf.Bytes(), &inflight.bufPair, fetch.ProcessResponseConfig) + inflight.err = err + + if inflight.bufPair.HasData() { + if ctx.afterFetchHook != nil { + ctx.afterFetchHook.OnData(f.hookCtx(ctx), inflight.bufPair.Data.Bytes(), false) + } + buf.Data.WriteBytes(inflight.bufPair.Data.Bytes()) + } + + if inflight.bufPair.HasErrors() { + if ctx.afterFetchHook != nil { + ctx.afterFetchHook.OnError(f.hookCtx(ctx), inflight.bufPair.Errors.Bytes(), true) + } + buf.Errors.WriteBytes(inflight.bufPair.Errors.Bytes()) + } + + inflight.waitLoad.Done() + + f.inflightFetchMu.Lock() + delete(f.inflightFetches, fetchID) + f.inflightFetchMu.Unlock() + + go func() { + inflight.waitFree.Wait() + f.freeInflightFetch(inflight) + }() + + return +} + +func (f *Fetcher) FetchBatch(ctx *Context, fetch *BatchFetch, preparedInputs []*fastbuffer.FastBuffer, bufs []*BufPair) (err error) { + inputs := make([][]byte, len(preparedInputs)) + for i := range preparedInputs { + inputs[i] = preparedInputs[i].Bytes() + } + + batch, err := fetch.BatchFactory.CreateBatch(inputs) + if err != nil { + return err + } + + buf := f.getBufPair() + defer f.freeBufPair(buf) + + if err = f.Fetch(ctx, fetch.Fetch, batch.Input(), buf); err != nil { + return err + } + + if err = batch.Demultiplex(buf, bufs); err != nil { + return err + } + + return +} + +func (f *Fetcher) getBufPair() *BufPair { + return f.bufPairPool.Get().(*BufPair) +} + +func (f *Fetcher) freeBufPair(buf *BufPair) { + buf.Reset() + f.bufPairPool.Put(buf) +} + +func (f *Fetcher) getInflightFetch() *inflightFetch { + return f.inflightFetchPool.Get().(*inflightFetch) +} + +func (f *Fetcher) freeInflightFetch(inflightFetch *inflightFetch) { + inflightFetch.bufPair.Data.Reset() + inflightFetch.bufPair.Errors.Reset() + inflightFetch.err = nil + f.inflightFetchPool.Put(inflightFetch) +} + +func (f *Fetcher) hookCtx(ctx *Context) HookContext { + return HookContext{ + CurrentPath: ctx.path(), + } +} + +func (f *Fetcher) getHash64() hash.Hash64 { + return f.hash64Pool.Get().(hash.Hash64) +} + +func (f *Fetcher) putHash64(h hash.Hash64) { + h.Reset() + f.hash64Pool.Put(h) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/resolve/inputtemplate.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/resolve/inputtemplate.go new file mode 100644 index 00000000000..2afea7faa1f --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/resolve/inputtemplate.go @@ -0,0 +1,139 @@ +package resolve + +import ( + "context" + "errors" + "fmt" + + "github.com/buger/jsonparser" + + "github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/httpclient" + "github.com/TykTechnologies/graphql-go-tools/pkg/fastbuffer" + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/literal" +) + +type SegmentType int + +const ( + StaticSegmentType SegmentType = iota + 1 + VariableSegmentType +) + +type TemplateSegment struct { + SegmentType SegmentType + Data []byte + VariableKind VariableKind + VariableSourcePath []string + Renderer VariableRenderer +} + +type InputTemplate struct { + Segments []TemplateSegment + 
// SetTemplateOutputToNullOnVariableNull will safely return "null" if one of the template variables renders to null + // This is the case, e.g. when using batching and one sibling is null, resulting in a null value for one batch item + // Returning null in this case tells the batch implementation to skip this item + SetTemplateOutputToNullOnVariableNull bool +} + +var setTemplateOutputNull = errors.New("set to null") + +func (i *InputTemplate) Render(ctx *Context, data []byte, preparedInput *fastbuffer.FastBuffer) (err error) { + undefinedVariables := make([]string, 0) + + for j := range i.Segments { + switch i.Segments[j].SegmentType { + case StaticSegmentType: + preparedInput.WriteBytes(i.Segments[j].Data) + case VariableSegmentType: + switch i.Segments[j].VariableKind { + case ObjectVariableKind: + err = i.renderObjectVariable(ctx, data, i.Segments[j], preparedInput) + case ContextVariableKind: + err = i.renderContextVariable(ctx, i.Segments[j], preparedInput, &undefinedVariables) + case HeaderVariableKind: + err = i.renderHeaderVariable(ctx, i.Segments[j].VariableSourcePath, preparedInput) + default: + err = fmt.Errorf("InputTemplate.Render: cannot resolve variable of kind: %d", i.Segments[j].VariableKind) + } + if err != nil { + if errors.Is(err, setTemplateOutputNull) { + preparedInput.Reset() + preparedInput.WriteBytes(literal.NULL) + return nil + } + return err + } + } + } + + if len(undefinedVariables) > 0 { + ctx.Context = httpclient.CtxSetUndefinedVariables(ctx.Context, undefinedVariables) + } + + return +} + +func (i *InputTemplate) renderObjectVariable(ctx context.Context, variables []byte, segment TemplateSegment, preparedInput *fastbuffer.FastBuffer) error { + value, valueType, offset, err := jsonparser.Get(variables, segment.VariableSourcePath...) + if err != nil || valueType == jsonparser.Null { + if i.SetTemplateOutputToNullOnVariableNull { + return setTemplateOutputNull + } + preparedInput.WriteBytes(literal.NULL) + return nil + } + if valueType == jsonparser.String { + value = variables[offset-len(value)-2 : offset] + switch segment.Renderer.GetKind() { + case VariableRendererKindPlain, VariableRendererKindPlanWithValidation: + if plainRenderer, ok := (segment.Renderer).(*PlainVariableRenderer); ok { + plainRenderer.rootValueType.Value = valueType + } + } + } + return segment.Renderer.RenderVariable(ctx, value, preparedInput) +} + +func (i *InputTemplate) renderContextVariable(ctx *Context, segment TemplateSegment, preparedInput *fastbuffer.FastBuffer, undefinedVariables *[]string) error { + value, valueType, offset, err := jsonparser.Get(ctx.Variables, segment.VariableSourcePath...) 
+ if err != nil || valueType == jsonparser.Null { + if err == jsonparser.KeyPathNotFoundError { + *undefinedVariables = append(*undefinedVariables, segment.VariableSourcePath[0]) + preparedInput.WriteBytes(literal.NULL) + return nil + } + + return segment.Renderer.RenderVariable(ctx, value, preparedInput) + } + if valueType == jsonparser.String { + value = ctx.Variables[offset-len(value)-2 : offset] + switch segment.Renderer.GetKind() { + case VariableRendererKindPlain, VariableRendererKindPlanWithValidation: + if plainRenderer, ok := (segment.Renderer).(*PlainVariableRenderer); ok { + plainRenderer.rootValueType.Value = valueType + } + } + } + return segment.Renderer.RenderVariable(ctx, value, preparedInput) +} + +func (i *InputTemplate) renderHeaderVariable(ctx *Context, path []string, preparedInput *fastbuffer.FastBuffer) error { + if len(path) != 1 { + return errHeaderPathInvalid + } + value := ctx.Request.Header.Values(path[0]) + if len(value) == 0 { + return nil + } + if len(value) == 1 { + preparedInput.WriteString(value[0]) + return nil + } + for j := range value { + if j != 0 { + preparedInput.WriteBytes(literal.COMMA) + } + preparedInput.WriteString(value[j]) + } + return nil +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/resolve/resolve.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/resolve/resolve.go new file mode 100644 index 00000000000..98bf1459c8c --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/resolve/resolve.go @@ -0,0 +1,1739 @@ +//go:generate mockgen --build_flags=--mod=mod -self_package=github.com/TykTechnologies/graphql-go-tools/pkg/engine/resolve -destination=resolve_mock_test.go -package=resolve . DataSource,BeforeFetchHook,AfterFetchHook,DataSourceBatch,DataSourceBatchFactory + +package resolve + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "strconv" + "sync" + "time" + + "github.com/buger/jsonparser" + "github.com/cespare/xxhash/v2" + "github.com/tidwall/gjson" + errors "golang.org/x/xerrors" + + "github.com/TykTechnologies/graphql-go-tools/internal/pkg/unsafebytes" + "github.com/TykTechnologies/graphql-go-tools/pkg/fastbuffer" + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/literal" + "github.com/TykTechnologies/graphql-go-tools/pkg/pool" +) + +var ( + lBrace = []byte("{") + rBrace = []byte("}") + lBrack = []byte("[") + rBrack = []byte("]") + comma = []byte(",") + colon = []byte(":") + quote = []byte("\"") + quotedComma = []byte(`","`) + null = []byte("null") + literalData = []byte("data") + literalErrors = []byte("errors") + literalMessage = []byte("message") + literalLocations = []byte("locations") + literalLine = []byte("line") + literalColumn = []byte("column") + literalPath = []byte("path") + literalExtensions = []byte("extensions") + + unableToResolveMsg = []byte("unable to resolve") + emptyArray = []byte("[]") +) + +var ( + errNonNullableFieldValueIsNull = errors.New("non Nullable field value is null") + errTypeNameSkipped = errors.New("skipped because of __typename condition") + errHeaderPathInvalid = errors.New("invalid header path: header variables must be of this format: .request.header.{{ key }} ") + + ErrUnableToResolve = errors.New("unable to resolve operation") +) + +var ( + responsePaths = [][]string{ + {"errors"}, + {"data"}, + } + errorPaths = [][]string{ + {"message"}, + {"locations"}, + {"path"}, + {"extensions"}, + } + entitiesPath = []string{"_entities"} +) + +const ( + rootErrorsPathIndex = 0 + rootDataPathIndex = 
1 + + errorsMessagePathIndex = 0 + errorsLocationsPathIndex = 1 + errorsPathPathIndex = 2 + errorsExtensionsPathIndex = 3 +) + +type Node interface { + NodeKind() NodeKind +} + +type NodeKind int +type FetchKind int + +const ( + NodeKindObject NodeKind = iota + 1 + NodeKindEmptyObject + NodeKindArray + NodeKindEmptyArray + NodeKindNull + NodeKindString + NodeKindBoolean + NodeKindInteger + NodeKindFloat + + FetchKindSingle FetchKind = iota + 1 + FetchKindParallel + FetchKindBatch +) + +type HookContext struct { + CurrentPath []byte +} + +type BeforeFetchHook interface { + OnBeforeFetch(ctx HookContext, input []byte) +} + +type AfterFetchHook interface { + OnData(ctx HookContext, output []byte, singleFlight bool) + OnError(ctx HookContext, output []byte, singleFlight bool) +} + +type Context struct { + context.Context + Variables []byte + Request Request + pathElements [][]byte + responseElements []string + lastFetchID int + patches []patch + usedBuffers []*bytes.Buffer + currentPatch int + maxPatch int + pathPrefix []byte + dataLoader *dataLoader + beforeFetchHook BeforeFetchHook + afterFetchHook AfterFetchHook + position Position + RenameTypeNames []RenameTypeName +} + +type Request struct { + Header http.Header +} + +func NewContext(ctx context.Context) *Context { + return &Context{ + Context: ctx, + Variables: make([]byte, 0, 4096), + pathPrefix: make([]byte, 0, 4096), + pathElements: make([][]byte, 0, 16), + patches: make([]patch, 0, 48), + usedBuffers: make([]*bytes.Buffer, 0, 48), + currentPatch: -1, + maxPatch: -1, + position: Position{}, + dataLoader: nil, + } +} + +func (c *Context) Clone() Context { + variables := make([]byte, len(c.Variables)) + copy(variables, c.Variables) + pathPrefix := make([]byte, len(c.pathPrefix)) + copy(pathPrefix, c.pathPrefix) + pathElements := make([][]byte, len(c.pathElements)) + for i := range pathElements { + pathElements[i] = make([]byte, len(c.pathElements[i])) + copy(pathElements[i], c.pathElements[i]) + } + patches := make([]patch, len(c.patches)) + for i := range patches { + patches[i] = patch{ + path: make([]byte, len(c.patches[i].path)), + extraPath: make([]byte, len(c.patches[i].extraPath)), + data: make([]byte, len(c.patches[i].data)), + index: c.patches[i].index, + } + copy(patches[i].path, c.patches[i].path) + copy(patches[i].extraPath, c.patches[i].extraPath) + copy(patches[i].data, c.patches[i].data) + } + return Context{ + Context: c.Context, + Variables: variables, + Request: c.Request, + pathElements: pathElements, + patches: patches, + usedBuffers: make([]*bytes.Buffer, 0, 48), + currentPatch: c.currentPatch, + maxPatch: c.maxPatch, + pathPrefix: pathPrefix, + beforeFetchHook: c.beforeFetchHook, + afterFetchHook: c.afterFetchHook, + position: c.position, + } +} + +func (c *Context) Free() { + c.Context = nil + c.Variables = c.Variables[:0] + c.pathPrefix = c.pathPrefix[:0] + c.pathElements = c.pathElements[:0] + c.patches = c.patches[:0] + for i := range c.usedBuffers { + pool.BytesBuffer.Put(c.usedBuffers[i]) + } + c.usedBuffers = c.usedBuffers[:0] + c.currentPatch = -1 + c.maxPatch = -1 + c.beforeFetchHook = nil + c.afterFetchHook = nil + c.Request.Header = nil + c.position = Position{} + c.dataLoader = nil + c.RenameTypeNames = nil +} + +func (c *Context) SetBeforeFetchHook(hook BeforeFetchHook) { + c.beforeFetchHook = hook +} + +func (c *Context) SetAfterFetchHook(hook AfterFetchHook) { + c.afterFetchHook = hook +} + +func (c *Context) setPosition(position Position) { + c.position = position +} + +func (c *Context) 
addResponseElements(elements []string) { + c.responseElements = append(c.responseElements, elements...) +} + +func (c *Context) addResponseArrayElements(elements []string) { + c.responseElements = append(c.responseElements, elements...) + c.responseElements = append(c.responseElements, arrayElementKey) +} + +func (c *Context) removeResponseLastElements(elements []string) { + c.responseElements = c.responseElements[:len(c.responseElements)-len(elements)] +} +func (c *Context) removeResponseArrayLastElements(elements []string) { + c.responseElements = c.responseElements[:len(c.responseElements)-(len(elements)+1)] +} + +func (c *Context) resetResponsePathElements() { + c.responseElements = nil +} + +func (c *Context) addPathElement(elem []byte) { + c.pathElements = append(c.pathElements, elem) +} + +func (c *Context) addIntegerPathElement(elem int) { + b := unsafebytes.StringToBytes(strconv.Itoa(elem)) + c.pathElements = append(c.pathElements, b) +} + +func (c *Context) removeLastPathElement() { + c.pathElements = c.pathElements[:len(c.pathElements)-1] +} + +func (c *Context) path() []byte { + buf := pool.BytesBuffer.Get() + c.usedBuffers = append(c.usedBuffers, buf) + if len(c.pathPrefix) != 0 { + buf.Write(c.pathPrefix) + } else { + buf.Write(literal.SLASH) + buf.Write(literal.DATA) + } + for i := range c.pathElements { + if i == 0 && bytes.Equal(literal.DATA, c.pathElements[0]) { + continue + } + _, _ = buf.Write(literal.SLASH) + _, _ = buf.Write(c.pathElements[i]) + } + return buf.Bytes() +} + +func (c *Context) addPatch(index int, path, extraPath, data []byte) { + next := patch{path: path, extraPath: extraPath, data: data, index: index} + c.patches = append(c.patches, next) + c.maxPatch++ +} + +func (c *Context) popNextPatch() (patch patch, ok bool) { + c.currentPatch++ + if c.currentPatch > c.maxPatch { + return patch, false + } + return c.patches[c.currentPatch], true +} + +type patch struct { + path, extraPath, data []byte + index int +} + +type Fetch interface { + FetchKind() FetchKind +} + +type Fetches []Fetch + +type DataSourceBatchFactory interface { + CreateBatch(inputs [][]byte) (DataSourceBatch, error) +} + +type DataSourceBatch interface { + Demultiplex(responseBufPair *BufPair, outputBuffers []*BufPair) (err error) + Input() *fastbuffer.FastBuffer +} + +type DataSource interface { + Load(ctx context.Context, input []byte, w io.Writer) (err error) +} + +type SubscriptionDataSource interface { + Start(ctx context.Context, input []byte, next chan<- []byte) error +} + +type Resolver struct { + ctx context.Context + dataLoaderEnabled bool + resultSetPool sync.Pool + byteSlicesPool sync.Pool + waitGroupPool sync.Pool + bufPairPool sync.Pool + bufPairSlicePool sync.Pool + errChanPool sync.Pool + hash64Pool sync.Pool + dataloaderFactory *dataLoaderFactory + fetcher *Fetcher +} + +type inflightFetch struct { + waitLoad sync.WaitGroup + waitFree sync.WaitGroup + err error + bufPair BufPair +} + +// New returns a new Resolver, ctx.Done() is used to cancel all active subscriptions & streams +func New(ctx context.Context, fetcher *Fetcher, enableDataLoader bool) *Resolver { + return &Resolver{ + ctx: ctx, + resultSetPool: sync.Pool{ + New: func() interface{} { + return &resultSet{ + buffers: make(map[int]*BufPair, 8), + } + }, + }, + byteSlicesPool: sync.Pool{ + New: func() interface{} { + slice := make([][]byte, 0, 24) + return &slice + }, + }, + waitGroupPool: sync.Pool{ + New: func() interface{} { + return &sync.WaitGroup{} + }, + }, + bufPairPool: sync.Pool{ + New: func() interface{} 
{ + pair := BufPair{ + Data: fastbuffer.New(), + Errors: fastbuffer.New(), + } + return &pair + }, + }, + bufPairSlicePool: sync.Pool{ + New: func() interface{} { + slice := make([]*BufPair, 0, 24) + return &slice + }, + }, + errChanPool: sync.Pool{ + New: func() interface{} { + return make(chan error, 1) + }, + }, + hash64Pool: sync.Pool{ + New: func() interface{} { + return xxhash.New() + }, + }, + dataloaderFactory: newDataloaderFactory(fetcher), + fetcher: fetcher, + dataLoaderEnabled: enableDataLoader, + } +} + +func (r *Resolver) resolveNode(ctx *Context, node Node, data []byte, bufPair *BufPair) (err error) { + switch n := node.(type) { + case *Object: + return r.resolveObject(ctx, n, data, bufPair) + case *Array: + return r.resolveArray(ctx, n, data, bufPair) + case *Null: + if n.Defer.Enabled { + r.preparePatch(ctx, n.Defer.PatchIndex, nil, data) + } + r.resolveNull(bufPair.Data) + return + case *String: + return r.resolveString(ctx, n, data, bufPair) + case *Boolean: + return r.resolveBoolean(ctx, n, data, bufPair) + case *Integer: + return r.resolveInteger(ctx, n, data, bufPair) + case *Float: + return r.resolveFloat(ctx, n, data, bufPair) + case *EmptyObject: + r.resolveEmptyObject(bufPair.Data) + return + case *EmptyArray: + r.resolveEmptyArray(bufPair.Data) + return + default: + return + } +} + +func (r *Resolver) validateContext(ctx *Context) (err error) { + if ctx.maxPatch != -1 || ctx.currentPatch != -1 { + return fmt.Errorf("Context must be resetted using Free() before re-using it") + } + return nil +} + +func extractResponse(responseData []byte, bufPair *BufPair, cfg ProcessResponseConfig) { + if len(responseData) == 0 { + return + } + + if !cfg.ExtractGraphqlResponse { + bufPair.Data.WriteBytes(responseData) + return + } + + jsonparser.EachKey(responseData, func(i int, bytes []byte, valueType jsonparser.ValueType, err error) { + switch i { + case rootErrorsPathIndex: + _, _ = jsonparser.ArrayEach(bytes, func(value []byte, dataType jsonparser.ValueType, offset int, err error) { + var ( + message, locations, path, extensions []byte + ) + jsonparser.EachKey(value, func(i int, bytes []byte, valueType jsonparser.ValueType, err error) { + switch i { + case errorsMessagePathIndex: + message = bytes + case errorsLocationsPathIndex: + locations = bytes + case errorsPathPathIndex: + path = bytes + case errorsExtensionsPathIndex: + extensions = bytes + } + }, errorPaths...) + if message != nil { + bufPair.WriteErr(message, locations, path, extensions) + } + }) + case rootDataPathIndex: + if cfg.ExtractFederationEntities { + data, _, _, _ := jsonparser.Get(bytes, entitiesPath...) + bufPair.Data.WriteBytes(data) + return + } + bufPair.Data.WriteBytes(bytes) + } + }, responsePaths...) 
+} + +func (r *Resolver) ResolveGraphQLResponse(ctx *Context, response *GraphQLResponse, data []byte, writer io.Writer) (err error) { + + buf := r.getBufPair() + defer r.freeBufPair(buf) + + responseBuf := r.getBufPair() + defer r.freeBufPair(responseBuf) + + extractResponse(data, responseBuf, ProcessResponseConfig{ExtractGraphqlResponse: true}) + + if data != nil { + ctx.lastFetchID = initialValueID + } + + if r.dataLoaderEnabled { + ctx.dataLoader = r.dataloaderFactory.newDataLoader(responseBuf.Data.Bytes()) + defer func() { + r.dataloaderFactory.freeDataLoader(ctx.dataLoader) + ctx.dataLoader = nil + }() + } + + ignoreData := false + err = r.resolveNode(ctx, response.Data, responseBuf.Data.Bytes(), buf) + if err != nil { + if !errors.Is(err, errNonNullableFieldValueIsNull) { + return + } + ignoreData = true + } + if responseBuf.Errors.Len() > 0 { + r.MergeBufPairErrors(responseBuf, buf) + } + + return writeGraphqlResponse(buf, writer, ignoreData) +} + +func writeAndFlush(writer FlushWriter, msg []byte) error { + _, err := writer.Write(msg) + if err != nil { + return err + } + writer.Flush() + return nil +} + +func (r *Resolver) ResolveGraphQLSubscription(ctx *Context, subscription *GraphQLSubscription, writer FlushWriter) (err error) { + + buf := r.getBufPair() + err = subscription.Trigger.InputTemplate.Render(ctx, nil, buf.Data) + if err != nil { + return + } + rendered := buf.Data.Bytes() + subscriptionInput := make([]byte, len(rendered)) + copy(subscriptionInput, rendered) + r.freeBufPair(buf) + + c, cancel := context.WithCancel(ctx) + defer cancel() + resolverDone := r.ctx.Done() + + next := make(chan []byte) + if subscription.Trigger.Source == nil { + msg := []byte(`{"errors":[{"message":"no data source found"}]}`) + return writeAndFlush(writer, msg) + } + + err = subscription.Trigger.Source.Start(c, subscriptionInput, next) + if err != nil { + if errors.Is(err, ErrUnableToResolve) { + msg := []byte(`{"errors":[{"message":"unable to resolve"}]}`) + return writeAndFlush(writer, msg) + } + return err + } + + for { + select { + case <-resolverDone: + return nil + default: + data, ok := <-next + if !ok { + return nil + } + err = r.ResolveGraphQLResponse(ctx, subscription.Response, data, writer) + if err != nil { + return err + } + writer.Flush() + } + } +} + +func (r *Resolver) ResolveGraphQLStreamingResponse(ctx *Context, response *GraphQLStreamingResponse, data []byte, writer FlushWriter) (err error) { + + if err := r.validateContext(ctx); err != nil { + return err + } + + err = r.ResolveGraphQLResponse(ctx, response.InitialResponse, data, writer) + if err != nil { + return err + } + writer.Flush() + + nextFlush := time.Now().Add(time.Millisecond * time.Duration(response.FlushInterval)) + + buf := pool.BytesBuffer.Get() + defer pool.BytesBuffer.Put(buf) + + buf.Write(literal.LBRACK) + + done := ctx.Context.Done() + +Loop: + for { + select { + case <-done: + return + default: + patch, ok := ctx.popNextPatch() + if !ok { + break Loop + } + + if patch.index > len(response.Patches)-1 { + continue + } + + if buf.Len() != 1 { + buf.Write(literal.COMMA) + } + + preparedPatch := response.Patches[patch.index] + err = r.ResolveGraphQLResponsePatch(ctx, preparedPatch, patch.data, patch.path, patch.extraPath, buf) + if err != nil { + return err + } + + now := time.Now() + if now.After(nextFlush) { + buf.Write(literal.RBRACK) + _, err = writer.Write(buf.Bytes()) + if err != nil { + return err + } + writer.Flush() + buf.Reset() + buf.Write(literal.LBRACK) + nextFlush = 
time.Now().Add(time.Millisecond * time.Duration(response.FlushInterval)) + } + } + } + + if buf.Len() != 1 { + buf.Write(literal.RBRACK) + _, err = writer.Write(buf.Bytes()) + if err != nil { + return err + } + writer.Flush() + } + + return +} + +func (r *Resolver) ResolveGraphQLResponsePatch(ctx *Context, patch *GraphQLResponsePatch, data, path, extraPath []byte, writer io.Writer) (err error) { + + buf := r.getBufPair() + defer r.freeBufPair(buf) + + ctx.pathPrefix = append(path, extraPath...) + + if patch.Fetch != nil { + set := r.getResultSet() + defer r.freeResultSet(set) + err = r.resolveFetch(ctx, patch.Fetch, data, set) + if err != nil { + return err + } + _, ok := set.buffers[0] + if ok { + r.MergeBufPairErrors(set.buffers[0], buf) + data = set.buffers[0].Data.Bytes() + } + } + + err = r.resolveNode(ctx, patch.Value, data, buf) + if err != nil { + return + } + + hasErrors := buf.Errors.Len() != 0 + hasData := buf.Data.Len() != 0 + + if hasErrors { + return + } + + if hasData { + if hasErrors { + err = writeSafe(err, writer, comma) + } + err = writeSafe(err, writer, lBrace) + err = writeSafe(err, writer, quote) + err = writeSafe(err, writer, literal.OP) + err = writeSafe(err, writer, quote) + err = writeSafe(err, writer, colon) + err = writeSafe(err, writer, quote) + err = writeSafe(err, writer, patch.Operation) + err = writeSafe(err, writer, quote) + err = writeSafe(err, writer, comma) + err = writeSafe(err, writer, quote) + err = writeSafe(err, writer, literal.PATH) + err = writeSafe(err, writer, quote) + err = writeSafe(err, writer, colon) + err = writeSafe(err, writer, quote) + err = writeSafe(err, writer, path) + err = writeSafe(err, writer, quote) + err = writeSafe(err, writer, comma) + err = writeSafe(err, writer, quote) + err = writeSafe(err, writer, literal.VALUE) + err = writeSafe(err, writer, quote) + err = writeSafe(err, writer, colon) + _, err = writer.Write(buf.Data.Bytes()) + err = writeSafe(err, writer, rBrace) + } + + return +} + +func (r *Resolver) resolveEmptyArray(b *fastbuffer.FastBuffer) { + b.WriteBytes(lBrack) + b.WriteBytes(rBrack) +} + +func (r *Resolver) resolveEmptyObject(b *fastbuffer.FastBuffer) { + b.WriteBytes(lBrace) + b.WriteBytes(rBrace) +} + +func (r *Resolver) resolveArray(ctx *Context, array *Array, data []byte, arrayBuf *BufPair) (err error) { + if len(array.Path) != 0 { + data, _, _, _ = jsonparser.Get(data, array.Path...) 
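+		// jsonparser.Get narrows data down to the nested array at the configured
+		// path (e.g. ["friends"], an illustrative path); on a missing path it
+		// returns empty data, which the emptiness/nullability checks below treat
+		// like an empty or null array.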
+ } + + if bytes.Equal(data, emptyArray) { + r.resolveEmptyArray(arrayBuf.Data) + return + } + + arrayItems := r.byteSlicesPool.Get().(*[][]byte) + defer func() { + *arrayItems = (*arrayItems)[:0] + r.byteSlicesPool.Put(arrayItems) + }() + + _, _ = jsonparser.ArrayEach(data, func(value []byte, dataType jsonparser.ValueType, offset int, err error) { + if err == nil && dataType == jsonparser.String { + value = data[offset-2 : offset+len(value)] // add quotes to string values + } + + *arrayItems = append(*arrayItems, value) + }) + + if len(*arrayItems) == 0 { + if !array.Nullable { + r.resolveEmptyArray(arrayBuf.Data) + return errNonNullableFieldValueIsNull + } + r.resolveNull(arrayBuf.Data) + return nil + } + + ctx.addResponseArrayElements(array.Path) + defer func() { ctx.removeResponseArrayLastElements(array.Path) }() + + if array.ResolveAsynchronous && !array.Stream.Enabled && !r.dataLoaderEnabled { + return r.resolveArrayAsynchronous(ctx, array, arrayItems, arrayBuf) + } + return r.resolveArraySynchronous(ctx, array, arrayItems, arrayBuf) +} + +func (r *Resolver) resolveArraySynchronous(ctx *Context, array *Array, arrayItems *[][]byte, arrayBuf *BufPair) (err error) { + + itemBuf := r.getBufPair() + defer r.freeBufPair(itemBuf) + + arrayBuf.Data.WriteBytes(lBrack) + var ( + hasPreviousItem bool + dataWritten int + ) + for i := range *arrayItems { + + if array.Stream.Enabled { + if i > array.Stream.InitialBatchSize-1 { + ctx.addIntegerPathElement(i) + r.preparePatch(ctx, array.Stream.PatchIndex, nil, (*arrayItems)[i]) + ctx.removeLastPathElement() + continue + } + } + + ctx.addIntegerPathElement(i) + err = r.resolveNode(ctx, array.Item, (*arrayItems)[i], itemBuf) + ctx.removeLastPathElement() + if err != nil { + if errors.Is(err, errNonNullableFieldValueIsNull) && array.Nullable { + arrayBuf.Data.Reset() + r.resolveNull(arrayBuf.Data) + return nil + } + if errors.Is(err, errTypeNameSkipped) { + err = nil + continue + } + return + } + dataWritten += itemBuf.Data.Len() + r.MergeBufPairs(itemBuf, arrayBuf, hasPreviousItem) + if !hasPreviousItem && dataWritten != 0 { + hasPreviousItem = true + } + } + + arrayBuf.Data.WriteBytes(rBrack) + return +} + +func (r *Resolver) resolveArrayAsynchronous(ctx *Context, array *Array, arrayItems *[][]byte, arrayBuf *BufPair) (err error) { + + arrayBuf.Data.WriteBytes(lBrack) + + bufSlice := r.getBufPairSlice() + defer r.freeBufPairSlice(bufSlice) + + wg := r.getWaitGroup() + defer r.freeWaitGroup(wg) + + errCh := r.getErrChan() + defer r.freeErrChan(errCh) + + wg.Add(len(*arrayItems)) + + for i := range *arrayItems { + itemBuf := r.getBufPair() + *bufSlice = append(*bufSlice, itemBuf) + itemData := (*arrayItems)[i] + cloned := ctx.Clone() + go func(ctx Context, i int) { + ctx.addPathElement([]byte(strconv.Itoa(i))) + if e := r.resolveNode(&ctx, array.Item, itemData, itemBuf); e != nil && !errors.Is(e, errTypeNameSkipped) { + select { + case errCh <- e: + default: + } + } + ctx.Free() + wg.Done() + }(cloned, i) + } + + wg.Wait() + + select { + case err = <-errCh: + default: + } + + if err != nil { + if errors.Is(err, errNonNullableFieldValueIsNull) && array.Nullable { + arrayBuf.Data.Reset() + r.resolveNull(arrayBuf.Data) + return nil + } + return + } + + var ( + hasPreviousItem bool + dataWritten int + ) + for i := range *bufSlice { + dataWritten += (*bufSlice)[i].Data.Len() + r.MergeBufPairs((*bufSlice)[i], arrayBuf, hasPreviousItem) + if !hasPreviousItem && dataWritten != 0 { + hasPreviousItem = true + } + } + + arrayBuf.Data.WriteBytes(rBrack) + return 
+}
+
+func (r *Resolver) exportField(ctx *Context, export *FieldExport, value []byte) {
+	if export == nil {
+		return
+	}
+	if export.AsString {
+		value = append(literal.QUOTE, append(value, literal.QUOTE...)...)
+	}
+	ctx.Variables, _ = jsonparser.Set(ctx.Variables, value, export.Path...)
+}
+
+func (r *Resolver) resolveInteger(ctx *Context, integer *Integer, data []byte, integerBuf *BufPair) error {
+	value, dataType, _, err := jsonparser.Get(data, integer.Path...)
+	if err != nil || dataType != jsonparser.Number {
+		if !integer.Nullable {
+			return errNonNullableFieldValueIsNull
+		}
+		r.resolveNull(integerBuf.Data)
+		return nil
+	}
+	integerBuf.Data.WriteBytes(value)
+	r.exportField(ctx, integer.Export, value)
+	return nil
+}
+
+func (r *Resolver) resolveFloat(ctx *Context, floatValue *Float, data []byte, floatBuf *BufPair) error {
+	value, dataType, _, err := jsonparser.Get(data, floatValue.Path...)
+	if err != nil || dataType != jsonparser.Number {
+		if !floatValue.Nullable {
+			return errNonNullableFieldValueIsNull
+		}
+		r.resolveNull(floatBuf.Data)
+		return nil
+	}
+	floatBuf.Data.WriteBytes(value)
+	r.exportField(ctx, floatValue.Export, value)
+	return nil
+}
+
+func (r *Resolver) resolveBoolean(ctx *Context, boolean *Boolean, data []byte, booleanBuf *BufPair) error {
+	value, valueType, _, err := jsonparser.Get(data, boolean.Path...)
+	if err != nil || valueType != jsonparser.Boolean {
+		if !boolean.Nullable {
+			return errNonNullableFieldValueIsNull
+		}
+		r.resolveNull(booleanBuf.Data)
+		return nil
+	}
+	booleanBuf.Data.WriteBytes(value)
+	r.exportField(ctx, boolean.Export, value)
+	return nil
+}
+
+func (r *Resolver) resolveString(ctx *Context, str *String, data []byte, stringBuf *BufPair) error {
+	var (
+		value     []byte
+		valueType jsonparser.ValueType
+		err       error
+	)
+
+	value, valueType, _, err = jsonparser.Get(data, str.Path...)
+	if err != nil || valueType != jsonparser.String {
+		if err == nil && str.UnescapeResponseJson {
+			switch valueType {
+			case jsonparser.Object, jsonparser.Array, jsonparser.Boolean, jsonparser.Number, jsonparser.Null:
+				stringBuf.Data.WriteBytes(value)
+				return nil
+			}
+		}
+		if value != nil && valueType != jsonparser.Null {
+			return fmt.Errorf("invalid value type '%s' for path %s, expecting string, got: %v. You can fix this by configuring this field as Int/Float/JSON Scalar", valueType, string(ctx.path()), string(value))
+		}
+		if !str.Nullable {
+			return errNonNullableFieldValueIsNull
+		}
+		r.resolveNull(stringBuf.Data)
+		return nil
+	}
+
+	if value == nil && !str.Nullable {
+		return errNonNullableFieldValueIsNull
+	}
+
+	if str.UnescapeResponseJson {
+		value = bytes.ReplaceAll(value, []byte(`\"`), []byte(`"`))
+
+		// Do not modify values which were strings.
+		// When the original value from the upstream response was a plain string value `"hello"`, `"true"`, `"1"`, `"2.0"`,
+		// jsonparser.Get returns the unquoted values `hello`, `true`, `1`, `2.0`,
+		// which are no longer valid JSON strings, so we need to quote them again.
+		if !(bytes.ContainsAny(value, `{}[]`) && gjson.ValidBytes(value)) {
+			// wrap value in quotes to make it valid JSON
+			value = append(literal.QUOTE, append(value, literal.QUOTE...)...)
+ } + + stringBuf.Data.WriteBytes(value) + r.exportField(ctx, str.Export, value) + return nil + } + + value = r.renameTypeName(ctx, str, value) + + stringBuf.Data.WriteBytes(quote) + stringBuf.Data.WriteBytes(value) + stringBuf.Data.WriteBytes(quote) + r.exportField(ctx, str.Export, value) + return nil +} + +func (r *Resolver) renameTypeName(ctx *Context, str *String, typeName []byte) []byte { + if !str.IsTypeName { + return typeName + } + for i := range ctx.RenameTypeNames { + if bytes.Equal(ctx.RenameTypeNames[i].From, typeName) { + return ctx.RenameTypeNames[i].To + } + } + return typeName +} + +func (r *Resolver) preparePatch(ctx *Context, patchIndex int, extraPath, data []byte) { + buf := pool.BytesBuffer.Get() + ctx.usedBuffers = append(ctx.usedBuffers, buf) + _, _ = buf.Write(data) + path, data := ctx.path(), buf.Bytes() + ctx.addPatch(patchIndex, path, extraPath, data) +} + +func (r *Resolver) resolveNull(b *fastbuffer.FastBuffer) { + b.WriteBytes(null) +} + +func (r *Resolver) addResolveError(ctx *Context, objectBuf *BufPair) { + locations, path := pool.BytesBuffer.Get(), pool.BytesBuffer.Get() + defer pool.BytesBuffer.Put(locations) + defer pool.BytesBuffer.Put(path) + + var pathBytes []byte + + locations.Write(lBrack) + locations.Write(lBrace) + locations.Write(quote) + locations.Write(literalLine) + locations.Write(quote) + locations.Write(colon) + locations.Write([]byte(strconv.Itoa(int(ctx.position.Line)))) + locations.Write(comma) + locations.Write(quote) + locations.Write(literalColumn) + locations.Write(quote) + locations.Write(colon) + locations.Write([]byte(strconv.Itoa(int(ctx.position.Column)))) + locations.Write(rBrace) + locations.Write(rBrack) + + if len(ctx.pathElements) > 0 { + path.Write(lBrack) + path.Write(quote) + path.Write(bytes.Join(ctx.pathElements, quotedComma)) + path.Write(quote) + path.Write(rBrack) + + pathBytes = path.Bytes() + } + + objectBuf.WriteErr(unableToResolveMsg, locations.Bytes(), pathBytes, nil) +} + +func (r *Resolver) resolveObject(ctx *Context, object *Object, data []byte, objectBuf *BufPair) (err error) { + if len(object.Path) != 0 { + data, _, _, _ = jsonparser.Get(data, object.Path...) 
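+		// A missing or null path yields empty data here; the check below then
+		// resolves the object to null (if it is nullable) or reports a
+		// non-nullable-field error.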
+ + if len(data) == 0 || bytes.Equal(data, literal.NULL) { + // we will not traverse the children if the object is null + // therefore, we must "pop" the null element from the batch + r.recursivelySkipBatchResults(ctx, object, data) + if object.Nullable { + r.resolveNull(objectBuf.Data) + return + } + + r.addResolveError(ctx, objectBuf) + return errNonNullableFieldValueIsNull + } + + ctx.addResponseElements(object.Path) + defer ctx.removeResponseLastElements(object.Path) + } + + if object.UnescapeResponseJson { + data = bytes.ReplaceAll(data, []byte(`\"`), []byte(`"`)) + } + + var set *resultSet + if object.Fetch != nil { + set = r.getResultSet() + defer r.freeResultSet(set) + err = r.resolveFetch(ctx, object.Fetch, data, set) + if err != nil { + return + } + for i := range set.buffers { + r.MergeBufPairErrors(set.buffers[i], objectBuf) + } + } + + fieldBuf := r.getBufPair() + defer r.freeBufPair(fieldBuf) + + responseElements := ctx.responseElements + lastFetchID := ctx.lastFetchID + + typeNameSkip := false + first := true + skipCount := 0 + for i := range object.Fields { + + if object.Fields[i].SkipDirectiveDefined { + skip, err := jsonparser.GetBoolean(ctx.Variables, object.Fields[i].SkipVariableName) + if err == nil && skip { + skipCount++ + continue + } + } + + if object.Fields[i].IncludeDirectiveDefined { + include, err := jsonparser.GetBoolean(ctx.Variables, object.Fields[i].IncludeVariableName) + if err != nil || !include { + skipCount++ + continue + } + } + + var fieldData []byte + if set != nil && object.Fields[i].HasBuffer { + buffer, ok := set.buffers[object.Fields[i].BufferID] + if ok { + fieldData = buffer.Data.Bytes() + ctx.resetResponsePathElements() + ctx.lastFetchID = object.Fields[i].BufferID + } + } else { + fieldData = data + } + + if bytes.Equal(object.Fields[i].Name, literal.TYPENAME) { + // Don't overwrite the existing __typename values. + _, _, _, err = jsonparser.Get(fieldData, string(literal.TYPENAME)) + if errors.Is(err, jsonparser.KeyPathNotFoundError) { + if len(fieldData) == 0 { + fieldData = []byte(`{}`) + } + typeNameValue, _ := json.Marshal(object.Fields[i].TypeName) + fieldData, err = jsonparser.Set(fieldData, typeNameValue, string(literal.TYPENAME)) + if err != nil { + return + } + } + } + + if object.Fields[i].OnTypeName != nil { + typeName, _, _, _ := jsonparser.Get(fieldData, "__typename") + if !bytes.Equal(typeName, object.Fields[i].OnTypeName) { + typeNameSkip = true + // Restore the response elements that may have been reset above. 
+				ctx.responseElements = responseElements
+				ctx.lastFetchID = lastFetchID
+				continue
+			}
+		}
+
+		if first {
+			objectBuf.Data.WriteBytes(lBrace)
+			first = false
+		} else {
+			objectBuf.Data.WriteBytes(comma)
+		}
+		objectBuf.Data.WriteBytes(quote)
+		objectBuf.Data.WriteBytes(object.Fields[i].Name)
+		objectBuf.Data.WriteBytes(quote)
+		objectBuf.Data.WriteBytes(colon)
+		ctx.addPathElement(object.Fields[i].Name)
+		ctx.setPosition(object.Fields[i].Position)
+		err = r.resolveNode(ctx, object.Fields[i].Value, fieldData, fieldBuf)
+		ctx.removeLastPathElement()
+		ctx.responseElements = responseElements
+		ctx.lastFetchID = lastFetchID
+		if err != nil {
+			if errors.Is(err, errTypeNameSkipped) {
+				objectBuf.Data.Reset()
+				r.resolveEmptyObject(objectBuf.Data)
+				return nil
+			}
+			if errors.Is(err, errNonNullableFieldValueIsNull) {
+				objectBuf.Data.Reset()
+				r.MergeBufPairErrors(fieldBuf, objectBuf)
+
+				if object.Nullable {
+					r.resolveNull(objectBuf.Data)
+					return nil
+				}
+
+				// if the field is of object type then we should not add a resolve error here
+				if _, ok := object.Fields[i].Value.(*Object); !ok {
+					r.addResolveError(ctx, objectBuf)
+				}
+			}
+
+			return
+		}
+		r.MergeBufPairs(fieldBuf, objectBuf, false)
+	}
+	allSkipped := len(object.Fields) != 0 && len(object.Fields) == skipCount
+	if allSkipped {
+		// return empty object if all fields have been skipped
+		objectBuf.Data.WriteBytes(lBrace)
+		objectBuf.Data.WriteBytes(rBrace)
+		return
+	}
+	if first {
+		if typeNameSkip && !object.Nullable {
+			return errTypeNameSkipped
+		}
+		if !object.Nullable {
+			r.addResolveError(ctx, objectBuf)
+			return errNonNullableFieldValueIsNull
+		}
+		r.resolveNull(objectBuf.Data)
+		return
+	}
+	objectBuf.Data.WriteBytes(rBrace)
+	return
+}
+
+// recursivelySkipBatchResults traverses an object and skips all batch results by triggering fetch
+// when a fetch is attached to an object using batch fetch, only the first object will actually trigger the fetch
+// subsequent objects (siblings) will load the result from the cache, filled by the first sibling
+// if one sibling has no data (null), we have to "pop" the null result (generated by the batch resolver) from the cache
+// this is because the "null" sibling will not trigger a fetch by itself, as it has no data and will not resolve any fields
+func (r *Resolver) recursivelySkipBatchResults(ctx *Context, object *Object, data []byte) {
+	if object.Fetch != nil && object.Fetch.FetchKind() == FetchKindBatch {
+		set := r.getResultSet()
+		defer r.freeResultSet(set)
+		_ = r.resolveFetch(ctx, object.Fetch, data, set)
+	}
+	for i := range object.Fields {
+		value := object.Fields[i].Value
+		switch v := value.(type) {
+		case *Object:
+			r.recursivelySkipBatchResults(ctx, v, data)
+		case *Array:
+			switch av := v.Item.(type) {
+			case *Object:
+				r.recursivelySkipBatchResults(ctx, av, data)
+			}
+		}
+	}
+}
+
+func (r *Resolver) freeResultSet(set *resultSet) {
+	for i := range set.buffers {
+		set.buffers[i].Reset()
+		r.bufPairPool.Put(set.buffers[i])
+		delete(set.buffers, i)
+	}
+	r.resultSetPool.Put(set)
+}
+
+func (r *Resolver) resolveFetch(ctx *Context, fetch Fetch, data []byte, set *resultSet) (err error) {
+
+	switch f := fetch.(type) {
+	case *SingleFetch:
+		preparedInput := r.getBufPair()
+		defer r.freeBufPair(preparedInput)
+		err = r.prepareSingleFetch(ctx, f, data, set, preparedInput.Data)
+		if err != nil {
+			return err
+		}
+		err = r.resolveSingleFetch(ctx, f, preparedInput.Data, set.buffers[f.BufferId])
+	case *BatchFetch:
+		preparedInput := r.getBufPair()
+		defer r.freeBufPair(preparedInput)
+
err = r.prepareSingleFetch(ctx, f.Fetch, data, set, preparedInput.Data) + if err != nil { + return err + } + err = r.resolveBatchFetch(ctx, f, preparedInput.Data, set.buffers[f.Fetch.BufferId]) + case *ParallelFetch: + err = r.resolveParallelFetch(ctx, f, data, set) + } + return +} + +func (r *Resolver) resolveParallelFetch(ctx *Context, fetch *ParallelFetch, data []byte, set *resultSet) (err error) { + preparedInputs := r.getBufPairSlice() + defer r.freeBufPairSlice(preparedInputs) + + resolvers := make([]func() error, 0, len(fetch.Fetches)) + + wg := r.getWaitGroup() + defer r.freeWaitGroup(wg) + + for i := range fetch.Fetches { + wg.Add(1) + switch f := fetch.Fetches[i].(type) { + case *SingleFetch: + preparedInput := r.getBufPair() + err = r.prepareSingleFetch(ctx, f, data, set, preparedInput.Data) + if err != nil { + return err + } + *preparedInputs = append(*preparedInputs, preparedInput) + buf := set.buffers[f.BufferId] + resolvers = append(resolvers, func() error { + return r.resolveSingleFetch(ctx, f, preparedInput.Data, buf) + }) + case *BatchFetch: + preparedInput := r.getBufPair() + err = r.prepareSingleFetch(ctx, f.Fetch, data, set, preparedInput.Data) + if err != nil { + return err + } + *preparedInputs = append(*preparedInputs, preparedInput) + buf := set.buffers[f.Fetch.BufferId] + resolvers = append(resolvers, func() error { + return r.resolveBatchFetch(ctx, f, preparedInput.Data, buf) + }) + } + } + + for _, resolver := range resolvers { + go func(r func() error) { + _ = r() + wg.Done() + }(resolver) + } + + wg.Wait() + + return +} + +func (r *Resolver) prepareSingleFetch(ctx *Context, fetch *SingleFetch, data []byte, set *resultSet, preparedInput *fastbuffer.FastBuffer) (err error) { + err = fetch.InputTemplate.Render(ctx, data, preparedInput) + buf := r.getBufPair() + set.buffers[fetch.BufferId] = buf + return +} + +func (r *Resolver) resolveBatchFetch(ctx *Context, fetch *BatchFetch, preparedInput *fastbuffer.FastBuffer, buf *BufPair) error { + if r.dataLoaderEnabled { + return ctx.dataLoader.LoadBatch(ctx, fetch, buf) + } + + if err := r.fetcher.FetchBatch(ctx, fetch, []*fastbuffer.FastBuffer{preparedInput}, []*BufPair{buf}); err != nil { + return err + } + + return nil +} + +func (r *Resolver) resolveSingleFetch(ctx *Context, fetch *SingleFetch, preparedInput *fastbuffer.FastBuffer, buf *BufPair) error { + if r.dataLoaderEnabled && !fetch.DisableDataLoader { + return ctx.dataLoader.Load(ctx, fetch, buf) + } + return r.fetcher.Fetch(ctx, fetch, preparedInput, buf) +} + +type Object struct { + Nullable bool + Path []string + Fields []*Field + Fetch Fetch + UnescapeResponseJson bool `json:"unescape_response_json,omitempty"` +} + +func (_ *Object) NodeKind() NodeKind { + return NodeKindObject +} + +type EmptyObject struct{} + +func (_ *EmptyObject) NodeKind() NodeKind { + return NodeKindEmptyObject +} + +type EmptyArray struct{} + +func (_ *EmptyArray) NodeKind() NodeKind { + return NodeKindEmptyArray +} + +type Field struct { + Name []byte + Value Node + Position Position + Defer *DeferField + Stream *StreamField + HasBuffer bool + BufferID int + OnTypeName []byte + TypeName string + SkipDirectiveDefined bool + SkipVariableName string + IncludeDirectiveDefined bool + IncludeVariableName string +} + +type Position struct { + Line uint32 + Column uint32 +} + +type StreamField struct { + InitialBatchSize int +} + +type DeferField struct{} + +type Null struct { + Defer Defer +} + +type Defer struct { + Enabled bool + PatchIndex int +} + +func (_ *Null) NodeKind() NodeKind { 
+ return NodeKindNull +} + +type resultSet struct { + buffers map[int]*BufPair +} + +type SingleFetch struct { + BufferId int + Input string + DataSource DataSource + Variables Variables + // DisallowSingleFlight is used for write operations like mutations, POST, DELETE etc. to disable singleFlight + // By default SingleFlight for fetches is disabled and needs to be enabled on the Resolver first + // If the resolver allows SingleFlight it's up to each individual DataSource Planner to decide whether an Operation + // should be allowed to use SingleFlight + DisallowSingleFlight bool + DisableDataLoader bool + InputTemplate InputTemplate + DataSourceIdentifier []byte + ProcessResponseConfig ProcessResponseConfig + // SetTemplateOutputToNullOnVariableNull will safely return "null" if one of the template variables renders to null + // This is the case, e.g. when using batching and one sibling is null, resulting in a null value for one batch item + // Returning null in this case tells the batch implementation to skip this item + SetTemplateOutputToNullOnVariableNull bool +} + +type ProcessResponseConfig struct { + ExtractGraphqlResponse bool + ExtractFederationEntities bool +} + +func (_ *SingleFetch) FetchKind() FetchKind { + return FetchKindSingle +} + +type ParallelFetch struct { + Fetches []Fetch +} + +func (_ *ParallelFetch) FetchKind() FetchKind { + return FetchKindParallel +} + +type BatchFetch struct { + Fetch *SingleFetch + BatchFactory DataSourceBatchFactory +} + +func (_ *BatchFetch) FetchKind() FetchKind { + return FetchKindBatch +} + +// FieldExport takes the value of the field during evaluation (rendering of the field) +// and stores it in the variables using the Path as JSON pointer. +type FieldExport struct { + Path []string + AsString bool +} + +type String struct { + Path []string + Nullable bool + Export *FieldExport `json:"export,omitempty"` + UnescapeResponseJson bool `json:"unescape_response_json,omitempty"` + IsTypeName bool `json:"is_type_name,omitempty"` +} + +func (_ *String) NodeKind() NodeKind { + return NodeKindString +} + +type Boolean struct { + Path []string + Nullable bool + Export *FieldExport `json:"export,omitempty"` +} + +func (_ *Boolean) NodeKind() NodeKind { + return NodeKindBoolean +} + +type Float struct { + Path []string + Nullable bool + Export *FieldExport `json:"export,omitempty"` +} + +func (_ *Float) NodeKind() NodeKind { + return NodeKindFloat +} + +type Integer struct { + Path []string + Nullable bool + Export *FieldExport `json:"export,omitempty"` +} + +func (_ *Integer) NodeKind() NodeKind { + return NodeKindInteger +} + +type Array struct { + Path []string + Nullable bool + ResolveAsynchronous bool + Item Node + Stream Stream +} + +type Stream struct { + Enabled bool + InitialBatchSize int + PatchIndex int +} + +func (_ *Array) NodeKind() NodeKind { + return NodeKindArray +} + +type GraphQLSubscription struct { + Trigger GraphQLSubscriptionTrigger + Response *GraphQLResponse +} + +type GraphQLSubscriptionTrigger struct { + Input []byte + InputTemplate InputTemplate + Variables Variables + Source SubscriptionDataSource +} + +type FlushWriter interface { + io.Writer + Flush() +} + +type GraphQLResponse struct { + Data Node + RenameTypeNames []RenameTypeName +} + +type RenameTypeName struct { + From, To []byte +} + +type GraphQLStreamingResponse struct { + InitialResponse *GraphQLResponse + Patches []*GraphQLResponsePatch + FlushInterval int64 +} + +type GraphQLResponsePatch struct { + Value Node + Fetch Fetch + Operation []byte +} + +type 
BufPair struct { + Data *fastbuffer.FastBuffer + Errors *fastbuffer.FastBuffer +} + +func NewBufPair() *BufPair { + return &BufPair{ + Data: fastbuffer.New(), + Errors: fastbuffer.New(), + } +} + +func (b *BufPair) HasData() bool { + return b.Data.Len() != 0 +} + +func (b *BufPair) HasErrors() bool { + return b.Errors.Len() != 0 +} + +func (b *BufPair) Reset() { + b.Data.Reset() + b.Errors.Reset() +} + +func (b *BufPair) writeErrors(data []byte) { + b.Errors.WriteBytes(data) +} + +func (b *BufPair) WriteErr(message, locations, path, extensions []byte) { + if b.HasErrors() { + b.writeErrors(comma) + } + b.writeErrors(lBrace) + b.writeErrors(quote) + b.writeErrors(literalMessage) + b.writeErrors(quote) + b.writeErrors(colon) + b.writeErrors(quote) + b.writeErrors(message) + b.writeErrors(quote) + + if locations != nil { + b.writeErrors(comma) + b.writeErrors(quote) + b.writeErrors(literalLocations) + b.writeErrors(quote) + b.writeErrors(colon) + b.writeErrors(locations) + } + + if path != nil { + b.writeErrors(comma) + b.writeErrors(quote) + b.writeErrors(literalPath) + b.writeErrors(quote) + b.writeErrors(colon) + b.writeErrors(path) + } + + if extensions != nil { + b.writeErrors(comma) + b.writeErrors(quote) + b.writeErrors(literalExtensions) + b.writeErrors(quote) + b.writeErrors(colon) + b.writeErrors(extensions) + } + + b.writeErrors(rBrace) +} + +func (r *Resolver) MergeBufPairs(from, to *BufPair, prefixDataWithComma bool) { + r.MergeBufPairData(from, to, prefixDataWithComma) + r.MergeBufPairErrors(from, to) +} + +func (r *Resolver) MergeBufPairData(from, to *BufPair, prefixDataWithComma bool) { + if !from.HasData() { + return + } + if prefixDataWithComma { + to.Data.WriteBytes(comma) + } + to.Data.WriteBytes(from.Data.Bytes()) + from.Data.Reset() +} + +func (r *Resolver) MergeBufPairErrors(from, to *BufPair) { + if !from.HasErrors() { + return + } + if to.HasErrors() { + to.Errors.WriteBytes(comma) + } + to.Errors.WriteBytes(from.Errors.Bytes()) + from.Errors.Reset() +} + +func (r *Resolver) freeBufPair(pair *BufPair) { + pair.Data.Reset() + pair.Errors.Reset() + r.bufPairPool.Put(pair) +} + +func (r *Resolver) getResultSet() *resultSet { + return r.resultSetPool.Get().(*resultSet) +} + +func (r *Resolver) getBufPair() *BufPair { + return r.bufPairPool.Get().(*BufPair) +} + +func (r *Resolver) getBufPairSlice() *[]*BufPair { + return r.bufPairSlicePool.Get().(*[]*BufPair) +} + +func (r *Resolver) freeBufPairSlice(slice *[]*BufPair) { + for i := range *slice { + r.freeBufPair((*slice)[i]) + } + *slice = (*slice)[:0] + r.bufPairSlicePool.Put(slice) +} + +func (r *Resolver) getErrChan() chan error { + return r.errChanPool.Get().(chan error) +} + +func (r *Resolver) freeErrChan(ch chan error) { + r.errChanPool.Put(ch) +} + +func (r *Resolver) getWaitGroup() *sync.WaitGroup { + return r.waitGroupPool.Get().(*sync.WaitGroup) +} + +func (r *Resolver) freeWaitGroup(wg *sync.WaitGroup) { + r.waitGroupPool.Put(wg) +} + +func writeGraphqlResponse(buf *BufPair, writer io.Writer, ignoreData bool) (err error) { + hasErrors := buf.Errors.Len() != 0 + hasData := buf.Data.Len() != 0 && !ignoreData + + err = writeSafe(err, writer, lBrace) + + if hasErrors { + err = writeSafe(err, writer, quote) + err = writeSafe(err, writer, literalErrors) + err = writeSafe(err, writer, quote) + err = writeSafe(err, writer, colon) + err = writeSafe(err, writer, lBrack) + err = writeSafe(err, writer, buf.Errors.Bytes()) + err = writeSafe(err, writer, rBrack) + err = writeSafe(err, writer, comma) + } + + err = 
writeSafe(err, writer, quote) + err = writeSafe(err, writer, literalData) + err = writeSafe(err, writer, quote) + err = writeSafe(err, writer, colon) + + if hasData { + _, err = writer.Write(buf.Data.Bytes()) + } else { + err = writeSafe(err, writer, literal.NULL) + } + err = writeSafe(err, writer, rBrace) + + return err +} + +func writeSafe(err error, writer io.Writer, data []byte) error { + if err != nil { + return err + } + _, err = writer.Write(data) + return err +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/resolve/variable.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/resolve/variable.go new file mode 100644 index 00000000000..efa0bd64991 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/engine/resolve/variable.go @@ -0,0 +1,640 @@ +package resolve + +import ( + "context" + "encoding/json" + "fmt" + "io" + "strconv" + + "github.com/buger/jsonparser" + + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/graphqljsonschema" + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/literal" +) + +type VariableKind int + +const ( + ContextVariableKind VariableKind = iota + 1 + ObjectVariableKind + HeaderVariableKind +) + +const ( + VariableRendererKindPlain = "plain" + VariableRendererKindPlanWithValidation = "plainWithValidation" + VariableRendererKindJson = "json" + VariableRendererKindJsonWithValidation = "jsonWithValidation" + VariableRendererKindGraphqlWithValidation = "graphqlWithValidation" + VariableRendererKindCsv = "csv" +) + +// VariableRenderer is the interface to allow custom implementations of rendering Variables +// Depending on where a Variable is being used, a different method for rendering is required +// E.g. a Variable needs to be rendered conforming to the GraphQL specification, when used within a GraphQL Query +// If a Variable is used within a JSON Object, the contents need to be rendered as a JSON Object +type VariableRenderer interface { + GetKind() string + RenderVariable(ctx context.Context, data []byte, out io.Writer) error +} + +// JSONVariableRenderer is an implementation of VariableRenderer +// It renders the provided data as JSON +// If configured, it also does a JSON Validation Check before rendering +type JSONVariableRenderer struct { + JSONSchema string + Kind string + validator *graphqljsonschema.Validator + rootValueType JsonRootType +} + +func (r *JSONVariableRenderer) GetKind() string { + return r.Kind +} + +func (r *JSONVariableRenderer) RenderVariable(ctx context.Context, data []byte, out io.Writer) error { + if r.validator != nil { + err := r.validator.Validate(ctx, data) + if err != nil { + return fmt.Errorf("could not render JSON variable, %w", err) + } + } + _, err := out.Write(data) + return err +} + +func NewJSONVariableRenderer() *JSONVariableRenderer { + return &JSONVariableRenderer{ + Kind: VariableRendererKindJson, + } +} + +func NewJSONVariableRendererWithValidation(jsonSchema string) *JSONVariableRenderer { + validator := graphqljsonschema.MustNewValidatorFromString(jsonSchema) + return &JSONVariableRenderer{ + Kind: VariableRendererKindJsonWithValidation, + JSONSchema: jsonSchema, + validator: validator, + } +} + +// NewJSONVariableRendererWithValidationFromTypeRef creates a new JSONVariableRenderer +// The argument typeRef must exist on the operation ast.Document, otherwise it will panic! 
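+// A minimal usage sketch (op, def, typeRef and buf are assumed to be an already
+// parsed operation, its schema definition, a variable type ref taken from op,
+// and a bytes.Buffer):
+//
+//	renderer, err := NewJSONVariableRendererWithValidationFromTypeRef(op, def, typeRef)
+//	if err != nil {
+//		return err
+//	}
+//	err = renderer.RenderVariable(ctx, []byte(`{"id":1}`), &buf)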
+func NewJSONVariableRendererWithValidationFromTypeRef(operation, definition *ast.Document, variableTypeRef int) (*JSONVariableRenderer, error) { + jsonSchema := graphqljsonschema.FromTypeRef(operation, definition, variableTypeRef) + validator, err := graphqljsonschema.NewValidatorFromSchema(jsonSchema) + if err != nil { + return nil, err + } + schemaBytes, err := json.Marshal(jsonSchema) + if err != nil { + return nil, err + } + return &JSONVariableRenderer{ + Kind: VariableRendererKindJsonWithValidation, + JSONSchema: string(schemaBytes), + validator: validator, + rootValueType: getJSONRootType(operation, definition, variableTypeRef), + }, nil +} + +func NewPlainVariableRenderer() *PlainVariableRenderer { + return &PlainVariableRenderer{ + Kind: VariableRendererKindPlain, + } +} + +func NewPlainVariableRendererWithValidation(jsonSchema string) *PlainVariableRenderer { + validator := graphqljsonschema.MustNewValidatorFromString(jsonSchema) + return &PlainVariableRenderer{ + Kind: VariableRendererKindPlanWithValidation, + JSONSchema: jsonSchema, + validator: validator, + } +} + +// NewPlainVariableRendererWithValidationFromTypeRef creates a new PlainVariableRenderer +// The argument typeRef must exist on the operation ast.Document, otherwise it will panic! +func NewPlainVariableRendererWithValidationFromTypeRef(operation, definition *ast.Document, variableTypeRef int, variablePath ...string) (*PlainVariableRenderer, error) { + var jsonSchema graphqljsonschema.JsonSchema + if len(variablePath) > 1 { + jsonSchema = graphqljsonschema.FromTypeRef(operation, definition, variableTypeRef, graphqljsonschema.WithPath(variablePath[1:])) + } else { + jsonSchema = graphqljsonschema.FromTypeRef(operation, definition, variableTypeRef) + } + + validator, err := graphqljsonschema.NewValidatorFromSchema(jsonSchema) + if err != nil { + return nil, err + } + schemaBytes, err := json.Marshal(jsonSchema) + if err != nil { + return nil, err + } + rootValueType := getJSONRootType(operation, definition, variableTypeRef) + return &PlainVariableRenderer{ + Kind: VariableRendererKindPlanWithValidation, + JSONSchema: string(schemaBytes), + validator: validator, + rootValueType: rootValueType, + }, nil +} + +// PlainVariableRenderer is an implementation of VariableRenderer +// It renders the provided data as plain text +// E.g. a provided JSON string of "foo" will be rendered as foo, without quotes. +// If a nested JSON Object is provided, it will be rendered as is. +// This renderer can be used e.g. to render the provided scalar into a URL. +type PlainVariableRenderer struct { + JSONSchema string + Kind string + validator *graphqljsonschema.Validator + rootValueType JsonRootType +} + +func (p *PlainVariableRenderer) GetKind() string { + return p.Kind +} + +func (p *PlainVariableRenderer) RenderVariable(ctx context.Context, data []byte, out io.Writer) error { + if p.validator != nil { + err := p.validator.Validate(ctx, data) + if err != nil { + return fmt.Errorf("could not render plain text variable, %w", err) + } + } + + data, _ = extractStringWithQuotes(p.rootValueType, data) + + _, err := out.Write(data) + return err +} + +// NewGraphQLVariableRendererFromTypeRef creates a new GraphQLVariableRenderer +// The argument typeRef must exist on the operation ast.Document, otherwise it will panic! 
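+// Illustrative difference to the JSON renderer (same assumed inputs as above):
+// rendering the value `{"a":"b"}` through a GraphQLVariableRenderer produces
+// `{a:\"b\"}` (unquoted keys, escaped string values) instead of plain JSON.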
+func NewGraphQLVariableRendererFromTypeRef(operation, definition *ast.Document, variableTypeRef int) (*GraphQLVariableRenderer, error) { + jsonSchema := graphqljsonschema.FromTypeRef(operation, definition, variableTypeRef) + validator, err := graphqljsonschema.NewValidatorFromSchema(jsonSchema) + if err != nil { + return nil, err + } + schemaBytes, err := json.Marshal(jsonSchema) + if err != nil { + return nil, err + } + return &GraphQLVariableRenderer{ + Kind: VariableRendererKindGraphqlWithValidation, + JSONSchema: string(schemaBytes), + validator: validator, + rootValueType: getJSONRootType(operation, definition, variableTypeRef), + }, nil +} + +func NewGraphQLVariableRendererFromTypeRefWithOverrides(operation, definition *ast.Document, variableTypeRef int, overrides map[string]graphqljsonschema.JsonSchema) (*GraphQLVariableRenderer, error) { + jsonSchema := graphqljsonschema.FromTypeRef(operation, definition, variableTypeRef, graphqljsonschema.WithOverrides(overrides)) + validator, err := graphqljsonschema.NewValidatorFromSchema(jsonSchema) + if err != nil { + return nil, err + } + schemaBytes, err := json.Marshal(jsonSchema) + if err != nil { + return nil, err + } + return &GraphQLVariableRenderer{ + Kind: VariableRendererKindGraphqlWithValidation, + JSONSchema: string(schemaBytes), + validator: validator, + rootValueType: getJSONRootType(operation, definition, variableTypeRef), + }, nil +} + +func NewGraphQLVariableRendererFromTypeRefWithoutValidation(operation, definition *ast.Document, variableTypeRef int) (*GraphQLVariableRenderer, error) { + return &GraphQLVariableRenderer{ + Kind: VariableRendererKindGraphqlWithValidation, + rootValueType: getJSONRootType(operation, definition, variableTypeRef), + }, nil +} + +// NewGraphQLVariableRendererFromJSONRootTypeWithoutValidation - to be used in tests only +func NewGraphQLVariableRendererFromJSONRootTypeWithoutValidation(rootType JsonRootType) (*GraphQLVariableRenderer, error) { + return &GraphQLVariableRenderer{ + Kind: VariableRendererKindGraphqlWithValidation, + rootValueType: rootType, + }, nil +} + +// NewGraphQLVariableRenderer - to be used in tests only +func NewGraphQLVariableRenderer(jsonSchema string) *GraphQLVariableRenderer { + validator := graphqljsonschema.MustNewValidatorFromString(jsonSchema) + rootValueType, err := graphqljsonschema.TopLevelType(jsonSchema) + if err != nil { + panic(err) + } + return &GraphQLVariableRenderer{ + Kind: VariableRendererKindGraphqlWithValidation, + JSONSchema: jsonSchema, + validator: validator, + rootValueType: JsonRootType{ + Value: rootValueType, + Kind: JsonRootTypeKindSingle, + }, + } +} + +type JsonRootTypeKind int + +const ( + JsonRootTypeKindSingle JsonRootTypeKind = iota + JsonRootTypeKindMultiple +) + +type JsonRootType struct { + Value jsonparser.ValueType + Values []jsonparser.ValueType + Kind JsonRootTypeKind +} + +func (t JsonRootType) Satisfies(dataType jsonparser.ValueType) bool { + switch t.Kind { + case JsonRootTypeKindSingle: + return dataType == t.Value + case JsonRootTypeKindMultiple: + for _, valueType := range t.Values { + if dataType == valueType { + return true + } + } + } + + return false +} + +func getJSONRootType(operation, definition *ast.Document, variableTypeRef int) JsonRootType { + variableTypeRef = operation.ResolveListOrNameType(variableTypeRef) + if operation.TypeIsList(variableTypeRef) { + return JsonRootType{ + Value: jsonparser.Array, + Kind: JsonRootTypeKindSingle, + } + } + + name := operation.TypeNameString(variableTypeRef) + node, exists := 
definition.Index.FirstNodeByNameStr(name) + if !exists { + return JsonRootType{ + Value: jsonparser.Unknown, + Kind: JsonRootTypeKindSingle, + } + } + + defTypeRef := node.Ref + + if node.Kind == ast.NodeKindEnumTypeDefinition { + return JsonRootType{ + Value: jsonparser.String, + Kind: JsonRootTypeKindSingle, + } + } + if node.Kind == ast.NodeKindScalarTypeDefinition { + typeName := definition.ScalarTypeDefinitionNameString(defTypeRef) + switch typeName { + case "Boolean": + return JsonRootType{ + Value: jsonparser.Boolean, + Kind: JsonRootTypeKindSingle, + } + case "Int", "Float": + return JsonRootType{ + Value: jsonparser.Number, + Kind: JsonRootTypeKindSingle, + } + case "ID": + return JsonRootType{ + Values: []jsonparser.ValueType{jsonparser.String, jsonparser.Number}, + Kind: JsonRootTypeKindMultiple, + } + case "String", "Date": + return JsonRootType{ + Value: jsonparser.String, + Kind: JsonRootTypeKindSingle, + } + case "_Any": + return JsonRootType{ + Value: jsonparser.Object, + Kind: JsonRootTypeKindSingle, + } + default: + return JsonRootType{ + Value: jsonparser.String, + Kind: JsonRootTypeKindSingle, + } + } + } + + return JsonRootType{ + Value: jsonparser.Object, + Kind: JsonRootTypeKindSingle, + } +} + +// GraphQLVariableRenderer is an implementation of VariableRenderer +// It renders variables according to the GraphQL Specification +type GraphQLVariableRenderer struct { + JSONSchema string + Kind string + validator *graphqljsonschema.Validator + rootValueType JsonRootType +} + +func (g *GraphQLVariableRenderer) GetKind() string { + return g.Kind +} + +// add renderer that renders both variable name and variable value +// before rendering, evaluate if the value contains null values +// if an object contains only null values, set the object to null +// do this recursively until reaching the root of the object + +func (g *GraphQLVariableRenderer) RenderVariable(ctx context.Context, data []byte, out io.Writer) error { + if g.validator != nil { + err := g.validator.Validate(ctx, data) + if err != nil { + return fmt.Errorf("could not render GraphQL variable, %w", err) + } + } + + var desiredType jsonparser.ValueType + data, desiredType = extractStringWithQuotes(g.rootValueType, data) + + return g.renderGraphQLValue(data, desiredType, out) +} + +func (g *GraphQLVariableRenderer) renderGraphQLValue(data []byte, valueType jsonparser.ValueType, out io.Writer) (err error) { + switch valueType { + case jsonparser.String: + _, _ = out.Write(literal.BACKSLASH) + _, _ = out.Write(literal.QUOTE) + for i := range data { + switch data[i] { + case '"': + _, _ = out.Write(literal.BACKSLASH) + _, _ = out.Write(literal.BACKSLASH) + _, _ = out.Write(literal.QUOTE) + default: + _, _ = out.Write(data[i : i+1]) + } + } + _, _ = out.Write(literal.BACKSLASH) + _, _ = out.Write(literal.QUOTE) + case jsonparser.Object: + _, _ = out.Write(literal.LBRACE) + first := true + err = jsonparser.ObjectEach(data, func(key []byte, value []byte, objectFieldValueType jsonparser.ValueType, offset int) error { + if !first { + _, _ = out.Write(literal.COMMA) + } else { + first = false + } + _, _ = out.Write(key) + _, _ = out.Write(literal.COLON) + return g.renderGraphQLValue(value, objectFieldValueType, out) + }) + if err != nil { + return err + } + _, _ = out.Write(literal.RBRACE) + case jsonparser.Null: + _, _ = out.Write(literal.NULL) + case jsonparser.Boolean: + _, _ = out.Write(data) + case jsonparser.Array: + _, _ = out.Write(literal.LBRACK) + first := true + var arrayErr error + _, err = 
jsonparser.ArrayEach(data, func(value []byte, arrayItemValueType jsonparser.ValueType, offset int, err error) { + if !first { + _, _ = out.Write(literal.COMMA) + } else { + first = false + } + arrayErr = g.renderGraphQLValue(value, arrayItemValueType, out) + }) + if arrayErr != nil { + return arrayErr + } + if err != nil { + return err + } + _, _ = out.Write(literal.RBRACK) + case jsonparser.Number: + _, _ = out.Write(data) + } + return +} + +func NewCSVVariableRenderer(arrayValueType JsonRootType) *CSVVariableRenderer { + return &CSVVariableRenderer{ + Kind: VariableRendererKindCsv, + arrayValueType: arrayValueType, + } +} + +func NewCSVVariableRendererFromTypeRef(operation, definition *ast.Document, variableTypeRef int) *CSVVariableRenderer { + return &CSVVariableRenderer{ + Kind: VariableRendererKindCsv, + arrayValueType: getJSONRootType(operation, definition, variableTypeRef), + } +} + +// CSVVariableRenderer is an implementation of VariableRenderer +// It renders the provided list of Values as comma separated Values in plaintext (no JSON encoding of Values) +type CSVVariableRenderer struct { + Kind string + arrayValueType JsonRootType +} + +func (c *CSVVariableRenderer) GetKind() string { + return c.Kind +} + +func (c *CSVVariableRenderer) RenderVariable(_ context.Context, data []byte, out io.Writer) error { + isFirst := true + _, err := jsonparser.ArrayEach(data, func(value []byte, dataType jsonparser.ValueType, offset int, err error) { + if !c.arrayValueType.Satisfies(dataType) { + return + } + + if isFirst { + isFirst = false + } else { + _, _ = out.Write(literal.COMMA) + } + _, _ = out.Write(value) + }) + return err +} + +type ContextVariable struct { + Path []string + Renderer VariableRenderer +} + +func (c *ContextVariable) TemplateSegment() TemplateSegment { + return TemplateSegment{ + SegmentType: VariableSegmentType, + VariableKind: ContextVariableKind, + VariableSourcePath: c.Path, + Renderer: c.Renderer, + } +} + +func (c *ContextVariable) Equals(another Variable) bool { + if another == nil { + return false + } + if another.GetVariableKind() != c.GetVariableKind() { + return false + } + anotherContextVariable := another.(*ContextVariable) + if len(c.Path) != len(anotherContextVariable.Path) { + return false + } + for i := range c.Path { + if c.Path[i] != anotherContextVariable.Path[i] { + return false + } + } + return true +} + +func (_ *ContextVariable) GetVariableKind() VariableKind { + return ContextVariableKind +} + +type ObjectVariable struct { + Path []string + Renderer VariableRenderer +} + +func (o *ObjectVariable) TemplateSegment() TemplateSegment { + return TemplateSegment{ + SegmentType: VariableSegmentType, + VariableKind: ObjectVariableKind, + VariableSourcePath: o.Path, + Renderer: o.Renderer, + } +} + +func (o *ObjectVariable) Equals(another Variable) bool { + if another == nil { + return false + } + if another.GetVariableKind() != o.GetVariableKind() { + return false + } + anotherObjectVariable := another.(*ObjectVariable) + if len(o.Path) != len(anotherObjectVariable.Path) { + return false + } + for i := range o.Path { + if o.Path[i] != anotherObjectVariable.Path[i] { + return false + } + } + return true +} + +func (o *ObjectVariable) GetVariableKind() VariableKind { + return ObjectVariableKind +} + +type HeaderVariable struct { + Path []string +} + +func (h *HeaderVariable) TemplateSegment() TemplateSegment { + return TemplateSegment{ + SegmentType: VariableSegmentType, + VariableKind: HeaderVariableKind, + VariableSourcePath: h.Path, + } +} + +func (h 
*HeaderVariable) GetVariableKind() VariableKind {
+	return HeaderVariableKind
+}
+
+func (h *HeaderVariable) Equals(another Variable) bool {
+	if another == nil {
+		return false
+	}
+	if another.GetVariableKind() != h.GetVariableKind() {
+		return false
+	}
+	anotherHeaderVariable := another.(*HeaderVariable)
+	if len(h.Path) != len(anotherHeaderVariable.Path) {
+		return false
+	}
+	for i := range h.Path {
+		if h.Path[i] != anotherHeaderVariable.Path[i] {
+			return false
+		}
+	}
+	return true
+}
+
+type Variable interface {
+	GetVariableKind() VariableKind
+	Equals(another Variable) bool
+	TemplateSegment() TemplateSegment
+}
+
+type Variables []Variable
+
+func NewVariables(variables ...Variable) Variables {
+	return variables
+}
+
+const (
+	variablePrefixSuffix = "$$"
+)
+
+func (v *Variables) AddVariable(variable Variable) (name string, exists bool) {
+	index := -1
+	for i := range *v {
+		if (*v)[i].Equals(variable) {
+			index = i
+			exists = true
+			break
+		}
+	}
+	if index == -1 {
+		*v = append(*v, variable)
+		index = len(*v) - 1
+	}
+	i := strconv.Itoa(index)
+	name = variablePrefixSuffix + i + variablePrefixSuffix
+	return
+}
+
+type VariableSchema struct {
+}
+
+func extractStringWithQuotes(rootValueType JsonRootType, data []byte) ([]byte, jsonparser.ValueType) {
+	desiredType := jsonparser.Unknown
+	switch rootValueType.Kind {
+	case JsonRootTypeKindSingle:
+		desiredType = rootValueType.Value
+	case JsonRootTypeKindMultiple:
+		_, tt, _, _ := jsonparser.Get(data)
+		if rootValueType.Satisfies(tt) {
+			desiredType = tt
+		}
+	}
+	if desiredType == jsonparser.String {
+		return data[1 : len(data)-1], desiredType
+	}
+	return data, desiredType
+}
diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/execution/datasource/datasource.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/execution/datasource/datasource.go
new file mode 100644
index 00000000000..e675b5a92fd
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/execution/datasource/datasource.go
@@ -0,0 +1,272 @@
+package datasource
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"io"
+	"net/http"
+	"net/url"
+	"strings"
+	"time"
+
+	"github.com/jensneuse/abstractlogger"
+
+	"github.com/TykTechnologies/graphql-go-tools/pkg/ast"
+	"github.com/TykTechnologies/graphql-go-tools/pkg/astparser"
+	"github.com/TykTechnologies/graphql-go-tools/pkg/asttransform"
+	"github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor"
+)
+
+var RootTypeName = []byte("root_type_name")
+var RootFieldName = []byte("root_field_name")
+
+var defaultHttpClient *http.Client
+
+func DefaultHttpClient() *http.Client {
+	if defaultHttpClient == nil {
+		defaultHttpClient = &http.Client{
+			Timeout: time.Second * 10,
+			Transport: &http.Transport{
+				MaxIdleConnsPerHost: 1024,
+				TLSHandshakeTimeout: 0 * time.Second,
+			},
+		}
+	}
+
+	return defaultHttpClient
+}
+
+type ResolverArgs interface {
+	ByKey(key []byte) []byte
+	Dump() []string
+	Keys() [][]byte
+}
+
+type DataSource interface {
+	Resolve(ctx context.Context, args ResolverArgs, out io.Writer) (n int, err error)
+}
+
+type Planner interface {
+	CorePlanner
+	PlannerVisitors
+}
+
+type CorePlanner interface {
+	// Plan returns the preconfigured DataSource as well as the Arguments
+	// During runtime the arguments get resolved and passed to the DataSource
+	Plan(args []Argument) (DataSource, []Argument)
+	// Configure is the function to initialize all important values for the Planner to function correctly
+	// You probably need access to the Walker, Operation
and ObjectDefinition to use the Planner to its full power + // Walker gives you useful information from within all visitor Callbacks, e.g. the Path & Ancestors + // Operation is the AST of the GraphQL Operation + // ObjectDefinition is the AST of the GraphQL schema ObjectDefinition + // Args are the pre-calculated Arguments from the planner + // resolverParameters are the parameters from the @directive params field + Configure(operation, definition *ast.Document, walker *astvisitor.Walker) +} + +type PlannerVisitors interface { + astvisitor.EnterDocumentVisitor + astvisitor.EnterInlineFragmentVisitor + astvisitor.LeaveInlineFragmentVisitor + astvisitor.EnterSelectionSetVisitor + astvisitor.LeaveSelectionSetVisitor + astvisitor.EnterFieldVisitor + astvisitor.EnterArgumentVisitor + astvisitor.LeaveFieldVisitor +} + +type PlannerFactory interface { + DataSourcePlanner() Planner +} + +type PlannerFactoryFactory interface { + Initialize(base BasePlanner, configReader io.Reader) (PlannerFactory, error) +} + +type BasePlanner struct { + Log abstractlogger.Logger + Walker *astvisitor.Walker // nolint + Definition, Operation *ast.Document // nolint + Args []Argument // nolint + RootField rootField // nolint + Config PlannerConfiguration // nolint +} + +func NewBaseDataSourcePlanner(schema []byte, config PlannerConfiguration, logger abstractlogger.Logger) (*BasePlanner, error) { + definition, report := astparser.ParseGraphqlDocumentBytes(schema) + if report.HasErrors() { + return nil, report + } + + err := asttransform.MergeDefinitionWithBaseSchema(&definition) + if err != nil { + return nil, err + } + + return &BasePlanner{ + Config: config, + Log: logger, + Definition: &definition, + }, nil +} + +func (b *BasePlanner) Configure(operation, definition *ast.Document, walker *astvisitor.Walker) { + b.Operation, b.Definition, b.Walker = operation, definition, walker +} + +func (b *BasePlanner) RegisterDataSourcePlannerFactory(dataSourceName string, factory PlannerFactoryFactory) (err error) { + for i := range b.Config.TypeFieldConfigurations { + if dataSourceName != b.Config.TypeFieldConfigurations[i].DataSource.Name { + continue + } + configReader := bytes.NewReader(b.Config.TypeFieldConfigurations[i].DataSource.Config) + b.Config.TypeFieldConfigurations[i].DataSourcePlannerFactory, err = factory.Initialize(*b, configReader) + if err != nil { + return err + } + } + return nil +} + +type PlannerConfiguration struct { + TypeFieldConfigurations []TypeFieldConfiguration +} + +type TypeFieldConfiguration struct { + TypeName string `bson:"type_name" json:"type_name"` + FieldName string `bson:"field_name" json:"field_name"` + Mapping *MappingConfiguration `bson:"mapping" json:"mapping"` + DataSource SourceConfig `bson:"data_source" json:"data_source"` + DataSourcePlannerFactory PlannerFactory `bson:"-" json:"-"` +} + +type SourceConfig struct { + // Kind defines the unique identifier of the DataSource + // Kind needs to match to the Planner "DataSourceName" name + Name string `bson:"kind" json:"kind"` + // Config is the DataSource specific configuration object + // Each Planner needs to make sure to parse their Config Object correctly + Config json.RawMessage `bson:"data_source_config" json:"data_source_config"` +} + +type MappingConfiguration struct { + Disabled bool `bson:"disabled" json:"disabled"` + Path string `bson:"path" json:"path"` +} + +func (p *PlannerConfiguration) DataSourcePlannerFactoryForTypeField(typeName, fieldName string) PlannerFactory { + for i := range p.TypeFieldConfigurations { + if 
strings.EqualFold(p.TypeFieldConfigurations[i].TypeName, typeName) && strings.EqualFold(p.TypeFieldConfigurations[i].FieldName, fieldName) { + return p.TypeFieldConfigurations[i].DataSourcePlannerFactory + } + } + return nil +} + +func (p *PlannerConfiguration) MappingForTypeField(typeName, fieldName string) *MappingConfiguration { + for i := range p.TypeFieldConfigurations { + if strings.EqualFold(p.TypeFieldConfigurations[i].TypeName, typeName) && strings.EqualFold(p.TypeFieldConfigurations[i].FieldName, fieldName) { + return p.TypeFieldConfigurations[i].Mapping + } + } + return nil +} + +type rootField struct { + isDefined bool + ref int +} + +func (r *rootField) SetIfNotDefined(ref int) { + if r.isDefined { + return + } + r.isDefined = true + r.ref = ref +} + +func (r *rootField) IsDefinedAndEquals(ref int) bool { + return r.isDefined && r.ref == ref +} + +type visitingDataSourcePlanner struct { + CorePlanner +} + +func (_ visitingDataSourcePlanner) EnterDocument(operation, definition *ast.Document) {} +func (_ visitingDataSourcePlanner) EnterInlineFragment(ref int) {} +func (_ visitingDataSourcePlanner) LeaveInlineFragment(ref int) {} +func (_ visitingDataSourcePlanner) EnterSelectionSet(ref int) {} +func (_ visitingDataSourcePlanner) LeaveSelectionSet(ref int) {} +func (_ visitingDataSourcePlanner) EnterField(ref int) {} +func (_ visitingDataSourcePlanner) EnterArgument(ref int) {} +func (_ visitingDataSourcePlanner) LeaveField(ref int) {} + +func SimpleDataSourcePlanner(core CorePlanner) Planner { + return &visitingDataSourcePlanner{ + CorePlanner: core, + } +} + +type Argument interface { + ArgName() []byte +} + +type ContextVariableArgument struct { + Name []byte + VariableName []byte +} + +func (c *ContextVariableArgument) ArgName() []byte { + return c.Name +} + +type PathSelector struct { + Path string +} + +type ObjectVariableArgument struct { + Name []byte + PathSelector PathSelector +} + +func (o *ObjectVariableArgument) ArgName() []byte { + return o.Name +} + +type StaticVariableArgument struct { + Name []byte + Value []byte +} + +func (s *StaticVariableArgument) ArgName() []byte { + return s.Name +} + +type ListArgument struct { + Name []byte + Arguments []Argument +} + +func (l ListArgument) ArgName() []byte { + return l.Name +} + +func isWhitelistedScheme(scheme string, whitelistedSchemes []string, defaultSchemes []string) bool { + schemes := append(whitelistedSchemes, defaultSchemes...) 
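+	// Note that the effective whitelist is the union of the caller-provided and
+	// default schemes, so a default scheme such as "http" passes even when the
+	// caller whitelists only "https".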
+ for _, whitelistedScheme := range schemes { + if scheme == whitelistedScheme { + return true + } + } + + return false +} + +func parseURLBytes(urlArg []byte) (parsedURL *url.URL, rawURL string, err error) { + rawURL = string(urlArg) + parsedURL, err = url.Parse(rawURL) + return parsedURL, rawURL, err +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/execution/datasource/datasource_graphql.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/execution/datasource/datasource_graphql.go new file mode 100644 index 00000000000..b18c87e4977 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/execution/datasource/datasource_graphql.go @@ -0,0 +1,471 @@ +package datasource + +import ( + "bytes" + "context" + "encoding/json" + "io" + "io/ioutil" + "net/http" + + "github.com/buger/jsonparser" + "github.com/cespare/xxhash/v2" + log "github.com/jensneuse/abstractlogger" + + "github.com/TykTechnologies/graphql-go-tools/internal/pkg/unsafebytes" + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astimport" + "github.com/TykTechnologies/graphql-go-tools/pkg/astprinter" + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/literal" +) + +var graphqlSchemes = []string{ + "https", + "http", +} + +type GraphqlRequest struct { + OperationName string `json:"operationName"` + Variables json.RawMessage `json:"variables"` + Query string `json:"query"` +} + +// GraphQLDataSourceConfig is the configuration for the GraphQL DataSource +type GraphQLDataSourceConfig struct { + // URL is the url of the upstream + URL string `bson:"url" json:"url"` + // Method is the http.Method of the upstream, defaults to POST (optional) + Method *string `bson:"method" json:"method"` +} + +type GraphQLDataSourcePlanner struct { + BasePlanner + importer *astimport.Importer + nodes []ast.Node + resolveDocument *ast.Document + dataSourceConfiguration GraphQLDataSourceConfig + client *http.Client + whitelistedSchemes []string + whitelistedVariableRefs []int + whitelistedVariableNameHashs map[uint64]bool + hooks Hooks +} + +type GraphQLDataSourcePlannerFactoryFactory struct { + Client *http.Client + WhitelistedSchemes []string + Hooks Hooks +} + +func (g *GraphQLDataSourcePlannerFactoryFactory) httpClient() *http.Client { + if g.Client != nil { + return g.Client + } + return DefaultHttpClient() +} + +func (g GraphQLDataSourcePlannerFactoryFactory) Initialize(base BasePlanner, configReader io.Reader) (PlannerFactory, error) { + factory := &GraphQLDataSourcePlannerFactory{ + base: base, + client: g.httpClient(), + whitelistedSchemes: g.WhitelistedSchemes, + hooks: g.Hooks, + } + err := json.NewDecoder(configReader).Decode(&factory.config) + return factory, err +} + +type GraphQLDataSourcePlannerFactory struct { + base BasePlanner + config GraphQLDataSourceConfig + client *http.Client + whitelistedSchemes []string + hooks Hooks +} + +func (g *GraphQLDataSourcePlannerFactory) DataSourcePlanner() Planner { + return &GraphQLDataSourcePlanner{ + BasePlanner: g.base, + importer: &astimport.Importer{}, + dataSourceConfiguration: g.config, + resolveDocument: &ast.Document{}, + client: g.client, + whitelistedSchemes: g.whitelistedSchemes, + whitelistedVariableRefs: []int{}, + whitelistedVariableNameHashs: map[uint64]bool{}, + hooks: g.hooks, + } +} + +func (g *GraphQLDataSourcePlanner) EnterDocument(operation, definition *ast.Document) { + g.whitelistedVariableRefs = g.whitelistedVariableRefs[:0] +} + +func (g *GraphQLDataSourcePlanner) 
EnterInlineFragment(ref int) { + if len(g.nodes) == 0 { + return + } + current := g.nodes[len(g.nodes)-1] + if current.Kind != ast.NodeKindSelectionSet { + return + } + inlineFragmentType := g.importer.ImportType(g.Operation.InlineFragments[ref].TypeCondition.Type, g.Operation, g.resolveDocument) + g.resolveDocument.InlineFragments = append(g.resolveDocument.InlineFragments, ast.InlineFragment{ + TypeCondition: ast.TypeCondition{ + Type: inlineFragmentType, + }, + SelectionSet: -1, + }) + inlineFragmentRef := len(g.resolveDocument.InlineFragments) - 1 + g.resolveDocument.Selections = append(g.resolveDocument.Selections, ast.Selection{ + Kind: ast.SelectionKindInlineFragment, + Ref: inlineFragmentRef, + }) + selectionRef := len(g.resolveDocument.Selections) - 1 + g.resolveDocument.SelectionSets[current.Ref].SelectionRefs = append(g.resolveDocument.SelectionSets[current.Ref].SelectionRefs, selectionRef) + g.nodes = append(g.nodes, ast.Node{ + Kind: ast.NodeKindInlineFragment, + Ref: inlineFragmentRef, + }) +} + +func (g *GraphQLDataSourcePlanner) LeaveInlineFragment(ref int) { + g.nodes = g.nodes[:len(g.nodes)-1] +} + +func (g *GraphQLDataSourcePlanner) EnterSelectionSet(ref int) { + + fieldOrInlineFragment := g.nodes[len(g.nodes)-1] + + set := ast.SelectionSet{} + g.resolveDocument.SelectionSets = append(g.resolveDocument.SelectionSets, set) + setRef := len(g.resolveDocument.SelectionSets) - 1 + + switch fieldOrInlineFragment.Kind { + case ast.NodeKindField: + g.resolveDocument.Fields[fieldOrInlineFragment.Ref].HasSelections = true + g.resolveDocument.Fields[fieldOrInlineFragment.Ref].SelectionSet = setRef + case ast.NodeKindInlineFragment: + g.resolveDocument.InlineFragments[fieldOrInlineFragment.Ref].HasSelections = true + g.resolveDocument.InlineFragments[fieldOrInlineFragment.Ref].SelectionSet = setRef + } + + g.nodes = append(g.nodes, ast.Node{ + Kind: ast.NodeKindSelectionSet, + Ref: setRef, + }) +} + +func (g *GraphQLDataSourcePlanner) LeaveSelectionSet(ref int) { + g.nodes = g.nodes[:len(g.nodes)-1] +} + +func (g *GraphQLDataSourcePlanner) EnterField(ref int) { + if !g.RootField.isDefined { + g.RootField.SetIfNotDefined(ref) + + typeName := g.Definition.NodeNameString(g.Walker.EnclosingTypeDefinition) + fieldNameStr := g.Operation.FieldNameUnsafeString(ref) + fieldName := g.Operation.FieldNameBytes(ref) + + g.Args = append(g.Args, &StaticVariableArgument{ + Name: RootTypeName, + Value: []byte(typeName), + }) + + g.Args = append(g.Args, &StaticVariableArgument{ + Name: RootFieldName, + Value: fieldName, + }) + + mapping := g.Config.MappingForTypeField(typeName, fieldNameStr) + if mapping != nil && !mapping.Disabled { + fieldName = unsafebytes.StringToBytes(mapping.Path) + } + + hasArguments := g.Operation.FieldHasArguments(ref) + var argumentRefs []int + if hasArguments { + argumentRefs = g.importer.ImportArguments(g.Operation.FieldArguments(ref), g.Operation, g.resolveDocument) + } + + field := ast.Field{ + Name: g.resolveDocument.Input.AppendInputBytes(fieldName), + Arguments: ast.ArgumentList{ + Refs: argumentRefs, + }, + HasArguments: hasArguments, + } + g.resolveDocument.Fields = append(g.resolveDocument.Fields, field) + fieldRef := len(g.resolveDocument.Fields) - 1 + selection := ast.Selection{ + Kind: ast.SelectionKindField, + Ref: fieldRef, + } + g.resolveDocument.Selections = append(g.resolveDocument.Selections, selection) + selectionRef := len(g.resolveDocument.Selections) - 1 + set := ast.SelectionSet{ + SelectionRefs: []int{selectionRef}, + } + 
g.resolveDocument.SelectionSets = append(g.resolveDocument.SelectionSets, set) + setRef := len(g.resolveDocument.SelectionSets) - 1 + operationDefinition := ast.OperationDefinition{ + Name: g.resolveDocument.Input.AppendInputBytes([]byte("o")), + OperationType: g.Operation.OperationDefinitions[g.Walker.Ancestors[0].Ref].OperationType, + SelectionSet: setRef, + HasSelections: true, + } + g.resolveDocument.OperationDefinitions = append(g.resolveDocument.OperationDefinitions, operationDefinition) + operationDefinitionRef := len(g.resolveDocument.OperationDefinitions) - 1 + g.resolveDocument.RootNodes = append(g.resolveDocument.RootNodes, ast.Node{ + Kind: ast.NodeKindOperationDefinition, + Ref: operationDefinitionRef, + }) + g.nodes = append(g.nodes, ast.Node{ + Kind: ast.NodeKindOperationDefinition, + Ref: operationDefinitionRef, + }) + g.nodes = append(g.nodes, ast.Node{ + Kind: ast.NodeKindSelectionSet, + Ref: setRef, + }) + g.nodes = append(g.nodes, ast.Node{ + Kind: ast.NodeKindField, + Ref: fieldRef, + }) + } else { + field := ast.Field{ + Name: g.resolveDocument.Input.AppendInputBytes(g.Operation.FieldNameBytes(ref)), + } + g.resolveDocument.Fields = append(g.resolveDocument.Fields, field) + fieldRef := len(g.resolveDocument.Fields) - 1 + set := g.nodes[len(g.nodes)-1] + selection := ast.Selection{ + Kind: ast.SelectionKindField, + Ref: fieldRef, + } + g.resolveDocument.Selections = append(g.resolveDocument.Selections, selection) + selectionRef := len(g.resolveDocument.Selections) - 1 + g.resolveDocument.SelectionSets[set.Ref].SelectionRefs = append(g.resolveDocument.SelectionSets[set.Ref].SelectionRefs, selectionRef) + g.nodes = append(g.nodes, ast.Node{ + Kind: ast.NodeKindField, + Ref: fieldRef, + }) + } +} + +func (g *GraphQLDataSourcePlanner) EnterArgument(ref int) { + variableValue := g.Operation.ArgumentValue(ref) + if variableValue.Kind != ast.ValueKindVariable { + return + } + + variableName := g.Operation.VariableValueNameBytes(variableValue.Ref) + definitionRef, exists := g.Operation.VariableDefinitionByNameAndOperation(g.nodes[0].Ref, variableName) + if !exists { + return + } + + g.whitelistedVariableRefs = append(g.whitelistedVariableRefs, definitionRef) + g.whitelistedVariableNameHashs[xxhash.Sum64(variableName)] = true +} + +func (g *GraphQLDataSourcePlanner) LeaveField(ref int) { + defer func() { + g.nodes = g.nodes[:len(g.nodes)-1] + }() + if g.RootField.ref != ref { + return + } + + hasVariableDefinitions := len(g.Operation.OperationDefinitions[g.Walker.Ancestors[0].Ref].VariableDefinitions.Refs) != 0 + var variableDefinitionsRefs []int + if hasVariableDefinitions { + operationVariableDefinitions := g.Operation.OperationDefinitions[g.Walker.Ancestors[0].Ref].VariableDefinitions.Refs + definitions := make([]int, len(operationVariableDefinitions)) + copy(definitions, operationVariableDefinitions) + definitions = ast.FilterIntSliceByWhitelist(definitions, g.whitelistedVariableRefs) + + variableDefinitionsRefs = g.importer.ImportVariableDefinitions(definitions, g.Operation, g.resolveDocument) + g.resolveDocument.OperationDefinitions[0].HasVariableDefinitions = len(definitions) != 0 + g.resolveDocument.OperationDefinitions[0].VariableDefinitions.Refs = variableDefinitionsRefs + } + + buff := bytes.Buffer{} + err := astprinter.Print(g.resolveDocument, nil, &buff) + if err != nil { + g.Walker.StopWithInternalErr(err) + return + } + g.Args = append(g.Args, &StaticVariableArgument{ + Name: literal.URL, + Value: []byte(g.dataSourceConfiguration.URL), + }) + g.Args = 
append(g.Args, &StaticVariableArgument{ + Name: literal.QUERY, + Value: buff.Bytes(), + }) + if g.dataSourceConfiguration.Method == nil { + g.Args = append(g.Args, &StaticVariableArgument{ + Name: literal.METHOD, + Value: literal.HTTP_METHOD_POST, + }) + } else { + g.Args = append(g.Args, &StaticVariableArgument{ + Name: literal.METHOD, + Value: []byte(*g.dataSourceConfiguration.Method), + }) + } +} + +func (g *GraphQLDataSourcePlanner) Plan(args []Argument) (DataSource, []Argument) { + for i := range args { + if arg, ok := args[i].(*ContextVariableArgument); ok { + if bytes.HasPrefix(arg.Name, literal.DOT_ARGUMENTS_DOT) { + arg.Name = bytes.TrimPrefix(arg.Name, literal.DOT_ARGUMENTS_DOT) + + if g.whitelistedVariableNameHashs[xxhash.Sum64(arg.Name)] { + g.Args = append(g.Args, arg) + } else if g.whitelistedVariableNameHashs[xxhash.Sum64(arg.VariableName)] { + arg.Name = arg.VariableName + g.Args = append(g.Args, arg) + } + } + } + } + return &GraphQLDataSource{ + Log: g.Log, + Client: g.client, + WhitelistedSchemes: g.whitelistedSchemes, + Hooks: g.hooks, + }, g.Args +} + +type GraphQLDataSource struct { + Log log.Logger + Client *http.Client + WhitelistedSchemes []string + Hooks Hooks +} + +func (g *GraphQLDataSource) Resolve(ctx context.Context, args ResolverArgs, out io.Writer) (n int, err error) { + urlArg := args.ByKey(literal.URL) + queryArg := args.ByKey(literal.QUERY) + rootTypeName := args.ByKey(RootTypeName) + rootFieldName := args.ByKey(RootFieldName) + hookContext := HookContext{ + TypeName: string(rootTypeName), + FieldName: string(rootFieldName), + } + + g.Log.Debug("GraphQLDataSource.Resolve.Args", + log.Strings("resolvedArgs", args.Dump()), + ) + + if urlArg == nil || queryArg == nil { + g.Log.Error("GraphQLDataSource.Args invalid") + return + } + + parsedURL, rawURL, err := parseURLBytes(urlArg) + if err != nil { + g.Log.Error("GraphQLDataSource.RawURL could not be parsed", log.String("rawURL", rawURL)) + return + } + + if len(parsedURL.Scheme) == 0 || !isWhitelistedScheme(parsedURL.Scheme, g.WhitelistedSchemes, graphqlSchemes) { + parsedURL.Scheme = graphqlSchemes[0] + } + + variables := map[string]interface{}{} + keys := args.Keys() + for i := 0; i < len(keys); i++ { + switch { + case bytes.Equal(keys[i], literal.URL): + case bytes.Equal(keys[i], literal.QUERY): + case bytes.Equal(keys[i], RootTypeName): + case bytes.Equal(keys[i], RootFieldName): + default: + variables[string(keys[i])] = string(args.ByKey(keys[i])) + } + } + + variablesJson, err := json.Marshal(variables) + if err != nil { + g.Log.Error("GraphQLDataSource.json.Marshal(variables)", + log.Error(err), + ) + return n, err + } + + gqlRequest := GraphqlRequest{ + OperationName: "o", + Variables: variablesJson, + Query: string(queryArg), + } + + gqlRequestData, err := json.MarshalIndent(gqlRequest, "", " ") + if err != nil { + g.Log.Error("GraphQLDataSource.json.MarshalIndent", + log.Error(err), + ) + return n, err + } + + g.Log.Debug("GraphQLDataSource.request", + log.String("rawURL", rawURL), + log.String("parsedURL", parsedURL.String()), + log.ByteString("data", gqlRequestData), + ) + + request, err := http.NewRequest(http.MethodPost, parsedURL.String(), bytes.NewBuffer(gqlRequestData)) + if err != nil { + g.Log.Error("GraphQLDataSource.http.NewRequest", + log.Error(err), + ) + return n, err + } + + request.Header.Add("Content-Type", "application/json") + request.Header.Add("Accept", "application/json") + + if g.Hooks.PreSendHttpHook != nil { + g.Hooks.PreSendHttpHook.Execute(hookContext, request) + } + + res,
err := g.Client.Do(request) + if err != nil { + g.Log.Error("GraphQLDataSource.client.Do", + log.Error(err), + ) + return n, err + } + data, err := ioutil.ReadAll(res.Body) + if err != nil { + g.Log.Error("GraphQLDataSource.ioutil.ReadAll", + log.Error(err), + ) + return n, err + } + + if g.Hooks.PostReceiveHttpHook != nil { + g.Hooks.PostReceiveHttpHook.Execute(hookContext, res, data) + } + + defer func() { + err := res.Body.Close() + if err != nil { + g.Log.Error("GraphQLDataSource.Resolve.Response.Body.Close", log.Error(err)) + } + }() + + data = bytes.ReplaceAll(data, literal.BACKSLASH, nil) + data, _, _, err = jsonparser.Get(data, "data") + if err != nil { + g.Log.Error("GraphQLDataSource.jsonparser.Get", + log.Error(err), + ) + return n, err + } + return out.Write(data) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/execution/datasource/datasource_http_json.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/execution/datasource/datasource_http_json.go new file mode 100644 index 00000000000..a74019028dd --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/execution/datasource/datasource_http_json.go @@ -0,0 +1,401 @@ +package datasource + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "strconv" + + "github.com/buger/jsonparser" + log "github.com/jensneuse/abstractlogger" + "github.com/tidwall/gjson" + "github.com/tidwall/sjson" + + "github.com/TykTechnologies/graphql-go-tools/internal/pkg/unsafebytes" + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/literal" +) + +var httpJsonSchemes = []string{ + "https", + "http", +} + +// HttpJsonDataSourceConfig is the configuration object for the HttpJsonDataSource +type HttpJsonDataSourceConfig struct { + // URL is the url of the upstream + URL string `bson:"url" json:"url"` + // Method is the http.Method, e.g. 
GET, POST, UPDATE, DELETE + // default is GET + Method *string `bson:"method" json:"method"` + // Body is the http body to send + // default is null/nil (no body) + Body *string `bson:"body" json:"body"` + // Headers defines the header mappings + Headers []HttpJsonDataSourceConfigHeader `bson:"headers" json:"headers"` + // DefaultTypeName is the optional variable to define a default type name for the response object + // This is useful in case the response might be a Union or Interface type which uses StatusCodeTypeNameMappings + DefaultTypeName *string `bson:"default_type_name" json:"default_type_name"` + // StatusCodeTypeNameMappings is a slice of mappings from http.StatusCode to GraphQL TypeName + // This can be used when the TypeName depends on the http.StatusCode + StatusCodeTypeNameMappings []StatusCodeTypeNameMapping `bson:"status_code_type_name_mappings" json:"status_code_type_name_mappings"` +} + +type StatusCodeTypeNameMapping struct { + StatusCode int `bson:"status_code" json:"status_code"` + TypeName string `bson:"type_name" json:"type_name"` +} + +type HttpJsonDataSourceConfigHeader struct { + Key string `bson:"key" json:"key"` + Value string `bson:"value" json:"value"` +} + +type HttpJsonDataSourcePlannerFactoryFactory struct { + Client *http.Client + WhitelistedSchemes []string + Hooks Hooks +} + +func (h *HttpJsonDataSourcePlannerFactoryFactory) httpClient() *http.Client { + if h.Client != nil { + return h.Client + } + return DefaultHttpClient() +} + +func (h *HttpJsonDataSourcePlannerFactoryFactory) Initialize(base BasePlanner, configReader io.Reader) (PlannerFactory, error) { + factory := &HttpJsonDataSourcePlannerFactory{ + base: base, + client: h.httpClient(), + whitelistedSchemes: h.WhitelistedSchemes, + hooks: h.Hooks, + } + err := json.NewDecoder(configReader).Decode(&factory.config) + return factory, err +} + +type HttpJsonDataSourcePlannerFactory struct { + base BasePlanner + config HttpJsonDataSourceConfig + client *http.Client + whitelistedSchemes []string + hooks Hooks +} + +func (h *HttpJsonDataSourcePlannerFactory) DataSourcePlanner() Planner { + return &HttpJsonDataSourcePlanner{ + BasePlanner: h.base, + dataSourceConfig: h.config, + client: h.client, + whitelistedSchemes: h.whitelistedSchemes, + hooks: h.hooks, + } +} + +type HttpJsonDataSourcePlanner struct { + BasePlanner + dataSourceConfig HttpJsonDataSourceConfig + client *http.Client + whitelistedSchemes []string + hooks Hooks +} + +func (h *HttpJsonDataSourcePlanner) Plan(args []Argument) (DataSource, []Argument) { + return &HttpJsonDataSource{ + Log: h.Log, + Client: h.client, + WhitelistedSchemes: h.whitelistedSchemes, + Hooks: h.hooks, + }, append(h.Args, args...) 
+} + +func (h *HttpJsonDataSourcePlanner) EnterDocument(operation, definition *ast.Document) { + +} + +func (h *HttpJsonDataSourcePlanner) EnterInlineFragment(ref int) { + +} + +func (h *HttpJsonDataSourcePlanner) LeaveInlineFragment(ref int) { + +} + +func (h *HttpJsonDataSourcePlanner) EnterSelectionSet(ref int) { + +} + +func (h *HttpJsonDataSourcePlanner) LeaveSelectionSet(ref int) { + +} + +func (h *HttpJsonDataSourcePlanner) EnterField(ref int) { + if !h.RootField.isDefined { + h.RootField.SetIfNotDefined(ref) + + typeName := h.Definition.NodeNameBytes(h.Walker.EnclosingTypeDefinition) + fieldName := h.Operation.FieldNameBytes(ref) + + h.Args = append(h.Args, &StaticVariableArgument{ + Name: RootTypeName, + Value: typeName, + }) + + h.Args = append(h.Args, &StaticVariableArgument{ + Name: RootFieldName, + Value: fieldName, + }) + } +} + +func (h *HttpJsonDataSourcePlanner) EnterArgument(ref int) {} + +func (h *HttpJsonDataSourcePlanner) LeaveField(ref int) { + if !h.RootField.IsDefinedAndEquals(ref) { + return + } + definition, exists := h.Walker.FieldDefinition(ref) + if !exists { + return + } + h.Args = append(h.Args, &StaticVariableArgument{ + Name: literal.URL, + Value: []byte(h.dataSourceConfig.URL), + }) + if h.dataSourceConfig.Method == nil { + h.Args = append(h.Args, &StaticVariableArgument{ + Name: literal.METHOD, + Value: literal.HTTP_METHOD_GET, + }) + } else { + h.Args = append(h.Args, &StaticVariableArgument{ + Name: literal.METHOD, + Value: []byte(*h.dataSourceConfig.Method), + }) + } + if h.dataSourceConfig.Body != nil { + h.Args = append(h.Args, &StaticVariableArgument{ + Name: literal.BODY, + Value: []byte(*h.dataSourceConfig.Body), + }) + } + + if len(h.dataSourceConfig.Headers) != 0 { + listArg := &ListArgument{ + Name: literal.HEADERS, + } + for i := range h.dataSourceConfig.Headers { + listArg.Arguments = append(listArg.Arguments, &StaticVariableArgument{ + Name: []byte(h.dataSourceConfig.Headers[i].Key), + Value: []byte(h.dataSourceConfig.Headers[i].Value), + }) + } + h.Args = append(h.Args, listArg) + } + + // __typename + var typeNameValue []byte + var err error + fieldDefinitionTypeNode := h.Definition.FieldDefinitionTypeNode(definition) + fieldDefinitionType := h.Definition.FieldDefinitionType(definition) + fieldDefinitionTypeName := h.Definition.ResolveTypeNameBytes(fieldDefinitionType) + quotedFieldDefinitionTypeName := append(literal.QUOTE, append(fieldDefinitionTypeName, literal.QUOTE...)...) 
+ switch fieldDefinitionTypeNode.Kind { + case ast.NodeKindScalarTypeDefinition: + return + case ast.NodeKindUnionTypeDefinition, ast.NodeKindInterfaceTypeDefinition: + if h.dataSourceConfig.DefaultTypeName != nil { + typeNameValue, err = sjson.SetRawBytes(typeNameValue, "defaultTypeName", []byte("\""+*h.dataSourceConfig.DefaultTypeName+"\"")) + if err != nil { + h.Log.Error("HttpJsonDataSourcePlanner set defaultTypeName (switch case union/interface)", log.Error(err)) + return + } + } + for i := range h.dataSourceConfig.StatusCodeTypeNameMappings { + typeNameValue, err = sjson.SetRawBytes(typeNameValue, strconv.Itoa(h.dataSourceConfig.StatusCodeTypeNameMappings[i].StatusCode), []byte("\""+h.dataSourceConfig.StatusCodeTypeNameMappings[i].TypeName+"\"")) + if err != nil { + h.Log.Error("HttpJsonDataSourcePlanner set statusCodeTypeMapping", log.Error(err)) + return + } + } + default: + typeNameValue, err = sjson.SetRawBytes(typeNameValue, "defaultTypeName", quotedFieldDefinitionTypeName) + if err != nil { + h.Log.Error("HttpJsonDataSourcePlanner set defaultTypeName (switch case default)", log.Error(err)) + return + } + } + h.Args = append(h.Args, &StaticVariableArgument{ + Name: literal.TYPENAME, + Value: typeNameValue, + }) +} + +type HttpJsonDataSource struct { + Log log.Logger + Client *http.Client + WhitelistedSchemes []string + Hooks Hooks +} + +func (r *HttpJsonDataSource) Resolve(ctx context.Context, args ResolverArgs, out io.Writer) (n int, err error) { + urlArg := args.ByKey(literal.URL) + methodArg := args.ByKey(literal.METHOD) + bodyArg := args.ByKey(literal.BODY) + headersArg := args.ByKey(literal.HEADERS) + typeNameArg := args.ByKey(literal.TYPENAME) + rootTypeName := args.ByKey(RootTypeName) + rootFieldName := args.ByKey(RootFieldName) + hookContext := HookContext{ + TypeName: string(rootTypeName), + FieldName: string(rootFieldName), + } + + r.Log.Debug("HttpJsonDataSource.Resolve.Args", + log.Strings("resolvedArgs", args.Dump()), + ) + + switch { + case urlArg == nil: + r.Log.Error(fmt.Sprintf("arg '%s' must not be nil", string(literal.URL))) + return + case methodArg == nil: + r.Log.Error(fmt.Sprintf("arg '%s' must not be nil", string(literal.METHOD))) + return + } + + httpMethod := http.MethodGet + switch { + case bytes.Equal(methodArg, literal.HTTP_METHOD_GET): + httpMethod = http.MethodGet + case bytes.Equal(methodArg, literal.HTTP_METHOD_POST): + httpMethod = http.MethodPost + case bytes.Equal(methodArg, literal.HTTP_METHOD_PUT): + httpMethod = http.MethodPut + case bytes.Equal(methodArg, literal.HTTP_METHOD_DELETE): + httpMethod = http.MethodDelete + case bytes.Equal(methodArg, literal.HTTP_METHOD_PATCH): + httpMethod = http.MethodPatch + } + + parsedURL, rawURL, err := parseURLBytes(urlArg) + if err != nil { + r.Log.Error("HttpJsonDataSource.RawURL could not be parsed", log.String("rawURL", rawURL)) + return + } + + if len(parsedURL.Scheme) == 0 || !isWhitelistedScheme(parsedURL.Scheme, r.WhitelistedSchemes, httpJsonSchemes) { + parsedURL.Scheme = httpJsonSchemes[0] + } + + header := make(http.Header) + if len(headersArg) != 0 { + err := jsonparser.ObjectEach(headersArg, func(key []byte, value []byte, dataType jsonparser.ValueType, offset int) error { + header.Set(string(key), string(value)) + return nil + }) + if err != nil { + r.Log.Error("accessing headers", log.Error(err)) + } + } + + r.Log.Debug("HttpJsonDataSource.Resolve", + log.String("rawURL", rawURL), + log.String("parsedURL", parsedURL.String()), + ) + + var bodyReader io.Reader + if len(bodyArg) != 0 { + 
bodyReader = bytes.NewReader(bodyArg) + } + + request, err := http.NewRequest(httpMethod, parsedURL.String(), bodyReader) + if err != nil { + r.Log.Error("HttpJsonDataSource.Resolve.NewRequest", + log.Error(err), + ) + return + } + + request.Header = header + + if r.Hooks.PreSendHttpHook != nil { + r.Hooks.PreSendHttpHook.Execute(hookContext, request) + } + + res, err := r.Client.Do(request) + if err != nil { + r.Log.Error("HttpJsonDataSource.Resolve.HTTPClient.Do", + log.Error(err), + ) + return + } + + data, err := ioutil.ReadAll(res.Body) + if err != nil { + r.Log.Error("HttpJsonDataSource.Resolve.ioutil.ReadAll", + log.Error(err), + ) + return + } + + if r.Hooks.PostReceiveHttpHook != nil { + r.Hooks.PostReceiveHttpHook.Execute(hookContext, res, data) + } + + defer func() { + err := res.Body.Close() + if err != nil { + r.Log.Error("HttpJsonDataSource.Resolve.Response.Body.Close", log.Error(err)) + } + }() + + statusCode := strconv.Itoa(res.StatusCode) + statusCodeTypeName := gjson.GetBytes(typeNameArg, statusCode) + defaultTypeName := gjson.GetBytes(typeNameArg, "defaultTypeName") + var result *gjson.Result + if statusCodeTypeName.Exists() { + result = &statusCodeTypeName + } + if result == nil && defaultTypeName.Exists() { + result = &defaultTypeName + } + + if result != nil { + parsed := gjson.ParseBytes(data) + if parsed.IsArray() { + arrayData := []byte(`[]`) + items := parsed.Array() + for i := range items { + item, err := sjson.SetRaw(items[i].Raw, "__typename", result.Raw) + if err != nil { + r.Log.Error("HttpJsonDataSource.Resolve.array.setDefaultTypeName", + log.Error(err), + ) + } + arrayData, err = sjson.SetRawBytes(arrayData, "-1", unsafebytes.StringToBytes(item)) + if err != nil { + r.Log.Error("HttpJsonDataSource.Resolve.array.setArrayItem", + log.Error(err), + ) + } + } + data = arrayData + } else { + data, err = sjson.SetRawBytes(data, "__typename", []byte(result.Raw)) + if err != nil { + r.Log.Error("HttpJsonDataSource.Resolve.setDefaultTypeName", + log.Error(err), + ) + return + } + } + } + + return out.Write(data) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/execution/datasource/datasource_http_polling_stream.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/execution/datasource/datasource_http_polling_stream.go new file mode 100644 index 00000000000..93612af7bee --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/execution/datasource/datasource_http_polling_stream.go @@ -0,0 +1,252 @@ +package datasource + +import ( + "bytes" + "context" + "encoding/json" + "io" + "io/ioutil" + "net/http" + "strings" + "sync" + "text/template" + "time" + + log "github.com/jensneuse/abstractlogger" + + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/literal" +) + +type HttpPollingStreamDataSourceConfiguration struct { + Host string + URL string + DelaySeconds *int +} + +type HttpPollingStreamDataSourcePlannerFactoryFactory struct { +} + +func (h HttpPollingStreamDataSourcePlannerFactoryFactory) Initialize(base BasePlanner, configReader io.Reader) (PlannerFactory, error) { + factory := &HttpPollingStreamDataSourcePlannerFactory{ + base: base, + } + return factory, json.NewDecoder(configReader).Decode(&factory.config) +} + +type HttpPollingStreamDataSourcePlannerFactory struct { + base BasePlanner + config HttpPollingStreamDataSourceConfiguration +} + +func (h HttpPollingStreamDataSourcePlannerFactory) DataSourcePlanner() Planner { + return 
&HttpPollingStreamDataSourcePlanner{ + BasePlanner: h.base, + dataSourceConfig: h.config, + } +} + +type HttpPollingStreamDataSourcePlanner struct { + BasePlanner + dataSourceConfig HttpPollingStreamDataSourceConfiguration + delay time.Duration +} + +func (h *HttpPollingStreamDataSourcePlanner) Plan(args []Argument) (DataSource, []Argument) { + return &HttpPollingStreamDataSource{ + Log: h.Log, + Delay: h.delay, + }, append(h.Args, args...) +} + +func (h *HttpPollingStreamDataSourcePlanner) EnterDocument(operation, definition *ast.Document) { + +} + +func (h *HttpPollingStreamDataSourcePlanner) EnterInlineFragment(ref int) { + +} + +func (h *HttpPollingStreamDataSourcePlanner) LeaveInlineFragment(ref int) { + +} + +func (h *HttpPollingStreamDataSourcePlanner) EnterSelectionSet(ref int) { + +} + +func (h *HttpPollingStreamDataSourcePlanner) LeaveSelectionSet(ref int) { + +} + +func (h *HttpPollingStreamDataSourcePlanner) EnterField(ref int) { + h.RootField.SetIfNotDefined(ref) +} + +func (h *HttpPollingStreamDataSourcePlanner) EnterArgument(ref int) { + +} + +func (h *HttpPollingStreamDataSourcePlanner) LeaveField(ref int) { + if !h.RootField.IsDefinedAndEquals(ref) { + return + } + h.Args = append(h.Args, &StaticVariableArgument{ + Name: literal.HOST, + Value: []byte(h.dataSourceConfig.Host), + }) + h.Args = append(h.Args, &StaticVariableArgument{ + Name: literal.URL, + Value: []byte(h.dataSourceConfig.URL), + }) + if h.dataSourceConfig.DelaySeconds == nil { + h.delay = time.Second * time.Duration(1) + } else { + h.delay = time.Second * time.Duration(*h.dataSourceConfig.DelaySeconds) + } +} + +type HttpPollingStreamDataSource struct { + Log log.Logger + once sync.Once + ch chan []byte + closed bool + Delay time.Duration + client *http.Client + request *http.Request + lastData []byte +} + +func (h *HttpPollingStreamDataSource) Resolve(ctx context.Context, args ResolverArgs, out io.Writer) (n int, err error) { + h.once.Do(func() { + h.ch = make(chan []byte) + h.request = h.generateRequest(args) + h.client = &http.Client{ + Timeout: time.Second * 5, + Transport: &http.Transport{ + MaxIdleConnsPerHost: 1024, + TLSHandshakeTimeout: 0 * time.Second, + }, + } + go h.startPolling(ctx) + }) + if h.closed { + return + } + select { + case data := <-h.ch: + h.Log.Debug("HttpPollingStreamDataSource.Resolve.out.Write", + log.ByteString("data", data), + ) + _, err := out.Write(data) + if err != nil { + h.Log.Error("HttpPollingStreamDataSource.Resolve", + log.Error(err), + ) + } + case <-ctx.Done(): + h.closed = true + return + } + return +} + +func (h *HttpPollingStreamDataSource) startPolling(ctx context.Context) { + first := true + for { + if first { + first = !first + } else { + time.Sleep(h.Delay) + } + var data []byte + select { + case <-ctx.Done(): + h.closed = true + return + default: + response, err := h.client.Do(h.request) + if err != nil { + h.Log.Error("HttpPollingStreamDataSource.startPolling.client.Do", + log.Error(err), + ) + return + } + data, err = ioutil.ReadAll(response.Body) + if err != nil { + h.Log.Error("HttpPollingStreamDataSource.startPolling.ioutil.ReadAll", + log.Error(err), + ) + return + } + } + if bytes.Equal(data, h.lastData) { + continue + } + h.lastData = data + select { + case <-ctx.Done(): + h.closed = true + return + case h.ch <- data: + continue + } + } +} + +func (h *HttpPollingStreamDataSource) generateRequest(args ResolverArgs) *http.Request { + hostArg := args.ByKey(literal.HOST) + urlArg := args.ByKey(literal.URL) + + 
h.Log.Debug("HttpPollingStreamDataSource.generateRequest.Resolve.Args", + log.Strings("resolvedArgs", args.Dump()), + ) + + if hostArg == nil || urlArg == nil { + h.Log.Error("HttpPollingStreamDataSource.generateRequest.Args invalid") + return nil + } + + url := string(hostArg) + string(urlArg) + if !strings.HasPrefix(url, "https://") && !strings.HasPrefix(url, "http://") { + url = "https://" + url + } + + if strings.Contains(url, "{{") { + tmpl, err := template.New("url").Parse(url) + if err != nil { + h.Log.Error("HttpPollingStreamDataSource.generateRequest.template.New", + log.Error(err), + ) + return nil + } + out := bytes.Buffer{} + keys := args.Keys() + data := make(map[string]string, len(keys)) + for i := 0; i < len(keys); i++ { + data[string(keys[i])] = string(args.ByKey(keys[i])) + } + err = tmpl.Execute(&out, data) + if err != nil { + h.Log.Error("HttpPollingStreamDataSource.generateRequest.tmpl.Execute", + log.Error(err), + ) + return nil + } + url = out.String() + } + + h.Log.Debug("HttpPollingStreamDataSource.generateRequest.Resolve", + log.String("url", url), + ) + + request, err := http.NewRequest(http.MethodGet, url, nil) + if err != nil { + h.Log.Error("HttpPollingStreamDataSource.generateRequest.Resolve.NewRequest", + log.Error(err), + ) + return nil + } + request.Header.Add("Accept", "application/json") + return request +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/execution/datasource/datasource_mqtt.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/execution/datasource/datasource_mqtt.go new file mode 100644 index 00000000000..db06011e178 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/execution/datasource/datasource_mqtt.go @@ -0,0 +1,178 @@ +package datasource + +import ( + "context" + "encoding/json" + "io" + "sync" + "time" + + mqtt "github.com/eclipse/paho.mqtt.golang" + log "github.com/jensneuse/abstractlogger" + + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/literal" +) + +type MQTTDataSourceConfig struct { + BrokerAddr string + ClientID string + Topic string +} + +type MQTTDataSourcePlannerFactoryFactory struct { +} + +func (M MQTTDataSourcePlannerFactoryFactory) Initialize(base BasePlanner, configReader io.Reader) (PlannerFactory, error) { + factory := &MQTTDataSourcePlannerFactory{ + base: base, + } + return factory, json.NewDecoder(configReader).Decode(&factory.config) +} + +type MQTTDataSourcePlannerFactory struct { + base BasePlanner + config MQTTDataSourceConfig +} + +func (m MQTTDataSourcePlannerFactory) DataSourcePlanner() Planner { + return &MQTTDataSourcePlanner{ + BasePlanner: m.base, + dataSourceConfig: m.config, + } +} + +type MQTTDataSourcePlanner struct { + BasePlanner + dataSourceConfig MQTTDataSourceConfig +} + +func (n *MQTTDataSourcePlanner) Plan(args []Argument) (DataSource, []Argument) { + return &MQTTDataSource{ + log: n.Log, + }, append(n.Args, args...) 
+} + +func (n *MQTTDataSourcePlanner) EnterDocument(operation, definition *ast.Document) { + +} + +func (n *MQTTDataSourcePlanner) EnterInlineFragment(ref int) { + +} + +func (n *MQTTDataSourcePlanner) LeaveInlineFragment(ref int) { + +} + +func (n *MQTTDataSourcePlanner) EnterSelectionSet(ref int) { + +} + +func (n *MQTTDataSourcePlanner) LeaveSelectionSet(ref int) { + +} + +func (n *MQTTDataSourcePlanner) EnterField(ref int) { + n.RootField.SetIfNotDefined(ref) +} + +func (n *MQTTDataSourcePlanner) EnterArgument(ref int) { + +} + +func (n *MQTTDataSourcePlanner) LeaveField(ref int) { + if !n.RootField.IsDefinedAndEquals(ref) { + return + } + n.Args = append(n.Args, &StaticVariableArgument{ + Name: literal.BROKERADDR, + Value: []byte(n.dataSourceConfig.BrokerAddr), + }) + n.Args = append(n.Args, &StaticVariableArgument{ + Name: literal.CLIENTID, + Value: []byte(n.dataSourceConfig.ClientID), + }) + n.Args = append(n.Args, &StaticVariableArgument{ + Name: literal.TOPIC, + Value: []byte(n.dataSourceConfig.Topic), + }) +} + +type MQTTDataSource struct { + log log.Logger + once sync.Once + ch chan mqtt.Message + client mqtt.Client +} + +func (m *MQTTDataSource) Resolve(ctx context.Context, args ResolverArgs, out io.Writer) (n int, err error) { + + defer func() { + select { + case <-ctx.Done(): + m.log.Debug("MQTTDataSource.Resolve.client.Disconnect") + m.client.Disconnect(250) + m.log.Debug("MQTTDataSource.Resolve.client.Disconnect.disconnected") + default: + return + } + }() + + m.once.Do(func() { + + brokerArg := args.ByKey(literal.BROKERADDR) + clientIDArg := args.ByKey(literal.CLIENTID) + topicArg := args.ByKey(literal.TOPIC) + + m.log.Debug("MQTTDataSource.Resolve.init", + log.String("broker", string(brokerArg)), + log.String("clientID", string(clientIDArg)), + log.String("topic", string(topicArg)), + ) + + m.ch = make(chan mqtt.Message) + m.start(string(brokerArg), string(clientIDArg), string(topicArg)) + }) + + select { + case <-ctx.Done(): + return + case msg, ok := <-m.ch: + if !ok { + return + } + return out.Write(msg.Payload()) + } +} + +func (m *MQTTDataSource) start(brokerAddr, clientID, topic string) { + mqtt.ERROR = m.log.LevelLogger(log.ErrorLevel) + mqtt.DEBUG = m.log.LevelLogger(log.DebugLevel) + opts := mqtt.NewClientOptions().AddBroker(brokerAddr).SetClientID(clientID) + opts.SetKeepAlive(5 * time.Second) + opts.SetResumeSubs(true) + opts.SetAutoReconnect(true) + opts.SetDefaultPublishHandler(func(client mqtt.Client, msg mqtt.Message) { + m.ch <- msg + msg.Ack() + }) + opts.SetPingTimeout(5 * time.Second) + + m.client = mqtt.NewClient(opts) + if token := m.client.Connect(); token.Wait() && token.Error() != nil { + m.log.Error("MQTTDataSource.start.Connect", + log.Error(token.Error()), + ) + close(m.ch) + return + } + + if token := m.client.Subscribe(topic, 0, nil); token.Wait() && token.Error() != nil { + m.log.Error("MQTTDataSource.start.Subscribe", + log.Error(token.Error()), + ) + close(m.ch) + return + } +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/execution/datasource/datasource_nats.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/execution/datasource/datasource_nats.go new file mode 100644 index 00000000000..1437948ed2e --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/execution/datasource/datasource_nats.go @@ -0,0 +1,134 @@ +package datasource + +import ( + "context" + "encoding/json" + "io" + "sync" + "time" + + log "github.com/jensneuse/abstractlogger" + "github.com/nats-io/nats.go" + + 
"github.com/TykTechnologies/graphql-go-tools/pkg/lexer/literal" +) + +type NatsDataSourceConfig struct { + Addr string + Topic string +} + +type NatsDataSourcePlannerFactoryFactory struct { +} + +func (n NatsDataSourcePlannerFactoryFactory) Initialize(base BasePlanner, configReader io.Reader) (PlannerFactory, error) { + factory := &NatsDataSourcePlannerFactory{ + base: base, + } + return factory, json.NewDecoder(configReader).Decode(&factory.config) +} + +type NatsDataSourcePlannerFactory struct { + base BasePlanner + config NatsDataSourceConfig +} + +func (n NatsDataSourcePlannerFactory) DataSourcePlanner() Planner { + return SimpleDataSourcePlanner(&NatsDataSourcePlanner{ + BasePlanner: n.base, + dataSourceConfig: n.config, + }) +} + +type NatsDataSourcePlanner struct { + BasePlanner + dataSourceConfig NatsDataSourceConfig +} + +func (n *NatsDataSourcePlanner) Plan([]Argument) (DataSource, []Argument) { + n.Args = append(n.Args, &StaticVariableArgument{ + Name: literal.ADDR, + Value: []byte(n.dataSourceConfig.Addr), + }) + n.Args = append(n.Args, &StaticVariableArgument{ + Name: literal.TOPIC, + Value: []byte(n.dataSourceConfig.Topic), + }) + return &NatsDataSource{ + log: n.Log, + }, n.Args +} + +type NatsDataSource struct { + log log.Logger + conn *nats.Conn + sub *nats.Subscription + once sync.Once +} + +func (d *NatsDataSource) Resolve(ctx context.Context, args ResolverArgs, out io.Writer) (n int, err error) { + d.once.Do(func() { + + addrArg := args.ByKey(literal.ADDR) + topicArg := args.ByKey(literal.TOPIC) + + addr := nats.DefaultURL + topic := string(topicArg) + + if len(addrArg) != 0 { + addr = string(addrArg) + } + + go func() { + <-ctx.Done() + if d.sub != nil { + d.log.Debug("NatsDataSource.unsubscribing", + log.String("addr", addr), + log.String("topic", topic), + ) + err := d.sub.Unsubscribe() + if err != nil { + d.log.Error("Unsubscribe", log.Error(err)) + } + } + if d.conn != nil { + d.log.Debug("NatsDataSource.closing", + log.String("addr", addr), + log.String("topic", topic), + ) + d.conn.Close() + } + }() + + d.log.Debug("NatsDataSource.connecting", + log.String("addr", addr), + log.String("topic", topic), + ) + + d.conn, err = nats.Connect(addr) + if err != nil { + panic(err) + } + + d.log.Debug("NatsDataSource.subscribing", + log.String("addr", addr), + log.String("topic", topic), + ) + + d.sub, err = d.conn.SubscribeSync(topic) + if err != nil { + panic(err) + } + }) + + if err != nil { + return n, err + } + + message, err := d.sub.NextMsg(time.Minute) + if err != nil { + return n, err + } + + return out.Write(message.Data) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/execution/datasource/datasource_pipeline.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/execution/datasource/datasource_pipeline.go new file mode 100644 index 00000000000..0891ef4cc03 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/execution/datasource/datasource_pipeline.go @@ -0,0 +1,142 @@ +package datasource + +import ( + "bytes" + "context" + "encoding/json" + "io" + "io/ioutil" + + log "github.com/jensneuse/abstractlogger" + "github.com/jensneuse/pipeline/pkg/pipe" + + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/literal" +) + +type PipelineDataSourceConfig struct { + /* + ConfigFilePath is the path where the Pipeline configuration file can be found + it needs to be in the json format according to the Pipeline json schema + see this url for more info: 
https://github.com/jensneuse/pipeline + */ + ConfigFilePath *string + /* + ConfigString is a string to configure the Pipeline + it needs to be in the json format according to the Pipeline json schema + see this url for more info: https://github.com/jensneuse/pipeline + Note that the PipelineDataSourcePlanner applies the ConfigFilePath after the ConfigString, so the file contents take precedence in case both are defined. + */ + ConfigString *string + // InputJSON is the template to define a JSON object based on the request, parameters etc. which gets passed to the first Pipeline step + InputJSON string +} + +type PipelineDataSourcePlannerFactoryFactory struct { +} + +func (p PipelineDataSourcePlannerFactoryFactory) Initialize(base BasePlanner, configReader io.Reader) (PlannerFactory, error) { + factory := &PipelineDataSourcePlannerFactory{ + base: base, + } + return factory, json.NewDecoder(configReader).Decode(&factory.config) +} + +type PipelineDataSourcePlannerFactory struct { + base BasePlanner + config PipelineDataSourceConfig +} + +func (p PipelineDataSourcePlannerFactory) DataSourcePlanner() Planner { + return &PipelineDataSourcePlanner{ + BasePlanner: p.base, + dataSourceConfig: p.config, + } +} + +type PipelineDataSourcePlanner struct { + BasePlanner + dataSourceConfig PipelineDataSourceConfig + rawPipelineConfig []byte +} + +func (h *PipelineDataSourcePlanner) Plan(args []Argument) (DataSource, []Argument) { + + source := PipelineDataSource{ + Log: h.Log, + } + + err := source.Pipeline.FromConfig(bytes.NewReader(h.rawPipelineConfig)) + if err != nil { + h.Log.Error("PipelineDataSourcePlanner.pipe.FromConfig", log.Error(err)) + } + + return &source, append(h.Args, args...) +} + +func (h *PipelineDataSourcePlanner) EnterDocument(operation, definition *ast.Document) { + +} + +func (h *PipelineDataSourcePlanner) EnterInlineFragment(ref int) { + +} + +func (h *PipelineDataSourcePlanner) LeaveInlineFragment(ref int) { + +} + +func (h *PipelineDataSourcePlanner) EnterSelectionSet(ref int) { + +} + +func (h *PipelineDataSourcePlanner) LeaveSelectionSet(ref int) { + +} + +func (h *PipelineDataSourcePlanner) EnterField(ref int) { + h.RootField.SetIfNotDefined(ref) +} + +func (h *PipelineDataSourcePlanner) EnterArgument(ref int) { + +} + +func (h *PipelineDataSourcePlanner) LeaveField(ref int) { + if !h.RootField.IsDefinedAndEquals(ref) { + return + } + + if h.dataSourceConfig.ConfigString != nil { + h.rawPipelineConfig = []byte(*h.dataSourceConfig.ConfigString) + } + if h.dataSourceConfig.ConfigFilePath != nil { + var err error + h.rawPipelineConfig, err = ioutil.ReadFile(*h.dataSourceConfig.ConfigFilePath) + if err != nil { + h.Log.Error("PipelineDataSourcePlanner.readConfigFile", log.Error(err)) + } + } + + h.Args = append(h.Args, &StaticVariableArgument{ + Name: literal.INPUT_JSON, + Value: []byte(h.dataSourceConfig.InputJSON), + }) +} + +type PipelineDataSource struct { + Log log.Logger + Pipeline pipe.Pipeline +} + +func (r *PipelineDataSource) Resolve(ctx context.Context, args ResolverArgs, out io.Writer) (n int, err error) { + + inputJSON := args.ByKey(literal.INPUT_JSON) + + err = r.Pipeline.Run(bytes.NewReader(inputJSON), out) + if err != nil { + r.Log.Error("PipelineDataSource.pipe.Run", log.Error(err)) + } + + return +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/execution/datasource/datasource_schema.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/execution/datasource/datasource_schema.go new file mode 100644 index 00000000000..bce11934f04 --- /dev/null +++
b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/execution/datasource/datasource_schema.go @@ -0,0 +1,66 @@ +package datasource + +import ( + "context" + "encoding/json" + "io" + + "github.com/TykTechnologies/graphql-go-tools/pkg/introspection" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +type SchemaDataSourcePlannerConfig struct { +} + +type SchemaDataSourcePlannerFactoryFactory struct { +} + +func (s SchemaDataSourcePlannerFactoryFactory) Initialize(base BasePlanner, configReader io.Reader) (PlannerFactory, error) { + factory := &SchemaDataSourcePlannerFactory{ + base: base, + } + err := json.NewDecoder(configReader).Decode(&factory.config) + if err != nil { + return factory, err + } + gen := introspection.NewGenerator() + var data introspection.Data + var report operationreport.Report + gen.Generate(base.Definition, &report, &data) + factory.schemaBytes, err = json.Marshal(data) + return factory, err +} + +type SchemaDataSourcePlannerFactory struct { + base BasePlanner + config SchemaDataSourcePlannerConfig + schemaBytes []byte +} + +func (s SchemaDataSourcePlannerFactory) DataSourcePlanner() Planner { + return SimpleDataSourcePlanner(&SchemaDataSourcePlanner{ + BasePlanner: s.base, + dataSourceConfig: s.config, + schemaBytes: s.schemaBytes, + }) +} + +type SchemaDataSourcePlanner struct { + BasePlanner + dataSourceConfig SchemaDataSourcePlannerConfig + schemaBytes []byte +} + +func (s *SchemaDataSourcePlanner) Plan(args []Argument) (DataSource, []Argument) { + return &SchemaDataSource{ + SchemaBytes: s.schemaBytes, + }, append(s.Args, args...) +} + +type SchemaDataSource struct { + SchemaBytes []byte +} + +func (s *SchemaDataSource) Resolve(ctx context.Context, args ResolverArgs, out io.Writer) (n int, err error) { + return out.Write(s.SchemaBytes) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/execution/datasource/datasource_static.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/execution/datasource/datasource_static.go new file mode 100644 index 00000000000..ef8f83420ae --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/execution/datasource/datasource_static.go @@ -0,0 +1,52 @@ +package datasource + +import ( + "context" + "encoding/json" + "io" +) + +type StaticDataSourceConfig struct { + Data string +} + +type StaticDataSourcePlannerFactoryFactory struct { +} + +func (s StaticDataSourcePlannerFactoryFactory) Initialize(base BasePlanner, configReader io.Reader) (PlannerFactory, error) { + factory := &StaticDataSourcePlannerFactory{ + base: base, + } + return factory, json.NewDecoder(configReader).Decode(&factory.config) +} + +type StaticDataSourcePlannerFactory struct { + base BasePlanner + config StaticDataSourceConfig +} + +func (s StaticDataSourcePlannerFactory) DataSourcePlanner() Planner { + return SimpleDataSourcePlanner(&StaticDataSourcePlanner{ + BasePlanner: s.base, + dataSourceConfig: s.config, + }) +} + +type StaticDataSourcePlanner struct { + BasePlanner + dataSourceConfig StaticDataSourceConfig +} + +func (s *StaticDataSourcePlanner) Plan(args []Argument) (DataSource, []Argument) { + return &StaticDataSource{ + Data: []byte(s.dataSourceConfig.Data), + }, append(s.Args, args...) 
+} + +type StaticDataSource struct { + Data []byte +} + +func (s StaticDataSource) Resolve(ctx context.Context, args ResolverArgs, out io.Writer) (n int, err error) { + return out.Write(s.Data) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/execution/datasource/datasource_type.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/execution/datasource/datasource_type.go new file mode 100644 index 00000000000..5e42207b241 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/execution/datasource/datasource_type.go @@ -0,0 +1,48 @@ +package datasource + +import ( + "context" + "encoding/json" + "io" +) + +type TypeDataSourcePlannerConfig struct { +} + +type TypeDataSourcePlannerFactoryFactory struct { +} + +func (t TypeDataSourcePlannerFactoryFactory) Initialize(base BasePlanner, configReader io.Reader) (PlannerFactory, error) { + factory := TypeDataSourcePlannerFactory{ + base: base, + } + return factory, json.NewDecoder(configReader).Decode(&factory.config) +} + +type TypeDataSourcePlannerFactory struct { + base BasePlanner + config TypeDataSourcePlannerConfig +} + +func (t TypeDataSourcePlannerFactory) DataSourcePlanner() Planner { + return SimpleDataSourcePlanner(&TypeDataSourcePlanner{ + BasePlanner: t.base, + dataSourceConfig: t.config, + }) +} + +type TypeDataSourcePlanner struct { + BasePlanner + dataSourceConfig TypeDataSourcePlannerConfig +} + +func (t *TypeDataSourcePlanner) Plan(args []Argument) (DataSource, []Argument) { + return &TypeDataSource{}, append(t.Args, args...) +} + +type TypeDataSource struct { +} + +func (t *TypeDataSource) Resolve(ctx context.Context, args ResolverArgs, out io.Writer) (n int, err error) { + return +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/execution/datasource/hooks.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/execution/datasource/hooks.go new file mode 100644 index 00000000000..ccd397e3d64 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/execution/datasource/hooks.go @@ -0,0 +1,23 @@ +package datasource + +import ( + "net/http" +) + +type HookContext struct { + TypeName string + FieldName string +} + +type Hooks struct { + PreSendHttpHook PreSendHttpHook + PostReceiveHttpHook PostReceiveHttpHook +} + +type PreSendHttpHook interface { + Execute(ctx HookContext, req *http.Request) +} + +type PostReceiveHttpHook interface { + Execute(ctx HookContext, resp *http.Response, body []byte) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/execution/datasource_config.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/execution/datasource_config.go new file mode 100644 index 00000000000..04416637978 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/execution/datasource_config.go @@ -0,0 +1,417 @@ +// Code generated by graphql-go-tools gen, DO NOT EDIT. 
+package execution + +import ast "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + +type GraphQLDataSourceConfig struct { + Host string + Url string + Method HTTP_METHOD + Params *[]*Parameter +} + +func (g *GraphQLDataSourceConfig) Unmarshal(doc *ast.Document, ref int) { + for _, ii := range doc.Directives[ref].Arguments.Refs { + name := doc.ArgumentNameString(ii) + switch name { + case "host": + val := doc.StringValueContentString(doc.ArgumentValue(ii).Ref) + g.Host = val + case "url": + val := doc.StringValueContentString(doc.ArgumentValue(ii).Ref) + g.Url = val + case "method": + var val HTTP_METHOD + val.Unmarshal(doc, doc.ArgumentValue(ii).Ref) + g.Method = val + case "params": + list := make([]*Parameter, 0, len(doc.ListValues[doc.ArgumentValue(ii).Ref].Refs)) + for _, ii := range doc.ListValues[doc.ArgumentValue(ii).Ref].Refs { + var val Parameter + val.Unmarshal(doc, doc.Value(ii).Ref) + list = append(list, &val) + } + g.Params = &list + } + } +} + +type HttpJsonDataSourceConfig struct { + Host string + Url string + Method HTTP_METHOD + Params *[]*Parameter + Body *string + Headers *[]*Header + DefaultTypeName *string + StatusCodeTypeNameMappings *[]*StatusCodeTypeNameMapping +} + +func (h *HttpJsonDataSourceConfig) Unmarshal(doc *ast.Document, ref int) { + for _, ii := range doc.Directives[ref].Arguments.Refs { + name := doc.ArgumentNameString(ii) + switch name { + case "host": + val := doc.StringValueContentString(doc.ArgumentValue(ii).Ref) + h.Host = val + case "url": + val := doc.StringValueContentString(doc.ArgumentValue(ii).Ref) + h.Url = val + case "method": + var val HTTP_METHOD + val.Unmarshal(doc, doc.ArgumentValue(ii).Ref) + h.Method = val + case "params": + list := make([]*Parameter, 0, len(doc.ListValues[doc.ArgumentValue(ii).Ref].Refs)) + for _, ii := range doc.ListValues[doc.ArgumentValue(ii).Ref].Refs { + var val Parameter + val.Unmarshal(doc, doc.Value(ii).Ref) + list = append(list, &val) + } + h.Params = &list + case "body": + val := doc.StringValueContentString(doc.ArgumentValue(ii).Ref) + h.Body = &val + case "headers": + list := make([]*Header, 0, len(doc.ListValues[doc.ArgumentValue(ii).Ref].Refs)) + for _, ii := range doc.ListValues[doc.ArgumentValue(ii).Ref].Refs { + var val Header + val.Unmarshal(doc, doc.Value(ii).Ref) + list = append(list, &val) + } + h.Headers = &list + case "defaultTypeName": + val := doc.StringValueContentString(doc.ArgumentValue(ii).Ref) + h.DefaultTypeName = &val + case "statusCodeTypeNameMappings": + list := make([]*StatusCodeTypeNameMapping, 0, len(doc.ListValues[doc.ArgumentValue(ii).Ref].Refs)) + for _, ii := range doc.ListValues[doc.ArgumentValue(ii).Ref].Refs { + var val StatusCodeTypeNameMapping + val.Unmarshal(doc, doc.Value(ii).Ref) + list = append(list, &val) + } + h.StatusCodeTypeNameMappings = &list + } + } +} + +type HttpPollingStreamDataSourceConfig struct { + Host string + Url string + Method HTTP_METHOD + DelaySeconds int64 + Params *[]*Parameter +} + +func (h *HttpPollingStreamDataSourceConfig) Unmarshal(doc *ast.Document, ref int) { + h.DelaySeconds = doc.DirectiveDefinitionArgumentDefaultValueInt64("HttpPollingStreamDataSource", "delaySeconds") + for _, ii := range doc.Directives[ref].Arguments.Refs { + name := doc.ArgumentNameString(ii) + switch name { + case "host": + val := doc.StringValueContentString(doc.ArgumentValue(ii).Ref) + h.Host = val + case "url": + val := doc.StringValueContentString(doc.ArgumentValue(ii).Ref) + h.Url = val + case "method": + var val HTTP_METHOD + val.Unmarshal(doc, 
doc.ArgumentValue(ii).Ref) + h.Method = val + case "delaySeconds": + val := doc.IntValueAsInt(doc.ArgumentValue(ii).Ref) + h.DelaySeconds = val + case "params": + list := make([]*Parameter, 0, len(doc.ListValues[doc.ArgumentValue(ii).Ref].Refs)) + for _, ii := range doc.ListValues[doc.ArgumentValue(ii).Ref].Refs { + var val Parameter + val.Unmarshal(doc, doc.Value(ii).Ref) + list = append(list, &val) + } + h.Params = &list + } + } +} + +type MappingConfig struct { + Mode MAPPING_MODE + PathSelector *string +} + +func (m *MappingConfig) Unmarshal(doc *ast.Document, ref int) { + for _, ii := range doc.Directives[ref].Arguments.Refs { + name := doc.ArgumentNameString(ii) + switch name { + case "mode": + var val MAPPING_MODE + val.Unmarshal(doc, doc.ArgumentValue(ii).Ref) + m.Mode = val + case "pathSelector": + val := doc.StringValueContentString(doc.ArgumentValue(ii).Ref) + m.PathSelector = &val + } + } +} + +type MQTTDataSourceConfig struct { + BrokerAddr string + ClientID string + Topic string +} + +func (m *MQTTDataSourceConfig) Unmarshal(doc *ast.Document, ref int) { + for _, ii := range doc.Directives[ref].Arguments.Refs { + name := doc.ArgumentNameString(ii) + switch name { + case "brokerAddr": + val := doc.StringValueContentString(doc.ArgumentValue(ii).Ref) + m.BrokerAddr = val + case "clientID": + val := doc.StringValueContentString(doc.ArgumentValue(ii).Ref) + m.ClientID = val + case "topic": + val := doc.StringValueContentString(doc.ArgumentValue(ii).Ref) + m.Topic = val + } + } +} + +type NatsDataSourceConfig struct { + Addr string + Topic string +} + +func (n *NatsDataSourceConfig) Unmarshal(doc *ast.Document, ref int) { + for _, ii := range doc.Directives[ref].Arguments.Refs { + name := doc.ArgumentNameString(ii) + switch name { + case "addr": + val := doc.StringValueContentString(doc.ArgumentValue(ii).Ref) + n.Addr = val + case "topic": + val := doc.StringValueContentString(doc.ArgumentValue(ii).Ref) + n.Topic = val + } + } +} + +type PipelineDataSourceConfig struct { + ConfigFilePath *string + ConfigString *string + InputJSON string +} + +func (p *PipelineDataSourceConfig) Unmarshal(doc *ast.Document, ref int) { + for _, ii := range doc.Directives[ref].Arguments.Refs { + name := doc.ArgumentNameString(ii) + switch name { + case "configFilePath": + val := doc.StringValueContentString(doc.ArgumentValue(ii).Ref) + p.ConfigFilePath = &val + case "configString": + val := doc.StringValueContentString(doc.ArgumentValue(ii).Ref) + p.ConfigString = &val + case "inputJSON": + val := doc.StringValueContentString(doc.ArgumentValue(ii).Ref) + p.InputJSON = val + } + } +} + +type StaticDataSourceConfig struct { + Data *string +} + +func (s *StaticDataSourceConfig) Unmarshal(doc *ast.Document, ref int) { + for _, ii := range doc.Directives[ref].Arguments.Refs { + name := doc.ArgumentNameString(ii) + switch name { + case "data": + val := doc.StringValueContentString(doc.ArgumentValue(ii).Ref) + s.Data = &val + } + } +} + +type TransformationConfig struct { + Mode TRANSFORMATION_MODE + PipelineConfigFile *string + PipelineConfigString *string +} + +func (t *TransformationConfig) Unmarshal(doc *ast.Document, ref int) { + for _, ii := range doc.Directives[ref].Arguments.Refs { + name := doc.ArgumentNameString(ii) + switch name { + case "mode": + var val TRANSFORMATION_MODE + val.Unmarshal(doc, doc.ArgumentValue(ii).Ref) + t.Mode = val + case "pipelineConfigFile": + val := doc.StringValueContentString(doc.ArgumentValue(ii).Ref) + t.PipelineConfigFile = &val + case "pipelineConfigString": + val := 
doc.StringValueContentString(doc.ArgumentValue(ii).Ref) + t.PipelineConfigString = &val + } + } +} + +type WasmDataSourceConfig struct { + Input string + WasmFile string +} + +func (w *WasmDataSourceConfig) Unmarshal(doc *ast.Document, ref int) { + for _, ii := range doc.Directives[ref].Arguments.Refs { + name := doc.ArgumentNameString(ii) + switch name { + case "input": + val := doc.StringValueContentString(doc.ArgumentValue(ii).Ref) + w.Input = val + case "wasmFile": + val := doc.StringValueContentString(doc.ArgumentValue(ii).Ref) + w.WasmFile = val + } + } +} + +type HTTP_METHOD int + +func (h *HTTP_METHOD) Unmarshal(doc *ast.Document, ref int) { + switch doc.EnumValueNameString(ref) { + case "GET": + *h = HTTP_METHOD_GET + case "POST": + *h = HTTP_METHOD_POST + case "UPDATE": + *h = HTTP_METHOD_UPDATE + case "DELETE": + *h = HTTP_METHOD_DELETE + } +} + +const ( + UNDEFINED_HTTP_METHOD HTTP_METHOD = iota + HTTP_METHOD_GET + HTTP_METHOD_POST + HTTP_METHOD_UPDATE + HTTP_METHOD_DELETE +) + +type MAPPING_MODE int + +func (m *MAPPING_MODE) Unmarshal(doc *ast.Document, ref int) { + switch doc.EnumValueNameString(ref) { + case "NONE": + *m = MAPPING_MODE_NONE + case "PATH_SELECTOR": + *m = MAPPING_MODE_PATH_SELECTOR + } +} + +const ( + UNDEFINED_MAPPING_MODE MAPPING_MODE = iota + MAPPING_MODE_NONE + MAPPING_MODE_PATH_SELECTOR +) + +type PARAMETER_SOURCE int + +func (p *PARAMETER_SOURCE) Unmarshal(doc *ast.Document, ref int) { + switch doc.EnumValueNameString(ref) { + case "CONTEXT_VARIABLE": + *p = PARAMETER_SOURCE_CONTEXT_VARIABLE + case "OBJECT_VARIABLE_ARGUMENT": + *p = PARAMETER_SOURCE_OBJECT_VARIABLE_ARGUMENT + case "FIELD_ARGUMENTS": + *p = PARAMETER_SOURCE_FIELD_ARGUMENTS + } +} + +const ( + UNDEFINED_PARAMETER_SOURCE PARAMETER_SOURCE = iota + PARAMETER_SOURCE_CONTEXT_VARIABLE + PARAMETER_SOURCE_OBJECT_VARIABLE_ARGUMENT + PARAMETER_SOURCE_FIELD_ARGUMENTS +) + +type TRANSFORMATION_MODE int + +func (t *TRANSFORMATION_MODE) Unmarshal(doc *ast.Document, ref int) { + switch doc.EnumValueNameString(ref) { + case "PIPELINE": + *t = TRANSFORMATION_MODE_PIPELINE + } +} + +const ( + UNDEFINED_TRANSFORMATION_MODE TRANSFORMATION_MODE = iota + TRANSFORMATION_MODE_PIPELINE +) + +type Header struct { + Key string + Value string +} + +func (h *Header) Unmarshal(doc *ast.Document, ref int) { + for _, ii := range doc.ObjectValues[ref].Refs { + name := string(doc.ObjectFieldNameBytes(ii)) + switch name { + case "key": + val := doc.StringValueContentString(doc.ObjectFieldValue(ii).Ref) + h.Key = val + case "value": + val := doc.StringValueContentString(doc.ObjectFieldValue(ii).Ref) + h.Value = val + } + } +} + +type Parameter struct { + Name string + SourceKind PARAMETER_SOURCE + SourceName string + VariableType string +} + +func (p *Parameter) Unmarshal(doc *ast.Document, ref int) { + for _, ii := range doc.ObjectValues[ref].Refs { + name := string(doc.ObjectFieldNameBytes(ii)) + switch name { + case "name": + val := doc.StringValueContentString(doc.ObjectFieldValue(ii).Ref) + p.Name = val + case "sourceKind": + var val PARAMETER_SOURCE + val.Unmarshal(doc, doc.ObjectFieldValue(ii).Ref) + p.SourceKind = val + case "sourceName": + val := doc.StringValueContentString(doc.ObjectFieldValue(ii).Ref) + p.SourceName = val + case "variableType": + val := doc.StringValueContentString(doc.ObjectFieldValue(ii).Ref) + p.VariableType = val + } + } +} + +type StatusCodeTypeNameMapping struct { + StatusCode int64 + TypeName string +} + +func (s *StatusCodeTypeNameMapping) Unmarshal(doc *ast.Document, ref int) { + for _, 
ii := range doc.ObjectValues[ref].Refs { + name := string(doc.ObjectFieldNameBytes(ii)) + switch name { + case "statusCode": + val := doc.IntValueAsInt(doc.ObjectFieldValue(ii).Ref) + s.StatusCode = val + case "typeName": + val := doc.StringValueContentString(doc.ObjectFieldValue(ii).Ref) + s.TypeName = val + } + } +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/execution/execution.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/execution/execution.go new file mode 100644 index 00000000000..dcf1071d0ca --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/execution/execution.go @@ -0,0 +1,613 @@ +// Package execution is a complete GraphQL runtime. +// It contains a Handler to orchestrate the execution, a Query Planner to generate a Query Plan from an AST as well as the Executor to execute a Query Plan. +package execution + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "strconv" + "strings" + "sync" + "unicode/utf8" + + "github.com/buger/jsonparser" + "github.com/cespare/xxhash/v2" + byte_template "github.com/jensneuse/byte-template" + "github.com/tidwall/gjson" + + "github.com/TykTechnologies/graphql-go-tools/internal/pkg/unsafebytes" + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/execution/datasource" + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/literal" + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/runes" +) + +type Executor struct { + context Context + out io.Writer + err error + buffers LockableBufferMap + escapeBuf [48]byte + templateDirectives []byte_template.DirectiveDefinition +} + +type LockableBufferMap struct { + sync.Mutex + Buffers map[uint64]*bytes.Buffer +} + +func NewExecutor(templateDirectives []byte_template.DirectiveDefinition) *Executor { + return &Executor{ + buffers: LockableBufferMap{ + Buffers: map[uint64]*bytes.Buffer{}, + }, + templateDirectives: templateDirectives, + } +} + +func (e *Executor) Execute(ctx Context, node RootNode, w io.Writer) error { + e.context = ctx + e.out = w + e.err = nil + + var path string + switch node.OperationType() { + case ast.OperationTypeQuery: + path = "query" + case ast.OperationTypeMutation: + path = "mutation" + case ast.OperationTypeSubscription: + path = "subscription" + } + e.resolveNode(node, nil, path, nil, true) + return e.err +} + +// write writes the data to the out io.Writer if there is no error previously captured +func (e *Executor) write(data []byte) { + if e.err != nil { + return + } + _, e.err = e.out.Write(data) +} + +// writeQuoted quotes and writes the data to the out io.Writer if there is no error previously captured +func (e *Executor) writeQuoted(data []byte) { + e.write(literal.QUOTE) + e.write(data) + e.write(literal.QUOTE) +} + +func (e *Executor) resolveNode(node Node, data []byte, path string, prefetch *sync.WaitGroup, shouldFetch bool) { + + switch node := node.(type) { + case *Object: + if data != nil { // in case data is not nil apply any path selection/transformation and return early if there is no data + data = e.resolveData(node.DataResolvingConfig, data) + if data == nil || bytes.Equal(data, literal.NULL) { + e.write(literal.NULL) + return + } + } + if shouldFetch && node.Fetch != nil { // execute the fetch on the object + _, err := node.Fetch.Fetch(e.context, data, e, path, &e.buffers) + if err != nil { + e.err = err + } + if prefetch != nil { // in case this was a prefetch we can immediately return + prefetch.Done() + return + } + } + 
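// by this point any fetch attached to the object has completed (a prefetch returns early above), so the field loop below can read its resolved buffers +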
e.write(literal.LBRACE) // start writing the object + hasPreviousValue := false + for i := 0; i < len(node.Fields); i++ { + if node.Fields[i].Skip != nil { + if node.Fields[i].Skip.Evaluate(e.context, data) { + continue + } + } + if hasPreviousValue { // separate all values with a comma in case we have at least one previous (unskipped field) + e.write(literal.COMMA) + } + hasPreviousValue = true + e.resolveNode(&node.Fields[i], data, path, nil, true) // recursively evaluate all fields + } + e.write(literal.RBRACE) // end writing the object + case *Field: + path = path + "." + unsafebytes.BytesToString(node.Name) // add the node name to the path using a "." as separator + if node.HasResolvedData { // in case this field has associated resolved data we have to fetch it from the buffer + if buf := e.buffers.Buffers[xxhash.Sum64String(path)]; buf != nil { + data = buf.Bytes() + } + } + e.writeQuoted(node.Name) + e.write(literal.COLON) + if data == nil && !node.Value.HasResolversRecursively() { + e.write(literal.NULL) + return + } + e.resolveNode(node.Value, data, path, nil, true) + case *Value: + data = e.resolveData(node.DataResolvingConfig, data) + _, e.err = node.ValueType.writeValue(data, e.escapeBuf[:], e.out) + return + case *List: + data = e.resolveData(node.DataResolvingConfig, data) + if len(data) == 0 { + e.write(literal.NULL) + return + } + shouldPrefetch := false + switch object := node.Value.(type) { + case *Object: + if object.Fetch != nil { + shouldPrefetch = true + } + } + result := gjson.ParseBytes(data).Array() + listItems := make([][]byte, len(result)) + for i := range result { + if result[i].Type == gjson.String { + listItems[i] = unsafebytes.StringToBytes(result[i].Str) + } else { + listItems[i] = unsafebytes.StringToBytes(result[i].Raw) + } + } + path = path + "." + maxItems := len(listItems) + if node.Filter != nil { + switch filter := node.Filter.(type) { + case *ListFilterFirstN: + if maxItems > filter.FirstN { + maxItems = filter.FirstN + } + } + } + if shouldPrefetch { + wg := &sync.WaitGroup{} + for i := 0; i < maxItems; i++ { + wg.Add(1) + go e.resolveNode(node.Value, listItems[i], path+strconv.Itoa(i), wg, true) + } + wg.Wait() + } + i := 0 + for i = 0; i < maxItems; i++ { + if i == 0 { + e.write(literal.LBRACK) + } else { + e.write(literal.COMMA) + } + e.resolveNode(node.Value, listItems[i], path+strconv.Itoa(i), nil, false) + } + if i == 0 || e.err == jsonparser.KeyPathNotFoundError { + e.err = nil + e.write(literal.LBRACK) + } + e.write(literal.RBRACK) + } +} + +func (e *Executor) resolveData(config DataResolvingConfig, data []byte) []byte { + if len(data) == 0 { + return nil + } + if config.PathSelector.Path == "" { + return data + } + result := gjson.GetBytes(data, config.PathSelector.Path) + if config.Transformation == nil && result.Type == gjson.String { + data = unsafebytes.StringToBytes(result.Str) + } else { + data = unsafebytes.StringToBytes(result.Raw) + } + if config.Transformation == nil { + return data + } + data, e.err = config.Transformation.Transform(data) + return data +} + +func (e *Executor) ResolveArgs(args []datasource.Argument, data []byte) ResolvedArgs { + + args = append(args, e.context.ExtraArguments...) 
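+ // first pass: materialize each argument's raw value; the second pass below substitutes {{ ... }} template placeholders found inside those values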
+ + resolved := make(ResolvedArgs, len(args)) + for i := 0; i < len(args); i++ { + switch arg := args[i].(type) { + case *datasource.StaticVariableArgument: + resolved[i].Key = arg.Name + resolved[i].Value = arg.Value + case *datasource.ObjectVariableArgument: + resolved[i].Key = arg.Name + result := gjson.GetBytes(data, arg.PathSelector.Path) + resolved[i].Value = unsafebytes.StringToBytes(result.Raw) + case *datasource.ContextVariableArgument: + resolved[i].Key = arg.Name + resolved[i].Value = e.context.Variables[xxhash.Sum64(arg.VariableName)] + case *datasource.ListArgument: + resolved[i].Key = arg.Name + listArgs := e.ResolveArgs(arg.Arguments, data) + listValues := make(map[string]string, len(listArgs)) + for j := range listArgs { + listValues[string(listArgs[j].Key)] = string(listArgs[j].Value) + } + resolved[i].Value, _ = json.Marshal(listValues) + } + } + + buf := bytes.Buffer{} + tmpl := byte_template.New(e.templateDirectives...) + + for i := range resolved { + if !bytes.Contains(resolved[i].Value, literal.DOUBLE_LBRACE) { + continue + } + _, err := tmpl.Execute(&buf, resolved[i].Value, func(w io.Writer, path []byte) (n int, err error) { + path = bytes.TrimFunc(path, func(r rune) bool { + return r == runes.SPACE || r == runes.TAB || r == runes.LINETERMINATOR + }) + if bytes.Count(path, literal.DOT) == 1 { + path = bytes.TrimPrefix(path, literal.DOT) + for j := range resolved { + if bytes.Equal(resolved[j].Key, path) { + return w.Write(resolved[j].Value) + } + } + } + if bytes.HasPrefix(path, literal.DOT_OBJECT_DOT) { + path = bytes.TrimPrefix(path, literal.DOT_OBJECT_DOT) + result := gjson.GetBytes(data, unsafebytes.BytesToString(path)) + if result.Type == gjson.String { + return w.Write(unsafebytes.StringToBytes(result.Str)) + } + return w.Write(unsafebytes.StringToBytes(result.Raw)) + } + for j := range resolved { + key := resolved[j].Key + if bytes.HasPrefix(path, literal.DOT) && !bytes.HasPrefix(key, literal.DOT) { + key = append(literal.DOT, key...) 
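+ // the key was given a leading dot above so it can line up with dot-prefixed template paths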
+ } + if !bytes.HasPrefix(path, key) { + continue + } + key = bytes.TrimPrefix(path, key) + if len(key) == 0 { + return w.Write(resolved[j].Value) + } + key = bytes.TrimPrefix(key, literal.DOT) + + result := gjson.GetBytes(resolved[j].Value, unsafebytes.BytesToString(key)) + + if result.Type == gjson.String { + resultBytes := unsafebytes.StringToBytes(strings.Trim(strconv.Quote(result.Str), `"`)) + return w.Write(resultBytes) + } + + rawResultBytes := unsafebytes.StringToBytes(result.Raw) + return w.Write(rawResultBytes) + } + _, _ = w.Write(literal.LBRACE) + _, _ = w.Write(literal.LBRACE) + _, _ = w.Write(literal.SPACE) + _, _ = w.Write(path) + _, _ = w.Write(literal.SPACE) + _, _ = w.Write(literal.RBRACE) + return w.Write(literal.RBRACE) + }) + if err == nil { + value := buf.Bytes() + resolved[i].Value = make([]byte, len(value)) + copy(resolved[i].Value, value) + buf.Reset() + } + } + + resolved.Filter(func(i int) (keep bool) { + return !bytes.HasPrefix(resolved[i].Key, literal.DOT) + }) + + return resolved +} + +const ( + ObjectKind NodeKind = iota + 1 + FieldKind + ListKind + ValueKind +) + +type NodeKind int + +type Node interface { + // Kind returns the NodeKind of each Node + Kind() NodeKind + // HasResolversRecursively returns true if this Node or any child Node has a resolver + HasResolversRecursively() bool +} + +type RootNode interface { + Node + OperationType() ast.OperationType +} + +type Context struct { + context.Context + Variables Variables + ExtraArguments []datasource.Argument +} + +type Variables map[uint64][]byte + +type ResolvedArgument struct { + Key []byte + Value []byte +} + +type ResolvedArgs []ResolvedArgument + +func (r ResolvedArgs) Keys() [][]byte { + keys := make([][]byte, len(r)) + for i := range r { + keys[i] = (r)[i].Key + } + return keys +} + +func (r *ResolvedArgs) Filter(condition func(i int) (keep bool)) { + n := 0 + for i := range *r { + if condition(i) { + (*r)[n] = (*r)[i] + n++ + } + } + *r = (*r)[:n] +} + +func (r ResolvedArgs) ByKey(key []byte) []byte { + for i := 0; i < len(r); i++ { + if bytes.Equal(r[i].Key, key) { + return r[i].Value + } + } + return nil +} + +func (r ResolvedArgs) Dump() []string { + out := make([]string, len(r)) + for i := range r { + out[i] = string(r[i].Key) + "=" + string(r[i].Value) + } + return out +} + +type DataResolvingConfig struct { + PathSelector datasource.PathSelector + Transformation Transformation +} + +type Object struct { + DataResolvingConfig DataResolvingConfig + Fields []Field + Fetch Fetch + operationType ast.OperationType +} + +func (o *Object) OperationType() ast.OperationType { + return o.operationType +} + +type ArgsResolver interface { + ResolveArgs(args []datasource.Argument, data []byte) ResolvedArgs +} + +type Fetch interface { + Fetch(ctx Context, data []byte, argsResolver ArgsResolver, suffix string, buffers *LockableBufferMap) (n int, err error) +} + +type SingleFetch struct { + Source *DataSourceInvocation + BufferName string +} + +func (s *SingleFetch) Fetch(ctx Context, data []byte, argsResolver ArgsResolver, path string, buffers *LockableBufferMap) (int, error) { + bufferName := path + "." 
+ s.BufferName + hash := xxhash.Sum64String(bufferName) + buffers.Lock() + buffer, exists := buffers.Buffers[hash] + buffers.Unlock() + if !exists { + buffer = bytes.NewBuffer(make([]byte, 0, 1024)) + buffers.Lock() + buffers.Buffers[hash] = buffer + buffers.Unlock() + } else { + buffer.Reset() + } + return s.Source.DataSource.Resolve(ctx, argsResolver.ResolveArgs(s.Source.Args, data), buffer) +} + +type SerialFetch struct { + Fetches []Fetch +} + +func (s *SerialFetch) Fetch(ctx Context, data []byte, argsResolver ArgsResolver, suffix string, buffers *LockableBufferMap) (n int, err error) { + for i := 0; i < len(s.Fetches); i++ { + nextN, nextErr := s.Fetches[i].Fetch(ctx, data, argsResolver, suffix, buffers) + if nextErr != nil { + return n, nextErr + } + n = n + nextN + } + return +} + +type ParallelFetch struct { + wg sync.WaitGroup + Fetches []Fetch +} + +func (p *ParallelFetch) Fetch(ctx Context, data []byte, argsResolver ArgsResolver, suffix string, buffers *LockableBufferMap) (n int, err error) { + for i := 0; i < len(p.Fetches); i++ { + p.wg.Add(1) + go func(fetch Fetch, ctx Context, data []byte, argsResolver ArgsResolver) { + _, _ = fetch.Fetch(ctx, data, argsResolver, suffix, buffers) // TODO: handle results + p.wg.Done() + }(p.Fetches[i], ctx, data, argsResolver) + } + p.wg.Wait() + return +} + +func (o *Object) HasResolversRecursively() bool { + for i := 0; i < len(o.Fields); i++ { + if o.Fields[i].HasResolversRecursively() { + return true + } + } + return false +} + +func (*Object) Kind() NodeKind { + return ObjectKind +} + +type BooleanCondition interface { + Evaluate(ctx Context, data []byte) bool +} + +type Field struct { + Name []byte + Value Node + Skip BooleanCondition + HasResolvedData bool +} + +func (f *Field) HasResolversRecursively() bool { + return f.HasResolvedData || f.Value.HasResolversRecursively() +} + +type IfEqual struct { + Left, Right datasource.Argument +} + +func (i *IfEqual) Evaluate(ctx Context, data []byte) bool { + var left []byte + var right []byte + + switch value := i.Left.(type) { + case *datasource.ContextVariableArgument: + left = ctx.Variables[xxhash.Sum64(value.VariableName)] + case *datasource.ObjectVariableArgument: + result := gjson.GetBytes(data, value.PathSelector.Path) + if result.Type == gjson.String { + left = unsafebytes.StringToBytes(result.Str) + } else { + left = unsafebytes.StringToBytes(result.Raw) + } + case *datasource.StaticVariableArgument: + left = value.Value + } + + switch value := i.Right.(type) { + case *datasource.ContextVariableArgument: + right = ctx.Variables[xxhash.Sum64(value.VariableName)] + case *datasource.ObjectVariableArgument: + result := gjson.GetBytes(data, value.PathSelector.Path) + if result.Type == gjson.String { + right = unsafebytes.StringToBytes(result.Str) + } else { + right = unsafebytes.StringToBytes(result.Raw) + } + case *datasource.StaticVariableArgument: + right = value.Value + } + + return bytes.Equal(left, right) +} + +type IfNotEqual struct { + Left, Right datasource.Argument +} + +func (i *IfNotEqual) Evaluate(ctx Context, data []byte) bool { + equal := IfEqual{ + Left: i.Left, + Right: i.Right, + } + return !equal.Evaluate(ctx, data) +} + +func (*Field) Kind() NodeKind { + return FieldKind +} + +type Value struct { + DataResolvingConfig DataResolvingConfig + ValueType JSONValueType +} + +func (value *Value) HasResolversRecursively() bool { + return false +} + +func (*Value) Kind() NodeKind { + return ValueKind +} + +type List struct { + DataResolvingConfig DataResolvingConfig + Value Node 
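+ // Filter optionally limits how many items of the list get resolved (see ListFilterFirstN)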
+ Filter ListFilter +} + +func (l *List) HasResolversRecursively() bool { + return l.Value.HasResolversRecursively() +} + +func (*List) Kind() NodeKind { + return ListKind +} + +type ListFilter interface { + Kind() ListFilterKind +} + +type ListFilterKind int + +const ( + ListFilterKindFirstN ListFilterKind = iota + 1 +) + +type ListFilterFirstN struct { + FirstN int +} + +func (_ ListFilterFirstN) Kind() ListFilterKind { + return ListFilterKindFirstN +} + +type DataSourceInvocation struct { + Args []datasource.Argument + DataSource datasource.DataSource +} + +func isJSONObjectAsBytes(input []byte) bool { + trimmedInput := bytes.Trim(input, " ") + firstRune, _ := utf8.DecodeRune(trimmedInput) + lastRune, _ := utf8.DecodeLastRune(trimmedInput) + return fmt.Sprintf("%c", firstRune) == "{" && fmt.Sprintf("%c", lastRune) == "}" +} + +func byteSliceContainsEscapedQuotes(input []byte) bool { + return bytes.Contains(input, []byte(`\"`)) +} + +func byteSliceContainsQuotes(input []byte) bool { + return bytes.Contains(input, []byte(`"`)) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/execution/handler.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/execution/handler.go new file mode 100644 index 00000000000..c88b4b089d7 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/execution/handler.go @@ -0,0 +1,110 @@ +//go:generate packr +//go:generate graphql-go-tools gen directiveUnmarshalCode -f ./graphql_definitions/**/*.graphql -p execution -o ./datasource_config.go -s Config +package execution + +import ( + "encoding/json" + + "github.com/buger/jsonparser" + "github.com/cespare/xxhash/v2" + byte_template "github.com/jensneuse/byte-template" + + "github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization" + "github.com/TykTechnologies/graphql-go-tools/pkg/astparser" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation" + "github.com/TykTechnologies/graphql-go-tools/pkg/execution/datasource" +) + +type Handler struct { + templateDirectives []byte_template.DirectiveDefinition + base *datasource.BasePlanner +} + +func NewHandler(base *datasource.BasePlanner, templateDirectives []byte_template.DirectiveDefinition) *Handler { + return &Handler{ + templateDirectives: templateDirectives, + base: base, + } +} + +type GraphqlRequest struct { + OperationName string `json:"operationName"` + Variables json.RawMessage `json:"variables"` + Query string `json:"query"` +} + +func (h *Handler) Handle(requestData, extraVariables []byte) (executor *Executor, node RootNode, ctx Context, err error) { + + var graphqlRequest GraphqlRequest + err = json.Unmarshal(requestData, &graphqlRequest) + if err != nil { + return + } + + operationDocument, report := astparser.ParseGraphqlDocumentString(graphqlRequest.Query) + if report.HasErrors() { + err = report + return + } + + variables, extraArguments := VariablesFromJson(graphqlRequest.Variables, extraVariables) + + planner := NewPlanner(h.base) + if report.HasErrors() { + err = report + return + } + + astnormalization.NormalizeOperation(&operationDocument, h.base.Definition, &report) + if report.HasErrors() { + err = report + return + } + + validator := astvalidation.DefaultOperationValidator() + if report.HasErrors() { + err = report + return + } + validator.Validate(&operationDocument, h.base.Definition, &report) + if report.HasErrors() { + err = report + return + } + normalizer := astnormalization.NewNormalizer(true, true) + normalizer.NormalizeOperation(&operationDocument, h.base.Definition, &report) 
+ if report.HasErrors() { + err = report + return + } + plan := planner.Plan(&operationDocument, h.base.Definition, graphqlRequest.OperationName, &report) + if report.HasErrors() { + err = report + return + } + + executor = NewExecutor(h.templateDirectives) + ctx = Context{ + Variables: variables, + ExtraArguments: extraArguments, + } + + return executor, plan, ctx, err +} + +func VariablesFromJson(requestVariables, extraVariables []byte) (variables Variables, extraArguments []datasource.Argument) { + variables = map[uint64][]byte{} + _ = jsonparser.ObjectEach(requestVariables, func(key []byte, value []byte, dataType jsonparser.ValueType, offset int) error { + variables[xxhash.Sum64(key)] = value + return nil + }) + _ = jsonparser.ObjectEach(extraVariables, func(key []byte, value []byte, dataType jsonparser.ValueType, offset int) error { + variables[xxhash.Sum64(key)] = value + extraArguments = append(extraArguments, &datasource.ContextVariableArgument{ + Name: key, + VariableName: key, + }) + return nil + }) + return +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/execution/jsonvaluetype.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/execution/jsonvaluetype.go new file mode 100644 index 00000000000..49859fed81b --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/execution/jsonvaluetype.go @@ -0,0 +1,84 @@ +//go:generate stringer -type=JSONValueType +package execution + +import ( + "bytes" + "fmt" + "io" + "strconv" + + "github.com/TykTechnologies/graphql-go-tools/internal/pkg/unsafebytes" + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/literal" +) + +type JSONValueType int + +const ( + UnknownValueType JSONValueType = iota + StringValueType + IntegerValueType + FloatValueType + BooleanValueType +) + +type ErrJSONValueTypeValueIncompatible struct { + value []byte + valueType JSONValueType +} + +func (e ErrJSONValueTypeValueIncompatible) Error() string { + return fmt.Sprintf("JSONValueType.writeValue: cannot write %s as %s", unsafebytes.BytesToString(e.value), e.valueType) +} + +func (i JSONValueType) writeValue(value, escapeBuf []byte, out io.Writer) (n int, err error) { + + if len(value) == 0 || bytes.Equal(value, literal.NULL) { + return i.write(n, err, out, literal.NULL) + } + + switch i { + case StringValueType: + return i.write(n, err, out, []byte(strconv.Quote(string(value)))) + case IntegerValueType: + if !unsafebytes.BytesIsValidInt64(value) { + return n, ErrJSONValueTypeValueIncompatible{ + value: value, + valueType: i, + } + } + return i.write(n, err, out, value) + case FloatValueType: + if !unsafebytes.BytesIsValidFloat32(value) { + return n, ErrJSONValueTypeValueIncompatible{ + value: value, + valueType: i, + } + } + return i.write(n, err, out, value) + case BooleanValueType: + if !unsafebytes.BytesIsValidBool(value) { + return n, ErrJSONValueTypeValueIncompatible{ + value: value, + valueType: i, + } + } + if unsafebytes.BytesToBool(value) { + return i.write(n, err, out, literal.TRUE) + } else { + return i.write(n, err, out, literal.FALSE) + } + default: + return n, ErrJSONValueTypeValueIncompatible{ + value: value, + valueType: i, + } + } +} + +func (i JSONValueType) write(n int, err error, out io.Writer, value []byte) (int, error) { + if err != nil { + return n, err + } + written, err := out.Write(value) + return n + written, err +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/execution/jsonvaluetype_string.go 
b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/execution/jsonvaluetype_string.go new file mode 100644 index 00000000000..885bb9805e6 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/execution/jsonvaluetype_string.go @@ -0,0 +1,27 @@ +// Code generated by "stringer -type=JSONValueType"; DO NOT EDIT. + +package execution + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[UnknownValueType-0] + _ = x[StringValueType-1] + _ = x[IntegerValueType-2] + _ = x[FloatValueType-3] + _ = x[BooleanValueType-4] +} + +const _JSONValueType_name = "UnknownValueTypeStringValueTypeIntegerValueTypeFloatValueTypeBooleanValueType" + +var _JSONValueType_index = [...]uint8{0, 16, 31, 47, 61, 77} + +func (i JSONValueType) String() string { + if i < 0 || i >= JSONValueType(len(_JSONValueType_index)-1) { + return "JSONValueType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _JSONValueType_name[_JSONValueType_index[i]:_JSONValueType_index[i+1]] +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/execution/planning.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/execution/planning.go new file mode 100644 index 00000000000..8cf85e78c8a --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/execution/planning.go @@ -0,0 +1,436 @@ +package execution + +import ( + "bytes" + "io" + "os" + + "github.com/jensneuse/pipeline/pkg/pipe" + + "github.com/TykTechnologies/graphql-go-tools/internal/pkg/unsafebytes" + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" + "github.com/TykTechnologies/graphql-go-tools/pkg/execution/datasource" + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/literal" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +type Planner struct { + walker *astvisitor.Walker + visitor *planningVisitor +} + +type DataSourceDefinition struct { + // the type name to which the data source is attached + TypeName []byte + // the field on the type to which the data source is attached + FieldName []byte + // a factory method to return a new planner + DataSourcePlannerFactory func() datasource.Planner +} + +func NewPlanner(base *datasource.BasePlanner) *Planner { + walker := astvisitor.NewWalker(48) + visitor := planningVisitor{ + Walker: &walker, + base: base, + } + + walker.RegisterDocumentVisitor(&visitor) + walker.RegisterEnterFieldVisitor(&visitor) + walker.RegisterEnterArgumentVisitor(&visitor) + walker.RegisterLeaveFieldVisitor(&visitor) + walker.RegisterEnterSelectionSetVisitor(&visitor) + walker.RegisterLeaveSelectionSetVisitor(&visitor) + walker.RegisterEnterInlineFragmentVisitor(&visitor) + walker.RegisterLeaveInlineFragmentVisitor(&visitor) + walker.RegisterEnterOperationVisitor(&visitor) + + return &Planner{ + walker: &walker, + visitor: &visitor, + } +} + +func (p *Planner) Plan(operation, definition *ast.Document, operationName string, report *operationreport.Report) RootNode { + p.visitor.operationName = operationName + p.walker.Walk(operation, definition, report) + return p.visitor.rootNode +} + +type planningVisitor struct { + *astvisitor.Walker + base *datasource.BasePlanner + operation, definition *ast.Document + rootNode RootNode + currentNode []Node + planners []dataSourcePlannerRef + operationName string + foundOperation bool + isSingleOperation bool +} + 
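+// dataSourcePlannerRef remembers the path and field ref at which a data source planner was registered, so that LeaveField can pop it again at exactly that position.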
+type dataSourcePlannerRef struct { + path ast.Path + fieldRef int + planner datasource.Planner +} + +func (p *planningVisitor) EnterDocument(operation, definition *ast.Document) { + p.operation, p.definition, p.base.Definition = operation, definition, definition + p.foundOperation = false + p.isSingleOperation = p.countOperationDefinitionsInRootNodes() == 1 + + if len(operation.OperationDefinitions) == 0 { + p.Walker.StopWithExternalErr(operationreport.ErrDocumentDoesntContainExecutableOperation()) + return + } + + p.currentNode = p.currentNode[:0] + if len(p.planners) != 0 { + p.planners[len(p.planners)-1].planner.EnterDocument(operation, definition) + } +} + +func (p *planningVisitor) LeaveDocument(operation, definition *ast.Document) { + if !p.isSingleOperation && len(p.operationName) == 0 { + p.Report.AddExternalError(operationreport.ErrRequiredOperationNameIsMissing()) + } else if !p.foundOperation { + p.Report.AddExternalError(operationreport.ErrOperationWithProvidedOperationNameNotFound(p.operationName)) + } +} + +func (p *planningVisitor) EnterOperationDefinition(ref int) { + operationName := p.operation.OperationDefinitionNameString(ref) + if !p.isSingleOperation && operationName != p.operationName { + p.SkipNode() + return + } + + p.foundOperation = true + obj := &Object{} + p.rootNode = &Object{ + operationType: p.operation.OperationDefinitions[ref].OperationType, + Fields: []Field{ + { + Name: literal.DATA, + Value: obj, + }, + }, + } + p.currentNode = append(p.currentNode, obj) +} + +func (p *planningVisitor) EnterInlineFragment(ref int) { + if len(p.planners) != 0 { + p.planners[len(p.planners)-1].planner.EnterInlineFragment(ref) + } +} + +func (p *planningVisitor) LeaveInlineFragment(ref int) { + if len(p.planners) != 0 { + p.planners[len(p.planners)-1].planner.LeaveInlineFragment(ref) + } +} + +func (p *planningVisitor) EnterField(ref int) { + fieldName := p.operation.FieldNameUnsafeString(ref) + + definition, exists := p.FieldDefinition(ref) + if !exists { + return + } + + typeName := p.definition.NodeResolverTypeNameString(p.EnclosingTypeDefinition, p.Path) + + plannerFactory := p.base.Config.DataSourcePlannerFactoryForTypeField(typeName, fieldName) + if plannerFactory != nil { + planner := plannerFactory.DataSourcePlanner() + planner.Configure(p.operation, p.definition, p.Walker) + p.planners = append(p.planners, dataSourcePlannerRef{ + path: p.Path, + fieldRef: ref, + planner: planner, + }) + } + + if len(p.planners) != 0 { + p.planners[len(p.planners)-1].planner.EnterField(ref) + } + + switch parent := p.currentNode[len(p.currentNode)-1].(type) { + case *Object: + + var skipCondition BooleanCondition + ancestor := p.Ancestors[len(p.Ancestors)-2] + if ancestor.Kind == ast.NodeKindInlineFragment { + typeConditionName := p.operation.InlineFragmentTypeConditionName(ancestor.Ref) + skipCondition = &IfNotEqual{ + Left: &datasource.ObjectVariableArgument{ + PathSelector: datasource.PathSelector{ + Path: "__typename", + }, + }, + Right: &datasource.StaticVariableArgument{ + Value: typeConditionName, + }, + } + } + + dataResolvingConfig := p.fieldDataResolvingConfig(ref) + + var value Node + fieldDefinitionType := p.definition.FieldDefinitionType(definition) + + if p.definition.TypeIsList(fieldDefinitionType) { + + if !p.operation.FieldHasSelections(ref) { + value = &Value{ + ValueType: p.jsonValueType(fieldDefinitionType), + } + } else { + value = &Object{} + } + + list := &List{ + DataResolvingConfig: dataResolvingConfig, + Value: value, + } + + firstNValue, ok := 
p.FieldDefinitionDirectiveArgumentValueByName(ref, []byte("ListFilterFirstN"), []byte("n")) + if ok { + if firstNValue.Kind == ast.ValueKindInteger { + firstN := p.definition.IntValueAsInt32(firstNValue.Ref) + list.Filter = &ListFilterFirstN{ + FirstN: int(firstN), + } + } + } + + parent.Fields = append(parent.Fields, Field{ + Name: p.operation.FieldNameBytes(ref), + Value: list, + Skip: skipCondition, + }) + + p.currentNode = append(p.currentNode, value) + return + } + + if !p.operation.FieldHasSelections(ref) { + value = &Value{ + DataResolvingConfig: dataResolvingConfig, + ValueType: p.jsonValueType(fieldDefinitionType), + } + } else { + value = &Object{ + DataResolvingConfig: dataResolvingConfig, + } + } + + parent.Fields = append(parent.Fields, Field{ + Name: p.operation.FieldAliasOrNameBytes(ref), + Value: value, + Skip: skipCondition, + }) + + p.currentNode = append(p.currentNode, value) + } +} + +func (p *planningVisitor) EnterArgument(ref int) { + if len(p.planners) != 0 { + p.planners[len(p.planners)-1].planner.EnterArgument(ref) + } +} + +func (p *planningVisitor) LeaveField(ref int) { + + var plannedDataSource datasource.DataSource + var plannedArgs []datasource.Argument + + if len(p.planners) != 0 { + + p.planners[len(p.planners)-1].planner.LeaveField(ref) + + if p.planners[len(p.planners)-1].path.Equals(p.Path) && p.planners[len(p.planners)-1].fieldRef == ref { + plannedDataSource, plannedArgs = p.planners[len(p.planners)-1].planner.Plan(p.fieldContextVariableArguments(ref)) + p.planners = p.planners[:len(p.planners)-1] + + if len(p.currentNode) >= 2 { + switch parent := p.currentNode[len(p.currentNode)-2].(type) { + case *Object: + for i := 0; i < len(parent.Fields); i++ { + if bytes.Equal(p.operation.FieldAliasOrNameBytes(ref), parent.Fields[i].Name) { + + pathName := p.operation.FieldAliasOrNameString(ref) + parent.Fields[i].HasResolvedData = true + + singleFetch := &SingleFetch{ + Source: &DataSourceInvocation{ + Args: plannedArgs, + DataSource: plannedDataSource, + }, + BufferName: pathName, + } + + if parent.Fetch == nil { + parent.Fetch = singleFetch + } else { + switch fetch := parent.Fetch.(type) { + case *ParallelFetch: + fetch.Fetches = append(fetch.Fetches, singleFetch) + case *SerialFetch: + fetch.Fetches = append(fetch.Fetches, singleFetch) + case *SingleFetch: + first := *fetch + parent.Fetch = &ParallelFetch{ + Fetches: []Fetch{ + &first, + singleFetch, + }, + } + } + } + } + } + } + } + } + } + + p.currentNode = p.currentNode[:len(p.currentNode)-1] +} + +func (p *planningVisitor) fieldContextVariableArguments(ref int) []datasource.Argument { + // args + if p.operation.FieldHasArguments(ref) { + refs := p.operation.FieldArguments(ref) + out := make([]datasource.Argument, len(refs)) + for j, i := range refs { + argName := p.operation.ArgumentNameBytes(i) + value := p.operation.ArgumentValue(i) + if value.Kind != ast.ValueKindVariable { + continue + } + variableName := p.operation.VariableValueNameBytes(value.Ref) + name := append([]byte(".arguments."), argName...) 
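+ // variable arguments are exposed as context variables in the ".arguments." namespace; ResolveArgs filters these dot-prefixed keys out of the final argument set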
+ arg := &datasource.ContextVariableArgument{ + VariableName: variableName, + Name: make([]byte, len(name)), + } + copy(arg.Name, name) + out[j] = arg + } + return out + } + return nil +} + +func (p *planningVisitor) EnterSelectionSet(ref int) { + if len(p.planners) != 0 { + p.planners[len(p.planners)-1].planner.EnterSelectionSet(ref) + } +} + +func (p *planningVisitor) LeaveSelectionSet(ref int) { + if len(p.planners) != 0 { + p.planners[len(p.planners)-1].planner.LeaveSelectionSet(ref) + } +} + +func (p *planningVisitor) jsonValueType(valueType int) JSONValueType { + typeName := p.definition.ResolveTypeNameBytes(valueType) + switch { + case bytes.Equal(typeName, literal.INT): + return IntegerValueType + case bytes.Equal(typeName, literal.BOOLEAN): + return BooleanValueType + case bytes.Equal(typeName, literal.FLOAT): + return FloatValueType + default: + return StringValueType + } +} + +func (p *planningVisitor) fieldDataResolvingConfig(ref int) DataResolvingConfig { + return DataResolvingConfig{ + PathSelector: p.fieldPathSelector(ref), + Transformation: p.fieldTransformation(ref), + } +} + +func (p *planningVisitor) fieldPathSelector(ref int) (selector datasource.PathSelector) { + fieldName := p.operation.FieldNameUnsafeString(ref) + typeName := p.definition.NodeResolverTypeNameString(p.EnclosingTypeDefinition, p.Path) + mapping := p.base.Config.MappingForTypeField(typeName, fieldName) + if mapping == nil { + selector.Path = fieldName + return + } + if mapping.Disabled { + return + } + selector.Path = mapping.Path + return +} + +func (p *planningVisitor) fieldTransformation(ref int) Transformation { + definition, ok := p.FieldDefinition(ref) + if !ok { + return nil + } + transformationDirective, ok := p.definition.FieldDefinitionDirectiveByName(definition, literal.TRANSFORMATION) + if !ok { + return nil + } + modeValue, ok := p.definition.DirectiveArgumentValueByName(transformationDirective, literal.MODE) + if !ok || modeValue.Kind != ast.ValueKindEnum { + return nil + } + mode := unsafebytes.BytesToString(p.definition.EnumValueNameBytes(modeValue.Ref)) + switch mode { + case "PIPELINE": + return p.pipelineTransformation(transformationDirective) + default: + return nil + } +} + +func (p *planningVisitor) pipelineTransformation(directive int) *PipelineTransformation { + var configReader io.Reader + configFileStringValue, ok := p.definition.DirectiveArgumentValueByName(directive, literal.PIPELINE_CONFIG_FILE) + if ok && configFileStringValue.Kind == ast.ValueKindString { + reader, err := os.Open(p.definition.StringValueContentString(configFileStringValue.Ref)) + if err != nil { + return nil + } + defer reader.Close() + configReader = reader + } + configStringValue, ok := p.definition.DirectiveArgumentValueByName(directive, literal.PIPELINE_CONFIG_STRING) + if ok && configStringValue.Kind == ast.ValueKindString { + configReader = bytes.NewReader(p.definition.StringValueContentBytes(configStringValue.Ref)) + } + if configReader == nil { + return nil + } + var pipeline pipe.Pipeline + err := pipeline.FromConfig(configReader) + if err != nil { + return nil + } + return &PipelineTransformation{ + pipeline: pipeline, + } +} + +func (p *planningVisitor) countOperationDefinitionsInRootNodes() (count int) { + for i := range p.operation.RootNodes { + if p.operation.RootNodes[i].Kind == ast.NodeKindOperationDefinition { + count++ + } + } + + return count +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/execution/transformation.go 
b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/execution/transformation.go new file mode 100644 index 00000000000..d90cf7316dd --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/execution/transformation.go @@ -0,0 +1,22 @@ +package execution + +import ( + "bytes" + + "github.com/jensneuse/pipeline/pkg/pipe" +) + +type Transformation interface { + Transform(input []byte) ([]byte, error) +} + +type PipelineTransformation struct { + pipeline pipe.Pipeline + buf bytes.Buffer +} + +func (p *PipelineTransformation) Transform(input []byte) ([]byte, error) { + p.buf.Reset() + err := p.pipeline.Run(bytes.NewReader(input), &p.buf) + return p.buf.Bytes(), err +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/fastbuffer/fastbuffer.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/fastbuffer/fastbuffer.go new file mode 100644 index 00000000000..4b7384cb575 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/fastbuffer/fastbuffer.go @@ -0,0 +1,51 @@ +package fastbuffer + +import ( + "reflect" + "unsafe" +) + +func New() *FastBuffer { + return &FastBuffer{ + b: make([]byte, 0, 1024), + } +} + +type FastBuffer struct { + b []byte +} + +func (f *FastBuffer) Write(p []byte) (n int, err error) { + f.b = append(f.b, p...) + return len(p), nil +} + +func (f *FastBuffer) Reset() { + f.b = f.b[:0] +} + +func (f *FastBuffer) WriteBytes(b []byte) { + f.b = append(f.b, b...) +} + +func (f *FastBuffer) WriteString(s string) { + f.b = append(f.b, s...) +} + +func (f *FastBuffer) Bytes() []byte { + return f.b +} + +func (f *FastBuffer) Len() int { + return len(f.b) +} + +func (f *FastBuffer) UnsafeString() string { + sliceHeader := (*reflect.SliceHeader)(unsafe.Pointer(&f.b)) + stringHeader := reflect.StringHeader{Data: sliceHeader.Data, Len: sliceHeader.Len} + return *(*string)(unsafe.Pointer(&stringHeader)) //nolint:govet +} + +func (f *FastBuffer) String() string { + return string(f.b) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/federation/schema.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/federation/schema.go new file mode 100644 index 00000000000..1b2cf73157a --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/federation/schema.go @@ -0,0 +1,182 @@ +package federation + +import ( + "fmt" + "strings" + + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astparser" + "github.com/TykTechnologies/graphql-go-tools/pkg/astprinter" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" + "github.com/TykTechnologies/graphql-go-tools/pkg/federation/sdlmerge" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +func BuildBaseSchemaDocument(serviceSDLs ...string) (string, error) { + return sdlmerge.MergeSDLs(serviceSDLs...) 
+} + +func BuildFederationSchema(baseSchema, serviceSDL string) (string, error) { + builder := schemaBuilder{} + return builder.buildFederationSchema(baseSchema, serviceSDL) +} + +// schemaBuilder makes GraphQL schemas compliant with the Apollo Federation Specification +type schemaBuilder struct { +} + +// BuildFederationSchema takes a baseSchema plus the service sdl and turns it into a fully compliant federation schema +func (s *schemaBuilder) buildFederationSchema(baseSchema, serviceSDL string) (string, error) { + unionTypes := s.entityUnionTypes(serviceSDL) + if len(unionTypes) == 0 { + return baseSchema, nil + } + allUnionTypes := strings.Join(unionTypes, " | ") + federationExtension := fmt.Sprintf(federationTemplate, allUnionTypes) + + baseSchema = s.extendQueryTypeWithFederationFields(baseSchema) + + federatedSchema := baseSchema + federationExtension + return federatedSchema, nil +} + +func (s *schemaBuilder) extendQueryTypeWithFederationFields(schema string) string { + doc := ast.NewDocument() + doc.Input.ResetInputString(schema) + parser := astparser.NewParser() + report := &operationreport.Report{} + parser.Parse(doc, report) + if report.HasErrors() { + return schema + } + queryTypeName := doc.Index.QueryTypeName.String() + if queryTypeName == "" { + queryTypeName = "Query" + } + for i := range doc.ObjectTypeDefinitions { + name := doc.ObjectTypeDefinitionNameString(i) + if name == queryTypeName { + s.extendQueryType(doc, i) + out, err := astprinter.PrintStringIndent(doc, nil, " ") + if err != nil { + return schema + } + return out + } + } + return schema +} + +func (s *schemaBuilder) extendQueryType(doc *ast.Document, ref int) { + serviceType := doc.AddNonNullNamedType([]byte("_Service")) + + serviceFieldDefRef := doc.ImportFieldDefinition( + "_service", + "", + serviceType, + nil, + nil, + ) + + doc.ObjectTypeDefinitions[ref].HasFieldDefinitions = true + doc.ObjectTypeDefinitions[ref].FieldsDefinition.Refs = append(doc.ObjectTypeDefinitions[ref].FieldsDefinition.Refs, serviceFieldDefRef) + + anyType := doc.AddNonNullNamedType([]byte("_Any")) + entityType := doc.AddNamedType([]byte("_Entity")) + listOfAnyType := doc.AddListType(anyType) + nonNullListOfAnyType := doc.AddNonNullType(listOfAnyType) + listOfEntityType := doc.AddListType(entityType) + nonNullListOfEntityType := doc.AddNonNullType(listOfEntityType) + + representationsArg := doc.ImportInputValueDefinition( + "representations", + "", + nonNullListOfAnyType, + ast.DefaultValue{}) + + entitiesFDRef := doc.ImportFieldDefinition( + "_entities", + "", + nonNullListOfEntityType, + []int{representationsArg}, + nil) + + doc.ObjectTypeDefinitions[ref].FieldsDefinition.Refs = append(doc.ObjectTypeDefinitions[ref].FieldsDefinition.Refs, entitiesFDRef) +} + +// _entities(representations: [_Any!]!): [_Entity]! +// _service: _Service! 
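+//
+// For illustration only (a sketch, not part of the upstream file): given a base
+// schema such as
+//
+//	type Query { me: User }
+//
+// extendQueryType rewrites the query type to roughly
+//
+//	type Query {
+//		me: User
+//		_service: _Service!
+//		_entities(representations: [_Any!]!): [_Entity]!
+//	}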
+ +func (s *schemaBuilder) entityUnionTypes(serviceSDL string) []string { + doc := ast.NewDocument() + doc.Input.ResetInputString(serviceSDL) + parser := astparser.NewParser() + report := &operationreport.Report{} + parser.Parse(doc, report) + if report.HasErrors() { + return nil + } + + walker := astvisitor.NewWalker(4) + visitor := &schemaBuilderVisitor{} + walker.RegisterEnterDocumentVisitor(visitor) + walker.RegisterEnterObjectTypeDefinitionVisitor(visitor) + walker.RegisterEnterObjectTypeExtensionVisitor(visitor) + walker.Walk(doc, nil, report) + if report.HasErrors() { + return nil + } + return visitor.entityUnionTypes +} + +type schemaBuilderVisitor struct { + definition *ast.Document + entityUnionTypes []string +} + +func (s *schemaBuilderVisitor) addEntity(entity string) { + for i := range s.entityUnionTypes { + if s.entityUnionTypes[i] == entity { + return + } + } + s.entityUnionTypes = append(s.entityUnionTypes, entity) +} + +func (s *schemaBuilderVisitor) EnterDocument(operation, definition *ast.Document) { + s.definition = operation +} + +func (s *schemaBuilderVisitor) EnterObjectTypeExtension(ref int) { + for _, i := range s.definition.ObjectTypeExtensions[ref].Directives.Refs { + if s.definition.DirectiveNameString(i) == "key" { + s.addEntity(s.definition.ObjectTypeExtensionNameString(ref)) + } + } +} + +func (s *schemaBuilderVisitor) EnterObjectTypeDefinition(ref int) { + for _, i := range s.definition.ObjectTypeDefinitions[ref].Directives.Refs { + if s.definition.DirectiveNameString(i) == "key" { + s.addEntity(s.definition.ObjectTypeDefinitionNameString(ref)) + } + } +} + +const federationTemplate = ` + +scalar _Any +scalar _FieldSet + +union _Entity = %s + +type _Service { + sdl: String +} + +directive @external on FIELD_DEFINITION +directive @requires(fields: _FieldSet!) on FIELD_DEFINITION +directive @provides(fields: _FieldSet!) on FIELD_DEFINITION +directive @key(fields: _FieldSet!) 
on OBJECT | INTERFACE +directive @extends on OBJECT | INTERFACE +` diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/federation/sdlmerge/collect_entities.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/federation/sdlmerge/collect_entities.go new file mode 100644 index 00000000000..e542eedd789 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/federation/sdlmerge/collect_entities.go @@ -0,0 +1,62 @@ +package sdlmerge + +import ( + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" + "github.com/TykTechnologies/graphql-go-tools/pkg/engine/plan" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +type collectEntitiesVisitor struct { + *astvisitor.Walker + document *ast.Document + collectedEntities entitySet +} + +func newCollectEntitiesVisitor(collectedEntities entitySet) *collectEntitiesVisitor { + return &collectEntitiesVisitor{ + collectedEntities: collectedEntities, + } +} + +func (c *collectEntitiesVisitor) Register(walker *astvisitor.Walker) { + c.Walker = walker + walker.RegisterEnterDocumentVisitor(c) + walker.RegisterEnterInterfaceTypeDefinitionVisitor(c) + walker.RegisterEnterObjectTypeDefinitionVisitor(c) +} + +func (c *collectEntitiesVisitor) EnterDocument(operation, _ *ast.Document) { + c.document = operation +} + +func (c *collectEntitiesVisitor) EnterInterfaceTypeDefinition(ref int) { + interfaceType := c.document.InterfaceTypeDefinitions[ref] + name := c.document.InterfaceTypeDefinitionNameString(ref) + if err := c.resolvePotentialEntity(name, interfaceType.Directives.Refs); err != nil { + c.StopWithExternalErr(*err) + } +} + +func (c *collectEntitiesVisitor) EnterObjectTypeDefinition(ref int) { + objectType := c.document.ObjectTypeDefinitions[ref] + name := c.document.ObjectTypeDefinitionNameString(ref) + if err := c.resolvePotentialEntity(name, objectType.Directives.Refs); err != nil { + c.StopWithExternalErr(*err) + } +} + +func (c *collectEntitiesVisitor) resolvePotentialEntity(name string, directiveRefs []int) *operationreport.ExternalError { + if _, exists := c.collectedEntities[name]; exists { + err := operationreport.ErrEntitiesMustNotBeDuplicated(name) + return &err + } + for _, directiveRef := range directiveRefs { + if c.document.DirectiveNameString(directiveRef) != plan.FederationKeyDirectiveName { + continue + } + c.collectedEntities[name] = struct{}{} + return nil + } + return nil +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/federation/sdlmerge/enum_type_extending.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/federation/sdlmerge/enum_type_extending.go new file mode 100644 index 00000000000..7a5f9a11c20 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/federation/sdlmerge/enum_type_extending.go @@ -0,0 +1,50 @@ +package sdlmerge + +import ( + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +type extendEnumTypeDefinitionVisitor struct { + *astvisitor.Walker + document *ast.Document +} + +func newExtendEnumTypeDefinition() *extendEnumTypeDefinitionVisitor { + return &extendEnumTypeDefinitionVisitor{} +} + +func (e *extendEnumTypeDefinitionVisitor) Register(walker *astvisitor.Walker) { + e.Walker = walker + walker.RegisterEnterDocumentVisitor(e) + walker.RegisterEnterEnumTypeExtensionVisitor(e) +} + +func (e 
*extendEnumTypeDefinitionVisitor) EnterDocument(operation, _ *ast.Document) { + e.document = operation +} + +func (e *extendEnumTypeDefinitionVisitor) EnterEnumTypeExtension(ref int) { + nodes, exists := e.document.Index.NodesByNameBytes(e.document.EnumTypeExtensionNameBytes(ref)) + if !exists { + return + } + + hasExtended := false + for i := range nodes { + if nodes[i].Kind != ast.NodeKindEnumTypeDefinition { + continue + } + if hasExtended { + e.StopWithExternalErr(operationreport.ErrSharedTypesMustNotBeExtended(e.document.EnumTypeExtensionNameString(ref))) + return + } + e.document.ExtendEnumTypeDefinitionByEnumTypeExtension(nodes[i].Ref, ref) + hasExtended = true + } + + if !hasExtended { + e.StopWithExternalErr(operationreport.ErrExtensionOrphansMustResolveInSupergraph(e.document.EnumTypeExtensionNameBytes(ref))) + } +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/federation/sdlmerge/input_type_extending.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/federation/sdlmerge/input_type_extending.go new file mode 100644 index 00000000000..6715d3a1a92 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/federation/sdlmerge/input_type_extending.go @@ -0,0 +1,50 @@ +package sdlmerge + +import ( + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +func newExtendInputObjectTypeDefinition() *extendInputObjectTypeDefinitionVisitor { + return &extendInputObjectTypeDefinitionVisitor{} +} + +type extendInputObjectTypeDefinitionVisitor struct { + *astvisitor.Walker + document *ast.Document +} + +func (e *extendInputObjectTypeDefinitionVisitor) Register(walker *astvisitor.Walker) { + e.Walker = walker + walker.RegisterEnterDocumentVisitor(e) + walker.RegisterEnterInputObjectTypeExtensionVisitor(e) +} + +func (e *extendInputObjectTypeDefinitionVisitor) EnterDocument(operation, _ *ast.Document) { + e.document = operation +} + +func (e *extendInputObjectTypeDefinitionVisitor) EnterInputObjectTypeExtension(ref int) { + nodes, exists := e.document.Index.NodesByNameBytes(e.document.InputObjectTypeExtensionNameBytes(ref)) + if !exists { + return + } + + hasExtended := false + for i := range nodes { + if nodes[i].Kind != ast.NodeKindInputObjectTypeDefinition { + continue + } + if hasExtended { + e.StopWithExternalErr(operationreport.ErrSharedTypesMustNotBeExtended(e.document.InputObjectTypeExtensionNameString(ref))) + return + } + e.document.ExtendInputObjectTypeDefinitionByInputObjectTypeExtension(nodes[i].Ref, ref) + hasExtended = true + } + + if !hasExtended { + e.StopWithExternalErr(operationreport.ErrExtensionOrphansMustResolveInSupergraph(e.document.InputObjectTypeExtensionNameBytes(ref))) + } +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/federation/sdlmerge/interface_type_extending.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/federation/sdlmerge/interface_type_extending.go new file mode 100644 index 00000000000..179d0e97cda --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/federation/sdlmerge/interface_type_extending.go @@ -0,0 +1,63 @@ +package sdlmerge + +import ( + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +func newExtendInterfaceTypeDefinition(collectedEntities entitySet) 
*extendInterfaceTypeDefinitionVisitor { + return &extendInterfaceTypeDefinitionVisitor{ + collectedEntities: collectedEntities, + } +} + +type extendInterfaceTypeDefinitionVisitor struct { + *astvisitor.Walker + document *ast.Document + collectedEntities entitySet +} + +func (e *extendInterfaceTypeDefinitionVisitor) Register(walker *astvisitor.Walker) { + e.Walker = walker + walker.RegisterEnterDocumentVisitor(e) + walker.RegisterEnterInterfaceTypeExtensionVisitor(e) +} + +func (e *extendInterfaceTypeDefinitionVisitor) EnterDocument(operation, _ *ast.Document) { + e.document = operation +} + +func (e *extendInterfaceTypeDefinitionVisitor) EnterInterfaceTypeExtension(ref int) { + nameBytes := e.document.InterfaceTypeExtensionNameBytes(ref) + nodes, exists := e.document.Index.NodesByNameBytes(nameBytes) + if !exists { + return + } + + var nodeToExtend *ast.Node + isEntity := false + for i := range nodes { + if nodes[i].Kind != ast.NodeKindInterfaceTypeDefinition { + continue + } + if nodeToExtend != nil { + e.StopWithExternalErr(*multipleExtensionError(isEntity, nameBytes)) + return + } + var err *operationreport.ExternalError + extension := e.document.InterfaceTypeExtensions[ref] + if isEntity, err = e.collectedEntities.isExtensionForEntity(nameBytes, extension.Directives.Refs, e.document); err != nil { + e.StopWithExternalErr(*err) + return + } + nodeToExtend = &nodes[i] + } + + if nodeToExtend == nil { + e.StopWithExternalErr(operationreport.ErrExtensionOrphansMustResolveInSupergraph(e.document.InterfaceTypeExtensionNameBytes(ref))) + return + } + + e.document.ExtendInterfaceTypeDefinitionByInterfaceTypeExtension(nodeToExtend.Ref, ref) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/federation/sdlmerge/object_type_extending.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/federation/sdlmerge/object_type_extending.go new file mode 100644 index 00000000000..4fd86ecd895 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/federation/sdlmerge/object_type_extending.go @@ -0,0 +1,66 @@ +package sdlmerge + +import ( + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +func newExtendObjectTypeDefinition(collectedEntities entitySet) *extendObjectTypeDefinitionVisitor { + return &extendObjectTypeDefinitionVisitor{ + collectedEntities: collectedEntities, + } +} + +type extendObjectTypeDefinitionVisitor struct { + *astvisitor.Walker + document *ast.Document + collectedEntities entitySet +} + +func (e *extendObjectTypeDefinitionVisitor) Register(walker *astvisitor.Walker) { + e.Walker = walker + walker.RegisterEnterDocumentVisitor(e) + walker.RegisterEnterObjectTypeExtensionVisitor(e) +} + +func (e *extendObjectTypeDefinitionVisitor) EnterDocument(operation, _ *ast.Document) { + e.document = operation +} + +func (e *extendObjectTypeDefinitionVisitor) EnterObjectTypeExtension(ref int) { + nameBytes := e.document.ObjectTypeExtensionNameBytes(ref) + nodes, exists := e.document.Index.NodesByNameBytes(nameBytes) + if !exists { + return + } + + var nodeToExtend *ast.Node + isEntity := false + for i := range nodes { + if nodes[i].Kind != ast.NodeKindObjectTypeDefinition { + continue + } + if nodeToExtend != nil { + e.StopWithExternalErr(*multipleExtensionError(isEntity, nameBytes)) + return + } + var err *operationreport.ExternalError + extension := e.document.ObjectTypeExtensions[ref] + if isEntity, err = 
e.collectedEntities.isExtensionForEntity(nameBytes, extension.Directives.Refs, e.document); err != nil { + e.StopWithExternalErr(*err) + return + } + nodeToExtend = &nodes[i] + if ast.IsRootType(nameBytes) { + break + } + } + + if nodeToExtend == nil { + e.StopWithExternalErr(operationreport.ErrExtensionOrphansMustResolveInSupergraph(nameBytes)) + return + } + + e.document.ExtendObjectTypeDefinitionByObjectTypeExtension(nodeToExtend.Ref, ref) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/federation/sdlmerge/remove_duplicate_fielded_shared_types.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/federation/sdlmerge/remove_duplicate_fielded_shared_types.go new file mode 100644 index 00000000000..44434b87a56 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/federation/sdlmerge/remove_duplicate_fielded_shared_types.go @@ -0,0 +1,107 @@ +package sdlmerge + +import ( + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +type removeDuplicateFieldedSharedTypesVisitor struct { + *astvisitor.Walker + document *ast.Document + sharedTypeSet map[string]fieldedSharedType + rootNodesToRemove []ast.Node + lastInputRef int + lastInterfaceRef int + lastObjectRef int +} + +func newRemoveDuplicateFieldedSharedTypesVisitor() *removeDuplicateFieldedSharedTypesVisitor { + return &removeDuplicateFieldedSharedTypesVisitor{ + nil, + nil, + make(map[string]fieldedSharedType), + nil, + ast.InvalidRef, + ast.InvalidRef, + ast.InvalidRef, + } +} + +func (r *removeDuplicateFieldedSharedTypesVisitor) Register(walker *astvisitor.Walker) { + r.Walker = walker + walker.RegisterEnterDocumentVisitor(r) + walker.RegisterEnterInputObjectTypeDefinitionVisitor(r) + walker.RegisterEnterInterfaceTypeDefinitionVisitor(r) + walker.RegisterEnterObjectTypeDefinitionVisitor(r) + walker.RegisterLeaveDocumentVisitor(r) +} + +func (r *removeDuplicateFieldedSharedTypesVisitor) EnterDocument(operation, _ *ast.Document) { + r.document = operation +} + +func (r *removeDuplicateFieldedSharedTypesVisitor) EnterInputObjectTypeDefinition(ref int) { + if ref <= r.lastInputRef { + return + } + name := r.document.InputObjectTypeDefinitionNameString(ref) + refs := r.document.InputObjectTypeDefinitions[ref].InputFieldsDefinition.Refs + input, exists := r.sharedTypeSet[name] + if exists { + if !input.areFieldsIdentical(refs) { + r.StopWithExternalErr(operationreport.ErrSharedTypesMustBeIdenticalToFederate(name)) + return + } + r.rootNodesToRemove = append(r.rootNodesToRemove, ast.Node{Kind: ast.NodeKindInputObjectTypeDefinition, Ref: ref}) + } else { + r.sharedTypeSet[name] = newFieldedSharedType(r.document, ast.NodeKindInputValueDefinition, refs) + } + r.lastInputRef = ref +} + +func (r *removeDuplicateFieldedSharedTypesVisitor) EnterInterfaceTypeDefinition(ref int) { + if ref <= r.lastInterfaceRef { + return + } + name := r.document.InterfaceTypeDefinitionNameString(ref) + interfaceType := r.document.InterfaceTypeDefinitions[ref] + refs := interfaceType.FieldsDefinition.Refs + iFace, exists := r.sharedTypeSet[name] + if exists { + if !iFace.areFieldsIdentical(refs) { + r.StopWithExternalErr(operationreport.ErrSharedTypesMustBeIdenticalToFederate(name)) + return + } + r.rootNodesToRemove = append(r.rootNodesToRemove, ast.Node{Kind: ast.NodeKindInterfaceTypeDefinition, Ref: ref}) + } else { + r.sharedTypeSet[name] = newFieldedSharedType(r.document, 
ast.NodeKindFieldDefinition, refs) + } + r.lastInterfaceRef = ref +} + +func (r *removeDuplicateFieldedSharedTypesVisitor) EnterObjectTypeDefinition(ref int) { + if ref <= r.lastObjectRef { + return + } + name := r.document.ObjectTypeDefinitionNameString(ref) + objectType := r.document.ObjectTypeDefinitions[ref] + refs := objectType.FieldsDefinition.Refs + object, exists := r.sharedTypeSet[name] + if exists { + if !object.areFieldsIdentical(refs) { + r.StopWithExternalErr(operationreport.ErrSharedTypesMustBeIdenticalToFederate(name)) + return + } + r.rootNodesToRemove = append(r.rootNodesToRemove, ast.Node{Kind: ast.NodeKindObjectTypeDefinition, Ref: ref}) + } else { + r.sharedTypeSet[name] = newFieldedSharedType(r.document, ast.NodeKindFieldDefinition, refs) + } + r.lastObjectRef = ref +} + +func (r *removeDuplicateFieldedSharedTypesVisitor) LeaveDocument(_, _ *ast.Document) { + if r.rootNodesToRemove != nil { + r.document.DeleteRootNodes(r.rootNodesToRemove) + } +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/federation/sdlmerge/remove_duplicate_fieldless_shared_types.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/federation/sdlmerge/remove_duplicate_fieldless_shared_types.go new file mode 100644 index 00000000000..6039413ebf7 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/federation/sdlmerge/remove_duplicate_fieldless_shared_types.go @@ -0,0 +1,98 @@ +package sdlmerge + +import ( + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +type removeDuplicateFieldlessSharedTypesVisitor struct { + *astvisitor.Walker + document *ast.Document + sharedTypeSet map[string]fieldlessSharedType + rootNodesToRemove []ast.Node + lastEnumRef int + lastUnionRef int + lastScalarRef int +} + +func newRemoveDuplicateFieldlessSharedTypesVisitor() *removeDuplicateFieldlessSharedTypesVisitor { + return &removeDuplicateFieldlessSharedTypesVisitor{ + nil, + nil, + make(map[string]fieldlessSharedType), + nil, + ast.InvalidRef, + ast.InvalidRef, + ast.InvalidRef, + } +} + +func (r *removeDuplicateFieldlessSharedTypesVisitor) Register(walker *astvisitor.Walker) { + r.Walker = walker + walker.RegisterEnterDocumentVisitor(r) + walker.RegisterEnterEnumTypeDefinitionVisitor(r) + walker.RegisterEnterScalarTypeDefinitionVisitor(r) + walker.RegisterEnterUnionTypeDefinitionVisitor(r) + walker.RegisterLeaveDocumentVisitor(r) +} + +func (r *removeDuplicateFieldlessSharedTypesVisitor) EnterDocument(operation, _ *ast.Document) { + r.document = operation +} + +func (r *removeDuplicateFieldlessSharedTypesVisitor) EnterEnumTypeDefinition(ref int) { + if ref <= r.lastEnumRef { + return + } + name := r.document.EnumTypeDefinitionNameString(ref) + enum, exists := r.sharedTypeSet[name] + if exists { + if !enum.areValuesIdentical(r.document.EnumTypeDefinitions[ref].EnumValuesDefinition.Refs) { + r.StopWithExternalErr(operationreport.ErrSharedTypesMustBeIdenticalToFederate(name)) + return + } + r.rootNodesToRemove = append(r.rootNodesToRemove, ast.Node{Kind: ast.NodeKindEnumTypeDefinition, Ref: ref}) + } else { + r.sharedTypeSet[name] = newEnumSharedType(r.document, ref) + } + r.lastEnumRef = ref +} + +func (r *removeDuplicateFieldlessSharedTypesVisitor) EnterScalarTypeDefinition(ref int) { + if ref <= r.lastScalarRef { + return + } + name := r.document.ScalarTypeDefinitionNameString(ref) + _, exists := r.sharedTypeSet[name] + if exists 
{ + r.rootNodesToRemove = append(r.rootNodesToRemove, ast.Node{Kind: ast.NodeKindScalarTypeDefinition, Ref: ref}) + } else { + r.sharedTypeSet[name] = scalarSharedType{} + } + r.lastScalarRef = ref +} + +func (r *removeDuplicateFieldlessSharedTypesVisitor) EnterUnionTypeDefinition(ref int) { + if ref <= r.lastUnionRef { + return + } + name := r.document.UnionTypeDefinitionNameString(ref) + union, exists := r.sharedTypeSet[name] + if exists { + if !union.areValuesIdentical(r.document.UnionTypeDefinitions[ref].UnionMemberTypes.Refs) { + r.StopWithExternalErr(operationreport.ErrSharedTypesMustBeIdenticalToFederate(name)) + return + } + r.rootNodesToRemove = append(r.rootNodesToRemove, ast.Node{Kind: ast.NodeKindUnionTypeDefinition, Ref: ref}) + } else { + r.sharedTypeSet[name] = newUnionSharedType(r.document, ref) + } + r.lastUnionRef = ref +} + +func (r *removeDuplicateFieldlessSharedTypesVisitor) LeaveDocument(_, _ *ast.Document) { + if r.rootNodesToRemove != nil { + r.document.DeleteRootNodes(r.rootNodesToRemove) + } +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/federation/sdlmerge/remove_empty_object_type_definition.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/federation/sdlmerge/remove_empty_object_type_definition.go new file mode 100644 index 00000000000..d8c5d3f1e1b --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/federation/sdlmerge/remove_empty_object_type_definition.go @@ -0,0 +1,32 @@ +package sdlmerge + +import ( + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" +) + +func newRemoveEmptyObjectTypeDefinition() *removeEmptyObjectTypeDefinition { + return &removeEmptyObjectTypeDefinition{} +} + +type removeEmptyObjectTypeDefinition struct{} + +func (r *removeEmptyObjectTypeDefinition) Register(walker *astvisitor.Walker) { + walker.RegisterLeaveDocumentVisitor(r) +} + +func (r *removeEmptyObjectTypeDefinition) LeaveDocument(operation, _ *ast.Document) { + for ref := range operation.ObjectTypeDefinitions { + if operation.ObjectTypeDefinitions[ref].HasFieldDefinitions { + continue + } + + name := operation.ObjectTypeDefinitionNameString(ref) + node, ok := operation.Index.FirstNodeByNameStr(name) + if !ok { + return + } + + operation.RemoveRootNode(node) + } +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/federation/sdlmerge/remove_field_definition_by_directive.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/federation/sdlmerge/remove_field_definition_by_directive.go new file mode 100644 index 00000000000..9e758c169e3 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/federation/sdlmerge/remove_field_definition_by_directive.go @@ -0,0 +1,46 @@ +package sdlmerge + +import ( + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" +) + +func newRemoveFieldDefinitions(directives ...string) *removeFieldDefinitionByDirective { + directivesSet := make(map[string]struct{}, len(directives)) + for _, directive := range directives { + directivesSet[directive] = struct{}{} + } + + return &removeFieldDefinitionByDirective{ + directives: directivesSet, + } +} + +type removeFieldDefinitionByDirective struct { + operation *ast.Document + directives map[string]struct{} +} + +func (r *removeFieldDefinitionByDirective) Register(walker *astvisitor.Walker) { + walker.RegisterEnterDocumentVisitor(r) + walker.RegisterLeaveObjectTypeDefinitionVisitor(r) +} + 
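+// A sketch of the effect of this visitor, assuming a hypothetical subgraph
+// type such as:
+//
+//	type Product @key(fields: "upc") {
+//		upc: String!
+//		price: Int @external
+//	}
+//
+// Walking the merged document with newRemoveFieldDefinitions("external")
+// deletes the price field, since @external fields are owned by another
+// subgraph and must not be duplicated in the federated object definition.
+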
+func (r *removeFieldDefinitionByDirective) EnterDocument(operation, _ *ast.Document) { + r.operation = operation +} + +func (r *removeFieldDefinitionByDirective) LeaveObjectTypeDefinition(ref int) { + var refsForDeletion []int + // select fields for deletion + for _, fieldRef := range r.operation.ObjectTypeDefinitions[ref].FieldsDefinition.Refs { + for _, directiveRef := range r.operation.FieldDefinitions[fieldRef].Directives.Refs { + directiveName := r.operation.DirectiveNameString(directiveRef) + if _, ok := r.directives[directiveName]; ok { + refsForDeletion = append(refsForDeletion, fieldRef) + } + } + } + // delete fields + r.operation.RemoveFieldDefinitionsFromObjectTypeDefinition(refsForDeletion, ref) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/federation/sdlmerge/remove_field_definition_directive.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/federation/sdlmerge/remove_field_definition_directive.go new file mode 100644 index 00000000000..80ce00a62e5 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/federation/sdlmerge/remove_field_definition_directive.go @@ -0,0 +1,44 @@ +package sdlmerge + +import ( + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" +) + +func newRemoveFieldDefinitionDirective(directives ...string) *removeFieldDefinitionDirective { + directivesSet := make(map[string]struct{}, len(directives)) + for _, directive := range directives { + directivesSet[directive] = struct{}{} + } + + return &removeFieldDefinitionDirective{ + directives: directivesSet, + } +} + +type removeFieldDefinitionDirective struct { + operation *ast.Document + directives map[string]struct{} +} + +func (r *removeFieldDefinitionDirective) Register(walker *astvisitor.Walker) { + walker.RegisterEnterDocumentVisitor(r) + walker.RegisterEnterFieldDefinitionVisitor(r) +} + +func (r *removeFieldDefinitionDirective) EnterDocument(operation, _ *ast.Document) { + r.operation = operation +} + +func (r *removeFieldDefinitionDirective) EnterFieldDefinition(ref int) { + var refsForDeletion []int + // select directives for deletion + for _, directiveRef := range r.operation.FieldDefinitions[ref].Directives.Refs { + directiveName := r.operation.DirectiveNameString(directiveRef) + if _, ok := r.directives[directiveName]; ok { + refsForDeletion = append(refsForDeletion, directiveRef) + } + } + // delete directives + r.operation.RemoveDirectivesFromNode(ast.Node{Kind: ast.NodeKindFieldDefinition, Ref: ref}, refsForDeletion) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/federation/sdlmerge/remove_interface_definition_directive.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/federation/sdlmerge/remove_interface_definition_directive.go new file mode 100644 index 00000000000..dbda8d8457b --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/federation/sdlmerge/remove_interface_definition_directive.go @@ -0,0 +1,45 @@ +package sdlmerge + +import ( + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" +) + +func newRemoveInterfaceDefinitionDirective(directives ...string) *removeInterfaceDefinitionDirective { + directivesSet := make(map[string]struct{}, len(directives)) + for _, directive := range directives { + directivesSet[directive] = struct{}{} + } + + return &removeInterfaceDefinitionDirective{ + directives: directivesSet, + } +} + +type 
removeInterfaceDefinitionDirective struct { + *astvisitor.Walker + operation *ast.Document + directives map[string]struct{} +} + +func (r *removeInterfaceDefinitionDirective) Register(walker *astvisitor.Walker) { + walker.RegisterEnterDocumentVisitor(r) + walker.RegisterEnterInterfaceTypeDefinitionVisitor(r) +} + +func (r *removeInterfaceDefinitionDirective) EnterDocument(operation, _ *ast.Document) { + r.operation = operation +} + +func (r *removeInterfaceDefinitionDirective) EnterInterfaceTypeDefinition(ref int) { + var refsForDeletion []int + // select directives for deletion + for _, directiveRef := range r.operation.InterfaceTypeDefinitions[ref].Directives.Refs { + directiveName := r.operation.DirectiveNameString(directiveRef) + if _, ok := r.directives[directiveName]; ok { + refsForDeletion = append(refsForDeletion, directiveRef) + } + } + // delete directives + r.operation.RemoveDirectivesFromNode(ast.Node{Kind: ast.NodeKindInterfaceTypeDefinition, Ref: ref}, refsForDeletion) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/federation/sdlmerge/remove_object_type_definition_directive.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/federation/sdlmerge/remove_object_type_definition_directive.go new file mode 100644 index 00000000000..5abfb85e366 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/federation/sdlmerge/remove_object_type_definition_directive.go @@ -0,0 +1,44 @@ +package sdlmerge + +import ( + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" +) + +func newRemoveObjectTypeDefinitionDirective(directives ...string) *removeObjectTypeDefinitionDirective { + directivesSet := make(map[string]struct{}, len(directives)) + for _, directive := range directives { + directivesSet[directive] = struct{}{} + } + + return &removeObjectTypeDefinitionDirective{ + directives: directivesSet, + } +} + +type removeObjectTypeDefinitionDirective struct { + operation *ast.Document + directives map[string]struct{} +} + +func (r *removeObjectTypeDefinitionDirective) Register(walker *astvisitor.Walker) { + walker.RegisterEnterDocumentVisitor(r) + walker.RegisterEnterObjectTypeDefinitionVisitor(r) +} + +func (r *removeObjectTypeDefinitionDirective) EnterDocument(operation, _ *ast.Document) { + r.operation = operation +} + +func (r *removeObjectTypeDefinitionDirective) EnterObjectTypeDefinition(ref int) { + var refsForDeletion []int + // select directives for deletion + for _, directiveRef := range r.operation.ObjectTypeDefinitions[ref].Directives.Refs { + directiveName := r.operation.DirectiveNameString(directiveRef) + if _, ok := r.directives[directiveName]; ok { + refsForDeletion = append(refsForDeletion, directiveRef) + } + } + // delete directives + r.operation.RemoveDirectivesFromNode(ast.Node{Kind: ast.NodeKindObjectTypeDefinition, Ref: ref}, refsForDeletion) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/federation/sdlmerge/remove_type_extensions.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/federation/sdlmerge/remove_type_extensions.go new file mode 100644 index 00000000000..b2b82d6f2f9 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/federation/sdlmerge/remove_type_extensions.go @@ -0,0 +1,21 @@ +package sdlmerge + +import ( + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" +) + +func newRemoveMergedTypeExtensions() 
*removeMergedTypeExtensionsVisitor { + return &removeMergedTypeExtensionsVisitor{} +} + +type removeMergedTypeExtensionsVisitor struct { +} + +func (r *removeMergedTypeExtensionsVisitor) Register(walker *astvisitor.Walker) { + walker.RegisterLeaveDocumentVisitor(r) +} + +func (r *removeMergedTypeExtensionsVisitor) LeaveDocument(operation, definition *ast.Document) { + operation.RemoveMergedTypeExtensions() +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/federation/sdlmerge/scalar_type_extending.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/federation/sdlmerge/scalar_type_extending.go new file mode 100644 index 00000000000..f396eed8086 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/federation/sdlmerge/scalar_type_extending.go @@ -0,0 +1,49 @@ +package sdlmerge + +import ( + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +func newExtendScalarTypeDefinition() *extendScalarTypeDefinitionVisitor { + return &extendScalarTypeDefinitionVisitor{} +} + +type extendScalarTypeDefinitionVisitor struct { + *astvisitor.Walker + document *ast.Document +} + +func (e *extendScalarTypeDefinitionVisitor) Register(walker *astvisitor.Walker) { + e.Walker = walker + walker.RegisterEnterDocumentVisitor(e) + walker.RegisterEnterScalarTypeExtensionVisitor(e) +} + +func (e *extendScalarTypeDefinitionVisitor) EnterDocument(operation, _ *ast.Document) { + e.document = operation +} + +func (e *extendScalarTypeDefinitionVisitor) EnterScalarTypeExtension(ref int) { + nodes, exists := e.document.Index.NodesByNameBytes(e.document.ScalarTypeExtensionNameBytes(ref)) + if !exists { + return + } + + hasExtended := false + for i := range nodes { + if nodes[i].Kind != ast.NodeKindScalarTypeDefinition { + continue + } + if hasExtended { + e.StopWithExternalErr(operationreport.ErrSharedTypesMustNotBeExtended(e.document.ScalarTypeExtensionNameString(ref))) + return + } + e.document.ExtendScalarTypeDefinitionByScalarTypeExtension(nodes[i].Ref, ref) + hasExtended = true + } + if !hasExtended { + e.StopWithExternalErr(operationreport.ErrExtensionOrphansMustResolveInSupergraph(e.document.ScalarTypeExtensionNameBytes(ref))) + } +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/federation/sdlmerge/sdlmerge.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/federation/sdlmerge/sdlmerge.go new file mode 100644 index 00000000000..300a74acc37 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/federation/sdlmerge/sdlmerge.go @@ -0,0 +1,204 @@ +package sdlmerge + +import ( + "fmt" + "github.com/TykTechnologies/graphql-go-tools/pkg/asttransform" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation" + "github.com/TykTechnologies/graphql-go-tools/pkg/engine/plan" + "strings" + + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization" + "github.com/TykTechnologies/graphql-go-tools/pkg/astparser" + "github.com/TykTechnologies/graphql-go-tools/pkg/astprinter" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +const ( + rootOperationTypeDefinitions = ` + type Query {} + type Mutation {} + type Subscription {} + ` + + parseDocumentError = "parse graphql document string: %w" +) + +type Visitor interface { + Register(walker 
*astvisitor.Walker) +} + +func MergeAST(ast *ast.Document) error { + normalizer := normalizer{} + normalizer.setupWalkers() + + return normalizer.normalize(ast) +} + +func MergeSDLs(SDLs ...string) (string, error) { + rawDocs := make([]string, 0, len(SDLs)+1) + rawDocs = append(rawDocs, rootOperationTypeDefinitions) + rawDocs = append(rawDocs, SDLs...) + if validationError := validateSubgraphs(rawDocs[1:]); validationError != nil { + return "", validationError + } + if normalizationError := normalizeSubgraphs(rawDocs[1:]); normalizationError != nil { + return "", normalizationError + } + + doc, report := astparser.ParseGraphqlDocumentString(strings.Join(rawDocs, "\n")) + if report.HasErrors() { + return "", fmt.Errorf("parse graphql document string: %w", report) + } + + astnormalization.NormalizeSubgraphSDL(&doc, &report) + if report.HasErrors() { + return "", fmt.Errorf("merge ast: %w", report) + } + + if err := MergeAST(&doc); err != nil { + return "", fmt.Errorf("merge ast: %w", err) + } + + out, err := astprinter.PrintString(&doc, nil) + if err != nil { + return "", fmt.Errorf("stringify schema: %w", err) + } + + return out, nil +} + +func validateSubgraphs(subgraphs []string) error { + validator := astvalidation.NewDefinitionValidator( + astvalidation.PopulatedTypeBodies(), astvalidation.KnownTypeNames(), + ) + for _, subgraph := range subgraphs { + doc, report := astparser.ParseGraphqlDocumentString(subgraph) + if err := asttransform.MergeDefinitionWithBaseSchema(&doc); err != nil { + return err + } + if report.HasErrors() { + return fmt.Errorf(parseDocumentError, report) + } + validator.Validate(&doc, &report) + if report.HasErrors() { + return fmt.Errorf("validate schema: %w", report) + } + } + return nil +} + +func normalizeSubgraphs(subgraphs []string) error { + subgraphNormalizer := astnormalization.NewSubgraphDefinitionNormalizer() + for i, subgraph := range subgraphs { + doc, report := astparser.ParseGraphqlDocumentString(subgraph) + if report.HasErrors() { + return fmt.Errorf(parseDocumentError, report) + } + subgraphNormalizer.NormalizeDefinition(&doc, &report) + if report.HasErrors() { + return fmt.Errorf("normalize schema: %w", report) + } + out, err := astprinter.PrintString(&doc, nil) + if err != nil { + return fmt.Errorf("stringify schema: %w", err) + } + subgraphs[i] = out + } + return nil +} + +type normalizer struct { + walkers []*astvisitor.Walker +} + +type entitySet map[string]struct{} + +func (m *normalizer) setupWalkers() { + collectedEntities := make(entitySet) + visitorGroups := [][]Visitor{ + { + newCollectEntitiesVisitor(collectedEntities), + }, + { + newExtendEnumTypeDefinition(), + newExtendInputObjectTypeDefinition(), + newExtendInterfaceTypeDefinition(collectedEntities), + newExtendScalarTypeDefinition(), + newExtendUnionTypeDefinition(), + newExtendObjectTypeDefinition(collectedEntities), + newRemoveEmptyObjectTypeDefinition(), + newRemoveMergedTypeExtensions(), + }, + // visitors for cleaning up federated duplicated fields and directives + { + newRemoveFieldDefinitions("external"), + newRemoveDuplicateFieldedSharedTypesVisitor(), + newRemoveDuplicateFieldlessSharedTypesVisitor(), + newRemoveInterfaceDefinitionDirective("key"), + newRemoveObjectTypeDefinitionDirective("key"), + newRemoveFieldDefinitionDirective("provides", "requires"), + }, + } + + for _, visitorGroup := range visitorGroups { + walker := astvisitor.NewWalker(48) + for _, visitor := range visitorGroup { + visitor.Register(&walker) + m.walkers = append(m.walkers, &walker) + } + } +} + 
+func (m *normalizer) normalize(operation *ast.Document) error { + report := operationreport.Report{} + + for _, walker := range m.walkers { + walker.Walk(operation, nil, &report) + if report.HasErrors() { + return fmt.Errorf("walk: %w", report) + } + } + + return nil +} + +func (e entitySet) isExtensionForEntity(nameBytes []byte, directiveRefs []int, document *ast.Document) (bool, *operationreport.ExternalError) { + name := string(nameBytes) + hasDirectives := len(directiveRefs) > 0 + if _, exists := e[name]; !exists { + if !hasDirectives || !isEntityExtension(directiveRefs, document) { + return false, nil + } + err := operationreport.ErrExtensionWithKeyDirectiveMustExtendEntity(name) + return false, &err + } + if !hasDirectives { + err := operationreport.ErrEntityExtensionMustHaveKeyDirective(name) + return false, &err + } + if isEntityExtension(directiveRefs, document) { + return true, nil + } + err := operationreport.ErrEntityExtensionMustHaveKeyDirective(name) + return false, &err +} + +func isEntityExtension(directiveRefs []int, document *ast.Document) bool { + for _, directiveRef := range directiveRefs { + if document.DirectiveNameString(directiveRef) == plan.FederationKeyDirectiveName { + return true + } + } + return false +} + +func multipleExtensionError(isEntity bool, nameBytes []byte) *operationreport.ExternalError { + if isEntity { + err := operationreport.ErrEntitiesMustNotBeDuplicated(string(nameBytes)) + return &err + } + err := operationreport.ErrSharedTypesMustNotBeExtended(string(nameBytes)) + return &err +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/federation/sdlmerge/shared_types.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/federation/sdlmerge/shared_types.go new file mode 100644 index 00000000000..abec68c434c --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/federation/sdlmerge/shared_types.go @@ -0,0 +1,168 @@ +package sdlmerge + +import "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + +type fieldlessSharedType interface { + areValuesIdentical(valueRefsToCompare []int) bool + valueRefs() []int + valueName(ref int) string +} + +func createValueSet(f fieldlessSharedType) map[string]bool { + valueSet := make(map[string]bool) + for _, valueRef := range f.valueRefs() { + valueSet[f.valueName(valueRef)] = true + } + return valueSet +} + +type fieldedSharedType struct { + document *ast.Document + fieldKind ast.NodeKind + fieldRefs []int + fieldSet map[string]int +} + +func newFieldedSharedType(document *ast.Document, fieldKind ast.NodeKind, fieldRefs []int) fieldedSharedType { + f := fieldedSharedType{ + document, + fieldKind, + fieldRefs, + nil, + } + f.createFieldSet() + return f +} + +func (f fieldedSharedType) areFieldsIdentical(fieldRefsToCompare []int) bool { + if len(f.fieldRefs) != len(fieldRefsToCompare) { + return false + } + for _, fieldRef := range fieldRefsToCompare { + actualFieldName := f.fieldName(fieldRef) + expectedTypeRef, exists := f.fieldSet[actualFieldName] + if !exists { + return false + } + actualTypeRef := f.fieldTypeRef(fieldRef) + if !f.document.TypesAreCompatibleDeep(expectedTypeRef, actualTypeRef) { + return false + } + } + return true +} + +func (f *fieldedSharedType) createFieldSet() { + fieldSet := make(map[string]int) + for _, fieldRef := range f.fieldRefs { + fieldSet[f.fieldName(fieldRef)] = f.fieldTypeRef(fieldRef) + } + f.fieldSet = fieldSet +} + +func (f fieldedSharedType) fieldName(ref int) string { + switch f.fieldKind { + case ast.NodeKindInputValueDefinition: + 
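+ // input object fields are InputValueDefinitions; interface and object fields use FieldDefinitions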
return f.document.InputValueDefinitionNameString(ref) + default: + return f.document.FieldDefinitionNameString(ref) + } +} + +func (f fieldedSharedType) fieldTypeRef(ref int) int { + switch f.fieldKind { + case ast.NodeKindInputValueDefinition: + return f.document.InputValueDefinitions[ref].Type + default: + return f.document.FieldDefinitions[ref].Type + } +} + +type enumSharedType struct { + *ast.EnumTypeDefinition + document *ast.Document + valueSet map[string]bool +} + +func newEnumSharedType(document *ast.Document, ref int) enumSharedType { + e := enumSharedType{ + &document.EnumTypeDefinitions[ref], + document, + nil, + } + e.valueSet = createValueSet(e) + return e +} + +func (e enumSharedType) areValuesIdentical(valueRefsToCompare []int) bool { + if len(e.valueRefs()) != len(valueRefsToCompare) { + return false + } + for _, valueRefToCompare := range valueRefsToCompare { + name := e.valueName(valueRefToCompare) + if !e.valueSet[name] { + return false + } + } + return true +} + +func (e enumSharedType) valueRefs() []int { + return e.EnumValuesDefinition.Refs +} + +func (e enumSharedType) valueName(ref int) string { + return e.document.EnumValueDefinitionNameString(ref) +} + +type unionSharedType struct { + *ast.UnionTypeDefinition + document *ast.Document + valueSet map[string]bool +} + +func newUnionSharedType(document *ast.Document, ref int) unionSharedType { + u := unionSharedType{ + &document.UnionTypeDefinitions[ref], + document, + nil, + } + u.valueSet = createValueSet(u) + return u +} + +func (u unionSharedType) areValuesIdentical(valueRefsToCompare []int) bool { + if len(u.valueRefs()) != len(valueRefsToCompare) { + return false + } + for _, refToCompare := range valueRefsToCompare { + name := u.valueName(refToCompare) + if !u.valueSet[name] { + return false + } + } + return true +} + +func (u unionSharedType) valueRefs() []int { + return u.UnionMemberTypes.Refs +} + +func (u unionSharedType) valueName(ref int) string { + return u.document.TypeNameString(ref) +} + +type scalarSharedType struct { +} + +func (_ scalarSharedType) areValuesIdentical(_ []int) bool { + return true +} + +func (_ scalarSharedType) valueRefs() []int { + return nil +} + +func (_ scalarSharedType) valueName(_ int) string { + return "" +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/federation/sdlmerge/union_type_extending.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/federation/sdlmerge/union_type_extending.go new file mode 100644 index 00000000000..1315ba1a8d1 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/federation/sdlmerge/union_type_extending.go @@ -0,0 +1,50 @@ +package sdlmerge + +import ( + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +func newExtendUnionTypeDefinition() *extendUnionTypeDefinitionVisitor { + return &extendUnionTypeDefinitionVisitor{} +} + +type extendUnionTypeDefinitionVisitor struct { + *astvisitor.Walker + document *ast.Document +} + +func (e *extendUnionTypeDefinitionVisitor) Register(walker *astvisitor.Walker) { + e.Walker = walker + walker.RegisterEnterDocumentVisitor(e) + walker.RegisterEnterUnionTypeExtensionVisitor(e) +} + +func (e *extendUnionTypeDefinitionVisitor) EnterDocument(operation, _ *ast.Document) { + e.document = operation +} + +func (e *extendUnionTypeDefinitionVisitor) EnterUnionTypeExtension(ref int) { + nodes, exists := 
e.document.Index.NodesByNameBytes(e.document.UnionTypeExtensionNameBytes(ref)) + if !exists { + return + } + + hasExtended := false + for i := range nodes { + if nodes[i].Kind != ast.NodeKindUnionTypeDefinition { + continue + } + if hasExtended { + e.StopWithExternalErr(operationreport.ErrSharedTypesMustNotBeExtended(e.document.UnionTypeExtensionNameString(ref))) + return + } + e.document.ExtendUnionTypeDefinitionByUnionTypeExtension(nodes[i].Ref, ref) + hasExtended = true + } + + if !hasExtended { + e.StopWithExternalErr(operationreport.ErrExtensionOrphansMustResolveInSupergraph(e.document.UnionTypeExtensionNameBytes(ref))) + } +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphql/complexity.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphql/complexity.go new file mode 100644 index 00000000000..b4f7d7b6309 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphql/complexity.go @@ -0,0 +1,75 @@ +package graphql + +import ( + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/middleware/operation_complexity" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +var DefaultComplexityCalculator = defaultComplexityCalculator{} + +type ComplexityCalculator interface { + Calculate(operation, definition *ast.Document) (ComplexityResult, error) +} + +type defaultComplexityCalculator struct { +} + +func (d defaultComplexityCalculator) Calculate(operation, definition *ast.Document) (ComplexityResult, error) { + report := operationreport.Report{} + globalComplexityResult, fieldsComplexityResult := operation_complexity.CalculateOperationComplexity(operation, definition, &report) + + return complexityResult(globalComplexityResult, fieldsComplexityResult, report) +} + +type ComplexityResult struct { + NodeCount int + Complexity int + Depth int + PerRootField []FieldComplexityResult + Errors Errors +} + +type FieldComplexityResult struct { + TypeName string + FieldName string + Alias string + NodeCount int + Complexity int + Depth int +} + +func complexityResult(globalComplexityResult operation_complexity.OperationStats, fieldsComplexityResult []operation_complexity.RootFieldStats, report operationreport.Report) (ComplexityResult, error) { + allFieldComplexityResults := make([]FieldComplexityResult, 0, len(fieldsComplexityResult)) + for _, fieldResult := range fieldsComplexityResult { + allFieldComplexityResults = append(allFieldComplexityResults, FieldComplexityResult{ + TypeName: fieldResult.TypeName, + FieldName: fieldResult.FieldName, + Alias: fieldResult.Alias, + NodeCount: fieldResult.Stats.NodeCount, + Complexity: fieldResult.Stats.Complexity, + Depth: fieldResult.Stats.Depth, + }) + } + + result := ComplexityResult{ + NodeCount: globalComplexityResult.NodeCount, + Complexity: globalComplexityResult.Complexity, + Depth: globalComplexityResult.Depth, + PerRootField: allFieldComplexityResults, + Errors: nil, + } + + if !report.HasErrors() { + return result, nil + } + + result.Errors = RequestErrorsFromOperationReport(report) + + var err error + if len(report.InternalErrors) > 0 { + err = report.InternalErrors[0] + } + + return result, err +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphql/config_factory_federation.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphql/config_factory_federation.go new file mode 100644 index 00000000000..6a1f254aaa8 --- /dev/null +++ 
b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphql/config_factory_federation.go @@ -0,0 +1,181 @@ +package graphql + +import ( + "fmt" + "net/http" + "time" + + "github.com/TykTechnologies/graphql-go-tools/pkg/astparser" + graphqlDataSource "github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/graphql_datasource" + "github.com/TykTechnologies/graphql-go-tools/pkg/engine/plan" + "github.com/TykTechnologies/graphql-go-tools/pkg/engine/resolve" + "github.com/TykTechnologies/graphql-go-tools/pkg/federation" +) + +type federationEngineConfigFactoryOptions struct { + httpClient *http.Client + streamingClient *http.Client + subscriptionClientFactory graphqlDataSource.GraphQLSubscriptionClientFactory + subscriptionType SubscriptionType +} + +type FederationEngineConfigFactoryOption func(options *federationEngineConfigFactoryOptions) + +func WithFederationHttpClient(client *http.Client) FederationEngineConfigFactoryOption { + return func(options *federationEngineConfigFactoryOptions) { + options.httpClient = client + } +} + +func WithFederationStreamingClient(client *http.Client) FederationEngineConfigFactoryOption { + return func(options *federationEngineConfigFactoryOptions) { + options.streamingClient = client + } +} + +func WithFederationSubscriptionClientFactory(factory graphqlDataSource.GraphQLSubscriptionClientFactory) FederationEngineConfigFactoryOption { + return func(options *federationEngineConfigFactoryOptions) { + options.subscriptionClientFactory = factory + } +} + +func WithFederationSubscriptionType(subscriptionType SubscriptionType) FederationEngineConfigFactoryOption { + return func(options *federationEngineConfigFactoryOptions) { + options.subscriptionType = subscriptionType + } +} + +func NewFederationEngineConfigFactory(dataSourceConfigs []graphqlDataSource.Configuration, batchFactory resolve.DataSourceBatchFactory, opts ...FederationEngineConfigFactoryOption) *FederationEngineConfigFactory { + options := federationEngineConfigFactoryOptions{ + httpClient: &http.Client{ + Timeout: time.Second * 10, + Transport: &http.Transport{ + MaxIdleConnsPerHost: 1024, + TLSHandshakeTimeout: 0 * time.Second, + }, + }, + streamingClient: &http.Client{ + Timeout: 0, + }, + subscriptionClientFactory: &graphqlDataSource.DefaultSubscriptionClientFactory{}, + subscriptionType: SubscriptionTypeUnknown, + } + + for _, optFunc := range opts { + optFunc(&options) + } + + return &FederationEngineConfigFactory{ + httpClient: options.httpClient, + streamingClient: options.streamingClient, + dataSourceConfigs: dataSourceConfigs, + batchFactory: batchFactory, + subscriptionClientFactory: options.subscriptionClientFactory, + subscriptionType: options.subscriptionType, + } +} + +// FederationEngineConfigFactory is used to create a v2 engine config for a supergraph with multiple data sources for subgraphs. 
+type FederationEngineConfigFactory struct { + httpClient *http.Client + streamingClient *http.Client + dataSourceConfigs []graphqlDataSource.Configuration + schema *Schema + batchFactory resolve.DataSourceBatchFactory + subscriptionClientFactory graphqlDataSource.GraphQLSubscriptionClientFactory + subscriptionType SubscriptionType +} + +func (f *FederationEngineConfigFactory) SetMergedSchemaFromString(mergedSchema string) (err error) { + f.schema, err = NewSchemaFromString(mergedSchema) + if err != nil { + return fmt.Errorf("set merged schema in FederationEngineConfigFactory: %s", err.Error()) + } + return nil +} + +func (f *FederationEngineConfigFactory) MergedSchema() (*Schema, error) { + if f.schema != nil { + return f.schema, nil + } + + SDLs := make([]string, len(f.dataSourceConfigs)) + for i := range f.dataSourceConfigs { + SDLs[i] = f.dataSourceConfigs[i].Federation.ServiceSDL + } + + rawBaseSchema, err := federation.BuildBaseSchemaDocument(SDLs...) + if err != nil { + return nil, fmt.Errorf("build base schema: %w", err) + } + + if f.schema, err = NewSchemaFromString(rawBaseSchema); err != nil { + return nil, fmt.Errorf("parse schema from string: %v", err) + } + + return f.schema, nil +} + +func (f *FederationEngineConfigFactory) EngineV2Configuration() (conf EngineV2Configuration, err error) { + schema, err := f.MergedSchema() + if err != nil { + return conf, fmt.Errorf("get schema: %v", err) + } + + conf = NewEngineV2Configuration(schema) + + fieldConfigs, err := f.engineConfigFieldConfigs(schema) + if err != nil { + return conf, fmt.Errorf("create field configs: %v", err) + } + + dataSources, err := f.engineConfigDataSources() + if err != nil { + return conf, fmt.Errorf("create datasource config: %v", err) + } + + conf.SetFieldConfigurations(fieldConfigs) + conf.SetDataSources(dataSources) + + return conf, nil +} + +func (f *FederationEngineConfigFactory) engineConfigFieldConfigs(schema *Schema) (plan.FieldConfigurations, error) { + var planFieldConfigs plan.FieldConfigurations + + for _, dataSourceConfig := range f.dataSourceConfigs { + doc, report := astparser.ParseGraphqlDocumentString(dataSourceConfig.Federation.ServiceSDL) + if report.HasErrors() { + return nil, fmt.Errorf("parse graphql document string: %s", report.Error()) + } + extractor := plan.NewRequiredFieldExtractor(&doc) + planFieldConfigs = append(planFieldConfigs, extractor.GetAllRequiredFields()...) + } + + planFieldConfigs = newGraphQLFieldConfigsV2Generator(schema).Generate(planFieldConfigs...) 
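+ // argument configurations generated from the merged schema are layered on top of the field configs extracted from each subgraph's federation directives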
+ return planFieldConfigs, nil +} + +func (f *FederationEngineConfigFactory) engineConfigDataSources() (planDataSources []plan.DataSourceConfiguration, err error) { + for _, dataSourceConfig := range f.dataSourceConfigs { + doc, report := astparser.ParseGraphqlDocumentString(dataSourceConfig.Federation.ServiceSDL) + if report.HasErrors() { + return nil, fmt.Errorf("parse graphql document string: %s", report.Error()) + } + + planDataSource, err := newGraphQLDataSourceV2Generator(&doc).Generate( + dataSourceConfig, + f.batchFactory, + f.httpClient, + WithDataSourceV2GeneratorSubscriptionConfiguration(f.streamingClient, f.subscriptionType), + WithDataSourceV2GeneratorSubscriptionClientFactory(f.subscriptionClientFactory), + ) + if err != nil { + return nil, err + } + + planDataSources = append(planDataSources, planDataSource) + } + + return +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphql/config_factory_proxy.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphql/config_factory_proxy.go new file mode 100644 index 00000000000..28d8fd2163c --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphql/config_factory_proxy.go @@ -0,0 +1,121 @@ +package graphql + +import ( + "net/http" + "time" + + "github.com/TykTechnologies/graphql-go-tools/pkg/astparser" + graphqlDataSource "github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/graphql_datasource" + "github.com/TykTechnologies/graphql-go-tools/pkg/engine/resolve" +) + +type proxyEngineConfigFactoryOptions struct { + httpClient *http.Client + streamingClient *http.Client + subscriptionClientFactory graphqlDataSource.GraphQLSubscriptionClientFactory +} + +type ProxyEngineConfigFactoryOption func(options *proxyEngineConfigFactoryOptions) + +func WithProxyHttpClient(client *http.Client) ProxyEngineConfigFactoryOption { + return func(options *proxyEngineConfigFactoryOptions) { + options.httpClient = client + } +} + +func WithProxyStreamingClient(client *http.Client) ProxyEngineConfigFactoryOption { + return func(options *proxyEngineConfigFactoryOptions) { + options.streamingClient = client + } +} + +func WithProxySubscriptionClientFactory(factory graphqlDataSource.GraphQLSubscriptionClientFactory) ProxyEngineConfigFactoryOption { + return func(options *proxyEngineConfigFactoryOptions) { + options.subscriptionClientFactory = factory + } +} + +// ProxyUpstreamConfig holds the configuration for a single data source pointing at a single upstream. +type ProxyUpstreamConfig struct { + URL string + Method string + StaticHeaders http.Header + SubscriptionType SubscriptionType +} + +// ProxyEngineConfigFactory is used to create a v2 engine config with a single upstream and a single data source for this upstream. 
+type ProxyEngineConfigFactory struct { + httpClient *http.Client + streamingClient *http.Client + schema *Schema + proxyUpstreamConfig ProxyUpstreamConfig + batchFactory resolve.DataSourceBatchFactory + subscriptionClientFactory graphqlDataSource.GraphQLSubscriptionClientFactory +} + +func NewProxyEngineConfigFactory(schema *Schema, proxyUpstreamConfig ProxyUpstreamConfig, batchFactory resolve.DataSourceBatchFactory, opts ...ProxyEngineConfigFactoryOption) *ProxyEngineConfigFactory { + options := proxyEngineConfigFactoryOptions{ + httpClient: &http.Client{ + Timeout: time.Second * 10, + Transport: &http.Transport{ + MaxIdleConnsPerHost: 1024, + TLSHandshakeTimeout: 0 * time.Second, + }, + }, + streamingClient: &http.Client{ + Timeout: 0, + }, + subscriptionClientFactory: &graphqlDataSource.DefaultSubscriptionClientFactory{}, + } + + for _, optFunc := range opts { + optFunc(&options) + } + + return &ProxyEngineConfigFactory{ + httpClient: options.httpClient, + streamingClient: options.streamingClient, + schema: schema, + proxyUpstreamConfig: proxyUpstreamConfig, + batchFactory: batchFactory, + subscriptionClientFactory: options.subscriptionClientFactory, + } +} + +func (p *ProxyEngineConfigFactory) EngineV2Configuration() (EngineV2Configuration, error) { + dataSourceConfig := graphqlDataSource.Configuration{ + Fetch: graphqlDataSource.FetchConfiguration{ + URL: p.proxyUpstreamConfig.URL, + Method: p.proxyUpstreamConfig.Method, + Header: p.proxyUpstreamConfig.StaticHeaders, + }, + Subscription: graphqlDataSource.SubscriptionConfiguration{ + URL: p.proxyUpstreamConfig.URL, + UseSSE: p.proxyUpstreamConfig.SubscriptionType == SubscriptionTypeSSE, + }, + } + + conf := NewEngineV2Configuration(p.schema) + + rawDoc, report := astparser.ParseGraphqlDocumentBytes(p.schema.rawInput) + if report.HasErrors() { + return EngineV2Configuration{}, report + } + + dataSource, err := newGraphQLDataSourceV2Generator(&rawDoc).Generate( + dataSourceConfig, + p.batchFactory, + p.httpClient, + WithDataSourceV2GeneratorSubscriptionConfiguration(p.streamingClient, p.proxyUpstreamConfig.SubscriptionType), + WithDataSourceV2GeneratorSubscriptionClientFactory(p.subscriptionClientFactory), + ) + if err != nil { + return EngineV2Configuration{}, err + } + + conf.AddDataSource(dataSource) + fieldConfigs := newGraphQLFieldConfigsV2Generator(p.schema).Generate() + conf.SetFieldConfigurations(fieldConfigs) + + return conf, nil +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphql/engine_config_v2.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphql/engine_config_v2.go new file mode 100644 index 00000000000..80d361d42ef --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphql/engine_config_v2.go @@ -0,0 +1,232 @@ +package graphql + +import ( + "errors" + "net/http" + + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + graphqlDataSource "github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/graphql_datasource" + "github.com/TykTechnologies/graphql-go-tools/pkg/engine/plan" + "github.com/TykTechnologies/graphql-go-tools/pkg/engine/resolve" +) + +const ( + DefaultFlushIntervalInMilliseconds = 1000 +) + +type EngineV2Configuration struct { + schema *Schema + plannerConfig plan.Configuration + websocketBeforeStartHook WebsocketBeforeStartHook + dataLoaderConfig dataLoaderConfig +} + +func NewEngineV2Configuration(schema *Schema) EngineV2Configuration { + return EngineV2Configuration{ + schema: schema, + plannerConfig: plan.Configuration{ + 
DefaultFlushIntervalMillis: DefaultFlushIntervalInMilliseconds, + DataSources: []plan.DataSourceConfiguration{}, + Fields: plan.FieldConfigurations{}, + }, + dataLoaderConfig: dataLoaderConfig{ + EnableSingleFlightLoader: false, + EnableDataLoader: false, + }, + } +} + +type dataLoaderConfig struct { + EnableSingleFlightLoader bool + EnableDataLoader bool +} + +func (e *EngineV2Configuration) AddDataSource(dataSource plan.DataSourceConfiguration) { + e.plannerConfig.DataSources = append(e.plannerConfig.DataSources, dataSource) +} + +func (e *EngineV2Configuration) SetDataSources(dataSources []plan.DataSourceConfiguration) { + e.plannerConfig.DataSources = dataSources +} + +func (e *EngineV2Configuration) AddFieldConfiguration(fieldConfig plan.FieldConfiguration) { + e.plannerConfig.Fields = append(e.plannerConfig.Fields, fieldConfig) +} + +func (e *EngineV2Configuration) SetFieldConfigurations(fieldConfigs plan.FieldConfigurations) { + e.plannerConfig.Fields = fieldConfigs +} + +func (e *EngineV2Configuration) DataSources() []plan.DataSourceConfiguration { + return e.plannerConfig.DataSources +} + +func (e *EngineV2Configuration) FieldConfigurations() plan.FieldConfigurations { + return e.plannerConfig.Fields +} + +func (e *EngineV2Configuration) EnableDataLoader(enable bool) { + e.dataLoaderConfig.EnableDataLoader = enable +} + +func (e *EngineV2Configuration) EnableSingleFlight(enable bool) { + e.dataLoaderConfig.EnableSingleFlightLoader = enable +} + +// SetWebsocketBeforeStartHook - sets before start hook which will be called before processing any operation sent over websockets +func (e *EngineV2Configuration) SetWebsocketBeforeStartHook(hook WebsocketBeforeStartHook) { + e.websocketBeforeStartHook = hook +} + +type dataSourceV2GeneratorOptions struct { + streamingClient *http.Client + subscriptionType SubscriptionType + subscriptionClientFactory graphqlDataSource.GraphQLSubscriptionClientFactory +} + +type DataSourceV2GeneratorOption func(options *dataSourceV2GeneratorOptions) + +func WithDataSourceV2GeneratorSubscriptionConfiguration(streamingClient *http.Client, subscriptionType SubscriptionType) DataSourceV2GeneratorOption { + return func(options *dataSourceV2GeneratorOptions) { + options.streamingClient = streamingClient + options.subscriptionType = subscriptionType + } +} + +func WithDataSourceV2GeneratorSubscriptionClientFactory(factory graphqlDataSource.GraphQLSubscriptionClientFactory) DataSourceV2GeneratorOption { + return func(options *dataSourceV2GeneratorOptions) { + options.subscriptionClientFactory = factory + } +} + +type graphqlDataSourceV2Generator struct { + document *ast.Document +} + +func newGraphQLDataSourceV2Generator(document *ast.Document) *graphqlDataSourceV2Generator { + return &graphqlDataSourceV2Generator{ + document: document, + } +} + +func (d *graphqlDataSourceV2Generator) Generate(config graphqlDataSource.Configuration, batchFactory resolve.DataSourceBatchFactory, httpClient *http.Client, options ...DataSourceV2GeneratorOption) (plan.DataSourceConfiguration, error) { + var planDataSource plan.DataSourceConfiguration + extractor := plan.NewLocalTypeFieldExtractor(d.document) + planDataSource.RootNodes, planDataSource.ChildNodes = extractor.GetAllNodes() + + definedOptions := &dataSourceV2GeneratorOptions{ + streamingClient: &http.Client{Timeout: 0}, + subscriptionType: SubscriptionTypeUnknown, + subscriptionClientFactory: &graphqlDataSource.DefaultSubscriptionClientFactory{}, + } + + for _, option := range options { + option(definedOptions) + } + + 
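+ // assemble the upstream factory: httpClient serves plain fetches, while the streaming client and the subscription client built below handle subscription transports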
factory := &graphqlDataSource.Factory{ + HTTPClient: httpClient, + StreamingClient: definedOptions.streamingClient, + BatchFactory: batchFactory, + } + + subscriptionClient, err := d.generateSubscriptionClient(httpClient, definedOptions) + if err != nil { + return plan.DataSourceConfiguration{}, err + } + factory.SubscriptionClient = subscriptionClient + + planDataSource.Factory = factory + planDataSource.Custom = graphqlDataSource.ConfigJson(config) + + return planDataSource, nil +} + +func (d *graphqlDataSourceV2Generator) generateSubscriptionClient(httpClient *http.Client, definedOptions *dataSourceV2GeneratorOptions) (*graphqlDataSource.SubscriptionClient, error) { + var graphqlSubscriptionClient graphqlDataSource.GraphQLSubscriptionClient + switch definedOptions.subscriptionType { + case SubscriptionTypeGraphQLTransportWS: + graphqlSubscriptionClient = definedOptions.subscriptionClientFactory.NewSubscriptionClient( + httpClient, + definedOptions.streamingClient, + nil, + graphqlDataSource.WithWSSubProtocol(graphqlDataSource.ProtocolGraphQLTWS), + ) + default: + // for compatibility reasons we fall back to graphql-ws protocol + graphqlSubscriptionClient = definedOptions.subscriptionClientFactory.NewSubscriptionClient( + httpClient, + definedOptions.streamingClient, + nil, + graphqlDataSource.WithWSSubProtocol(graphqlDataSource.ProtocolGraphQLWS), + ) + } + + subscriptionClient, ok := graphqlSubscriptionClient.(*graphqlDataSource.SubscriptionClient) + if !ok { + return nil, errors.New("invalid SubscriptionClient was instantiated") + } + return subscriptionClient, nil +} + +type graphqlFieldConfigurationsV2Generator struct { + schema *Schema +} + +func newGraphQLFieldConfigsV2Generator(schema *Schema) *graphqlFieldConfigurationsV2Generator { + return &graphqlFieldConfigurationsV2Generator{ + schema: schema, + } +} + +func (g *graphqlFieldConfigurationsV2Generator) Generate(predefinedFieldConfigs ...plan.FieldConfiguration) plan.FieldConfigurations { + var planFieldConfigs plan.FieldConfigurations + if len(predefinedFieldConfigs) > 0 { + planFieldConfigs = predefinedFieldConfigs + } + + generatedArgs := g.schema.GetAllFieldArguments(NewSkipReservedNamesFunc()) + generatedArgsAsLookupMap := CreateTypeFieldArgumentsLookupMap(generatedArgs) + g.engineConfigArguments(&planFieldConfigs, generatedArgsAsLookupMap) + + return planFieldConfigs +} + +func (g *graphqlFieldConfigurationsV2Generator) engineConfigArguments(fieldConfs *plan.FieldConfigurations, generatedArgs map[TypeFieldLookupKey]TypeFieldArguments) { + for i := range *fieldConfs { + if len(generatedArgs) == 0 { + return + } + + lookupKey := CreateTypeFieldLookupKey((*fieldConfs)[i].TypeName, (*fieldConfs)[i].FieldName) + currentArgs, exists := generatedArgs[lookupKey] + if !exists { + continue + } + + (*fieldConfs)[i].Arguments = g.createArgumentConfigurationsForArgumentNames(currentArgs.ArgumentNames) + delete(generatedArgs, lookupKey) + } + + for _, genArgs := range generatedArgs { + *fieldConfs = append(*fieldConfs, plan.FieldConfiguration{ + TypeName: genArgs.TypeName, + FieldName: genArgs.FieldName, + Arguments: g.createArgumentConfigurationsForArgumentNames(genArgs.ArgumentNames), + }) + } +} + +func (g *graphqlFieldConfigurationsV2Generator) createArgumentConfigurationsForArgumentNames(argumentNames []string) plan.ArgumentsConfigurations { + argConfs := plan.ArgumentsConfigurations{} + for _, argName := range argumentNames { + argConf := plan.ArgumentConfiguration{ + Name: argName, + SourceType: plan.FieldArgumentSource, + } + 
+ argConfs = append(argConfs, argConf) + } + + return argConfs +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphql/errors.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphql/errors.go new file mode 100644 index 00000000000..7a1c113f16d --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphql/errors.go @@ -0,0 +1,207 @@ +package graphql + +import ( + "encoding/json" + "fmt" + "io" + + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/graphqlerrors" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +type Errors interface { + error + WriteResponse(writer io.Writer) (n int, err error) + Count() int + ErrorByIndex(i int) error +} + +type RequestErrors []RequestError + +func RequestErrorsFromError(err error) RequestErrors { + if errors, ok := err.(RequestErrors); ok { + return errors + } + if report, ok := err.(operationreport.Report); ok { + if len(report.ExternalErrors) == 0 { + return RequestErrors{ + { + Message: "Internal Error", + }, + } + } + var errors RequestErrors + for _, externalError := range report.ExternalErrors { + errors = append(errors, RequestError{ + Message: externalError.Message, + Locations: externalError.Locations, + Path: ErrorPath{ + astPath: externalError.Path, + }, + }) + } + return errors + } + return RequestErrors{ + { + Message: err.Error(), + }, + } +} + +func RequestErrorsFromOperationReport(report operationreport.Report) (errors RequestErrors) { + if len(report.ExternalErrors) == 0 { + return nil + } + + for _, externalError := range report.ExternalErrors { + locations := make([]graphqlerrors.Location, 0) + for _, reportLocation := range externalError.Locations { + loc := graphqlerrors.Location{ + Line: reportLocation.Line, + Column: reportLocation.Column, + } + + locations = append(locations, loc) + } + + validationError := RequestError{ + Message: externalError.Message, + Path: ErrorPath{astPath: externalError.Path}, + Locations: locations, + } + + errors = append(errors, validationError) + } + + return errors +} + +func (o RequestErrors) Error() string { + if len(o) > 0 { // avoid panic ... + return o.ErrorByIndex(0).Error() + } + return "no error" // ... 
so, this should never be returned +} + +func (o RequestErrors) WriteResponse(writer io.Writer) (n int, err error) { + response := Response{ + Errors: o, + } + + responseBytes, err := response.Marshal() + if err != nil { + return 0, err + } + + return writer.Write(responseBytes) +} + +func (o RequestErrors) Count() int { + return len(o) +} + +func (o RequestErrors) ErrorByIndex(i int) error { + if i >= o.Count() { + return nil + } + + return o[i] +} + +type RequestError struct { + Message string `json:"message"` + Locations []graphqlerrors.Location `json:"locations,omitempty"` + Path ErrorPath `json:"path"` +} + +func (o RequestError) MarshalJSON() ([]byte, error) { + if o.Path.Len() == 0 { + return json.Marshal(struct { + Message string `json:"message"` + Locations []graphqlerrors.Location `json:"locations,omitempty"` + }{ + Message: o.Message, + Locations: o.Locations, + }) + } + path, err := o.Path.MarshalJSON() + if err != nil { + return nil, err + } + return json.Marshal(struct { + Message string `json:"message"` + Locations []graphqlerrors.Location `json:"locations,omitempty"` + Path json.RawMessage `json:"path"` + }{ + Message: o.Message, + Locations: o.Locations, + Path: path, + }) +} + +func (o RequestError) Error() string { + return fmt.Sprintf("%s, locations: %+v, path: %s", o.Message, o.Locations, o.Path.String()) +} + +type SchemaValidationErrors []SchemaValidationError + +func schemaValidationErrorsFromOperationReport(report operationreport.Report) (errors SchemaValidationErrors) { + if len(report.ExternalErrors) == 0 { + return nil + } + + for _, externalError := range report.ExternalErrors { + validationError := SchemaValidationError{ + Message: externalError.Message, + } + + errors = append(errors, validationError) + } + + return errors +} + +func (s SchemaValidationErrors) Error() string { + return fmt.Sprintf("schema contains %d error(s)", s.Count()) +} + +func (s SchemaValidationErrors) WriteResponse(writer io.Writer) (n int, err error) { + return writer.Write(nil) +} + +func (s SchemaValidationErrors) Count() int { + return len(s) +} + +func (s SchemaValidationErrors) ErrorByIndex(i int) error { + if i >= s.Count() { + return nil + } + return s[i] +} + +type SchemaValidationError struct { + Message string `json:"message"` +} + +func (s SchemaValidationError) Error() string { + return s.Message +} + +type ErrorPath struct { + astPath ast.Path +} + +func (e *ErrorPath) String() string { + return e.astPath.String() +} + +func (e *ErrorPath) MarshalJSON() ([]byte, error) { + return json.Marshal(e.astPath) +} + +func (e *ErrorPath) Len() int { + return len(e.astPath) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphql/execution_engine.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphql/execution_engine.go new file mode 100644 index 00000000000..141c330c4ee --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphql/execution_engine.go @@ -0,0 +1,173 @@ +package graphql + +import ( + "bytes" + "context" + "encoding/json" + "io" + "io/ioutil" + "net/http" + "sync" + + "github.com/jensneuse/abstractlogger" + + "github.com/TykTechnologies/graphql-go-tools/pkg/execution" + "github.com/TykTechnologies/graphql-go-tools/pkg/execution/datasource" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +type DataSourceHttpJsonOptions struct { + HttpClient *http.Client + WhitelistedSchemes []string + Hooks *datasource.Hooks +} + +type DataSourceGraphqlOptions struct { + HttpClient *http.Client + 
WhitelistedSchemes []string + Hooks *datasource.Hooks +} + +type ExecutionOptions struct { + ExtraArguments json.RawMessage +} + +type ExecutionEngine struct { + logger abstractlogger.Logger + basePlanner *datasource.BasePlanner + executorPool *sync.Pool + schema *Schema +} + +func NewExecutionEngine(logger abstractlogger.Logger, schema *Schema, plannerConfig datasource.PlannerConfiguration) (*ExecutionEngine, error) { + executorPool := sync.Pool{ + New: func() interface{} { + return execution.NewExecutor(nil) + }, + } + + basePlanner, err := datasource.NewBaseDataSourcePlanner(schema.rawSchema, plannerConfig, logger) + if err != nil { + return nil, err + } + + return &ExecutionEngine{ + logger: logger, + basePlanner: basePlanner, + executorPool: &executorPool, + schema: schema, + }, nil +} + +func (e *ExecutionEngine) AddHttpJsonDataSource(name string) error { + return e.AddHttpJsonDataSourceWithOptions(name, DataSourceHttpJsonOptions{}) +} + +func (e *ExecutionEngine) AddHttpJsonDataSourceWithOptions(name string, options DataSourceHttpJsonOptions) error { + httpJsonFactoryFactory := &datasource.HttpJsonDataSourcePlannerFactoryFactory{} + + if options.HttpClient != nil { + httpJsonFactoryFactory.Client = options.HttpClient + } + + if len(options.WhitelistedSchemes) > 0 { + httpJsonFactoryFactory.WhitelistedSchemes = options.WhitelistedSchemes + } + + if options.Hooks != nil { + httpJsonFactoryFactory.Hooks = *options.Hooks + } + + return e.AddDataSource(name, httpJsonFactoryFactory) +} + +func (e *ExecutionEngine) AddGraphqlDataSource(name string) error { + return e.AddGraphqlDataSourceWithOptions(name, DataSourceGraphqlOptions{}) +} + +func (e *ExecutionEngine) AddGraphqlDataSourceWithOptions(name string, options DataSourceGraphqlOptions) error { + graphqlFactoryFactory := &datasource.GraphQLDataSourcePlannerFactoryFactory{} + + if options.HttpClient != nil { + graphqlFactoryFactory.Client = options.HttpClient + } + + if len(options.WhitelistedSchemes) > 0 { + graphqlFactoryFactory.WhitelistedSchemes = options.WhitelistedSchemes + } + + if options.Hooks != nil { + graphqlFactoryFactory.Hooks = *options.Hooks + } + + return e.AddDataSource(name, graphqlFactoryFactory) +} + +func (e *ExecutionEngine) AddDataSource(name string, plannerFactoryFactory datasource.PlannerFactoryFactory) error { + return e.basePlanner.RegisterDataSourcePlannerFactory(name, plannerFactoryFactory) +} + +func (e *ExecutionEngine) ExecuteWithWriter(ctx context.Context, operation *Request, writer io.Writer, options ExecutionOptions) error { + var report operationreport.Report + + if !operation.IsNormalized() { + normalizationResult, err := operation.Normalize(e.schema) + if err != nil { + return err + } + + if !normalizationResult.Successful { + return normalizationResult.Errors + } + } + + planner := execution.NewPlanner(e.basePlanner) + plan := planner.Plan(&operation.document, e.basePlanner.Definition, operation.OperationName, &report) + if report.HasErrors() { + return report + } + + variables, extraArguments := execution.VariablesFromJson(operation.Variables, options.ExtraArguments) + executionContext := execution.Context{ + Context: ctx, + Variables: variables, + ExtraArguments: extraArguments, + } + + poolExecutor := e.executorPool.Get().(*execution.Executor) + defer e.executorPool.Put(poolExecutor) + return poolExecutor.Execute(executionContext, plan, writer) +} + +func (e *ExecutionEngine) Execute(ctx context.Context, operation *Request, options ExecutionOptions) (*ExecutionResult, error) { + var buf 
bytes.Buffer + err := e.ExecuteWithWriter(ctx, operation, &buf, options) + return &ExecutionResult{&buf}, err +} + +func (e *ExecutionEngine) NewExecutionHandler() *execution.Handler { + return execution.NewHandler(e.basePlanner, nil) +} + +type ExecutionResult struct { + buf *bytes.Buffer +} + +func (r *ExecutionResult) Buffer() *bytes.Buffer { + return r.buf +} + +func (r *ExecutionResult) GetAsHTTPResponse() (res *http.Response) { + if r.buf == nil { + return + } + + res = &http.Response{} + res.Body = ioutil.NopCloser(r.buf) + res.Header = make(http.Header) + res.StatusCode = 200 + + res.Header.Set("Content-Type", "application/json") + + return +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphql/execution_engine_v2.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphql/execution_engine_v2.go new file mode 100644 index 00000000000..a673a8a52b1 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphql/execution_engine_v2.go @@ -0,0 +1,322 @@ +package graphql + +import ( + "bytes" + "compress/flate" + "compress/gzip" + "context" + "errors" + "io/ioutil" + "net/http" + "strconv" + "sync" + + "github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/introspection_datasource" + lru "github.com/hashicorp/golang-lru" + "github.com/jensneuse/abstractlogger" + + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astprinter" + "github.com/TykTechnologies/graphql-go-tools/pkg/engine/datasource/httpclient" + "github.com/TykTechnologies/graphql-go-tools/pkg/engine/plan" + "github.com/TykTechnologies/graphql-go-tools/pkg/engine/resolve" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" + "github.com/TykTechnologies/graphql-go-tools/pkg/pool" + "github.com/TykTechnologies/graphql-go-tools/pkg/postprocess" +) + +type EngineResultWriter struct { + buf *bytes.Buffer + flushCallback func(data []byte) +} + +func NewEngineResultWriter() EngineResultWriter { + return EngineResultWriter{ + buf: &bytes.Buffer{}, + } +} + +func NewEngineResultWriterFromBuffer(buf *bytes.Buffer) EngineResultWriter { + return EngineResultWriter{ + buf: buf, + } +} + +func (e *EngineResultWriter) SetFlushCallback(flushCb func(data []byte)) { + e.flushCallback = flushCb +} + +func (e *EngineResultWriter) Write(p []byte) (n int, err error) { + return e.buf.Write(p) +} + +func (e *EngineResultWriter) Read(p []byte) (n int, err error) { + return e.buf.Read(p) +} + +func (e *EngineResultWriter) Flush() { + if e.flushCallback != nil { + e.flushCallback(e.Bytes()) + } + + e.Reset() +} + +func (e *EngineResultWriter) Len() int { + return e.buf.Len() +} + +func (e *EngineResultWriter) Bytes() []byte { + return e.buf.Bytes() +} + +func (e *EngineResultWriter) String() string { + return e.buf.String() +} + +func (e *EngineResultWriter) Reset() { + e.buf.Reset() +} + +func (e *EngineResultWriter) AsHTTPResponse(status int, headers http.Header) *http.Response { + b := &bytes.Buffer{} + + switch headers.Get(httpclient.ContentEncodingHeader) { + case "gzip": + gzw := gzip.NewWriter(b) + _, _ = gzw.Write(e.Bytes()) + _ = gzw.Close() + case "deflate": + fw, _ := flate.NewWriter(b, 1) + _, _ = fw.Write(e.Bytes()) + _ = fw.Close() + default: + headers.Del(httpclient.ContentEncodingHeader) // delete unsupported compression header + b = e.buf + } + + res := &http.Response{} + res.Body = ioutil.NopCloser(b) + res.Header = headers + res.StatusCode = status + res.ContentLength = int64(b.Len()) + 
res.Header.Set("Content-Length", strconv.Itoa(b.Len())) + return res +} + +type internalExecutionContext struct { + resolveContext *resolve.Context + postProcessor *postprocess.Processor +} + +func newInternalExecutionContext() *internalExecutionContext { + return &internalExecutionContext{ + resolveContext: resolve.NewContext(context.Background()), + postProcessor: postprocess.DefaultProcessor(), + } +} + +func (e *internalExecutionContext) prepare(ctx context.Context, variables []byte, request resolve.Request) { + e.setContext(ctx) + e.setVariables(variables) + e.setRequest(request) +} + +func (e *internalExecutionContext) setRequest(request resolve.Request) { + e.resolveContext.Request = request +} + +func (e *internalExecutionContext) setContext(ctx context.Context) { + e.resolveContext.Context = ctx +} + +func (e *internalExecutionContext) setVariables(variables []byte) { + e.resolveContext.Variables = variables +} + +func (e *internalExecutionContext) reset() { + e.resolveContext.Free() +} + +type ExecutionEngineV2 struct { + logger abstractlogger.Logger + config EngineV2Configuration + planner *plan.Planner + plannerMu sync.Mutex + resolver *resolve.Resolver + internalExecutionContextPool sync.Pool + executionPlanCache *lru.Cache +} + +type WebsocketBeforeStartHook interface { + OnBeforeStart(reqCtx context.Context, operation *Request) error +} + +type ExecutionOptionsV2 func(ctx *internalExecutionContext) + +func WithBeforeFetchHook(hook resolve.BeforeFetchHook) ExecutionOptionsV2 { + return func(ctx *internalExecutionContext) { + ctx.resolveContext.SetBeforeFetchHook(hook) + } +} + +func WithUpstreamHeaders(header http.Header) ExecutionOptionsV2 { + return func(ctx *internalExecutionContext) { + ctx.postProcessor.AddPostProcessor(postprocess.NewProcessInjectHeader(header)) + } +} + +func WithAfterFetchHook(hook resolve.AfterFetchHook) ExecutionOptionsV2 { + return func(ctx *internalExecutionContext) { + ctx.resolveContext.SetAfterFetchHook(hook) + } +} + +func WithAdditionalHttpHeaders(headers http.Header, excludeByKeys ...string) ExecutionOptionsV2 { + return func(ctx *internalExecutionContext) { + if len(headers) == 0 { + return + } + + if ctx.resolveContext.Request.Header == nil { + ctx.resolveContext.Request.Header = make(http.Header) + } + + excludeMap := make(map[string]bool) + for _, key := range excludeByKeys { + excludeMap[key] = true + } + + for headerKey, headerValues := range headers { + if excludeMap[headerKey] { + continue + } + + for _, headerValue := range headerValues { + ctx.resolveContext.Request.Header.Add(headerKey, headerValue) + } + } + } +} + +func NewExecutionEngineV2(ctx context.Context, logger abstractlogger.Logger, engineConfig EngineV2Configuration) (*ExecutionEngineV2, error) { + executionPlanCache, err := lru.New(1024) + if err != nil { + return nil, err + } + fetcher := resolve.NewFetcher(engineConfig.dataLoaderConfig.EnableSingleFlightLoader) + + introspectionCfg, err := introspection_datasource.NewIntrospectionConfigFactory(&engineConfig.schema.document) + if err != nil { + return nil, err + } + + engineConfig.AddDataSource(introspectionCfg.BuildDataSourceConfiguration()) + for _, fieldCfg := range introspectionCfg.BuildFieldConfigurations() { + engineConfig.AddFieldConfiguration(fieldCfg) + } + + return &ExecutionEngineV2{ + logger: logger, + config: engineConfig, + planner: plan.NewPlanner(ctx, engineConfig.plannerConfig), + resolver: resolve.New(ctx, fetcher, engineConfig.dataLoaderConfig.EnableDataLoader), + internalExecutionContextPool: 
sync.Pool{ + New: func() interface{} { + return newInternalExecutionContext() + }, + }, + executionPlanCache: executionPlanCache, + }, nil +} + +func (e *ExecutionEngineV2) Execute(ctx context.Context, operation *Request, writer resolve.FlushWriter, options ...ExecutionOptionsV2) error { + if !operation.IsNormalized() { + result, err := operation.Normalize(e.config.schema) + if err != nil { + return err + } + + if !result.Successful { + return result.Errors + } + } + + result, err := operation.ValidateForSchema(e.config.schema) + if err != nil { + return err + } + if !result.Valid { + return result.Errors + } + + execContext := e.getExecutionCtx() + defer e.putExecutionCtx(execContext) + + execContext.prepare(ctx, operation.Variables, operation.request) + + for i := range options { + options[i](execContext) + } + + var report operationreport.Report + cachedPlan := e.getCachedPlan(execContext, &operation.document, &e.config.schema.document, operation.OperationName, &report) + if report.HasErrors() { + return report + } + + switch p := cachedPlan.(type) { + case *plan.SynchronousResponsePlan: + err = e.resolver.ResolveGraphQLResponse(execContext.resolveContext, p.Response, nil, writer) + case *plan.SubscriptionResponsePlan: + err = e.resolver.ResolveGraphQLSubscription(execContext.resolveContext, p.Response, writer) + default: + return errors.New("execution of operation is not possible") + } + + return err +} + +func (e *ExecutionEngineV2) getCachedPlan(ctx *internalExecutionContext, operation, definition *ast.Document, operationName string, report *operationreport.Report) plan.Plan { + + hash := pool.Hash64.Get() + hash.Reset() + defer pool.Hash64.Put(hash) + err := astprinter.Print(operation, definition, hash) + if err != nil { + report.AddInternalError(err) + return nil + } + + cacheKey := hash.Sum64() + + if cached, ok := e.executionPlanCache.Get(cacheKey); ok { + if p, ok := cached.(plan.Plan); ok { + return p + } + } + + e.plannerMu.Lock() + defer e.plannerMu.Unlock() + planResult := e.planner.Plan(operation, definition, operationName, report) + if report.HasErrors() { + return nil + } + + p := ctx.postProcessor.Process(planResult) + e.executionPlanCache.Add(cacheKey, p) + return p +} + +func (e *ExecutionEngineV2) GetWebsocketBeforeStartHook() WebsocketBeforeStartHook { + return e.config.websocketBeforeStartHook +} + +func (e *ExecutionEngineV2) getExecutionCtx() *internalExecutionContext { + return e.internalExecutionContextPool.Get().(*internalExecutionContext) +} + +func (e *ExecutionEngineV2) putExecutionCtx(ctx *internalExecutionContext) { + ctx.reset() + e.internalExecutionContextPool.Put(ctx) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphql/extractor.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphql/extractor.go new file mode 100644 index 00000000000..f9aed7b0ece --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphql/extractor.go @@ -0,0 +1,66 @@ +package graphql + +import ( + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +type Extractor struct { + walker *astvisitor.Walker + visitor *requestVisitor +} + +func NewExtractor() *Extractor { + walker := astvisitor.NewWalker(48) + visitor := requestVisitor{ + Walker: &walker, + } + + walker.RegisterEnterFieldVisitor(&visitor) + + return &Extractor{ + walker: &walker, + visitor: &visitor, + } +} + +func (e *Extractor) 
ExtractFieldsFromRequest(request *Request, schema *Schema, report *operationreport.Report, data RequestTypes) { + if !request.IsNormalized() { + result, err := request.Normalize(schema) + if err != nil { + report.AddInternalError(err) + } + + if !result.Successful { + report.AddInternalError(result.Errors) + } + } + + e.visitor.data = data + e.visitor.operation = &request.document + e.visitor.definition = &schema.document + e.walker.Walk(&request.document, &schema.document, report) +} + +type requestVisitor struct { + *astvisitor.Walker + operation, definition *ast.Document + data RequestTypes +} + +func (p *requestVisitor) EnterField(ref int) { + fieldName := p.operation.FieldNameString(ref) + parentTypeName := p.definition.NodeNameString(p.EnclosingTypeDefinition) + + t, ok := p.data[parentTypeName] + if !ok { + t = make(RequestFields) + } + + if _, ok := t[fieldName]; !ok { + t[fieldName] = struct{}{} + } + + p.data[parentTypeName] = t +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphql/lookup.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphql/lookup.go new file mode 100644 index 00000000000..c3ed7adfc0b --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphql/lookup.go @@ -0,0 +1,24 @@ +package graphql + +import ( + "fmt" +) + +type TypeFieldLookupKey string + +func CreateTypeFieldLookupKey(typeName string, fieldName string) TypeFieldLookupKey { + return TypeFieldLookupKey(fmt.Sprintf("%s.%s", typeName, fieldName)) +} + +func CreateTypeFieldArgumentsLookupMap(typeFieldArgs []TypeFieldArguments) map[TypeFieldLookupKey]TypeFieldArguments { + if len(typeFieldArgs) == 0 { + return nil + } + + lookupMap := make(map[TypeFieldLookupKey]TypeFieldArguments) + for _, currentTypeFieldArgs := range typeFieldArgs { + lookupMap[CreateTypeFieldLookupKey(currentTypeFieldArgs.TypeName, currentTypeFieldArgs.FieldName)] = currentTypeFieldArgs + } + + return lookupMap +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphql/normalization.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphql/normalization.go new file mode 100644 index 00000000000..80594f51489 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphql/normalization.go @@ -0,0 +1,67 @@ +package graphql + +import ( + "github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +type NormalizationResult struct { + Successful bool + Errors Errors +} + +func (r *Request) Normalize(schema *Schema) (result NormalizationResult, err error) { + if schema == nil { + return NormalizationResult{Successful: false, Errors: nil}, ErrNilSchema + } + + report := r.parseQueryOnce() + if report.HasErrors() { + return normalizationResultFromReport(report) + } + + r.document.Input.Variables = r.Variables + + normalizer := astnormalization.NewWithOpts( + astnormalization.WithExtractVariables(), + astnormalization.WithRemoveFragmentDefinitions(), + astnormalization.WithRemoveUnusedVariables(), + ) + + if r.OperationName != "" { + normalizer.NormalizeNamedOperation(&r.document, &schema.document, []byte(r.OperationName), &report) + } else { + normalizer.NormalizeOperation(&r.document, &schema.document, &report) + } + + if report.HasErrors() { + return normalizationResultFromReport(report) + } + + r.isNormalized = true + + r.Variables = r.document.Input.Variables + + return NormalizationResult{Successful: true, Errors: nil}, nil +} + +func 
normalizationResultFromReport(report operationreport.Report) (NormalizationResult, error) { + result := NormalizationResult{ + Successful: false, + Errors: nil, + } + + if !report.HasErrors() { + result.Successful = true + return result, nil + } + + result.Errors = RequestErrorsFromOperationReport(report) + + var err error + if len(report.InternalErrors) > 0 { + err = report.InternalErrors[0] + } + + return result, err +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphql/request.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphql/request.go new file mode 100644 index 00000000000..a78c8f3d176 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphql/request.go @@ -0,0 +1,288 @@ +package graphql + +import ( + "encoding/json" + "errors" + "io" + "io/ioutil" + "net/http" + + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astparser" + "github.com/TykTechnologies/graphql-go-tools/pkg/engine/resolve" + "github.com/TykTechnologies/graphql-go-tools/pkg/middleware/operation_complexity" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +const ( + schemaIntrospectionFieldName = "__schema" + typeIntrospectionFieldName = "__type" +) + +type OperationType ast.OperationType + +const ( + OperationTypeUnknown OperationType = OperationType(ast.OperationTypeUnknown) + OperationTypeQuery OperationType = OperationType(ast.OperationTypeQuery) + OperationTypeMutation OperationType = OperationType(ast.OperationTypeMutation) + OperationTypeSubscription OperationType = OperationType(ast.OperationTypeSubscription) +) + +var ( + ErrEmptyRequest = errors.New("the provided request is empty") + ErrNilSchema = errors.New("the provided schema is nil") +) + +type Request struct { + OperationName string `json:"operationName"` + Variables json.RawMessage `json:"variables"` + Query string `json:"query"` + + document ast.Document + isParsed bool + isNormalized bool + request resolve.Request + + validForSchema map[uint64]ValidationResult +} + +func UnmarshalRequest(reader io.Reader, request *Request) error { + requestBytes, err := ioutil.ReadAll(reader) + if err != nil { + return err + } + + if len(requestBytes) == 0 { + return ErrEmptyRequest + } + + return json.Unmarshal(requestBytes, &request) +} + +func UnmarshalHttpRequest(r *http.Request, request *Request) error { + request.request.Header = r.Header + return UnmarshalRequest(r.Body, request) +} + +func (r *Request) SetHeader(header http.Header) { + r.request.Header = header +} + +func (r *Request) CalculateComplexity(complexityCalculator ComplexityCalculator, schema *Schema) (ComplexityResult, error) { + if schema == nil { + return ComplexityResult{}, ErrNilSchema + } + + report := r.parseQueryOnce() + if report.HasErrors() { + return complexityResult( + operation_complexity.OperationStats{}, + []operation_complexity.RootFieldStats{}, + report, + ) + } + + return complexityCalculator.Calculate(&r.document, &schema.document) +} + +func (r Request) Print(writer io.Writer) (n int, err error) { + report := r.parseQueryOnce() + if report.HasErrors() { + return 0, report + } + + return writer.Write(r.document.Input.RawBytes) +} + +func (r *Request) IsNormalized() bool { + return r.isNormalized +} + +func (r *Request) parseQueryOnce() (report operationreport.Report) { + if r.isParsed { + return report + } + + r.document, report = astparser.ParseGraphqlDocumentString(r.Query) + if !report.HasErrors() { + // If the given query has 
problems, and we failed to parse it, + // we shouldn't mark it as parsed. It can be misleading for + // the rest of the components. See TT-5704. + r.isParsed = true + } + return report +} + +func (r *Request) scanOperationDefinitionsFindSelectionSet() (selectionSet *ast.SelectionSet, err error) { + report := r.parseQueryOnce() + if report.HasErrors() { + return nil, report + } + + var operationDefinitionRef = ast.InvalidRef + var possibleOperationDefinitionRefs = make([]int, 0) + + for i := 0; i < len(r.document.RootNodes); i++ { + if r.document.RootNodes[i].Kind == ast.NodeKindOperationDefinition { + possibleOperationDefinitionRefs = append(possibleOperationDefinitionRefs, r.document.RootNodes[i].Ref) + } + } + + if len(possibleOperationDefinitionRefs) == 0 { + return nil, nil + } else if len(possibleOperationDefinitionRefs) == 1 { + operationDefinitionRef = possibleOperationDefinitionRefs[0] + } else { + for i := 0; i < len(possibleOperationDefinitionRefs); i++ { + ref := possibleOperationDefinitionRefs[i] + name := r.document.OperationDefinitionNameString(ref) + + if r.OperationName == name { + operationDefinitionRef = ref + break + } + } + } + + if operationDefinitionRef == ast.InvalidRef { + return + } + + operationDef := r.document.OperationDefinitions[operationDefinitionRef] + if operationDef.OperationType != ast.OperationTypeQuery { + return + } + if !operationDef.HasSelections { + return + } + + selectionSet = &r.document.SelectionSets[operationDef.SelectionSet] + if len(selectionSet.SelectionRefs) == 0 { + return + } + + return selectionSet, nil +} + +func (r *Request) scanFragmentDefinitionsFindSelectionSets() ([]*ast.SelectionSet, error) { + report := r.parseQueryOnce() + if report.HasErrors() { + return nil, report + } + + // See the following constants: + // + // * inlineFragmentedIntrospectionQueryWithFragmentOnQuery + // * inlineFragmentedIntrospectionQueryType + // * fragmentedIntrospectionQuery + + var selectionSets []*ast.SelectionSet + for i := 0; i < len(r.document.FragmentDefinitions); i++ { + fragment := r.document.FragmentDefinitions[i] + if fragment.HasSelections { + if fragment.SelectionSet == ast.InvalidRef { + continue + } + selectionSet := r.document.SelectionSets[fragment.SelectionSet] + selectionSets = append(selectionSets, &selectionSet) + } + } + + for i := 0; i < len(r.document.InlineFragments); i++ { + inlineFragment := r.document.InlineFragments[i] + if inlineFragment.HasSelections { + if inlineFragment.SelectionSet == ast.InvalidRef { + continue + } + selectionSet := r.document.SelectionSets[inlineFragment.SelectionSet] + selectionSets = append(selectionSets, &selectionSet) + } + } + + return selectionSets, nil +} + +func (r *Request) IsIntrospectionQuery() (result bool, err error) { + selectionSet, err := r.scanOperationDefinitionsFindSelectionSet() + if err != nil { + return + } + if selectionSet == nil { + return + } + for i := 0; i < len(selectionSet.SelectionRefs); i++ { + selection := r.document.Selections[selectionSet.SelectionRefs[i]] + if selection.Kind != ast.SelectionKindField { + continue + } + fieldName := r.document.FieldNameUnsafeString(selection.Ref) + switch fieldName { + case schemaIntrospectionFieldName, typeIntrospectionFieldName: + continue + default: + return + } + } + + return true, nil +} + +// IsIntrospectionQueryStrict returns true if the client tries to query __schema or __type fields in any way. +// IsIntrospectionQuery returns false if schema/type introspection query contains additional non-introspection fields. 
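+// For example, per that rule IsIntrospectionQuery would treat a mixed query such as
+// `{ __schema { types { name } } user { id } }` as a regular, non-introspection query
+// because of the additional `user` field (field names here are illustrative).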
+// This breaks the granular access schema of Tyk Gateway. +func (r *Request) IsIntrospectionQueryStrict() (result bool, err error) { + selectionSets, err := r.scanFragmentDefinitionsFindSelectionSets() + if err != nil { + return + } + selectionSet, err := r.scanOperationDefinitionsFindSelectionSet() + if err != nil { + return + } + if selectionSet != nil { + selectionSets = append(selectionSets, selectionSet) + } + + for _, selectionSetItem := range selectionSets { + for i := 0; i < len(selectionSetItem.SelectionRefs); i++ { + selection := r.document.Selections[selectionSetItem.SelectionRefs[i]] + if selection.Kind != ast.SelectionKindField { + continue + } + + fieldName := r.document.FieldNameUnsafeString(selection.Ref) + switch fieldName { + case schemaIntrospectionFieldName, typeIntrospectionFieldName: + // The query wants to access an introspection field, return true. + return true, nil + default: + // non-introspection field, continue scanning. + continue + } + } + } + + return +} + +func (r *Request) OperationType() (OperationType, error) { + report := r.parseQueryOnce() + if report.HasErrors() { + return OperationTypeUnknown, report + } + + for _, rootNode := range r.document.RootNodes { + if rootNode.Kind != ast.NodeKindOperationDefinition { + continue + } + + if r.OperationName != "" && r.document.OperationDefinitionNameString(rootNode.Ref) != r.OperationName { + continue + } + + opType := r.document.OperationDefinitions[rootNode.Ref].OperationType + return OperationType(opType), nil + } + + return OperationTypeUnknown, nil +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphql/request_fields_validator.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphql/request_fields_validator.go new file mode 100644 index 00000000000..f4a74653743 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphql/request_fields_validator.go @@ -0,0 +1,163 @@ +package graphql + +import ( + "fmt" + + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +const asteriskCharacter = "*" + +type RequestFieldsValidator interface { + Validate(request *Request, schema *Schema, restrictions []Type) (RequestFieldsValidationResult, error) +} + +type FieldRestrictionValidator interface { + ValidateByFieldList(request *Request, schema *Schema, restrictionList FieldRestrictionList) (RequestFieldsValidationResult, error) +} + +type FieldRestrictionListKind int + +const ( + AllowList FieldRestrictionListKind = iota + BlockList +) + +type FieldRestrictionList struct { + Kind FieldRestrictionListKind + Types []Type +} + +type DefaultFieldsValidator struct { +} + +// Validate validates a request by checking if `restrictions` contains blocked fields. +// +// Deprecated: This function can only handle blocked fields. Use `ValidateByFieldList` if you +// want to check for blocked or allowed fields instead. +func (d DefaultFieldsValidator) Validate(request *Request, schema *Schema, restrictions []Type) (RequestFieldsValidationResult, error) { + restrictionList := FieldRestrictionList{ + Kind: BlockList, + Types: restrictions, + } + + return d.ValidateByFieldList(request, schema, restrictionList) +} + +// ValidateByFieldList will validate a request by using a list of allowed or blocked fields. 
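+// A minimal usage sketch (request and schema are assumed to be an existing
+// *Request and *Schema; the type and field names are illustrative):
+//
+//	allowList := FieldRestrictionList{
+//		Kind:  AllowList,
+//		Types: []Type{{Name: "Query", Fields: []string{"user"}}},
+//	}
+//	result, err := DefaultFieldsValidator{}.ValidateByFieldList(request, schema, allowList)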
+func (d DefaultFieldsValidator) ValidateByFieldList(request *Request, schema *Schema, restrictionList FieldRestrictionList) (RequestFieldsValidationResult, error) {
+	report := operationreport.Report{}
+	if len(restrictionList.Types) == 0 {
+		return fieldsValidationResult(report, true, "", "")
+	}
+
+	requestedTypes := make(RequestTypes)
+	NewExtractor().ExtractFieldsFromRequest(request, schema, &report, requestedTypes)
+
+	if restrictionList.Kind == BlockList {
+		return d.checkForBlockedFields(restrictionList, requestedTypes, report)
+	}
+
+	return d.checkForAllowedFields(restrictionList, requestedTypes, report)
+}
+
+func (d DefaultFieldsValidator) checkForBlockedFields(restrictionList FieldRestrictionList, requestTypes RequestTypes, report operationreport.Report) (RequestFieldsValidationResult, error) {
+	// Group blocked fields by type name for easy access.
+	restrictedFieldsLookupMap := make(map[string]map[string]bool)
+	for _, restrictedType := range restrictionList.Types {
+		restrictedFieldsLookupMap[restrictedType.Name] = make(map[string]bool)
+		for _, restrictedField := range restrictedType.Fields {
+			restrictedFieldsLookupMap[restrictedType.Name][restrictedField] = true
+		}
+	}
+
+	for requestType, requestFields := range requestTypes {
+		for requestField := range requestFields {
+			if _, ok := restrictedFieldsLookupMap[requestType][asteriskCharacter]; ok {
+				// An asterisk entry blocks every field of this type.
+				return fieldsValidationResultForAsterisk(report, false, requestType)
+			}
+
+			isRestrictedField := restrictedFieldsLookupMap[requestType][requestField]
+			if isRestrictedField {
+				return fieldsValidationResult(report, false, requestType, requestField)
+			}
+		}
+	}
+
+	return fieldsValidationResult(report, true, "", "")
+}
+
+func (d DefaultFieldsValidator) checkForAllowedFields(restrictionList FieldRestrictionList, requestTypes RequestTypes, report operationreport.Report) (RequestFieldsValidationResult, error) {
+	// Group allowed fields and types for easy access.
+	allowedFieldsLookupMap := make(map[string]map[string]bool)
+	for _, allowedType := range restrictionList.Types {
+		allowedFieldsLookupMap[allowedType.Name] = make(map[string]bool)
+		for _, allowedField := range allowedType.Fields {
+			allowedFieldsLookupMap[allowedType.Name][allowedField] = true
+		}
+	}
+
+	// Try to find a disallowed field.
+	for requestType, requestFields := range requestTypes {
+		if _, ok := allowedFieldsLookupMap[requestType][asteriskCharacter]; ok {
+			// Every field of this type is allowed; skip the per-field check.
+			continue
+		}
+
+		for requestField := range requestFields {
+			isAllowedField := allowedFieldsLookupMap[requestType][requestField]
+			if !isAllowedField {
+				// The requested field is not in the allow list.
+				return fieldsValidationResult(report, false, requestType, requestField)
+			}
+		}
+	}
+
+	return fieldsValidationResult(report, true, "", "")
+}
+
+type RequestFieldsValidationResult struct {
+	Valid  bool
+	Errors Errors
+}
+
+func fieldsValidationResultCommon(report operationreport.Report, valid bool, requestErrors RequestErrors) (RequestFieldsValidationResult, error) {
+	result := RequestFieldsValidationResult{
+		Valid:  valid,
+		Errors: nil,
+	}
+
+	result.Errors = requestErrors
+	if !report.HasErrors() {
+		return result, nil
+	}
+
+	requestErrors = append(requestErrors, RequestErrorsFromOperationReport(report)...)
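+	// Re-assign after appending so the report's errors are included in the result.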
+ result.Errors = requestErrors + + var err error + if len(report.InternalErrors) > 0 { + err = report.InternalErrors[0] + } + + return result, err +} + +func fieldsValidationResult(report operationreport.Report, valid bool, typeName, fieldName string) (RequestFieldsValidationResult, error) { + var requestErrors RequestErrors + if !valid { + requestErrors = append(requestErrors, RequestError{ + Message: fmt.Sprintf("field: %s is restricted on type: %s", fieldName, typeName), + }) + } + return fieldsValidationResultCommon(report, valid, requestErrors) +} + +func fieldsValidationResultForAsterisk(report operationreport.Report, valid bool, typeName string) (RequestFieldsValidationResult, error) { + var requestErrors RequestErrors + if !valid { + requestErrors = append(requestErrors, RequestError{ + Message: fmt.Sprintf("all fields of %s type are restricted", typeName), + }) + } + return fieldsValidationResultCommon(report, valid, requestErrors) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphql/response.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphql/response.go new file mode 100644 index 00000000000..1360341cf47 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphql/response.go @@ -0,0 +1,15 @@ +package graphql + +import ( + "encoding/json" +) + +type Response struct { + Errors Errors `json:"errors,omitempty"` + // TODO: Data + // TODO: Extensions +} + +func (r Response) Marshal() ([]byte, error) { + return json.Marshal(r) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphql/schema.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphql/schema.go new file mode 100644 index 00000000000..6c544ed3f14 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphql/schema.go @@ -0,0 +1,460 @@ +package graphql + +import ( + "bytes" + "encoding/json" + "io" + "io/ioutil" + "strings" + + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization" + "github.com/TykTechnologies/graphql-go-tools/pkg/astparser" + "github.com/TykTechnologies/graphql-go-tools/pkg/astprinter" + "github.com/TykTechnologies/graphql-go-tools/pkg/asttransform" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation" + "github.com/TykTechnologies/graphql-go-tools/pkg/engine/plan" + "github.com/TykTechnologies/graphql-go-tools/pkg/introspection" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" + "github.com/TykTechnologies/graphql-go-tools/pkg/pool" +) + +type TypeFields struct { + TypeName string + FieldNames []string +} + +type TypeFieldArguments struct { + TypeName string + FieldName string + ArgumentNames []string +} + +type Schema struct { + rawInput []byte + rawSchema []byte + document ast.Document + isNormalized bool + hash uint64 +} + +func (s *Schema) Hash() (uint64, error) { + if s.hash != 0 { + return s.hash, nil + } + h := pool.Hash64.Get() + h.Reset() + defer pool.Hash64.Put(h) + printer := astprinter.Printer{} + err := printer.Print(&s.document, nil, h) + if err != nil { + return 0, err + } + s.hash = h.Sum64() + return s.hash, nil +} + +func NewSchemaFromReader(reader io.Reader) (*Schema, error) { + schemaContent, err := ioutil.ReadAll(reader) + if err != nil { + return nil, err + } + + return createSchema(schemaContent, true) +} + +func NewSchemaFromString(schema string) (*Schema, error) { + schemaContent := []byte(schema) + + return createSchema(schemaContent, true) +} + +func 
ValidateSchemaString(schema string) (result ValidationResult, err error) { + parsedSchema, err := NewSchemaFromString(schema) + if err != nil { + return ValidationResult{ + Valid: false, + Errors: SchemaValidationErrors{ + SchemaValidationError{Message: err.Error()}, + }, + }, nil + } + + return parsedSchema.Validate() +} + +func (s *Schema) Normalize() (result NormalizationResult, err error) { + if s.isNormalized { + return NormalizationResult{ + Successful: true, + Errors: nil, + }, nil + } + + report := operationreport.Report{} + astnormalization.NormalizeDefinition(&s.document, &report) + if report.HasErrors() { + return normalizationResultFromReport(report) + } + + normalizedSchemaBuffer := &bytes.Buffer{} + err = astprinter.PrintIndent(&s.document, nil, []byte(" "), normalizedSchemaBuffer) + if err != nil { + return NormalizationResult{ + Successful: false, + Errors: nil, + }, err + } + + normalizedSchema, err := createSchema(normalizedSchemaBuffer.Bytes(), false) + if err != nil { + return NormalizationResult{ + Successful: false, + Errors: nil, + }, err + } + + s.rawSchema = normalizedSchema.rawSchema + s.document = normalizedSchema.document + s.isNormalized = true + return NormalizationResult{Successful: true, Errors: nil}, nil +} + +func (s *Schema) Input() []byte { + return s.rawInput +} + +func (s *Schema) Document() []byte { + return s.rawSchema +} + +// HasQueryType TODO: should be deprecated? +func (s *Schema) HasQueryType() bool { + return len(s.document.Index.QueryTypeName) > 0 +} + +func (s *Schema) QueryTypeName() string { + return string(s.document.Index.QueryTypeName) +} + +func (s *Schema) IsNormalized() bool { + return s.isNormalized +} + +func (s *Schema) HasMutationType() bool { + return len(s.document.Index.MutationTypeName) > 0 +} + +func (s *Schema) MutationTypeName() string { + if !s.HasMutationType() { + return "" + } + + return string(s.document.Index.MutationTypeName) +} + +func (s *Schema) HasSubscriptionType() bool { + return len(s.document.Index.SubscriptionTypeName) > 0 +} + +func (s *Schema) SubscriptionTypeName() string { + if !s.HasSubscriptionType() { + return "" + } + + return string(s.document.Index.SubscriptionTypeName) +} + +func (s *Schema) Validate() (result ValidationResult, err error) { + var report operationreport.Report + var isValid bool + + validator := astvalidation.DefaultDefinitionValidator() + validationState := validator.Validate(&s.document, &report) + if validationState == astvalidation.Valid { + isValid = true + } + + return ValidationResult{ + Valid: isValid, + Errors: schemaValidationErrorsFromOperationReport(report), + }, nil +} + +// IntrospectionResponse - writes full schema introspection response into writer +func (s *Schema) IntrospectionResponse(out io.Writer) error { + var ( + introspectionData = struct { + Data introspection.Data `json:"data"` + }{} + report operationreport.Report + ) + gen := introspection.NewGenerator() + gen.Generate(&s.document, &report, &introspectionData.Data) + if report.HasErrors() { + return report + } + return json.NewEncoder(out).Encode(introspectionData) +} + +func (s *Schema) GetAllFieldArguments(skipFieldFuncs ...SkipFieldFunc) []TypeFieldArguments { + objectTypeExtensions := make(map[string]ast.ObjectTypeExtension) + for _, objectTypeExtension := range s.document.ObjectTypeExtensions { + typeName, ok := s.typeNameOfObjectTypeIfHavingFields(objectTypeExtension.ObjectTypeDefinition) + if !ok { + continue + } + + objectTypeExtensions[typeName] = objectTypeExtension + } + + typeFieldArguments 
:= make([]TypeFieldArguments, 0) + for _, objectType := range s.document.ObjectTypeDefinitions { + typeName, ok := s.typeNameOfObjectTypeIfHavingFields(objectType) + if !ok { + continue + } + + for _, fieldRef := range objectType.FieldsDefinition.Refs { + fieldName, skip := s.determineIfFieldWithFieldNameShouldBeSkipped(fieldRef, typeName, skipFieldFuncs...) + if skip { + continue + } + + s.addTypeFieldArgsForFieldRef(fieldRef, typeName, fieldName, &typeFieldArguments) + } + + objectTypeExt, ok := objectTypeExtensions[typeName] + if !ok { + continue + } + + for _, fieldRef := range objectTypeExt.FieldsDefinition.Refs { + fieldName, skip := s.determineIfFieldWithFieldNameShouldBeSkipped(fieldRef, typeName, skipFieldFuncs...) + if skip { + continue + } + + s.addTypeFieldArgsForFieldRef(fieldRef, typeName, fieldName, &typeFieldArguments) + } + } + + return typeFieldArguments +} + +func (s *Schema) typeNameOfObjectTypeIfHavingFields(objectType ast.ObjectTypeDefinition) (typeName string, ok bool) { + if !objectType.HasFieldDefinitions { + return "", false + } + + return s.document.Input.ByteSliceString(objectType.Name), true +} + +func (s *Schema) fieldNameOfFieldDefinitionIfHavingArguments(field ast.FieldDefinition, ref int) (fieldName string, ok bool) { + if !field.HasArgumentsDefinitions { + return "", false + } + + return s.document.FieldDefinitionNameString(ref), true +} + +func (s *Schema) determineIfFieldWithFieldNameShouldBeSkipped(ref int, typeName string, skipFieldFuncs ...SkipFieldFunc) (fieldName string, skip bool) { + field := s.document.FieldDefinitions[ref] + fieldName, ok := s.fieldNameOfFieldDefinitionIfHavingArguments(field, ref) + if !ok { + return fieldName, true + } + + for _, skipFieldFunc := range skipFieldFuncs { + if skipFieldFunc != nil && skipFieldFunc(typeName, fieldName, s.document) { + skip = true + break + } + } + + return fieldName, skip +} + +func (s *Schema) addTypeFieldArgsForFieldRef(ref int, typeName string, fieldName string, fieldArguments *[]TypeFieldArguments) { + currentTypeFieldArgs := TypeFieldArguments{ + TypeName: typeName, + FieldName: fieldName, + ArgumentNames: make([]string, 0), + } + + for _, argRef := range s.document.FieldDefinitions[ref].ArgumentsDefinition.Refs { + argName := s.document.InputValueDefinitionNameString(argRef) + currentTypeFieldArgs.ArgumentNames = append(currentTypeFieldArgs.ArgumentNames, string(argName)) + } + + *fieldArguments = append(*fieldArguments, currentTypeFieldArgs) +} + +func (s *Schema) GetAllNestedFieldChildrenFromTypeField(typeName string, fieldName string, skipFieldFuncs ...SkipFieldFunc) []TypeFields { + node, fields := s.nodeFieldRefs(typeName) + if len(fields) == 0 { + return nil + } + childNodes := make([]TypeFields, 0) + s.findInterfaceImplementations(node, &childNodes, skipFieldFuncs...) + for _, ref := range fields { + if fieldName == s.document.FieldDefinitionNameString(ref) { + fieldTypeName := s.document.FieldDefinitionTypeNode(ref).NameString(&s.document) + s.findNestedFieldChildren(fieldTypeName, &childNodes, skipFieldFuncs...) 
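+			// The requested field was found; childNodes now holds its transitive children.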
+ return childNodes + } + } + + return nil +} + +func (s *Schema) findInterfaceImplementations(node ast.Node, childNodes *[]TypeFields, skipFieldFuncs ...SkipFieldFunc) { + if node.Kind != ast.NodeKindInterfaceTypeDefinition { + return + } + + implementingNodes := s.document.InterfaceTypeDefinitionImplementedByRootNodes(node.Ref) + for i := 0; i < len(implementingNodes); i++ { + var typeName string + switch implementingNodes[i].Kind { + case ast.NodeKindObjectTypeDefinition: + typeName = s.document.ObjectTypeDefinitionNameString(implementingNodes[i].Ref) + case ast.NodeKindInterfaceTypeDefinition: + typeName = s.document.InterfaceTypeDefinitionNameString(implementingNodes[i].Ref) + } + + s.findNestedFieldChildren(typeName, childNodes, skipFieldFuncs...) + } +} + +func (s *Schema) findNestedFieldChildren(typeName string, childNodes *[]TypeFields, skipFieldFuncs ...SkipFieldFunc) { + node, fields := s.nodeFieldRefs(typeName) + if len(fields) == 0 { + return + } + + s.findInterfaceImplementations(node, childNodes, skipFieldFuncs...) + for _, ref := range fields { + fieldName := s.document.FieldDefinitionNameString(ref) + if len(skipFieldFuncs) > 0 { + skip := false + for _, skipFieldFunc := range skipFieldFuncs { + if skipFieldFunc != nil && skipFieldFunc(typeName, fieldName, s.document) { + skip = true + break + } + } + + if skip { + continue + } + } + + if added := s.putChildNode(childNodes, typeName, fieldName); !added { + continue + } + + fieldTypeName := s.document.FieldDefinitionTypeNode(ref).NameString(&s.document) + s.findNestedFieldChildren(fieldTypeName, childNodes, skipFieldFuncs...) + } +} + +func (s *Schema) nodeFieldRefs(typeName string) (node ast.Node, fieldsRefs []int) { + node, exists := s.document.Index.FirstNodeByNameStr(typeName) + if !exists { + return ast.Node{}, nil + } + + switch node.Kind { + case ast.NodeKindObjectTypeDefinition: + fieldsRefs = s.document.ObjectTypeDefinitions[node.Ref].FieldsDefinition.Refs + case ast.NodeKindInterfaceTypeDefinition: + fieldsRefs = s.document.InterfaceTypeDefinitions[node.Ref].FieldsDefinition.Refs + default: + return ast.Node{}, nil + } + + return node, fieldsRefs +} + +func (s *Schema) putChildNode(nodes *[]TypeFields, typeName, fieldName string) (added bool) { + for i := range *nodes { + if typeName != (*nodes)[i].TypeName { + continue + } + for j := range (*nodes)[i].FieldNames { + if fieldName == (*nodes)[i].FieldNames[j] { + return false + } + } + (*nodes)[i].FieldNames = append((*nodes)[i].FieldNames, fieldName) + return true + } + *nodes = append(*nodes, TypeFields{ + TypeName: typeName, + FieldNames: []string{fieldName}, + }) + return true +} + +func createSchema(schemaContent []byte, mergeWithBaseSchema bool) (*Schema, error) { + document, report := astparser.ParseGraphqlDocumentBytes(schemaContent) + if report.HasErrors() { + return nil, report + } + + rawSchema := schemaContent + if mergeWithBaseSchema { + err := asttransform.MergeDefinitionWithBaseSchema(&document) + if err != nil { + return nil, err + } + + rawSchemaBuffer := &bytes.Buffer{} + err = astprinter.PrintIndent(&document, nil, []byte(" "), rawSchemaBuffer) + if err != nil { + return nil, err + } + + rawSchema = rawSchemaBuffer.Bytes() + } + + return &Schema{ + rawInput: schemaContent, + rawSchema: rawSchema, + document: document, + }, nil +} + +func SchemaIntrospection(schema *Schema) (*ExecutionResult, error) { + var buf bytes.Buffer + err := schema.IntrospectionResponse(&buf) + return &ExecutionResult{&buf}, err +} + +type SkipFieldFunc func(typeName, 
fieldName string, definition ast.Document) bool + +func NewIsDataSourceConfigV2RootFieldSkipFunc(dataSources []plan.DataSourceConfiguration) SkipFieldFunc { + return func(typeName, fieldName string, _ ast.Document) bool { + for i := range dataSources { + for j := range dataSources[i].RootNodes { + if typeName != dataSources[i].RootNodes[j].TypeName { + continue + } + for k := range dataSources[i].RootNodes[j].FieldNames { + if fieldName == dataSources[i].RootNodes[j].FieldNames[k] { + return true + } + } + } + } + return false + } +} + +func NewSkipReservedNamesFunc() SkipFieldFunc { + return func(typeName, fieldName string, _ ast.Document) bool { + prefix := "__" + return strings.HasPrefix(typeName, prefix) || strings.HasPrefix(fieldName, prefix) + } +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphql/subscription.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphql/subscription.go new file mode 100644 index 00000000000..869fb0a5724 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphql/subscription.go @@ -0,0 +1,16 @@ +package graphql + +type SubscriptionType int + +const ( + // SubscriptionTypeUnknown is for unknown or undefined subscriptions. + SubscriptionTypeUnknown = iota + // SubscriptionTypeSSE is for Server-Sent Events (SSE) subscriptions. + SubscriptionTypeSSE + // SubscriptionTypeGraphQLWS is for subscriptions using a WebSocket connection with + // 'graphql-ws' as protocol. + SubscriptionTypeGraphQLWS + // SubscriptionTypeGraphQLTransportWS is for subscriptions using a WebSocket connection with + // 'graphql-transport-ws' as protocol. + SubscriptionTypeGraphQLTransportWS +) diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphql/types.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphql/types.go new file mode 100644 index 00000000000..f9844bf95e4 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphql/types.go @@ -0,0 +1,11 @@ +package graphql + +type ( + Type struct { + Name string `json:"name"` + Fields []string `json:"fields"` + } + + RequestFields map[string]struct{} + RequestTypes map[string]RequestFields +) diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphql/validation.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphql/validation.go new file mode 100644 index 00000000000..79745c38bf2 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphql/validation.go @@ -0,0 +1,97 @@ +package graphql + +import ( + "github.com/TykTechnologies/graphql-go-tools/pkg/astvalidation" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +type ValidationResult struct { + Valid bool + Errors Errors +} + +func (r *Request) ValidateForSchema(schema *Schema) (result ValidationResult, err error) { + if schema == nil { + return ValidationResult{Valid: false, Errors: nil}, ErrNilSchema + } + + schemaHash, err := schema.Hash() + if err != nil { + return ValidationResult{Valid: false}, err + } + + if r.validForSchema == nil { + r.validForSchema = map[uint64]ValidationResult{} + } + + if result, ok := r.validForSchema[schemaHash]; ok { + return result, nil + } + + report := r.parseQueryOnce() + if report.HasErrors() { + return operationValidationResultFromReport(report) + } + + validator := astvalidation.DefaultOperationValidator() + validator.Validate(&r.document, &schema.document, &report) + result, err = operationValidationResultFromReport(report) + if err != nil { + return result, err 
+ } + r.validForSchema[schemaHash] = result + return result, err +} + +// ValidateRestrictedFields validates a request by checking if `restrictedFields` contains blocked fields. +// +// Deprecated: This function can only handle blocked fields. Use `ValidateFieldRestrictions` if you +// want to check for blocked or allowed fields instead. +func (r *Request) ValidateRestrictedFields(schema *Schema, restrictedFields []Type) (RequestFieldsValidationResult, error) { + if schema == nil { + return RequestFieldsValidationResult{Valid: false}, ErrNilSchema + } + + report := r.parseQueryOnce() + if report.HasErrors() { + return fieldsValidationResult(report, false, "", "") + } + + var fieldsValidator RequestFieldsValidator = DefaultFieldsValidator{} + return fieldsValidator.Validate(r, schema, restrictedFields) +} + +// ValidateFieldRestrictions will validate a request by using a list of allowed or blocked fields. +func (r *Request) ValidateFieldRestrictions(schema *Schema, restrictedFieldsList FieldRestrictionList, validator FieldRestrictionValidator) (RequestFieldsValidationResult, error) { + if schema == nil { + return RequestFieldsValidationResult{Valid: false}, ErrNilSchema + } + + report := r.parseQueryOnce() + if report.HasErrors() { + return fieldsValidationResult(report, false, "", "") + } + + return validator.ValidateByFieldList(r, schema, restrictedFieldsList) +} + +func operationValidationResultFromReport(report operationreport.Report) (ValidationResult, error) { + result := ValidationResult{ + Valid: false, + Errors: nil, + } + + if !report.HasErrors() { + result.Valid = true + return result, nil + } + + result.Errors = RequestErrorsFromOperationReport(report) + + var err error + if len(report.InternalErrors) > 0 { + err = report.InternalErrors[0] + } + + return result, err +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphqlerrors/location.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphqlerrors/location.go new file mode 100644 index 00000000000..4658acd2149 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphqlerrors/location.go @@ -0,0 +1,6 @@ +package graphqlerrors + +type Location struct { + Line uint32 `json:"line"` + Column uint32 `json:"column"` +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphqljsonschema/jsonschema.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphqljsonschema/jsonschema.go new file mode 100644 index 00000000000..dedd71791de --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/graphqljsonschema/jsonschema.go @@ -0,0 +1,422 @@ +package graphqljsonschema + +import ( + "context" + "encoding/json" + "fmt" + "strings" + + "github.com/buger/jsonparser" + "github.com/qri-io/jsonschema" + + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" +) + +type options struct { + overrides map[string]JsonSchema + path []string +} + +type Option func(opts *options) + +func WithOverrides(overrides map[string]JsonSchema) Option { + return func(opts *options) { + opts.overrides = overrides + } +} + +func WithPath(path []string) Option { + return func(opts *options) { + opts.path = path + } +} + +func FromTypeRef(operation, definition *ast.Document, typeRef int, opts ...Option) JsonSchema { + appliedOptions := &options{} + for _, opt := range opts { + opt(appliedOptions) + } + + var resolver *fromTypeRefResolver + if len(appliedOptions.overrides) > 0 { + resolver = &fromTypeRefResolver{ + overrides: appliedOptions.overrides, + } + } else { + 
resolver = &fromTypeRefResolver{ + overrides: map[string]JsonSchema{}, + } + } + + jsonSchema := resolver.fromTypeRef(operation, definition, typeRef) + return resolveJsonSchemaPath(jsonSchema, appliedOptions.path) +} + +func resolveJsonSchemaPath(jsonSchema JsonSchema, path []string) JsonSchema { + switch typedJsonSchema := jsonSchema.(type) { + case Object: + for i := 0; i < len(path); i++ { + propertyJsonSchema, exists := typedJsonSchema.Properties[path[i]] + if !exists { + return jsonSchema + } + jsonSchema = propertyJsonSchema + } + } + + return jsonSchema +} + +type fromTypeRefResolver struct { + overrides map[string]JsonSchema + defs *map[string]JsonSchema +} + +func (r *fromTypeRefResolver) fromTypeRef(operation, definition *ast.Document, typeRef int) JsonSchema { + + t := operation.Types[typeRef] + + nonNull := false + if operation.TypeIsNonNull(typeRef) { + t = operation.Types[t.OfType] + nonNull = true + } + + switch t.TypeKind { + case ast.TypeKindList: + var defs map[string]JsonSchema + isRoot := false + if r.defs == nil { + defs = make(map[string]JsonSchema, 48) + r.defs = &defs + isRoot = true + } + itemSchema := r.fromTypeRef(operation, definition, t.OfType) + arr := NewArray(itemSchema, nonNull) + if isRoot { + arr.Defs = defs + } + return arr + case ast.TypeKindNonNull: + panic("Should not be able to have multiple levels of non-null") + case ast.TypeKindNamed: + name := operation.Input.ByteSliceString(t.Name) + if schema, ok := r.overrides[name]; ok { + return schema + } + typeDefinitionNode, ok := definition.Index.FirstNodeByNameStr(name) + if !ok { + return nil + } + if typeDefinitionNode.Kind == ast.NodeKindEnumTypeDefinition { + return NewString(nonNull) + } + if typeDefinitionNode.Kind == ast.NodeKindScalarTypeDefinition { + switch name { + case "Boolean": + return NewBoolean(nonNull) + case "String": + return NewString(nonNull) + case "ID": + return NewID(nonNull) + case "Int": + return NewInteger(nonNull) + case "Float": + return NewNumber(nonNull) + case "_Any": + return NewObjectAny(nonNull) + default: + return NewAny() + } + } + object := NewObject(nonNull) + isRootObject := false + if r.defs == nil { + isRootObject = true + object.Defs = make(map[string]JsonSchema, 48) + r.defs = &object.Defs + } + if !isRootObject { + if _, exists := (*r.defs)[name]; exists { + return NewRef(name) + } + (*r.defs)[name] = object + } + if node, ok := definition.Index.FirstNodeByNameStr(name); ok { + switch node.Kind { + case ast.NodeKindInputObjectTypeDefinition: + for _, ref := range definition.InputObjectTypeDefinitions[node.Ref].InputFieldsDefinition.Refs { + fieldName := definition.Input.ByteSliceString(definition.InputValueDefinitions[ref].Name) + fieldType := definition.InputValueDefinitions[ref].Type + fieldSchema := r.fromTypeRef(definition, definition, fieldType) + object.Properties[fieldName] = fieldSchema + if definition.TypeIsNonNull(fieldType) { + object.Required = append(object.Required, fieldName) + } + } + case ast.NodeKindObjectTypeDefinition: + for _, ref := range definition.ObjectTypeDefinitions[node.Ref].FieldsDefinition.Refs { + fieldName := definition.Input.ByteSliceString(definition.FieldDefinitions[ref].Name) + fieldType := definition.FieldDefinitions[ref].Type + fieldSchema := r.fromTypeRef(definition, definition, fieldType) + object.Properties[fieldName] = fieldSchema + if definition.TypeIsNonNull(fieldType) { + object.Required = append(object.Required, fieldName) + } + } + } + } + if !isRootObject { + (*r.defs)[name] = object + return NewRef(name) + } + 
return object + } + return NewObject(nonNull) +} + +type Validator struct { + schema jsonschema.Schema +} + +func NewValidatorFromSchema(schema JsonSchema) (*Validator, error) { + s, err := json.Marshal(schema) + if err != nil { + return nil, err + } + return NewValidatorFromString(string(s)) +} + +func MustNewValidatorFromSchema(schema JsonSchema) *Validator { + s, err := json.Marshal(schema) + if err != nil { + panic(err) + } + return MustNewValidatorFromString(string(s)) +} + +func NewValidatorFromString(schema string) (*Validator, error) { + var validator Validator + err := json.Unmarshal([]byte(schema), &validator.schema) + if err != nil { + return nil, err + } + return &validator, nil +} + +func MustNewValidatorFromString(schema string) *Validator { + var validator Validator + err := json.Unmarshal([]byte(schema), &validator.schema) + if err != nil { + panic(err) + } + return &validator +} + +func TopLevelType(schema string) (jsonparser.ValueType, error) { + var jsonSchema jsonschema.Schema + err := json.Unmarshal([]byte(schema), &jsonSchema) + if err != nil { + return jsonparser.Unknown, err + } + switch jsonSchema.TopLevelType() { + case "boolean": + return jsonparser.Boolean, nil + case "string": + return jsonparser.String, nil + case "object": + return jsonparser.Object, nil + case "number": + return jsonparser.Number, nil + case "integer": + return jsonparser.Number, nil + case "null": + return jsonparser.Null, nil + case "array": + return jsonparser.Array, nil + default: + return jsonparser.NotExist, nil + } +} + +func (v *Validator) Validate(ctx context.Context, inputJSON []byte) error { + errs, err := v.schema.ValidateBytes(ctx, inputJSON) + if err != nil { + // There was an issue performing the validation itself. Return a + // generic error so the input isn't exposed. 
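+		// (This branch means ValidateBytes itself failed; actual schema
+		// violations are collected in errs and reported below.)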
+ return fmt.Errorf("could not perform validation") + } + if len(errs) > 0 { + messages := make([]string, len(errs)) + for i := range errs { + messages[i] = errs[i].Error() + } + return fmt.Errorf("validation failed: %v", strings.Join(messages, "; ")) + } + return nil +} + +type Kind int + +const ( + StringKind Kind = iota + 1 + NumberKind + BooleanKind + IntegerKind + ObjectKind + ArrayKind + AnyKind + IDKind + RefKind +) + +func maybeAppendNull(nonNull bool, types ...string) []string { + if nonNull { + return types + } + return append(types, "null") +} + +type JsonSchema interface { + Kind() Kind +} + +type Any struct{} + +func NewAny() Any { + return Any{} +} + +func (a Any) Kind() Kind { + return AnyKind +} + +type String struct { + Type []string `json:"type"` +} + +func (_ String) Kind() Kind { + return StringKind +} + +func NewString(nonNull bool) String { + return String{ + Type: maybeAppendNull(nonNull, "string"), + } +} + +type ID struct { + Type []string `json:"type"` +} + +func (_ ID) Kind() Kind { + return IDKind +} + +func NewID(nonNull bool) ID { + return ID{ + Type: maybeAppendNull(nonNull, "string", "integer"), + } +} + +type Boolean struct { + Type []string `json:"type"` +} + +func (_ Boolean) Kind() Kind { + return BooleanKind +} + +func NewBoolean(nonNull bool) Boolean { + return Boolean{ + Type: maybeAppendNull(nonNull, "boolean"), + } +} + +type Number struct { + Type []string `json:"type"` +} + +func NewNumber(nonNull bool) Number { + return Number{ + Type: maybeAppendNull(nonNull, "number"), + } +} + +func (_ Number) Kind() Kind { + return NumberKind +} + +type Integer struct { + Type []string `json:"type"` +} + +func (_ Integer) Kind() Kind { + return IntegerKind +} + +func NewInteger(nonNull bool) Integer { + return Integer{ + Type: maybeAppendNull(nonNull, "integer"), + } +} + +type Ref struct { + Ref string `json:"$ref"` +} + +func (_ Ref) Kind() Kind { + return RefKind +} + +func NewRef(definitionName string) Ref { + return Ref{ + Ref: fmt.Sprintf("#/$defs/%s", definitionName), + } +} + +type Object struct { + Type []string `json:"type"` + Properties map[string]JsonSchema `json:"properties,omitempty"` + Required []string `json:"required,omitempty"` + AdditionalProperties bool `json:"additionalProperties"` + Defs map[string]JsonSchema `json:"$defs,omitempty"` +} + +func (_ Object) Kind() Kind { + return ObjectKind +} + +func NewObject(nonNull bool) Object { + return Object{ + Type: maybeAppendNull(nonNull, "object"), + Properties: map[string]JsonSchema{}, + AdditionalProperties: false, + } +} + +func NewObjectAny(nonNull bool) Object { + return Object{ + Type: maybeAppendNull(nonNull, "object"), + Properties: map[string]JsonSchema{}, + AdditionalProperties: true, + } +} + +type Array struct { + Type []string `json:"type"` + Items JsonSchema `json:"items"` + MinItems *int `json:"minItems,omitempty"` + Defs map[string]JsonSchema `json:"$defs,omitempty"` +} + +func (_ Array) Kind() Kind { + return ArrayKind +} + +func NewArray(itemSchema JsonSchema, nonNull bool) Array { + return Array{ + Type: maybeAppendNull(nonNull, "array"), + Items: itemSchema, + } +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/http/handler.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/http/handler.go new file mode 100644 index 00000000000..b824606a341 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/http/handler.go @@ -0,0 +1,87 @@ +package http + +import ( + "encoding/json" + "io" + "net/http" + + "github.com/gobwas/ws" + log 
"github.com/jensneuse/abstractlogger" + + "github.com/TykTechnologies/graphql-go-tools/pkg/execution" +) + +const ( + httpHeaderUpgrade string = "Upgrade" +) + +func NewGraphqlHTTPHandlerFunc(executionHandler *execution.Handler, logger log.Logger, upgrader *ws.HTTPUpgrader) http.Handler { + return &GraphQLHTTPRequestHandler{ + log: logger, + executionHandler: executionHandler, + wsUpgrader: upgrader, + } +} + +type GraphQLHTTPRequestHandler struct { + log log.Logger + executionHandler *execution.Handler + wsUpgrader *ws.HTTPUpgrader +} + +func (g *GraphQLHTTPRequestHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + isUpgrade := g.isWebsocketUpgrade(r) + if isUpgrade { + err := g.upgradeWithNewGoroutine(w, r) + if err != nil { + g.log.Error("GraphQLHTTPRequestHandler.ServeHTTP", + log.Error(err), + ) + w.WriteHeader(http.StatusBadRequest) + } + return + } + g.handleHTTP(w, r) +} + +func (g *GraphQLHTTPRequestHandler) upgradeWithNewGoroutine(w http.ResponseWriter, r *http.Request) error { + conn, _, _, err := g.wsUpgrader.Upgrade(r, w) + if err != nil { + return err + } + g.handleWebsocket(conn) + return nil +} + +func (g *GraphQLHTTPRequestHandler) isWebsocketUpgrade(r *http.Request) bool { + for _, header := range r.Header[httpHeaderUpgrade] { + if header == "websocket" { + return true + } + } + return false +} + +func (g *GraphQLHTTPRequestHandler) extraVariables(r *http.Request, out io.Writer) error { + headers := map[string]string{} + for key := range r.Header { + headers[key] = r.Header.Get(key) + } + + cookies := map[string]string{} + for _, cookie := range r.Cookies() { + cookies[cookie.Name] = cookie.Value + } + + extra := map[string]interface{}{ + "request": map[string]interface{}{ + "uri": r.RequestURI, + "method": r.Method, + "host": r.Host, + "headers": headers, + "cookies": cookies, + }, + } + + return json.NewEncoder(out).Encode(extra) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/http/http.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/http/http.go new file mode 100644 index 00000000000..2f2a8928882 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/http/http.go @@ -0,0 +1,60 @@ +// Package http handles GraphQL HTTP Requests including WebSocket Upgrades. 
+package http
+
+import (
+ "bytes"
+ "io/ioutil"
+ "net/http"
+
+ log "github.com/jensneuse/abstractlogger"
+)
+
+const (
+ httpHeaderContentType string = "Content-Type"
+
+ httpContentTypeApplicationJson string = "application/json"
+)
+
+func (g *GraphQLHTTPRequestHandler) handleHTTP(w http.ResponseWriter, r *http.Request) {
+ data, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ g.log.Error("GraphQLHTTPRequestHandler.handleHTTP",
+ log.Error(err),
+ )
+ w.WriteHeader(http.StatusBadRequest)
+ return
+ }
+
+ extra := &bytes.Buffer{}
+ err = g.extraVariables(r, extra)
+ if err != nil {
+ g.log.Error("executionHandler.Handle.json.Marshal(extra)",
+ log.Error(err),
+ )
+ w.WriteHeader(http.StatusBadRequest)
+ return
+ }
+
+ executor, rootNode, ctx, err := g.executionHandler.Handle(data, extra.Bytes())
+ if err != nil {
+ g.log.Error("executionHandler.Handle",
+ log.Error(err),
+ )
+ w.WriteHeader(http.StatusBadRequest)
+ return
+ }
+ ctx.Context = r.Context()
+ buf := bytes.NewBuffer(make([]byte, 0, 4096))
+ err = executor.Execute(ctx, rootNode, buf)
+ if err != nil {
+ g.log.Error("executor.Execute",
+ log.Error(err),
+ )
+ w.WriteHeader(http.StatusInternalServerError)
+ return
+ }
+
+ w.Header().Add(httpHeaderContentType, httpContentTypeApplicationJson)
+ w.WriteHeader(http.StatusOK)
+ _, _ = buf.WriteTo(w)
+}
diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/http/ws.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/http/ws.go
new file mode 100644
index 00000000000..5a669fff068
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/http/ws.go
@@ -0,0 +1,160 @@
+package http
+
+import (
+ "context"
+ "encoding/json"
+ "net"
+
+ "github.com/gobwas/ws"
+ "github.com/gobwas/ws/wsutil"
+ "github.com/jensneuse/abstractlogger"
+
+ "github.com/TykTechnologies/graphql-go-tools/pkg/subscription"
+)
+
+// WebsocketSubscriptionClient is an actual implementation of the subscription client interface.
+type WebsocketSubscriptionClient struct {
+ logger abstractlogger.Logger
+ // clientConn holds the actual connection to the client.
+ clientConn net.Conn
+ // isClosedConnection indicates if the websocket connection is closed.
+ isClosedConnection bool
+}
+
+// NewWebsocketSubscriptionClient will create a new websocket subscription client.
+func NewWebsocketSubscriptionClient(logger abstractlogger.Logger, clientConn net.Conn) *WebsocketSubscriptionClient {
+ return &WebsocketSubscriptionClient{
+ logger: logger,
+ clientConn: clientConn,
+ }
+}
+
+// ReadFromClient will read a subscription message from the websocket client.
+func (w *WebsocketSubscriptionClient) ReadFromClient() (message *subscription.Message, err error) {
+ var data []byte
+ var opCode ws.OpCode
+
+ data, opCode, err = wsutil.ReadClientData(w.clientConn)
+ if err != nil {
+ if w.isClosedConnectionError(err) {
+ return message, nil
+ }
+
+ w.logger.Error("http.WebsocketSubscriptionClient.ReadFromClient()",
+ abstractlogger.Error(err),
+ abstractlogger.ByteString("data", data),
+ abstractlogger.Any("opCode", opCode),
+ )
+
+ return nil, err
+ }
+
+ err = json.Unmarshal(data, &message)
+ if err != nil {
+ w.logger.Error("http.WebsocketSubscriptionClient.ReadFromClient()",
+ abstractlogger.Error(err),
+ abstractlogger.ByteString("data", data),
+ abstractlogger.Any("opCode", opCode),
+ )
+
+ return nil, err
+ }
+
+ return message, nil
+}
+
+// WriteToClient will write a subscription message to the websocket client.
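+// The frames on the wire follow the graphql-ws style messages used by the
+// fixtures in this package (see ws_connection_init.json and ws_start.json);
+// a data frame could look like this (illustrative only, the exact fields are
+// defined by subscription.Message):
+//
+//	{"id":"1","type":"data","payload":{"data":{"stream":{"datetime":"..."}}}}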
+func (w *WebsocketSubscriptionClient) WriteToClient(message subscription.Message) error {
+ if w.isClosedConnection {
+ return nil
+ }
+
+ messageBytes, err := json.Marshal(message)
+ if err != nil {
+ w.logger.Error("http.WebsocketSubscriptionClient.WriteToClient()",
+ abstractlogger.Error(err),
+ abstractlogger.Any("message", message),
+ )
+
+ return err
+ }
+
+ err = wsutil.WriteServerMessage(w.clientConn, ws.OpText, messageBytes)
+ if err != nil {
+ w.logger.Error("http.WebsocketSubscriptionClient.WriteToClient()",
+ abstractlogger.Error(err),
+ abstractlogger.ByteString("messageBytes", messageBytes),
+ )
+
+ return err
+ }
+
+ return nil
+}
+
+// IsConnected will indicate if the websocket connection is still established.
+func (w *WebsocketSubscriptionClient) IsConnected() bool {
+ return !w.isClosedConnection
+}
+
+// Disconnect will close the websocket connection.
+func (w *WebsocketSubscriptionClient) Disconnect() error {
+ w.logger.Debug("http.GraphQLHTTPRequestHandler.Disconnect()",
+ abstractlogger.String("message", "disconnecting client"),
+ )
+ w.isClosedConnection = true
+ return w.clientConn.Close()
+}
+
+// isClosedConnectionError will indicate if the given error is a connection closed error.
+func (w *WebsocketSubscriptionClient) isClosedConnectionError(err error) bool {
+ if _, ok := err.(wsutil.ClosedError); ok {
+ w.isClosedConnection = true
+ }
+
+ return w.isClosedConnection
+}
+
+func HandleWebsocket(done chan bool, errChan chan error, conn net.Conn, executorPool subscription.ExecutorPool, logger abstractlogger.Logger) {
+ defer func() {
+ if err := conn.Close(); err != nil {
+ logger.Error("http.HandleWebsocket()",
+ abstractlogger.String("message", "could not close connection to client"),
+ abstractlogger.Error(err),
+ )
+ }
+ }()
+
+ websocketClient := NewWebsocketSubscriptionClient(logger, conn)
+ subscriptionHandler, err := subscription.NewHandler(logger, websocketClient, executorPool)
+ if err != nil {
+ logger.Error("http.HandleWebsocket()",
+ abstractlogger.String("message", "could not create subscriptionHandler"),
+ abstractlogger.Error(err),
+ )
+
+ errChan <- err
+ return
+ }
+
+ close(done)
+ subscriptionHandler.Handle(context.Background()) // Blocking
}
+
+// handleWebsocket will handle the websocket connection.
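+// It runs HandleWebsocket on a separate goroutine and returns once the
+// subscription handler has been set up (done is closed) or the setup failed
+// (an error arrives on errChan).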
+func (g *GraphQLHTTPRequestHandler) handleWebsocket(conn net.Conn) {
+ done := make(chan bool)
+ errChan := make(chan error)
+
+ executorPool := subscription.NewExecutorV1Pool(g.executionHandler)
+ go HandleWebsocket(done, errChan, conn, executorPool, g.log)
+ select {
+ case err := <-errChan:
+ g.log.Error("http.GraphQLHTTPRequestHandler.handleWebsocket()",
+ abstractlogger.Error(err),
+ )
+ case <-done:
+ }
+}
diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/http/ws_connection_init.json b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/http/ws_connection_init.json
new file mode 100644
index 00000000000..b2ddf343c71
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/http/ws_connection_init.json
@@ -0,0 +1,4 @@
+{
+ "type": "connection_init",
+ "payload": {}
+}
\ No newline at end of file
diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/http/ws_start.json b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/http/ws_start.json
new file mode 100644
index 00000000000..de8fc245618
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/http/ws_start.json
@@ -0,0 +1,10 @@
+{
+ "id": "1",
+ "type": "start",
+ "payload": {
+ "variables": {},
+ "extensions": {},
+ "operationName": "stream",
+ "query": "subscription stream {\n stream {\n datetime\n timezone\n abbreviation\n }\n}\n"
+ }
+}
\ No newline at end of file
diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/introspection/converter.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/introspection/converter.go
new file mode 100644
index 00000000000..4704364e8c9
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/introspection/converter.go
@@ -0,0 +1,275 @@
+package introspection
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "strings"
+
+ "github.com/TykTechnologies/graphql-go-tools/pkg/ast"
+ "github.com/TykTechnologies/graphql-go-tools/pkg/astimport"
+ "github.com/TykTechnologies/graphql-go-tools/pkg/astparser"
+ "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport"
+)
+
+type JsonConverter struct {
+ schema *Schema
+ doc *ast.Document
+ parser *astparser.Parser
+}
+
+func (j *JsonConverter) GraphQLDocument(introspectionJSON io.Reader) (*ast.Document, error) {
+ var data Data
+ if err := json.NewDecoder(introspectionJSON).Decode(&data); err != nil {
+ return nil, fmt.Errorf("failed to parse introspection json: %v", err)
+ }
+
+ j.schema = &data.Schema
+ j.doc = ast.NewDocument()
+ j.parser = astparser.NewParser()
+
+ if err := j.importSchema(); err != nil {
+ return nil, fmt.Errorf("failed to convert graphql schema: %v", err)
+ }
+
+ return j.doc, nil
+}
+
+func (j *JsonConverter) importSchema() error {
+ j.doc.ImportSchemaDefinition(j.schema.TypeNames())
+
+ for i := 0; i < len(j.schema.Types); i++ {
+ if err := j.importFullType(j.schema.Types[i]); err != nil {
+ return err
+ }
+ }
+
+ for i := 0; i < len(j.schema.Directives); i++ {
+ if err := j.importDirective(j.schema.Directives[i]); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (j *JsonConverter) importFullType(fullType FullType) (err error) {
+ switch fullType.Kind {
+ case SCALAR:
+ j.doc.ImportScalarTypeDefinition(fullType.Name, fullType.Description)
+ case OBJECT:
+ err = j.importObject(fullType)
+ case ENUM:
+ j.importEnum(fullType)
+ case INTERFACE:
+ err = j.importInterface(fullType)
+ case UNION:
+ err = j.importUnion(fullType)
+ case INPUTOBJECT:
+ err = j.importInputObject(fullType)
+ }
+ return
+}
+
+func (j
*JsonConverter) importObject(fullType FullType) error { + fieldRefs, err := j.importFields(fullType.Fields) + if err != nil { + return err + } + + iRefs := make([]int, len(fullType.Interfaces)) + for i := 0; i < len(iRefs); i++ { + iRefs[i] = j.importType(fullType.Interfaces[i]) + } + + j.doc.ImportObjectTypeDefinition( + fullType.Name, + fullType.Description, + fieldRefs, + iRefs) + + return nil +} + +func (j *JsonConverter) importInterface(fullType FullType) error { + fieldRefs, err := j.importFields(fullType.Fields) + if err != nil { + return err + } + + j.doc.ImportInterfaceTypeDefinition( + fullType.Name, + fullType.Description, + fieldRefs) + + return nil +} + +func (j *JsonConverter) importDirective(directive Directive) error { + argRefs, err := j.importInputFields(directive.Args) + if err != nil { + return err + } + + j.doc.ImportDirectiveDefinition( + directive.Name, + directive.Description, + argRefs, + directive.Locations) + + return nil +} + +func (j *JsonConverter) importInputObject(fullType FullType) error { + argRefs, err := j.importInputFields(fullType.InputFields) + if err != nil { + return err + } + + j.doc.ImportInputObjectTypeDefinition( + fullType.Name, + fullType.Description, + argRefs) + + return nil +} + +func (j *JsonConverter) importEnum(fullType FullType) { + valueRefs := make([]int, len(fullType.EnumValues)) + for i := 0; i < len(valueRefs); i++ { + var directiveRefs []int + if fullType.EnumValues[i].IsDeprecated { + directiveRefs = append(directiveRefs, j.importDeprecatedDirective(fullType.EnumValues[i].DeprecationReason)) + } + + valueRefs[i] = j.doc.ImportEnumValueDefinition( + fullType.EnumValues[i].Name, + fullType.EnumValues[i].Description, + directiveRefs, + ) + } + + j.doc.ImportEnumTypeDefinition( + fullType.Name, + fullType.Description, + valueRefs) +} + +func (j *JsonConverter) importUnion(fullType FullType) error { + typeRefs := make([]int, len(fullType.PossibleTypes)) + for i := 0; i < len(typeRefs); i++ { + typeRefs[i] = j.importType(fullType.PossibleTypes[i]) + } + + j.doc.ImportUnionTypeDefinition( + fullType.Name, + fullType.Description, + typeRefs) + + return nil +} + +func (j *JsonConverter) importFields(fields []Field) (refs []int, err error) { + refs = make([]int, len(fields)) + for i := 0; i < len(refs); i++ { + fieldRef, err := j.importField(fields[i]) + if err != nil { + return nil, err + } + refs[i] = fieldRef + } + + return +} + +func (j *JsonConverter) importField(field Field) (ref int, err error) { + typeRef := j.importType(field.Type) + + argRefs, err := j.importInputFields(field.Args) + if err != nil { + return -1, err + } + + var directiveRefs []int + if field.IsDeprecated { + directiveRefs = append(directiveRefs, j.importDeprecatedDirective(field.DeprecationReason)) + } + + return j.doc.ImportFieldDefinition( + field.Name, field.Description, typeRef, argRefs, directiveRefs), nil +} + +func (j *JsonConverter) importInputFields(fields []InputValue) (refs []int, err error) { + refs = make([]int, len(fields)) + for i := 0; i < len(refs); i++ { + argRef, err := j.importInputField(fields[i]) + if err != nil { + return nil, err + } + refs[i] = argRef + } + return +} + +func (j *JsonConverter) importInputField(field InputValue) (ref int, err error) { + typeRef := j.importType(field.Type) + + defaultValue, err := j.importDefaultValue(field.DefaultValue) + if err != nil { + return -1, err + } + + return j.doc.ImportInputValueDefinition( + field.Name, field.Description, typeRef, defaultValue), nil +} + +func (j *JsonConverter) 
importType(typeRef TypeRef) (ref int) { + switch typeRef.Kind { + case LIST: + return j.doc.AddListType(j.importType(*typeRef.OfType)) + case NONNULL: + return j.doc.AddNonNullType(j.importType(*typeRef.OfType)) + } + + return j.doc.AddNamedType([]byte(*typeRef.Name)) +} + +func (j *JsonConverter) importDefaultValue(defaultValue *string) (out ast.DefaultValue, err error) { + if defaultValue == nil { + return + } + + from := ast.NewDocument() + from.Input.AppendInputString(*defaultValue) + + report := &operationreport.Report{} + + j.parser.PrepareImport(from, report) + value := j.parser.ParseValue() + + if report.HasErrors() { + err = report + return + } + + importer := &astimport.Importer{} + return ast.DefaultValue{ + IsDefined: true, + Value: importer.ImportValue(value, from, j.doc), + }, nil +} + +func (j *JsonConverter) importDeprecatedDirective(reason *string) (ref int) { + var args []int + if reason != nil { + valueRef := j.doc.ImportStringValue([]byte(*reason), strings.Contains(*reason, "\n")) + value := ast.Value{ + Kind: ast.ValueKindString, + Ref: valueRef, + } + j.doc.AddValue(value) + args = append(args, j.doc.ImportArgument(DeprecationReasonArgName, value)) + } + + return j.doc.ImportDirective(DeprecatedDirectiveName, args) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/introspection/generator.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/introspection/generator.go new file mode 100644 index 00000000000..cc18e4d910c --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/introspection/generator.go @@ -0,0 +1,352 @@ +package introspection + +import ( + "strings" + + "github.com/TykTechnologies/graphql-go-tools/internal/pkg/unsafebytes" + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +const ( + DeprecatedDirectiveName = "deprecated" + DeprecationReasonArgName = "reason" +) + +type Generator struct { + Data *Data + walker *astvisitor.Walker + visitor *introspectionVisitor +} + +func NewGenerator() *Generator { + walker := astvisitor.NewWalker(48) + visitor := introspectionVisitor{ + Walker: &walker, + } + + walker.RegisterEnterDocumentVisitor(&visitor) + walker.RegisterEnterDirectiveLocationVisitor(&visitor) + walker.RegisterEnterInputValueDefinitionVisitor(&visitor) + walker.RegisterEnterRootOperationTypeDefinitionVisitor(&visitor) + walker.RegisterEnterScalarTypeDefinitionVisitor(&visitor) + walker.RegisterEnterUnionMemberTypeVisitor(&visitor) + + walker.RegisterDirectiveDefinitionVisitor(&visitor) + walker.RegisterEnumTypeDefinitionVisitor(&visitor) + walker.RegisterFieldDefinitionVisitor(&visitor) + walker.RegisterInputObjectTypeDefinitionVisitor(&visitor) + walker.RegisterInterfaceTypeDefinitionVisitor(&visitor) + walker.RegisterObjectTypeDefinitionVisitor(&visitor) + walker.RegisterUnionTypeDefinitionVisitor(&visitor) + + walker.RegisterLeaveEnumValueDefinitionVisitor(&visitor) + + return &Generator{ + walker: &walker, + visitor: &visitor, + } +} + +func (g *Generator) Generate(definition *ast.Document, report *operationreport.Report, data *Data) { + g.visitor.data = data + g.visitor.definition = definition + g.walker.Walk(definition, nil, report) +} + +type introspectionVisitor struct { + *astvisitor.Walker + definition *ast.Document + data *Data + currentType FullType + currentField Field + currentDirective Directive +} + +func (i *introspectionVisitor) 
EnterDocument(operation, definition *ast.Document) { + i.data.Schema = NewSchema() +} + +func (i *introspectionVisitor) EnterObjectTypeDefinition(ref int) { + i.currentType = NewFullType() + i.currentType.Name = i.definition.ObjectTypeDefinitionNameString(ref) + i.currentType.Kind = OBJECT + i.currentType.Description = i.definition.ObjectTypeDescriptionNameString(ref) + for _, typeRef := range i.definition.ObjectTypeDefinitions[ref].ImplementsInterfaces.Refs { + name := i.definition.TypeNameString(typeRef) + i.currentType.Interfaces = append(i.currentType.Interfaces, TypeRef{ + Kind: INTERFACE, + Name: &name, + }) + } +} + +func (i *introspectionVisitor) LeaveObjectTypeDefinition(ref int) { + if strings.HasPrefix(i.currentType.Name, "__") { + return + } + i.data.Schema.Types = append(i.data.Schema.Types, i.currentType) +} + +func (i *introspectionVisitor) EnterFieldDefinition(ref int) { + i.currentField = NewField() + i.currentField.Name = i.definition.FieldDefinitionNameString(ref) + i.currentField.Description = i.definition.FieldDefinitionDescriptionString(ref) + i.currentField.Type = i.TypeRef(i.definition.FieldDefinitionType(ref)) + + if i.definition.FieldDefinitionHasDirectives(ref) { + directiveRef, exists := i.definition.FieldDefinitionDirectiveByName(ref, []byte(DeprecatedDirectiveName)) + if exists { + i.currentField.IsDeprecated = true + i.currentField.DeprecationReason = i.deprecationReason(directiveRef) + } + } +} + +func (i *introspectionVisitor) LeaveFieldDefinition(ref int) { + if strings.HasPrefix(i.currentField.Name, "__") { + return + } + i.currentType.Fields = append(i.currentType.Fields, i.currentField) +} + +func (i *introspectionVisitor) EnterInputValueDefinition(ref int) { + var defaultValue *string + if i.definition.InputValueDefinitionHasDefaultValue(ref) { + value := i.definition.InputValueDefinitionDefaultValue(ref) + printedValue, err := i.definition.PrintValueBytes(value, nil) + if err != nil { + i.StopWithInternalErr(err) + return + } + printedStr := unsafebytes.BytesToString(printedValue) + defaultValue = &printedStr + } + + inputValue := InputValue{ + Name: i.definition.InputValueDefinitionNameString(ref), + Description: i.definition.InputValueDefinitionDescriptionString(ref), + Type: i.TypeRef(i.definition.InputValueDefinitionType(ref)), + DefaultValue: defaultValue, + } + + switch i.Ancestors[len(i.Ancestors)-1].Kind { + case ast.NodeKindInputObjectTypeDefinition: + i.currentType.InputFields = append(i.currentType.InputFields, inputValue) + case ast.NodeKindFieldDefinition: + i.currentField.Args = append(i.currentField.Args, inputValue) + case ast.NodeKindDirectiveDefinition: + i.currentDirective.Args = append(i.currentDirective.Args, inputValue) + } +} + +func (i *introspectionVisitor) EnterInterfaceTypeDefinition(ref int) { + i.currentType = NewFullType() + i.currentType.Kind = INTERFACE + i.currentType.Name = i.definition.InterfaceTypeDefinitionNameString(ref) + i.currentType.Description = i.definition.InterfaceTypeDefinitionDescriptionString(ref) + + interfaceNameBytes := i.definition.InterfaceTypeDefinitionNameBytes(ref) + for objectTypeDefRef := range i.definition.ObjectTypeDefinitions { + if i.definition.ObjectTypeDefinitionImplementsInterface(objectTypeDefRef, interfaceNameBytes) { + objectName := i.definition.ObjectTypeDefinitionNameString(objectTypeDefRef) + i.currentType.PossibleTypes = append(i.currentType.PossibleTypes, TypeRef{ + Kind: OBJECT, + Name: &objectName, + }) + } + } + + for _, interfaceTypeExtension := range 
i.definition.InterfaceTypeExtensions { + interfaceTypeExtensionName := i.definition.Input.ByteSliceString(interfaceTypeExtension.Name) + for _, implementedInterfaceRef := range interfaceTypeExtension.ImplementsInterfaces.Refs { + if i.currentType.Name == interfaceTypeExtensionName { + implementedInterfaceName := i.definition.TypeNameString(implementedInterfaceRef) + i.currentType.Interfaces = append(i.currentType.Interfaces, TypeRef{ + Kind: INTERFACE, + Name: &implementedInterfaceName, + }) + } + } + } + + for _, implementedInterfaceRef := range i.definition.InterfaceTypeDefinitions[ref].ImplementsInterfaces.Refs { + implementedInterfaceName := i.definition.TypeNameString(implementedInterfaceRef) + i.currentType.Interfaces = append(i.currentType.Interfaces, TypeRef{ + Kind: INTERFACE, + Name: &implementedInterfaceName, + }) + } +} + +func (i *introspectionVisitor) LeaveInterfaceTypeDefinition(ref int) { + if strings.HasPrefix(i.currentType.Name, "__") { + return + } + i.data.Schema.Types = append(i.data.Schema.Types, i.currentType) +} + +func (i *introspectionVisitor) EnterScalarTypeDefinition(ref int) { + typeDefinition := NewFullType() + typeDefinition.Kind = SCALAR + typeDefinition.Name = i.definition.ScalarTypeDefinitionNameString(ref) + typeDefinition.Description = i.definition.ScalarTypeDefinitionDescriptionString(ref) + i.data.Schema.Types = append(i.data.Schema.Types, typeDefinition) +} + +func (i *introspectionVisitor) EnterUnionTypeDefinition(ref int) { + i.currentType = NewFullType() + i.currentType.Kind = UNION + i.currentType.Name = i.definition.UnionTypeDefinitionNameString(ref) + i.currentType.Description = i.definition.UnionTypeDefinitionDescriptionString(ref) +} + +func (i *introspectionVisitor) LeaveUnionTypeDefinition(ref int) { + if strings.HasPrefix(i.currentType.Name, "__") { + return + } + i.data.Schema.Types = append(i.data.Schema.Types, i.currentType) +} + +func (i *introspectionVisitor) EnterUnionMemberType(ref int) { + name := i.definition.TypeNameString(ref) + i.currentType.PossibleTypes = append(i.currentType.PossibleTypes, TypeRef{ + Kind: OBJECT, + Name: &name, + }) +} + +func (i *introspectionVisitor) EnterEnumTypeDefinition(ref int) { + i.currentType = NewFullType() + i.currentType.Kind = ENUM + i.currentType.Name = i.definition.EnumTypeDefinitionNameString(ref) + i.currentType.Description = i.definition.EnumTypeDefinitionDescriptionString(ref) +} + +func (i *introspectionVisitor) LeaveEnumTypeDefinition(ref int) { + if strings.HasPrefix(i.currentType.Name, "__") { + return + } + i.data.Schema.Types = append(i.data.Schema.Types, i.currentType) +} + +func (i *introspectionVisitor) LeaveEnumValueDefinition(ref int) { + enumValue := EnumValue{ + Name: i.definition.EnumValueDefinitionNameString(ref), + Description: i.definition.EnumValueDefinitionDescriptionString(ref), + } + + if i.definition.EnumValueDefinitionHasDirectives(ref) { + directiveRef, exists := i.definition.EnumValueDefinitionDirectiveByName(ref, []byte(DeprecatedDirectiveName)) + if exists { + enumValue.IsDeprecated = true + enumValue.DeprecationReason = i.deprecationReason(directiveRef) + } + } + + i.currentType.EnumValues = append(i.currentType.EnumValues, enumValue) +} + +func (i *introspectionVisitor) EnterInputObjectTypeDefinition(ref int) { + i.currentType = NewFullType() + i.currentType.Kind = INPUTOBJECT + i.currentType.Name = i.definition.InputObjectTypeDefinitionNameString(ref) + i.currentType.Description = i.definition.InputObjectTypeDefinitionDescriptionString(ref) +} + +func (i 
*introspectionVisitor) LeaveInputObjectTypeDefinition(ref int) { + i.data.Schema.Types = append(i.data.Schema.Types, i.currentType) +} + +func (i *introspectionVisitor) EnterDirectiveDefinition(ref int) { + i.currentDirective = NewDirective() + i.currentDirective.Name = i.definition.DirectiveDefinitionNameString(ref) + i.currentDirective.Description = i.definition.DirectiveDefinitionDescriptionString(ref) + i.currentDirective.IsRepeatable = i.definition.DirectiveDefinitions[ref].Repeatable.IsRepeatable +} + +func (i *introspectionVisitor) LeaveDirectiveDefinition(ref int) { + i.data.Schema.Directives = append(i.data.Schema.Directives, i.currentDirective) +} + +func (i *introspectionVisitor) EnterDirectiveLocation(location ast.DirectiveLocation) { + i.currentDirective.Locations = append(i.currentDirective.Locations, location.LiteralString()) +} + +func (i *introspectionVisitor) EnterRootOperationTypeDefinition(ref int) { + switch i.definition.RootOperationTypeDefinitions[ref].OperationType { + case ast.OperationTypeQuery: + i.data.Schema.QueryType = &TypeName{ + Name: i.definition.Input.ByteSliceString(i.definition.RootOperationTypeDefinitions[ref].NamedType.Name), + } + case ast.OperationTypeMutation: + i.data.Schema.MutationType = &TypeName{ + Name: i.definition.Input.ByteSliceString(i.definition.RootOperationTypeDefinitions[ref].NamedType.Name), + } + case ast.OperationTypeSubscription: + i.data.Schema.SubscriptionType = &TypeName{ + Name: i.definition.Input.ByteSliceString(i.definition.RootOperationTypeDefinitions[ref].NamedType.Name), + } + } +} + +func (i *introspectionVisitor) TypeRef(typeRef int) TypeRef { + switch i.definition.Types[typeRef].TypeKind { + case ast.TypeKindNamed: + name := i.definition.TypeNameBytes(typeRef) + node, exists := i.definition.Index.FirstNodeByNameBytes(name) + if !exists { + return TypeRef{} + } + var typeKind __TypeKind + switch node.Kind { + case ast.NodeKindScalarTypeDefinition: + typeKind = SCALAR + case ast.NodeKindObjectTypeDefinition: + typeKind = OBJECT + case ast.NodeKindEnumTypeDefinition: + typeKind = ENUM + case ast.NodeKindInterfaceTypeDefinition: + typeKind = INTERFACE + case ast.NodeKindUnionTypeDefinition: + typeKind = UNION + case ast.NodeKindInputObjectTypeDefinition: + typeKind = INPUTOBJECT + } + nameStr := unsafebytes.BytesToString(name) + return TypeRef{ + Kind: typeKind, + Name: &nameStr, + } + case ast.TypeKindNonNull: + ofType := i.TypeRef(i.definition.Types[typeRef].OfType) + return TypeRef{ + Kind: NONNULL, + OfType: &ofType, + } + case ast.TypeKindList: + ofType := i.TypeRef(i.definition.Types[typeRef].OfType) + return TypeRef{ + Kind: LIST, + OfType: &ofType, + } + default: + return TypeRef{} + } +} + +func (i *introspectionVisitor) deprecationReason(directiveRef int) (reason *string) { + argValue, exists := i.definition.DirectiveArgumentValueByName(directiveRef, []byte(DeprecationReasonArgName)) + if exists { + reasonContent := i.definition.ValueContentString(argValue) + return &reasonContent + } + + defaultValue := i.definition.DirectiveDefinitionArgumentDefaultValueString(DeprecatedDirectiveName, DeprecationReasonArgName) + if defaultValue != "" { + return &defaultValue + } + + return +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/introspection/introspection.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/introspection/introspection.go new file mode 100644 index 00000000000..5865855164e --- /dev/null +++ 
b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/introspection/introspection.go @@ -0,0 +1,155 @@ +//go:generate go-enum -f=$GOFILE --noprefix --marshal + +// Package introspection takes a GraphQL Schema and provides the introspection JSON to fulfill introspection queries. +package introspection + +import ( + "bytes" +) + +type Data struct { + Schema Schema `json:"__schema"` +} + +type Schema struct { + QueryType *TypeName `json:"queryType"` + MutationType *TypeName `json:"mutationType"` + SubscriptionType *TypeName `json:"subscriptionType"` + Types []FullType `json:"types"` + Directives []Directive `json:"directives"` +} + +func (s *Schema) TypeNames() (query, mutation, subscription string) { + if s.QueryType != nil { + query = s.QueryType.Name + } + if s.MutationType != nil { + mutation = s.MutationType.Name + } + if s.SubscriptionType != nil { + subscription = s.SubscriptionType.Name + } + return +} + +func NewSchema() Schema { + return Schema{ + Types: make([]FullType, 0), + Directives: make([]Directive, 0), + } +} + +type TypeName struct { + Name string `json:"name"` +} + +type FullType struct { + Kind __TypeKind `json:"kind"` + Name string `json:"name"` + Description string `json:"description"` + // not empty for __TypeKind OBJECT and INTERFACE only + Fields []Field `json:"fields"` + // not empty for __TypeKind INPUT_OBJECT only + InputFields []InputValue `json:"inputFields"` + // not empty for __TypeKind OBJECT only + Interfaces []TypeRef `json:"interfaces"` + // not empty for __TypeKind ENUM only + EnumValues []EnumValue `json:"enumValues"` + // not empty for __TypeKind INTERFACE and UNION only + PossibleTypes []TypeRef `json:"possibleTypes"` +} + +func NewFullType() FullType { + return FullType{ + Fields: make([]Field, 0), + InputFields: make([]InputValue, 0), + Interfaces: make([]TypeRef, 0), + EnumValues: make([]EnumValue, 0), + PossibleTypes: make([]TypeRef, 0), + } +} + +/* +ENUM( +SCALAR +LIST +NON_NULL +OBJECT +ENUM +INTERFACE +UNION +INPUT_OBJECT +) +*/ +type __TypeKind int + +func (x __TypeKind) MarshalJSON() ([]byte, error) { + + text, err := x.MarshalText() + if err != nil { + return nil, err + } + + var buff bytes.Buffer + _, err = buff.WriteRune('"') + if err != nil { + return nil, err + } + _, err = buff.Write(text) + if err != nil { + return nil, err + } + _, err = buff.WriteRune('"') + + return buff.Bytes(), err +} + +type TypeRef struct { + Kind __TypeKind `json:"kind"` + Name *string `json:"name"` + OfType *TypeRef `json:"ofType"` +} + +type Field struct { + Name string `json:"name"` + Description string `json:"description"` + Args []InputValue `json:"args"` + Type TypeRef `json:"type"` + IsDeprecated bool `json:"isDeprecated"` + DeprecationReason *string `json:"deprecationReason"` +} + +func NewField() Field { + return Field{ + Args: make([]InputValue, 0), + } +} + +type EnumValue struct { + Name string `json:"name"` + Description string `json:"description"` + IsDeprecated bool `json:"isDeprecated"` + DeprecationReason *string `json:"deprecationReason"` +} + +type InputValue struct { + Name string `json:"name"` + Description string `json:"description"` + Type TypeRef `json:"type"` + DefaultValue *string `json:"defaultValue"` +} + +type Directive struct { + Name string `json:"name"` + Description string `json:"description"` + Locations []string `json:"locations"` + Args []InputValue `json:"args"` + IsRepeatable bool `json:"isRepeatable"` +} + +func NewDirective() Directive { + return Directive{ + Locations: make([]string, 0), + Args: make([]InputValue, 0), 
+ } +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/introspection/introspection_enum.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/introspection/introspection_enum.go new file mode 100644 index 00000000000..f66b96bcc16 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/introspection/introspection_enum.go @@ -0,0 +1,83 @@ +// Code generated by go-enum +// DO NOT EDIT! + +package introspection + +import ( + "fmt" +) + +const ( + // SCALAR is a __TypeKind of type SCALAR + SCALAR __TypeKind = iota + // LIST is a __TypeKind of type LIST + LIST + // NONNULL is a __TypeKind of type NON_NULL + NONNULL + // OBJECT is a __TypeKind of type OBJECT + OBJECT + // ENUM is a __TypeKind of type ENUM + ENUM + // INTERFACE is a __TypeKind of type INTERFACE + INTERFACE + // UNION is a __TypeKind of type UNION + UNION + // INPUTOBJECT is a __TypeKind of type INPUT_OBJECT + INPUTOBJECT +) + +const ___TypeKindName = "SCALARLISTNON_NULLOBJECTENUMINTERFACEUNIONINPUT_OBJECT" + +var ___TypeKindMap = map[__TypeKind]string{ + 0: ___TypeKindName[0:6], + 1: ___TypeKindName[6:10], + 2: ___TypeKindName[10:18], + 3: ___TypeKindName[18:24], + 4: ___TypeKindName[24:28], + 5: ___TypeKindName[28:37], + 6: ___TypeKindName[37:42], + 7: ___TypeKindName[42:54], +} + +// String implements the Stringer interface. +func (x __TypeKind) String() string { + if str, ok := ___TypeKindMap[x]; ok { + return str + } + return fmt.Sprintf("__TypeKind(%d)", x) +} + +var ___TypeKindValue = map[string]__TypeKind{ + ___TypeKindName[0:6]: 0, + ___TypeKindName[6:10]: 1, + ___TypeKindName[10:18]: 2, + ___TypeKindName[18:24]: 3, + ___TypeKindName[24:28]: 4, + ___TypeKindName[28:37]: 5, + ___TypeKindName[37:42]: 6, + ___TypeKindName[42:54]: 7, +} + +// Parse__TypeKind attempts to convert a string to a __TypeKind +func Parse__TypeKind(name string) (__TypeKind, error) { + if x, ok := ___TypeKindValue[name]; ok { + return x, nil + } + return __TypeKind(0), fmt.Errorf("%s is not a valid __TypeKind", name) +} + +// MarshalText implements the text marshaller method +func (x *__TypeKind) MarshalText() ([]byte, error) { + return []byte(x.String()), nil +} + +// UnmarshalText implements the text unmarshaller method +func (x *__TypeKind) UnmarshalText(text []byte) error { + name := string(text) + tmp, err := Parse__TypeKind(name) + if err != nil { + return err + } + *x = tmp + return nil +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/lexer/identkeyword/identkeyword.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/lexer/identkeyword/identkeyword.go new file mode 100644 index 00000000000..393c1206041 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/lexer/identkeyword/identkeyword.go @@ -0,0 +1,105 @@ +//go:generate stringer -type=IdentKeyword + +// Package identkeyword contains all possible keywords for GraphQL identifiers +package identkeyword + +type IdentKeyword int + +const ( + UNDEFINED IdentKeyword = iota + ON + TRUE + FALSE + NULL + QUERY + MUTATION + SUBSCRIPTION + FRAGMENT + IMPLEMENTS + SCHEMA + SCALAR + TYPE + INTERFACE + UNION + ENUM + INPUT + DIRECTIVE + EXTEND + REPEATABLE +) + +func KeywordFromLiteral(literal []byte) IdentKeyword { + switch len(literal) { + case 2: + if literal[0] == 'o' && literal[1] == 'n' { + return ON + } + case 4: + if literal[0] == 'n' && literal[1] == 'u' && literal[2] == 'l' && literal[3] == 'l' { + return NULL + } + if literal[0] == 'e' && literal[1] == 'n' && literal[2] == 'u' && literal[3] == 'm' { + 
return ENUM + } + if literal[0] == 't' { + if literal[1] == 'r' && literal[2] == 'u' && literal[3] == 'e' { + return TRUE + } + if literal[1] == 'y' && literal[2] == 'p' && literal[3] == 'e' { + return TYPE + } + } + case 5: + if literal[0] == 'f' && literal[1] == 'a' && literal[2] == 'l' && literal[3] == 's' && literal[4] == 'e' { + return FALSE + } + if literal[0] == 'u' && literal[1] == 'n' && literal[2] == 'i' && literal[3] == 'o' && literal[4] == 'n' { + return UNION + } + if literal[0] == 'q' && literal[1] == 'u' && literal[2] == 'e' && literal[3] == 'r' && literal[4] == 'y' { + return QUERY + } + if literal[0] == 'i' && literal[1] == 'n' && literal[2] == 'p' && literal[3] == 'u' && literal[4] == 't' { + return INPUT + } + case 6: + if literal[0] == 'e' && literal[1] == 'x' && literal[2] == 't' && literal[3] == 'e' && literal[4] == 'n' && literal[5] == 'd' { + return EXTEND + } + if literal[0] == 's' { + if literal[1] == 'c' && literal[2] == 'h' && literal[3] == 'e' && literal[4] == 'm' && literal[5] == 'a' { + return SCHEMA + } + if literal[1] == 'c' && literal[2] == 'a' && literal[3] == 'l' && literal[4] == 'a' && literal[5] == 'r' { + return SCALAR + } + } + case 8: + if literal[0] == 'm' && literal[1] == 'u' && literal[2] == 't' && literal[3] == 'a' && literal[4] == 't' && literal[5] == 'i' && literal[6] == 'o' && literal[7] == 'n' { + return MUTATION + } + if literal[0] == 'f' && literal[1] == 'r' && literal[2] == 'a' && literal[3] == 'g' && literal[4] == 'm' && literal[5] == 'e' && literal[6] == 'n' && literal[7] == 't' { + return FRAGMENT + } + case 9: + if literal[0] == 'i' && literal[1] == 'n' && literal[2] == 't' && literal[3] == 'e' && literal[4] == 'r' && literal[5] == 'f' && literal[6] == 'a' && literal[7] == 'c' && literal[8] == 'e' { + return INTERFACE + } + if literal[0] == 'd' && literal[1] == 'i' && literal[2] == 'r' && literal[3] == 'e' && literal[4] == 'c' && literal[5] == 't' && literal[6] == 'i' && literal[7] == 'v' && literal[8] == 'e' { + return DIRECTIVE + } + case 10: + if literal[0] == 'i' && literal[1] == 'm' && literal[2] == 'p' && literal[3] == 'l' && literal[4] == 'e' && literal[5] == 'm' && literal[6] == 'e' && literal[7] == 'n' && literal[8] == 't' && literal[9] == 's' { + return IMPLEMENTS + } + if literal[0] == 'r' && literal[1] == 'e' && literal[2] == 'p' && literal[3] == 'e' && literal[4] == 'a' && literal[5] == 't' && literal[6] == 'a' && literal[7] == 'b' && literal[8] == 'l' && literal[9] == 'e' { + return REPEATABLE + } + case 12: + if literal[0] == 's' && literal[1] == 'u' && literal[2] == 'b' && literal[3] == 's' && literal[4] == 'c' && literal[5] == 'r' && literal[6] == 'i' && literal[7] == 'p' && literal[8] == 't' && literal[9] == 'i' && literal[10] == 'o' && literal[11] == 'n' { + return SUBSCRIPTION + } + } + + return UNDEFINED +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/lexer/identkeyword/identkeyword_string.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/lexer/identkeyword/identkeyword_string.go new file mode 100644 index 00000000000..bfc2d13ba6d --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/lexer/identkeyword/identkeyword_string.go @@ -0,0 +1,42 @@ +// Code generated by "stringer -type=IdentKeyword"; DO NOT EDIT. + +package identkeyword + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[UNDEFINED-0] + _ = x[ON-1] + _ = x[TRUE-2] + _ = x[FALSE-3] + _ = x[NULL-4] + _ = x[QUERY-5] + _ = x[MUTATION-6] + _ = x[SUBSCRIPTION-7] + _ = x[FRAGMENT-8] + _ = x[IMPLEMENTS-9] + _ = x[SCHEMA-10] + _ = x[SCALAR-11] + _ = x[TYPE-12] + _ = x[INTERFACE-13] + _ = x[UNION-14] + _ = x[ENUM-15] + _ = x[INPUT-16] + _ = x[DIRECTIVE-17] + _ = x[EXTEND-18] + _ = x[REPEATABLE-19] +} + +const _IdentKeyword_name = "UNDEFINEDONTRUEFALSENULLQUERYMUTATIONSUBSCRIPTIONFRAGMENTIMPLEMENTSSCHEMASCALARTYPEINTERFACEUNIONENUMINPUTDIRECTIVEEXTENDREPEATABLE" + +var _IdentKeyword_index = [...]uint8{0, 9, 11, 15, 20, 24, 29, 37, 49, 57, 67, 73, 79, 83, 92, 97, 101, 106, 115, 121, 131} + +func (i IdentKeyword) String() string { + if i < 0 || i >= IdentKeyword(len(_IdentKeyword_index)-1) { + return "IdentKeyword(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _IdentKeyword_name[_IdentKeyword_index[i]:_IdentKeyword_index[i+1]] +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/lexer/keyword/keyword.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/lexer/keyword/keyword.go new file mode 100644 index 00000000000..bfaf5356af9 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/lexer/keyword/keyword.go @@ -0,0 +1,42 @@ +//go:generate stringer -type=Keyword + +// Package keyword contains all possible GraphQL keywords +package keyword + +type Keyword int + +const ( + UNDEFINED Keyword = iota + IDENT + COMMENT + EOF + + COLON + BANG + LT + TAB + SPACE + COMMA + AT + DOT + SPREAD + PIPE + SLASH + EQUALS + SUB + AND + QUOTE + + DOLLAR + STRING + BLOCKSTRING + INTEGER + FLOAT + + LPAREN + RPAREN + LBRACK + RBRACK + LBRACE + RBRACE +) diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/lexer/keyword/keyword_string.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/lexer/keyword/keyword_string.go new file mode 100644 index 00000000000..f37f6a77650 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/lexer/keyword/keyword_string.go @@ -0,0 +1,52 @@ +// Code generated by "stringer -type=Keyword"; DO NOT EDIT. + +package keyword + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{}
+ _ = x[UNDEFINED-0]
+ _ = x[IDENT-1]
+ _ = x[COMMENT-2]
+ _ = x[EOF-3]
+ _ = x[COLON-4]
+ _ = x[BANG-5]
+ _ = x[LT-6]
+ _ = x[TAB-7]
+ _ = x[SPACE-8]
+ _ = x[COMMA-9]
+ _ = x[AT-10]
+ _ = x[DOT-11]
+ _ = x[SPREAD-12]
+ _ = x[PIPE-13]
+ _ = x[SLASH-14]
+ _ = x[EQUALS-15]
+ _ = x[SUB-16]
+ _ = x[AND-17]
+ _ = x[QUOTE-18]
+ _ = x[DOLLAR-19]
+ _ = x[STRING-20]
+ _ = x[BLOCKSTRING-21]
+ _ = x[INTEGER-22]
+ _ = x[FLOAT-23]
+ _ = x[LPAREN-24]
+ _ = x[RPAREN-25]
+ _ = x[LBRACK-26]
+ _ = x[RBRACK-27]
+ _ = x[LBRACE-28]
+ _ = x[RBRACE-29]
+}
+
+const _Keyword_name = "UNDEFINEDIDENTCOMMENTEOFCOLONBANGLTTABSPACECOMMAATDOTSPREADPIPESLASHEQUALSSUBANDQUOTEDOLLARSTRINGBLOCKSTRINGINTEGERFLOATLPARENRPARENLBRACKRBRACKLBRACERBRACE"
+
+var _Keyword_index = [...]uint8{0, 9, 14, 21, 24, 29, 33, 35, 38, 43, 48, 50, 53, 59, 63, 68, 74, 77, 80, 85, 91, 97, 108, 115, 120, 126, 132, 138, 144, 150, 156}
+
+func (i Keyword) String() string {
+ if i < 0 || i >= Keyword(len(_Keyword_index)-1) {
+ return "Keyword(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _Keyword_name[_Keyword_index[i]:_Keyword_index[i+1]]
+}
diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/lexer/lexer.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/lexer/lexer.go
new file mode 100644
index 00000000000..e737aa88e94
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/lexer/lexer.go
@@ -0,0 +1,430 @@
+// Package lexer contains the logic to turn an ast.Input into lexed tokens
+package lexer
+
+import (
+ "github.com/TykTechnologies/graphql-go-tools/pkg/ast"
+ "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/keyword"
+ "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/runes"
+ "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/token"
+)
+
+// Lexer emits tokens from an input reader
+type Lexer struct {
+ input *ast.Input
+}
+
+func (l *Lexer) SetInput(input *ast.Input) {
+ l.input = input
+}
+
+// Read emits the next token
+func (l *Lexer) Read() (tok token.Token) {
+
+ var next byte
+
+ for {
+ tok.SetStart(l.input.InputPosition, l.input.TextPosition)
+ next = l.readRune()
+ if !l.byteIsWhitespace(next) {
+ break
+ }
+ }
+
+ if l.matchSingleRuneToken(next, &tok) {
+ return
+ }
+
+ switch next {
+ case runes.HASHTAG:
+ l.readComment(&tok)
+ return
+ case runes.QUOTE:
+ l.readString(&tok)
+ return
+ case runes.DOT:
+ l.readDotOrSpread(&tok)
+ return
+ }
+
+ if runeIsDigit(next) {
+ l.readDigit(&tok)
+ return
+ }
+
+ l.readIdent()
+ tok.Keyword = keyword.IDENT
+ tok.SetEnd(l.input.InputPosition, l.input.TextPosition)
+ return
+}
+
+func (l *Lexer) matchSingleRuneToken(r byte, tok *token.Token) bool {
+
+ switch r {
+ case runes.EOF:
+ tok.Keyword = keyword.EOF
+ case runes.PIPE:
+ tok.Keyword = keyword.PIPE
+ case runes.EQUALS:
+ tok.Keyword = keyword.EQUALS
+ case runes.AT:
+ tok.Keyword = keyword.AT
+ case runes.COLON:
+ tok.Keyword = keyword.COLON
+ case runes.BANG:
+ tok.Keyword = keyword.BANG
+ case runes.LPAREN:
+ tok.Keyword = keyword.LPAREN
+ case runes.RPAREN:
+ tok.Keyword = keyword.RPAREN
+ case runes.LBRACE:
+ tok.Keyword = keyword.LBRACE
+ case runes.RBRACE:
+ tok.Keyword = keyword.RBRACE
+ case runes.LBRACK:
+ tok.Keyword = keyword.LBRACK
+ case runes.RBRACK:
+ tok.Keyword = keyword.RBRACK
+ case runes.AND:
+ tok.Keyword = keyword.AND
+ case runes.SUB:
+ tok.Keyword = keyword.SUB
+ case runes.DOLLAR:
+ tok.Keyword = keyword.DOLLAR
+ default:
+ return false
+ }
+
+ tok.SetEnd(l.input.InputPosition, l.input.TextPosition)
+
+ return true
+}
+
+func
(l *Lexer) readIdent() { + for { + if l.input.InputPosition < l.input.Length { + if !l.runeIsIdent(l.input.RawBytes[l.input.InputPosition]) { + return + } + l.input.TextPosition.CharStart++ + l.input.InputPosition++ + } else { + return + } + } +} + +func (l *Lexer) readDotOrSpread(tok *token.Token) { + + isSpread := l.peekEquals(false, runes.DOT, runes.DOT) + + if isSpread { + l.swallowAmount(2) + tok.Keyword = keyword.SPREAD + } else { + tok.Keyword = keyword.DOT + } + + tok.SetEnd(l.input.InputPosition, l.input.TextPosition) +} + +func (l *Lexer) readComment(tok *token.Token) { + + tok.Keyword = keyword.COMMENT + + for { + next := l.readRune() + switch next { + case runes.EOF: + return + case runes.CARRIAGERETURN, runes.LINETERMINATOR: + if l.peekRune(true) != runes.HASHTAG { + return + } + default: + tok.SetEnd(l.input.InputPosition, l.input.TextPosition) + } + } +} + +func (l *Lexer) readString(tok *token.Token) { + + if l.peekEquals(false, runes.QUOTE, runes.QUOTE) { + l.swallowAmount(2) + l.readBlockString(tok) + } else { + l.readSingleLineString(tok) + } +} + +func (l *Lexer) swallowAmount(amount int) { + for i := 0; i < amount; i++ { + l.readRune() + } +} + +func (l *Lexer) peekEquals(ignoreWhitespace bool, equals ...byte) bool { + + var whitespaceOffset int + if ignoreWhitespace { + whitespaceOffset = l.peekWhitespaceLength() + } + + start := l.input.InputPosition + whitespaceOffset + end := l.input.InputPosition + len(equals) + whitespaceOffset + + if end > l.input.Length { + return false + } + + for i := 0; i < len(equals); i++ { + if l.input.RawBytes[start+i] != equals[i] { + return false + } + } + + return true +} + +func (l *Lexer) peekWhitespaceLength() (amount int) { + for i := l.input.InputPosition; i < l.input.Length; i++ { + if l.byteIsWhitespace(l.input.RawBytes[i]) { + amount++ + } else { + break + } + } + + return amount +} + +func (l *Lexer) readDigit(tok *token.Token) { + + var r byte + for { + r = l.peekRune(false) + if !runeIsDigit(r) { + break + } + l.readRune() + } + + hasExponent := r == runes.EXPONENT_LOWER || r == runes.EXPONENT_UPPER + isFloat := r == runes.DOT || hasExponent + + if isFloat { + l.readRune() + l.readFloat(hasExponent, tok) + return + } + + tok.Keyword = keyword.INTEGER + tok.SetEnd(l.input.InputPosition, l.input.TextPosition) +} + +func (l *Lexer) readFloat(hasReadExponentAlready bool, tok *token.Token) { + + var r byte + for { + r = l.peekRune(false) + if !runeIsDigit(r) { + break + } + l.readRune() + } + + if hasReadExponentAlready { + float := keyword.FLOAT + tok.Keyword = float + tok.SetEnd(l.input.InputPosition, l.input.TextPosition) + return + } + + optionalExponent := l.peekRune(false) + if optionalExponent == runes.EXPONENT_LOWER || optionalExponent == runes.EXPONENT_UPPER { + l.readRune() + } + + optionalPlusMinus := l.peekRune(false) + if optionalPlusMinus == runes.SUB || optionalPlusMinus == runes.ADD { + l.readRune() + } + + for { + r = l.peekRune(false) + if !runeIsDigit(r) { + break + } + l.readRune() + } + + float := keyword.FLOAT + tok.Keyword = float + tok.SetEnd(l.input.InputPosition, l.input.TextPosition) +} + +func (l *Lexer) readRune() (r byte) { + + if l.input.InputPosition < l.input.Length { + r = l.input.RawBytes[l.input.InputPosition] + + if r == runes.LINETERMINATOR { + l.input.TextPosition.LineStart++ + l.input.TextPosition.CharStart = 1 + } else { + l.input.TextPosition.CharStart++ + } + + l.input.InputPosition++ + } else { + r = runes.EOF + } + + return +} + +func (l *Lexer) peekRune(ignoreWhitespace bool) (r 
byte) { + + for i := l.input.InputPosition; i < l.input.Length; i++ { + r = l.input.RawBytes[i] + if !ignoreWhitespace { + return r + } else if !l.byteIsWhitespace(r) { + return r + } + } + + return runes.EOF +} + +func (l *Lexer) runeIsIdent(r byte) bool { + + switch { + case r >= 'a' && r <= 'z': + return true + case r >= 'A' && r <= 'Z': + return true + case r >= '0' && r <= '9': + return true + case r == runes.SUB: + return true + case r == runes.UNDERSCORE: + return true + default: + return false + } +} + +func runeIsDigit(r byte) bool { + switch { + case r >= '0' && r <= '9': + return true + default: + return false + } +} + +func (l *Lexer) byteIsWhitespace(r byte) bool { + switch r { + case runes.SPACE, runes.TAB, runes.CARRIAGERETURN, runes.LINETERMINATOR, runes.COMMA: + return true + default: + return false + } +} + +func (l *Lexer) readBlockString(tok *token.Token) { + tok.Keyword = keyword.BLOCKSTRING + + tok.SetStart(l.input.InputPosition, l.input.TextPosition) + tok.TextPosition.CharStart -= 3 + + escaped := false + quoteCount := 0 + whitespaceCount := 0 + reachedFirstNonWhitespace := false + leadingWhitespaceToken := 0 + + for { + next := l.readRune() + switch next { + case runes.SPACE, runes.TAB, runes.CARRIAGERETURN, runes.LINETERMINATOR: + quoteCount = 0 + whitespaceCount++ + case runes.EOF: + return + case runes.QUOTE: + if escaped { + escaped = !escaped + continue + } + + quoteCount++ + + if quoteCount == 3 { + tok.SetEnd(l.input.InputPosition-3, l.input.TextPosition) + tok.Literal.Start += uint32(leadingWhitespaceToken) + tok.Literal.End -= uint32(whitespaceCount) + return + } + + case runes.BACKSLASH: + escaped = !escaped + quoteCount = 0 + whitespaceCount = 0 + default: + if !reachedFirstNonWhitespace { + reachedFirstNonWhitespace = true + leadingWhitespaceToken = whitespaceCount + } + escaped = false + quoteCount = 0 + whitespaceCount = 0 + } + } +} + +func (l *Lexer) readSingleLineString(tok *token.Token) { + + tok.Keyword = keyword.STRING + + tok.SetStart(l.input.InputPosition, l.input.TextPosition) + tok.TextPosition.CharStart -= 1 + + escaped := false + whitespaceCount := 0 + reachedFirstNonWhitespace := false + leadingWhitespaceToken := 0 + + for { + next := l.readRune() + switch next { + case runes.SPACE, runes.TAB: + whitespaceCount++ + case runes.EOF: + tok.SetEnd(l.input.InputPosition, l.input.TextPosition) + tok.Literal.Start += uint32(leadingWhitespaceToken) + tok.Literal.End -= uint32(whitespaceCount) + return + case runes.QUOTE, runes.CARRIAGERETURN, runes.LINETERMINATOR: + if escaped { + escaped = !escaped + continue + } + + tok.SetEnd(l.input.InputPosition-1, l.input.TextPosition) + tok.Literal.Start += uint32(leadingWhitespaceToken) + tok.Literal.End -= uint32(whitespaceCount) + return + case runes.BACKSLASH: + escaped = !escaped + whitespaceCount = 0 + default: + if !reachedFirstNonWhitespace { + reachedFirstNonWhitespace = true + leadingWhitespaceToken = whitespaceCount + } + escaped = false + whitespaceCount = 0 + } + } +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/lexer/literal/literal.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/lexer/literal/literal.go new file mode 100644 index 00000000000..dd539426013 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/lexer/literal/literal.go @@ -0,0 +1,146 @@ +// Package literal contains a selection of frequently used literals with GraphQL APIs +package literal + +import "bytes" + +var ( + COLON = []byte(":") + BANG = []byte("!") + LINETERMINATOR 
= []byte("\n") + TAB = []byte(" ") + SPACE = []byte(" ") + QUOTE = []byte("\"") + COMMA = []byte(",") + AT = []byte("@") + DOLLAR = []byte("$") + DOT = []byte(".") + SPREAD = []byte("...") + PIPE = []byte("|") + SLASH = []byte("/") + BACKSLASH = []byte("\\") + EQUALS = []byte("=") + SUB = []byte("-") + AND = []byte("&") + + LPAREN = []byte("(") + RPAREN = []byte(")") + LBRACK = []byte("[") + RBRACK = []byte("]") + LBRACE = []byte("{") + DOUBLE_LBRACE = []byte("{{") + DOUBLE_RBRACE = []byte("}}") + RBRACE = []byte("}") + + GOBOOL = []byte("bool") + GOINT32 = []byte("int32") + GOFLOAT32 = []byte("float32") + GOSTRING = []byte("string") + GONIL = []byte("nil") + + EOF = []byte("eof") + ID = []byte("ID") + Date = []byte("Date") + BOOLEAN = []byte("Boolean") + STRING = []byte("String") + INT = []byte("Int") + FLOAT = []byte("Float") + TYPE = []byte("type") + UNDERSCORETYPE = []byte("__type") + UNDERSCORESCHEMA = []byte("__schema") + TYPENAME = []byte("__typename") + GRAPHQLTYPE = []byte("graphqlType") + INTERFACE = []byte("interface") + INPUT = []byte("input") + WASMFILE = []byte("wasmFile") + INCLUDE = []byte("include") + IF = []byte("if") + SKIP = []byte("skip") + SCHEMA = []byte("schema") + EXTEND = []byte("extend") + SCALAR = []byte("scalar") + UNION = []byte("union") + ENUM = []byte("enum") + DIRECTIVE = []byte("directive") + REPEATABLE = []byte("repeatable") + QUERY = []byte("query") + MUTATION = []byte("mutation") + SUBSCRIPTION = []byte("subscription") + IMPLEMENTS = []byte("implements") + ON = []byte("on") + FRAGMENT = []byte("fragment") + NULL = []byte("null") + OBJECT = []byte("object") + DATA = []byte("data") + URL = []byte("url") + CONFIG_FILE_PATH = []byte("configFilePath") + CONFIG_STRING = []byte("configString") + DELAY_SECONDS = []byte("delaySeconds") + PIPELINE_CONFIG = []byte("pipelineConfig") + PIPELINE_CONFIG_STRING = []byte("pipelineConfigString") + PIPELINE_CONFIG_FILE = []byte("pipelineConfigFile") + TRANSFORMATION = []byte("transformation") + INPUT_JSON = []byte("inputJSON") + DEFAULT_TYPENAME = []byte("defaultTypeName") + STATUS_CODE_TYPENAME_MAPPINGS = []byte("statusCodeTypeNameMappings") + DOT_OBJECT_DOT = []byte(".object.") + DOT_ARGUMENTS_DOT = []byte(".arguments.") + ADDR = []byte("addr") + ADD = []byte("add") + BROKERADDR = []byte("brokerAddr") + CLIENTID = []byte("clientID") + TOPIC = []byte("topic") + HOST = []byte("host") + PARAMS = []byte("params") + FIELD = []byte("field") + BODY = []byte("body") + METHOD = []byte("method") + MODE = []byte("mode") + HEADERS = []byte("headers") + KEY = []byte("key") + OP = []byte("op") + REPLACE = []byte("replace") + INITIAL_BATCH_SIZE = []byte("initialBatchSize") + MILLISECONDS = []byte("milliSeconds") + PATH = []byte("path") + VALUE = []byte("value") + HTTP_METHOD_GET = []byte("GET") + HTTP_METHOD_POST = []byte("POST") + HTTP_METHOD_PUT = []byte("PUT") + HTTP_METHOD_DELETE = []byte("DELETE") + HTTP_METHOD_PATCH = []byte("PATCH") + + TRUE = []byte("true") + FALSE = []byte("false") + + LocationQuery = []byte("QUERY") + LocationMutation = []byte("MUTATION") + LocationSubscription = []byte("SUBSCRIPTION") + LocationField = []byte("FIELD") + LocationFragmentDefinition = []byte("FRAGMENT_DEFINITION") + LocationFragmentSpread = []byte("FRAGMENT_SPREAD") + LocationInlineFragment = []byte("INLINE_FRAGMENT") + LocationVariableDefinition = []byte("VARIABLE_DEFINITION") + + LocationSchema = []byte("SCHEMA") + LocationScalar = []byte("SCALAR") + LocationObject = []byte("OBJECT") + LocationFieldDefinition = []byte("FIELD_DEFINITION") + 
LocationArgumentDefinition = []byte("ARGUMENT_DEFINITION") + LocationInterface = []byte("INTERFACE") + LocationUnion = []byte("UNION") + LocationEnum = []byte("ENUM") + LocationEnumValue = []byte("ENUM_VALUE") + LocationInputObject = []byte("INPUT_OBJECT") + LocationInputFieldDefinition = []byte("INPUT_FIELD_DEFINITION") +) + +const ( + DOUBLE_LBRACE_STR = "{{" + DOUBLE_RBRACE_STR = "}}" +) + +type Literal []byte + +func (l Literal) Equals(another Literal) bool { + return bytes.Equal(l, another) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/lexer/position/position.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/lexer/position/position.go new file mode 100644 index 00000000000..e21fdc8c831 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/lexer/position/position.go @@ -0,0 +1,42 @@ +// Package position contains the objects and logic to properly describe the position of a token in a GraphQL document +package position + +import "fmt" + +type Position struct { + LineStart uint32 + LineEnd uint32 + CharStart uint32 + CharEnd uint32 +} + +func (p Position) String() string { + return fmt.Sprintf("%d:%d-%d:%d", p.LineStart, p.CharStart, p.LineEnd, p.CharEnd) +} + +func (p *Position) Reset() { + p.LineStart = 1 + p.LineEnd = 1 + p.CharStart = 1 + p.CharEnd = 1 +} + +func (p *Position) MergeStartIntoStart(position Position) { + p.LineStart = position.LineStart + p.CharStart = position.CharStart +} + +func (p *Position) MergeStartIntoEnd(position Position) { + p.LineEnd = position.LineStart + p.CharEnd = position.CharStart +} + +func (p *Position) MergeEndIntoEnd(position Position) { + p.LineEnd = position.LineEnd + p.CharEnd = position.CharEnd +} + +func (p *Position) IsBefore(another Position) bool { + return p.LineEnd < another.LineStart || + p.LineEnd == another.LineStart && p.CharEnd < another.CharStart +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/lexer/runes/runes.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/lexer/runes/runes.go new file mode 100644 index 00000000000..af1fe2fe169 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/lexer/runes/runes.go @@ -0,0 +1,35 @@ +// Package runes contains all possible 'special' runes in a GraphQL document +package runes + +const ( + EOF = 0 + COLON = ':' + BANG = '!' + CARRIAGERETURN = '\r' + LINETERMINATOR = '\n' + TAB = '\t' + SPACE = ' ' + COMMA = ',' + HASHTAG = '#' + QUOTE = '"' + BACKSLASH = '\\' + DOT = '.' 
+ EXPONENT_LOWER = 'e' + EXPONENT_UPPER = 'E' + AT = '@' + DOLLAR = '$' + PIPE = '|' + SLASH = '/' + EQUALS = '=' + SUB = '-' + ADD = '+' + AND = '&' + UNDERSCORE = '_' + + LPAREN = '(' + RPAREN = ')' + LBRACK = '[' + RBRACK = ']' + LBRACE = '{' + RBRACE = '}' +) diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/lexer/token/token.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/lexer/token/token.go new file mode 100644 index 00000000000..c6805d424cf --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/lexer/token/token.go @@ -0,0 +1,32 @@ +// Package token contains the object and logic needed to describe a lexed token in a GraphQL document +package token + +import ( + "fmt" + + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/keyword" + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/position" +) + +type Token struct { + Keyword keyword.Keyword + Literal ast.ByteSliceReference + TextPosition position.Position +} + +func (t Token) String() string { + return fmt.Sprintf("token:: Keyword: %s, Pos: %s", t.Keyword, t.TextPosition) +} + +func (t *Token) SetStart(inputPosition int, textPosition position.Position) { + t.Literal.Start = uint32(inputPosition) + t.TextPosition.LineStart = textPosition.LineStart + t.TextPosition.CharStart = textPosition.CharStart +} + +func (t *Token) SetEnd(inputPosition int, textPosition position.Position) { + t.Literal.End = uint32(inputPosition) + t.TextPosition.LineEnd = textPosition.LineStart + t.TextPosition.CharEnd = textPosition.CharStart +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/middleware/operation_complexity/operation_complexity.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/middleware/operation_complexity/operation_complexity.go new file mode 100644 index 00000000000..f319f695ace --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/middleware/operation_complexity/operation_complexity.go @@ -0,0 +1,292 @@ +/* + package operation_complexity implements two common algorithms used by GitHub to calculate GraphQL query complexity + + 1. Node count, the maximum number of Nodes a query may return + 2. Complexity, the maximum number of Node requests that might be needed to execute the query + + OperationComplexityEstimator takes a schema definition and a query and then walks recursively through the query to calculate both variables. + + The calculation can be influenced by integer arguments on fields that indicate the amount of Nodes returned by a field. + + To help the algorithm understand your schema you could make use of these two directives: + + - directive @nodeCountMultiply on ARGUMENT_DEFINITION + - directive @nodeCountSkip on FIELD + + nodeCountMultiply: + Indicates that the Int value the directive is applied on should be used as a Node multiplier + + nodeCountSkip: + Indicates that the algorithm should skip this Node. This is useful to whitelist certain query paths, e.g. for introspection. 
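+
+	A short illustrative example (the schema, query, and numbers below are
+	assumptions added for this walkthrough, not part of this package):
+
+		directive @nodeCountMultiply on ARGUMENT_DEFINITION
+
+		type Query {
+			users(first: Int @nodeCountMultiply): [User]
+		}
+
+	For 'query { users(first: 10) { friends(first: 5) { name } } }', and assuming
+	'friends' also declares a multiplied 'first' argument, the estimator reports
+	NodeCount = 10 + 10*5 = 60 and Complexity = 1 + 10 = 11: every selection set
+	below a field adds its multiplied node count, and every field that has a
+	selection set adds one request, multiplied by the multipliers already in effect.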
+*/ +package operation_complexity + +import ( + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astvisitor" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" +) + +type OperationStats struct { + NodeCount int + Complexity int + Depth int +} + +type RootFieldStats struct { + TypeName string + FieldName string + Alias string + Stats OperationStats +} + +var ( + nodeCountMultiply = []byte("nodeCountMultiply") + nodeCountSkip = []byte("nodeCountSkip") +) + +type OperationComplexityEstimator struct { + walker *astvisitor.Walker + visitor *complexityVisitor +} + +func NewOperationComplexityEstimator() *OperationComplexityEstimator { + + walker := astvisitor.NewWalker(48) + visitor := &complexityVisitor{ + Walker: &walker, + multipliers: make([]multiplier, 0, 16), + } + + walker.RegisterEnterDocumentVisitor(visitor) + walker.RegisterEnterArgumentVisitor(visitor) + walker.RegisterLeaveFieldVisitor(visitor) + walker.RegisterEnterFieldVisitor(visitor) + walker.RegisterEnterSelectionSetVisitor(visitor) + walker.RegisterEnterFragmentDefinitionVisitor(visitor) + + return &OperationComplexityEstimator{ + walker: &walker, + visitor: visitor, + } +} + +func (n *OperationComplexityEstimator) Do(operation, definition *ast.Document, report *operationreport.Report) (OperationStats, []RootFieldStats) { + n.visitor.count = 0 + n.visitor.complexity = 0 + n.visitor.maxFieldDepth = 0 + n.visitor.multipliers = n.visitor.multipliers[:0] + + n.visitor.maxSelectionSetFieldDepth = 0 + n.visitor.selectionSetDepth = 0 + + if n.visitor.calculatedRootFieldStats == nil { + n.visitor.calculatedRootFieldStats = make([]RootFieldStats, 0, len(definition.RootOperationTypeDefinitions)) + } + n.visitor.calculatedRootFieldStats = n.visitor.calculatedRootFieldStats[:0] + + if n.visitor.rootOperationTypeNames == nil { + n.visitor.rootOperationTypeNames = make(map[string]struct{}, len(definition.RootOperationTypeDefinitions)) + } + for key := range n.visitor.rootOperationTypeNames { + delete(n.visitor.rootOperationTypeNames, key) + } + + n.walker.Walk(operation, definition, report) + + depth := n.visitor.maxFieldDepth - n.visitor.selectionSetDepth + globalResult := OperationStats{ + NodeCount: n.visitor.count, + Complexity: n.visitor.complexity, + Depth: depth, + } + + return globalResult, n.visitor.calculatedRootFieldStats +} + +func CalculateOperationComplexity(operation, definition *ast.Document, report *operationreport.Report) (OperationStats, []RootFieldStats) { + estimator := NewOperationComplexityEstimator() + return estimator.Do(operation, definition, report) +} + +type complexityVisitor struct { + *astvisitor.Walker + operation, definition *ast.Document + count int + complexity int + maxFieldDepth int + multipliers []multiplier + + maxSelectionSetFieldDepth int + selectionSetDepth int + + rootOperationTypeNames map[string]struct{} + + currentRootFieldStats RootFieldStats + currentRootFieldMaxDepth int + currentRootFieldMaxSelectionSetDepth int + currentRootFieldSelectionSetDepth int + + calculatedRootFieldStats []RootFieldStats +} + +type multiplier struct { + fieldRef int + multi int +} + +func (c *complexityVisitor) calculateMultiplied(i int) int { + for _, j := range c.multipliers { + i = i * j.multi + } + return i +} + +func (c *complexityVisitor) EnterDocument(operation, definition *ast.Document) { + c.operation = operation + c.definition = definition + + for i := 0; i < len(c.definition.RootOperationTypeDefinitions); i++ { + name := 
c.definition.Input.ByteSliceString(c.definition.RootOperationTypeDefinitions[i].NamedType.Name)
+		c.rootOperationTypeNames[name] = struct{}{}
+	}
+}
+
+func (c *complexityVisitor) EnterArgument(ref int) {
+	if c.Ancestors[len(c.Ancestors)-1].Kind != ast.NodeKindField {
+		return
+	}
+
+	definition, ok := c.ArgumentInputValueDefinition(ref)
+	if !ok {
+		return
+	}
+
+	if !c.definition.InputValueDefinitionHasDirective(definition, nodeCountMultiply) {
+		return
+	}
+
+	value := c.operation.ArgumentValue(ref)
+	if value.Kind == ast.ValueKindInteger {
+		multi := c.operation.IntValueAsInt32(value.Ref)
+		c.multipliers = append(c.multipliers, multiplier{
+			fieldRef: c.Ancestors[len(c.Ancestors)-1].Ref,
+			multi:    int(multi),
+		})
+	}
+}
+
+func (c *complexityVisitor) EnterField(ref int) {
+	definition, exists := c.FieldDefinition(ref)
+	if !exists {
+		return
+	}
+
+	if _, exists := c.definition.FieldDefinitionDirectiveByName(definition, nodeCountSkip); exists {
+		c.SkipNode()
+		return
+	}
+
+	typeName, fieldName, alias := c.extractFieldRelatedNames(ref, definition)
+	if c.isRootType(typeName) {
+		c.resetCurrentRootFieldComplexity(typeName, fieldName, alias)
+	}
+
+	if !c.operation.FieldHasSelections(ref) {
+		return
+	}
+
+	c.complexity += c.calculateMultiplied(1)
+	if c.Depth > c.maxFieldDepth {
+		c.maxFieldDepth = c.Depth
+	}
+
+	c.currentRootFieldStats.Stats.Complexity += c.calculateMultiplied(1)
+	if c.Depth > c.currentRootFieldMaxDepth {
+		c.currentRootFieldMaxDepth = c.Depth
+	}
+}
+
+func (c *complexityVisitor) LeaveField(ref int) {
+	if c.isRootTypeField() {
+		c.endRootFieldComplexityCalculation()
+	}
+
+	if len(c.multipliers) == 0 {
+		return
+	}
+
+	if c.multipliers[len(c.multipliers)-1].fieldRef == ref {
+		c.multipliers = c.multipliers[:len(c.multipliers)-1]
+	}
+}
+
+func (c *complexityVisitor) EnterSelectionSet(ref int) {
+	if c.Ancestors[len(c.Ancestors)-1].Kind != ast.NodeKindField {
+		return
+	}
+
+	c.count += c.calculateMultiplied(1)
+	if c.Depth > c.maxSelectionSetFieldDepth {
+		c.maxSelectionSetFieldDepth = c.Depth
+		c.selectionSetDepth++
+	}
+
+	c.currentRootFieldStats.Stats.NodeCount += c.calculateMultiplied(1)
+	if c.Depth > c.currentRootFieldMaxSelectionSetDepth {
+		c.currentRootFieldMaxSelectionSetDepth = c.Depth
+		c.currentRootFieldSelectionSetDepth++
+	}
+}
+
+func (c *complexityVisitor) EnterFragmentDefinition(ref int) {
+	c.SkipNode()
+}
+
+func (c *complexityVisitor) resetCurrentRootFieldComplexity(typeName, fieldName, alias string) {
+	c.currentRootFieldStats = RootFieldStats{
+		TypeName:  typeName,
+		FieldName: fieldName,
+		Alias:     alias,
+		Stats: OperationStats{
+			NodeCount:  0,
+			Complexity: 0,
+			Depth:      0,
+		},
+	}
+}
+
+func (c *complexityVisitor) endRootFieldComplexityCalculation() {
+	currentDepth := c.currentRootFieldMaxDepth - c.currentRootFieldSelectionSetDepth
+	if currentDepth > 0 {
+		currentDepth--
+	}
+	c.currentRootFieldStats.Stats.Depth = currentDepth
+	c.calculatedRootFieldStats = append(c.calculatedRootFieldStats, c.currentRootFieldStats)
+
+	c.currentRootFieldMaxDepth = 0
+	c.currentRootFieldMaxSelectionSetDepth = 0
+	c.currentRootFieldSelectionSetDepth = 0
+}
+
+func (c *complexityVisitor) extractFieldRelatedNames(ref, definitionRef int) (typeName, fieldName, alias string) {
+	fieldName = c.definition.FieldDefinitionNameString(definitionRef)
+	alias = c.operation.FieldAliasOrNameString(ref)
+	if fieldName == alias {
+		alias = ""
+	}
+
+	return c.EnclosingTypeDefinition.NameString(c.definition), fieldName, alias
+}
+
+func (c *complexityVisitor) isRootType(name string) bool {
+	_, ok := c.rootOperationTypeNames[name]
+	return ok
+}
+
+func (c *complexityVisitor) isRootTypeField() bool {
+	enclosingTypeName := c.EnclosingTypeDefinition.NameString(c.definition)
+	return c.isRootType(enclosingTypeName)
+}
diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/openapi/openapi.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/openapi/openapi.go
new file mode 100644
index 00000000000..de4310cddba
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/openapi/openapi.go
@@ -0,0 +1,675 @@
+package openapi
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/http"
+	"sort"
+	"strconv"
+	"strings"
+
+	"github.com/TykTechnologies/graphql-go-tools/pkg/ast"
+	"github.com/TykTechnologies/graphql-go-tools/pkg/introspection"
+	"github.com/TykTechnologies/graphql-go-tools/pkg/lexer/literal"
+	"github.com/TykTechnologies/graphql-go-tools/pkg/operationreport"
+	"github.com/getkin/kin-openapi/openapi3"
+	"github.com/iancoleman/strcase"
+)
+
+type converter struct {
+	openapi        *openapi3.T
+	knownFullTypes map[string]struct{}
+	fullTypes      []introspection.FullType
+}
+
+func isValidResponse(status int) bool {
+	return status >= 200 && status < 300
+}
+
+func MakeInputTypeName(name string) string {
+	parsed := strings.Split(name, "/")
+	return fmt.Sprintf("%sInput", strcase.ToCamel(parsed[len(parsed)-1]))
+}
+
+func MakeFieldNameFromOperationID(operationID string) string {
+	return strcase.ToLowerCamel(operationID)
+}
+
+func MakeFieldNameFromEndpoint(method, endpoint string) string {
+	endpoint = strings.Replace(endpoint, "/", " ", -1)
+	endpoint = strings.Replace(endpoint, "{", " ", -1)
+	endpoint = strings.Replace(endpoint, "}", " ", -1)
+	endpoint = strings.TrimSpace(endpoint)
+	return strcase.ToLowerCamel(fmt.Sprintf("%s %s", strings.ToLower(method), endpoint))
+}
+
+func MakeParameterName(name string) string {
+	return strcase.ToLowerCamel(name)
+}
+
+func getOperationDescription(operation *openapi3.Operation) string {
+	var sb strings.Builder
+	sb.WriteString(operation.Summary)
+	sb.WriteString("\n")
+	sb.WriteString(operation.Description)
+	return strings.TrimSpace(sb.String())
+}
+
+// __TypeKind of introspection is an unexported type. In order to overcome the problem,
+// this function creates and returns a TypeRef for a given kind. kind is an OpenAPI type.
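+// Inferred mapping (from the Kind values used below; see introspection_enum.go):
+// 0 = SCALAR, 1 = LIST, 3 = OBJECT, 7 = INPUT_OBJECT. So "string", "integer",
+// "number" and "boolean" become scalar refs, "array" a list ref, and "object"
+// an object ref (or, for parameters, an input object ref).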
+func getTypeRef(kind string) (introspection.TypeRef, error) { + // See introspection_enum.go + switch kind { + case "string", "integer", "number", "boolean": + return introspection.TypeRef{Kind: 0}, nil + case "object": + return introspection.TypeRef{Kind: 3}, nil + case "array": + return introspection.TypeRef{Kind: 1}, nil + } + return introspection.TypeRef{}, fmt.Errorf("unknown type: %s", kind) +} + +func getParamTypeRef(kind string) (introspection.TypeRef, error) { + // See introspection_enum.go + switch kind { + case "string", "integer", "number", "boolean": + return introspection.TypeRef{Kind: 0}, nil + case "object": + // InputType + return introspection.TypeRef{Kind: 7}, nil + case "array": + return introspection.TypeRef{Kind: 1}, nil + } + return introspection.TypeRef{}, fmt.Errorf("unknown type: %s", kind) +} + +func getPrimitiveGraphQLTypeName(openapiType string) (string, error) { + switch openapiType { + case "string": + return string(literal.STRING), nil + case "integer": + return string(literal.INT), nil + case "number": + return string(literal.FLOAT), nil + case "boolean": + return string(literal.BOOLEAN), nil + default: + return "", fmt.Errorf("unknown type: %s", openapiType) + } +} + +func (c *converter) getGraphQLTypeName(schemaRef *openapi3.SchemaRef) (string, error) { + if schemaRef.Value.Type == "object" { + gqlType := extractFullTypeNameFromRef(schemaRef.Ref) + if gqlType == "" { + return "", errors.New("schema reference is empty") + } + err := c.processObject(schemaRef) + if err != nil { + return "", err + } + return gqlType, nil + } + return getPrimitiveGraphQLTypeName(schemaRef.Value.Type) +} + +func extractFullTypeNameFromRef(ref string) string { + parsed := strings.Split(ref, "/") + return strcase.ToCamel(parsed[len(parsed)-1]) +} + +func (c *converter) processSchemaProperties(fullType *introspection.FullType, schemas openapi3.Schemas) error { + for name, schemaRef := range schemas { + gqlType, err := c.getGraphQLTypeName(schemaRef) + if err != nil { + return err + } + + typeRef, err := getTypeRef(schemaRef.Value.Type) + if err != nil { + return err + } + typeRef.Name = &gqlType + field := introspection.Field{ + Name: name, + Type: typeRef, + Description: schemaRef.Value.Description, + } + + fullType.Fields = append(fullType.Fields, field) + sort.Slice(fullType.Fields, func(i, j int) bool { + return fullType.Fields[i].Name < fullType.Fields[j].Name + }) + } + return nil +} + +func (c *converter) processInputFields(ft *introspection.FullType, schemaRef *openapi3.SchemaRef) error { + for propertyName, property := range schemaRef.Value.Properties { + gqlType, err := getPrimitiveGraphQLTypeName(property.Value.Type) + if err != nil { + return err + } + typeRef, err := getTypeRef(property.Value.Type) + if err != nil { + return err + } + + typeRef.Name = &gqlType + f := introspection.InputValue{ + Name: propertyName, + Type: typeRef, + } + ft.InputFields = append(ft.InputFields, f) + sort.Slice(ft.InputFields, func(i, j int) bool { + return ft.InputFields[i].Name < ft.InputFields[j].Name + }) + } + return nil +} + +func (c *converter) processArray(schema *openapi3.SchemaRef) error { + fullTypeName := extractFullTypeNameFromRef(schema.Value.Items.Ref) + _, ok := c.knownFullTypes[fullTypeName] + if ok { + return nil + } + c.knownFullTypes[fullTypeName] = struct{}{} + + ft := introspection.FullType{ + Kind: introspection.OBJECT, + Name: fullTypeName, + } + typeOfElements := schema.Value.Items.Value.Type + if typeOfElements == "object" { + err := 
c.processSchemaProperties(&ft, schema.Value.Items.Value.Properties) + if err != nil { + return err + } + } else { + for _, item := range schema.Value.Items.Value.AllOf { + if item.Value.Type == "object" { + err := c.processSchemaProperties(&ft, item.Value.Properties) + if err != nil { + return err + } + } + } + } + c.fullTypes = append(c.fullTypes, ft) + return nil +} + +func (c *converter) processObject(schema *openapi3.SchemaRef) error { + fullTypeName := extractFullTypeNameFromRef(schema.Ref) + _, ok := c.knownFullTypes[fullTypeName] + if ok { + return nil + } + c.knownFullTypes[fullTypeName] = struct{}{} + + ft := introspection.FullType{ + Kind: introspection.OBJECT, + Name: fullTypeName, + Description: schema.Value.Description, + } + err := c.processSchemaProperties(&ft, schema.Value.Properties) + if err != nil { + return err + } + c.fullTypes = append(c.fullTypes, ft) + return nil +} + +func (c *converter) processInputObject(schema *openapi3.SchemaRef) error { + fullTypeName := MakeInputTypeName(schema.Ref) + _, ok := c.knownFullTypes[fullTypeName] + if ok { + return nil + } + c.knownFullTypes[fullTypeName] = struct{}{} + + ft := introspection.FullType{ + Kind: introspection.INPUTOBJECT, + Name: fullTypeName, + } + err := c.processInputFields(&ft, schema) + if err != nil { + return err + } + c.fullTypes = append(c.fullTypes, ft) + return nil +} + +func (c *converter) processSchema(schema *openapi3.SchemaRef) error { + if schema.Value.Type == "array" { + return c.processArray(schema) + } else if schema.Value.Type == "object" { + return c.processObject(schema) + } + + sort.Slice(c.fullTypes, func(i, j int) bool { + return c.fullTypes[i].Name < c.fullTypes[j].Name + }) + return nil +} + +func (c *converter) importFullTypes() ([]introspection.FullType, error) { + for _, pathItem := range c.openapi.Paths { + for _, method := range []string{http.MethodGet, http.MethodPost, http.MethodDelete, http.MethodPut} { + operation := pathItem.GetOperation(method) + if operation == nil { + continue + } + + for statusCodeStr := range operation.Responses { + if statusCodeStr == "default" { + continue + } + status, err := strconv.Atoi(statusCodeStr) + if err != nil { + return nil, err + } + if !isValidResponse(status) { + continue + } + + schema := getJSONSchema(status, operation) + if schema == nil { + continue + } + + err = c.processSchema(schema) + if err != nil { + return nil, err + } + } + } + } + sort.Slice(c.fullTypes, func(i, j int) bool { + return c.fullTypes[i].Name < c.fullTypes[j].Name + }) + return c.fullTypes, nil +} + +func extractTypeName(status int, operation *openapi3.Operation) string { + response := operation.Responses.Get(status) + if response == nil { + // Nil response? 
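+		// No response object is defined for this status code, so there is
+		// no schema from which to derive a type name.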
+		return ""
+	}
+	schema := getJSONSchema(status, operation)
+	if schema == nil {
+		return ""
+	}
+	if schema.Value.Type == "array" {
+		return extractFullTypeNameFromRef(schema.Value.Items.Ref)
+	}
+	return extractFullTypeNameFromRef(schema.Ref)
+}
+
+func getJSONSchemaFromResponseRef(response *openapi3.ResponseRef) *openapi3.SchemaRef {
+	if response == nil {
+		return nil
+	}
+	var schema *openapi3.SchemaRef
+	for _, mime := range []string{"application/json"} {
+		mediaType := response.Value.Content.Get(mime)
+		if mediaType != nil {
+			return mediaType.Schema
+		}
+	}
+	return schema
+}
+
+func getJSONSchema(status int, operation *openapi3.Operation) *openapi3.SchemaRef {
+	response := operation.Responses.Get(status)
+	if response == nil {
+		return nil
+	}
+	return getJSONSchemaFromResponseRef(response)
+}
+
+func getJSONSchemaFromRequestBody(operation *openapi3.Operation) *openapi3.SchemaRef {
+	for _, mime := range []string{"application/json"} {
+		mediaType := operation.RequestBody.Value.Content.Get(mime)
+		if mediaType != nil {
+			return mediaType.Schema
+		}
+	}
+	return nil
+}
+
+func (c *converter) importQueryTypeFieldParameter(field *introspection.Field, name string, schema *openapi3.SchemaRef) error {
+	paramType := schema.Value.Type
+	if paramType == "array" {
+		paramType = schema.Value.Items.Value.Type
+	}
+
+	typeRef, err := getTypeRef(paramType)
+	if err != nil {
+		return err
+	}
+
+	gqlType, err := getPrimitiveGraphQLTypeName(paramType)
+	if err != nil {
+		return err
+	}
+
+	if schema.Value.Items != nil {
+		ofType := schema.Value.Items.Value.Type
+		ofTypeRef, err := getTypeRef(ofType)
+		if err != nil {
+			return err
+		}
+		typeRef.OfType = &ofTypeRef
+		gqlType = fmt.Sprintf("[%s]", gqlType)
+	}
+
+	typeRef.Name = &gqlType
+	iv := introspection.InputValue{
+		Name: name,
+		Type: typeRef,
+	}
+
+	field.Args = append(field.Args, iv)
+	sort.Slice(field.Args, func(i, j int) bool {
+		return field.Args[i].Name < field.Args[j].Name
+	})
+	return nil
+}
+
+func (c *converter) importQueryTypeFields(typeRef *introspection.TypeRef, operation *openapi3.Operation) (*introspection.Field, error) {
+	f := introspection.Field{
+		Name:        strcase.ToLowerCamel(operation.OperationID),
+		Type:        *typeRef,
+		Description: getOperationDescription(operation),
+	}
+
+	for _, parameter := range operation.Parameters {
+		schema := parameter.Value.Schema
+		if schema == nil {
+			mediaType := parameter.Value.Content.Get("application/json")
+			if mediaType != nil {
+				schema = mediaType.Schema
+			}
+		}
+		if schema == nil {
+			continue
+		}
+		err := c.importQueryTypeFieldParameter(&f, parameter.Value.Name, schema)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return &f, nil
+}
+
+func (c *converter) importQueryType() (*introspection.FullType, error) {
+	queryType := &introspection.FullType{
+		Kind: introspection.OBJECT,
+		Name: "Query",
+	}
+	for key, pathItem := range c.openapi.Paths {
+		// We only support HTTP GET operation.
+		for _, method := range []string{http.MethodGet} {
+			operation := pathItem.GetOperation(method)
+			if operation == nil {
+				continue
+			}
+			for statusCodeStr := range operation.Responses {
+				if statusCodeStr == "default" {
+					continue
+				}
+				status, err := strconv.Atoi(statusCodeStr)
+				if err != nil {
+					return nil, err
+				}
+
+				if !isValidResponse(status) {
+					continue
+				}
+
+				schema := getJSONSchema(status, operation)
+				if schema == nil {
+					continue
+				}
+				kind := schema.Value.Type
+				if kind == "" {
+					// We assume that it is an object type.
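+					// (OpenAPI allows a schema to omit "type"; defaulting to
+					// "object" keeps the conversion going for such responses.)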
+ kind = "object" + } + + typeName := strcase.ToCamel(extractTypeName(status, operation)) + typeRef, err := getTypeRef(kind) + if err != nil { + return nil, err + } + if kind == "array" { + // Array of some type + typeRef.OfType = &introspection.TypeRef{Kind: 3, Name: &typeName} + } + + typeRef.Name = &typeName + queryField, err := c.importQueryTypeFields(&typeRef, operation) + if err != nil { + return nil, err + } + if queryField.Name == "" { + queryField.Name = strings.Trim(key, "/") + } + queryType.Fields = append(queryType.Fields, *queryField) + } + } + } + sort.Slice(queryType.Fields, func(i, j int) bool { + return queryType.Fields[i].Name < queryType.Fields[j].Name + }) + return queryType, nil +} + +func (c *converter) addParameters(name string, schema *openapi3.SchemaRef) (*introspection.InputValue, error) { + paramType := schema.Value.Type + if paramType == "array" { + paramType = schema.Value.Items.Value.Type + } + + typeRef, err := getParamTypeRef(paramType) + if err != nil { + return nil, err + } + + gqlType := name + if paramType != "object" { + gqlType, err = getPrimitiveGraphQLTypeName(paramType) + if err != nil { + return nil, err + } + } else { + name = MakeInputTypeName(name) + gqlType = name + err = c.processInputObject(schema) + if err != nil { + return nil, err + } + } + + if schema.Value.Items != nil { + ofType := schema.Value.Items.Value.Type + ofTypeRef, err := getParamTypeRef(ofType) + if err != nil { + return nil, err + } + typeRef.OfType = &ofTypeRef + gqlType = fmt.Sprintf("[%s]", gqlType) + } + + typeRef.Name = &gqlType + return &introspection.InputValue{ + Name: MakeParameterName(name), + Type: typeRef, + }, nil +} + +func (c *converter) importMutationType() (*introspection.FullType, error) { + mutationType := &introspection.FullType{ + Kind: introspection.OBJECT, + Name: "Mutation", + } + for key, pathItem := range c.openapi.Paths { + for _, method := range []string{http.MethodPost, http.MethodPut, http.MethodDelete} { + operation := pathItem.GetOperation(method) + if operation == nil { + continue + } + for statusCodeStr := range operation.Responses { + if statusCodeStr == "default" { + continue + } + status, err := strconv.Atoi(statusCodeStr) + if err != nil { + return nil, err + } + + if !isValidResponse(status) { + continue + } + + typeName := strcase.ToCamel(extractTypeName(status, operation)) + if typeName == "" { + // IBM/openapi-to-graphql uses String as return type. 
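+					// A mutation mapped from, e.g., a bodiless DELETE has no
+					// response schema to name, and every GraphQL field needs a
+					// return type, so String serves as the fallback.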
+ // TODO: https://stackoverflow.com/questions/44737043/is-it-possible-to-not-return-any-data-when-using-a-graphql-mutation/44773532#44773532 + typeName = "String" + } + + typeRef, err := getTypeRef("object") + if err != nil { + return nil, err + } + typeRef.Name = &typeName + + f := introspection.Field{ + Name: MakeFieldNameFromOperationID(operation.OperationID), + Type: typeRef, + Description: getOperationDescription(operation), + } + if f.Name == "" { + f.Name = MakeFieldNameFromEndpoint(method, key) + } + + var inputValue *introspection.InputValue + if operation.RequestBody != nil { + schema := getJSONSchemaFromRequestBody(operation) + inputValue, err = c.addParameters(extractFullTypeNameFromRef(schema.Ref), schema) + if err != nil { + return nil, err + } + f.Args = append(f.Args, *inputValue) + } else { + for _, parameter := range operation.Parameters { + inputValue, err = c.addParameters(parameter.Value.Name, parameter.Value.Schema) + if err != nil { + return nil, err + } + f.Args = append(f.Args, *inputValue) + } + } + sort.Slice(f.Args, func(i, j int) bool { + return f.Args[i].Name < f.Args[j].Name + }) + mutationType.Fields = append(mutationType.Fields, f) + } + } + } + sort.Slice(mutationType.Fields, func(i, j int) bool { + return mutationType.Fields[i].Name < mutationType.Fields[j].Name + }) + return mutationType, nil +} + +func ImportParsedOpenAPIv3Document(document *openapi3.T, report *operationreport.Report) *ast.Document { + c := &converter{ + openapi: document, + knownFullTypes: make(map[string]struct{}), + fullTypes: make([]introspection.FullType, 0), + } + data := introspection.Data{} + + data.Schema.QueryType = &introspection.TypeName{ + Name: "Query", + } + queryType, err := c.importQueryType() + if err != nil { + report.AddInternalError(err) + return nil + } + data.Schema.Types = append(data.Schema.Types, *queryType) + + mutationType, err := c.importMutationType() + if err != nil { + report.AddInternalError(err) + return nil + } + if len(mutationType.Fields) > 0 { + data.Schema.MutationType = &introspection.TypeName{ + Name: "Mutation", + } + data.Schema.Types = append(data.Schema.Types, *mutationType) + } + + fullTypes, err := c.importFullTypes() + if err != nil { + report.AddInternalError(err) + return nil + } + data.Schema.Types = append(data.Schema.Types, fullTypes...) 
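+	// From here on, the collected introspection data is marshalled to JSON and
+	// converted back into an executable ast.Document via introspection.JsonConverter.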
+ + outputPretty, err := json.MarshalIndent(data, "", " ") + if err != nil { + report.AddInternalError(err) + return nil + } + + jc := introspection.JsonConverter{} + buf := bytes.NewBuffer(outputPretty) + doc, err := jc.GraphQLDocument(buf) + if err != nil { + report.AddInternalError(err) + return nil + } + return doc +} + +func ParseOpenAPIDocument(input []byte) (*openapi3.T, error) { + loader := openapi3.NewLoader() + loader.IsExternalRefsAllowed = true + document, err := loader.LoadFromData(input) + if err != nil { + return nil, err + } + if err = document.Validate(loader.Context); err != nil { + return nil, err + } + return document, nil +} + +func ImportOpenAPIDocumentByte(input []byte) (*ast.Document, operationreport.Report) { + report := operationreport.Report{} + document, err := ParseOpenAPIDocument(input) + if err != nil { + report.AddInternalError(err) + return nil, report + } + return ImportParsedOpenAPIv3Document(document, &report), report +} + +func ImportOpenAPIDocumentString(input string) (*ast.Document, operationreport.Report) { + return ImportOpenAPIDocumentByte([]byte(input)) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/operationreport/externalerror.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/operationreport/externalerror.go new file mode 100644 index 00000000000..b141b6555e7 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/operationreport/externalerror.go @@ -0,0 +1,469 @@ +package operationreport + +import ( + "fmt" + + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/graphqlerrors" + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/position" +) + +const ( + NotCompatibleTypeErrMsg = "%s cannot represent value: %s" + NotStringErrMsg = "%s cannot represent a non string value: %s" + NotIntegerErrMsg = "%s cannot represent non-integer value: %s" + BigIntegerErrMsg = "%s cannot represent non 32-bit signed integer value: %s" + NotFloatErrMsg = "%s cannot represent non numeric value: %s" + NotBooleanErrMsg = "%s cannot represent a non boolean value: %s" + NotIDErrMsg = "%s cannot represent a non-string and non-integer value: %s" + NotEnumErrMsg = `Enum "%s" cannot represent non-enum value: %s.` + NotAnEnumMemberErrMsg = `Value "%s" does not exist in "%s" enum.` + NullValueErrMsg = `Expected value of type "%s", found null.` + UnknownArgumentOnDirectiveErrMsg = `Unknown argument "%s" on directive "@%s".` + UnknownArgumentOnFieldErrMsg = `Unknown argument "%s" on field "%s.%s".` + UnknownTypeErrMsg = `Unknown type "%s".` + VariableIsNotInputTypeErrMsg = `Variable "$%s" cannot be non-input type "%s".` + MissingRequiredFieldOfInputObjectErrMsg = `Field "%s.%s" of required type "%s" was not provided.` + UnknownFieldOfInputObjectErrMsg = `Field "%s" is not defined by type "%s".` + DuplicatedFieldInputObjectErrMsg = `There can be only one input field named "%s".` + ValueIsNotAnInputObjectTypeErrMsg = `Expected value of type "%s", found %s.` +) + +type ExternalError struct { + Message string `json:"message"` + Path ast.Path `json:"path"` + Locations []graphqlerrors.Location `json:"locations"` +} + +func LocationsFromPosition(position ...position.Position) []graphqlerrors.Location { + out := make([]graphqlerrors.Location, len(position)) + for i, p := range position { + out[i].Line = p.LineStart + out[i].Column = p.CharStart + } + return out +} + +func ErrDocumentDoesntContainExecutableOperation() (err ExternalError) { + err.Message = "document doesn't 
contain any executable operation"
+	return
+}
+
+func ErrFieldUndefinedOnType(fieldName, typeName ast.ByteSlice) (err ExternalError) {
+	err.Message = fmt.Sprintf("field: %s not defined on type: %s", fieldName, typeName)
+	return err
+}
+
+func ErrFieldNameMustBeUniqueOnType(fieldName, typeName ast.ByteSlice) (err ExternalError) {
+	err.Message = fmt.Sprintf("field '%s.%s' can only be defined once", typeName, fieldName)
+	return err
+}
+
+func ErrTypeUndefined(typeName ast.ByteSlice) (err ExternalError) {
+	err.Message = fmt.Sprintf(UnknownTypeErrMsg, typeName)
+	return err
+}
+
+func ErrScalarTypeUndefined(scalarName ast.ByteSlice) (err ExternalError) {
+	err.Message = fmt.Sprintf("scalar not defined: %s", scalarName)
+	return err
+}
+
+func ErrInterfaceTypeUndefined(interfaceName ast.ByteSlice) (err ExternalError) {
+	err.Message = fmt.Sprintf("interface type not defined: %s", interfaceName)
+	return err
+}
+
+func ErrUnionTypeUndefined(unionName ast.ByteSlice) (err ExternalError) {
+	err.Message = fmt.Sprintf("union type not defined: %s", unionName)
+	return err
+}
+
+func ErrEnumTypeUndefined(enumName ast.ByteSlice) (err ExternalError) {
+	err.Message = fmt.Sprintf("enum type not defined: %s", enumName)
+	return err
+}
+
+func ErrInputObjectTypeUndefined(inputObjectName ast.ByteSlice) (err ExternalError) {
+	err.Message = fmt.Sprintf("input object type not defined: %s", inputObjectName)
+	return err
+}
+
+func ErrTypeNameMustBeUnique(typeName ast.ByteSlice) (err ExternalError) {
+	err.Message = fmt.Sprintf("there can be only one type named '%s'", typeName)
+	return err
+}
+
+func ErrOperationNameMustBeUnique(operationName ast.ByteSlice) (err ExternalError) {
+	err.Message = fmt.Sprintf("operation name must be unique: %s", operationName)
+	return err
+}
+
+func ErrAnonymousOperationMustBeTheOnlyOperationInDocument() (err ExternalError) {
+	err.Message = "an anonymous operation must be the only operation in a graphql document"
+	return err
+}
+
+func ErrRequiredOperationNameIsMissing() (err ExternalError) {
+	err.Message = "operation name is required when providing multiple operations"
+	return err
+}
+
+func ErrOperationWithProvidedOperationNameNotFound(operationName string) (err ExternalError) {
+	err.Message = fmt.Sprintf("cannot find an operation with name: %s", operationName)
+	return err
+}
+
+func ErrSubscriptionMustOnlyHaveOneRootSelection(subscriptionName ast.ByteSlice) (err ExternalError) {
+	err.Message = fmt.Sprintf("subscription: %s must only have one root selection", subscriptionName)
+	return err
+}
+
+func ErrFieldSelectionOnUnion(fieldName, unionName ast.ByteSlice) (err ExternalError) {
+	err.Message = fmt.Sprintf("cannot select field: %s on union: %s", fieldName, unionName)
+	return err
+}
+
+func ErrFieldsConflict(objectName, leftType, rightType ast.ByteSlice) (err ExternalError) {
+	err.Message = fmt.Sprintf("fields '%s' conflict because they return conflicting types '%s' and '%s'", objectName, leftType, rightType)
+	return err
+}
+
+func ErrTypesForFieldMismatch(objectName, leftType, rightType ast.ByteSlice) (err ExternalError) {
+	err.Message = fmt.Sprintf("differing types '%s' and '%s' for objectName '%s'", leftType, rightType, objectName)
+	return err
+}
+
+func ErrResponseOfDifferingTypesMustBeOfSameShape(leftObjectName, rightObjectName ast.ByteSlice) (err ExternalError) {
+	err.Message = fmt.Sprintf("objects '%s' and '%s' on differing response types must be of same response shape", leftObjectName, rightObjectName)
+	return err
+}
+
+func
ErrDifferingFieldsOnPotentiallySameType(objectName ast.ByteSlice) (err ExternalError) { + err.Message = fmt.Sprintf("differing fields for objectName '%s' on (potentially) same type", objectName) + return err +} + +func ErrFieldSelectionOnScalar(fieldName, scalarTypeName ast.ByteSlice) (err ExternalError) { + err.Message = fmt.Sprintf("cannot select field: %s on scalar %s", fieldName, scalarTypeName) + return err +} + +func ErrMissingFieldSelectionOnNonScalar(fieldName, enclosingTypeName ast.ByteSlice) (err ExternalError) { + err.Message = fmt.Sprintf("non scalar field: %s on type: %s must have selections", fieldName, enclosingTypeName) + return err +} + +func ErrArgumentNotDefinedOnDirective(argName, directiveName ast.ByteSlice, position position.Position) (err ExternalError) { + err.Message = fmt.Sprintf(UnknownArgumentOnDirectiveErrMsg, argName, directiveName) + err.Locations = LocationsFromPosition(position) + + return err +} + +func ErrUnknownType(typeName ast.ByteSlice, position position.Position) (err ExternalError) { + err.Message = fmt.Sprintf(UnknownTypeErrMsg, typeName) + err.Locations = LocationsFromPosition(position) + + return err +} + +func ErrMissingRequiredFieldOfInputObject(objName, fieldName, typeName ast.ByteSlice, position position.Position) (err ExternalError) { + err.Message = fmt.Sprintf(MissingRequiredFieldOfInputObjectErrMsg, objName, fieldName, typeName) + err.Locations = LocationsFromPosition(position) + + return err +} + +func ErrUnknownFieldOfInputObject(objName, fieldName ast.ByteSlice, position position.Position) (err ExternalError) { + err.Message = fmt.Sprintf(UnknownFieldOfInputObjectErrMsg, objName, fieldName) + err.Locations = LocationsFromPosition(position) + + return err +} + +func ErrDuplicatedFieldInputObject(fieldName ast.ByteSlice, first, duplicated position.Position) (err ExternalError) { + err.Message = fmt.Sprintf(DuplicatedFieldInputObjectErrMsg, fieldName) + + err.Locations = []graphqlerrors.Location{ + { + Line: first.LineStart, + Column: first.CharStart, + }, + { + Line: duplicated.LineStart, + Column: duplicated.CharStart, + }, + } + + return err +} + +func ErrArgumentNotDefinedOnField(argName, typeName, fieldName ast.ByteSlice, position position.Position) (err ExternalError) { + err.Message = fmt.Sprintf(UnknownArgumentOnFieldErrMsg, argName, typeName, fieldName) + err.Locations = LocationsFromPosition(position) + + return err +} + +func ErrNullValueDoesntSatisfyInputValueDefinition(inputType ast.ByteSlice, position position.Position) (err ExternalError) { + err.Message = fmt.Sprintf(NullValueErrMsg, inputType) + err.Locations = LocationsFromPosition(position) + + return err +} + +func ErrValueDoesntSatisfyEnum(value, inputType ast.ByteSlice, position position.Position) (err ExternalError) { + err.Message = fmt.Sprintf(NotEnumErrMsg, inputType, value) + err.Locations = LocationsFromPosition(position) + + return err +} + +func ErrValueDoesntExistsInEnum(value, inputType ast.ByteSlice, position position.Position) (err ExternalError) { + err.Message = fmt.Sprintf(NotAnEnumMemberErrMsg, value, inputType) + err.Locations = LocationsFromPosition(position) + + return err +} + +func ErrValueDoesntSatisfyType(value, inputType ast.ByteSlice, position position.Position) (err ExternalError) { + err.Message = fmt.Sprintf(NotCompatibleTypeErrMsg, inputType, value) + err.Locations = LocationsFromPosition(position) + + return err +} + +func ErrValueIsNotAnInputObjectType(value, inputType ast.ByteSlice, position position.Position) (err ExternalError) { + 
err.Message = fmt.Sprintf(ValueIsNotAnInputObjectTypeErrMsg, inputType, value) + err.Locations = LocationsFromPosition(position) + + return err +} + +func ErrValueDoesntSatisfyString(value, inputType ast.ByteSlice, position position.Position) (err ExternalError) { + err.Message = fmt.Sprintf(NotStringErrMsg, inputType, value) + err.Locations = LocationsFromPosition(position) + + return err +} + +func ErrValueDoesntSatisfyInt(value, inputType ast.ByteSlice, position position.Position) (err ExternalError) { + err.Message = fmt.Sprintf(NotIntegerErrMsg, inputType, value) + err.Locations = LocationsFromPosition(position) + + return err +} + +func ErrBigIntValueDoesntSatisfyInt(value, inputType ast.ByteSlice, position position.Position) (err ExternalError) { + err.Message = fmt.Sprintf(BigIntegerErrMsg, inputType, value) + err.Locations = LocationsFromPosition(position) + + return err +} + +func ErrValueDoesntSatisfyFloat(value, inputType ast.ByteSlice, position position.Position) (err ExternalError) { + err.Message = fmt.Sprintf(NotFloatErrMsg, inputType, value) + err.Locations = LocationsFromPosition(position) + + return err +} + +func ErrValueDoesntSatisfyBoolean(value, inputType ast.ByteSlice, position position.Position) (err ExternalError) { + err.Message = fmt.Sprintf(NotBooleanErrMsg, inputType, value) + err.Locations = LocationsFromPosition(position) + + return err +} + +func ErrValueDoesntSatisfyID(value, inputType ast.ByteSlice, position position.Position) (err ExternalError) { + err.Message = fmt.Sprintf(NotIDErrMsg, inputType, value) + err.Locations = LocationsFromPosition(position) + + return err +} + +func ErrVariableTypeDoesntSatisfyInputValueDefinition(value, inputType, expectedType ast.ByteSlice, valuePos, variableDefinitionPos position.Position) (err ExternalError) { + err.Message = fmt.Sprintf(`Variable "%v" of type "%v" used in position expecting type "%v".`, value, inputType, expectedType) + err.Locations = LocationsFromPosition(variableDefinitionPos, valuePos) + return err +} + +func ErrVariableNotDefinedOnOperation(variableName, operationName ast.ByteSlice) (err ExternalError) { + err.Message = fmt.Sprintf("variable: %s not defined on operation: %s", variableName, operationName) + return err +} + +func ErrVariableDefinedButNeverUsed(variableName, operationName ast.ByteSlice) (err ExternalError) { + err.Message = fmt.Sprintf("variable: %s defined on operation: %s but never used", variableName, operationName) + return err +} + +func ErrVariableMustBeUnique(variableName, operationName ast.ByteSlice) (err ExternalError) { + err.Message = fmt.Sprintf("variable: %s must be unique per operation: %s", variableName, operationName) + return err +} + +func ErrVariableNotDefinedOnArgument(variableName, argumentName ast.ByteSlice) (err ExternalError) { + err.Message = fmt.Sprintf("variable: %s not defined on argument: %s", variableName, argumentName) + return err +} + +func ErrVariableOfTypeIsNoValidInputValue(variableName, ofTypeName ast.ByteSlice, position position.Position) (err ExternalError) { + err.Message = fmt.Sprintf(VariableIsNotInputTypeErrMsg, variableName, ofTypeName) + err.Locations = LocationsFromPosition(position) + + return err +} + +func ErrArgumentMustBeUnique(argName ast.ByteSlice) (err ExternalError) { + err.Message = fmt.Sprintf("argument: %s must be unique", argName) + return err +} + +func ErrArgumentRequiredOnField(argName, fieldName ast.ByteSlice) (err ExternalError) { + err.Message = fmt.Sprintf("argument: %s is required on field: %s but missing", argName, 
fieldName) + return err +} + +func ErrArgumentOnFieldMustNotBeNull(argName, fieldName ast.ByteSlice) (err ExternalError) { + err.Message = fmt.Sprintf("argument: %s on field: %s must not be null", argName, fieldName) + return err +} + +func ErrFragmentSpreadFormsCycle(spreadName ast.ByteSlice) (err ExternalError) { + err.Message = fmt.Sprintf("fragment spread: %s forms fragment cycle", spreadName) + return err +} + +func ErrFragmentDefinedButNotUsed(fragmentName ast.ByteSlice) (err ExternalError) { + err.Message = fmt.Sprintf("fragment: %s defined but not used", fragmentName) + return err +} + +func ErrFragmentUndefined(fragmentName ast.ByteSlice) (err ExternalError) { + err.Message = fmt.Sprintf("fragment: %s undefined", fragmentName) + return err +} + +func ErrInlineFragmentOnTypeDisallowed(onTypeName ast.ByteSlice) (err ExternalError) { + err.Message = fmt.Sprintf("inline fragment on type: %s disallowed", onTypeName) + return err +} + +func ErrInlineFragmentOnTypeMismatchEnclosingType(fragmentTypeName, enclosingTypeName ast.ByteSlice) (err ExternalError) { + err.Message = fmt.Sprintf("inline fragment on type: %s mismatches enclosing type: %s", fragmentTypeName, enclosingTypeName) + return err +} + +func ErrFragmentDefinitionOnTypeDisallowed(fragmentName, onTypeName ast.ByteSlice) (err ExternalError) { + err.Message = fmt.Sprintf("fragment: %s on type: %s disallowed", fragmentName, onTypeName) + return err +} + +func ErrFragmentDefinitionMustBeUnique(fragmentName ast.ByteSlice) (err ExternalError) { + err.Message = fmt.Sprintf("fragment: %s must be unique per document", fragmentName) + return err +} + +func ErrDirectiveUndefined(directiveName ast.ByteSlice) (err ExternalError) { + err.Message = fmt.Sprintf("directive: %s undefined", directiveName) + return err +} + +func ErrDirectiveNotAllowedOnNode(directiveName, nodeKindName ast.ByteSlice) (err ExternalError) { + err.Message = fmt.Sprintf("directive: %s not allowed on node of kind: %s", directiveName, nodeKindName) + return err +} + +func ErrDirectiveMustBeUniquePerLocation(directiveName ast.ByteSlice, position, duplicatePosition position.Position) (err ExternalError) { + err.Message = fmt.Sprintf(`The directive "@%s" can only be used once at this location.`, directiveName) + if duplicatePosition.LineStart < position.LineStart || duplicatePosition.CharStart < position.CharStart { + err.Locations = LocationsFromPosition(duplicatePosition, position) + } else { + err.Locations = LocationsFromPosition(position, duplicatePosition) + } + + return err +} + +func ErrOnlyOneQueryTypeAllowed() (err ExternalError) { + err.Message = "there can be only one query type in schema" + return err +} + +func ErrOnlyOneMutationTypeAllowed() (err ExternalError) { + err.Message = "there can be only one mutation type in schema" + return err +} + +func ErrOnlyOneSubscriptionTypeAllowed() (err ExternalError) { + err.Message = "there can be only one subscription type in schema" + return err +} + +func ErrEnumValueNameMustBeUnique(enumName, enumValueName ast.ByteSlice) (err ExternalError) { + err.Message = fmt.Sprintf("enum value '%s.%s' can only be defined once", enumName, enumValueName) + return err +} + +func ErrUnionMembersMustBeUnique(unionName, memberName ast.ByteSlice) (err ExternalError) { + err.Message = fmt.Sprintf("union member '%s.%s' can only be defined once", unionName, memberName) + return err +} + +func ErrTransitiveInterfaceNotImplemented(typeName, transitiveInterfaceName ast.ByteSlice) (err ExternalError) { + err.Message = fmt.Sprintf("type %s 
does not implement transitive interface %s", typeName, transitiveInterfaceName) + return err +} + +func ErrTransitiveInterfaceExtensionImplementingWithoutBody(interfaceExtensionName ast.ByteSlice) (err ExternalError) { + err.Message = fmt.Sprintf("interface extension %s implementing interface without body", interfaceExtensionName) + return err +} + +func ErrTypeDoesNotImplementFieldFromInterface(typeName, interfaceName, fieldName ast.ByteSlice) (err ExternalError) { + err.Message = fmt.Sprintf("type '%s' does not implement field '%s' from interface '%s'", typeName, fieldName, interfaceName) + return err +} + +func ErrImplementingTypeDoesNotHaveFields(typeName ast.ByteSlice) (err ExternalError) { + err.Message = fmt.Sprintf("type '%s' implements an interface but does not have any fields defined", typeName) + return err +} + +func ErrSharedTypesMustBeIdenticalToFederate(typeName string) (err ExternalError) { + err.Message = fmt.Sprintf("the shared type named '%s' must be identical in any subgraphs to federate", typeName) + return err +} + +func ErrEntitiesMustNotBeDuplicated(typeName string) (err ExternalError) { + err.Message = fmt.Sprintf("the entity named '%s' is defined in the subgraph(s) more than once", typeName) + return err +} + +func ErrSharedTypesMustNotBeExtended(typeName string) (err ExternalError) { + err.Message = fmt.Sprintf("the type named '%s' cannot be extended because it is a shared type", typeName) + return err +} + +func ErrExtensionOrphansMustResolveInSupergraph(extensionNameBytes []byte) (err ExternalError) { + err.Message = fmt.Sprintf("the extension orphan named '%s' was never resolved in the supergraph", extensionNameBytes) + return err +} + +func ErrTypeBodyMustNotBeEmpty(definitionType, typeName string) (err ExternalError) { + err.Message = fmt.Sprintf("the %s named '%s' is invalid due to an empty body", definitionType, typeName) + return err +} + +func ErrEntityExtensionMustHaveKeyDirective(typeName string) (err ExternalError) { + err.Message = fmt.Sprintf("an extension of the entity named '%s' does not have a key directive", typeName) + return err +} + +func ErrExtensionWithKeyDirectiveMustExtendEntity(typeName string) (err ExternalError) { + err.Message = fmt.Sprintf("the extension named '%s' has a key directive but there is no entity of the same name", typeName) + return err +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/operationreport/operationreport.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/operationreport/operationreport.go new file mode 100644 index 00000000000..1a12f5365de --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/operationreport/operationreport.go @@ -0,0 +1,67 @@ +// Package operationreport helps generating the errors object for a GraphQL Operation. 
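+//
+// A minimal usage sketch (illustrative; only methods defined in this file are
+// used, and the step that populates the report is assumed):
+//
+//	var report operationreport.Report
+//	// ... run a parse or validation step that writes into &report ...
+//	if report.HasErrors() {
+//		fmt.Println(report.Error())
+//	}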
+package operationreport + +import ( + "errors" + "fmt" +) + +type Report struct { + InternalErrors []error + ExternalErrors []ExternalError +} + +func (r Report) Error() string { + out := "" + for i := range r.InternalErrors { + if i != 0 { + out += "\n" + } + out += fmt.Sprintf("internal: %s", r.InternalErrors[i].Error()) + } + if len(out) > 0 { + out += "\n" + } + for i := range r.ExternalErrors { + if i != 0 { + out += "\n" + } + out += fmt.Sprintf("external: %s, locations: %+v, path: %v", r.ExternalErrors[i].Message, r.ExternalErrors[i].Locations, r.ExternalErrors[i].Path) + } + return out +} + +func (r *Report) HasErrors() bool { + return len(r.InternalErrors) > 0 || len(r.ExternalErrors) > 0 +} + +func (r *Report) Reset() { + r.InternalErrors = r.InternalErrors[:0] + r.ExternalErrors = r.ExternalErrors[:0] +} + +func (r *Report) AddInternalError(err error) { + r.InternalErrors = append(r.InternalErrors, err) +} + +func (r *Report) AddExternalError(gqlError ExternalError) { + r.ExternalErrors = append(r.ExternalErrors, gqlError) +} + +type FormatExternalErrorMessage func(report *Report) string + +func ExternalErrorMessage(err error, formatFunction FormatExternalErrorMessage) (message string, ok bool) { + var report Report + if errors.As(err, &report) { + msg := formatFunction(&report) + return msg, true + } + return "", false +} + +func UnwrappedErrorMessage(err error) string { + for result := err; result != nil; result = errors.Unwrap(result) { + err = result + } + return err.Error() +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/pool/bytesbuffer.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/pool/bytesbuffer.go new file mode 100644 index 00000000000..0d0086eedbf --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/pool/bytesbuffer.go @@ -0,0 +1,29 @@ +package pool + +import ( + "bytes" + "sync" +) + +var ( + BytesBuffer = bytesBufferPool{ + pool: sync.Pool{ + New: func() interface{} { + return bytes.NewBuffer(make([]byte, 0, 1024)) + }, + }, + } +) + +type bytesBufferPool struct { + pool sync.Pool +} + +func (b *bytesBufferPool) Get() *bytes.Buffer { + return b.pool.Get().(*bytes.Buffer) +} + +func (b *bytesBufferPool) Put(buf *bytes.Buffer) { + buf.Reset() + b.pool.Put(buf) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/pool/fastbuffer.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/pool/fastbuffer.go new file mode 100644 index 00000000000..fe0f2757742 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/pool/fastbuffer.go @@ -0,0 +1,28 @@ +package pool + +import ( + "sync" + + "github.com/TykTechnologies/graphql-go-tools/pkg/fastbuffer" +) + +var FastBuffer = fastBufferPool{ + pool: sync.Pool{ + New: func() interface{} { + return fastbuffer.New() + }, + }, +} + +type fastBufferPool struct { + pool sync.Pool +} + +func (f *fastBufferPool) Get() *fastbuffer.FastBuffer { + return f.pool.Get().(*fastbuffer.FastBuffer) +} + +func (f *fastBufferPool) Put(buf *fastbuffer.FastBuffer) { + buf.Reset() + f.pool.Put(buf) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/pool/hash64.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/pool/hash64.go new file mode 100644 index 00000000000..41b89e49f9b --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/pool/hash64.go @@ -0,0 +1,31 @@ +package pool + +import ( + "hash" + "sync" + + "github.com/cespare/xxhash/v2" +) + +var ( + Hash64 = hash64Pool{ + pool: sync.Pool{ + New: func() 
interface{} { + return xxhash.New() + }, + }, + } +) + +type hash64Pool struct { + pool sync.Pool +} + +func (b *hash64Pool) Get() hash.Hash64 { + return b.pool.Get().(hash.Hash64) +} + +func (b *hash64Pool) Put(hash64 hash.Hash64) { + hash64.Reset() + b.pool.Put(hash64) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/postprocess/datasourceinput.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/postprocess/datasourceinput.go new file mode 100644 index 00000000000..91a8e9abba4 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/postprocess/datasourceinput.go @@ -0,0 +1,104 @@ +package postprocess + +import ( + "strconv" + "strings" + + "github.com/TykTechnologies/graphql-go-tools/pkg/engine/plan" + "github.com/TykTechnologies/graphql-go-tools/pkg/engine/resolve" +) + +type ProcessDataSource struct{} + +func (d *ProcessDataSource) Process(pre plan.Plan) plan.Plan { + switch t := pre.(type) { + case *plan.SynchronousResponsePlan: + d.traverseNode(t.Response.Data) + case *plan.StreamingResponsePlan: + d.traverseNode(t.Response.InitialResponse.Data) + for i := range t.Response.Patches { + d.traverseFetch(t.Response.Patches[i].Fetch) + d.traverseNode(t.Response.Patches[i].Value) + } + case *plan.SubscriptionResponsePlan: + d.traverseTrigger(&t.Response.Trigger) + d.traverseNode(t.Response.Response.Data) + } + return pre +} + +func (d *ProcessDataSource) traverseNode(node resolve.Node) { + switch n := node.(type) { + case *resolve.Object: + d.traverseFetch(n.Fetch) + for i := range n.Fields { + d.traverseNode(n.Fields[i].Value) + } + case *resolve.Array: + d.traverseNode(n.Item) + } +} + +func (d *ProcessDataSource) traverseFetch(fetch resolve.Fetch) { + if fetch == nil { + return + } + switch f := fetch.(type) { + case *resolve.SingleFetch: + d.traverseSingleFetch(f) + case *resolve.BatchFetch: + d.traverseSingleFetch(f.Fetch) + case *resolve.ParallelFetch: + for i := range f.Fetches { + d.traverseFetch(f.Fetches[i]) + } + } +} + +func (d *ProcessDataSource) traverseTrigger(trigger *resolve.GraphQLSubscriptionTrigger) { + d.resolveInputTemplate(trigger.Variables, string(trigger.Input), &trigger.InputTemplate) + trigger.Input = nil + trigger.Variables = nil +} + +func (d *ProcessDataSource) traverseSingleFetch(fetch *resolve.SingleFetch) { + d.resolveInputTemplate(fetch.Variables, fetch.Input, &fetch.InputTemplate) + fetch.Input = "" + fetch.Variables = nil + fetch.InputTemplate.SetTemplateOutputToNullOnVariableNull = fetch.SetTemplateOutputToNullOnVariableNull + fetch.SetTemplateOutputToNullOnVariableNull = false +} + +func (d *ProcessDataSource) resolveInputTemplate(variables resolve.Variables, input string, template *resolve.InputTemplate) { + + if input == "" { + return + } + + if !strings.Contains(input, "$$") { + template.Segments = append(template.Segments, resolve.TemplateSegment{ + SegmentType: resolve.StaticSegmentType, + Data: []byte(input), + }) + return + } + + segments := strings.Split(input, "$$") + + isVariable := false + for _, seg := range segments { + switch { + case isVariable: + i, _ := strconv.Atoi(seg) + variableTemplateSegment := (variables)[i].TemplateSegment() + template.Segments = append(template.Segments, variableTemplateSegment) + isVariable = false + default: + template.Segments = append(template.Segments, resolve.TemplateSegment{ + SegmentType: resolve.StaticSegmentType, + Data: []byte(seg), + }) + isVariable = true + } + } +} diff --git 
a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/postprocess/defer.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/postprocess/defer.go new file mode 100644 index 00000000000..55a0d247007 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/postprocess/defer.go @@ -0,0 +1,169 @@ +package postprocess + +import ( + "github.com/TykTechnologies/graphql-go-tools/pkg/engine/plan" + "github.com/TykTechnologies/graphql-go-tools/pkg/engine/resolve" + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/literal" +) + +type ProcessDefer struct { + objects []*resolve.Object + out *plan.StreamingResponsePlan + updated bool +} + +func (p *ProcessDefer) Process(pre plan.Plan) plan.Plan { + + p.out = nil + p.updated = false + p.objects = p.objects[:0] + + switch in := pre.(type) { + case *plan.SynchronousResponsePlan: + return p.synchronousResponse(in) + case *plan.StreamingResponsePlan: + return p.processStreamingResponsePlan(in) + default: + return pre + } +} + +func (p *ProcessDefer) processStreamingResponsePlan(in *plan.StreamingResponsePlan) plan.Plan { + p.out = in + for i := range p.out.Response.Patches { + p.traverseNode(p.out.Response.Patches[i].Value) + } + p.traverseNode(p.out.Response.InitialResponse.Data) + return p.out +} + +func (p *ProcessDefer) synchronousResponse(pre *plan.SynchronousResponsePlan) plan.Plan { + p.out = &plan.StreamingResponsePlan{ + FlushInterval: pre.FlushInterval, + Response: &resolve.GraphQLStreamingResponse{ + InitialResponse: pre.Response, + FlushInterval: pre.FlushInterval, + }, + } + p.traverseNode(p.out.Response.InitialResponse.Data) + if p.updated { + return p.out + } + return pre +} + +func (p *ProcessDefer) traverseNode(node resolve.Node) { + + switch n := node.(type) { + case *resolve.Object: + p.objects = append(p.objects, n) + for i := range n.Fields { + if n.Fields[i].Defer != nil { + p.updated = true + patchIndex, ok := p.createPatch(n, i) + if !ok { + continue + } + n.Fields[i].Defer = nil + n.Fields[i].Value = &resolve.Null{ + Defer: resolve.Defer{ + Enabled: true, + PatchIndex: patchIndex, + }, + } + p.traverseNode(p.out.Response.Patches[patchIndex].Value) + } else { + p.traverseNode(n.Fields[i].Value) + } + } + p.objects = p.objects[:len(p.objects)-1] + case *resolve.Array: + p.traverseNode(n.Item) + } +} + +func (p *ProcessDefer) createPatch(object *resolve.Object, field int) (int, bool) { + oldValue := object.Fields[field].Value + var patch *resolve.GraphQLResponsePatch + if object.Fields[field].HasBuffer && !p.bufferUsedOnNonDeferField(object, field, object.Fields[field].BufferID) { + patchFetch, ok := p.processFieldSetBuffer(object, field) + if !ok { + return 0, false + } + patch = &resolve.GraphQLResponsePatch{ + Value: oldValue, + Fetch: &patchFetch, + Operation: literal.REPLACE, + } + object.Fields[field].HasBuffer = false + object.Fields[field].BufferID = 0 + } else { + patch = &resolve.GraphQLResponsePatch{ + Value: oldValue, + Operation: literal.REPLACE, + } + } + p.out.Response.Patches = append(p.out.Response.Patches, patch) + patchIndex := len(p.out.Response.Patches) - 1 + return patchIndex, true +} + +func (p *ProcessDefer) bufferUsedOnNonDeferField(object *resolve.Object, field, bufferID int) bool { + for i := range object.Fields { + if object.Fields[i].BufferID != bufferID { + continue + } + if i == field { + continue // skip currently evaluated field + } + if object.Fields[i].Defer == nil { + return true + } + } + return false +} + +func (p *ProcessDefer) processFieldSetBuffer(object 
*resolve.Object, field int) (patchFetch resolve.SingleFetch, ok bool) { + id := object.Fields[field].BufferID + if p.objects[len(p.objects)-1].Fetch == nil { + return patchFetch, false + } + switch fetch := p.objects[len(p.objects)-1].Fetch.(type) { + case *resolve.SingleFetch: + if fetch.BufferId != id { + return patchFetch, false + } + patchFetch = *fetch + patchFetch.BufferId = 0 + p.objects[len(p.objects)-1].Fetch = nil + return patchFetch, true + case *resolve.BatchFetch: + if fetch.Fetch.BufferId != id { + return patchFetch, false + } + patchFetch = *fetch.Fetch + patchFetch.BufferId = 0 + p.objects[len(p.objects)-1].Fetch = nil + return patchFetch, true + case *resolve.ParallelFetch: + for k := range fetch.Fetches { + var singleFetch *resolve.SingleFetch + switch f := fetch.Fetches[k].(type) { + case *resolve.SingleFetch: + singleFetch = f + case *resolve.BatchFetch: + singleFetch = f.Fetch + } + if id == singleFetch.BufferId { + patchFetch = *singleFetch + patchFetch.BufferId = 0 + fetch.Fetches = append(fetch.Fetches[:k], fetch.Fetches[k+1:]...) + if len(fetch.Fetches) == 1 { + p.objects[len(p.objects)-1].Fetch = fetch.Fetches[0] + } + return patchFetch, true + } + } + } + return patchFetch, false +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/postprocess/injectheader.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/postprocess/injectheader.go new file mode 100644 index 00000000000..71acde270d3 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/postprocess/injectheader.go @@ -0,0 +1,103 @@ +package postprocess + +import ( + "encoding/json" + "github.com/TykTechnologies/graphql-go-tools/pkg/engine/plan" + "github.com/TykTechnologies/graphql-go-tools/pkg/engine/resolve" + "github.com/buger/jsonparser" + "net/http" +) + +type ProcessInjectHeader struct { + header http.Header +} + +func NewProcessInjectHeader(header http.Header) *ProcessInjectHeader { + return &ProcessInjectHeader{header: header} +} + +func (p *ProcessInjectHeader) Process(pre plan.Plan) plan.Plan { + switch t := pre.(type) { + case *plan.SynchronousResponsePlan: + p.traverseNode(t.Response.Data) + case *plan.StreamingResponsePlan: + p.traverseNode(t.Response.InitialResponse.Data) + for i := range t.Response.Patches { + p.traverseFetch(t.Response.Patches[i].Fetch) + p.traverseNode(t.Response.Patches[i].Value) + } + case *plan.SubscriptionResponsePlan: + p.traverseTrigger(&t.Response.Trigger) + p.traverseNode(t.Response.Response.Data) + } + return pre +} + +func (p *ProcessInjectHeader) traverseNode(node resolve.Node) { + switch n := node.(type) { + case *resolve.Object: + p.traverseFetch(n.Fetch) + for i := range n.Fields { + p.traverseNode(n.Fields[i].Value) + } + case *resolve.Array: + p.traverseNode(n.Item) + } +} + +func (p *ProcessInjectHeader) traverseFetch(fetch resolve.Fetch) { + if fetch == nil { + return + } + switch f := fetch.(type) { + case *resolve.SingleFetch: + p.traverseSingleFetch(f) + case *resolve.BatchFetch: + p.traverseSingleFetch(f.Fetch) + case *resolve.ParallelFetch: + for i := range f.Fetches { + p.traverseFetch(f.Fetches[i]) + } + } +} + +func (p *ProcessInjectHeader) traverseTrigger(trigger *resolve.GraphQLSubscriptionTrigger) { + trigger.Input = []byte(p.injectHeader(trigger.Input)) +} + +func (p *ProcessInjectHeader) traverseSingleFetch(fetch *resolve.SingleFetch) { + fetch.Input = p.injectHeader([]byte(fetch.Input)) +} + +func (p *ProcessInjectHeader) injectHeader(input []byte) string { + var header http.Header + val, valType, _, 
err := jsonparser.Get(input, "header") + if err != nil && valType != jsonparser.NotExist { + return string(input) + } + + switch valType { + case jsonparser.NotExist: + header = p.header + case jsonparser.Object: + err := json.Unmarshal(val, &header) + if err != nil { + return string(input) + } + for key, val := range p.header { + header[key] = val + } + default: + return string(input) + } + + m, err := json.Marshal(header) + if err != nil { + return string(input) + } + updated, err := jsonparser.Set(input, m, "header") + if err != nil { + return string(input) + } + return string(updated) +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/postprocess/postprocess.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/postprocess/postprocess.go new file mode 100644 index 00000000000..e89c8b115a4 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/postprocess/postprocess.go @@ -0,0 +1,35 @@ +package postprocess + +import ( + "github.com/TykTechnologies/graphql-go-tools/pkg/engine/plan" +) + +type PostProcessor interface { + Process(pre plan.Plan) plan.Plan +} + +type Processor struct { + postProcessors []PostProcessor +} + +func (p *Processor) AddPostProcessor(pr PostProcessor) { + p.postProcessors = append([]PostProcessor{pr}, p.postProcessors...) +} + +func DefaultProcessor() *Processor { + return &Processor{ + []PostProcessor{ + &ProcessDefer{}, + &ProcessStream{}, + &ProcessDataSource{}, + }, + } +} + +func (p *Processor) Process(pre plan.Plan) (post plan.Plan) { + post = pre + for i := range p.postProcessors { + post = p.postProcessors[i].Process(post) + } + return +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/postprocess/stream.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/postprocess/stream.go new file mode 100644 index 00000000000..95e83bb0a44 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/postprocess/stream.go @@ -0,0 +1,86 @@ +package postprocess + +import ( + "github.com/TykTechnologies/graphql-go-tools/pkg/engine/plan" + "github.com/TykTechnologies/graphql-go-tools/pkg/engine/resolve" + "github.com/TykTechnologies/graphql-go-tools/pkg/lexer/literal" +) + +type ProcessStream struct { + out *plan.StreamingResponsePlan + didUpdate bool +} + +func (p *ProcessStream) Process(pre plan.Plan) plan.Plan { + + p.out = nil + p.didUpdate = false + + switch in := pre.(type) { + case *plan.SynchronousResponsePlan: + return p.processSynchronousPlan(in) + case *plan.StreamingResponsePlan: + return p.processStreamingResponsePlan(in) + default: + return pre + } +} + +func (p *ProcessStream) processStreamingResponsePlan(in *plan.StreamingResponsePlan) plan.Plan { + p.out = in + for i := range p.out.Response.Patches { + p.traverseNode(p.out.Response.Patches[i].Value) + } + p.traverseNode(p.out.Response.InitialResponse.Data) + return p.out +} + +func (p *ProcessStream) processSynchronousPlan(in *plan.SynchronousResponsePlan) plan.Plan { + p.out = &plan.StreamingResponsePlan{ + FlushInterval: in.FlushInterval, + Response: &resolve.GraphQLStreamingResponse{ + InitialResponse: in.Response, + FlushInterval: in.FlushInterval, + }, + } + p.traverseNode(in.Response.Data) + if p.didUpdate { + return p.out + } + return in +} + +func (p *ProcessStream) traverseNode(node resolve.Node) { + switch n := node.(type) { + case *resolve.Object: + for i := range n.Fields { + if n.Fields[i].Stream != nil { + switch array := n.Fields[i].Value.(type) { + case *resolve.Array: + array.Stream.Enabled = true + 
array.Stream.InitialBatchSize = n.Fields[i].Stream.InitialBatchSize + n.Fields[i].Stream = nil + } + } + p.traverseNode(n.Fields[i].Value) + } + case *resolve.Array: + if n.Stream.Enabled { + p.didUpdate = true + patch := &resolve.GraphQLResponsePatch{ + Value: n.Item, + Operation: literal.ADD, + } + if n.Stream.InitialBatchSize == 0 { + n.Item = nil + } + p.out.Response.Patches = append(p.out.Response.Patches, patch) + n.Stream.PatchIndex = len(p.out.Response.Patches) - 1 + + p.traverseNode(p.out.Response.Patches[n.Stream.PatchIndex].Value) + + return + } + p.traverseNode(n.Item) + } +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/subscription/context.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/subscription/context.go new file mode 100644 index 00000000000..09b9efb6a09 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/subscription/context.go @@ -0,0 +1,49 @@ +package subscription + +import ( + "context" + "fmt" + "net/http" +) + +type InitialHttpRequestContext struct { + context.Context + Request *http.Request +} + +func NewInitialHttpRequestContext(r *http.Request) *InitialHttpRequestContext { + return &InitialHttpRequestContext{ + Context: r.Context(), + Request: r, + } +} + +type subscriptionCancellations map[string]context.CancelFunc + +func (sc subscriptionCancellations) Add(id string) (context.Context, error) { + _, ok := sc[id] + if ok { + return nil, fmt.Errorf("subscriber for %s already exists", id) + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + sc[id] = cancelFunc + return ctx, nil +} + +func (sc subscriptionCancellations) Cancel(id string) (ok bool) { + cancelFunc, ok := sc[id] + if !ok { + return false + } + + cancelFunc() + delete(sc, id) + return true +} + +func (sc subscriptionCancellations) CancelAll() { + for _, cancelFunc := range sc { + cancelFunc() + } +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/subscription/executor_v1.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/subscription/executor_v1.go new file mode 100644 index 00000000000..e08dda1b604 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/subscription/executor_v1.go @@ -0,0 +1,70 @@ +package subscription + +import ( + "context" + "sync" + + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/engine/resolve" + "github.com/TykTechnologies/graphql-go-tools/pkg/execution" +) + +type ExecutorV1Pool struct { + ExecutionHandler *execution.Handler + executorPool *sync.Pool +} + +func NewExecutorV1Pool(executionHandler *execution.Handler) *ExecutorV1Pool { + return &ExecutorV1Pool{ + ExecutionHandler: executionHandler, + executorPool: &sync.Pool{ + New: func() interface{} { + return &ExecutorV1{} + }, + }, + } +} + +func (e *ExecutorV1Pool) Get(payload []byte) (Executor, error) { + engineExecutor, node, executionContext, err := e.ExecutionHandler.Handle(payload, []byte("")) + if err != nil { + return nil, err + } + + executor := e.executorPool.Get().(*ExecutorV1) + executor.engineExecutor = engineExecutor + executor.rootNode = node + executor.executionContext = executionContext + + return executor, nil +} + +func (e *ExecutorV1Pool) Put(executor Executor) error { + executor.Reset() + e.executorPool.Put(executor) + return nil +} + +type ExecutorV1 struct { + engineExecutor *execution.Executor + rootNode execution.RootNode + executionContext execution.Context +} + +func (e *ExecutorV1) Execute(writer resolve.FlushWriter) 
error { + return e.engineExecutor.Execute(e.executionContext, e.rootNode, writer) +} + +func (e *ExecutorV1) OperationType() ast.OperationType { + return e.rootNode.OperationType() +} + +func (e *ExecutorV1) SetContext(context context.Context) { + e.executionContext.Context = context +} + +func (e *ExecutorV1) Reset() { + e.engineExecutor = nil + e.rootNode = nil + e.executionContext = execution.Context{} +} diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/subscription/executor_v2.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/subscription/executor_v2.go new file mode 100644 index 00000000000..757e2005568 --- /dev/null +++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/subscription/executor_v2.go @@ -0,0 +1,88 @@ +package subscription + +import ( + "bytes" + "context" + "sync" + + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/engine/resolve" + "github.com/TykTechnologies/graphql-go-tools/pkg/graphql" +) + +// ExecutorV2Pool - provides reusable executors +type ExecutorV2Pool struct { + engine *graphql.ExecutionEngineV2 + executorPool *sync.Pool + connectionInitReqCtx context.Context // connectionInitReqCtx - holds original request context used to establish websocket connection +} + +func NewExecutorV2Pool(engine *graphql.ExecutionEngineV2, connectionInitReqCtx context.Context) *ExecutorV2Pool { + return &ExecutorV2Pool{ + engine: engine, + executorPool: &sync.Pool{ + New: func() interface{} { + return &ExecutorV2{} + }, + }, + connectionInitReqCtx: connectionInitReqCtx, + } +} + +func (e *ExecutorV2Pool) Get(payload []byte) (Executor, error) { + operation := graphql.Request{} + err := graphql.UnmarshalRequest(bytes.NewReader(payload), &operation) + if err != nil { + return nil, err + } + + return &ExecutorV2{ + engine: e.engine, + operation: &operation, + context: context.Background(), + reqCtx: e.connectionInitReqCtx, + }, nil +} + +func (e *ExecutorV2Pool) Put(executor Executor) error { + executor.Reset() + e.executorPool.Put(executor) + return nil +} + +type ExecutorV2 struct { + engine *graphql.ExecutionEngineV2 + operation *graphql.Request + context context.Context + reqCtx context.Context +} + +func (e *ExecutorV2) Execute(writer resolve.FlushWriter) error { + options := make([]graphql.ExecutionOptionsV2, 0) + switch ctx := e.reqCtx.(type) { + case *InitialHttpRequestContext: + options = append(options, graphql.WithAdditionalHttpHeaders(ctx.Request.Header)) + } + + return e.engine.Execute(e.context, e.operation, writer, options...) 
+}
+
+func (e *ExecutorV2) OperationType() ast.OperationType {
+	opType, err := e.operation.OperationType()
+	if err != nil {
+		return ast.OperationTypeUnknown
+	}
+
+	return ast.OperationType(opType)
+}
+
+func (e *ExecutorV2) SetContext(context context.Context) {
+	e.context = context
+}
+
+func (e *ExecutorV2) Reset() {
+	e.engine = nil
+	e.operation = nil
+	e.context = context.Background()
+	e.reqCtx = context.TODO()
+}
diff --git a/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/subscription/handler.go b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/subscription/handler.go
new file mode 100644
index 00000000000..0395eb57a85
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/graphql-go-tools/pkg/subscription/handler.go
@@ -0,0 +1,452 @@
+package subscription
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"sync"
+	"time"
+
+	"github.com/jensneuse/abstractlogger"
+
+	"github.com/TykTechnologies/graphql-go-tools/pkg/ast"
+	"github.com/TykTechnologies/graphql-go-tools/pkg/engine/resolve"
+	"github.com/TykTechnologies/graphql-go-tools/pkg/graphql"
+)
+
+const (
+	MessageTypeConnectionInit      = "connection_init"
+	MessageTypeConnectionAck       = "connection_ack"
+	MessageTypeConnectionError     = "connection_error"
+	MessageTypeConnectionTerminate = "connection_terminate"
+	MessageTypeConnectionKeepAlive = "ka"
+	MessageTypeStart               = "start"
+	MessageTypeStop                = "stop"
+	MessageTypeData                = "data"
+	MessageTypeError               = "error"
+	MessageTypeComplete            = "complete"
+
+	DefaultKeepAliveInterval          = "15s"
+	DefaultSubscriptionUpdateInterval = "1s"
+)
+
+// Message defines the actual subscription message which will be passed from client to server and vice versa.
+type Message struct {
+	Id      string          `json:"id"`
+	Type    string          `json:"type"`
+	Payload json.RawMessage `json:"payload"`
+}
+
+// Client provides an interface which can be implemented by any possible subscription client like websockets, mqtt, etc.
+type Client interface {
+	// ReadFromClient will invoke a read operation from the client connection.
+	ReadFromClient() (*Message, error)
+	// WriteToClient will invoke a write operation to the client connection.
+	WriteToClient(Message) error
+	// IsConnected will indicate if a connection is still established.
+	IsConnected() bool
+	// Disconnect will close the connection between server and client.
+	Disconnect() error
+}
+
+// ExecutorPool is an abstraction for creating executors.
+type ExecutorPool interface {
+	Get(payload []byte) (Executor, error)
+	Put(executor Executor) error
+}
+
+// Executor is an abstraction for executing a GraphQL engine.
+type Executor interface {
+	Execute(writer resolve.FlushWriter) error
+	OperationType() ast.OperationType
+	SetContext(context context.Context)
+	Reset()
+}
+
+// Handler is the actual subscription handler which will keep track of how to handle messages coming from the client.
+type Handler struct {
+	logger abstractlogger.Logger
+	// client will hold the subscription client implementation.
+	client Client
+	// keepAliveInterval is the interval on which the server sends keep alive messages to the client.
+	keepAliveInterval time.Duration
+	// subscriptionUpdateInterval is the interval on which the server sends subscription updates to the client.
+	subscriptionUpdateInterval time.Duration
+	// subCancellations is a map containing the cancellation functions for every active subscription.
+	subCancellations subscriptionCancellations
+	// executorPool is responsible for creating and holding executors.
+	executorPool ExecutorPool
+	// bufferPool will hold buffers.
+	bufferPool *sync.Pool
+}
+
+// NewHandler creates a new subscription handler.
+func NewHandler(logger abstractlogger.Logger, client Client, executorPool ExecutorPool) (*Handler, error) {
+	keepAliveInterval, err := time.ParseDuration(DefaultKeepAliveInterval)
+	if err != nil {
+		return nil, err
+	}
+
+	subscriptionUpdateInterval, err := time.ParseDuration(DefaultSubscriptionUpdateInterval)
+	if err != nil {
+		return nil, err
+	}
+
+	return &Handler{
+		logger:                     logger,
+		client:                     client,
+		keepAliveInterval:          keepAliveInterval,
+		subscriptionUpdateInterval: subscriptionUpdateInterval,
+		subCancellations:           subscriptionCancellations{},
+		executorPool:               executorPool,
+		bufferPool: &sync.Pool{
+			New: func() interface{} {
+				writer := graphql.NewEngineResultWriterFromBuffer(bytes.NewBuffer(make([]byte, 0, 1024)))
+				return &writer
+			},
+		},
+	}, nil
+}
+
+// Handle will handle the subscription connection.
+func (h *Handler) Handle(ctx context.Context) {
+	defer func() {
+		h.subCancellations.CancelAll()
+	}()
+
+	for {
+		if !h.client.IsConnected() {
+			h.logger.Debug("subscription.Handler.Handle()",
+				abstractlogger.String("message", "client has disconnected"),
+			)
+
+			return
+		}
+
+		message, err := h.client.ReadFromClient()
+		if err != nil {
+			h.logger.Error("subscription.Handler.Handle()",
+				abstractlogger.Error(err),
+				abstractlogger.Any("message", message),
+			)
+
+			h.handleConnectionError("could not read message from client")
+		} else if message != nil {
+			switch message.Type {
+			case MessageTypeConnectionInit:
+				h.handleInit()
+				go h.handleKeepAlive(ctx)
+			case MessageTypeStart:
+				h.handleStart(message.Id, message.Payload)
+			case MessageTypeStop:
+				h.handleStop(message.Id)
+			case MessageTypeConnectionTerminate:
+				h.handleConnectionTerminate()
+				return
+			}
+		}
+
+		select {
+		case <-ctx.Done():
+			return
+		default:
+			continue
+		}
+	}
+}
+
+// ChangeKeepAliveInterval can be used to change the keep alive interval.
+func (h *Handler) ChangeKeepAliveInterval(d time.Duration) {
+	h.keepAliveInterval = d
+}
+
+// ChangeSubscriptionUpdateInterval can be used to change the update interval.
+func (h *Handler) ChangeSubscriptionUpdateInterval(d time.Duration) {
+	h.subscriptionUpdateInterval = d
+}
+
+// handleInit will handle an init message.
+func (h *Handler) handleInit() {
+	ackMessage := Message{
+		Type: MessageTypeConnectionAck,
+	}
+
+	err := h.client.WriteToClient(ackMessage)
+	if err != nil {
+		h.logger.Error("subscription.Handler.handleInit()",
+			abstractlogger.Error(err),
+		)
+	}
+}
+
+// handleStart will handle a start message.
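+// An illustrative exchange using the message type constants above (sketch
+// only; exact payloads depend on the client implementation):
+//
+//	client -> server: {"type":"connection_init"}
+//	server -> client: {"type":"connection_ack"}
+//	client -> server: {"id":"1","type":"start","payload":{"query":"subscription { ... }"}}
+//	server -> client: {"id":"1","type":"data","payload":{"data":{...}}}
+//	client -> server: {"id":"1","type":"stop"}
+//	server -> client: {"id":"1","type":"complete"}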
+func (h *Handler) handleStart(id string, payload []byte) {
+	executor, err := h.executorPool.Get(payload)
+	if err != nil {
+		h.logger.Error("subscription.Handler.handleStart()",
+			abstractlogger.Error(err),
+		)
+
+		h.handleError(id, graphql.RequestErrorsFromError(err))
+		return
+	}
+
+	if err = h.handleOnBeforeStart(executor); err != nil {
+		h.handleError(id, graphql.RequestErrorsFromError(err))
+		return
+	}
+
+	if executor.OperationType() == ast.OperationTypeSubscription {
+		ctx, subsErr := h.subCancellations.Add(id)
+		if subsErr != nil {
+			h.handleError(id, graphql.RequestErrorsFromError(subsErr))
+			return
+		}
+		go h.startSubscription(ctx, id, executor)
+		return
+	}
+
+	go h.handleNonSubscriptionOperation(id, executor)
+}
+
+func (h *Handler) handleOnBeforeStart(executor Executor) error {
+	switch e := executor.(type) {
+	case *ExecutorV2:
+		if hook := e.engine.GetWebsocketBeforeStartHook(); hook != nil {
+			return hook.OnBeforeStart(e.reqCtx, e.operation)
+		}
+	case *ExecutorV1:
+		// do nothing
+	}
+
+	return nil
+}
+
+// handleNonSubscriptionOperation will handle a non-subscription operation like a query or a mutation.
+func (h *Handler) handleNonSubscriptionOperation(id string, executor Executor) {
+	defer func() {
+		err := h.executorPool.Put(executor)
+		if err != nil {
+			h.logger.Error("subscription.Handle.handleNonSubscriptionOperation()",
+				abstractlogger.Error(err),
+			)
+		}
+	}()
+
+	buf := h.bufferPool.Get().(*graphql.EngineResultWriter)
+	buf.Reset()
+
+	defer h.bufferPool.Put(buf)
+
+	err := executor.Execute(buf)
+	if err != nil {
+		h.logger.Error("subscription.Handle.handleNonSubscriptionOperation()",
+			abstractlogger.Error(err),
+		)
+
+		h.handleError(id, graphql.RequestErrorsFromError(err))
+		return
+	}
+
+	h.logger.Debug("subscription.Handle.handleNonSubscriptionOperation()",
+		abstractlogger.ByteString("execution_result", buf.Bytes()),
+	)
+
+	h.sendData(id, buf.Bytes())
+	h.sendComplete(id)
+}
+
+// startSubscription will invoke the actual subscription.
+func (h *Handler) startSubscription(ctx context.Context, id string, executor Executor) {
+	defer func() {
+		err := h.executorPool.Put(executor)
+		if err != nil {
+			h.logger.Error("subscription.Handle.startSubscription()",
+				abstractlogger.Error(err),
+			)
+		}
+	}()
+
+	executor.SetContext(ctx)
+	buf := h.bufferPool.Get().(*graphql.EngineResultWriter)
+	buf.Reset()
+
+	defer h.bufferPool.Put(buf)
+
+	h.executeSubscription(buf, id, executor)
+
+	for {
+		buf.Reset()
+		select {
+		case <-ctx.Done():
+			return
+		case <-time.After(h.subscriptionUpdateInterval):
+			h.executeSubscription(buf, id, executor)
+		}
+	}
+}
+
+// executeSubscription will keep executing the subscription until it ends.
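+// The flush callback set below streams every intermediate result to the
+// client as soon as the engine flushes it; the trailing Len() check covers
+// engines that buffer a single result without flushing.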
+func (h *Handler) executeSubscription(buf *graphql.EngineResultWriter, id string, executor Executor) {
+	buf.SetFlushCallback(func(data []byte) {
+		h.logger.Debug("subscription.Handle.executeSubscription()",
+			abstractlogger.ByteString("execution_result", data),
+		)
+		h.sendData(id, data)
+	})
+	defer buf.SetFlushCallback(nil)
+
+	err := executor.Execute(buf)
+	if err != nil {
+		h.logger.Error("subscription.Handle.executeSubscription()",
+			abstractlogger.Error(err),
+		)
+
+		h.handleError(id, graphql.RequestErrorsFromError(err))
+		return
+	}
+
+	if buf.Len() > 0 {
+		data := buf.Bytes()
+		h.logger.Debug("subscription.Handle.executeSubscription()",
+			abstractlogger.ByteString("execution_result", data),
+		)
+		h.sendData(id, data)
+	}
+}
+
+// handleStop will handle a stop message.
+func (h *Handler) handleStop(id string) {
+	h.subCancellations.Cancel(id)
+	h.sendComplete(id)
+}
+
+// sendData will send a data message to the client.
+func (h *Handler) sendData(id string, responseData []byte) {
+	dataMessage := Message{
+		Id:      id,
+		Type:    MessageTypeData,
+		Payload: responseData,
+	}
+
+	err := h.client.WriteToClient(dataMessage)
+	if err != nil {
+		h.logger.Error("subscription.Handler.sendData()",
+			abstractlogger.Error(err),
+		)
+	}
+}
+
+// nolint
+// sendComplete will send a complete message to the client.
+func (h *Handler) sendComplete(id string) {
+	completeMessage := Message{
+		Id:      id,
+		Type:    MessageTypeComplete,
+		Payload: nil,
+	}
+
+	err := h.client.WriteToClient(completeMessage)
+	if err != nil {
+		h.logger.Error("subscription.Handler.sendComplete()",
+			abstractlogger.Error(err),
+		)
+	}
+}
+
+// handleConnectionTerminate will handle a connection terminate message.
+func (h *Handler) handleConnectionTerminate() {
+	err := h.client.Disconnect()
+	if err != nil {
+		h.logger.Error("subscription.Handler.handleConnectionTerminate()",
+			abstractlogger.Error(err),
+		)
+	}
+}
+
+// handleKeepAlive will handle the keep alive loop.
+func (h *Handler) handleKeepAlive(ctx context.Context) {
+	for {
+		select {
+		case <-ctx.Done():
+			return
+		case <-time.After(h.keepAliveInterval):
+			h.sendKeepAlive()
+		}
+	}
+}
+
+// sendKeepAlive will send a keep alive message to the client.
+func (h *Handler) sendKeepAlive() {
+	keepAliveMessage := Message{
+		Type: MessageTypeConnectionKeepAlive,
+	}
+
+	err := h.client.WriteToClient(keepAliveMessage)
+	if err != nil {
+		h.logger.Error("subscription.Handler.sendKeepAlive()",
+			abstractlogger.Error(err),
+		)
+	}
+}
+
+// handleConnectionError will handle a connection error message.
+func (h *Handler) handleConnectionError(errorPayload interface{}) {
+	payloadBytes, err := json.Marshal(errorPayload)
+	if err != nil {
+		h.logger.Error("subscription.Handler.handleConnectionError()",
+			abstractlogger.Error(err),
+			abstractlogger.Any("errorPayload", errorPayload),
+		)
+	}
+
+	connectionErrorMessage := Message{
+		Type:    MessageTypeConnectionError,
+		Payload: payloadBytes,
+	}
+
+	err = h.client.WriteToClient(connectionErrorMessage)
+	if err != nil {
+		h.logger.Error("subscription.Handler.handleConnectionError()",
+			abstractlogger.Error(err),
+		)
+
+		err := h.client.Disconnect()
+		if err != nil {
+			h.logger.Error("subscription.Handler.handleConnectionError()",
+				abstractlogger.Error(err),
+			)
+		}
+	}
+}
+
+// handleError will handle an error message.
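+// The payload carries the JSON-marshalled request errors, e.g. (illustrative):
+//
+//	{"id":"1","type":"error","payload":[{"message":"could not resolve field"}]}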
+func (h *Handler) handleError(id string, errors graphql.RequestErrors) { + payloadBytes, err := json.Marshal(errors) + if err != nil { + h.logger.Error("subscription.Handler.handleError()", + abstractlogger.Error(err), + abstractlogger.Any("errors", errors), + ) + } + + errorMessage := Message{ + Id: id, + Type: MessageTypeError, + Payload: payloadBytes, + } + + err = h.client.WriteToClient(errorMessage) + if err != nil { + h.logger.Error("subscription.Handler.handleError()", + abstractlogger.Error(err), + ) + } +} + +// ActiveSubscriptions will return the actual number of active subscriptions for that client. +func (h *Handler) ActiveSubscriptions() int { + return len(h.subCancellations) +} diff --git a/vendor/github.com/TykTechnologies/leakybucket/.drone.yml b/vendor/github.com/TykTechnologies/leakybucket/.drone.yml new file mode 100644 index 00000000000..c115285ae6a --- /dev/null +++ b/vendor/github.com/TykTechnologies/leakybucket/.drone.yml @@ -0,0 +1,19 @@ +env: +- REPORT_CARD_GITHUB_STATUS_TOKEN=$$report_card_github_status_token +- REPORT_CARD_GITHUB_REPO_TOKEN=$$report_card_github_repo_token +image: clever/drone-go:1.6 +notify: + email: + recipients: + - drone@clever.com + slack: + on_failure: true + on_started: false + on_success: false + webhook_url: $$slack_webhook +script: +- sudo pip install -q git+https://$REPORT_CARD_GITHUB_REPO_TOKEN@github.com/Clever/report-card.git; GITHUB_API_TOKEN=$REPORT_CARD_GITHUB_STATUS_TOKEN report-card --publish || true +- ulimit -n 2560 +- make test +services: +- redis:2.6 diff --git a/vendor/github.com/TykTechnologies/leakybucket/.gitignore b/vendor/github.com/TykTechnologies/leakybucket/.gitignore new file mode 100644 index 00000000000..19b887e04ea --- /dev/null +++ b/vendor/github.com/TykTechnologies/leakybucket/.gitignore @@ -0,0 +1,2 @@ +*~ +c.out diff --git a/vendor/github.com/TykTechnologies/leakybucket/LICENSE b/vendor/github.com/TykTechnologies/leakybucket/LICENSE new file mode 100644 index 00000000000..a6da337d1ae --- /dev/null +++ b/vendor/github.com/TykTechnologies/leakybucket/LICENSE @@ -0,0 +1,190 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014 Clever, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/TykTechnologies/leakybucket/Makefile b/vendor/github.com/TykTechnologies/leakybucket/Makefile new file mode 100644 index 00000000000..2375d33f876 --- /dev/null +++ b/vendor/github.com/TykTechnologies/leakybucket/Makefile @@ -0,0 +1,15 @@ +include golang.mk +.DEFAULT_GOAL := test # override default goal set in library makefile + +.PHONY: test $(PKGS) +SHELL := /bin/bash +PKG := github.com/Clever/leakybucket +PKGS := $(shell go list ./...) +$(eval $(call golang-version-check,1.6)) + +export REDIS_URL ?= localhost:6379 + +test: $(PKGS) +$(PKGS): golang-test-all-deps + go get -d -t $@ + $(call golang-test-all,$@) diff --git a/vendor/github.com/TykTechnologies/leakybucket/README.md b/vendor/github.com/TykTechnologies/leakybucket/README.md new file mode 100644 index 00000000000..741035dee5b --- /dev/null +++ b/vendor/github.com/TykTechnologies/leakybucket/README.md @@ -0,0 +1,24 @@ +## leakybucket + +Leaky bucket implementation in Go with your choice of data storage layer. + +## Why + +[Leaky buckets](https://en.wikipedia.org/wiki/Leaky_bucket) are useful in a number of settings, especially rate limiting. + +## Documentation + +[![GoDoc](https://godoc.org/github.com/Clever/leakybucket?status.png)](https://godoc.org/github.com/Clever/leakybucket). + +## Tests + +leakybucket is built and tested against Go 1.5. +Ensure this is the version of Go you're running with `go version`. +Make sure your GOPATH is set, e.g. `export GOPATH=~/go`. +Clone the repository to `$GOPATH/src/github.com/Clever/leakybucket`. 
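+
+(Aside, before the test steps continue: a minimal usage sketch of the
+interfaces defined in `bucket.go`, using the in-memory `memorycache` backend
+vendored below. Illustrative only.)
+
+```go
+package main
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/TykTechnologies/leakybucket"
+	"github.com/TykTechnologies/leakybucket/memorycache"
+)
+
+func main() {
+	storage := memorycache.New()
+	// A per-client bucket whose full capacity drains once per minute.
+	bucket, err := storage.Create("client-42", 10, time.Minute)
+	if err != nil {
+		panic(err)
+	}
+	if _, err := bucket.Add(1); err == leakybucket.ErrorFull {
+		// The add would exceed the remaining capacity: rate limit the caller.
+		fmt.Println("limited until", bucket.Reset())
+	}
+}
+```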
+
+If you have done all of the above, then you should be able to run
+
+```
+make test
+```
diff --git a/vendor/github.com/TykTechnologies/leakybucket/bucket.go b/vendor/github.com/TykTechnologies/leakybucket/bucket.go
new file mode 100644
index 00000000000..73e5dc36988
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/leakybucket/bucket.go
@@ -0,0 +1,40 @@
+package leakybucket
+
+import (
+	"errors"
+	"time"
+)
+
+var (
+	// ErrorFull is returned when the amount requested to add exceeds the remaining space in the bucket.
+	ErrorFull = errors.New("add exceeds free capacity")
+)
+
+// Bucket interface for interacting with leaky buckets: https://en.wikipedia.org/wiki/Leaky_bucket
+type Bucket interface {
+	// Capacity of the bucket.
+	Capacity() uint
+
+	// Remaining space in the bucket.
+	Remaining() uint
+
+	// Reset returns when the bucket will be drained.
+	Reset() time.Time
+
+	// Add to the bucket. Returns bucket state after adding.
+	Add(uint) (BucketState, error)
+}
+
+// BucketState is a snapshot of a bucket's properties.
+type BucketState struct {
+	Capacity  uint
+	Remaining uint
+	Reset     time.Time
+}
+
+// Storage interface for generating buckets keyed by a string.
+type Storage interface {
+	// Create a bucket with a name, capacity, and rate.
+	// rate is how long it takes for full capacity to drain.
+	Create(name string, capacity uint, rate time.Duration) (Bucket, error)
+}
diff --git a/vendor/github.com/TykTechnologies/leakybucket/doc.go b/vendor/github.com/TykTechnologies/leakybucket/doc.go
new file mode 100644
index 00000000000..8182fc70b82
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/leakybucket/doc.go
@@ -0,0 +1,2 @@
+// Package leakybucket provides a leaky bucket implementation with support for various backends.
+package leakybucket
diff --git a/vendor/github.com/TykTechnologies/leakybucket/golang.mk b/vendor/github.com/TykTechnologies/leakybucket/golang.mk
new file mode 100644
index 00000000000..8a6e106f646
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/leakybucket/golang.mk
@@ -0,0 +1,133 @@
+# This is the default Clever Golang Makefile.
+# Please do not alter this file directly.
+GOLANG_MK_VERSION := 0.1.0
+
+SHELL := /bin/bash
+.PHONY: golang-godep-vendor golang-test-deps $(GODEP)
+
+# This block checks and confirms that the proper Go toolchain version is installed.
+# arg1: golang version
+define golang-version-check
+GOVERSION := $(shell go version | grep $(1))
+_ := $(if \
+		$(shell go version | grep $(1)), \
+		@echo "", \
+		$(error "must be running Go version $(1)"))
+endef
+
+export GO15VENDOREXPERIMENT=1
+
+# FGT is a utility that exits with 1 whenever any stderr/stdout output is received.
+FGT := $(GOPATH)/bin/fgt
+$(FGT):
+	go get github.com/GeertJohan/fgt
+
+# Godep is a tool used to manage Golang dependencies in the style of the Go 1.5
+# vendoring experiment.
+GODEP := $(GOPATH)/bin/godep
+$(GODEP):
+	go get -u github.com/tools/godep
+
+# Golint is a tool for linting Golang code for common errors.
+GOLINT := $(GOPATH)/bin/golint
+$(GOLINT):
+	go get github.com/golang/lint/golint
+
+# golang-godep-vendor-deps installs all dependencies needed for different test cases.
+golang-godep-vendor-deps: $(GODEP)
+
+# golang-godep-vendor is a target for saving dependencies with the godep tool
+# to the vendor/ directory. All nested vendor/ directories are deleted as they
+# are not handled well by the Go toolchain.
+# arg1: pkg path +define golang-godep-vendor +$(GODEP) save $(1) +@# remove any nested vendor directories +find vendor/ -path '*/vendor' -type d | xargs -IX rm -r X +endef + +# golang-fmt-deps requires the FGT tool for checking output +golang-fmt-deps: $(FGT) + +# golang-fmt checks that all golang files in the pkg are formatted correctly. +# arg1: pkg path +define golang-fmt +@echo "FORMATTING $(1)..." +@$(FGT) gofmt -l=true $(GOPATH)/src/$(1)/*.go +endef + +# golang-lint-deps requires the golint tool for golang linting. +golang-lint-deps: $(GOLINT) + +# golang-lint calls golint on all golang files in the pkg. +# arg1: pkg path +define golang-lint +@echo "LINTING $(1)..." +@$(GOLINT) $(GOPATH)/src/$(1)/*.go +endef + +# golang-lint-deps-strict requires the golint tool for golang linting. +golang-lint-deps-strict: $(GOLINT) $(FGT) + +# golang-lint-strict calls golint on all golang files in the pkg and fails if any lint +# errors are found. +# arg1: pkg path +define golang-lint-strict +@echo "LINTING $(1)..." +@$(FGT) $(GOLINT) $(GOPATH)/src/$(1)/*.go +endef + +# golang-test-deps is here for consistency +golang-test-deps: + +# golang-test uses the Go toolchain to run all tests in the pkg. +# arg1: pkg path +define golang-test +@echo "TESTING $(1)..." +@go test -v $(1) +endef + +# golang-test-strict-deps is here for consistency +golang-test-strict-deps: + +# golang-test-strict uses the Go toolchain to run all tests in the pkg with the race flag +# arg1: pkg path +define golang-test-strict +@echo "TESTING $(1)..." +@go test -v -race $(1) +endef + +# golang-vet-deps is here for consistency +golang-vet-deps: + +# golang-vet uses the Go toolchain to vet all the pkg for common mistakes. +# arg1: pkg path +define golang-vet +@echo "VETTING $(1)..." +@go vet $(GOPATH)/src/$(1)/*.go +endef + +# golang-test-all-deps installs all dependencies needed for different test cases. +golang-test-all-deps: golang-fmt-deps golang-lint-deps golang-test-deps golang-vet-deps + +# golang-test-all calls fmt, lint, vet and test on the specified pkg. +# arg1: pkg path +define golang-test-all +$(call golang-fmt,$(1)) +$(call golang-lint,$(1)) +$(call golang-vet,$(1)) +$(call golang-test,$(1)) +endef + +# golang-test-all-strict-deps: installs all dependencies needed for different test cases. +golang-test-all-strict-deps: golang-fmt-deps golang-lint-deps-strict golang-test-strict-deps golang-vet-deps + +# golang-test-all-strict calls fmt, lint, vet and test on the specified pkg with strict +# requirements that no errors are thrown while linting. 
+# arg1: pkg path
+define golang-test-all-strict
+$(call golang-fmt,$(1))
+$(call golang-lint-strict,$(1))
+$(call golang-vet,$(1))
+$(call golang-test-strict,$(1))
+endef
diff --git a/vendor/github.com/TykTechnologies/leakybucket/memorycache/cache.go b/vendor/github.com/TykTechnologies/leakybucket/memorycache/cache.go
new file mode 100644
index 00000000000..361b97b5e3e
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/leakybucket/memorycache/cache.go
@@ -0,0 +1,84 @@
+package memorycache
+
+import (
+	"sync"
+	"time"
+)
+
+// Cache is a synchronised map of items that auto-expire once stale.
+type Cache struct {
+	mutex sync.RWMutex
+	ttl   time.Duration
+	items map[string]*Item
+}
+
+// Set is a thread-safe way to add new items to the map.
+func (cache *Cache) Set(key string, data *bucket) {
+	cache.mutex.Lock()
+	item := &Item{data: data}
+	item.touch(cache.ttl)
+	cache.items[key] = item
+	cache.mutex.Unlock()
+}
+
+// Get is a thread-safe way to look up items.
+// Every lookup also touches the item, hence extending its life.
+func (cache *Cache) Get(key string) (data *bucket, found bool) {
+	cache.mutex.Lock()
+	item, exists := cache.items[key]
+	if !exists || item.expired() {
+		data = &bucket{}
+		found = false
+	} else {
+		item.touch(cache.ttl)
+		data = item.data
+		found = true
+	}
+	cache.mutex.Unlock()
+	return
+}
+
+// Count returns the number of items in the cache
+// (helpful for tracking memory leaks).
+func (cache *Cache) Count() int {
+	cache.mutex.RLock()
+	count := len(cache.items)
+	cache.mutex.RUnlock()
+	return count
+}
+
+func (cache *Cache) cleanup() {
+	cache.mutex.Lock()
+	for key, item := range cache.items {
+		if item.expired() {
+			delete(cache.items, key)
+		}
+	}
+	cache.mutex.Unlock()
+}
+
+func (cache *Cache) startCleanupTimer() {
+	duration := cache.ttl
+	if duration < time.Second {
+		duration = time.Second
+	}
+	ticker := time.Tick(duration)
+	go (func() {
+		for {
+			select {
+			case <-ticker:
+				cache.cleanup()
+			}
+		}
+	})()
+}
+
+// NewCache is a helper to create an instance of the Cache struct.
+func NewCache(duration time.Duration) *Cache {
+	cache := &Cache{
+		ttl:   duration,
+		items: map[string]*Item{},
+	}
+	cache.startCleanupTimer()
+	return cache
+}
diff --git a/vendor/github.com/TykTechnologies/leakybucket/memorycache/item.go b/vendor/github.com/TykTechnologies/leakybucket/memorycache/item.go
new file mode 100644
index 00000000000..f344d6203ff
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/leakybucket/memorycache/item.go
@@ -0,0 +1,32 @@
+package memorycache
+
+import (
+	"sync"
+	"time"
+)
+
+// Item represents a record in the cache map.
+type Item struct {
+	sync.RWMutex
+	data    *bucket
+	expires *time.Time
+}
+
+func (item *Item) touch(duration time.Duration) {
+	item.Lock()
+	expiration := time.Now().Add(duration)
+	item.expires = &expiration
+	item.Unlock()
+}
+
+func (item *Item) expired() bool {
+	var value bool
+	item.RLock()
+	if item.expires == nil {
+		value = true
+	} else {
+		value = item.expires.Before(time.Now())
+	}
+	item.RUnlock()
+	return value
+}
diff --git a/vendor/github.com/TykTechnologies/leakybucket/memorycache/memorycache.go b/vendor/github.com/TykTechnologies/leakybucket/memorycache/memorycache.go
new file mode 100644
index 00000000000..82477fff067
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/leakybucket/memorycache/memorycache.go
@@ -0,0 +1,74 @@
+package memorycache
+
+import (
+	"sync"
+	"time"
+
+	"github.com/TykTechnologies/leakybucket"
+)
+
+type bucket struct {
+	capacity  uint
+	remaining uint
+	reset     time.Time
+
rate time.Duration + mutex sync.Mutex +} + +func (b *bucket) Capacity() uint { + return b.capacity +} + +// Remaining space in the bucket. +func (b *bucket) Remaining() uint { + return b.remaining +} + +// Reset returns when the bucket will be drained. +func (b *bucket) Reset() time.Time { + return b.reset +} + +// Add to the bucket. +func (b *bucket) Add(amount uint) (leakybucket.BucketState, error) { + b.mutex.Lock() + defer b.mutex.Unlock() + if time.Now().After(b.reset) { + b.reset = time.Now().Add(b.rate) + b.remaining = b.capacity + } + if amount > b.remaining { + return leakybucket.BucketState{Capacity: b.capacity, Remaining: b.remaining, Reset: b.reset}, leakybucket.ErrorFull + } + b.remaining -= amount + return leakybucket.BucketState{Capacity: b.capacity, Remaining: b.remaining, Reset: b.reset}, nil +} + +// Storage is a non thread-safe in-memory leaky bucket factory. +type Storage struct { + buckets *Cache +} + +// New initializes the in-memory bucket store. +func New() *Storage { + return &Storage{ + buckets: NewCache(10 * time.Minute), + } +} + +// Create a bucket. +func (s *Storage) Create(name string, capacity uint, rate time.Duration) (leakybucket.Bucket, error) { + b, ok := s.buckets.Get(name) + if ok { + return b, nil + } + + b = &bucket{ + capacity: capacity, + remaining: capacity, + reset: time.Now().Add(rate), + rate: rate, + } + s.buckets.Set(name, b) + return b, nil +} diff --git a/vendor/github.com/TykTechnologies/murmur3/.gitignore b/vendor/github.com/TykTechnologies/murmur3/.gitignore new file mode 100644 index 00000000000..00268614f04 --- /dev/null +++ b/vendor/github.com/TykTechnologies/murmur3/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/vendor/github.com/TykTechnologies/murmur3/.go-version b/vendor/github.com/TykTechnologies/murmur3/.go-version new file mode 100644 index 00000000000..6dbba9cf65e --- /dev/null +++ b/vendor/github.com/TykTechnologies/murmur3/.go-version @@ -0,0 +1 @@ +1.12.17 diff --git a/vendor/github.com/TykTechnologies/murmur3/.travis.yml b/vendor/github.com/TykTechnologies/murmur3/.travis.yml new file mode 100644 index 00000000000..9bfca9c8b22 --- /dev/null +++ b/vendor/github.com/TykTechnologies/murmur3/.travis.yml @@ -0,0 +1,7 @@ +language: go + +go: + - 1.x + - master + +script: go test diff --git a/vendor/github.com/TykTechnologies/murmur3/LICENSE b/vendor/github.com/TykTechnologies/murmur3/LICENSE new file mode 100644 index 00000000000..2a46fd75007 --- /dev/null +++ b/vendor/github.com/TykTechnologies/murmur3/LICENSE @@ -0,0 +1,24 @@ +Copyright 2013, Sébastien Paolacci. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the library nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. 
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/TykTechnologies/murmur3/README.md b/vendor/github.com/TykTechnologies/murmur3/README.md
new file mode 100644
index 00000000000..e463678a05e
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/murmur3/README.md
@@ -0,0 +1,86 @@
+murmur3
+=======
+
+[![Build Status](https://travis-ci.org/spaolacci/murmur3.svg?branch=master)](https://travis-ci.org/spaolacci/murmur3)
+
+Native Go implementation of Austin Appleby's third MurmurHash revision (aka
+MurmurHash3).
+
+The reference algorithm has been slightly hacked so as to support the streaming
+mode required by Go's standard [Hash interface](http://golang.org/pkg/hash/#Hash).
+
+
+Benchmarks
+----------
+
+Go tip as of 2014-06-12 (i.e. almost go1.3), core i7 @ 3.4 GHz. All runs
+include hasher instantiation and sequence finalization.
+
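+Before the numbers, a short sketch of that streaming mode (assuming this fork
+keeps the upstream entry points such as `New32` and `Sum32`):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/TykTechnologies/murmur3"
+)
+
+func main() {
+	// One-shot hashing.
+	fmt.Println(murmur3.Sum32([]byte("hello, world")))
+
+	// Streaming via the standard hash.Hash interface: the same digest,
+	// fed incrementally.
+	h := murmur3.New32()
+	h.Write([]byte("hello, "))
+	h.Write([]byte("world"))
+	fmt.Println(h.Sum32())
+}
+```
+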
+
+Benchmark32_1        500000000     7.69 ns/op      130.00 MB/s
+Benchmark32_2        200000000     8.83 ns/op      226.42 MB/s
+Benchmark32_4        500000000     7.99 ns/op      500.39 MB/s
+Benchmark32_8        200000000     9.47 ns/op      844.69 MB/s
+Benchmark32_16       100000000     12.1 ns/op     1321.61 MB/s
+Benchmark32_32       100000000     18.3 ns/op     1743.93 MB/s
+Benchmark32_64        50000000     30.9 ns/op     2071.64 MB/s
+Benchmark32_128       50000000     57.6 ns/op     2222.96 MB/s
+Benchmark32_256       20000000      116 ns/op     2188.60 MB/s
+Benchmark32_512       10000000      226 ns/op     2260.59 MB/s
+Benchmark32_1024       5000000      452 ns/op     2263.73 MB/s
+Benchmark32_2048       2000000      891 ns/op     2296.02 MB/s
+Benchmark32_4096       1000000     1787 ns/op     2290.92 MB/s
+Benchmark32_8192        500000     3593 ns/op     2279.68 MB/s
+Benchmark128_1       100000000     26.1 ns/op       38.33 MB/s
+Benchmark128_2       100000000     29.0 ns/op       69.07 MB/s
+Benchmark128_4        50000000     29.8 ns/op      134.17 MB/s
+Benchmark128_8        50000000     31.6 ns/op      252.86 MB/s
+Benchmark128_16      100000000     26.5 ns/op      603.42 MB/s
+Benchmark128_32      100000000     28.6 ns/op     1117.15 MB/s
+Benchmark128_64       50000000     35.5 ns/op     1800.97 MB/s
+Benchmark128_128      50000000     50.9 ns/op     2515.50 MB/s
+Benchmark128_256      20000000     76.9 ns/op     3330.11 MB/s
+Benchmark128_512      20000000      135 ns/op     3769.09 MB/s
+Benchmark128_1024     10000000      250 ns/op     4094.38 MB/s
+Benchmark128_2048      5000000      477 ns/op     4290.75 MB/s
+Benchmark128_4096      2000000      940 ns/op     4353.29 MB/s
+Benchmark128_8192      1000000     1838 ns/op     4455.47 MB/s
+
+
+ + +
+
+benchmark              Go1.0 MB/s    Go1.1 MB/s  speedup    Go1.2 MB/s  speedup    Go1.3 MB/s  speedup
+Benchmark32_1               98.90        118.59    1.20x        114.79    0.97x        130.00    1.13x
+Benchmark32_2              168.04        213.31    1.27x        210.65    0.99x        226.42    1.07x
+Benchmark32_4              414.01        494.19    1.19x        490.29    0.99x        500.39    1.02x
+Benchmark32_8              662.19        836.09    1.26x        836.46    1.00x        844.69    1.01x
+Benchmark32_16             917.46       1304.62    1.42x       1297.63    0.99x       1321.61    1.02x
+Benchmark32_32            1141.93       1737.54    1.52x       1728.24    0.99x       1743.93    1.01x
+Benchmark32_64            1289.47       2039.51    1.58x       2038.20    1.00x       2071.64    1.02x
+Benchmark32_128           1299.23       2097.63    1.61x       2177.13    1.04x       2222.96    1.02x
+Benchmark32_256           1369.90       2202.34    1.61x       2213.15    1.00x       2188.60    0.99x
+Benchmark32_512           1399.56       2255.72    1.61x       2264.49    1.00x       2260.59    1.00x
+Benchmark32_1024          1410.90       2285.82    1.62x       2270.99    0.99x       2263.73    1.00x
+Benchmark32_2048          1422.14       2297.62    1.62x       2269.59    0.99x       2296.02    1.01x
+Benchmark32_4096          1420.53       2307.81    1.62x       2273.43    0.99x       2290.92    1.01x
+Benchmark32_8192          1424.79       2312.87    1.62x       2286.07    0.99x       2279.68    1.00x
+Benchmark128_1               8.32         30.15    3.62x         30.84    1.02x         38.33    1.24x
+Benchmark128_2              16.38         59.72    3.65x         59.37    0.99x         69.07    1.16x
+Benchmark128_4              32.26        112.96    3.50x        114.24    1.01x        134.17    1.17x
+Benchmark128_8              62.68        217.88    3.48x        218.18    1.00x        252.86    1.16x
+Benchmark128_16            128.47        451.57    3.51x        474.65    1.05x        603.42    1.27x
+Benchmark128_32            246.18        910.42    3.70x        871.06    0.96x       1117.15    1.28x
+Benchmark128_64            449.05       1477.64    3.29x       1449.24    0.98x       1800.97    1.24x
+Benchmark128_128           762.61       2222.42    2.91x       2217.30    1.00x       2515.50    1.13x
+Benchmark128_256          1179.92       3005.46    2.55x       2931.55    0.98x       3330.11    1.14x
+Benchmark128_512          1616.51       3590.75    2.22x       3592.08    1.00x       3769.09    1.05x
+Benchmark128_1024         1964.36       3979.67    2.03x       4034.01    1.01x       4094.38    1.01x
+Benchmark128_2048         2225.07       4156.93    1.87x       4244.17    1.02x       4290.75    1.01x
+Benchmark128_4096         2360.15       4299.09    1.82x       4392.35    1.02x       4353.29    0.99x
+Benchmark128_8192         2411.50       4356.84    1.81x       4480.68    1.03x       4455.47    0.99x
+
+
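The tables above are standard Go benchmark output (`go test -bench`): iterations, ns/op, and the MB/s throughput that the testing package derives when a benchmark calls `b.SetBytes`. As a rough sketch of how rows such as `Benchmark32_128` are produced, assuming only the vendored import path `github.com/TykTechnologies/murmur3` declared in the go.mod below and the exported `Sum32` shown in murmur32.go, a benchmark of the same shape might look like this (it is illustrative, not part of the vendored package):

```go
package murmur3_test

import (
	"testing"

	"github.com/TykTechnologies/murmur3"
)

// benchmark32 hashes a fixed-size input once per iteration; SetBytes
// lets the testing package report the MB/s column seen above.
func benchmark32(b *testing.B, size int) {
	data := make([]byte, size)
	b.SetBytes(int64(size))
	for i := 0; i < b.N; i++ {
		murmur3.Sum32(data)
	}
}

func Benchmark32_128(b *testing.B) { benchmark32(b, 128) }
```

Run with `go test -bench=Benchmark32`; absolute figures will of course differ on current hardware.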
+ diff --git a/vendor/github.com/TykTechnologies/murmur3/go.mod b/vendor/github.com/TykTechnologies/murmur3/go.mod new file mode 100644 index 00000000000..e4446739984 --- /dev/null +++ b/vendor/github.com/TykTechnologies/murmur3/go.mod @@ -0,0 +1,3 @@ +module github.com/TykTechnologies/murmur3
+
+go 1.12 diff --git a/vendor/github.com/TykTechnologies/murmur3/murmur.go b/vendor/github.com/TykTechnologies/murmur3/murmur.go new file mode 100644 index 00000000000..1252cf73a79 --- /dev/null +++ b/vendor/github.com/TykTechnologies/murmur3/murmur.go @@ -0,0 +1,64 @@ +// Copyright 2013, Sébastien Paolacci. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package murmur3 implements Austin Appleby's non-cryptographic MurmurHash3.
+
+	Reference implementation:
+	http://code.google.com/p/smhasher/wiki/MurmurHash3
+
+	History, characteristics and (legacy) perfs:
+	https://sites.google.com/site/murmurhash/
+	https://sites.google.com/site/murmurhash/statistics
+*/
+package murmur3
+
+type bmixer interface {
+	bmix(p []byte) (tail []byte)
+	Size() (n int)
+	reset()
+}
+
+type digest struct {
+	clen int      // Digested input cumulative length.
+	tail []byte   // 0 to Size()-1 bytes view of `buf'.
+	buf  [16]byte // Expected (but not required) to be Size() large.
+	seed uint32   // Seed for initializing the hash.
+	bmixer
+}
+
+func (d *digest) BlockSize() int { return 1 }
+
+func (d *digest) Write(p []byte) (n int, err error) {
+	n = len(p)
+	d.clen += n
+
+	if len(d.tail) > 0 {
+		// Stick back pending bytes.
+		nfree := d.Size() - len(d.tail) // nfree ∈ [1, d.Size()-1].
+		if nfree < len(p) {
+			// One full block can be formed.
+			block := append(d.tail, p[:nfree]...)
+			p = p[nfree:]
+			_ = d.bmix(block) // No tail.
+		} else {
+			// Tail's buf is large enough to prevent reallocs.
+			p = append(d.tail, p...)
+		}
+	}
+
+	d.tail = d.bmix(p)
+
+	// Keep own copy of the 0 to Size()-1 pending bytes.
+	nn := copy(d.buf[:], d.tail)
+	d.tail = d.buf[:nn]
+
+	return n, nil
+}
+
+func (d *digest) Reset() {
+	d.clen = 0
+	d.tail = nil
+	d.bmixer.reset()
+} diff --git a/vendor/github.com/TykTechnologies/murmur3/murmur128.go b/vendor/github.com/TykTechnologies/murmur3/murmur128.go new file mode 100644 index 00000000000..a4b618b5f3d --- /dev/null +++ b/vendor/github.com/TykTechnologies/murmur3/murmur128.go @@ -0,0 +1,203 @@ +package murmur3
+
+import (
+	//"encoding/binary"
+	"hash"
+	"unsafe"
+)
+
+const (
+	c1_128 = 0x87c37b91114253d5
+	c2_128 = 0x4cf5ad432745937f
+)
+
+// Make sure interfaces are correctly implemented.
+var (
+	_ hash.Hash = new(digest128)
+	_ Hash128   = new(digest128)
+	_ bmixer    = new(digest128)
+)
+
+// Hash128 represents a 128-bit hasher.
+// Hack: the standard API doesn't define any Hash128 interface.
+type Hash128 interface {
+	hash.Hash
+	Sum128() (uint64, uint64)
+}
+
+// digest128 represents a partial evaluation of a 128-bit hash.
+type digest128 struct {
+	digest
+	h1 uint64 // Unfinalized running hash part 1.
+	h2 uint64 // Unfinalized running hash part 2.
+} + +// New128 returns a 128-bit hasher +func New128() Hash128 { return New128WithSeed(0) } + +// New128WithSeed returns a 128-bit hasher set with explicit seed value +func New128WithSeed(seed uint32) Hash128 { + d := new(digest128) + d.seed = seed + d.bmixer = d + d.Reset() + return d +} + +func (d *digest128) Size() int { return 16 } + +func (d *digest128) reset() { d.h1, d.h2 = uint64(d.seed), uint64(d.seed) } + +func (d *digest128) Sum(b []byte) []byte { + h1, h2 := d.Sum128() + return append(b, + byte(h1>>56), byte(h1>>48), byte(h1>>40), byte(h1>>32), + byte(h1>>24), byte(h1>>16), byte(h1>>8), byte(h1), + + byte(h2>>56), byte(h2>>48), byte(h2>>40), byte(h2>>32), + byte(h2>>24), byte(h2>>16), byte(h2>>8), byte(h2), + ) +} + +func (d *digest128) bmix(p []byte) (tail []byte) { + h1, h2 := d.h1, d.h2 + + nblocks := len(p) / 16 + for i := 0; i < nblocks; i++ { + t := (*[2]uint64)(unsafe.Pointer(&p[i*16])) + k1, k2 := t[0], t[1] + + k1 *= c1_128 + k1 = (k1 << 31) | (k1 >> 33) // rotl64(k1, 31) + k1 *= c2_128 + h1 ^= k1 + + h1 = (h1 << 27) | (h1 >> 37) // rotl64(h1, 27) + h1 += h2 + h1 = h1*5 + 0x52dce729 + + k2 *= c2_128 + k2 = (k2 << 33) | (k2 >> 31) // rotl64(k2, 33) + k2 *= c1_128 + h2 ^= k2 + + h2 = (h2 << 31) | (h2 >> 33) // rotl64(h2, 31) + h2 += h1 + h2 = h2*5 + 0x38495ab5 + } + d.h1, d.h2 = h1, h2 + return p[nblocks*d.Size():] +} + +func (d *digest128) Sum128() (h1, h2 uint64) { + + h1, h2 = d.h1, d.h2 + + var k1, k2 uint64 + switch len(d.tail) & 15 { + case 15: + k2 ^= uint64(d.tail[14]) << 48 + fallthrough + case 14: + k2 ^= uint64(d.tail[13]) << 40 + fallthrough + case 13: + k2 ^= uint64(d.tail[12]) << 32 + fallthrough + case 12: + k2 ^= uint64(d.tail[11]) << 24 + fallthrough + case 11: + k2 ^= uint64(d.tail[10]) << 16 + fallthrough + case 10: + k2 ^= uint64(d.tail[9]) << 8 + fallthrough + case 9: + k2 ^= uint64(d.tail[8]) << 0 + + k2 *= c2_128 + k2 = (k2 << 33) | (k2 >> 31) // rotl64(k2, 33) + k2 *= c1_128 + h2 ^= k2 + + fallthrough + + case 8: + k1 ^= uint64(d.tail[7]) << 56 + fallthrough + case 7: + k1 ^= uint64(d.tail[6]) << 48 + fallthrough + case 6: + k1 ^= uint64(d.tail[5]) << 40 + fallthrough + case 5: + k1 ^= uint64(d.tail[4]) << 32 + fallthrough + case 4: + k1 ^= uint64(d.tail[3]) << 24 + fallthrough + case 3: + k1 ^= uint64(d.tail[2]) << 16 + fallthrough + case 2: + k1 ^= uint64(d.tail[1]) << 8 + fallthrough + case 1: + k1 ^= uint64(d.tail[0]) << 0 + k1 *= c1_128 + k1 = (k1 << 31) | (k1 >> 33) // rotl64(k1, 31) + k1 *= c2_128 + h1 ^= k1 + } + + h1 ^= uint64(d.clen) + h2 ^= uint64(d.clen) + + h1 += h2 + h2 += h1 + + h1 = fmix64(h1) + h2 = fmix64(h2) + + h1 += h2 + h2 += h1 + + return h1, h2 +} + +func fmix64(k uint64) uint64 { + k ^= k >> 33 + k *= 0xff51afd7ed558ccd + k ^= k >> 33 + k *= 0xc4ceb9fe1a85ec53 + k ^= k >> 33 + return k +} + +/* +func rotl64(x uint64, r byte) uint64 { + return (x << r) | (x >> (64 - r)) +} +*/ + +// Sum128 returns the MurmurHash3 sum of data. It is equivalent to the +// following sequence (without the extra burden and the extra allocation): +// hasher := New128() +// hasher.Write(data) +// return hasher.Sum128() +func Sum128(data []byte) (h1 uint64, h2 uint64) { return Sum128WithSeed(data, 0) } + +// Sum128WithSeed returns the MurmurHash3 sum of data. 
It is equivalent to the
+// following sequence (without the extra burden and the extra allocation):
+// hasher := New128WithSeed(seed)
+// hasher.Write(data)
+// return hasher.Sum128()
+func Sum128WithSeed(data []byte, seed uint32) (h1 uint64, h2 uint64) {
+	d := &digest128{h1: uint64(seed), h2: uint64(seed)}
+	d.seed = seed
+	d.tail = d.bmix(data)
+	d.clen = len(data)
+	return d.Sum128()
+} diff --git a/vendor/github.com/TykTechnologies/murmur3/murmur32.go b/vendor/github.com/TykTechnologies/murmur3/murmur32.go new file mode 100644 index 00000000000..bc89d268a3c --- /dev/null +++ b/vendor/github.com/TykTechnologies/murmur3/murmur32.go @@ -0,0 +1,154 @@ +package murmur3
+
+// http://code.google.com/p/guava-libraries/source/browse/guava/src/com/google/common/hash/Murmur3_32HashFunction.java
+
+import (
+	"hash"
+	"unsafe"
+)
+
+// Make sure interfaces are correctly implemented.
+var (
+	_ hash.Hash   = new(digest32)
+	_ hash.Hash32 = new(digest32)
+)
+
+const (
+	c1_32 uint32 = 0xcc9e2d51
+	c2_32 uint32 = 0x1b873593
+)
+
+// digest32 represents a partial evaluation of a 32-bit hash.
+type digest32 struct {
+	digest
+	h1 uint32 // Unfinalized running hash.
+}
+
+// New32 returns a 32-bit hasher.
+func New32() hash.Hash32 {
+	d := new(digest32)
+	d.bmixer = d
+	d.Reset()
+	return d
+}
+
+func (d *digest32) Size() int { return 4 }
+
+func (d *digest32) reset() { d.h1 = 0 }
+
+func (d *digest32) Sum(b []byte) []byte {
+	h := d.h1
+	return append(b, byte(h>>24), byte(h>>16), byte(h>>8), byte(h))
+}
+
+// Digest as many blocks as possible.
+func (d *digest32) bmix(p []byte) (tail []byte) {
+	h1 := d.h1
+
+	nblocks := len(p) / 4
+	for i := 0; i < nblocks; i++ {
+		k1 := *(*uint32)(unsafe.Pointer(&p[i*4]))
+
+		k1 *= c1_32
+		k1 = (k1 << 15) | (k1 >> 17) // rotl32(k1, 15)
+		k1 *= c2_32
+
+		h1 ^= k1
+		h1 = (h1 << 13) | (h1 >> 19) // rotl32(h1, 13)
+		h1 = h1*5 + 0xe6546b64
+	}
+	d.h1 = h1
+	return p[nblocks*d.Size():]
+}
+
+func (d *digest32) Sum32() (h1 uint32) {
+
+	h1 = d.h1
+
+	var k1 uint32
+	switch len(d.tail) & 3 {
+	case 3:
+		k1 ^= uint32(d.tail[2]) << 16
+		fallthrough
+	case 2:
+		k1 ^= uint32(d.tail[1]) << 8
+		fallthrough
+	case 1:
+		k1 ^= uint32(d.tail[0])
+		k1 *= c1_32
+		k1 = (k1 << 15) | (k1 >> 17) // rotl32(k1, 15)
+		k1 *= c2_32
+		h1 ^= k1
+	}
+
+	h1 ^= uint32(d.clen)
+
+	h1 ^= h1 >> 16
+	h1 *= 0x85ebca6b
+	h1 ^= h1 >> 13
+	h1 *= 0xc2b2ae35
+	h1 ^= h1 >> 16
+
+	return h1
+}
+
+/*
+func rotl32(x uint32, r byte) uint32 {
+	return (x << r) | (x >> (32 - r))
+}
+*/
+
+// Sum32 returns the MurmurHash3 sum of data.
It is equivalent to the +// following sequence (without the extra burden and the extra allocation): +// hasher := New32() +// hasher.Write(data) +// return hasher.Sum32() +func Sum32(data []byte) uint32 { + + var h1 uint32 = 0 + + nblocks := len(data) / 4 + var p uintptr + if len(data) > 0 { + p = uintptr(unsafe.Pointer(&data[0])) + } + p1 := p + uintptr(4*nblocks) + for ; p < p1; p += 4 { + k1 := *(*uint32)(unsafe.Pointer(p)) + + k1 *= c1_32 + k1 = (k1 << 15) | (k1 >> 17) // rotl32(k1, 15) + k1 *= c2_32 + + h1 ^= k1 + h1 = (h1 << 13) | (h1 >> 19) // rotl32(h1, 13) + h1 = h1*5 + 0xe6546b64 + } + + tail := data[nblocks*4:] + + var k1 uint32 + switch len(tail) & 3 { + case 3: + k1 ^= uint32(tail[2]) << 16 + fallthrough + case 2: + k1 ^= uint32(tail[1]) << 8 + fallthrough + case 1: + k1 ^= uint32(tail[0]) + k1 *= c1_32 + k1 = (k1 << 15) | (k1 >> 17) // rotl32(k1, 15) + k1 *= c2_32 + h1 ^= k1 + } + + h1 ^= uint32(len(data)) + + h1 ^= h1 >> 16 + h1 *= 0x85ebca6b + h1 ^= h1 >> 13 + h1 *= 0xc2b2ae35 + h1 ^= h1 >> 16 + + return h1 +} diff --git a/vendor/github.com/TykTechnologies/murmur3/murmur64.go b/vendor/github.com/TykTechnologies/murmur3/murmur64.go new file mode 100644 index 00000000000..65a410ae0b9 --- /dev/null +++ b/vendor/github.com/TykTechnologies/murmur3/murmur64.go @@ -0,0 +1,57 @@ +package murmur3 + +import ( + "hash" +) + +// Make sure interfaces are correctly implemented. +var ( + _ hash.Hash = new(digest64) + _ hash.Hash64 = new(digest64) + _ bmixer = new(digest64) +) + +// digest64 is half a digest128. +type digest64 digest128 + +// New64 returns a 64-bit hasher +func New64() hash.Hash64 { return New64WithSeed(0) } + +// New64WithSeed returns a 64-bit hasher set with explicit seed value +func New64WithSeed(seed uint32) hash.Hash64 { + d := (*digest64)(New128WithSeed(seed).(*digest128)) + return d +} + +func (d *digest64) Sum(b []byte) []byte { + h1 := d.Sum64() + return append(b, + byte(h1>>56), byte(h1>>48), byte(h1>>40), byte(h1>>32), + byte(h1>>24), byte(h1>>16), byte(h1>>8), byte(h1)) +} + +func (d *digest64) Sum64() uint64 { + h1, _ := (*digest128)(d).Sum128() + return h1 +} + +// Sum64 returns the MurmurHash3 sum of data. It is equivalent to the +// following sequence (without the extra burden and the extra allocation): +// hasher := New64() +// hasher.Write(data) +// return hasher.Sum64() +func Sum64(data []byte) uint64 { return Sum64WithSeed(data, 0) } + +// Sum64WithSeed returns the MurmurHash3 sum of data. 
It is equivalent to the +// following sequence (without the extra burden and the extra allocation): +// hasher := New64WithSeed(seed) +// hasher.Write(data) +// return hasher.Sum64() +func Sum64WithSeed(data []byte, seed uint32) uint64 { + d := &digest128{h1: uint64(seed), h2: uint64(seed)} + d.seed = seed + d.tail = d.bmix(data) + d.clen = len(data) + h1, _ := d.Sum128() + return h1 +} diff --git a/vendor/github.com/TykTechnologies/openid2go/LICENSE b/vendor/github.com/TykTechnologies/openid2go/LICENSE new file mode 100644 index 00000000000..a39cc60e8a6 --- /dev/null +++ b/vendor/github.com/TykTechnologies/openid2go/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2015 Emanoel Xavier + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/TykTechnologies/openid2go/openid/LICENSE b/vendor/github.com/TykTechnologies/openid2go/openid/LICENSE new file mode 100644 index 00000000000..a39cc60e8a6 --- /dev/null +++ b/vendor/github.com/TykTechnologies/openid2go/openid/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2015 Emanoel Xavier + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
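Taken together, the vendored murmur3 sources above expose one-shot helpers (`Sum32`, `Sum64`, `Sum128`, and their `WithSeed` variants) as well as streaming hashers built on Go's standard `hash` interfaces. A minimal usage sketch, assuming only those exported functions and the vendored import path, might be:

```go
package main

import (
	"fmt"

	"github.com/TykTechnologies/murmur3"
)

func main() {
	data := []byte("hello, world")

	// One-shot hashing: no hasher allocation.
	h1, h2 := murmur3.Sum128(data)
	fmt.Printf("Sum128: %#x %#x\n", h1, h2)

	// Streaming through hash.Hash64: incremental writes yield the same
	// digest as hashing the concatenated input in one shot.
	h := murmur3.New64()
	h.Write([]byte("hello, "))
	h.Write([]byte("world"))
	fmt.Printf("Sum64:  %#x\n", h.Sum64())
}
```

Note that, per the sources above, the 64-bit digest is defined as the first half of the 128-bit digest, so `murmur3.Sum64(data)` equals the streaming result printed here.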
diff --git a/vendor/github.com/TykTechnologies/openid2go/openid/README.md b/vendor/github.com/TykTechnologies/openid2go/openid/README.md new file mode 100644 index 00000000000..c31447a9ad8 --- /dev/null +++ b/vendor/github.com/TykTechnologies/openid2go/openid/README.md @@ -0,0 +1,86 @@ +Go OpenId
+===========
+[![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/emanoelxavier/openid2go/openid)
+[![license](http://img.shields.io/badge/license-MIT-yellowgreen.svg?style=flat)](https://raw.githubusercontent.com/emanoelxavier/openid2go/master/openid/LICENSE)
+## Summary
+
+A Go package that implements web service middlewares for authenticating identities represented by OpenID Connect (OIDC) ID Tokens.
+
+"OpenID Connect 1.0 is a simple identity layer on top of the OAuth 2.0 protocol. It enables Clients to verify the identity of the End-User based on the authentication performed by an Authorization Server" - [OpenID Connect](http://openid.net/specs/openid-connect-core-1_0.html)
+
+## Installation
+
+go get github.com/emanoelxavier/openid2go/openid
+
+## Example
+This example demonstrates how to use this package to validate incoming ID Tokens. It initializes the Configuration with the desired providers (OPs) and registers two middlewares: openid.Authenticate and openid.AuthenticateUser. The former performs the token validation while the latter, in addition to that, will forward the user information to the next handler.
+
+```go
+import (
+	"fmt"
+	"net/http"
+
+	"github.com/emanoelxavier/openid2go/openid"
+)
+
+func AuthenticatedHandler(w http.ResponseWriter, r *http.Request) {
+	fmt.Fprintln(w, "The user was authenticated!")
+}
+
+func AuthenticatedHandlerWithUser(u *openid.User, w http.ResponseWriter, r *http.Request) {
+	fmt.Fprintf(w, "The user was authenticated! The token was issued by %v and the user is %+v.", u.Issuer, u)
+}
+
+func Example() {
+	configuration, err := openid.NewConfiguration(openid.ProvidersGetter(myGetProviders))
+
+	if err != nil {
+		panic(err)
+	}
+
+	http.Handle("/user", openid.AuthenticateUser(configuration, openid.UserHandlerFunc(AuthenticatedHandlerWithUser)))
+	http.Handle("/authn", openid.Authenticate(configuration, http.HandlerFunc(AuthenticatedHandler)))
+
+	http.ListenAndServe(":5100", nil)
+}
+
+func myGetProviders() ([]openid.Provider, error) {
+	provider, err := openid.NewProvider("https://providerissuer", []string{"myClientID"})
+
+	if err != nil {
+		return nil, err
+	}
+
+	return []openid.Provider{provider}, nil
+}
+```
+This example is also available in the documentation of this package; for more details see [GoDoc](https://godoc.org/github.com/emanoelxavier/openid2go/openid).
+
+## Tests
+
+#### Unit Tests
+```sh
+go test github.com/emanoelxavier/openid2go/openid
+```
+
+#### Integration Tests
+In addition to unit tests, this package also comes with integration tests that will validate real ID Tokens issued by real OIDC providers. The following command will run those tests:
+
+```sh
+go test -tags integration github.com/emanoelxavier/openid2go/openid -issuer=[issuer] -clientID=[clientID] -idToken=[idToken]
+```
+
+Replace [issuer], [clientID] and [idToken] with the information from an identity provider of your choice.
+
+For a quick spin you can use tokens issued by Google for the [Google OAuth PlayGround](https://developers.google.com/oauthplayground): enter "openid" (without quotes) in the scope field and copy the issued ID Token.
For this provider and client, the values will be:
+
+```sh
+go test -tags integration github.com/emanoelxavier/openid2go/openid -issuer=https://accounts.google.com -clientID=407408718192.apps.googleusercontent.com -idToken=copiedIDToken
+```
+
+## Contributing
+
+1. Open an issue if you find a bug or have a feature request.
+2. Discuss.
+3. Branch off, write the fix with test(s) and commit, referencing the issue.
+4. Make a pull request. \ No newline at end of file diff --git a/vendor/github.com/TykTechnologies/openid2go/openid/configuration.go b/vendor/github.com/TykTechnologies/openid2go/openid/configuration.go new file mode 100644 index 00000000000..b49c90d0184 --- /dev/null +++ b/vendor/github.com/TykTechnologies/openid2go/openid/configuration.go @@ -0,0 +1,6 @@ +package openid
+
+type configuration struct {
+	Issuer  string `json:"issuer"`
+	JwksUri string `json:"jwks_uri"`
+} diff --git a/vendor/github.com/TykTechnologies/openid2go/openid/configurationprovider.go b/vendor/github.com/TykTechnologies/openid2go/openid/configurationprovider.go new file mode 100644 index 00000000000..66a846af717 --- /dev/null +++ b/vendor/github.com/TykTechnologies/openid2go/openid/configurationprovider.go @@ -0,0 +1,54 @@ +package openid
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+	"strings"
+)
+
+const wellKnownOpenIdConfiguration = "/.well-known/openid-configuration"
+
+type httpGetFunc func(url string) (*http.Response, error)
+type decodeResponseFunc func(io.Reader, interface{}) error
+
+type configurationGetter interface { // Getter
+	getConfiguration(string) (configuration, error)
+}
+
+type httpConfigurationProvider struct { //configurationProvider
+	getConfig    httpGetFunc        //httpGetter
+	decodeConfig decodeResponseFunc //responseDecoder
+}
+
+func newHTTPConfigurationProvider(gc httpGetFunc, dc decodeResponseFunc) *httpConfigurationProvider {
+	return &httpConfigurationProvider{gc, dc}
+}
+
+func jsonDecodeResponse(r io.Reader, v interface{}) error {
+	return json.NewDecoder(r).Decode(v)
+}
+
+func (httpProv *httpConfigurationProvider) getConfiguration(issuer string) (configuration, error) {
+	// Workaround for tokens issued by google
+	if issuer == "accounts.google.com" {
+		issuer = "https://" + issuer
+	}
+
+	configurationUri := strings.TrimSuffix(issuer, "/") + wellKnownOpenIdConfiguration
+	var config configuration
+	resp, err := httpProv.getConfig(configurationUri)
+	if err != nil {
+		return config, &ValidationError{Code: ValidationErrorGetOpenIdConfigurationFailure, Message: fmt.Sprintf("Failure while contacting the configuration endpoint %v.", configurationUri), Err: err, HTTPStatus: http.StatusUnauthorized}
+	}
+
+	defer resp.Body.Close()
+
+	if err := httpProv.decodeConfig(resp.Body, &config); err != nil {
+		return config, &ValidationError{Code: ValidationErrorDecodeOpenIdConfigurationFailure, Message: fmt.Sprintf("Failure while decoding the configuration retrieved from endpoint %v.", configurationUri), Err: err, HTTPStatus: http.StatusUnauthorized}
+	}
+
+	return config, nil
+
+} diff --git a/vendor/github.com/TykTechnologies/openid2go/openid/doc.go b/vendor/github.com/TykTechnologies/openid2go/openid/doc.go new file mode 100644 index 00000000000..244eab61f60 --- /dev/null +++ b/vendor/github.com/TykTechnologies/openid2go/openid/doc.go @@ -0,0 +1,129 @@ +/*Package openid implements web service middlewares for authenticating identities represented by
+OpenID Connect (OIDC) ID Tokens.
+For details on OIDC go to http://openid.net/specs/openid-connect-core-1_0.html
+
+The middlewares will: extract the ID Token from the request; retrieve the OIDC provider (OP)
+configuration and signing keys; validate the token and provide the user identity and claims to the
+underlying web service.
+
+The Basics
+
+At the core of this package are the Authenticate and AuthenticateUser middlewares. To use either one
+of them you will need an instance of the Configuration type; to create one you use NewConfiguration.
+
+	func Authenticate(conf *Configuration, h http.Handler) http.Handler
+	func AuthenticateUser(conf *Configuration, h UserHandler) http.Handler
+	NewConfiguration(options ...option) (*Configuration, error)
+
+	// options:
+
+	func ErrorHandler(eh ErrorHandlerFunc) func(*Configuration) error
+	func ProvidersGetter(pg GetProvidersFunc) func(*Configuration) error
+
+	// extension points:
+
+	type ErrorHandlerFunc func(error, http.ResponseWriter, *http.Request) bool
+	type GetProvidersFunc func() ([]Provider, error)
+
+The Example below demonstrates these elements working together.
+
+Token Parsing
+
+Both Authenticate and AuthenticateUser middlewares expect the incoming requests to have an HTTP
+Authorization header with the content 'Bearer [idToken]' where [idToken] is a valid ID Token issued by
+an OP. For instance:
+
+	Authorization: Bearer eyJhbGciOiJSUzI1NiIsImtpZCI6...
+
+By default, requests that do not contain an Authorization header with this content will not be forwarded
+to the next HTTP handler in the pipeline; instead they will fail back to the client with HTTP status
+400/Bad Request.
+
+Token Validation
+
+Once parsed, the ID Token will be validated:
+
+	1) Is the token a valid jwt?
+	2) Is the token issued by a known OP?
+	3) Is the token issued for a known client?
+	4) Is the token valid at the time ('not use before' and 'expire at' claims)?
+	5) Is the token signed accordingly?
+
+The signature validation is done with the public keys retrieved from the jwks_uri published by the OP in
+its OIDC metadata (https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata).
+
+The token's issuer and audiences will be verified using a collection of the type Provider. This
+collection is retrieved by calling the implementation of the function GetProvidersFunc registered with
+the Configuration.
+If the token issuer matches the Issuer of any of the providers and the token audience matches at least
+one of the ClientIDs of the respective provider then the token is considered valid.
+
+	func myGetProviders() ([]openid.Provider, error) {
+		p, err := openid.NewProvider("https://accounts.google.com",
+			[]string{"407408718192.apps.googleusercontent.com"})
+		// ....
+		return []openid.Provider{p}, nil
+	}
+
+	c, _ := openid.NewConfiguration(openid.ProvidersGetter(myGetProviders))
+
+In the code above, only tokens with Issuer claim ('iss') https://accounts.google.com and Audiences claim
+('aud') containing "407408718192.apps.googleusercontent.com" can be valid.
+
+By default, when the token validation fails for any reason the requests will not be forwarded to the next
+handler in the pipeline; instead they will fail back to the client with HTTP status 401/Unauthorized.
+
+Error Handling
+
+The default behavior of the Authenticate and AuthenticateUser middlewares upon error conditions is:
+the execution pipeline is stopped (the next handler will not be executed), the response will contain
+status 400 when a token is not found and 401 when it is invalid, and the response will also contain the
+error message.
+This behavior can be changed by implementing a function of type ErrorHandlerFunc and registering it
+using ErrorHandler with the Configuration.
+
+	type ErrorHandlerFunc func(error, http.ResponseWriter, *http.Request) bool
+	func ErrorHandler(eh ErrorHandlerFunc) func(*Configuration) error
+
+For instance:
+
+	func myErrorHandler(e error, w http.ResponseWriter, r *http.Request) bool {
+		fmt.Fprint(w, e.Error())
+		return false
+	}
+
+	c, _ := openid.NewConfiguration(openid.ProvidersGetter(myGetProviders),
+		openid.ErrorHandler(myErrorHandler))
+
+In the code above myErrorHandler adds the error message to the response and lets the execution
+continue to the next handler in the pipeline (returning false) for all error types.
+You can use this extension point to fine-tune what happens when a specific error is returned by your
+implementation of the GetProvidersFunc or even for the error types and codes exported by this
+package:
+
+	type ValidationError struct
+	type ValidationErrorCode uint32
+	type SetupError struct
+	type SetupErrorCode uint32
+
+Authenticate vs AuthenticateUser
+
+Both middlewares Authenticate and AuthenticateUser behave exactly the same way when it comes to
+parsing and validating the ID Token. The only difference is that AuthenticateUser will forward the
+information about the user's identity from the ID Token to the next handler in the pipeline.
+If your service does not need to know the identity of the authenticated user then Authenticate will
+suffice; otherwise your choice is AuthenticateUser.
+In order to receive the User information from the AuthenticateUser the next handler in the pipeline
+must implement the interface UserHandler with the following function:
+
+	ServeHTTPWithUser(*User, http.ResponseWriter, *http.Request)
+
+You can also make use of the function adapter UserHandlerFunc as shown in the example below:
+
+	func myHandlerWithUser(u *openid.User, w http.ResponseWriter, r *http.Request) {
+		fmt.Fprintf(w, "Authenticated! The user is %+v.", u)
+	}
+
+	http.Handle("/user", openid.AuthenticateUser(c, openid.UserHandlerFunc(myHandlerWithUser)))
+*/
+package openid diff --git a/vendor/github.com/TykTechnologies/openid2go/openid/errors.go b/vendor/github.com/TykTechnologies/openid2go/openid/errors.go new file mode 100644 index 00000000000..d6e43e8959a --- /dev/null +++ b/vendor/github.com/TykTechnologies/openid2go/openid/errors.go @@ -0,0 +1,122 @@ +package openid
+
+import (
+	"fmt"
+	"net/http"
+
+	"github.com/golang-jwt/jwt/v4"
+)
+
+// SetupErrorCode is the type of error code that can
+// be returned by the operations done during middleware setup.
+type SetupErrorCode uint32
+
+// Setup error constants.
+const (
+	SetupErrorInvalidIssuer           SetupErrorCode = iota // Invalid issuer provided during setup.
+	SetupErrorInvalidClientIDs                              // Invalid client id collection provided during setup.
+	SetupErrorEmptyProviderCollection                       // Empty collection of providers provided during setup.
+)
+
+// ValidationErrorCode is the type of error code that can
+// be returned by the operations done during token validation.
+type ValidationErrorCode uint32
+
+// Validation error constants.
+const (
+	ValidationErrorAuthorizationHeaderNotFound        ValidationErrorCode = iota // Authorization header not found on request.
+	ValidationErrorAuthorizationHeaderWrongFormat                                // Authorization header unexpected format.
+	ValidationErrorAuthorizationHeaderWrongSchemeName                            // Authorization header unexpected scheme.
+	ValidationErrorJwtValidationFailure                                          // Jwt token validation failed with a known error.
+	ValidationErrorJwtValidationUnknownFailure                                   // Jwt token validation failed with an unknown error.
+	ValidationErrorInvalidAudienceType                                           // Unexpected token audience type.
+	ValidationErrorInvalidAudience                                               // Unexpected token audience content.
+	ValidationErrorAudienceNotFound                                              // Unexpected token audience value. Audience not registered.
+	ValidationErrorInvalidIssuerType                                             // Unexpected token issuer type.
+	ValidationErrorInvalidIssuer                                                 // Unexpected token issuer content.
+	ValidationErrorIssuerNotFound                                                // Unexpected token issuer value. Issuer not registered.
+	ValidationErrorGetOpenIdConfigurationFailure                                 // Failure while retrieving the OIDC configuration.
+	ValidationErrorDecodeOpenIdConfigurationFailure                              // Failure while decoding the OIDC configuration.
+	ValidationErrorGetJwksFailure                                                // Failure while retrieving jwk set.
+	ValidationErrorDecodeJwksFailure                                             // Failure while decoding the jwk set.
+	ValidationErrorEmptyJwk                                                      // Empty jwk returned.
+	ValidationErrorEmptyJwkKey                                                   // Empty jwk key set returned.
+	ValidationErrorMarshallingKey                                                // Error while marshalling the signing key.
+	ValidationErrorKidNotFound                                                   // Key identifier not found.
+	ValidationErrorInvalidSubjectType                                            // Unexpected token subject type.
+	ValidationErrorInvalidSubject                                                // Unexpected token subject content.
+	ValidationErrorSubjectNotFound                                               // Token missing the 'sub' claim.
+	ValidationErrorIdTokenEmpty                                                  // Empty ID token.
+	ValidationErrorEmptyProviders                                                // Empty collection of providers.
+)
+
+const setupErrorMessagePrefix string = "Setup Error."
+const validationErrorMessagePrefix string = "Validation Error."
+
+// SetupError represents the error returned by operations called during
+// middleware setup.
+type SetupError struct {
+	Err     error
+	Code    SetupErrorCode
+	Message string
+}
+
+// Error returns a formatted string containing the error Message.
+func (se SetupError) Error() string {
+	return fmt.Sprintf("Setup error. %v", se.Message)
+}
+
+// ValidationError represents the error returned by operations called during
+// token validation.
+type ValidationError struct {
+	Err        error
+	Code       ValidationErrorCode
+	Message    string
+	HTTPStatus int
+}
+
+// The ErrorHandlerFunc represents the function used to handle errors during token
+// validation. Applications can have their own implementation of this function and
+// register it using the ErrorHandler option. Through this extension point applications
+// can choose what to do upon different error types, for instance return a certain HTTP status code
+// and/or include some detailed message in the response.
+// This function returns false if the next handler registered after the ID Token validation
+// should be executed when an error is found or true if the execution should be stopped.
+type ErrorHandlerFunc func(error, http.ResponseWriter, *http.Request) bool
+
+// Error returns a formatted string containing the error Message.
+func (ve ValidationError) Error() string {
+	return fmt.Sprintf("Validation error.
%v", ve.Message)
+}
+
+// jwtErrorToOpenIdError converts errors of the type *jwt.ValidationError returned during token validation into errors of type *ValidationError.
+func jwtErrorToOpenIdError(e error) *ValidationError {
+	if jwtError, ok := e.(*jwt.ValidationError); ok {
+		if (jwtError.Errors & (jwt.ValidationErrorNotValidYet | jwt.ValidationErrorExpired | jwt.ValidationErrorSignatureInvalid)) != 0 {
+			return &ValidationError{Code: ValidationErrorJwtValidationFailure, Message: "Jwt token validation failed.", HTTPStatus: http.StatusUnauthorized}
+		}
+
+		if (jwtError.Errors & jwt.ValidationErrorMalformed) != 0 {
+			return &ValidationError{Code: ValidationErrorJwtValidationFailure, Message: "Jwt token validation failed.", HTTPStatus: http.StatusBadRequest}
+		}
+
+		if (jwtError.Errors & jwt.ValidationErrorUnverifiable) != 0 {
+			// TODO: improve this once https://github.com/dgrijalva/jwt-go/issues/108 is resolved.
+			// Currently jwt.Parse does not surface errors returned by the KeyFunc.
+			return &ValidationError{Code: ValidationErrorJwtValidationFailure, Message: jwtError.Error(), HTTPStatus: http.StatusUnauthorized}
+		}
+	}
+
+	return &ValidationError{Code: ValidationErrorJwtValidationUnknownFailure, Message: "Jwt token validation failed with unknown error.", HTTPStatus: http.StatusInternalServerError}
+}
+
+func validationErrorToHTTPStatus(e error, rw http.ResponseWriter, req *http.Request) (halt bool) {
+	if verr, ok := e.(*ValidationError); ok {
+		http.Error(rw, verr.Message, verr.HTTPStatus)
+	} else {
+		rw.WriteHeader(http.StatusInternalServerError)
+		fmt.Fprint(rw, e.Error())
+	}
+
+	return true
+} diff --git a/vendor/github.com/TykTechnologies/openid2go/openid/idtokenvalidator.go b/vendor/github.com/TykTechnologies/openid2go/openid/idtokenvalidator.go new file mode 100644 index 00000000000..b92170dc1f6 --- /dev/null +++ b/vendor/github.com/TykTechnologies/openid2go/openid/idtokenvalidator.go @@ -0,0 +1,200 @@ +package openid
+
+import (
+	"fmt"
+	"net/http"
+
+	"github.com/golang-jwt/jwt/v4"
+)
+
+const issuerClaimName = "iss"
+const audiencesClaimName = "aud"
+const subjectClaimName = "sub"
+const keyIDJwtHeaderName = "kid"
+
+type JWTTokenValidator interface {
+	Validate(t string) (jt *jwt.Token, err error)
+}
+
+type jwtParserFunc func(string, jwt.Keyfunc, ...jwt.ParserOption) (*jwt.Token, error)
+
+type idTokenValidator struct {
+	provGetter GetProvidersFunc
+	jwtParser  jwtParserFunc
+	keyGetter  signingKeyGetter
+}
+
+func newIDTokenValidator(pg GetProvidersFunc, jp jwtParserFunc, kg signingKeyGetter) *idTokenValidator {
+	return &idTokenValidator{pg, jp, kg}
+}
+
+func (tv *idTokenValidator) Validate(t string) (*jwt.Token, error) {
+	jt, err := tv.jwtParser(t, tv.getSigningKey)
+	if err != nil {
+		if verr, ok := err.(*jwt.ValidationError); ok {
+			// If the signing key did not match it may be because the in-memory key is outdated.
+			// Renew the cached signing key.
+			if (verr.Errors & jwt.ValidationErrorSignatureInvalid) != 0 {
+				jt, err = tv.jwtParser(t, tv.renewAndGetSigningKey)
+			}
+		}
+	}
+
+	if err != nil {
+		return nil, jwtErrorToOpenIdError(err)
+	}
+
+	return jt, nil
+}
+
+func (tv *idTokenValidator) renewAndGetSigningKey(jt *jwt.Token) (interface{}, error) {
+	// Issuer is already validated when 'getSigningKey' was called.
+ iss := jt.Claims.(jwt.MapClaims)[issuerClaimName].(string) + + err := tv.keyGetter.flushCachedSigningKeys(iss) + + if err != nil { + return nil, err + } + + headerVal, ok := jt.Header[keyIDJwtHeaderName] + + if !ok { + return tv.keyGetter.getSigningKey(iss, "") + } + + switch headerVal.(type) { + case string: + return tv.keyGetter.getSigningKey(iss, headerVal.(string)) + default: + return tv.keyGetter.getSigningKey(iss, "") + } + +} + +func (tv *idTokenValidator) getSigningKey(jt *jwt.Token) (interface{}, error) { + provs, err := tv.provGetter() + if err != nil { + return nil, err + } + + if err := providers(provs).validate(); err != nil { + return nil, err + } + + p, err := validateIssuer(jt, provs) + if err != nil { + return nil, err + } + + _, err = validateAudiences(jt, p) + if err != nil { + return nil, err + } + _, err = validateSubject(jt) + if err != nil { + return nil, err + } + + var kid string = "" + + if jt.Header[keyIDJwtHeaderName] != nil { + kid = jt.Header[keyIDJwtHeaderName].(string) + } + + return tv.keyGetter.getSigningKey(p.Issuer, kid) +} + +func validateIssuer(jt *jwt.Token, ps []Provider) (*Provider, error) { + issuerClaim := getIssuer(jt) + var ti string + + if iss, ok := issuerClaim.(string); ok { + ti = iss + } else { + return nil, &ValidationError{Code: ValidationErrorInvalidIssuerType, Message: fmt.Sprintf("Invalid Issuer type: %T", issuerClaim), HTTPStatus: http.StatusUnauthorized} + } + + if ti == "" { + return nil, &ValidationError{Code: ValidationErrorInvalidIssuer, Message: "The token 'iss' claim was not found or was empty.", HTTPStatus: http.StatusUnauthorized} + } + + // Workaround for tokens issued by google + gi := ti + if gi == "accounts.google.com" { + gi = "https://" + gi + } + + for _, p := range ps { + if ti == p.Issuer || gi == p.Issuer { + return &p, nil + } + } + + return nil, &ValidationError{Code: ValidationErrorIssuerNotFound, Message: fmt.Sprintf("No provider was registered with issuer: %v", ti), HTTPStatus: http.StatusUnauthorized} +} + +func validateSubject(jt *jwt.Token) (string, error) { + subjectClaim := getSubject(jt) + + var ts string + if sub, ok := subjectClaim.(string); ok { + ts = sub + } else { + return ts, &ValidationError{Code: ValidationErrorInvalidSubjectType, Message: fmt.Sprintf("Invalid subject type: %T", subjectClaim), HTTPStatus: http.StatusUnauthorized} + } + + if ts == "" { + return ts, &ValidationError{Code: ValidationErrorInvalidSubject, Message: "The token 'sub' claim was not found or was empty.", HTTPStatus: http.StatusUnauthorized} + } + + return ts, nil +} + +func validateAudiences(jt *jwt.Token, p *Provider) (string, error) { + audiencesClaim, err := getAudiences(jt) + + if err != nil { + return "", err + } + + for _, aud := range p.ClientIDs { + for _, audienceClaim := range audiencesClaim { + ta, ok := audienceClaim.(string) + if !ok { + fmt.Printf("aud type %T \n", audienceClaim) + return "", &ValidationError{Code: ValidationErrorInvalidAudienceType, Message: fmt.Sprintf("Invalid Audiences type: %T", audiencesClaim), HTTPStatus: http.StatusUnauthorized} + } + + if ta == "" { + return "", &ValidationError{Code: ValidationErrorInvalidAudience, Message: "The token 'aud' claim was not found or was empty.", HTTPStatus: http.StatusUnauthorized} + } + + if ta == aud { + return ta, nil + } + } + } + + return "", &ValidationError{Code: ValidationErrorAudienceNotFound, Message: fmt.Sprintf("The provider %v does not have a client id matching any of the token audiences %+v", p.Issuer, audiencesClaim), HTTPStatus: 
http.StatusUnauthorized}
+}
+
+func getAudiences(t *jwt.Token) ([]interface{}, error) {
+	audiencesClaim := t.Claims.(jwt.MapClaims)[audiencesClaimName]
+	if aud, ok := audiencesClaim.(string); ok {
+		return []interface{}{aud}, nil
+	} else if _, ok := audiencesClaim.([]interface{}); ok {
+		return audiencesClaim.([]interface{}), nil
+	}
+
+	return nil, &ValidationError{Code: ValidationErrorInvalidAudienceType, Message: fmt.Sprintf("Invalid Audiences type: %T", audiencesClaim), HTTPStatus: http.StatusUnauthorized}
+
+}
+
+func getIssuer(t *jwt.Token) interface{} {
+	return t.Claims.(jwt.MapClaims)[issuerClaimName]
+}
+
+func getSubject(t *jwt.Token) interface{} {
+	return t.Claims.(jwt.MapClaims)[subjectClaimName]
+} diff --git a/vendor/github.com/TykTechnologies/openid2go/openid/jwksprovider.go b/vendor/github.com/TykTechnologies/openid2go/openid/jwksprovider.go new file mode 100644 index 00000000000..2dc90c61719 --- /dev/null +++ b/vendor/github.com/TykTechnologies/openid2go/openid/jwksprovider.go @@ -0,0 +1,37 @@ +package openid
+
+import (
+	"fmt"
+	"net/http"
+
+	"github.com/go-jose/go-jose/v3"
+)
+
+type jwksGetter interface {
+	getJwkSet(string) (jose.JSONWebKeySet, error)
+}
+
+type httpJwksProvider struct {
+	getJwks    httpGetFunc
+	decodeJwks decodeResponseFunc
+}
+
+func newHTTPJwksProvider(gf httpGetFunc, df decodeResponseFunc) *httpJwksProvider {
+	return &httpJwksProvider{gf, df}
+}
+
+func (httpProv *httpJwksProvider) getJwkSet(url string) (jose.JSONWebKeySet, error) {
+	var jwks jose.JSONWebKeySet
+	resp, err := httpProv.getJwks(url)
+	if err != nil {
+		return jwks, &ValidationError{Code: ValidationErrorGetJwksFailure, Message: fmt.Sprintf("Failure while contacting the jwk endpoint %v: %v", url, err), Err: err, HTTPStatus: http.StatusUnauthorized}
+	}
+
+	defer resp.Body.Close()
+
+	if err := httpProv.decodeJwks(resp.Body, &jwks); err != nil {
+		return jwks, &ValidationError{Code: ValidationErrorDecodeJwksFailure, Message: fmt.Sprintf("Failure while decoding the jwk retrieved from the endpoint %v: %v", url, err), Err: err, HTTPStatus: http.StatusUnauthorized}
+	}
+
+	return jwks, nil
+} diff --git a/vendor/github.com/TykTechnologies/openid2go/openid/middleware.go b/vendor/github.com/TykTechnologies/openid2go/openid/middleware.go new file mode 100644 index 00000000000..3dce068f6ac --- /dev/null +++ b/vendor/github.com/TykTechnologies/openid2go/openid/middleware.go @@ -0,0 +1,177 @@ +package openid
+
+import (
+	"net/http"
+
+	"github.com/golang-jwt/jwt/v4"
+)
+
+// The Configuration contains the entities needed to perform ID token validation.
+// This type should be instantiated at the application startup time.
+type Configuration struct {
+	tokenValidator JWTTokenValidator
+	IDTokenGetter  GetIDTokenFunc
+	errorHandler   ErrorHandlerFunc
+}
+
+type option func(*Configuration) error
+
+// The NewConfiguration function creates a new instance of Configuration and returns a pointer to it.
+// This function receives a collection of the function type option. Each of those functions is
+// responsible for setting some part of the returned *Configuration. If any of the option functions
+// returns an error then NewConfiguration will return a nil configuration and that error.
+func NewConfiguration(options ...option) (*Configuration, error) {
+	m := new(Configuration)
+	cp := newHTTPConfigurationProvider(http.Get, jsonDecodeResponse)
+	jp := newHTTPJwksProvider(http.Get, jsonDecodeResponse)
+	ksp := newSigningKeySetProvider(cp, jp, pemEncodePublicKey)
+	kp := newSigningKeyProvider(ksp)
+	m.tokenValidator = newIDTokenValidator(nil, jwt.Parse, kp)
+
+	for _, option := range options {
+		err := option(m)
+
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return m, nil
+}
+
+// The ProvidersGetter option registers the function responsible for returning the
+// providers containing the valid issuer and client IDs used to validate the ID Token.
+func ProvidersGetter(pg GetProvidersFunc) func(*Configuration) error {
+	return func(c *Configuration) error {
+		c.tokenValidator.(*idTokenValidator).provGetter = pg
+		return nil
+	}
+}
+
+// The TokenValidator option registers a custom JWTTokenValidator, replacing the default ID token validator.
func TokenValidator(tv JWTTokenValidator) func(*Configuration) error {
+	return func(c *Configuration) error {
+		c.tokenValidator = tv
+		return nil
+	}
+}
+
+// The ErrorHandler option registers the function responsible for handling
+// the errors returned during token validation. When this option is not used then the
+// middleware will use the default internal implementation validationErrorToHTTPStatus.
+func ErrorHandler(eh ErrorHandlerFunc) func(*Configuration) error {
+	return func(c *Configuration) error {
+		c.errorHandler = eh
+		return nil
+	}
+}
+
+// The Authenticate middleware performs the validation of the OIDC ID Token.
+// If an error happens, i.e.: expired token, the next handler may or may not be executed depending on the
+// provided ErrorHandlerFunc option. The default behavior, determined by validationErrorToHTTPStatus,
+// stops the execution and returns Unauthorized.
+// If the validation is successful then the next handler (h) will be executed.
+func Authenticate(conf *Configuration, h http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		if _, halt := authenticate(conf, w, r); !halt {
+			h.ServeHTTP(w, r)
+		}
+	})
+}
+
+// The AuthenticateUser middleware performs the validation of the OIDC ID Token and
+// forwards the authenticated user's information to the next handler in the pipeline.
+// If an error happens, i.e.: expired token, the next handler may or may not be executed depending on the
+// provided ErrorHandlerFunc option. The default behavior, determined by validationErrorToHTTPStatus,
+// stops the execution and returns Unauthorized.
+// If the validation is successful then the next handler (h) will be executed and will
+// receive the authenticated user information.
+func AuthenticateUser(conf *Configuration, h UserHandler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		if u, halt := authenticateUser(conf, w, r); !halt {
+			h.ServeHTTPWithUser(u, w, r)
+		}
+	})
+}
+
+// AuthenticateOIDWithUser is an exported version of authenticate so we don't need to use the middleware.
+func AuthenticateOIDWithUser(c *Configuration, rw http.ResponseWriter, req *http.Request) (*User, *jwt.Token, bool) {
+	return authenticateUserWithToken(c, rw, req)
+}
+
+func authenticate(c *Configuration, rw http.ResponseWriter, req *http.Request) (t *jwt.Token, halt bool) {
+	var tg GetIDTokenFunc
+	if c.IDTokenGetter == nil {
+		tg = getIDTokenAuthorizationHeader
+	} else {
+		tg = c.IDTokenGetter
+	}
+
+	var eh ErrorHandlerFunc
+	if c.errorHandler == nil {
+		eh = validationErrorToHTTPStatus
+	} else {
+		eh = c.errorHandler
+	}
+
+	ts, err := tg(req)
+
+	if err != nil {
+		return nil, eh(err, rw, req)
+	}
+
+	vt, err := c.tokenValidator.Validate(ts)
+
+	if err != nil {
+		return nil, eh(err, rw, req)
+	}
+
+	return vt, false
+}
+
+func authenticateUser(c *Configuration, rw http.ResponseWriter, req *http.Request) (u *User, halt bool) {
+	var vt *jwt.Token
+
+	var eh ErrorHandlerFunc
+	if c.errorHandler == nil {
+		eh = validationErrorToHTTPStatus
+	} else {
+		eh = c.errorHandler
+	}
+
+	if t, h := authenticate(c, rw, req); h {
+		return nil, h
+	} else {
+		vt = t
+	}
+
+	u, err := newUser(vt)
+
+	if err != nil {
+		return nil, eh(err, rw, req)
+	}
+
+	return u, false
+}
+
+func authenticateUserWithToken(c *Configuration, rw http.ResponseWriter, req *http.Request) (u *User, vt *jwt.Token, halt bool) {
+	var eh ErrorHandlerFunc
+	if c.errorHandler == nil {
+		eh = validationErrorToHTTPStatus
+	} else {
+		eh = c.errorHandler
+	}
+
+	if t, h := authenticate(c, rw, req); h {
+		return nil, nil, h
+	} else {
+		vt = t
+	}
+
+	u, err := newUser(vt)
+
+	if err != nil {
+		return nil, nil, eh(err, rw, req)
+	}
+
+	return u, vt, false
+} diff --git a/vendor/github.com/TykTechnologies/openid2go/openid/provider.go b/vendor/github.com/TykTechnologies/openid2go/openid/provider.go new file mode 100644 index 00000000000..ed447339bd6 --- /dev/null +++ b/vendor/github.com/TykTechnologies/openid2go/openid/provider.go @@ -0,0 +1,79 @@ +package openid
+
+// Provider represents an OpenId Identity Provider (OP) and contains
+// the information needed to perform validation of the ID Token.
+// See OpenId terminology http://openid.net/specs/openid-connect-core-1_0.html#Terminology.
+//
+// The Issuer uniquely identifies an OP. This field will be used
+// to validate the 'iss' claim present in the ID Token.
+//
+// The ClientIDs contains the list of client IDs registered with the OP that are meant to be accepted by the service using this package.
+// These values are used to validate the 'aud' claim present in the ID Token.
+type Provider struct {
+	Issuer    string
+	ClientIDs []string
+}
+
+// providers represent a collection of OPs.
+type providers []Provider
+
+// NewProvider returns a new instance of a Provider created with the given issuer and clientIDs.
+func NewProvider(issuer string, clientIDs []string) (Provider, error) {
+	p := Provider{issuer, clientIDs}
+
+	if err := p.validate(); err != nil {
+		return Provider{}, err
+	}
+
+	return p, nil
+}
+
+// The GetProvidersFunc defines the function type used to retrieve the collection of allowed OP(s) along with the
+// respective client IDs registered with those providers that can access the backend service
+// using this package.
// A function of this type must be provided to NewConfiguration through the option ProvidersGetter.
+// The given function will then be invoked for every request intercepted by the Authenticate or AuthenticateUser middleware.
+type GetProvidersFunc func() ([]Provider, error)
+
+func (ps providers) validate() error {
+	if len(ps) == 0 {
+		return &SetupError{Code: SetupErrorEmptyProviderCollection, Message: "The collection of providers must contain at least one element."}
+	}
+
+	for _, p := range ps {
+		if err := p.validate(); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (p Provider) validate() error {
+	if err := validateProviderIssuer(p.Issuer); err != nil {
+		return err
+	}
+
+	if err := validateProviderClientIDs(p.ClientIDs); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func validateProviderIssuer(iss string) error {
+	if iss == "" {
+		return &SetupError{Code: SetupErrorInvalidIssuer, Message: "Empty string issuer not allowed."}
+	}
+
+	// TODO: Validate that the issuer format complies with the openid spec.
+	return nil
+}
+
+func validateProviderClientIDs(cIDs []string) error {
+	if len(cIDs) == 0 {
+		return &SetupError{Code: SetupErrorInvalidClientIDs, Message: "At least one client ID must be provided."}
+	}
+
+	return nil
+} diff --git a/vendor/github.com/TykTechnologies/openid2go/openid/readidtoken.go b/vendor/github.com/TykTechnologies/openid2go/openid/readidtoken.go new file mode 100644 index 00000000000..e90dc5da789 --- /dev/null +++ b/vendor/github.com/TykTechnologies/openid2go/openid/readidtoken.go @@ -0,0 +1,39 @@ +package openid
+
+import (
+	"net/http"
+	"strings"
+)
+
+// GetIDTokenFunc represents the function used to provide the OIDC idToken.
+// It uses the provided request (r) to return the ID token string (token).
+// If the token was not found or had a bad format this function will return an error.
+type GetIDTokenFunc func(r *http.Request) (token string, err error)
+
+// getIDTokenAuthorizationHeader is the default implementation of GetIDTokenFunc
+// used by this package. It looks for the idToken in the http Authorization header with
+// the format 'Bearer TokenString'. If found it will return 'TokenString'; if not found
+// or the format does not match it will return an error.
+func getIDTokenAuthorizationHeader(r *http.Request) (t string, err error) { + h := r.Header.Get("Authorization") + + return CheckAndSplitHeader(h) +} + +func CheckAndSplitHeader(h string) (t string, err error) { + if h == "" { + return h, &ValidationError{Code: ValidationErrorAuthorizationHeaderNotFound, Message: "The 'Authorization' header was not found or was empty.", HTTPStatus: http.StatusBadRequest} + } + + p := strings.Split(h, " ") + + if len(p) != 2 { + return h, &ValidationError{Code: ValidationErrorAuthorizationHeaderWrongFormat, Message: "The 'Authorization' header did not have the correct format.", HTTPStatus: http.StatusBadRequest} + } + + if p[0] != "Bearer" { + return h, &ValidationError{Code: ValidationErrorAuthorizationHeaderWrongSchemeName, Message: "The 'Authorization' header scheme name was not 'Bearer'", HTTPStatus: http.StatusBadRequest} + } + + return p[1], nil +} diff --git a/vendor/github.com/TykTechnologies/openid2go/openid/signingkeyencoder.go b/vendor/github.com/TykTechnologies/openid2go/openid/signingkeyencoder.go new file mode 100644 index 00000000000..5b3cfc1e988 --- /dev/null +++ b/vendor/github.com/TykTechnologies/openid2go/openid/signingkeyencoder.go @@ -0,0 +1,23 @@ +package openid + +import ( + "crypto/x509" + "encoding/pem" + "fmt" + "net/http" +) + +type pemEncodeFunc func(key interface{}) ([]byte, error) + +func pemEncodePublicKey(key interface{}) ([]byte, error) { + mk, err := x509.MarshalPKIXPublicKey(key) + if err != nil { + return nil, &ValidationError{Code: ValidationErrorMarshallingKey, Message: fmt.Sprint("The jwk key could not be marshalled."), HTTPStatus: http.StatusInternalServerError, Err: err} + } + + ed := pem.EncodeToMemory(&pem.Block{ + Bytes: mk, + }) + + return ed, nil +} diff --git a/vendor/github.com/TykTechnologies/openid2go/openid/signingkeyprovider.go b/vendor/github.com/TykTechnologies/openid2go/openid/signingkeyprovider.go new file mode 100644 index 00000000000..b31f60eeb9a --- /dev/null +++ b/vendor/github.com/TykTechnologies/openid2go/openid/signingkeyprovider.go @@ -0,0 +1,118 @@ +package openid + +import ( + "crypto/x509" + "encoding/pem" + "fmt" + "net/http" + "sync" +) + +var lock = sync.RWMutex{} + +type signingKeyGetter interface { + flushCachedSigningKeys(issuer string) error + getSigningKey(issuer string, kid string) (interface{}, error) +} + +type signingKeyProvider struct { + keySetGetter signingKeySetGetter + jwksMap map[string][]signingKey +} + +func newSigningKeyProvider(kg signingKeySetGetter) *signingKeyProvider { + keyMap := make(map[string][]signingKey) + return &signingKeyProvider{kg, keyMap} +} + +func (s *signingKeyProvider) flushCachedSigningKeys(issuer string) error { + lock.Lock() + defer lock.Unlock() + delete(s.jwksMap, issuer) + return nil +} + +func (s *signingKeyProvider) refreshSigningKeys(issuer string) error { + skeys, err := s.keySetGetter.getSigningKeySet(issuer) + + if err != nil { + return err + } + + lock.Lock() + s.jwksMap[issuer] = skeys + lock.Unlock() + return nil +} + +func parsePublicKey(data []byte) (interface{}, error) { + input := data + block, _ := pem.Decode(data) + if block != nil { + input = block.Bytes + } + var pub interface{} + var err error + pub, err = x509.ParsePKIXPublicKey(input) + if err != nil { + cert, err0 := x509.ParseCertificate(input) + if err0 != nil { + return nil, err0 + } + pub = cert.PublicKey + err = nil + } + return pub, err +} + +func (s *signingKeyProvider) getSigningKey(issuer string, kid string) (interface{}, error) { + lock.RLock() + sk := 
findKey(s.jwksMap, issuer, kid) + lock.RUnlock() + + if sk != nil { + parsed, pErr := parsePublicKey(sk) + if pErr != nil { + return sk, nil + } + return parsed, nil + } + + err := s.refreshSigningKeys(issuer) + + if err != nil { + return nil, err + } + + lock.RLock() + sk = findKey(s.jwksMap, issuer, kid) + lock.RUnlock() + + if sk == nil { + return nil, &ValidationError{Code: ValidationErrorKidNotFound, Message: fmt.Sprintf("The jwk set retrieved for the issuer %v does not contain a key identifier %v.", issuer, kid), HTTPStatus: http.StatusUnauthorized} + } + + parsed, pErr := parsePublicKey(sk) + if pErr != nil { + return sk, nil + } + + return parsed, nil +} + +func findKey(km map[string][]signingKey, issuer string, kid string) []byte { + + if skSet, ok := km[issuer]; ok { + if kid == "" { + return skSet[0].key + } else { + for _, sk := range skSet { + if sk.keyID == kid { + return sk.key + } + } + } + } + + return nil +} diff --git a/vendor/github.com/TykTechnologies/openid2go/openid/signingkeysetprovider.go b/vendor/github.com/TykTechnologies/openid2go/openid/signingkeysetprovider.go new file mode 100644 index 00000000000..3a40abcde94 --- /dev/null +++ b/vendor/github.com/TykTechnologies/openid2go/openid/signingkeysetprovider.go @@ -0,0 +1,56 @@ +package openid + +import ( + "fmt" + "net/http" +) + +type signingKeySetGetter interface { + getSigningKeySet(issuer string) ([]signingKey, error) +} + +type signingKeySetProvider struct { + configGetter configurationGetter + jwksGetter jwksGetter + keyEncoder pemEncodeFunc +} + +type signingKey struct { + keyID string + key []byte +} + +func newSigningKeySetProvider(cg configurationGetter, jg jwksGetter, ke pemEncodeFunc) *signingKeySetProvider { + return &signingKeySetProvider{cg, jg, ke} +} + +func (signProv *signingKeySetProvider) getSigningKeySet(iss string) ([]signingKey, error) { + conf, err := signProv.configGetter.getConfiguration(iss) + + if err != nil { + return nil, err + } + + jwks, err := signProv.jwksGetter.getJwkSet(conf.JwksUri) + + if err != nil { + return nil, err + } + + if len(jwks.Keys) == 0 { + return nil, &ValidationError{Code: ValidationErrorEmptyJwk, Message: fmt.Sprintf("The jwk set retrieved for the issuer %v does not contain any key.", iss), HTTPStatus: http.StatusUnauthorized} + } + + sk := make([]signingKey, len(jwks.Keys)) + + for i, k := range jwks.Keys { + ek, err := signProv.keyEncoder(k.Key) + if err != nil { + return nil, err + } + + sk[i] = signingKey{k.KeyID, ek} + } + + return sk, nil +} diff --git a/vendor/github.com/TykTechnologies/openid2go/openid/user.go b/vendor/github.com/TykTechnologies/openid2go/openid/user.go new file mode 100644 index 00000000000..8c235f37a6b --- /dev/null +++ b/vendor/github.com/TykTechnologies/openid2go/openid/user.go @@ -0,0 +1,45 @@ +package openid + +import ( + "net/http" + + "github.com/golang-jwt/jwt/v4" +) + +// User represents the authenticated user encapsulating information obtained from the validated ID token. +// +// The Issuer contains the value from the 'iss' claim found in the ID Token. +// +// The ID contains the value of the 'sub' claim found in the ID Token. 
+// +// The Claims contains all the claims found in the ID Token +type User struct { + Issuer string + ID string + Claims map[string]interface{} +} + +func newUser(t *jwt.Token) (*User, error) { + if t == nil { + return nil, &ValidationError{Code: ValidationErrorIdTokenEmpty, Message: "The token provided to create a user was nil.", HTTPStatus: http.StatusUnauthorized} + } + + iss := getIssuer(t).(string) + + if iss == "" { + return nil, &ValidationError{Code: ValidationErrorInvalidIssuer, Message: "The token provided to create a user did not contain a valid 'iss' claim.", HTTPStatus: http.StatusInternalServerError} + } + + sub := getSubject(t).(string) + + if sub == "" { + return nil, &ValidationError{Code: ValidationErrorInvalidSubject, Message: "The token provided to create a user did not contain a valid 'sub' claim.", HTTPStatus: http.StatusInternalServerError} + } + + u := new(User) + u.Issuer = iss + u.ID = sub + u.Claims = t.Claims.(jwt.MapClaims) + return u, nil +} diff --git a/vendor/github.com/TykTechnologies/openid2go/openid/userhandler.go b/vendor/github.com/TykTechnologies/openid2go/openid/userhandler.go new file mode 100644 index 00000000000..9e45c7129bf --- /dev/null +++ b/vendor/github.com/TykTechnologies/openid2go/openid/userhandler.go @@ -0,0 +1,22 @@ +package openid + +import "net/http" + +// The UserHandler represents a handler to be registered by the middleware AuthenticateUser. +// This handler allows the AuthenticateUser middleware to forward information about the authenticated user to +// the rest of the application service. +// +// ServeHTTPWithUser is similar to the http.ServeHTTP function. It contains an additional parameter *User, +// which is used by the AuthenticateUser middleware to pass information about the authenticated user. +type UserHandler interface { + ServeHTTPWithUser(*User, http.ResponseWriter, *http.Request) +} + +// The UserHandlerFunc is an adapter to allow the use of functions as UserHandler. +// This is similar to using http.HandlerFunc as http.Handler +type UserHandlerFunc func(*User, http.ResponseWriter, *http.Request) + +// ServeHTTPWithUser calls f(u, w, r) +func (f UserHandlerFunc) ServeHTTPWithUser(u *User, w http.ResponseWriter, r *http.Request) { + f(u, w, r) +} diff --git a/vendor/github.com/TykTechnologies/tyk-pump/LICENSE.md b/vendor/github.com/TykTechnologies/tyk-pump/LICENSE.md new file mode 100644 index 00000000000..771dfbcd592 --- /dev/null +++ b/vendor/github.com/TykTechnologies/tyk-pump/LICENSE.md @@ -0,0 +1,184 @@ +# Mozilla Public License Version 2.0 + +## 1. Definitions + +### 1.1. “Contributor” +means each individual or legal entity that creates, contributes to the creation of, or owns Covered Software. + +### 1.2. “Contributor Version” +means the combination of the Contributions of others (if any) used by a Contributor and that particular Contributor's Contribution. + +### 1.3. “Contribution” +means Covered Software of a particular Contributor. + +### 1.4. “Covered Software” +means Source Code Form to which the initial Contributor has attached the notice in Exhibit A, the Executable Form of such Source Code Form, and Modifications of such Source Code Form, in each case including portions thereof. + +### 1.5. “Incompatible With Secondary Licenses” +means + +* a. that the initial Contributor has attached the notice described in Exhibit B to the Covered Software; or +* b. 
that the Covered Software was made available under the terms of version 1.1 or earlier of the License, but not also under the terms of a Secondary License. + +### 1.6. “Executable Form” +means any form of the work other than Source Code Form. + +### 1.7. “Larger Work” +means a work that combines Covered Software with other material, in a separate file or files, that is not Covered Software. + +### 1.8. “License” +means this document. + +### 1.9. “Licensable” +means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently, any and all of the rights conveyed by this License. + +### 1.10. “Modifications” +means any of the following: + +* a. any file in Source Code Form that results from an addition to, deletion from, or modification of the contents of Covered Software; or +* b. any new file in Source Code Form that contains any Covered Software. + +### 1.11. “Patent Claims” of a Contributor +means any patent claim(s), including without limitation, method, process, and apparatus claims, in any patent Licensable by such Contributor that would be infringed, but for the grant of the License, by the making, using, selling, offering for sale, having made, import, or transfer of either its Contributions or its Contributor Version. + +### 1.12. “Secondary License” +means either the GNU General Public License, Version 2.0, the GNU Lesser General Public License, Version 2.1, the GNU Affero General Public License, Version 3.0, or any later versions of those licenses. + +### 1.13. “Source Code Form” +means the form of the work preferred for making modifications. + +### 1.14. “You” (or “Your”) +means an individual or a legal entity exercising rights under this License. For legal entities, “You” includes any entity that controls, is controlled by, or is under common control with You. For purposes of this definition, “control” means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity. + +## 2. License Grants and Conditions + +### 2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license: + +* a. under intellectual property rights (other than patent or trademark) Licensable by such Contributor to use, reproduce, make available, modify, display, perform, distribute, and otherwise exploit its Contributions, either on an unmodified basis, with Modifications, or as part of a Larger Work; and +* b. under Patent Claims of such Contributor to make, use, sell, offer for sale, have made, import, and otherwise transfer either its Contributions or its Contributor Version. + +### 2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution become effective for each Contribution on the date the Contributor first distributes such Contribution. + +### 2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under this License. No additional rights or licenses will be implied from the distribution or licensing of Covered Software under this License. Notwithstanding Section 2.1(b) above, no patent license is granted by a Contributor: + +* a. for any code that a Contributor has removed from Covered Software; or +* b. 
for infringements caused by: (i) Your and any other third party's modifications of Covered Software, or (ii) the combination of its Contributions with other software (except as part of its Contributor Version); or +* c. under Patent Claims infringed by Covered Software in the absence of its Contributions. + +This License does not grant any rights in the trademarks, service marks, or logos of any Contributor (except as may be necessary to comply with the notice requirements in Section 3.4). + +### 2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to distribute the Covered Software under a subsequent version of this License (see Section 10.2) or under the terms of a Secondary License (if permitted under the terms of Section 3.3). + +### 2.5. Representation + +Each Contributor represents that the Contributor believes its Contributions are its original creation(s) or it has sufficient rights to grant the rights to its Contributions conveyed by this License. + +### 2.6. Fair Use + +This License is not intended to limit any rights You have under applicable copyright doctrines of fair use, fair dealing, or other equivalents. + +### 2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in Section 2.1. + + +## 3. Responsibilities + +### 3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any Modifications that You create or to which You contribute, must be under the terms of this License. You must inform recipients that the Source Code Form of the Covered Software is governed by the terms of this License, and how they can obtain a copy of this License. You may not attempt to alter or restrict the recipients' rights in the Source Code Form. + +### 3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +* a. such Covered Software must also be made available in Source Code Form, as described in Section 3.1, and You must inform recipients of the Executable Form how they can obtain a copy of such Source Code Form by reasonable means in a timely manner, at a charge no more than the cost of distribution to the recipient; and + +* b. You may distribute such Executable Form under the terms of this License, or sublicense it under different terms, provided that the license for the Executable Form does not attempt to limit or alter the recipients' rights in the Source Code Form under this License. + +### 3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, provided that You also comply with the requirements of this License for the Covered Software. If the Larger Work is a combination of Covered Software with a work governed by one or more Secondary Licenses, and the Covered Software is not Incompatible With Secondary Licenses, this License permits You to additionally distribute such Covered Software under the terms of such Secondary License(s), so that the recipient of the Larger Work may, at their option, further distribute the Covered Software under the terms of either this License or such Secondary License(s). + +### 3.4. Notices + +You may not remove or alter the substance of any license notices (including copyright notices, patent notices, disclaimers of warranty, or limitations of liability) contained within the Source Code Form of the Covered Software, except that You may alter any license notices to the extent required to remedy known factual inaccuracies. + +### 3.5. 
Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, You may do so only on Your own behalf, and not on behalf of any Contributor. You must make it absolutely clear that any such warranty, support, indemnity, or liability obligation is offered by You alone, and You hereby agree to indemnify every Contributor for any liability incurred by such Contributor as a result of warranty, support, indemnity or liability terms You offer. You may include additional disclaimers of warranty and limitations of liability specific to any jurisdiction. + + +## 4. Inability to Comply Due to Statute or Regulation + +If it is impossible for You to comply with any of the terms of this License with respect to some or all of the Covered Software due to statute, judicial order, or regulation then You must: (a) comply with the terms of this License to the maximum extent possible; and (b) describe the limitations and the code they affect. Such description must be placed in a text file included with all distributions of the Covered Software under this License. Except to the extent prohibited by statute or regulation, such description must be sufficiently detailed for a recipient of ordinary skill to be able to understand it. + + +## 5. Termination + +5.1. The rights granted under this License will terminate automatically if You fail to comply with any of its terms. However, if You become compliant, then the rights granted under this License from a particular Contributor are reinstated (a) provisionally, unless and until such Contributor explicitly and finally terminates Your grants, and (b) on an ongoing basis, if such Contributor fails to notify You of the non-compliance by some reasonable means prior to 60 days after You have come back into compliance. Moreover, Your grants from a particular Contributor are reinstated on an ongoing basis if such Contributor notifies You of the non-compliance by some reasonable means, this is the first time You have received notice of non-compliance with this License from such Contributor, and You become compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent infringement claim (excluding declaratory judgment actions, counter-claims, and cross-claims) alleging that a Contributor Version directly or indirectly infringes any patent, then the rights granted to You by any and all Contributors for the Covered Software under Section 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user license agreements (excluding distributors and resellers) which have been validly granted by You or Your distributors under this License prior to termination shall survive termination. + + +## 6. Disclaimer of Warranty + +**Covered Software is provided under this License on an “as is” basis, without warranty of any kind, either expressed, implied, or statutory, including, without limitation, warranties that the Covered Software is free of defects, merchantable, fit for a particular purpose or non-infringing. The entire risk as to the quality and performance of the Covered Software is with You. Should any Covered Software prove defective in any respect, You (not any Contributor) assume the cost of any necessary servicing, repair, or correction. This disclaimer of warranty constitutes an essential part of this License. 
No use of any Covered Software is authorized under this License except under this disclaimer.** + + +## 7. Limitation of Liability + +**Under no circumstances and under no legal theory, whether tort (including negligence), contract, or otherwise, shall any Contributor, or anyone who distributes Covered Software as permitted above, be liable to You for any direct, indirect, special, incidental, or consequential damages of any character including, without limitation, damages for lost profits, loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses, even if such party shall have been informed of the possibility of such damages. This limitation of liability shall not apply to liability for death or personal injury resulting from such party's negligence to the extent applicable law prohibits such limitation. Some jurisdictions do not allow the exclusion or limitation of incidental or consequential damages, so this exclusion and limitation may not apply to You.** + + +## 8. Litigation + +Any litigation relating to this License may be brought only in the courts of a jurisdiction where the defendant maintains its principal place of business and such litigation shall be governed by laws of that jurisdiction, without reference to its conflict-of-law provisions. Nothing in this Section shall prevent a party's ability to bring cross-claims or counter-claims. + + +## 9. Miscellaneous + +This License represents the complete agreement concerning the subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not be used to construe this License against a Contributor. + + +## 10. Versions of the License + +### 10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section 10.3, no one other than the license steward has the right to modify or publish new versions of this License. Each version will be given a distinguishing version number. + +### 10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version of the License under which You originally received the Covered Software, or under the terms of any subsequent version published by the license steward. + +### 10.3. Modified Versions + +If you create software not governed by this License, and you want to create a new license for such software, you may create and use a modified version of this License if you rename the license and remove any references to the name of the license steward (except to note that such modified license differs from this License). + +### 10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + +If You choose to distribute Source Code Form that is Incompatible With Secondary Licenses under the terms of this version of the License, the notice described in Exhibit B of this License must be attached. + + +## Exhibit A - Source Code Form License Notice + +> This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +If it is not possible or desirable to put the notice in a particular file, then You may include the notice in a location (such as a LICENSE file in a relevant directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + + +## Exhibit B - “Incompatible With Secondary Licenses” Notice + +> This Source Code Form is "Incompatible With Secondary Licenses", as defined by the Mozilla Public License, v. 2.0. \ No newline at end of file diff --git a/vendor/github.com/TykTechnologies/tyk-pump/analytics/aggregate.go b/vendor/github.com/TykTechnologies/tyk-pump/analytics/aggregate.go new file mode 100644 index 00000000000..7080712c6a3 --- /dev/null +++ b/vendor/github.com/TykTechnologies/tyk-pump/analytics/aggregate.go @@ -0,0 +1,1017 @@ +package analytics + +import ( + b64 "encoding/base64" + "encoding/hex" + "fmt" + "strconv" + "strings" + "sync" + "time" + + "github.com/fatih/structs" + "github.com/sirupsen/logrus" + "gopkg.in/mgo.v2/bson" + "gorm.io/gorm" +) + +const ( + AgggregateMixedCollectionName = "tyk_analytics_aggregates" + GraphAggregateMixedCollectionName = "tyk_graph_analytics_aggregate" + MongoAggregatePrefix = "mongo-pump-aggregate" + AggregateSQLTable = "tyk_aggregated" + AggregateGraphSQLTable = "tyk_graph_aggregated" +) + +// lastDocumentTimestamp is a map to store the last document timestamps of different Mongo Aggregators +var lastDocumentTimestamp = make(map[string]time.Time) + +// mutex is used to prevent concurrent writes to the same key +var mutex sync.RWMutex + +type ErrorData struct { + Code string + Count int +} + +type Counter struct { + Hits int `json:"hits"` + Success int `json:"success"` + ErrorTotal int `json:"error" gorm:"column:error"` + RequestTime float64 `json:"request_time"` + TotalRequestTime float64 `json:"total_request_time"` + Identifier string `json:"identifier" sql:"-"` + HumanIdentifier string `json:"human_identifier"` + LastTime time.Time `json:"last_time"` + OpenConnections int64 `json:"open_connections"` + ClosedConnections int64 `json:"closed_connections"` + BytesIn int64 `json:"bytes_in"` + BytesOut int64 `json:"bytes_out"` + MaxUpstreamLatency int64 `json:"max_upstream_latency"` + MinUpstreamLatency int64 `json:"min_upstream_latency"` + TotalUpstreamLatency int64 `json:"total_upstream_latency"` + UpstreamLatency float64 `json:"upstream_latency"` + + MaxLatency int64 `json:"max_latency"` + MinLatency int64 `json:"min_latency"` + TotalLatency int64 `json:"total_latency"` + Latency float64 `json:"latency"` + + ErrorMap map[string]int `json:"error_map" sql:"-"` + ErrorList []ErrorData `json:"error_list" sql:"-"` +} + +type GraphRecordAggregate struct { + AnalyticsRecordAggregate + + Types map[string]*Counter + Fields map[string]*Counter + Operation map[string]*Counter + RootFields map[string]*Counter +} + +type AnalyticsRecordAggregate struct { + TimeStamp time.Time + OrgID string + TimeID struct { + Year int + Month int + Day int + Hour int + } + + APIKeys map[string]*Counter + Errors map[string]*Counter + + Versions map[string]*Counter + APIID map[string]*Counter + OauthIDs map[string]*Counter + Geo map[string]*Counter + Tags map[string]*Counter + + Endpoints map[string]*Counter + + Lists struct { + APIKeys []Counter + APIID []Counter + OauthIDs []Counter + Geo []Counter + Tags []Counter + Errors []Counter + Endpoints []Counter + KeyEndpoint map[string][]Counter `bson:"keyendpoints"` + OauthEndpoint map[string][]Counter `bson:"oauthendpoints"` + APIEndpoint []Counter 
`bson:"apiendpoints"` + } + + KeyEndpoint map[string]map[string]*Counter `bson:"keyendpoints"` + OauthEndpoint map[string]map[string]*Counter `bson:"oauthendpoints"` + ApiEndpoint map[string]*Counter `bson:"apiendpoints"` + + Total Counter + + ExpireAt time.Time `bson:"expireAt" json:"expireAt"` + LastTime time.Time +} + +type SQLAnalyticsRecordAggregate struct { + ID string `gorm:"primaryKey"` + + Counter `json:"counter" gorm:"embedded"` + + TimeStamp int64 `json:"timestamp" gorm:"index:dimension, priority:1"` + OrgID string `json:"org_id" gorm:"index:dimension, priority:2"` + Dimension string `json:"dimension" gorm:"index:dimension, priority:3"` + DimensionValue string `json:"dimension_value" gorm:"index:dimension, priority:4"` + + Code `json:"code" gorm:"embedded"` +} + +type Code struct { + Code1x int `json:"1x" gorm:"1x"` + Code200 int `json:"200" gorm:"200"` + Code201 int `json:"201" gorm:"201"` + Code2x int `json:"2x" gorm:"2x"` + Code301 int `json:"301" gorm:"301"` + Code302 int `json:"302" gorm:"302"` + Code303 int `json:"303" gorm:"303"` + Code304 int `json:"304" gorm:"304"` + Code3x int `json:"3x" gorm:"3x"` + Code400 int `json:"400" gorm:"400"` + Code401 int `json:"401" gorm:"401"` + Code403 int `json:"403" gorm:"403"` + Code404 int `json:"404" gorm:"404"` + Code429 int `json:"429" gorm:"429"` + Code4x int `json:"4x" gorm:"4x"` + Code500 int `json:"500" gorm:"500"` + Code501 int `json:"501" gorm:"501"` + Code502 int `json:"502" gorm:"502"` + Code503 int `json:"503" gorm:"503"` + Code504 int `json:"504" gorm:"504"` + Code5x int `json:"5x" gorm:"5x"` +} + +func (c *Code) ProcessStatusCodes(errorMap map[string]int) { + codeStruct := structs.New(c) + for k, v := range errorMap { + if field, ok := codeStruct.FieldOk("Code" + k); ok { + _ = field.Set(v) + } else { + if field, ok = codeStruct.FieldOk("Code" + string(k[0]) + "x"); ok { + _ = field.Set(v + field.Value().(int)) + } + } + } +} + +func (f *SQLAnalyticsRecordAggregate) TableName() string { + return AggregateSQLTable +} + +func OnConflictAssignments(tableName string, tempTable string) map[string]interface{} { + assignments := make(map[string]interface{}) + f := SQLAnalyticsRecordAggregate{} + baseFields := structs.Fields(f.Code) + for _, field := range baseFields { + jsonTag := field.Tag("json") + colName := "code_" + jsonTag + assignments[colName] = gorm.Expr(tableName + "." + colName + " + " + tempTable + "." + colName) + + } + + fields := structs.Fields(f.Counter) + for _, field := range fields { + jsonTag := field.Tag("json") + colName := "counter_" + jsonTag + + switch jsonTag { + // hits, error, success"s, open_connections, closed_connections, bytes_in, bytes_out,total_request_time, total_upstream_latency, total_latency + case "hits", "error", "success", "open_connections", "closed_connections", "bytes_in", "bytes_out", "total_request_time", "total_latency", "total_upstream_latency": + assignments[colName] = gorm.Expr(tableName + "." + colName + " + " + tempTable + "." + colName) + // request_time, upstream_latency,latency + case "request_time", "upstream_latency", "latency": + // AVG = (oldTotal + newTotal ) / (oldHits + newHits) + var totalVal, totalCol string + switch jsonTag { + case "request_time": + totalCol = "counter_total_request_time" + case "upstream_latency": + totalCol = "counter_total_upstream_latency" + case "latency": + totalCol = "counter_total_latency" + } + totalVal = tempTable + "." + totalCol + + assignments[colName] = gorm.Expr("(" + tableName + "." 
+ totalCol + " +" + totalVal + ")/CAST( " + tableName + ".counter_hits + " + tempTable + ".counter_hits" + " AS REAL)") + + case "max_upstream_latency", "max_latency": + // math max: 0.5 * ((@val1 + @val2) + ABS(@val1 - @val2)) + val1 := tableName + "." + colName + val2 := tempTable + "." + colName + assignments[colName] = gorm.Expr("0.5 * ((" + val1 + " + " + val2 + ") + ABS(" + val1 + " - " + val2 + "))") + + case "min_latency", "min_upstream_latency": + // math min: 0.5 * ((@val1 + @val2) - ABS(@val1 - @val2)) + val1 := tableName + "." + colName + val2 := tempTable + "." + colName + assignments[colName] = gorm.Expr("0.5 * ((" + val1 + " + " + val2 + ") - ABS(" + val1 + " - " + val2 + ")) ") + + case "last_time": + assignments[colName] = gorm.Expr(tempTable + "." + colName) + + } + } + + return assignments +} + +func NewGraphRecordAggregate() GraphRecordAggregate { + analyticsAggregate := AnalyticsRecordAggregate{}.New() + + return GraphRecordAggregate{ + AnalyticsRecordAggregate: analyticsAggregate, + Types: make(map[string]*Counter), + Fields: make(map[string]*Counter), + Operation: make(map[string]*Counter), + RootFields: make(map[string]*Counter), + } +} + +func (f AnalyticsRecordAggregate) New() AnalyticsRecordAggregate { + thisF := AnalyticsRecordAggregate{} + thisF.APIID = make(map[string]*Counter) + thisF.Errors = make(map[string]*Counter) + thisF.Versions = make(map[string]*Counter) + thisF.APIKeys = make(map[string]*Counter) + thisF.OauthIDs = make(map[string]*Counter) + thisF.Geo = make(map[string]*Counter) + thisF.Tags = make(map[string]*Counter) + thisF.Endpoints = make(map[string]*Counter) + thisF.KeyEndpoint = make(map[string]map[string]*Counter) + thisF.OauthEndpoint = make(map[string]map[string]*Counter) + thisF.ApiEndpoint = make(map[string]*Counter) + + return thisF +} + +func (f *AnalyticsRecordAggregate) generateBSONFromProperty(parent, thisUnit string, incVal *Counter, newUpdate bson.M) bson.M { + constructor := parent + "." + thisUnit + "." + if parent == "" { + constructor = thisUnit + "." 
+ } + + newUpdate["$inc"].(bson.M)[constructor+"hits"] = incVal.Hits + newUpdate["$inc"].(bson.M)[constructor+"success"] = incVal.Success + newUpdate["$inc"].(bson.M)[constructor+"errortotal"] = incVal.ErrorTotal + for k, v := range incVal.ErrorMap { + newUpdate["$inc"].(bson.M)[constructor+"errormap."+k] = v + } + newUpdate["$inc"].(bson.M)[constructor+"totalrequesttime"] = incVal.TotalRequestTime + newUpdate["$set"].(bson.M)[constructor+"identifier"] = incVal.Identifier + newUpdate["$set"].(bson.M)[constructor+"humanidentifier"] = incVal.HumanIdentifier + newUpdate["$set"].(bson.M)[constructor+"lasttime"] = incVal.LastTime + newUpdate["$set"].(bson.M)[constructor+"openconnections"] = incVal.OpenConnections + newUpdate["$set"].(bson.M)[constructor+"closedconnections"] = incVal.ClosedConnections + newUpdate["$set"].(bson.M)[constructor+"bytesin"] = incVal.BytesIn + newUpdate["$set"].(bson.M)[constructor+"bytesout"] = incVal.BytesOut + newUpdate["$max"].(bson.M)[constructor+"maxlatency"] = incVal.MaxLatency + // Don't update min latency in case of errors + if incVal.Hits != incVal.ErrorTotal { + if newUpdate["$min"] == nil { + newUpdate["$min"] = bson.M{} + } + newUpdate["$min"].(bson.M)[constructor+"minlatency"] = incVal.MinLatency + newUpdate["$min"].(bson.M)[constructor+"minupstreamlatency"] = incVal.MinUpstreamLatency + } + newUpdate["$max"].(bson.M)[constructor+"maxupstreamlatency"] = incVal.MaxUpstreamLatency + newUpdate["$inc"].(bson.M)[constructor+"totalupstreamlatency"] = incVal.TotalUpstreamLatency + newUpdate["$inc"].(bson.M)[constructor+"totallatency"] = incVal.TotalLatency + + return newUpdate +} + +func (f *AnalyticsRecordAggregate) generateSetterForTime(parent, thisUnit string, realTime float64, newUpdate bson.M) bson.M { + constructor := parent + "." + thisUnit + "." + if parent == "" { + constructor = thisUnit + "." + } + newUpdate["$set"].(bson.M)[constructor+"requesttime"] = realTime + + return newUpdate +} + +func (f *AnalyticsRecordAggregate) latencySetter(parent, thisUnit string, newUpdate bson.M, counter *Counter) bson.M { + if counter.Hits > 0 { + counter.Latency = float64(counter.TotalLatency) / float64(counter.Hits) + counter.UpstreamLatency = float64(counter.TotalUpstreamLatency) / float64(counter.Hits) + } else { + counter.Latency = 0.0 + counter.UpstreamLatency = 0.0 + } + + constructor := parent + "." + thisUnit + "." + if parent == "" { + constructor = thisUnit + "." 
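+ // As computed above, latency and upstreamlatency are running means over the counter's totals: TotalLatency / Hits and TotalUpstreamLatency / Hits, falling back to 0 when there are no hits.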
+ } + newUpdate["$set"].(bson.M)[constructor+"latency"] = counter.Latency + newUpdate["$set"].(bson.M)[constructor+"upstreamlatency"] = counter.UpstreamLatency + + return newUpdate +} + +type Dimension struct { + Name string + Value string + Counter *Counter +} + +func fnLatencySetter(counter *Counter) *Counter { + if counter.Hits > 0 { + counter.Latency = float64(counter.TotalLatency) / float64(counter.Hits) + counter.UpstreamLatency = float64(counter.TotalUpstreamLatency) / float64(counter.Hits) + } + return counter +} + +func (g *GraphRecordAggregate) Dimensions() []Dimension { + dimensions := g.AnalyticsRecordAggregate.Dimensions() + for key, inc := range g.Types { + dimensions = append(dimensions, Dimension{Name: "types", Value: key, Counter: fnLatencySetter(inc)}) + } + + for key, inc := range g.Fields { + dimensions = append(dimensions, Dimension{Name: "fields", Value: key, Counter: fnLatencySetter(inc)}) + } + + for key, inc := range g.Operation { + dimensions = append(dimensions, Dimension{Name: "operation", Value: key, Counter: fnLatencySetter(inc)}) + } + + for key, inc := range g.RootFields { + dimensions = append(dimensions, Dimension{Name: "rootfields", Value: key, Counter: fnLatencySetter(inc)}) + } + + return dimensions +} + +func (f *AnalyticsRecordAggregate) Dimensions() (dimensions []Dimension) { + for key, inc := range f.APIID { + dimensions = append(dimensions, Dimension{"apiid", key, fnLatencySetter(inc)}) + } + + for key, inc := range f.Errors { + dimensions = append(dimensions, Dimension{"errors", key, fnLatencySetter(inc)}) + } + + for key, inc := range f.Versions { + dimensions = append(dimensions, Dimension{"versions", key, fnLatencySetter(inc)}) + } + + for key, inc := range f.APIKeys { + dimensions = append(dimensions, Dimension{"apikeys", key, fnLatencySetter(inc)}) + } + + for key, inc := range f.OauthIDs { + dimensions = append(dimensions, Dimension{"oauthids", key, fnLatencySetter(inc)}) + } + + for key, inc := range f.Geo { + dimensions = append(dimensions, Dimension{"geo", key, fnLatencySetter(inc)}) + } + + for key, inc := range f.Tags { + dimensions = append(dimensions, Dimension{"tags", key, fnLatencySetter(inc)}) + } + + for key, inc := range f.Endpoints { + dimensions = append(dimensions, Dimension{"endpoints", key, fnLatencySetter(inc)}) + } + + for key, inc := range f.KeyEndpoint { + for k, v := range inc { + dimensions = append(dimensions, Dimension{"keyendpoints", key + "." + k, fnLatencySetter(v)}) + } + } + + for key, inc := range f.OauthEndpoint { + for k, v := range inc { + dimensions = append(dimensions, Dimension{"oauthendpoints", key + "." 
+ k, fnLatencySetter(v)}) + } + } + + for key, inc := range f.ApiEndpoint { + dimensions = append(dimensions, Dimension{"apiendpoints", key, fnLatencySetter(inc)}) + } + + dimensions = append(dimensions, Dimension{"", "total", fnLatencySetter(&f.Total)}) + + return +} + +func (f *AnalyticsRecordAggregate) AsChange() (newUpdate bson.M) { + newUpdate = bson.M{ + "$inc": bson.M{}, + "$set": bson.M{}, + "$max": bson.M{}, + } + + for _, d := range f.Dimensions() { + newUpdate = f.generateBSONFromProperty(d.Name, d.Value, d.Counter, newUpdate) + } + + newUpdate = f.generateBSONFromProperty("", "total", &f.Total, newUpdate) + + asTime := f.TimeStamp + newTime := time.Date(asTime.Year(), asTime.Month(), asTime.Day(), asTime.Hour(), asTime.Minute(), 0, 0, asTime.Location()) + newUpdate["$set"].(bson.M)["timestamp"] = newTime + newUpdate["$set"].(bson.M)["expireAt"] = f.ExpireAt + newUpdate["$set"].(bson.M)["timeid.year"] = newTime.Year() + newUpdate["$set"].(bson.M)["timeid.month"] = newTime.Month() + newUpdate["$set"].(bson.M)["timeid.day"] = newTime.Day() + newUpdate["$set"].(bson.M)["timeid.hour"] = newTime.Hour() + newUpdate["$set"].(bson.M)["lasttime"] = f.LastTime + + return newUpdate +} + +func (f *AnalyticsRecordAggregate) SetErrorList(parent, thisUnit string, counter *Counter, newUpdate bson.M) { + constructor := parent + "." + thisUnit + "." + if parent == "" { + constructor = thisUnit + "." + } + + errorlist := make([]ErrorData, 0) + + for k, v := range counter.ErrorMap { + element := ErrorData{ + Code: k, + Count: v, + } + errorlist = append(errorlist, element) + } + counter.ErrorList = errorlist + + newUpdate["$set"].(bson.M)[constructor+"errorlist"] = counter.ErrorList +} + +func (f *AnalyticsRecordAggregate) getRecords(fieldName string, data map[string]*Counter, newUpdate bson.M) []Counter { + result := make([]Counter, 0) + + for thisUnit, incVal := range data { + var newTime float64 + + if incVal.Hits > 0 { + newTime = incVal.TotalRequestTime / float64(incVal.Hits) + } + f.SetErrorList(fieldName, thisUnit, incVal, newUpdate) + newUpdate = f.generateSetterForTime(fieldName, thisUnit, newTime, newUpdate) + newUpdate = f.latencySetter(fieldName, thisUnit, newUpdate, incVal) + result = append(result, *incVal) + } + + return result +} + +func (f *AnalyticsRecordAggregate) AsTimeUpdate() bson.M { + newUpdate := bson.M{ + "$set": bson.M{}, + } + + // We need to create lists of API data so that we can aggregate across the list + // in order to present top-20 style lists of APIs, Tokens etc. + // apis := make([]Counter, 0) + newUpdate["$set"].(bson.M)["lists.apiid"] = f.getRecords("apiid", f.APIID, newUpdate) + + newUpdate["$set"].(bson.M)["lists.errors"] = f.getRecords("errors", f.Errors, newUpdate) + + newUpdate["$set"].(bson.M)["lists.versions"] = f.getRecords("versions", f.Versions, newUpdate) + + newUpdate["$set"].(bson.M)["lists.apikeys"] = f.getRecords("apikeys", f.APIKeys, newUpdate) + + newUpdate["$set"].(bson.M)["lists.oauthids"] = f.getRecords("oauthids", f.OauthIDs, newUpdate) + + newUpdate["$set"].(bson.M)["lists.geo"] = f.getRecords("geo", f.Geo, newUpdate) + + newUpdate["$set"].(bson.M)["lists.tags"] = f.getRecords("tags", f.Tags, newUpdate) + + newUpdate["$set"].(bson.M)["lists.endpoints"] = f.getRecords("endpoints", f.Endpoints, newUpdate) + + for thisUnit, incVal := range f.KeyEndpoint { + parent := "lists.keyendpoints." 
+ thisUnit + newUpdate["$set"].(bson.M)[parent] = f.getRecords("keyendpoints."+thisUnit, incVal, newUpdate) + } + + for thisUnit, incVal := range f.OauthEndpoint { + parent := "lists.oauthendpoints." + thisUnit + newUpdate["$set"].(bson.M)[parent] = f.getRecords("oauthendpoints."+thisUnit, incVal, newUpdate) + } + + newUpdate["$set"].(bson.M)["lists.apiendpoints"] = f.getRecords("apiendpoints", f.ApiEndpoint, newUpdate) + + var newTime float64 + + if f.Total.Hits > 0 { + newTime = f.Total.TotalRequestTime / float64(f.Total.Hits) + } + f.SetErrorList("", "total", &f.Total, newUpdate) + newUpdate = f.generateSetterForTime("", "total", newTime, newUpdate) + newUpdate = f.latencySetter("", "total", newUpdate, &f.Total) + + return newUpdate +} + +// DiscardAggregations discards the aggregations of the fields specified in the aggregated pump configuration +func (f *AnalyticsRecordAggregate) DiscardAggregations(fields []string) { + for _, field := range fields { + switch field { + case "APIID", "apiid": + f.APIID = make(map[string]*Counter) + case "Errors", "errors": + f.Errors = make(map[string]*Counter) + case "Versions", "versions": + f.Versions = make(map[string]*Counter) + case "APIKeys", "apikeys": + f.APIKeys = make(map[string]*Counter) + case "OauthIDs", "oauthids": + f.OauthIDs = make(map[string]*Counter) + case "Geo", "geo": + f.Geo = make(map[string]*Counter) + case "Tags", "tags": + f.Tags = make(map[string]*Counter) + case "Endpoints", "endpoints": + f.Endpoints = make(map[string]*Counter) + case "KeyEndpoint", "keyendpoints": + f.KeyEndpoint = make(map[string]map[string]*Counter) + case "OauthEndpoint", "oauthendpoints": + f.OauthEndpoint = make(map[string]map[string]*Counter) + case "ApiEndpoint", "apiendpoints": + f.ApiEndpoint = make(map[string]*Counter) + default: + log.WithFields(logrus.Fields{ + "prefix": MongoAggregatePrefix, + "field": field, + }).Warning("Invalid field in the ignore list. 
Skipping.") + } + } +} + +func doHash(in string) string { + sEnc := b64.StdEncoding.EncodeToString([]byte(in)) + search := strings.TrimRight(sEnc, "=") + return search +} + +func ignoreTag(tag string, ignoreTagPrefixList []string) bool { + // ignore tag added for key by gateway + if strings.HasPrefix(tag, "key-") { + return true + } + + for _, prefix := range ignoreTagPrefixList { + if strings.HasPrefix(tag, prefix) { + return true + } + } + + return false +} + +func replaceUnsupportedChars(path string) string { + result := path + + if strings.Contains(path, ".") { + dotUnicode := fmt.Sprintf("\\u%x", ".") + result = strings.Replace(path, ".", dotUnicode, -1) + } + + return result +} + +func AggregateGraphData(data []interface{}, dbIdentifier string, aggregationTime int) map[string]GraphRecordAggregate { + aggregateMap := make(map[string]GraphRecordAggregate) + + for _, item := range data { + record, ok := item.(AnalyticsRecord) + if !ok { + continue + } + + if !record.IsGraphRecord() { + continue + } + + graphRec := record.ToGraphRecord() + + aggregate, found := aggregateMap[record.OrgID] + if !found { + aggregate = NewGraphRecordAggregate() + + // Set the hourly timestamp & expiry + asTime := record.TimeStamp + aggregate.TimeStamp = setAggregateTimestamp(dbIdentifier, asTime, aggregationTime) + aggregate.ExpireAt = record.ExpireAt + aggregate.TimeID.Year = asTime.Year() + aggregate.TimeID.Month = int(asTime.Month()) + aggregate.TimeID.Day = asTime.Day() + aggregate.TimeID.Hour = asTime.Hour() + aggregate.OrgID = record.OrgID + aggregate.LastTime = record.TimeStamp + aggregate.Total.ErrorMap = make(map[string]int) + } + + var counter Counter + aggregate.AnalyticsRecordAggregate, counter = incrementAggregate(&aggregate.AnalyticsRecordAggregate, &graphRec.AnalyticsRecord, false, nil) + // graph errors are different from http status errors and can occur even if a response is gotten. 
+ // check for graph errors and increment the error count if there are indeed graph errors + if graphRec.HasErrors && counter.ErrorTotal < 1 { + counter.ErrorTotal++ + counter.Success-- + } + c := incrementOrSetUnit(&counter, aggregate.Operation[graphRec.OperationType]) + aggregate.Operation[graphRec.OperationType] = c + aggregate.Operation[graphRec.OperationType].Identifier = graphRec.OperationType + aggregate.Operation[graphRec.OperationType].HumanIdentifier = graphRec.OperationType + + for t, fields := range graphRec.Types { + c = incrementOrSetUnit(&counter, aggregate.Types[t]) + aggregate.Types[t] = c + aggregate.Types[t].Identifier = t + aggregate.Types[t].HumanIdentifier = t + for _, f := range fields { + label := fmt.Sprintf("%s_%s", t, f) + c := incrementOrSetUnit(&counter, aggregate.Fields[label]) + aggregate.Fields[label] = c + aggregate.Fields[label].Identifier = label + aggregate.Fields[label].HumanIdentifier = label + } + } + + for _, field := range graphRec.RootFields { + c = incrementOrSetUnit(&counter, aggregate.RootFields[field]) + aggregate.RootFields[field] = c + aggregate.RootFields[field].Identifier = field + aggregate.RootFields[field].HumanIdentifier = field + } + aggregateMap[record.OrgID] = aggregate + } + return aggregateMap +} + +// AggregateData calculates aggregated data, returns map orgID => aggregated analytics data +func AggregateData(data []interface{}, trackAllPaths bool, ignoreTagPrefixList []string, dbIdentifier string, aggregationTime int) map[string]AnalyticsRecordAggregate { + analyticsPerOrg := make(map[string]AnalyticsRecordAggregate) + for _, v := range data { + thisV := v.(AnalyticsRecord) + orgID := thisV.OrgID + + if orgID == "" { + continue + } + + // We don't want to aggregate Graph Data with REST data - there is a different type for that. 
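+ // Graph records are identified by the PredefinedTagGraphAnalytics tag (see IsGraphRecord in analytics.go below) and are aggregated separately by AggregateGraphData above.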
+ if thisV.IsGraphRecord() { + continue + } + + thisAggregate, found := analyticsPerOrg[orgID] + + if !found { + thisAggregate = AnalyticsRecordAggregate{}.New() + + // Set the hourly timestamp & expiry + asTime := thisV.TimeStamp + thisAggregate.TimeStamp = setAggregateTimestamp(dbIdentifier, asTime, aggregationTime) + thisAggregate.ExpireAt = thisV.ExpireAt + thisAggregate.TimeID.Year = asTime.Year() + thisAggregate.TimeID.Month = int(asTime.Month()) + thisAggregate.TimeID.Day = asTime.Day() + thisAggregate.TimeID.Hour = asTime.Hour() + thisAggregate.OrgID = orgID + thisAggregate.LastTime = thisV.TimeStamp + thisAggregate.Total.ErrorMap = make(map[string]int) + } + thisAggregate, _ = incrementAggregate(&thisAggregate, &thisV, trackAllPaths, ignoreTagPrefixList) + analyticsPerOrg[orgID] = thisAggregate + } + + return analyticsPerOrg +} + +// incrementAggregate increments the analytic record aggregate fields using the analytics record +func incrementAggregate(aggregate *AnalyticsRecordAggregate, record *AnalyticsRecord, trackAllPaths bool, ignoreTagPrefixList []string) (AnalyticsRecordAggregate, Counter) { + // Always update the last timestamp + aggregate.LastTime = record.TimeStamp + aggregate.Total.LastTime = record.TimeStamp + + // Create the counter for this record + var thisCounter Counter + if record.ResponseCode == -1 { + thisCounter = Counter{ + LastTime: record.TimeStamp, + OpenConnections: record.Network.OpenConnections, + ClosedConnections: record.Network.ClosedConnection, + BytesIn: record.Network.BytesIn, + BytesOut: record.Network.BytesOut, + } + aggregate.Total.OpenConnections += thisCounter.OpenConnections + aggregate.Total.ClosedConnections += thisCounter.ClosedConnections + aggregate.Total.BytesIn += thisCounter.BytesIn + aggregate.Total.BytesOut += thisCounter.BytesOut + if record.APIID != "" { + c := aggregate.APIID[record.APIID] + if c == nil { + c = &Counter{ + Identifier: record.APIID, + HumanIdentifier: record.APIName, + } + aggregate.APIID[record.APIID] = c + } + c.BytesIn += thisCounter.BytesIn + c.BytesOut += thisCounter.BytesOut + } + } else { + thisCounter = Counter{ + Hits: 1, + Success: 0, + ErrorTotal: 0, + RequestTime: float64(record.RequestTime), + TotalRequestTime: float64(record.RequestTime), + LastTime: record.TimeStamp, + + MaxUpstreamLatency: record.Latency.Upstream, + MinUpstreamLatency: record.Latency.Upstream, + TotalUpstreamLatency: record.Latency.Upstream, + MaxLatency: record.Latency.Total, + MinLatency: record.Latency.Total, + TotalLatency: record.Latency.Total, + ErrorMap: make(map[string]int), + } + aggregate.Total.Hits++ + aggregate.Total.TotalRequestTime += float64(record.RequestTime) + + // We need an initial value + aggregate.Total.RequestTime = aggregate.Total.TotalRequestTime / float64(aggregate.Total.Hits) + if record.ResponseCode >= 400 { + thisCounter.ErrorTotal = 1 + thisCounter.ErrorMap[strconv.Itoa(record.ResponseCode)]++ + aggregate.Total.ErrorTotal++ + aggregate.Total.ErrorMap[strconv.Itoa(record.ResponseCode)]++ + } + + if (record.ResponseCode < 300) && (record.ResponseCode >= 200) { + thisCounter.Success = 1 + aggregate.Total.Success++ + } + + aggregate.Total.TotalLatency += record.Latency.Total + aggregate.Total.TotalUpstreamLatency += record.Latency.Upstream + + if aggregate.Total.MaxLatency < record.Latency.Total { + aggregate.Total.MaxLatency = record.Latency.Total + } + + if aggregate.Total.MaxUpstreamLatency < record.Latency.Upstream { + aggregate.Total.MaxUpstreamLatency = record.Latency.Upstream + } + + // by default, 
min_total_latency will have 0 value + // it should not be set to 0 always + if aggregate.Total.Hits == 1 { + aggregate.Total.MinLatency = record.Latency.Total + aggregate.Total.MinUpstreamLatency = record.Latency.Upstream + } else { + // Don't update min latency in case of error + if aggregate.Total.MinLatency > record.Latency.Total && (record.ResponseCode < 300) && (record.ResponseCode >= 200) { + aggregate.Total.MinLatency = record.Latency.Total + } + // Don't update min latency in case of error + if aggregate.Total.MinUpstreamLatency > record.Latency.Upstream && (record.ResponseCode < 300) && (record.ResponseCode >= 200) { + aggregate.Total.MinUpstreamLatency = record.Latency.Upstream + } + } + + if trackAllPaths { + record.TrackPath = true + } + + // Convert to a map (for easy iteration) + vAsMap := structs.Map(record) + for key, value := range vAsMap { + switch key { + case "APIID": + val, ok := value.(string) + c := incrementOrSetUnit(&thisCounter, aggregate.APIID[val]) + if val != "" && ok { + aggregate.APIID[val] = c + aggregate.APIID[val].Identifier = record.APIID + aggregate.APIID[val].HumanIdentifier = record.APIName + } + case "ResponseCode": + val, ok := value.(int) + if !ok { + break + } + errAsStr := strconv.Itoa(val) + if errAsStr != "" { + c := incrementOrSetUnit(&thisCounter, aggregate.Errors[errAsStr]) + if c.ErrorTotal > 0 { + aggregate.Errors[errAsStr] = c + aggregate.Errors[errAsStr].Identifier = errAsStr + } + } + case "APIVersion": + val, ok := value.(string) + versionStr := doHash(record.APIID + ":" + val) + c := incrementOrSetUnit(&thisCounter, aggregate.Versions[versionStr]) + if val != "" && ok { + aggregate.Versions[versionStr] = c + aggregate.Versions[versionStr].Identifier = val + aggregate.Versions[versionStr].HumanIdentifier = val + } + case "APIKey": + val, ok := value.(string) + if val != "" && ok { + c := incrementOrSetUnit(&thisCounter, aggregate.APIKeys[val]) + aggregate.APIKeys[val] = c + aggregate.APIKeys[val].Identifier = val + aggregate.APIKeys[val].HumanIdentifier = record.Alias + + if record.TrackPath { + keyStr := doHash(record.APIID + ":" + record.Path) + data := aggregate.KeyEndpoint[val] + + if data == nil { + data = make(map[string]*Counter) + } + + c = incrementOrSetUnit(&thisCounter, data[keyStr]) + c.Identifier = keyStr + c.HumanIdentifier = keyStr + data[keyStr] = c + aggregate.KeyEndpoint[val] = data + + } + } + case "OauthID": + val, ok := value.(string) + if val != "" && ok { + c := incrementOrSetUnit(&thisCounter, aggregate.OauthIDs[val]) + aggregate.OauthIDs[val] = c + aggregate.OauthIDs[val].Identifier = val + + if record.TrackPath { + keyStr := doHash(record.APIID + ":" + record.Path) + data := aggregate.OauthEndpoint[val] + + if data == nil { + data = make(map[string]*Counter) + } + + c = incrementOrSetUnit(&thisCounter, data[keyStr]) + c.Identifier = keyStr + c.HumanIdentifier = keyStr + data[keyStr] = c + aggregate.OauthEndpoint[val] = data + } + } + case "Geo": + c := incrementOrSetUnit(&thisCounter, aggregate.Geo[record.Geo.Country.ISOCode]) + if record.Geo.Country.ISOCode != "" { + aggregate.Geo[record.Geo.Country.ISOCode] = c + aggregate.Geo[record.Geo.Country.ISOCode].Identifier = record.Geo.Country.ISOCode + aggregate.Geo[record.Geo.Country.ISOCode].HumanIdentifier = record.Geo.Country.ISOCode + } + + case "Tags": + for _, thisTag := range record.Tags { + trimmedTag := TrimTag(thisTag) + + if trimmedTag != "" && !ignoreTag(thisTag, ignoreTagPrefixList) { + c := incrementOrSetUnit(&thisCounter, aggregate.Tags[trimmedTag]) 
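+ // incrementOrSetUnit (defined below) clones thisCounter when the tag is seen for the first time, otherwise it folds thisCounter's hits, errors and latency totals into the existing counter.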
+ aggregate.Tags[trimmedTag] = c + aggregate.Tags[trimmedTag].Identifier = trimmedTag + aggregate.Tags[trimmedTag].HumanIdentifier = trimmedTag + } + } + + case "TrackPath": + val, ok := value.(bool) + if !ok { + break + } + log.Debug("TrackPath=", val) + if val { + fixedPath := replaceUnsupportedChars(record.Path) + c := incrementOrSetUnit(&thisCounter, aggregate.Endpoints[fixedPath]) + aggregate.Endpoints[fixedPath] = c + aggregate.Endpoints[fixedPath].Identifier = record.Path + aggregate.Endpoints[fixedPath].HumanIdentifier = record.Path + + keyStr := hex.EncodeToString([]byte(record.APIID + ":" + record.APIVersion + ":" + record.Path)) + c = incrementOrSetUnit(&thisCounter, aggregate.ApiEndpoint[keyStr]) + aggregate.ApiEndpoint[keyStr] = c + aggregate.ApiEndpoint[keyStr].Identifier = keyStr + aggregate.ApiEndpoint[keyStr].HumanIdentifier = record.Path + } + } + } + } + return *aggregate, thisCounter +} + +// incrementOrSetUnit is a Mini function to handle incrementing a specific counter in our object +func incrementOrSetUnit(b, c *Counter) *Counter { + base := *b + if c == nil { + newCounter := base + newCounter.ErrorMap = make(map[string]int) + for k, v := range base.ErrorMap { + newCounter.ErrorMap[k] = v + } + c = &newCounter + } else { + c.Hits += base.Hits + c.Success += base.Success + c.ErrorTotal += base.ErrorTotal + for k, v := range base.ErrorMap { + c.ErrorMap[k] += v + } + c.TotalRequestTime += base.TotalRequestTime + c.RequestTime = c.TotalRequestTime / float64(c.Hits) + + if c.MaxLatency < base.MaxLatency { + c.MaxLatency = base.MaxLatency + } + + // don't update min latency in case of errors + if c.MinLatency > base.MinLatency && base.ErrorTotal == 0 { + c.MinLatency = base.MinLatency + } + + if c.MaxUpstreamLatency < base.MaxUpstreamLatency { + c.MaxUpstreamLatency = base.MaxUpstreamLatency + } + + // don't update min latency in case of errors + if c.MinUpstreamLatency > base.MinUpstreamLatency && base.ErrorTotal == 0 { + c.MinUpstreamLatency = base.MinUpstreamLatency + } + + c.TotalLatency += base.TotalLatency + c.TotalUpstreamLatency += base.TotalUpstreamLatency + + } + + return c +} + +func TrimTag(thisTag string) string { + trimmedTag := strings.TrimSpace(thisTag) + + trimmedTag = strings.ReplaceAll(trimmedTag, ".", "") + return trimmedTag +} + +// SetlastTimestampAgggregateRecord sets the last timestamp for the aggregate record +func SetlastTimestampAgggregateRecord(id string, date time.Time) { + mutex.Lock() + defer mutex.Unlock() + lastDocumentTimestamp[id] = date +} + +// getLastDocumentTimestamp gets the last timestamp for the aggregate record +func getLastDocumentTimestamp(id string) (time.Time, bool) { + mutex.RLock() + defer mutex.RUnlock() + ts, ok := lastDocumentTimestamp[id] + return ts, ok +} + +func setAggregateTimestamp(dbIdentifier string, asTime time.Time, aggregationTime int) time.Time { + // if aggregationTime is set to 60, use asTime.Hour() and group every record by hour + if aggregationTime == 60 { + return time.Date(asTime.Year(), asTime.Month(), asTime.Day(), asTime.Hour(), 0, 0, 0, asTime.Location()) + } + + // get the last document timestamp + lastDocumentTS, ok := getLastDocumentTimestamp(dbIdentifier) + emptyTime := time.Time{} + if lastDocumentTS == emptyTime || !ok { + // if it's not set, or it's empty, just set it to the current time + lastDocumentTS = time.Date(asTime.Year(), asTime.Month(), asTime.Day(), asTime.Hour(), asTime.Minute(), 0, 0, asTime.Location()) + SetlastTimestampAgggregateRecord(dbIdentifier, lastDocumentTS) + } + if 
dbIdentifier != "" { + // if aggregationTime != 60 and the database is Mongo (because we have an identifier): + if lastDocumentTS.Add(time.Minute * time.Duration(aggregationTime)).After(asTime) { + // if the last record timestamp + aggregationTime setting is after the current time, just add the new record to the current document + return lastDocumentTS + } + // if last record timestamp + amount of minutes set is before current time, just create a new record + newTime := time.Date(asTime.Year(), asTime.Month(), asTime.Day(), asTime.Hour(), asTime.Minute(), 0, 0, asTime.Location()) + SetlastTimestampAgggregateRecord(dbIdentifier, newTime) + return newTime + } + // if aggregationTime is set to 1 and DB is not Mongo, use asTime.Minute() and group every record by minute + return time.Date(asTime.Year(), asTime.Month(), asTime.Day(), asTime.Hour(), asTime.Minute(), 0, 0, asTime.Location()) +} diff --git a/vendor/github.com/TykTechnologies/tyk-pump/analytics/analytics.go b/vendor/github.com/TykTechnologies/tyk-pump/analytics/analytics.go new file mode 100644 index 00000000000..60acc6b28d5 --- /dev/null +++ b/vendor/github.com/TykTechnologies/tyk-pump/analytics/analytics.go @@ -0,0 +1,372 @@ +package analytics + +import ( + "bytes" + "fmt" + "net" + "sort" + "strconv" + "strings" + "sync/atomic" + "time" + + "github.com/fatih/structs" + "github.com/oschwald/maxminddb-golang" + "google.golang.org/protobuf/types/known/timestamppb" + + analyticsproto "github.com/TykTechnologies/tyk-pump/analytics/proto" + + "github.com/TykTechnologies/tyk-pump/logger" +) + +const ( + PredefinedTagGraphAnalytics = "tyk-graph-analytics" +) + +var log = logger.GetLogger() + +type NetworkStats struct { + OpenConnections int64 `json:"open_connections"` + ClosedConnection int64 `json:"closed_connections"` + BytesIn int64 `json:"bytes_in"` + BytesOut int64 `json:"bytes_out"` +} + +type Latency struct { + Total int64 `json:"total"` + Upstream int64 `json:"upstream"` +} + +const SQLTable = "tyk_analytics" + +// AnalyticsRecord encodes the details of a request +type AnalyticsRecord struct { + Method string `json:"method" gorm:"column:method"` + Host string `json:"host" gorm:"column:host"` + Path string `json:"path" gorm:"column:path"` + RawPath string `json:"raw_path" gorm:"column:rawpath"` + ContentLength int64 `json:"content_length" gorm:"column:contentlength"` + UserAgent string `json:"user_agent" gorm:"column:useragent"` + Day int `json:"day" sql:"-"` + Month time.Month `json:"month" sql:"-"` + Year int `json:"year" sql:"-"` + Hour int `json:"hour" sql:"-"` + ResponseCode int `json:"response_code" gorm:"column:responsecode;index"` + APIKey string `json:"api_key" gorm:"column:apikey;index"` + TimeStamp time.Time `json:"timestamp" gorm:"column:timestamp;index"` + APIVersion string `json:"api_version" gorm:"column:apiversion"` + APIName string `json:"api_name" sql:"-"` + APIID string `json:"api_id" gorm:"column:apiid;index"` + OrgID string `json:"org_id" gorm:"column:orgid;index"` + OauthID string `json:"oauth_id" gorm:"column:oauthid;index"` + RequestTime int64 `json:"request_time" gorm:"column:requesttime"` + RawRequest string `json:"raw_request" gorm:"column:rawrequest"` + RawResponse string `json:"raw_response" gorm:"column:rawresponse"` + IPAddress string `json:"ip_address" gorm:"column:ipaddress"` + Geo GeoData `json:"geo" gorm:"embedded"` + Network NetworkStats `json:"network"` + Latency Latency `json:"latency"` + Tags []string `json:"tags"` + Alias string `json:"alias"` + TrackPath bool `json:"track_path" 
gorm:"column:trackpath"` + ExpireAt time.Time `bson:"expireAt" json:"expireAt"` + ApiSchema string `json:"api_schema" bson:"-" gorm:"-:all"` +} + +func (a *AnalyticsRecord) TableName() string { + return SQLTable +} + +type GraphError struct { + Message string `json:"message"` + Path []interface{} `json:"path"` +} + +type Country struct { + ISOCode string `maxminddb:"iso_code" json:"iso_code"` +} +type City struct { + GeoNameID uint `maxminddb:"geoname_id" json:"geoname_id"` + Names map[string]string `maxminddb:"names" json:"names"` +} + +type Location struct { + Latitude float64 `maxminddb:"latitude" json:"latitude"` + Longitude float64 `maxminddb:"longitude" json:"longitude"` + TimeZone string `maxminddb:"time_zone" json:"time_zone"` +} + +type GeoData struct { + Country Country `maxminddb:"country" json:"country"` + City City `maxminddb:"city" json:"city"` + Location Location `maxminddb:"location" json:"location"` +} + +func (n *NetworkStats) GetFieldNames() []string { + return []string{ + "NetworkStats.OpenConnections", + "NetworkStats.ClosedConnection", + "NetworkStats.BytesIn", + "NetworkStats.BytesOut", + } +} + +func (l *Latency) GetFieldNames() []string { + return []string{ + "Latency.Total", + "Latency.Upstream", + } +} + +func (g *GeoData) GetFieldNames() []string { + return []string{ + "GeoData.Country.ISOCode", + "GeoData.City.GeoNameID", + "GeoData.City.Names", + "GeoData.Location.Latitude", + "GeoData.Location.Longitude", + "GeoData.Location.TimeZone", + } +} + +func (a *AnalyticsRecord) GetFieldNames() []string { + fields := []string{ + "Method", + "Host", + "Path", + "RawPath", + "ContentLength", + "UserAgent", + "Day", + "Month", + "Year", + "Hour", + "ResponseCode", + "APIKey", + "TimeStamp", + "APIVersion", + "APIName", + "APIID", + "OrgID", + "OauthID", + "RequestTime", + "RawRequest", + "RawResponse", + "IPAddress", + } + fields = append(fields, a.Geo.GetFieldNames()...) + fields = append(fields, a.Network.GetFieldNames()...) + fields = append(fields, a.Latency.GetFieldNames()...) 
+ return append(fields, "Tags", "Alias", "TrackPath", "ExpireAt") +} + +func (n *NetworkStats) GetLineValues() []string { + fields := []string{} + fields = append(fields, strconv.FormatUint(uint64(n.OpenConnections), 10)) + fields = append(fields, strconv.FormatUint(uint64(n.ClosedConnection), 10)) + fields = append(fields, strconv.FormatUint(uint64(n.BytesIn), 10)) + return append(fields, strconv.FormatUint(uint64(n.BytesOut), 10)) +} + +func (l *Latency) GetLineValues() []string { + fields := []string{} + fields = append(fields, strconv.FormatUint(uint64(l.Total), 10)) + return append(fields, strconv.FormatUint(uint64(l.Upstream), 10)) +} + +func (g *GeoData) GetLineValues() []string { + fields := []string{} + fields = append(fields, g.Country.ISOCode) + fields = append(fields, strconv.FormatUint(uint64(g.City.GeoNameID), 10)) + keys := make([]string, 0, len(g.City.Names)) + for k := range g.City.Names { + keys = append(keys, k) + } + sort.Strings(keys) + var cityNames string + first := true + for _, key := range keys { + keyval := g.City.Names[key] + if first { + first = false + cityNames = fmt.Sprintf("%s:%s", key, keyval) + } else { + cityNames = fmt.Sprintf("%s;%s:%s", cityNames, key, keyval) + } + } + fields = append(fields, cityNames) + fields = append(fields, strconv.FormatUint(uint64(g.Location.Latitude), 10)) + fields = append(fields, strconv.FormatUint(uint64(g.Location.Longitude), 10)) + return append(fields, g.Location.TimeZone) +} + +func (a *AnalyticsRecord) GetLineValues() []string { + fields := []string{} + fields = append(fields, a.Method, a.Host, a.Path, a.RawPath) + fields = append(fields, strconv.FormatUint(uint64(a.ContentLength), 10)) + fields = append(fields, a.UserAgent) + fields = append(fields, strconv.FormatUint(uint64(a.Day), 10)) + fields = append(fields, a.Month.String()) + fields = append(fields, strconv.FormatUint(uint64(a.Year), 10)) + fields = append(fields, strconv.FormatUint(uint64(a.Hour), 10)) + fields = append(fields, strconv.FormatUint(uint64(a.ResponseCode), 10)) + fields = append(fields, a.APIKey) + fields = append(fields, a.TimeStamp.String()) + fields = append(fields, a.APIVersion, a.APIName, a.APIID, a.OrgID, a.OauthID) + fields = append(fields, strconv.FormatUint(uint64(a.RequestTime), 10)) + fields = append(fields, a.RawRequest, a.RawResponse, a.IPAddress) + fields = append(fields, a.Geo.GetLineValues()...) + fields = append(fields, a.Network.GetLineValues()...) + fields = append(fields, a.Latency.GetLineValues()...) 
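+ // Tags collapse into a single ";"-separated column, mirroring the "lang:name;lang:name" encoding used for city names in GeoData.GetLineValues above.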
+ fields = append(fields, strings.Join(a.Tags[:], ";")) + fields = append(fields, a.Alias) + fields = append(fields, strconv.FormatBool(a.TrackPath)) + fields = append(fields, a.ExpireAt.String()) + return fields +} + +func (a *AnalyticsRecord) TrimRawData(size int) { + // trim RawResponse + a.RawResponse = trimString(size, a.RawResponse) + + // trim RawRequest + a.RawRequest = trimString(size, a.RawRequest) +} + +func (n *NetworkStats) Flush() NetworkStats { + s := NetworkStats{ + OpenConnections: atomic.LoadInt64(&n.OpenConnections), + ClosedConnection: atomic.LoadInt64(&n.ClosedConnection), + BytesIn: atomic.LoadInt64(&n.BytesIn), + BytesOut: atomic.LoadInt64(&n.BytesOut), + } + atomic.StoreInt64(&n.OpenConnections, 0) + atomic.StoreInt64(&n.ClosedConnection, 0) + atomic.StoreInt64(&n.BytesIn, 0) + atomic.StoreInt64(&n.BytesOut, 0) + return s +} + +func (a *AnalyticsRecord) SetExpiry(expiresInSeconds int64) { + expiry := time.Duration(expiresInSeconds) * time.Second + if expiresInSeconds == 0 { + // Expiry is set to 100 years + expiry = (24 * time.Hour) * (365 * 100) + } + + t := time.Now() + t2 := t.Add(expiry) + a.ExpireAt = t2 +} + +func trimString(size int, value string) string { + trimBuffer := bytes.Buffer{} + defer trimBuffer.Reset() + + trimBuffer.Write([]byte(value)) + if trimBuffer.Len() < size { + size = trimBuffer.Len() + } + trimBuffer.Truncate(size) + + return string(trimBuffer.Bytes()) +} + +// TimestampToProto will process timestamps and assign them to the proto record +// protobuf converts all timestamps to UTC so we need to ensure that we keep +// the same original location, in order to do so, we store the location +func (a *AnalyticsRecord) TimestampToProto(newRecord *analyticsproto.AnalyticsRecord) { + // save original location + newRecord.TimeStamp = timestamppb.New(a.TimeStamp) + newRecord.ExpireAt = timestamppb.New(a.ExpireAt) + newRecord.TimeZone = a.TimeStamp.Location().String() +} + +func (a *AnalyticsRecord) TimeStampFromProto(protoRecord analyticsproto.AnalyticsRecord) { + // get timestamp in original location + loc, err := time.LoadLocation(protoRecord.TimeZone) + if err != nil { + log.Error(err) + return + } + + // assign timestamp in original location + a.TimeStamp = protoRecord.TimeStamp.AsTime().In(loc) + a.ExpireAt = protoRecord.ExpireAt.AsTime().In(loc) +} + +func (a *AnalyticsRecord) GetGeo(ipStr string, GeoIPDB *maxminddb.Reader) { + // Not great, tightly coupled + if GeoIPDB == nil { + return + } + + geo, err := GeoIPLookup(ipStr, GeoIPDB) + if err != nil { + log.Error("GeoIP Failure (not recorded): ", err) + return + } + if geo == nil { + return + } + + log.Debug("ISO Code: ", geo.Country.ISOCode) + log.Debug("City: ", geo.City.Names["en"]) + log.Debug("Lat: ", geo.Location.Latitude) + log.Debug("Lon: ", geo.Location.Longitude) + log.Debug("TZ: ", geo.Location.TimeZone) + + a.Geo.Location = geo.Location + a.Geo.Country = geo.Country + a.Geo.City = geo.City +} + +func GeoIPLookup(ipStr string, GeoIPDB *maxminddb.Reader) (*GeoData, error) { + if ipStr == "" { + return nil, nil + } + ip := net.ParseIP(ipStr) + if ip == nil { + return nil, fmt.Errorf("invalid IP address %q", ipStr) + } + record := new(GeoData) + if err := GeoIPDB.Lookup(ip, record); err != nil { + return nil, fmt.Errorf("geoIPDB lookup of %q failed: %v", ipStr, err) + } + return record, nil +} + +func (a *AnalyticsRecord) IsGraphRecord() bool { + if len(a.Tags) == 0 { + return false + } + + for _, tag := range a.Tags { + if tag == PredefinedTagGraphAnalytics { + return true + } + } + + 
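+	// No graph analytics tag was found on this record.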
return false
+}
+
+func (a *AnalyticsRecord) RemoveIgnoredFields(ignoreFields []string) {
+	for _, fieldToIgnore := range ignoreFields {
+		found := false
+		for _, field := range structs.Fields(a) {
+			fieldTag := field.Tag("json")
+			if fieldTag == fieldToIgnore {
+				// setting field to default value
+				err := field.Zero()
+				if err != nil {
+					log.Error("Unable to ignore "+field.Name()+" field: ", err)
+				}
+				found = true
+				continue
+			}
+		}
+		if !found {
+			log.Error("Error looking for field "+fieldToIgnore+" in AnalyticsRecord struct: not found.")
+		}
+	}
+}
diff --git a/vendor/github.com/TykTechnologies/tyk-pump/analytics/analytics.proto b/vendor/github.com/TykTechnologies/tyk-pump/analytics/analytics.proto
new file mode 100644
index 00000000000..27b90ae91fa
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/tyk-pump/analytics/analytics.proto
@@ -0,0 +1,76 @@
+syntax = "proto3";
+package normalproto;
+
+/* To generate the code using this file just type:
+* protoc --go_out=. analytics.proto
+*/
+
+import "google/protobuf/timestamp.proto";
+option go_package = "proto/";
+
+message AnalyticsRecord {
+  string Host = 1;
+  string Method = 2;
+  string Path = 3;
+  string RawPath = 4;
+  int64 ContentLength = 5;
+  string UserAgent = 6;
+  int32 Day = 7;
+  int32 Month = 8;
+  int32 Year = 9;
+  int32 Hour = 10;
+  int32 ResponseCode = 11;
+  string APIKey = 12;
+  google.protobuf.Timestamp TimeStamp = 13;
+  string APIVersion = 14;
+  string APIName = 15;
+  string APIID = 16;
+  string OrgID = 17;
+  int64 RequestTime = 18;
+  Latency Latency = 19;
+  string RawRequest = 20;
+  string RawResponse = 21;
+  string IPAddress = 22;
+  GeoData Geo = 23;
+  NetworkStats Network = 24;
+  repeated string Tags = 25;
+  string Alias = 26;
+  bool TrackPath = 27;
+  google.protobuf.Timestamp ExpireAt = 28;
+  string OauthID = 29;
+  string TimeZone = 30;
+  string ApiSchema = 31;
+}
+
+message Latency {
+  int64 Total = 1;
+  int64 Upstream = 2;
+}
+
+message Country {
+  string ISOCode = 1;
+}
+
+message City {
+  map<string, string> Names = 1;
+  uint32 GeoNameID = 2;
+}
+
+message Location {
+  double Latitude = 1;
+  double Longitude = 2;
+  string TimeZone = 3;
+}
+
+message GeoData {
+  Country Country = 1;
+  City City = 2;
+  Location Location = 3;
+}
+
+message NetworkStats {
+  int64 OpenConnections = 1;
+  int64 ClosedConnections = 2;
+  int64 BytesIn = 3;
+  int64 BytesOut = 4;
+}
diff --git a/vendor/github.com/TykTechnologies/tyk-pump/analytics/analytics_filters.go b/vendor/github.com/TykTechnologies/tyk-pump/analytics/analytics_filters.go
new file mode 100644
index 00000000000..6c69ba898e4
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/tyk-pump/analytics/analytics_filters.go
@@ -0,0 +1,59 @@
+package analytics
+
+type AnalyticsFilters struct {
+	// Filters pump data by the whitelisted org_ids.
+	OrgsIDs []string `json:"org_ids"`
+	// Filters pump data by the whitelisted api_ids.
+	APIIDs []string `json:"api_ids"`
+	// Filters pump data by the whitelisted response_codes.
+	ResponseCodes []int `json:"response_codes"`
+	// Filters pump data by the blacklisted org_ids.
+	SkippedOrgsIDs []string `json:"skip_org_ids"`
+	// Filters pump data by the blacklisted api_ids.
+	SkippedAPIIDs []string `json:"skip_api_ids"`
+	// Filters pump data by the blacklisted response_codes.
+	SkippedResponseCodes []int `json:"skip_response_codes"`
+}
+
+func (filters AnalyticsFilters) ShouldFilter(record AnalyticsRecord) bool {
+	switch {
+	case len(filters.SkippedAPIIDs) > 0 && stringInSlice(record.APIID, filters.SkippedAPIIDs):
+		return true
+	case len(filters.SkippedOrgsIDs) > 0 && stringInSlice(record.OrgID, filters.SkippedOrgsIDs):
+		return true
+	case len(filters.SkippedResponseCodes) > 0 && intInSlice(record.ResponseCode, filters.SkippedResponseCodes):
+		return true
+	case len(filters.APIIDs) > 0 && !stringInSlice(record.APIID, filters.APIIDs):
+		return true
+	case len(filters.OrgsIDs) > 0 && !stringInSlice(record.OrgID, filters.OrgsIDs):
+		return true
+	case len(filters.ResponseCodes) > 0 && !intInSlice(record.ResponseCode, filters.ResponseCodes):
+		return true
+	}
+	return false
+}
+
+func (filters AnalyticsFilters) HasFilter() bool {
+	if len(filters.SkippedAPIIDs) == 0 && len(filters.SkippedOrgsIDs) == 0 && len(filters.ResponseCodes) == 0 && len(filters.APIIDs) == 0 && len(filters.OrgsIDs) == 0 && len(filters.SkippedResponseCodes) == 0 {
+		return false
+	}
+	return true
+}
+
+func stringInSlice(a string, list []string) bool {
+	for _, b := range list {
+		if b == a {
+			return true
+		}
+	}
+	return false
+}
+
+func intInSlice(a int, list []int) bool {
+	for _, b := range list {
+		if b == a {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/TykTechnologies/tyk-pump/analytics/graph_record.go b/vendor/github.com/TykTechnologies/tyk-pump/analytics/graph_record.go
new file mode 100644
index 00000000000..0a87e9259df
--- /dev/null
+++ b/vendor/github.com/TykTechnologies/tyk-pump/analytics/graph_record.go
@@ -0,0 +1,364 @@
+package analytics
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+
+	"github.com/buger/jsonparser"
+
+	"github.com/TykTechnologies/graphql-go-tools/pkg/ast"
+	"github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization"
+	"github.com/TykTechnologies/graphql-go-tools/pkg/astparser"
+	gql "github.com/TykTechnologies/graphql-go-tools/pkg/graphql"
+	"github.com/TykTechnologies/graphql-go-tools/pkg/operationreport"
+)
+
+type GraphRecord struct {
+	Types map[string][]string `gorm:"types"`
+
+	AnalyticsRecord AnalyticsRecord `bson:",inline" gorm:"embedded;embeddedPrefix:analytics_"`
+
+	OperationType string       `gorm:"column:operation_type"`
+	Variables     string       `gorm:"variables"`
+	RootFields    []string     `gorm:"root_fields"`
+	Errors        []GraphError `gorm:"errors"`
+	HasErrors     bool         `gorm:"has_errors"`
+}
+
+// parseRequest reads the raw encoded request and schema, extracting the type information,
+// operation information, and root field operations.
+// If an error is encountered, parsing stops immediately, regardless of how far along it is.
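+// Both arguments are expected to be base64-encoded: encodedRequest carries the raw
+// HTTP request containing the GraphQL query, and encodedSchema carries the schema
+// the request was served against.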
+func (g *GraphRecord) parseRequest(encodedRequest, encodedSchema string) {
+	if encodedRequest == "" || encodedSchema == "" {
+		log.Warn("empty request/schema")
+		return
+	}
+	rawRequest, err := base64.StdEncoding.DecodeString(encodedRequest)
+	if err != nil {
+		log.WithError(err).Error("error decoding raw request")
+		return
+	}
+
+	schemaBody, err := base64.StdEncoding.DecodeString(encodedSchema)
+	if err != nil {
+		log.WithError(err).Error("error decoding schema")
+		return
+	}
+
+	request, schema, operationName, err := generateNormalizedDocuments(rawRequest, schemaBody)
+	if err != nil {
+		log.WithError(err).Error("error generating document")
+		return
+	}
+
+	if len(request.Input.Variables) != 0 && string(request.Input.Variables) != "null" {
+		g.Variables = base64.StdEncoding.EncodeToString(request.Input.Variables)
+	}
+
+	// get the operation ref
+	operationRef := 0
+	if operationName != "" {
+		for i := range request.OperationDefinitions {
+			if request.OperationDefinitionNameString(i) == operationName {
+				operationRef = i
+				break
+			}
+		}
+	} else if len(request.OperationDefinitions) > 1 {
+		log.Warn("no operation name specified")
+		return
+	}
+
+	// get operation type
+	switch request.OperationDefinitions[operationRef].OperationType {
+	case ast.OperationTypeMutation:
+		g.OperationType = string(ast.DefaultMutationTypeName)
+	case ast.OperationTypeSubscription:
+		g.OperationType = string(ast.DefaultSubscriptionTypeName)
+	case ast.OperationTypeQuery:
+		g.OperationType = string(ast.DefaultQueryTypeName)
+	}
+
+	// get the selection set types to start with
+	fieldTypeList, err := extractOperationSelectionSetTypes(operationRef, &g.RootFields, request, schema)
+	if err != nil {
+		log.WithError(err).Error("error extracting selection set types")
+		return
+	}
+	typesToFieldsMap := make(map[string][]string)
+	for fieldRef, typeDefRef := range fieldTypeList {
+		if typeDefRef == ast.InvalidRef {
+			err = errors.New("invalid selection set field type")
+			log.Warn("invalid type found")
+			continue
+		}
+		extractTypesAndFields(fieldRef, typeDefRef, typesToFieldsMap, request, schema)
+	}
+	g.Types = typesToFieldsMap
+}
+
+// parseResponse looks through the encoded response string and parses information like
+// the errors
+func (g *GraphRecord) parseResponse(encodedResponse string) {
+	if encodedResponse == "" {
+		log.Warn("empty response body")
+		return
+	}
+
+	responseDecoded, err := base64.StdEncoding.DecodeString(encodedResponse)
+	if err != nil {
+		log.WithError(err).Error("error decoding response")
+		return
+	}
+	resp, err := http.ReadResponse(bufio.NewReader(bytes.NewReader(responseDecoded)), nil)
+	if err != nil {
+		log.WithError(err).Error("error reading raw response")
+		return
+	}
+	defer resp.Body.Close()
+
+	dat, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		log.WithError(err).Error("error reading response body")
+		return
+	}
+	errBytes, t, _, err := jsonparser.Get(dat, "errors")
+	// check if the errors key exists in the response
+	if err != nil && err != jsonparser.KeyPathNotFoundError {
+		// we got an unexpected error parsing the response
+		log.WithError(err).Error("error getting response errors")
+		return
+	}
+	if t != jsonparser.NotExist {
+		// errors key exists so unmarshal it
+		if err := json.Unmarshal(errBytes, &g.Errors); err != nil {
+			log.WithError(err).Error("error parsing graph errors")
+			return
+		}
+		g.HasErrors = true
+	}
+}
+
+func (a *AnalyticsRecord) ToGraphRecord() GraphRecord {
+	record := GraphRecord{
+		AnalyticsRecord: *a,
+		RootFields:      make([]string, 0),
+		Types:
make(map[string][]string), + Errors: make([]GraphError, 0), + } + if a.ResponseCode >= 400 { + record.HasErrors = true + } + + record.parseRequest(a.RawRequest, a.ApiSchema) + + record.parseResponse(a.RawResponse) + + return record +} + +// extractOperationSelectionSetTypes extracts all type names of the selection sets in the operation +// it returns a map of the FieldRef in the req to the type Definition in the schema +func extractOperationSelectionSetTypes(operationRef int, rootFields *[]string, req, schema *ast.Document) (map[int]int, error) { + fieldTypeMap := make(map[int]int) + operationDef := req.OperationDefinitions[operationRef] + if !operationDef.HasSelections { + return nil, errors.New("operation has no selection set") + } + + for _, selRef := range req.SelectionSets[operationDef.SelectionSet].SelectionRefs { + sel := req.Selections[selRef] + if sel.Kind != ast.SelectionKindField { + continue + } + // get selection field def + selFieldDefRef, err := getOperationSelectionFieldDefinition(operationDef.OperationType, req.FieldNameString(sel.Ref), schema) + if selFieldDefRef == ast.InvalidRef || err != nil { + if err != nil { + log.WithError(err).Error("error getting operation field definition") + } + return nil, errors.New("error getting selection set") + } + + *rootFields = append(*rootFields, req.FieldNameString(sel.Ref)) + + typeRef := schema.ResolveUnderlyingType(schema.FieldDefinitions[selFieldDefRef].Type) + if schema.TypeIsScalar(typeRef, schema) || schema.TypeIsEnum(typeRef, schema) { + continue + } + fieldTypeMap[sel.Ref] = getObjectTypeRefWithName(schema.TypeNameString(typeRef), schema) + } + return fieldTypeMap, nil +} + +// extractTypesAndFields extracts all types and type fields used in this request +func extractTypesAndFields(fieldRef, typeDef int, resp map[string][]string, req, schema *ast.Document) { + field := req.Fields[fieldRef] + fieldListForType := make([]string, 0) + + if !field.HasSelections { + return + } + for _, selRef := range req.SelectionSets[field.SelectionSet].SelectionRefs { + sel := req.Selections[selRef] + if sel.Kind != ast.SelectionKindField { + continue + } + fieldListForType = append(fieldListForType, req.FieldNameString(sel.Ref)) + + // get the field definition and run this function on it + fieldDefRef := getObjectFieldRefWithName(req.FieldNameString(sel.Ref), typeDef, schema) + if fieldDefRef == ast.InvalidRef { + continue + } + + fieldDefType := schema.ResolveUnderlyingType(schema.FieldDefinitions[fieldDefRef].Type) + if schema.TypeIsScalar(fieldDefType, schema) || schema.TypeIsEnum(fieldDefType, schema) { + continue + } + + objTypeRef := getObjectTypeRefWithName(schema.TypeNameString(fieldDefType), schema) + if objTypeRef == ast.InvalidRef { + continue + } + + extractTypesAndFields(sel.Ref, objTypeRef, resp, req, schema) + } + + objectTypeName := schema.ObjectTypeDefinitionNameString(typeDef) + _, ok := resp[objectTypeName] + if ok { + resp[objectTypeName] = append(resp[objectTypeName], fieldListForType...) 
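+		// Merge with any fields already collected for this type elsewhere in the query.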
+	} else {
+		resp[objectTypeName] = fieldListForType
+	}
+}
+
+// getObjectFieldRefWithName gets the object field reference from the object type using the name from the schema
+func getObjectFieldRefWithName(name string, objTypeRef int, schema *ast.Document) int {
+	objectTypeDefinition := schema.ObjectTypeDefinitions[objTypeRef]
+	if !objectTypeDefinition.HasFieldDefinitions {
+		return ast.InvalidRef
+	}
+	for _, r := range objectTypeDefinition.FieldsDefinition.Refs {
+		if schema.FieldDefinitionNameString(r) == name {
+			return r
+		}
+	}
+	return ast.InvalidRef
+}
+
+// getObjectTypeRefWithName gets the ref of the type from the schema using the name
+func getObjectTypeRefWithName(name string, schema *ast.Document) int {
+	n, ok := schema.Index.FirstNodeByNameStr(name)
+	if !ok {
+		return ast.InvalidRef
+	}
+	if n.Kind != ast.NodeKindObjectTypeDefinition {
+		return ast.InvalidRef
+	}
+	return n.Ref
+}
+
+// generateNormalizedDocuments generates and normalizes the ast documents from the raw request and the raw schema
+func generateNormalizedDocuments(requestRaw, schemaRaw []byte) (r, s *ast.Document, operationName string, err error) {
+	httpRequest, err := http.ReadRequest(bufio.NewReader(bytes.NewReader(requestRaw)))
+	if err != nil {
+		log.WithError(err).Error("error parsing request")
+		return
+	}
+	var gqlRequest gql.Request
+	err = gql.UnmarshalRequest(httpRequest.Body, &gqlRequest)
+	if err != nil {
+		log.WithError(err).Error("error unmarshalling request")
+		return
+	}
+	operationName = gqlRequest.OperationName
+
+	schema, err := gql.NewSchemaFromString(string(schemaRaw))
+	if err != nil {
+		return
+	}
+	schemaDoc, operationReport := astparser.ParseGraphqlDocumentBytes(schema.Document())
+	if operationReport.HasErrors() {
+		err = operationReport
+		return
+	}
+	s = &schemaDoc
+
+	requestDoc, operationReport := astparser.ParseGraphqlDocumentString(gqlRequest.Query)
+	if operationReport.HasErrors() {
+		err = operationReport
+		log.WithError(err).Error("error parsing request document")
+		return
+	}
+	r = &requestDoc
+	r.Input.Variables = gqlRequest.Variables
+	normalizer := astnormalization.NewWithOpts(
+		astnormalization.WithRemoveFragmentDefinitions(),
+	)
+
+	var report operationreport.Report
+	if operationName != "" {
+		normalizer.NormalizeNamedOperation(r, s, []byte(operationName), &report)
+	} else {
+		normalizer.NormalizeOperation(r, s, &report)
+	}
+	if report.HasErrors() {
+		log.WithError(report).Error("error normalizing")
+		err = report
+		return
+	}
+	return
+}
+
+// getOperationSelectionFieldDefinition gets the schema's field definition ref for the selection set of the operation type in question
+func getOperationSelectionFieldDefinition(operationType ast.OperationType, opSelectionName string, schema *ast.Document) (int, error) {
+	var (
+		node  ast.Node
+		found bool
+	)
+	switch operationType {
+	case ast.OperationTypeQuery:
+		node, found = schema.Index.FirstNodeByNameBytes(schema.Index.QueryTypeName)
+		if !found {
+			return ast.InvalidRef, fmt.Errorf("missing query type declaration")
+		}
+	case ast.OperationTypeMutation:
+		node, found = schema.Index.FirstNodeByNameBytes(schema.Index.MutationTypeName)
+		if !found {
+			return ast.InvalidRef, fmt.Errorf("missing mutation type declaration")
+		}
+	case ast.OperationTypeSubscription:
+		node, found = schema.Index.FirstNodeByNameBytes(schema.Index.SubscriptionTypeName)
+		if !found {
+			return ast.InvalidRef, fmt.Errorf("missing subscription type declaration")
+		}
+	default:
+		return
ast.InvalidRef, fmt.Errorf("unknown operation") + } + if node.Kind != ast.NodeKindObjectTypeDefinition { + return ast.InvalidRef, fmt.Errorf("invalid node type") + } + + operationObjDefinition := schema.ObjectTypeDefinitions[node.Ref] + if !operationObjDefinition.HasFieldDefinitions { + return ast.InvalidRef, nil + } + + for _, fieldRef := range operationObjDefinition.FieldsDefinition.Refs { + if opSelectionName == schema.FieldDefinitionNameString(fieldRef) { + return fieldRef, nil + } + } + + return ast.InvalidRef, fmt.Errorf("field not found") +} diff --git a/vendor/github.com/TykTechnologies/tyk-pump/analytics/proto/analytics.pb.go b/vendor/github.com/TykTechnologies/tyk-pump/analytics/proto/analytics.pb.go new file mode 100644 index 00000000000..205ad364352 --- /dev/null +++ b/vendor/github.com/TykTechnologies/tyk-pump/analytics/proto/analytics.pb.go @@ -0,0 +1,927 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.4 +// source: analytics.proto + +package proto + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type AnalyticsRecord struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Host string `protobuf:"bytes,1,opt,name=Host,proto3" json:"Host,omitempty"` + Method string `protobuf:"bytes,2,opt,name=Method,proto3" json:"Method,omitempty"` + Path string `protobuf:"bytes,3,opt,name=Path,proto3" json:"Path,omitempty"` + RawPath string `protobuf:"bytes,4,opt,name=RawPath,proto3" json:"RawPath,omitempty"` + ContentLength int64 `protobuf:"varint,5,opt,name=ContentLength,proto3" json:"ContentLength,omitempty"` + UserAgent string `protobuf:"bytes,6,opt,name=UserAgent,proto3" json:"UserAgent,omitempty"` + Day int32 `protobuf:"varint,7,opt,name=Day,proto3" json:"Day,omitempty"` + Month int32 `protobuf:"varint,8,opt,name=Month,proto3" json:"Month,omitempty"` + Year int32 `protobuf:"varint,9,opt,name=Year,proto3" json:"Year,omitempty"` + Hour int32 `protobuf:"varint,10,opt,name=Hour,proto3" json:"Hour,omitempty"` + ResponseCode int32 `protobuf:"varint,11,opt,name=ResponseCode,proto3" json:"ResponseCode,omitempty"` + APIKey string `protobuf:"bytes,12,opt,name=APIKey,proto3" json:"APIKey,omitempty"` + TimeStamp *timestamppb.Timestamp `protobuf:"bytes,13,opt,name=TimeStamp,proto3" json:"TimeStamp,omitempty"` + APIVersion string `protobuf:"bytes,14,opt,name=APIVersion,proto3" json:"APIVersion,omitempty"` + APIName string `protobuf:"bytes,15,opt,name=APIName,proto3" json:"APIName,omitempty"` + APIID string `protobuf:"bytes,16,opt,name=APIID,proto3" json:"APIID,omitempty"` + OrgID string `protobuf:"bytes,17,opt,name=OrgID,proto3" json:"OrgID,omitempty"` + RequestTime int64 `protobuf:"varint,18,opt,name=RequestTime,proto3" json:"RequestTime,omitempty"` + Latency *Latency `protobuf:"bytes,19,opt,name=Latency,proto3" json:"Latency,omitempty"` + RawRequest string `protobuf:"bytes,20,opt,name=RawRequest,proto3" json:"RawRequest,omitempty"` + RawResponse string `protobuf:"bytes,21,opt,name=RawResponse,proto3" json:"RawResponse,omitempty"` + 
IPAddress string `protobuf:"bytes,22,opt,name=IPAddress,proto3" json:"IPAddress,omitempty"` + Geo *GeoData `protobuf:"bytes,23,opt,name=Geo,proto3" json:"Geo,omitempty"` + Network *NetworkStats `protobuf:"bytes,24,opt,name=Network,proto3" json:"Network,omitempty"` + Tags []string `protobuf:"bytes,25,rep,name=Tags,proto3" json:"Tags,omitempty"` + Alias string `protobuf:"bytes,26,opt,name=Alias,proto3" json:"Alias,omitempty"` + TrackPath bool `protobuf:"varint,27,opt,name=TrackPath,proto3" json:"TrackPath,omitempty"` + ExpireAt *timestamppb.Timestamp `protobuf:"bytes,28,opt,name=ExpireAt,proto3" json:"ExpireAt,omitempty"` + OauthID string `protobuf:"bytes,29,opt,name=OauthID,proto3" json:"OauthID,omitempty"` + TimeZone string `protobuf:"bytes,30,opt,name=TimeZone,proto3" json:"TimeZone,omitempty"` + ApiSchema string `protobuf:"bytes,31,opt,name=ApiSchema,proto3" json:"ApiSchema,omitempty"` +} + +func (x *AnalyticsRecord) Reset() { + *x = AnalyticsRecord{} + if protoimpl.UnsafeEnabled { + mi := &file_analytics_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AnalyticsRecord) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AnalyticsRecord) ProtoMessage() {} + +func (x *AnalyticsRecord) ProtoReflect() protoreflect.Message { + mi := &file_analytics_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AnalyticsRecord.ProtoReflect.Descriptor instead. +func (*AnalyticsRecord) Descriptor() ([]byte, []int) { + return file_analytics_proto_rawDescGZIP(), []int{0} +} + +func (x *AnalyticsRecord) GetHost() string { + if x != nil { + return x.Host + } + return "" +} + +func (x *AnalyticsRecord) GetMethod() string { + if x != nil { + return x.Method + } + return "" +} + +func (x *AnalyticsRecord) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +func (x *AnalyticsRecord) GetRawPath() string { + if x != nil { + return x.RawPath + } + return "" +} + +func (x *AnalyticsRecord) GetContentLength() int64 { + if x != nil { + return x.ContentLength + } + return 0 +} + +func (x *AnalyticsRecord) GetUserAgent() string { + if x != nil { + return x.UserAgent + } + return "" +} + +func (x *AnalyticsRecord) GetDay() int32 { + if x != nil { + return x.Day + } + return 0 +} + +func (x *AnalyticsRecord) GetMonth() int32 { + if x != nil { + return x.Month + } + return 0 +} + +func (x *AnalyticsRecord) GetYear() int32 { + if x != nil { + return x.Year + } + return 0 +} + +func (x *AnalyticsRecord) GetHour() int32 { + if x != nil { + return x.Hour + } + return 0 +} + +func (x *AnalyticsRecord) GetResponseCode() int32 { + if x != nil { + return x.ResponseCode + } + return 0 +} + +func (x *AnalyticsRecord) GetAPIKey() string { + if x != nil { + return x.APIKey + } + return "" +} + +func (x *AnalyticsRecord) GetTimeStamp() *timestamppb.Timestamp { + if x != nil { + return x.TimeStamp + } + return nil +} + +func (x *AnalyticsRecord) GetAPIVersion() string { + if x != nil { + return x.APIVersion + } + return "" +} + +func (x *AnalyticsRecord) GetAPIName() string { + if x != nil { + return x.APIName + } + return "" +} + +func (x *AnalyticsRecord) GetAPIID() string { + if x != nil { + return x.APIID + } + return "" +} + +func (x *AnalyticsRecord) GetOrgID() string { + if x != nil { + return x.OrgID + } + return "" +} + +func 
(x *AnalyticsRecord) GetRequestTime() int64 { + if x != nil { + return x.RequestTime + } + return 0 +} + +func (x *AnalyticsRecord) GetLatency() *Latency { + if x != nil { + return x.Latency + } + return nil +} + +func (x *AnalyticsRecord) GetRawRequest() string { + if x != nil { + return x.RawRequest + } + return "" +} + +func (x *AnalyticsRecord) GetRawResponse() string { + if x != nil { + return x.RawResponse + } + return "" +} + +func (x *AnalyticsRecord) GetIPAddress() string { + if x != nil { + return x.IPAddress + } + return "" +} + +func (x *AnalyticsRecord) GetGeo() *GeoData { + if x != nil { + return x.Geo + } + return nil +} + +func (x *AnalyticsRecord) GetNetwork() *NetworkStats { + if x != nil { + return x.Network + } + return nil +} + +func (x *AnalyticsRecord) GetTags() []string { + if x != nil { + return x.Tags + } + return nil +} + +func (x *AnalyticsRecord) GetAlias() string { + if x != nil { + return x.Alias + } + return "" +} + +func (x *AnalyticsRecord) GetTrackPath() bool { + if x != nil { + return x.TrackPath + } + return false +} + +func (x *AnalyticsRecord) GetExpireAt() *timestamppb.Timestamp { + if x != nil { + return x.ExpireAt + } + return nil +} + +func (x *AnalyticsRecord) GetOauthID() string { + if x != nil { + return x.OauthID + } + return "" +} + +func (x *AnalyticsRecord) GetTimeZone() string { + if x != nil { + return x.TimeZone + } + return "" +} + +func (x *AnalyticsRecord) GetApiSchema() string { + if x != nil { + return x.ApiSchema + } + return "" +} + +type Latency struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Total int64 `protobuf:"varint,1,opt,name=Total,proto3" json:"Total,omitempty"` + Upstream int64 `protobuf:"varint,2,opt,name=Upstream,proto3" json:"Upstream,omitempty"` +} + +func (x *Latency) Reset() { + *x = Latency{} + if protoimpl.UnsafeEnabled { + mi := &file_analytics_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Latency) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Latency) ProtoMessage() {} + +func (x *Latency) ProtoReflect() protoreflect.Message { + mi := &file_analytics_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Latency.ProtoReflect.Descriptor instead. 
+func (*Latency) Descriptor() ([]byte, []int) { + return file_analytics_proto_rawDescGZIP(), []int{1} +} + +func (x *Latency) GetTotal() int64 { + if x != nil { + return x.Total + } + return 0 +} + +func (x *Latency) GetUpstream() int64 { + if x != nil { + return x.Upstream + } + return 0 +} + +type Country struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ISOCode string `protobuf:"bytes,1,opt,name=ISOCode,proto3" json:"ISOCode,omitempty"` +} + +func (x *Country) Reset() { + *x = Country{} + if protoimpl.UnsafeEnabled { + mi := &file_analytics_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Country) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Country) ProtoMessage() {} + +func (x *Country) ProtoReflect() protoreflect.Message { + mi := &file_analytics_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Country.ProtoReflect.Descriptor instead. +func (*Country) Descriptor() ([]byte, []int) { + return file_analytics_proto_rawDescGZIP(), []int{2} +} + +func (x *Country) GetISOCode() string { + if x != nil { + return x.ISOCode + } + return "" +} + +type City struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Names map[string]string `protobuf:"bytes,1,rep,name=Names,proto3" json:"Names,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + GeoNameID uint32 `protobuf:"varint,2,opt,name=GeoNameID,proto3" json:"GeoNameID,omitempty"` +} + +func (x *City) Reset() { + *x = City{} + if protoimpl.UnsafeEnabled { + mi := &file_analytics_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *City) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*City) ProtoMessage() {} + +func (x *City) ProtoReflect() protoreflect.Message { + mi := &file_analytics_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use City.ProtoReflect.Descriptor instead. 
+func (*City) Descriptor() ([]byte, []int) { + return file_analytics_proto_rawDescGZIP(), []int{3} +} + +func (x *City) GetNames() map[string]string { + if x != nil { + return x.Names + } + return nil +} + +func (x *City) GetGeoNameID() uint32 { + if x != nil { + return x.GeoNameID + } + return 0 +} + +type Location struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Latitude float64 `protobuf:"fixed64,1,opt,name=Latitude,proto3" json:"Latitude,omitempty"` + Longitude float64 `protobuf:"fixed64,2,opt,name=Longitude,proto3" json:"Longitude,omitempty"` + TimeZone string `protobuf:"bytes,3,opt,name=TimeZone,proto3" json:"TimeZone,omitempty"` +} + +func (x *Location) Reset() { + *x = Location{} + if protoimpl.UnsafeEnabled { + mi := &file_analytics_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Location) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Location) ProtoMessage() {} + +func (x *Location) ProtoReflect() protoreflect.Message { + mi := &file_analytics_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Location.ProtoReflect.Descriptor instead. +func (*Location) Descriptor() ([]byte, []int) { + return file_analytics_proto_rawDescGZIP(), []int{4} +} + +func (x *Location) GetLatitude() float64 { + if x != nil { + return x.Latitude + } + return 0 +} + +func (x *Location) GetLongitude() float64 { + if x != nil { + return x.Longitude + } + return 0 +} + +func (x *Location) GetTimeZone() string { + if x != nil { + return x.TimeZone + } + return "" +} + +type GeoData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Country *Country `protobuf:"bytes,1,opt,name=Country,proto3" json:"Country,omitempty"` + City *City `protobuf:"bytes,2,opt,name=City,proto3" json:"City,omitempty"` + Location *Location `protobuf:"bytes,3,opt,name=Location,proto3" json:"Location,omitempty"` +} + +func (x *GeoData) Reset() { + *x = GeoData{} + if protoimpl.UnsafeEnabled { + mi := &file_analytics_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GeoData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GeoData) ProtoMessage() {} + +func (x *GeoData) ProtoReflect() protoreflect.Message { + mi := &file_analytics_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GeoData.ProtoReflect.Descriptor instead. 
+func (*GeoData) Descriptor() ([]byte, []int) { + return file_analytics_proto_rawDescGZIP(), []int{5} +} + +func (x *GeoData) GetCountry() *Country { + if x != nil { + return x.Country + } + return nil +} + +func (x *GeoData) GetCity() *City { + if x != nil { + return x.City + } + return nil +} + +func (x *GeoData) GetLocation() *Location { + if x != nil { + return x.Location + } + return nil +} + +type NetworkStats struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + OpenConnections int64 `protobuf:"varint,1,opt,name=OpenConnections,proto3" json:"OpenConnections,omitempty"` + ClosedConnections int64 `protobuf:"varint,2,opt,name=ClosedConnections,proto3" json:"ClosedConnections,omitempty"` + BytesIn int64 `protobuf:"varint,3,opt,name=BytesIn,proto3" json:"BytesIn,omitempty"` + BytesOut int64 `protobuf:"varint,4,opt,name=BytesOut,proto3" json:"BytesOut,omitempty"` +} + +func (x *NetworkStats) Reset() { + *x = NetworkStats{} + if protoimpl.UnsafeEnabled { + mi := &file_analytics_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NetworkStats) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NetworkStats) ProtoMessage() {} + +func (x *NetworkStats) ProtoReflect() protoreflect.Message { + mi := &file_analytics_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NetworkStats.ProtoReflect.Descriptor instead. +func (*NetworkStats) Descriptor() ([]byte, []int) { + return file_analytics_proto_rawDescGZIP(), []int{6} +} + +func (x *NetworkStats) GetOpenConnections() int64 { + if x != nil { + return x.OpenConnections + } + return 0 +} + +func (x *NetworkStats) GetClosedConnections() int64 { + if x != nil { + return x.ClosedConnections + } + return 0 +} + +func (x *NetworkStats) GetBytesIn() int64 { + if x != nil { + return x.BytesIn + } + return 0 +} + +func (x *NetworkStats) GetBytesOut() int64 { + if x != nil { + return x.BytesOut + } + return 0 +} + +var File_analytics_proto protoreflect.FileDescriptor + +var file_analytics_proto_rawDesc = []byte{ + 0x0a, 0x0f, 0x61, 0x6e, 0x61, 0x6c, 0x79, 0x74, 0x69, 0x63, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x0b, 0x6e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, + 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, + 0xbe, 0x07, 0x0a, 0x0f, 0x41, 0x6e, 0x61, 0x6c, 0x79, 0x74, 0x69, 0x63, 0x73, 0x52, 0x65, 0x63, + 0x6f, 0x72, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x48, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x48, 0x6f, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x68, 0x6f, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, + 0x12, 0x0a, 0x04, 0x50, 0x61, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x50, + 0x61, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x52, 0x61, 0x77, 0x50, 0x61, 0x74, 0x68, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x52, 0x61, 0x77, 0x50, 0x61, 0x74, 0x68, 0x12, 0x24, 0x0a, + 0x0d, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x43, 0x6f, 
0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4c, 0x65, 0x6e, + 0x67, 0x74, 0x68, 0x12, 0x1c, 0x0a, 0x09, 0x55, 0x73, 0x65, 0x72, 0x41, 0x67, 0x65, 0x6e, 0x74, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x55, 0x73, 0x65, 0x72, 0x41, 0x67, 0x65, 0x6e, + 0x74, 0x12, 0x10, 0x0a, 0x03, 0x44, 0x61, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, + 0x44, 0x61, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x4d, 0x6f, 0x6e, 0x74, 0x68, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x05, 0x4d, 0x6f, 0x6e, 0x74, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x59, 0x65, 0x61, + 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x59, 0x65, 0x61, 0x72, 0x12, 0x12, 0x0a, + 0x04, 0x48, 0x6f, 0x75, 0x72, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x48, 0x6f, 0x75, + 0x72, 0x12, 0x22, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, + 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x41, 0x50, 0x49, 0x4b, 0x65, 0x79, 0x18, + 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x41, 0x50, 0x49, 0x4b, 0x65, 0x79, 0x12, 0x38, 0x0a, + 0x09, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x54, 0x69, + 0x6d, 0x65, 0x53, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x1e, 0x0a, 0x0a, 0x41, 0x50, 0x49, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x41, 0x50, 0x49, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x41, 0x50, 0x49, 0x4e, 0x61, + 0x6d, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x41, 0x50, 0x49, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x41, 0x50, 0x49, 0x49, 0x44, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x41, 0x50, 0x49, 0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, 0x4f, 0x72, 0x67, 0x49, 0x44, + 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x4f, 0x72, 0x67, 0x49, 0x44, 0x12, 0x20, 0x0a, + 0x0b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x18, 0x12, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x0b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, + 0x2e, 0x0a, 0x07, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x14, 0x2e, 0x6e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c, + 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x07, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x12, + 0x1e, 0x0a, 0x0a, 0x52, 0x61, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x14, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0a, 0x52, 0x61, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x20, 0x0a, 0x0b, 0x52, 0x61, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x15, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x52, 0x61, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x49, 0x50, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x16, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x49, 0x50, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, + 0x26, 0x0a, 0x03, 0x47, 0x65, 0x6f, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6e, + 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x47, 0x65, 0x6f, 0x44, 0x61, + 0x74, 0x61, 0x52, 0x03, 0x47, 0x65, 0x6f, 0x12, 0x33, 0x0a, 0x07, 0x4e, 0x65, 0x74, 0x77, 0x6f, + 0x72, 0x6b, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6e, 
0x6f, 0x72, 0x6d, 0x61, + 0x6c, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x53, 0x74, + 0x61, 0x74, 0x73, 0x52, 0x07, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x12, 0x0a, 0x04, + 0x54, 0x61, 0x67, 0x73, 0x18, 0x19, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x54, 0x61, 0x67, 0x73, + 0x12, 0x14, 0x0a, 0x05, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x54, 0x72, 0x61, 0x63, 0x6b, 0x50, + 0x61, 0x74, 0x68, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x54, 0x72, 0x61, 0x63, 0x6b, + 0x50, 0x61, 0x74, 0x68, 0x12, 0x36, 0x0a, 0x08, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x41, 0x74, + 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x52, 0x08, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x41, 0x74, 0x12, 0x18, 0x0a, 0x07, + 0x4f, 0x61, 0x75, 0x74, 0x68, 0x49, 0x44, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x4f, + 0x61, 0x75, 0x74, 0x68, 0x49, 0x44, 0x12, 0x1a, 0x0a, 0x08, 0x54, 0x69, 0x6d, 0x65, 0x5a, 0x6f, + 0x6e, 0x65, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x54, 0x69, 0x6d, 0x65, 0x5a, 0x6f, + 0x6e, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x41, 0x70, 0x69, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, + 0x1f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x41, 0x70, 0x69, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x22, 0x3b, 0x0a, 0x07, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x54, + 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x54, 0x6f, 0x74, 0x61, + 0x6c, 0x12, 0x1a, 0x0a, 0x08, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x08, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x22, 0x23, 0x0a, + 0x07, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x49, 0x53, 0x4f, 0x43, + 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x49, 0x53, 0x4f, 0x43, 0x6f, + 0x64, 0x65, 0x22, 0x92, 0x01, 0x0a, 0x04, 0x43, 0x69, 0x74, 0x79, 0x12, 0x32, 0x0a, 0x05, 0x4e, + 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6e, 0x6f, 0x72, + 0x6d, 0x61, 0x6c, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x69, 0x74, 0x79, 0x2e, 0x4e, 0x61, + 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, + 0x1c, 0x0a, 0x09, 0x47, 0x65, 0x6f, 0x4e, 0x61, 0x6d, 0x65, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x09, 0x47, 0x65, 0x6f, 0x4e, 0x61, 0x6d, 0x65, 0x49, 0x44, 0x1a, 0x38, 0x0a, + 0x0a, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x60, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x4c, 0x61, 0x74, 0x69, 0x74, 0x75, 0x64, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x08, 0x4c, 0x61, 0x74, 0x69, 0x74, 0x75, 0x64, 0x65, 0x12, + 0x1c, 0x0a, 0x09, 0x4c, 0x6f, 0x6e, 0x67, 0x69, 0x74, 0x75, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x01, 0x52, 0x09, 0x4c, 0x6f, 0x6e, 0x67, 0x69, 0x74, 0x75, 0x64, 0x65, 0x12, 0x1a, 0x0a, + 0x08, 0x54, 0x69, 0x6d, 0x65, 0x5a, 0x6f, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 
0x08, 0x54, 0x69, 0x6d, 0x65, 0x5a, 0x6f, 0x6e, 0x65, 0x22, 0x93, 0x01, 0x0a, 0x07, 0x47, 0x65, + 0x6f, 0x44, 0x61, 0x74, 0x61, 0x12, 0x2e, 0x0a, 0x07, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x72, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x43, 0x6f, + 0x75, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x25, 0x0a, 0x04, 0x43, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x6e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x43, 0x69, 0x74, 0x79, 0x52, 0x04, 0x43, 0x69, 0x74, 0x79, 0x12, 0x31, 0x0a, 0x08, + 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, + 0x2e, 0x6e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, + 0x9c, 0x01, 0x0a, 0x0c, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x73, + 0x12, 0x28, 0x0a, 0x0f, 0x4f, 0x70, 0x65, 0x6e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0f, 0x4f, 0x70, 0x65, 0x6e, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2c, 0x0a, 0x11, 0x43, 0x6c, + 0x6f, 0x73, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x11, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x64, 0x43, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x42, 0x79, 0x74, 0x65, + 0x73, 0x49, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x42, 0x79, 0x74, 0x65, 0x73, + 0x49, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x42, 0x79, 0x74, 0x65, 0x73, 0x4f, 0x75, 0x74, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x42, 0x79, 0x74, 0x65, 0x73, 0x4f, 0x75, 0x74, 0x42, 0x08, + 0x5a, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_analytics_proto_rawDescOnce sync.Once + file_analytics_proto_rawDescData = file_analytics_proto_rawDesc +) + +func file_analytics_proto_rawDescGZIP() []byte { + file_analytics_proto_rawDescOnce.Do(func() { + file_analytics_proto_rawDescData = protoimpl.X.CompressGZIP(file_analytics_proto_rawDescData) + }) + return file_analytics_proto_rawDescData +} + +var file_analytics_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_analytics_proto_goTypes = []interface{}{ + (*AnalyticsRecord)(nil), // 0: normalproto.AnalyticsRecord + (*Latency)(nil), // 1: normalproto.Latency + (*Country)(nil), // 2: normalproto.Country + (*City)(nil), // 3: normalproto.City + (*Location)(nil), // 4: normalproto.Location + (*GeoData)(nil), // 5: normalproto.GeoData + (*NetworkStats)(nil), // 6: normalproto.NetworkStats + nil, // 7: normalproto.City.NamesEntry + (*timestamppb.Timestamp)(nil), // 8: google.protobuf.Timestamp +} +var file_analytics_proto_depIdxs = []int32{ + 8, // 0: normalproto.AnalyticsRecord.TimeStamp:type_name -> google.protobuf.Timestamp + 1, // 1: normalproto.AnalyticsRecord.Latency:type_name -> normalproto.Latency + 5, // 2: normalproto.AnalyticsRecord.Geo:type_name -> normalproto.GeoData + 6, // 3: normalproto.AnalyticsRecord.Network:type_name -> normalproto.NetworkStats + 8, // 4: normalproto.AnalyticsRecord.ExpireAt:type_name -> google.protobuf.Timestamp + 7, // 5: normalproto.City.Names:type_name -> normalproto.City.NamesEntry + 2, // 6: 
normalproto.GeoData.Country:type_name -> normalproto.Country + 3, // 7: normalproto.GeoData.City:type_name -> normalproto.City + 4, // 8: normalproto.GeoData.Location:type_name -> normalproto.Location + 9, // [9:9] is the sub-list for method output_type + 9, // [9:9] is the sub-list for method input_type + 9, // [9:9] is the sub-list for extension type_name + 9, // [9:9] is the sub-list for extension extendee + 0, // [0:9] is the sub-list for field type_name +} + +func init() { file_analytics_proto_init() } +func file_analytics_proto_init() { + if File_analytics_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_analytics_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AnalyticsRecord); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_analytics_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Latency); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_analytics_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Country); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_analytics_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*City); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_analytics_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Location); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_analytics_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GeoData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_analytics_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NetworkStats); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_analytics_proto_rawDesc, + NumEnums: 0, + NumMessages: 8, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_analytics_proto_goTypes, + DependencyIndexes: file_analytics_proto_depIdxs, + MessageInfos: file_analytics_proto_msgTypes, + }.Build() + File_analytics_proto = out.File + file_analytics_proto_rawDesc = nil + file_analytics_proto_goTypes = nil + file_analytics_proto_depIdxs = nil +} diff --git a/vendor/github.com/TykTechnologies/tyk-pump/analytics/uptime_data.go b/vendor/github.com/TykTechnologies/tyk-pump/analytics/uptime_data.go new file mode 100644 index 00000000000..9db8f0f1502 --- /dev/null +++ b/vendor/github.com/TykTechnologies/tyk-pump/analytics/uptime_data.go @@ -0,0 +1,251 @@ +package analytics + +import ( + "strconv" + "time" + + "gorm.io/gorm" + + "github.com/fatih/structs" +) + +const UptimeSQLTable = "tyk_uptime_analytics" + +type UptimeReportData struct { + URL string `json:"url"` + RequestTime int64 `json:"request_time"` + ResponseCode int `json:"response_code"` + TCPError bool 
`json:"tcp_error"` + ServerError bool `json:"server_error"` + Day int `json:"day"` + Month time.Month `json:"month"` + Year int `json:"year"` + Hour int `json:"hour"` + Minute int `json:"minute"` + TimeStamp time.Time `json:"timestamp"` + ExpireAt time.Time `bson:"expireAt"` + APIID string `json:"api_id"` + OrgID string `json:"org_id"` +} + +type UptimeReportAggregateSQL struct { + ID string `gorm:"primaryKey"` + + Counter `json:"counter" gorm:"embedded"` + + TimeStamp int64 `json:"timestamp" gorm:"index:dimension, priority:1"` + OrgID string `json:"org_id" gorm:"index:dimension, priority:2"` + Dimension string `json:"dimension" gorm:"index:dimension, priority:3"` + DimensionValue string `json:"dimension_value" gorm:"index:dimension, priority:4"` + + Code `json:"code" gorm:"embedded"` +} + +func (a *UptimeReportAggregateSQL) TableName() string { + return UptimeSQLTable +} + +func OnConflictUptimeAssignments(tableName string, tempTable string) map[string]interface{} { + assignments := make(map[string]interface{}) + f := UptimeReportAggregateSQL{} + baseFields := structs.Fields(f.Code) + for _, field := range baseFields { + jsonTag := field.Tag("json") + colName := "code_" + jsonTag + assignments[colName] = gorm.Expr(tableName + "." + colName + " + " + tempTable + "." + colName) + + } + + fields := structs.Fields(f.Counter) + for _, field := range fields { + jsonTag := field.Tag("json") + colName := "counter_" + jsonTag + switch jsonTag { + case "hits", "error", "success", "total_request_time": + assignments[colName] = gorm.Expr(tableName + "." + colName + " + " + tempTable + "." + colName) + case "request_time": + if !field.IsZero() { + assignments[colName] = gorm.Expr("(" + tableName + ".counter_total_request_time +" + tempTable + "." + "counter_total_request_time" + ")/( " + tableName + ".counter_hits + " + tempTable + ".counter_hits" + ")") + } + case "last_time": + assignments[colName] = gorm.Expr(tempTable + "." 
+ colName) + } + } + return assignments +} + +func (u *UptimeReportAggregate) Dimensions() (dimensions []Dimension) { + for key, inc := range u.URL { + dimensions = append(dimensions, Dimension{"url", key, inc}) + } + + for key, inc := range u.Errors { + dimensions = append(dimensions, Dimension{"errors", key, inc}) + } + + dimensions = append(dimensions, Dimension{"", "total", &u.Total}) + + return +} + +type UptimeReportAggregate struct { + TimeStamp time.Time + OrgID string + TimeID struct { + Year int + Month int + Day int + Hour int + } + + URL map[string]*Counter + Errors map[string]*Counter + + Total Counter + + ExpireAt time.Time `bson:"expireAt" json:"expireAt"` + LastTime time.Time +} + +func (u UptimeReportAggregate) New() UptimeReportAggregate { + agg := UptimeReportAggregate{} + + agg.URL = make(map[string]*Counter) + agg.Errors = make(map[string]*Counter) + + return agg +} + +func AggregateUptimeData(data []UptimeReportData) map[string]UptimeReportAggregate { + analyticsPerOrg := make(map[string]UptimeReportAggregate) + + for _, thisV := range data { + orgID := thisV.OrgID + + if orgID == "" { + continue + } + + thisAggregate, found := analyticsPerOrg[orgID] + + if !found { + thisAggregate = UptimeReportAggregate{}.New() + + // Set the hourly timestamp & expiry + asTime := thisV.TimeStamp + thisAggregate.TimeStamp = time.Date(asTime.Year(), asTime.Month(), asTime.Day(), asTime.Hour(), 0, 0, 0, asTime.Location()) + + thisAggregate.ExpireAt = thisV.ExpireAt + thisAggregate.TimeID.Year = asTime.Year() + thisAggregate.TimeID.Month = int(asTime.Month()) + thisAggregate.TimeID.Day = asTime.Day() + thisAggregate.TimeID.Hour = asTime.Hour() + thisAggregate.OrgID = orgID + thisAggregate.LastTime = thisV.TimeStamp + thisAggregate.Total.ErrorMap = make(map[string]int) + } + + // Always update the last timestamp + thisAggregate.LastTime = thisV.TimeStamp + + // Create the counter for this record + var thisCounter Counter + if thisV.ResponseCode == -1 { + thisCounter = Counter{ + LastTime: thisV.TimeStamp, + } + if thisV.URL != "" { + c := thisAggregate.URL[thisV.URL] + if c == nil { + c = &Counter{ + Identifier: thisV.URL, + } + thisAggregate.URL[thisV.URL] = c + } + } + } else { + thisCounter = Counter{ + Hits: 1, + Success: 0, + ErrorTotal: 0, + RequestTime: float64(thisV.RequestTime), + TotalRequestTime: float64(thisV.RequestTime), + LastTime: thisV.TimeStamp, + ErrorMap: make(map[string]int), + } + thisAggregate.Total.Hits++ + thisAggregate.Total.TotalRequestTime += float64(thisV.RequestTime) + + // We need an initial value + thisAggregate.Total.RequestTime = thisAggregate.Total.TotalRequestTime / float64(thisAggregate.Total.Hits) + if thisV.ResponseCode >= 400 { + thisCounter.ErrorTotal = 1 + thisCounter.ErrorMap[strconv.Itoa(thisV.ResponseCode)]++ + thisAggregate.Total.ErrorTotal++ + thisAggregate.Total.ErrorMap[strconv.Itoa(thisV.ResponseCode)]++ + } + + if (thisV.ResponseCode < 300) && (thisV.ResponseCode >= 200) { + thisCounter.Success = 1 + thisAggregate.Total.Success++ + // using the errorMap as ResponseCode Map for SQL purpose + thisCounter.ErrorMap[strconv.Itoa(thisV.ResponseCode)]++ + thisAggregate.Total.ErrorMap[strconv.Itoa(thisV.ResponseCode)]++ + } + + // Convert to a map (for easy iteration) + vAsMap := structs.Map(thisV) + for key, value := range vAsMap { + + // Mini function to handle incrementing a specific counter in our object + IncrementOrSetUnit := func(c *Counter) *Counter { + if c == nil { + newCounter := thisCounter + newCounter.ErrorMap = 
make(map[string]int) + for k, v := range thisCounter.ErrorMap { + newCounter.ErrorMap[k] = v + } + c = &newCounter + } else { + c.Hits += thisCounter.Hits + c.Success += thisCounter.Success + c.ErrorTotal += thisCounter.ErrorTotal + for k, v := range thisCounter.ErrorMap { + c.ErrorMap[k] += v + } + c.TotalRequestTime += thisCounter.TotalRequestTime + c.RequestTime = c.TotalRequestTime / float64(c.Hits) + } + + return c + } + + switch key { + case "URL": + c := IncrementOrSetUnit(thisAggregate.URL[value.(string)]) + if value.(string) != "" { + thisAggregate.URL[value.(string)] = c + thisAggregate.URL[value.(string)].Identifier = thisV.URL + } + break + case "ResponseCode": + errAsStr := strconv.Itoa(value.(int)) + if errAsStr != "" { + c := IncrementOrSetUnit(thisAggregate.Errors[errAsStr]) + if c.ErrorTotal > 0 { + thisAggregate.Errors[errAsStr] = c + thisAggregate.Errors[errAsStr].Identifier = errAsStr + } + } + break + } + } + + } + + analyticsPerOrg[orgID] = thisAggregate + + } + + return analyticsPerOrg +} diff --git a/vendor/github.com/TykTechnologies/tyk-pump/logger/init.go b/vendor/github.com/TykTechnologies/tyk-pump/logger/init.go new file mode 100644 index 00000000000..e35edf30bea --- /dev/null +++ b/vendor/github.com/TykTechnologies/tyk-pump/logger/init.go @@ -0,0 +1,40 @@ +package logger + +import ( + "os" + "strings" + + "github.com/sirupsen/logrus" +) + +var log = logrus.New() + +func init() { + log.Level = level(os.Getenv("TYK_LOGLEVEL")) + log.Formatter = formatter() +} + +func level(level string) logrus.Level { + switch strings.ToLower(level) { + case "error": + return logrus.ErrorLevel + case "warn": + return logrus.WarnLevel + case "debug": + return logrus.DebugLevel + default: + return logrus.InfoLevel + } +} + +func formatter() *logrus.TextFormatter { + formatter := new(logrus.TextFormatter) + formatter.TimestampFormat = `Jan 02 15:04:05` + formatter.FullTimestamp = true + formatter.DisableColors = true + return formatter +} + +func GetLogger() *logrus.Logger { + return log +} diff --git a/vendor/github.com/TykTechnologies/tyk-pump/serializer/msgp.go b/vendor/github.com/TykTechnologies/tyk-pump/serializer/msgp.go new file mode 100644 index 00000000000..52ac3d9dde3 --- /dev/null +++ b/vendor/github.com/TykTechnologies/tyk-pump/serializer/msgp.go @@ -0,0 +1,29 @@ +package serializer + +import ( + "github.com/TykTechnologies/tyk-pump/analytics" + "gopkg.in/vmihailenco/msgpack.v2" +) + +type MsgpSerializer struct { +} + +func (serializer *MsgpSerializer) Encode(record *analytics.AnalyticsRecord) ([]byte, error) { + return msgpack.Marshal(record) +} + +func (serializer *MsgpSerializer) Decode(analyticsData interface{}, record *analytics.AnalyticsRecord) error { + data := []byte{} + switch analyticsData.(type) { + case string: + data = []byte(analyticsData.(string)) + case []byte: + data = analyticsData.([]byte) + } + + return msgpack.Unmarshal(data, record) +} + +func (serializer *MsgpSerializer) GetSuffix() string { + return "" +} diff --git a/vendor/github.com/TykTechnologies/tyk-pump/serializer/protobuf.go b/vendor/github.com/TykTechnologies/tyk-pump/serializer/protobuf.go new file mode 100644 index 00000000000..a90e2ae45ef --- /dev/null +++ b/vendor/github.com/TykTechnologies/tyk-pump/serializer/protobuf.go @@ -0,0 +1,151 @@ +package serializer + +import ( + "time" + + "github.com/TykTechnologies/tyk-pump/analytics" + analyticsproto "github.com/TykTechnologies/tyk-pump/analytics/proto" + "github.com/golang/protobuf/proto" +) + +type ProtobufSerializer struct { +} + 
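+// GetSuffix returns the "_protobuf" suffix used to distinguish protobuf-encoded
+// analytics records from the default msgpack records, whose serializer returns
+// an empty suffix.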
+func (pb *ProtobufSerializer) GetSuffix() string { + return "_protobuf" +} + +func (pb *ProtobufSerializer) Encode(record *analytics.AnalyticsRecord) ([]byte, error) { + protoRecord := pb.TransformSingleRecordToProto(*record) + return proto.Marshal(&protoRecord) +} + +func (pb *ProtobufSerializer) Decode(analyticsData interface{}, record *analytics.AnalyticsRecord) error { + protoData := analyticsproto.AnalyticsRecord{} + err := proto.Unmarshal(analyticsData.([]byte), &protoData) + if err != nil { + return err + } + return pb.TransformSingleProtoToAnalyticsRecord(protoData, record) +} + +func (pb *ProtobufSerializer) TransformSingleRecordToProto(rec analytics.AnalyticsRecord) analyticsproto.AnalyticsRecord { + latency := analyticsproto.Latency{ + Total: rec.Latency.Total, + Upstream: rec.Latency.Upstream, + } + + net := analyticsproto.NetworkStats{ + OpenConnections: rec.Network.OpenConnections, + ClosedConnections: rec.Network.ClosedConnection, + BytesIn: rec.Network.BytesIn, + BytesOut: rec.Network.BytesOut, + } + + geo := analyticsproto.GeoData{ + Country: &analyticsproto.Country{ + ISOCode: rec.Geo.Country.ISOCode, + }, + City: &analyticsproto.City{ + GeoNameID: uint32(rec.Geo.City.GeoNameID), + Names: rec.Geo.City.Names, + }, + Location: &analyticsproto.Location{ + Latitude: rec.Geo.Location.Latitude, + Longitude: rec.Geo.Location.Longitude, + TimeZone: rec.Geo.Location.TimeZone, + }, + } + + record := analyticsproto.AnalyticsRecord{ + Host: rec.Host, + Method: rec.Method, + Path: rec.Path, + RawPath: rec.RawPath, + ContentLength: rec.ContentLength, + UserAgent: rec.UserAgent, + Day: int32(rec.Day), + Month: int32(rec.Month), + Year: int32(rec.Year), + Hour: int32(rec.Hour), + ResponseCode: int32(rec.ResponseCode), + APIKey: rec.APIKey, + APIVersion: rec.APIVersion, + APIName: rec.APIName, + APIID: rec.APIID, + OrgID: rec.OrgID, + RequestTime: rec.RequestTime, + Latency: &latency, + RawRequest: rec.RawRequest, + RawResponse: rec.RawResponse, + IPAddress: rec.IPAddress, + Geo: &geo, + Network: &net, + Tags: rec.Tags, + Alias: rec.Alias, + TrackPath: rec.TrackPath, + OauthID: rec.OauthID, + ApiSchema: rec.ApiSchema, + } + rec.TimestampToProto(&record) + + return record +} + +func (pb *ProtobufSerializer) TransformSingleProtoToAnalyticsRecord(rec analyticsproto.AnalyticsRecord, record *analytics.AnalyticsRecord) error { + + tmpRecord := analytics.AnalyticsRecord{ + Method: rec.Method, + Host: rec.Host, + Path: rec.Path, + RawPath: rec.RawPath, + ContentLength: rec.ContentLength, + UserAgent: rec.UserAgent, + Day: int(rec.Day), + Month: time.Month(rec.Month), + Year: int(rec.Year), + Hour: int(rec.Hour), + ResponseCode: int(rec.ResponseCode), + APIKey: rec.APIKey, + APIVersion: rec.APIVersion, + APIName: rec.APIName, + APIID: rec.APIID, + OrgID: rec.OrgID, + OauthID: rec.OauthID, + RequestTime: rec.RequestTime, + RawRequest: rec.RawRequest, + RawResponse: rec.RawResponse, + IPAddress: rec.IPAddress, + Geo: analytics.GeoData{ + Country: analytics.Country{ + ISOCode: rec.Geo.Country.ISOCode, + }, + City: analytics.City{ + GeoNameID: uint(rec.Geo.City.GeoNameID), + Names: nil, + }, + Location: analytics.Location{ + Latitude: rec.Geo.Location.Latitude, + Longitude: rec.Geo.Location.Longitude, + TimeZone: rec.Geo.Location.TimeZone, + }, + }, + Network: analytics.NetworkStats{ + OpenConnections: rec.Network.OpenConnections, + ClosedConnection: rec.Network.ClosedConnections, + BytesIn: rec.Network.BytesIn, + BytesOut: rec.Network.BytesOut, + }, + Latency: analytics.Latency{ + Total: 
rec.Latency.Total, + Upstream: rec.Latency.Upstream, + }, + Tags: rec.Tags, + Alias: rec.Alias, + TrackPath: rec.TrackPath, + ApiSchema: rec.ApiSchema, + } + tmpRecord.TimeStampFromProto(rec) + *record = tmpRecord + return nil +} diff --git a/vendor/github.com/TykTechnologies/tyk-pump/serializer/serializer.go b/vendor/github.com/TykTechnologies/tyk-pump/serializer/serializer.go new file mode 100644 index 00000000000..bb5f160dd74 --- /dev/null +++ b/vendor/github.com/TykTechnologies/tyk-pump/serializer/serializer.go @@ -0,0 +1,30 @@ +package serializer + +import ( + "github.com/TykTechnologies/tyk-pump/analytics" + logger "github.com/TykTechnologies/tyk-pump/logger" +) + +var log = logger.GetLogger() + +type AnalyticsSerializer interface { + Encode(record *analytics.AnalyticsRecord) ([]byte, error) + Decode(analyticsData interface{}, record *analytics.AnalyticsRecord) error + GetSuffix() string +} + +const MSGP_SERIALIZER = "msgpack" +const PROTOBUF_SERIALIZER = "protobuf" + +func NewAnalyticsSerializer(serializerType string) AnalyticsSerializer { + switch serializerType { + case PROTOBUF_SERIALIZER: + serializer := &ProtobufSerializer{} + log.Debugf("Using serializer %v for analytics \n", PROTOBUF_SERIALIZER) + return serializer + case MSGP_SERIALIZER: + default: + log.Debugf("Using serializer %v for analytics \n", MSGP_SERIALIZER) + } + return &MsgpSerializer{} +} diff --git a/vendor/github.com/akutz/memconn/.gitignore b/vendor/github.com/akutz/memconn/.gitignore new file mode 100644 index 00000000000..8dde3e76ec8 --- /dev/null +++ b/vendor/github.com/akutz/memconn/.gitignore @@ -0,0 +1,414 @@ +*.a +*.out +*.test +*.stderr +*.stdout +*.log +.vscode/ + +# Created by https://www.gitignore.io + +### Windows ### +# Windows image file caches +Thumbs.db +ehthumbs.db + +# Folder config file +Desktop.ini + +# Recycle Bin used on file shares +$RECYCLE.BIN/ + +# Windows Installer files +*.cab +*.msi +*.msm +*.msp + +# Windows shortcuts +*.lnk + + +### OSX ### +.DS_Store +.AppleDouble +.LSOverride + +# Icon must end with two \r +Icon + + +# Thumbnails +._* + +# Files that might appear in the root of a volume +.DocumentRevisions-V100 +.fseventsd +.Spotlight-V100 +.TemporaryItems +.Trashes +.VolumeIcon.icns + +# Directories potentially created on remote AFP share +.AppleDB +.AppleDesktop +Network Trash Folder +Temporary Items +.apdisk + + +### Eclipse ### +*.pydevproject +.metadata +.gradle +bin/ +tmp/ +*.tmp +*.bak +*.swp +*~.nib +local.properties +.settings/ +.loadpath + +# Eclipse Core +.project + +# External tool builders +.externalToolBuilders/ + +# Locally stored "Eclipse launch configurations" +*.launch + +# CDT-specific +.cproject + +# JDT-specific (Eclipse Java Development Tools) +.classpath + +# PDT-specific +.buildpath + +# sbteclipse plugin +.target + +# TeXlipse plugin +.texlipse + + +### Go ### +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + + +### SublimeText ### +# cache files for sublime text +*.tmlanguage.cache +*.tmPreferences.cache +*.stTheme.cache + +# workspace files are user-specific +*.sublime-workspace + +# project files should be checked into the repository, unless a significant +# proportion of contributors will probably not be using SublimeText +# *.sublime-project + +# sftp configuration file +sftp-config.json + + +### VisualStudio 
### +## Ignore Visual Studio temporary files, build results, and +## files generated by popular Visual Studio add-ons. + +# User-specific files +*.suo +*.user +*.userosscache +*.sln.docstates + +# User-specific files (MonoDevelop/Xamarin Studio) +*.userprefs + +# Build results +[Dd]ebug/ +[Dd]ebugPublic/ +[Rr]elease/ +[Rr]eleases/ +x64/ +x86/ +build/ +bld/ +[Bb]in/ +[Oo]bj/ + +# Visual Studo 2015 cache/options directory +.vs/ + +# MSTest test Results +[Tt]est[Rr]esult*/ +[Bb]uild[Ll]og.* + +# NUNIT +*.VisualState.xml +TestResult.xml + +# Build Results of an ATL Project +[Dd]ebugPS/ +[Rr]eleasePS/ +dlldata.c + +*_i.c +*_p.c +*_i.h +*.ilk +*.meta +*.obj +*.pch +*.pdb +*.pgc +*.pgd +*.rsp +*.sbr +*.tlb +*.tli +*.tlh +*.tmp +*.tmp_proj +*.log +*.vspscc +*.vssscc +.builds +*.pidb +*.svclog +*.scc + +# Chutzpah Test files +_Chutzpah* + +# Visual C++ cache files +ipch/ +*.aps +*.ncb +*.opensdf +*.sdf +*.cachefile + +# Visual Studio profiler +*.psess +*.vsp +*.vspx + +# TFS 2012 Local Workspace +$tf/ + +# Guidance Automation Toolkit +*.gpState + +# ReSharper is a .NET coding add-in +_ReSharper*/ +*.[Rr]e[Ss]harper +*.DotSettings.user + +# JustCode is a .NET coding addin-in +.JustCode + +# TeamCity is a build add-in +_TeamCity* + +# DotCover is a Code Coverage Tool +*.dotCover + +# NCrunch +_NCrunch_* +.*crunch*.local.xml + +# MightyMoose +*.mm.* +AutoTest.Net/ + +# Web workbench (sass) +.sass-cache/ + +# Installshield output folder +[Ee]xpress/ + +# DocProject is a documentation generator add-in +DocProject/buildhelp/ +DocProject/Help/*.HxT +DocProject/Help/*.HxC +DocProject/Help/*.hhc +DocProject/Help/*.hhk +DocProject/Help/*.hhp +DocProject/Help/Html2 +DocProject/Help/html + +# Click-Once directory +publish/ + +# Publish Web Output +*.[Pp]ublish.xml +*.azurePubxml +# TODO: Comment the next line if you want to checkin your web deploy settings +# but database connection strings (with potential passwords) will be unencrypted +*.pubxml +*.publishproj + +# NuGet Packages +*.nupkg +# The packages folder can be ignored because of Package Restore +**/packages/* +# except build/, which is used as an MSBuild target. +!**/packages/build/ +# Uncomment if necessary however generally it will be regenerated when needed +#!**/packages/repositories.config + +# Windows Azure Build Output +csx/ +*.build.csdef + +# Windows Store app package directory +AppPackages/ + +# Others +*.[Cc]ache +ClientBin/ +[Ss]tyle[Cc]op.* +~$* +*~ +*.dbmdl +*.dbproj.schemaview +*.pfx +*.publishsettings +node_modules/ +bower_components/ + +# RIA/Silverlight projects +Generated_Code/ + +# Backup & report files from converting an old project file +# to a newer Visual Studio version. 
Backup files are not needed, +# because we have git ;-) +_UpgradeReport_Files/ +Backup*/ +UpgradeLog*.XML +UpgradeLog*.htm + +# SQL Server files +*.mdf +*.ldf + +# Business Intelligence projects +*.rdl.data +*.bim.layout +*.bim_*.settings + +# Microsoft Fakes +FakesAssemblies/ + +# Node.js Tools for Visual Studio +.ntvs_analysis.dat + +# Visual Studio 6 build log +*.plg + +# Visual Studio 6 workspace options file +*.opt + + +### Maven ### +target/ +pom.xml.tag +pom.xml.releaseBackup +pom.xml.versionsBackup +pom.xml.next +release.properties +dependency-reduced-pom.xml +buildNumber.properties + + +### Java ### +*.class + +# Mobile Tools for Java (J2ME) +.mtj.tmp/ + +# Package Files # +*.jar +*.war +*.ear + +# virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml +hs_err_pid* + + +### Intellij ### +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm + +*.iml + +## Directory-based project format: +.idea/ +# if you remove the above rule, at least ignore the following: + +# User-specific stuff: +# .idea/workspace.xml +# .idea/tasks.xml +# .idea/dictionaries + +# Sensitive or high-churn files: +# .idea/dataSources.ids +# .idea/dataSources.xml +# .idea/sqlDataSources.xml +# .idea/dynamic.xml +# .idea/uiDesigner.xml + +# Gradle: +# .idea/gradle.xml +# .idea/libraries + +# Mongo Explorer plugin: +# .idea/mongoSettings.xml + +## File-based project format: +*.ipr +*.iws + +## Plugin-specific files: + +# IntelliJ +/out/ + +# mpeltonen/sbt-idea plugin +.idea_modules/ + +# JIRA plugin +atlassian-ide-plugin.xml + +# Crashlytics plugin (for Android Studio and IntelliJ) +com_crashlytics_export_strings.xml +crashlytics.properties +crashlytics-build.properties diff --git a/vendor/github.com/akutz/memconn/.travis.yml b/vendor/github.com/akutz/memconn/.travis.yml new file mode 100644 index 00000000000..7658106285e --- /dev/null +++ b/vendor/github.com/akutz/memconn/.travis.yml @@ -0,0 +1,21 @@ +# Setting "sudo" to false forces Travis-CI to use its +# container-based build infrastructure, which has shorter +# queue times. +sudo: false + +# Use the newer Travis-CI build templates based on the +# Debian Linux distribution "Trusty" release. +dist: trusty + +# Select Go as the language used to run the buid. +language: go +go: + - 1.8.x + - 1.9.x + - 1.10.x +go_import_path: github.com/akutz/memconn + +install: true +script: + - make test + - make benchmark diff --git a/vendor/github.com/akutz/memconn/LICENSE b/vendor/github.com/akutz/memconn/LICENSE new file mode 100644 index 00000000000..980a15ac24e --- /dev/null +++ b/vendor/github.com/akutz/memconn/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
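The memconn package vendored below provides named, in-memory implementations of `net.Listener` and `net.Conn` (networks `memb` and `memu`). For orientation before its sources, here is a minimal sketch of serving and dialing HTTP entirely in memory; the listener name `"example"` and the handler are illustrative assumptions, not part of this diff:

```go
package main

import (
	"context"
	"fmt"
	"io"
	"net"
	"net/http"

	"github.com/akutz/memconn"
)

func main() {
	// Serve HTTP on a named, unbuffered ("memu") in-memory listener.
	lis, err := memconn.Listen("memu", "example")
	if err != nil {
		panic(err)
	}
	go http.Serve(lis, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		io.WriteString(w, "hello from memconn")
	}))

	// Route the client's dials to the same named endpoint; the host in
	// the request URL is ignored by this DialContext.
	client := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
			return memconn.DialContext(ctx, "memu", "example")
		},
	}}

	resp, err := client.Get("http://example/")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body)) // hello from memconn
}
```

Because the dial never touches the OS network stack, this pattern suits loopback traffic and tests, which is the role an in-memory listener typically plays.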
diff --git a/vendor/github.com/akutz/memconn/Makefile b/vendor/github.com/akutz/memconn/Makefile
new file mode 100644
index 00000000000..0d32c552433
--- /dev/null
+++ b/vendor/github.com/akutz/memconn/Makefile
@@ -0,0 +1,31 @@
+SHELL := /bin/bash
+
+all: build
+
+build: memconn.a
+memconn.a: $(filter-out %_test.go, $(wildcard *.go))
+	go build -o $@
+
+GO_VERSION ?= 1.9.4
+IMPORT_PATH := github.com/akutz/memconn
+
+docker-run:
+	docker run --rm -it \
+	  -v $$(pwd):/go/src/$(IMPORT_PATH) \
+	  golang:$(GO_VERSION) \
+	  make -C /go/src/$(IMPORT_PATH) $(MAKE_TARGET)
+
+BENCH ?= .
+
+benchmark:
+	go test -bench $(BENCH) -run Bench -benchmem .
+
+benchmark-go1.9:
+	MAKE_TARGET=benchmark $(MAKE) docker-run
+
+test:
+	go test
+	go test -race -run 'Race$$'
+
+test-go1.9:
+	MAKE_TARGET=test $(MAKE) docker-run
\ No newline at end of file
diff --git a/vendor/github.com/akutz/memconn/README.md b/vendor/github.com/akutz/memconn/README.md
new file mode 100644
index 00000000000..a5168c3d9b4
--- /dev/null
+++ b/vendor/github.com/akutz/memconn/README.md
@@ -0,0 +1,38 @@
+# MemConn [![GoDoc](https://godoc.org/github.com/akutz/memconn?status.svg)](http://godoc.org/github.com/akutz/memconn) [![Build Status](http://travis-ci.org/akutz/memconn.svg?branch=master)](https://travis-ci.org/akutz/memconn) [![Go Report Card](http://goreportcard.com/badge/akutz/memconn)](http://goreportcard.com/report/akutz/memconn)
+MemConn provides named, in-memory network connections for Go.
+
+## Create a Server
+A new `net.Listener` used to serve HTTP, gRPC, etc. is created with
+`memconn.Listen`:
+
+```go
+lis, err := memconn.Listen("memu", "UniqueName")
+```
+
+## Create a Client (Dial)
+Clients can dial any named connection:
+
+```go
+client, err := memconn.Dial("memu", "UniqueName")
+```
+
+## Network Types
+MemConn supports the following network types:
+
+| Network | Description |
+|---------|-------------|
+| `memb` | A buffered, in-memory implementation of `net.Conn` |
+| `memu` | An unbuffered, in-memory implementation of `net.Conn` |
+
+## Performance
+The benchmark results illustrate MemConn's performance versus TCP
+and UNIX domain sockets:
+
+![ops](https://imgur.com/o8mXla6.png "Ops (Larger is Better)")
+![ns/op](https://imgur.com/8YvPmMU.png "Nanoseconds/Op (Smaller is Better)")
+![B/op](https://imgur.com/vQSfIR2.png "Bytes/Op (Smaller is Better)")
+![allocs/op](https://imgur.com/k263257.png "Allocs/Op (Smaller is Better)")
+
+MemConn is more performant than TCP and UNIX domain sockets with respect
+to the CPU. While MemConn does allocate more memory, this is to be expected
+since MemConn is an in-memory implementation of the `net.Conn` interface.
diff --git a/vendor/github.com/akutz/memconn/VERSION b/vendor/github.com/akutz/memconn/VERSION
new file mode 100644
index 00000000000..6e8bf73aa55
--- /dev/null
+++ b/vendor/github.com/akutz/memconn/VERSION
@@ -0,0 +1 @@
+0.1.0
diff --git a/vendor/github.com/akutz/memconn/memconn.go b/vendor/github.com/akutz/memconn/memconn.go
new file mode 100644
index 00000000000..ae7b5442575
--- /dev/null
+++ b/vendor/github.com/akutz/memconn/memconn.go
@@ -0,0 +1,110 @@
+package memconn
+
+import (
+	"context"
+	"net"
+)
+
+const (
+	// networkMemb is a buffered network connection. Write operations
+	// do not block as they are buffered instead of waiting on a
+	// matching Read operation.
+	networkMemb = "memb"
+
+	// networkMemu is an unbuffered network connection. Write operations
+	// block until they are matched by a Read operation on the other side
+	// of the connected pipe.
+	networkMemu = "memu"
+
+	// addrLocalhost is a reserved address name. It is used when a
+	// Listen variant omits the local address or a Dial variant omits
+	// the remote address.
+	addrLocalhost = "localhost"
+)
+
+// provider is the package's default provider instance. All of the
+// package-level functions interact with this object.
+var provider Provider
+
+// MapNetwork enables mapping the network value provided to the default
+// Provider's Dial and Listen functions from the specified "from" value
+// to the specified "to" value.
+//
+// For example, calling MapNetwork("tcp", "memu") means a subsequent
+// Dial("tcp", "address") gets translated to Dial("memu", "address").
+//
+// Calling MapNetwork("tcp", "") removes any previous translation for
+// the "tcp" network.
+func MapNetwork(from, to string) {
+	provider.MapNetwork(from, to)
+}
+
+// Listen begins listening at address for the specified network.
+//
+// Known networks are "memb" (memconn buffered) and "memu" (memconn unbuffered).
+//
+// When the specified address is already in use on the specified
+// network an error is returned.
+//
+// When the provided network is unknown the operation defers to
+// net.Listen.
+func Listen(network, address string) (net.Listener, error) {
+	return provider.Listen(network, address)
+}
+
+// ListenMem begins listening at laddr.
+//
+// Known networks are "memb" (memconn buffered) and "memu" (memconn unbuffered).
+//
+// If laddr is nil then ListenMem listens on "localhost" on the
+// specified network.
+func ListenMem(network string, laddr *Addr) (*Listener, error) {
+	return provider.ListenMem(network, laddr)
+}
+
+// Dial dials a named connection.
+//
+// Known networks are "memb" (memconn buffered) and "memu" (memconn unbuffered).
+//
+// When the provided network is unknown the operation defers to
+// net.Dial.
+func Dial(network, address string) (net.Conn, error) {
+	return provider.Dial(network, address)
+}
+
+// DialContext dials a named connection using a
+// Go context to provide timeout behavior.
+//
+// Please see Dial for more information.
+func DialContext(
+	ctx context.Context,
+	network, address string) (net.Conn, error) {
+
+	return provider.DialContext(ctx, network, address)
+}
+
+// DialMem dials a named connection.
+//
+// Known networks are "memb" (memconn buffered) and "memu" (memconn unbuffered).
+//
+// If laddr is nil then a new address is generated using
+// time.Now().UnixNano(). Please note that client addresses are
+// not required to be unique.
+//
+// If raddr is nil then the "localhost" endpoint is used on the
+// specified network.
+func DialMem(network string, laddr, raddr *Addr) (*Conn, error) {
+	return provider.DialMem(network, laddr, raddr)
+}
+
+// DialMemContext dials a named connection using a
+// Go context to provide timeout behavior.
+//
+// Please see DialMem for more information.
+func DialMemContext(
+	ctx context.Context,
+	network string,
+	laddr, raddr *Addr) (*Conn, error) {

+	return provider.DialMemContext(ctx, network, laddr, raddr)
+}
diff --git a/vendor/github.com/akutz/memconn/memconn_addr.go b/vendor/github.com/akutz/memconn/memconn_addr.go
new file mode 100644
index 00000000000..6f417877a05
--- /dev/null
+++ b/vendor/github.com/akutz/memconn/memconn_addr.go
@@ -0,0 +1,25 @@
+package memconn
+
+// Addr represents the address of an in-memory endpoint.
+type Addr struct {
+	// Name is the name of the endpoint.
+	Name string
+
+	network string
+}
+
+// Buffered indicates whether or not the address refers to a buffered
+// network type.
+func (a Addr) Buffered() bool {
+	return a.network == networkMemb
+}
+
+// Network returns the address's network.
+func (a Addr) Network() string {
+	return a.network
+}
+
+// String returns the address's name.
+func (a Addr) String() string {
+	return a.Name
+}
diff --git a/vendor/github.com/akutz/memconn/memconn_conn.go b/vendor/github.com/akutz/memconn/memconn_conn.go
new file mode 100644
index 00000000000..21e2e71b439
--- /dev/null
+++ b/vendor/github.com/akutz/memconn/memconn_conn.go
@@ -0,0 +1,434 @@
+package memconn
+
+import (
+	"net"
+	"sync"
+	"time"
+)
+
+// Conn is an in-memory implementation of Golang's "net.Conn" interface.
+type Conn struct {
+	pipe
+
+	laddr Addr
+	raddr Addr
+
+	// buf contains information about the connection's buffer state if
+	// the connection is buffered. Otherwise this field is nil.
+	buf *bufConn
+}
+
+type bufConn struct {
+	// Please see the SetCopyOnWrite function for more information.
+	cow bool
+
+	// Please see the SetBufferSize function for more information.
+	max uint64
+
+	// cur is the amount of buffered, pending Write data
+	cur uint64
+
+	// cond is a condition used to wait when writing buffered data
+	cond sync.Cond
+
+	// mu is the mutex used by the condition. The mutex is exposed
+	// directly in order to access RLock and RUnlock for getting the
+	// buffer size.
+	mu sync.RWMutex
+
+	// errs is the error channel returned by the Errs() function and
+	// used to report errors that occur as a result of buffered write
+	// operations. If the pipe does not use buffered writes then this
+	// field will always be nil.
+	errs chan error
+
+	// Please see the SetCloseTimeout function for more information.
+	closeTimeout time.Duration
+}
+
+func makeNewConns(network string, laddr, raddr Addr) (*Conn, *Conn) {
+	// This code is duplicated from the Pipe() function from the file
+	// "memconn_pipe.go". The reason for the duplication is to optimize
+	// the performance by removing the need to wrap the *pipe values as
+	// interface{} objects out of the Pipe() function and assert them
+	// back as *pipe objects in this function.
+	cb1 := make(chan []byte)
+	cb2 := make(chan []byte)
+	cn1 := make(chan int)
+	cn2 := make(chan int)
+	done1 := make(chan struct{})
+	done2 := make(chan struct{})
+
+	// Wrap the pipes with Conn to support:
+	//
+	// * Returning the correct address information from the
+	//   LocalAddr() and RemoteAddr() functions.
+	// * Errors returned from the internal pipe are checked and
+	//   have their internal OpError addr information replaced with
+	//   the correct address information.
+	// * A channel can be set up so that closing the Listener
+	//   closes the remoteConn immediately.
+	// * Buffered writes
+	local := &Conn{
+		pipe: pipe{
+			rdRx: cb1, rdTx: cn1,
+			wrTx: cb2, wrRx: cn2,
+			localDone: done1, remoteDone: done2,
+			readDeadline:  makePipeDeadline(),
+			writeDeadline: makePipeDeadline(),
+		},
+		laddr: laddr,
+		raddr: raddr,
+	}
+	remote := &Conn{
+		pipe: pipe{
+			rdRx: cb2, rdTx: cn2,
+			wrTx: cb1, wrRx: cn1,
+			localDone: done2, remoteDone: done1,
+			readDeadline:  makePipeDeadline(),
+			writeDeadline: makePipeDeadline(),
+		},
+		laddr: raddr,
+		raddr: laddr,
+	}
+
+	if laddr.Buffered() {
+		local.buf = &bufConn{
+			errs:         make(chan error),
+			closeTimeout: 10 * time.Second,
+		}
+		local.buf.cond.L = &local.buf.mu
+	}
+
+	if raddr.Buffered() {
+		remote.buf = &bufConn{
+			errs:         make(chan error),
+			closeTimeout: 10 * time.Second,
+		}
+		remote.buf.cond.L = &remote.buf.mu
+	}
+
+	return local, remote
+}
+
+// LocalBuffered returns a flag indicating whether or not the local side
+// of the connection is buffered.
+func (c *Conn) LocalBuffered() bool {
+	return c.laddr.Buffered()
+}
+
+// RemoteBuffered returns a flag indicating whether or not the remote side
+// of the connection is buffered.
+func (c *Conn) RemoteBuffered() bool {
+	return c.raddr.Buffered()
+}
+
+// BufferSize gets the number of bytes allowed to be queued for
+// asynchronous Write operations.
+//
+// Please note that this function will always return zero for unbuffered
+// connections.
+//
+// Please see the function SetBufferSize for more information.
+func (c *Conn) BufferSize() uint64 {
+	if c.laddr.Buffered() {
+		c.buf.mu.RLock()
+		defer c.buf.mu.RUnlock()
+		return c.buf.max
+	}
+	return 0
+}
+
+// SetBufferSize sets the number of bytes allowed to be queued for
+// asynchronous Write operations. Once the amount of data pending a Write
+// operation exceeds the specified size, subsequent Writes will
+// block until the queued data no longer exceeds the allowed ceiling.
+//
+// A value of zero means no maximum is defined.
+//
+// If a Write operation's payload length exceeds the buffer size
+// (except for zero) then the Write operation is handled synchronously.
+//
+// Please note that setting the buffer size has no effect on unbuffered
+// connections.
+func (c *Conn) SetBufferSize(i uint64) {
+	if c.laddr.Buffered() {
+		c.buf.cond.L.Lock()
+		defer c.buf.cond.L.Unlock()
+		c.buf.max = i
+	}
+}
+
+// CloseTimeout gets the time.Duration value used when closing buffered
+// connections.
+//
+// Please note that this function will always return zero for
+// unbuffered connections.
+//
+// Please see the function SetCloseTimeout for more information.
+func (c *Conn) CloseTimeout() time.Duration {
+	if c.laddr.Buffered() {
+		c.buf.mu.RLock()
+		defer c.buf.mu.RUnlock()
+		return c.buf.closeTimeout
+	}
+	return 0
+}
+
+// SetCloseTimeout sets a time.Duration value used by the Close function
+// to determine the amount of time to wait for pending, buffered Writes
+// to complete before closing the connection.
+//
+// The default timeout value is 10 seconds. A zero value does not
+// mean there is no timeout, rather it means the timeout is immediate.
+//
+// Please note that setting this value has no effect on unbuffered
+// connections.
+func (c *Conn) SetCloseTimeout(duration time.Duration) {
+	if c.laddr.Buffered() {
+		c.buf.cond.L.Lock()
+		defer c.buf.cond.L.Unlock()
+		c.buf.closeTimeout = duration
+	}
+}
+
+// CopyOnWrite gets a flag indicating whether or not copy-on-write is
+// enabled for this connection.
+//
+// Please note that this function will always return false for
+// unbuffered connections.
+// +// Please see the function SetCopyOnWrite for more information. +func (c *Conn) CopyOnWrite() bool { + if c.laddr.Buffered() { + c.buf.mu.RLock() + defer c.buf.mu.RUnlock() + return c.buf.cow + } + return false +} + +// SetCopyOnWrite sets a flag indicating whether or not copy-on-write +// is enabled for this connection. +// +// When a connection is buffered, data submitted to a Write operation +// is processed in a goroutine and the function returns control to the +// caller immediately. Because of this, it's possible to modify the +// data provided to the Write function before or during the actual +// Write operation. Enabling copy-on-write causes the payload to be +// copied to a new buffer before control is returned to the caller. +// +// Please note that enabling copy-on-write will double the amount of +// memory required for all Write operations. +// +// Please note that enabling copy-on-write has no effect on unbuffered +// connections. +func (c *Conn) SetCopyOnWrite(enabled bool) { + if c.laddr.Buffered() { + c.buf.cond.L.Lock() + defer c.buf.cond.L.Unlock() + c.buf.cow = enabled + } +} + +// LocalAddr implements the net.Conn LocalAddr method. +func (c *Conn) LocalAddr() net.Addr { + return c.laddr +} + +// RemoteAddr implements the net.Conn RemoteAddr method. +func (c *Conn) RemoteAddr() net.Addr { + return c.raddr +} + +// Close implements the net.Conn Close method. +func (c *Conn) Close() error { + c.pipe.once.Do(func() { + + // Buffered connections will attempt to wait until all + // pending Writes are completed, until the specified + // timeout value has elapsed, or until the remote side + // of the connection is closed. + if c.laddr.Buffered() { + c.buf.mu.RLock() + timeout := c.buf.closeTimeout + c.buf.mu.RUnlock() + + // Set up a channel that is closed when the specified + // timer elapses. + timeoutDone := make(chan struct{}) + if timeout == 0 { + close(timeoutDone) + } else { + time.AfterFunc(timeout, func() { close(timeoutDone) }) + } + + // Set up a channel that is closed when the number of + // pending bytes is zero. + writesDone := make(chan struct{}) + go func() { + c.buf.cond.L.Lock() + for c.buf.cur > 0 { + c.buf.cond.Wait() + } + close(writesDone) + c.buf.cond.L.Unlock() + }() + + // Wait to close the connection. + select { + case <-writesDone: + case <-timeoutDone: + case <-c.pipe.remoteDone: + } + } + + close(c.pipe.localDone) + }) + return nil +} + +// Errs returns a channel that receives errors that may occur as the +// result of buffered write operations. +// +// This function will always return nil for unbuffered connections. +// +// Please note that the channel returned by this function is not closed +// when the connection is closed. This is because errors may continue +// to be sent over this channel as the result of asynchronous writes +// occurring after the connection is closed. Therefore this channel +// should not be used to determine when the connection is closed. +func (c *Conn) Errs() <-chan error { + return c.buf.errs +} + +// Read implements the net.Conn Read method. +func (c *Conn) Read(b []byte) (int, error) { + n, err := c.pipe.Read(b) + if err != nil { + if e, ok := err.(*net.OpError); ok { + e.Addr = c.raddr + e.Source = c.laddr + return n, e + } + return n, &net.OpError{ + Op: "read", + Addr: c.raddr, + Source: c.laddr, + Net: c.raddr.Network(), + Err: err, + } + } + return n, nil +} + +// Write implements the net.Conn Write method. 
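+// For buffered ("memb") connections the payload is queued and written
+// asynchronously via writeAsync; unbuffered ("memu") connections block
+// in writeSync until the remote side reads.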
+func (c *Conn) Write(b []byte) (int, error) { + if c.laddr.Buffered() { + return c.writeAsync(b) + } + return c.writeSync(b) +} + +func (c *Conn) writeSync(b []byte) (int, error) { + n, err := c.pipe.Write(b) + if err != nil { + if e, ok := err.(*net.OpError); ok { + e.Addr = c.raddr + e.Source = c.laddr + return n, e + } + return n, &net.OpError{ + Op: "write", + Addr: c.raddr, + Source: c.laddr, + Net: c.raddr.Network(), + Err: err, + } + } + return n, nil +} + +// writeAsync performs the Write operation in a goroutine. This +// behavior means the Write operation is not blocking, but also means +// that when Write operations fail the associated error is not returned +// from this function. +func (c *Conn) writeAsync(b []byte) (int, error) { + // Perform a synchronous Write if the connection has a non-zero + // value for the maximum allowed buffer size and if the size of + // the payload exceeds that maximum value. + if c.buf.max > 0 && uint64(len(b)) > c.buf.max { + return c.writeSync(b) + } + + // Block the operation from proceeding until there is available + // buffer space. + c.buf.cond.L.Lock() + for c.buf.max > 0 && uint64(len(b))+c.buf.cur > c.buf.max { + c.buf.cond.Wait() + } + + // Copy the buffer if the connection uses copy-on-write. + cb := b + if c.buf.cow { + cb = make([]byte, len(b)) + copy(cb, b) + } + + // Update the amount of active data being written. + c.buf.cur = c.buf.cur + uint64(len(cb)) + + c.buf.cond.L.Unlock() + + go func() { + if _, err := c.writeSync(cb); err != nil { + go func() { c.buf.errs <- err }() + } + + // Decrement the enqueued buffer size and signal a blocked + // goroutine that it may proceed + c.buf.cond.L.Lock() + c.buf.cur = c.buf.cur - uint64(len(cb)) + c.buf.cond.L.Unlock() + c.buf.cond.Signal() + }() + return len(cb), nil +} + +// SetReadDeadline implements the net.Conn SetReadDeadline method. +func (c *Conn) SetReadDeadline(t time.Time) error { + if err := c.pipe.SetReadDeadline(t); err != nil { + if e, ok := err.(*net.OpError); ok { + e.Addr = c.laddr + e.Source = c.laddr + return e + } + return &net.OpError{ + Op: "setReadDeadline", + Addr: c.laddr, + Source: c.laddr, + Net: c.laddr.Network(), + Err: err, + } + } + return nil +} + +// SetWriteDeadline implements the net.Conn SetWriteDeadline method. +func (c *Conn) SetWriteDeadline(t time.Time) error { + if err := c.pipe.SetWriteDeadline(t); err != nil { + if e, ok := err.(*net.OpError); ok { + e.Addr = c.laddr + e.Source = c.laddr + return e + } + return &net.OpError{ + Op: "setWriteDeadline", + Addr: c.laddr, + Source: c.laddr, + Net: c.laddr.Network(), + Err: err, + } + } + return nil +} diff --git a/vendor/github.com/akutz/memconn/memconn_listener.go b/vendor/github.com/akutz/memconn/memconn_listener.go new file mode 100644 index 00000000000..91b5972851c --- /dev/null +++ b/vendor/github.com/akutz/memconn/memconn_listener.go @@ -0,0 +1,105 @@ +package memconn + +import ( + "context" + "errors" + "net" + "sync" +) + +// Listener implements the net.Listener interface. +type Listener struct { + addr Addr + once sync.Once + rcvr chan *Conn + done chan struct{} + rmvd chan struct{} +} + +func (l *Listener) dial( + ctx context.Context, + network string, + laddr, raddr Addr) (*Conn, error) { + + local, remote := makeNewConns(network, laddr, raddr) + + // TODO Figure out if this logic is valid. + // + // Start a goroutine that closes the remote side of the connection + // as soon as the listener's done channel is no longer blocked. 
+	//go func() {
+	//	<-l.done
+	//	remoteConn.Close()
+	//}()
+
+	// If the provided context is nil then announce a new connection
+	// by placing the new remoteConn onto the rcvr channel. An Accept
+	// call from this listener will remove the remoteConn from the channel.
+	if ctx == nil {
+		l.rcvr <- remote
+		return local, nil
+	}
+
+	// Announce a new connection by placing the new remoteConn
+	// onto the rcvr channel. An Accept call from this listener will
+	// remove the remoteConn from the channel. However, if that does
+	// not occur by the time the context times out / is cancelled, then
+	// an error is returned.
+	select {
+	case l.rcvr <- remote:
+		return local, nil
+	case <-ctx.Done():
+		local.Close()
+		remote.Close()
+		return nil, &net.OpError{
+			Addr:   raddr,
+			Source: laddr,
+			Net:    network,
+			Op:     "dial",
+			Err:    ctx.Err(),
+		}
+	}
+}
+
+// Accept implements the net.Listener Accept method.
+func (l *Listener) Accept() (net.Conn, error) {
+	return l.AcceptMemConn()
+}
+
+// AcceptMemConn implements the net.Listener Accept method logic and
+// returns a *memconn.Conn object.
+func (l *Listener) AcceptMemConn() (*Conn, error) {
+	select {
+	case remoteConn, ok := <-l.rcvr:
+		if ok {
+			return remoteConn, nil
+		}
+		return nil, &net.OpError{
+			Addr:   l.addr,
+			Source: l.addr,
+			Net:    l.addr.Network(),
+			Err:    errors.New("listener closed"),
+		}
+	case <-l.done:
+		return nil, &net.OpError{
+			Addr:   l.addr,
+			Source: l.addr,
+			Net:    l.addr.Network(),
+			Err:    errors.New("listener closed"),
+		}
+	}
+}
+
+// Close implements the net.Listener Close method.
+func (l *Listener) Close() error {
+	l.once.Do(func() {
+		close(l.done)
+		<-l.rmvd
+	})
+	return nil
+}
+
+// Addr implements the net.Listener Addr method.
+func (l *Listener) Addr() net.Addr {
+	return l.addr
+}
diff --git a/vendor/github.com/akutz/memconn/memconn_pipe.go b/vendor/github.com/akutz/memconn/memconn_pipe.go
new file mode 100644
index 00000000000..a3a2a29fea5
--- /dev/null
+++ b/vendor/github.com/akutz/memconn/memconn_pipe.go
@@ -0,0 +1,265 @@
+// This file was copied from Go stdlib "net/pipe.go"
+// and modified in order to optimally support:
+//
+// * Buffered writes
+// * Custom local and remote address values
+// * Error values that follow net.Conn's rules regarding
+//   net.OpError
+//
+// The above features could be implemented using the "net.Conn" values
+// returned from the function "net.Pipe", but much of the same code
+// would need to be duplicated regarding deadlines, done semantics, etc.
+// Using the private "pipe" struct as the basis of a new, composite type
+// is much more performant.
+//
+// FYI, the reason a new, composite type is used instead of modifying
+// the existing type, "pipe", is to make it easier to replace this
+// file with whatever changes the Go stdlib may make to "net/pipe.go" in
+// the future.
+//
+// This file is a Golang stdlib type and so the Go license is included:
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package memconn
+
+import (
+	"io"
+	"net"
+	"sync"
+	"time"
+)
+
+// pipeDeadline is an abstraction for handling timeouts.
+type pipeDeadline struct {
+	mu     sync.Mutex // Guards timer and cancel
+	timer  *time.Timer
+	cancel chan struct{} // Must be non-nil
+}
+
+func makePipeDeadline() pipeDeadline {
+	return pipeDeadline{cancel: make(chan struct{})}
+}
+
+// set sets the point in time when the deadline will time out.
+// A timeout event is signaled by closing the channel returned by waiter. +// Once a timeout has occurred, the deadline can be refreshed by specifying a +// t value in the future. +// +// A zero value for t prevents timeout. +func (d *pipeDeadline) set(t time.Time) { + d.mu.Lock() + defer d.mu.Unlock() + + if d.timer != nil && !d.timer.Stop() { + <-d.cancel // Wait for the timer callback to finish and close cancel + } + d.timer = nil + + // Time is zero, then there is no deadline. + closed := isClosedChan(d.cancel) + if t.IsZero() { + if closed { + d.cancel = make(chan struct{}) + } + return + } + + // Time in the future, setup a timer to cancel in the future. + if dur := time.Until(t); dur > 0 { + if closed { + d.cancel = make(chan struct{}) + } + d.timer = time.AfterFunc(dur, func() { + close(d.cancel) + }) + return + } + + // Time in the past, so close immediately. + if !closed { + close(d.cancel) + } +} + +// wait returns a channel that is closed when the deadline is exceeded. +func (d *pipeDeadline) wait() chan struct{} { + d.mu.Lock() + defer d.mu.Unlock() + return d.cancel +} + +func isClosedChan(c <-chan struct{}) bool { + select { + case <-c: + return true + default: + return false + } +} + +type timeoutError struct{} + +func (timeoutError) Error() string { return "deadline exceeded" } +func (timeoutError) Timeout() bool { return true } +func (timeoutError) Temporary() bool { return true } + +type pipeAddr struct{} + +func (pipeAddr) Network() string { return "pipe" } +func (pipeAddr) String() string { return "pipe" } + +type pipe struct { + wrMu sync.Mutex // Serialize Write operations + + // Used by local Read to interact with remote Write. + // Successful receive on rdRx is always followed by send on rdTx. + rdRx <-chan []byte + rdTx chan<- int + + // Used by local Write to interact with remote Read. + // Successful send on wrTx is always followed by receive on wrRx. + wrTx chan<- []byte + wrRx <-chan int + + once sync.Once // Protects closing localDone + localDone chan struct{} + remoteDone <-chan struct{} + + readDeadline pipeDeadline + writeDeadline pipeDeadline +} + +// Pipe creates a synchronous, in-memory, full duplex +// network connection; both ends implement the Conn interface. +// Reads on one end are matched with writes on the other, +// copying data directly between the two; there is no internal +// buffering. 
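+// This is the same behavior as net.Pipe from the Go standard library;
+// a local copy is kept so Conn can embed the unexported pipe type
+// (see the file header above).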
+func Pipe() (net.Conn, net.Conn) { + cb1 := make(chan []byte) + cb2 := make(chan []byte) + cn1 := make(chan int) + cn2 := make(chan int) + done1 := make(chan struct{}) + done2 := make(chan struct{}) + + p1 := &pipe{ + rdRx: cb1, rdTx: cn1, + wrTx: cb2, wrRx: cn2, + localDone: done1, remoteDone: done2, + readDeadline: makePipeDeadline(), + writeDeadline: makePipeDeadline(), + } + p2 := &pipe{ + rdRx: cb2, rdTx: cn2, + wrTx: cb1, wrRx: cn1, + localDone: done2, remoteDone: done1, + readDeadline: makePipeDeadline(), + writeDeadline: makePipeDeadline(), + } + return p1, p2 +} + +func (*pipe) LocalAddr() net.Addr { return pipeAddr{} } +func (*pipe) RemoteAddr() net.Addr { return pipeAddr{} } + +func (p *pipe) Read(b []byte) (int, error) { + n, err := p.read(b) + if err != nil && err != io.EOF && err != io.ErrClosedPipe { + err = &net.OpError{Op: "read", Net: "pipe", Err: err} + } + return n, err +} + +func (p *pipe) read(b []byte) (n int, err error) { + switch { + case isClosedChan(p.localDone): + return 0, io.ErrClosedPipe + case isClosedChan(p.remoteDone): + return 0, io.EOF + case isClosedChan(p.readDeadline.wait()): + return 0, timeoutError{} + } + + select { + case bw := <-p.rdRx: + nr := copy(b, bw) + p.rdTx <- nr + return nr, nil + case <-p.localDone: + return 0, io.ErrClosedPipe + case <-p.remoteDone: + return 0, io.EOF + case <-p.readDeadline.wait(): + return 0, timeoutError{} + } +} + +func (p *pipe) Write(b []byte) (int, error) { + n, err := p.write(b) + if err != nil && err != io.ErrClosedPipe { + err = &net.OpError{Op: "write", Net: "pipe", Err: err} + } + return n, err +} + +func (p *pipe) write(b []byte) (n int, err error) { + switch { + case isClosedChan(p.localDone): + return 0, io.ErrClosedPipe + case isClosedChan(p.remoteDone): + return 0, io.ErrClosedPipe + case isClosedChan(p.writeDeadline.wait()): + return 0, timeoutError{} + } + + p.wrMu.Lock() // Ensure entirety of b is written together + defer p.wrMu.Unlock() + for once := true; once || len(b) > 0; once = false { + select { + case p.wrTx <- b: + nw := <-p.wrRx + b = b[nw:] + n += nw + case <-p.localDone: + return n, io.ErrClosedPipe + case <-p.remoteDone: + return n, io.ErrClosedPipe + case <-p.writeDeadline.wait(): + return n, timeoutError{} + } + } + return n, nil +} + +func (p *pipe) SetDeadline(t time.Time) error { + if isClosedChan(p.localDone) || isClosedChan(p.remoteDone) { + return io.ErrClosedPipe + } + p.readDeadline.set(t) + p.writeDeadline.set(t) + return nil +} + +func (p *pipe) SetReadDeadline(t time.Time) error { + if isClosedChan(p.localDone) || isClosedChan(p.remoteDone) { + return io.ErrClosedPipe + } + p.readDeadline.set(t) + return nil +} + +func (p *pipe) SetWriteDeadline(t time.Time) error { + if isClosedChan(p.localDone) || isClosedChan(p.remoteDone) { + return io.ErrClosedPipe + } + p.writeDeadline.set(t) + return nil +} + +func (p *pipe) Close() error { + p.once.Do(func() { close(p.localDone) }) + return nil +} diff --git a/vendor/github.com/akutz/memconn/memconn_provider.go b/vendor/github.com/akutz/memconn/memconn_provider.go new file mode 100644 index 00000000000..ec9d7678780 --- /dev/null +++ b/vendor/github.com/akutz/memconn/memconn_provider.go @@ -0,0 +1,245 @@ +package memconn + +import ( + "context" + "errors" + "fmt" + "net" + "sync" + "time" +) + +// Provider is used to track named MemConn objects. 
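+// The zero value is ready to use: the internal caches are initialized
+// lazily, and the package-level Listen and Dial functions delegate to a
+// shared default Provider.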
+type Provider struct {
+	nets      networkMap
+	listeners listenerCache
+}
+
+type listenerCache struct {
+	sync.RWMutex
+	cache map[string]*Listener
+}
+
+type networkMap struct {
+	sync.RWMutex
+	cache map[string]string
+}
+
+// MapNetwork enables mapping the network value provided to this Provider's
+// Dial and Listen functions from the specified "from" value to the
+// specified "to" value.
+//
+// For example, calling MapNetwork("tcp", "memu") means a subsequent
+// Dial("tcp", "address") gets translated to Dial("memu", "address").
+//
+// Calling MapNetwork("tcp", "") removes any previous translation for
+// the "tcp" network.
+func (p *Provider) MapNetwork(from, to string) {
+	p.nets.Lock()
+	defer p.nets.Unlock()
+	if p.nets.cache == nil {
+		p.nets.cache = map[string]string{}
+	}
+	if to == "" {
+		delete(p.nets.cache, from)
+		return
+	}
+	p.nets.cache[from] = to
+}
+
+func (p *Provider) mapNetwork(network string) string {
+	p.nets.RLock()
+	defer p.nets.RUnlock()
+	if to, ok := p.nets.cache[network]; ok {
+		return to
+	}
+	return network
+}
+
+// Listen begins listening at address for the specified network.
+//
+// Known networks are "memb" (memconn buffered) and "memu" (memconn unbuffered).
+//
+// When the specified address is already in use on the specified
+// network an error is returned.
+//
+// When the provided network is unknown the operation defers to
+// net.Listen.
+func (p *Provider) Listen(network, address string) (net.Listener, error) {
+	switch p.mapNetwork(network) {
+	case networkMemb, networkMemu:
+		return p.ListenMem(
+			network, &Addr{Name: address, network: network})
+	default:
+		return net.Listen(network, address)
+	}
+}
+
+// ListenMem begins listening at laddr.
+//
+// Known networks are "memb" (memconn buffered) and "memu" (memconn unbuffered).
+//
+// If laddr is nil then ListenMem listens on "localhost" on the
+// specified network.
+func (p *Provider) ListenMem(network string, laddr *Addr) (*Listener, error) {

+	switch p.mapNetwork(network) {
+	case networkMemb, networkMemu:
+		// If laddr is not specified then set it to the reserved name
+		// "localhost".
+		if laddr == nil {
+			laddr = &Addr{Name: addrLocalhost, network: network}
+		} else {
+			laddr.network = network
+		}
+	default:
+		return nil, &net.OpError{
+			Addr:   laddr,
+			Source: laddr,
+			Net:    network,
+			Op:     "listen",
+			Err:    errors.New("unknown network"),
+		}
+	}
+
+	p.listeners.Lock()
+	defer p.listeners.Unlock()
+
+	if p.listeners.cache == nil {
+		p.listeners.cache = map[string]*Listener{}
+	}
+
+	if _, ok := p.listeners.cache[laddr.Name]; ok {
+		return nil, &net.OpError{
+			Addr:   laddr,
+			Source: laddr,
+			Net:    network,
+			Op:     "listen",
+			Err:    errors.New("addr unavailable"),
+		}
+	}
+
+	l := &Listener{
+		addr: *laddr,
+		done: make(chan struct{}),
+		rmvd: make(chan struct{}),
+		rcvr: make(chan *Conn, 1),
+	}
+
+	// Start a goroutine that removes the listener from
+	// the cache once the listener is closed.
+	go func() {
+		<-l.done
+		p.listeners.Lock()
+		defer p.listeners.Unlock()
+		delete(p.listeners.cache, laddr.Name)
+		close(l.rmvd)
+	}()
+
+	p.listeners.cache[laddr.Name] = l
+	return l, nil
+}
+
+// Dial dials a named connection.
+//
+// Known networks are "memb" (memconn buffered) and "memu" (memconn unbuffered).
+//
+// When the provided network is unknown the operation defers to
+// net.Dial.
+func (p *Provider) Dial(network, address string) (net.Conn, error) {
+	return p.DialContext(nil, network, address)
+}
+
+// DialMem dials a named connection.
+// +// Known networks are "memb" (memconn buffered) and "memu" (memconn unbuffered). +// +// If laddr is nil then a new address is generated using +// time.Now().UnixNano(). Please note that client addresses are +// not required to be unique. +// +// If raddr is nil then the "localhost" endpoint is used on the +// specified network. +func (p *Provider) DialMem( + network string, laddr, raddr *Addr) (*Conn, error) { + + return p.DialMemContext(nil, network, laddr, raddr) +} + +// DialContext dials a named connection using a +// Go context to provide timeout behavior. +// +// Please see Dial for more information. +func (p *Provider) DialContext( + ctx context.Context, + network, address string) (net.Conn, error) { + + switch p.mapNetwork(network) { + case networkMemb, networkMemu: + return p.DialMemContext( + ctx, network, nil, &Addr{ + Name: address, + network: network, + }) + default: + if ctx == nil { + return net.Dial(network, address) + } + return (&net.Dialer{}).DialContext(ctx, network, address) + } +} + +// DialMemContext dials a named connection using a +// Go context to provide timeout behavior. +// +// Please see DialMem for more information. +func (p *Provider) DialMemContext( + ctx context.Context, + network string, + laddr, raddr *Addr) (*Conn, error) { + + switch p.mapNetwork(network) { + case networkMemb, networkMemu: + // If laddr is not specified then create one with the current + // epoch in nanoseconds. This value need not be unique. + if laddr == nil { + laddr = &Addr{ + Name: fmt.Sprintf("%d", time.Now().UnixNano()), + network: network, + } + } else { + laddr.network = network + } + if raddr == nil { + raddr = &Addr{Name: addrLocalhost, network: network} + } else { + raddr.network = network + } + default: + return nil, &net.OpError{ + Addr: raddr, + Source: laddr, + Net: network, + Op: "dial", + Err: errors.New("unknown network"), + } + } + + p.listeners.RLock() + defer p.listeners.RUnlock() + + if l, ok := p.listeners.cache[raddr.Name]; ok { + // Update the provided raddr with the actual network type used + // by the listener. + raddr.network = l.addr.network + return l.dial(ctx, network, *laddr, *raddr) + } + + return nil, &net.OpError{ + Addr: raddr, + Source: laddr, + Net: network, + Op: "dial", + Err: errors.New("unknown remote address"), + } +} diff --git a/vendor/github.com/alecthomas/template/LICENSE b/vendor/github.com/alecthomas/template/LICENSE new file mode 100644 index 00000000000..74487567632 --- /dev/null +++ b/vendor/github.com/alecthomas/template/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/alecthomas/template/README.md b/vendor/github.com/alecthomas/template/README.md new file mode 100644 index 00000000000..ef6a8ee303e --- /dev/null +++ b/vendor/github.com/alecthomas/template/README.md @@ -0,0 +1,25 @@ +# Go's `text/template` package with newline elision + +This is a fork of Go 1.4's [text/template](http://golang.org/pkg/text/template/) package with one addition: a backslash immediately after a closing delimiter will delete all subsequent newlines until a non-newline. + +eg. + +``` +{{if true}}\ +hello +{{end}}\ +``` + +Will result in: + +``` +hello\n +``` + +Rather than: + +``` +\n +hello\n +\n +``` diff --git a/vendor/github.com/alecthomas/template/doc.go b/vendor/github.com/alecthomas/template/doc.go new file mode 100644 index 00000000000..223c595c25d --- /dev/null +++ b/vendor/github.com/alecthomas/template/doc.go @@ -0,0 +1,406 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package template implements data-driven templates for generating textual output. + +To generate HTML output, see package html/template, which has the same interface +as this package but automatically secures HTML output against certain attacks. + +Templates are executed by applying them to a data structure. Annotations in the +template refer to elements of the data structure (typically a field of a struct +or a key in a map) to control execution and derive values to be displayed. +Execution of the template walks the structure and sets the cursor, represented +by a period '.' and called "dot", to the value at the current location in the +structure as execution proceeds. + +The input text for a template is UTF-8-encoded text in any format. +"Actions"--data evaluations or control structures--are delimited by +"{{" and "}}"; all text outside actions is copied to the output unchanged. +Actions may not span newlines, although comments can. + +Once parsed, a template may be executed safely in parallel. + +Here is a trivial example that prints "17 items are made of wool". + + type Inventory struct { + Material string + Count uint + } + sweaters := Inventory{"wool", 17} + tmpl, err := template.New("test").Parse("{{.Count}} items are made of {{.Material}}") + if err != nil { panic(err) } + err = tmpl.Execute(os.Stdout, sweaters) + if err != nil { panic(err) } + +More intricate examples appear below. + +Actions + +Here is the list of actions. "Arguments" and "pipelines" are evaluations of +data, defined in detail below. + +*/ +// {{/* a comment */}} +// A comment; discarded. May contain newlines. +// Comments do not nest and must start and end at the +// delimiters, as shown here. +/* + + {{pipeline}} + The default textual representation of the value of the pipeline + is copied to the output. + + {{if pipeline}} T1 {{end}} + If the value of the pipeline is empty, no output is generated; + otherwise, T1 is executed. 
The empty values are false, 0, any + nil pointer or interface value, and any array, slice, map, or + string of length zero. + Dot is unaffected. + + {{if pipeline}} T1 {{else}} T0 {{end}} + If the value of the pipeline is empty, T0 is executed; + otherwise, T1 is executed. Dot is unaffected. + + {{if pipeline}} T1 {{else if pipeline}} T0 {{end}} + To simplify the appearance of if-else chains, the else action + of an if may include another if directly; the effect is exactly + the same as writing + {{if pipeline}} T1 {{else}}{{if pipeline}} T0 {{end}}{{end}} + + {{range pipeline}} T1 {{end}} + The value of the pipeline must be an array, slice, map, or channel. + If the value of the pipeline has length zero, nothing is output; + otherwise, dot is set to the successive elements of the array, + slice, or map and T1 is executed. If the value is a map and the + keys are of basic type with a defined order ("comparable"), the + elements will be visited in sorted key order. + + {{range pipeline}} T1 {{else}} T0 {{end}} + The value of the pipeline must be an array, slice, map, or channel. + If the value of the pipeline has length zero, dot is unaffected and + T0 is executed; otherwise, dot is set to the successive elements + of the array, slice, or map and T1 is executed. + + {{template "name"}} + The template with the specified name is executed with nil data. + + {{template "name" pipeline}} + The template with the specified name is executed with dot set + to the value of the pipeline. + + {{with pipeline}} T1 {{end}} + If the value of the pipeline is empty, no output is generated; + otherwise, dot is set to the value of the pipeline and T1 is + executed. + + {{with pipeline}} T1 {{else}} T0 {{end}} + If the value of the pipeline is empty, dot is unaffected and T0 + is executed; otherwise, dot is set to the value of the pipeline + and T1 is executed. + +Arguments + +An argument is a simple value, denoted by one of the following. + + - A boolean, string, character, integer, floating-point, imaginary + or complex constant in Go syntax. These behave like Go's untyped + constants, although raw strings may not span newlines. + - The keyword nil, representing an untyped Go nil. + - The character '.' (period): + . + The result is the value of dot. + - A variable name, which is a (possibly empty) alphanumeric string + preceded by a dollar sign, such as + $piOver2 + or + $ + The result is the value of the variable. + Variables are described below. + - The name of a field of the data, which must be a struct, preceded + by a period, such as + .Field + The result is the value of the field. Field invocations may be + chained: + .Field1.Field2 + Fields can also be evaluated on variables, including chaining: + $x.Field1.Field2 + - The name of a key of the data, which must be a map, preceded + by a period, such as + .Key + The result is the map element value indexed by the key. + Key invocations may be chained and combined with fields to any + depth: + .Field1.Key1.Field2.Key2 + Although the key must be an alphanumeric identifier, unlike with + field names they do not need to start with an upper case letter. + Keys can also be evaluated on variables, including chaining: + $x.key1.key2 + - The name of a niladic method of the data, preceded by a period, + such as + .Method + The result is the value of invoking the method with dot as the + receiver, dot.Method(). Such a method must have one return value (of + any type) or two return values, the second of which is an error. 
+	  If it has two and the returned error is non-nil, execution terminates
+	  and an error is returned to the caller as the value of Execute.
+	  Method invocations may be chained and combined with fields and keys
+	  to any depth:
+		.Field1.Key1.Method1.Field2.Key2.Method2
+	  Methods can also be evaluated on variables, including chaining:
+		$x.Method1.Field
+	- The name of a niladic function, such as
+		fun
+	  The result is the value of invoking the function, fun(). The return
+	  types and values behave as in methods. Functions and function
+	  names are described below.
+	- A parenthesized instance of one of the above, for grouping. The result
+	  may be accessed by a field or map key invocation.
+		print (.F1 arg1) (.F2 arg2)
+		(.StructValuedMethod "arg").Field
+
+Arguments may evaluate to any type; if they are pointers the implementation
+automatically indirects to the base type when required.
+If an evaluation yields a function value, such as a function-valued
+field of a struct, the function is not invoked automatically, but it
+can be used as a truth value for an if action and the like. To invoke
+it, use the call function, defined below.
+
+A pipeline is a possibly chained sequence of "commands". A command is a simple
+value (argument) or a function or method call, possibly with multiple arguments:
+
+	Argument
+		The result is the value of evaluating the argument.
+	.Method [Argument...]
+		The method can be alone or the last element of a chain but,
+		unlike methods in the middle of a chain, it can take arguments.
+		The result is the value of calling the method with the
+		arguments:
+			dot.Method(Argument1, etc.)
+	functionName [Argument...]
+		The result is the value of calling the function associated
+		with the name:
+			function(Argument1, etc.)
+		Functions and function names are described below.
+
+Pipelines
+
+A pipeline may be "chained" by separating a sequence of commands with pipeline
+characters '|'. In a chained pipeline, the result of each command is
+passed as the last argument of the following command. The output of the final
+command in the pipeline is the value of the pipeline.
+
+The output of a command will be either one value or two values, the second of
+which has type error. If that second value is present and evaluates to
+non-nil, execution terminates and the error is returned to the caller of
+Execute.
+
+Variables
+
+A pipeline inside an action may initialize a variable to capture the result.
+The initialization has syntax
+
+	$variable := pipeline
+
+where $variable is the name of the variable. An action that declares a
+variable produces no output.
+
+If a "range" action initializes a variable, the variable is set to the
+successive elements of the iteration. Also, a "range" may declare two
+variables, separated by a comma:
+
+	range $index, $element := pipeline
+
+in which case $index and $element are set to the successive values of the
+array/slice index or map key and element, respectively. Note that if there is
+only one variable, it is assigned the element; this is opposite to the
+convention in Go range clauses.
+
+A variable's scope extends to the "end" action of the control structure ("if",
+"with", or "range") in which it is declared, or to the end of the template if
+there is no such control structure. A template invocation does not inherit
+variables from the point of its invocation.
+
+When execution begins, $ is set to the data argument passed to Execute, that is,
+to the starting value of dot.
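+
+As an illustrative sketch (the field name Items is invented for this
+example, not part of the package), a two-variable range such as
+
+	{{range $i, $v := .Items}}{{$i}}={{$v}} {{end}}
+
+applied to data whose Items field is []string{"a", "b"} produces
+"0=a 1=b ".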
+ +Examples + +Here are some example one-line templates demonstrating pipelines and variables. +All produce the quoted word "output": + + {{"\"output\""}} + A string constant. + {{`"output"`}} + A raw string constant. + {{printf "%q" "output"}} + A function call. + {{"output" | printf "%q"}} + A function call whose final argument comes from the previous + command. + {{printf "%q" (print "out" "put")}} + A parenthesized argument. + {{"put" | printf "%s%s" "out" | printf "%q"}} + A more elaborate call. + {{"output" | printf "%s" | printf "%q"}} + A longer chain. + {{with "output"}}{{printf "%q" .}}{{end}} + A with action using dot. + {{with $x := "output" | printf "%q"}}{{$x}}{{end}} + A with action that creates and uses a variable. + {{with $x := "output"}}{{printf "%q" $x}}{{end}} + A with action that uses the variable in another action. + {{with $x := "output"}}{{$x | printf "%q"}}{{end}} + The same, but pipelined. + +Functions + +During execution functions are found in two function maps: first in the +template, then in the global function map. By default, no functions are defined +in the template but the Funcs method can be used to add them. + +Predefined global functions are named as follows. + + and + Returns the boolean AND of its arguments by returning the + first empty argument or the last argument, that is, + "and x y" behaves as "if x then y else x". All the + arguments are evaluated. + call + Returns the result of calling the first argument, which + must be a function, with the remaining arguments as parameters. + Thus "call .X.Y 1 2" is, in Go notation, dot.X.Y(1, 2) where + Y is a func-valued field, map entry, or the like. + The first argument must be the result of an evaluation + that yields a value of function type (as distinct from + a predefined function such as print). The function must + return either one or two result values, the second of which + is of type error. If the arguments don't match the function + or the returned error value is non-nil, execution stops. + html + Returns the escaped HTML equivalent of the textual + representation of its arguments. + index + Returns the result of indexing its first argument by the + following arguments. Thus "index x 1 2 3" is, in Go syntax, + x[1][2][3]. Each indexed item must be a map, slice, or array. + js + Returns the escaped JavaScript equivalent of the textual + representation of its arguments. + len + Returns the integer length of its argument. + not + Returns the boolean negation of its single argument. + or + Returns the boolean OR of its arguments by returning the + first non-empty argument or the last argument, that is, + "or x y" behaves as "if x then x else y". All the + arguments are evaluated. + print + An alias for fmt.Sprint + printf + An alias for fmt.Sprintf + println + An alias for fmt.Sprintln + urlquery + Returns the escaped value of the textual representation of + its arguments in a form suitable for embedding in a URL query. + +The boolean functions take any zero value to be false and a non-zero +value to be true. 
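+
+As a hedged illustration (the field names Nickname and Name are invented
+for this example), note that "and" and "or" return one of their operands
+rather than a plain boolean, so
+
+	{{or .Nickname .Name}}
+
+prints .Nickname when it is non-empty and falls back to .Name otherwise.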
+ +There is also a set of binary comparison operators defined as +functions: + + eq + Returns the boolean truth of arg1 == arg2 + ne + Returns the boolean truth of arg1 != arg2 + lt + Returns the boolean truth of arg1 < arg2 + le + Returns the boolean truth of arg1 <= arg2 + gt + Returns the boolean truth of arg1 > arg2 + ge + Returns the boolean truth of arg1 >= arg2 + +For simpler multi-way equality tests, eq (only) accepts two or more +arguments and compares the second and subsequent to the first, +returning in effect + + arg1==arg2 || arg1==arg3 || arg1==arg4 ... + +(Unlike with || in Go, however, eq is a function call and all the +arguments will be evaluated.) + +The comparison functions work on basic types only (or named basic +types, such as "type Celsius float32"). They implement the Go rules +for comparison of values, except that size and exact type are +ignored, so any integer value, signed or unsigned, may be compared +with any other integer value. (The arithmetic value is compared, +not the bit pattern, so all negative integers are less than all +unsigned integers.) However, as usual, one may not compare an int +with a float32 and so on. + +Associated templates + +Each template is named by a string specified when it is created. Also, each +template is associated with zero or more other templates that it may invoke by +name; such associations are transitive and form a name space of templates. + +A template may use a template invocation to instantiate another associated +template; see the explanation of the "template" action above. The name must be +that of a template associated with the template that contains the invocation. + +Nested template definitions + +When parsing a template, another template may be defined and associated with the +template being parsed. Template definitions must appear at the top level of the +template, much like global variables in a Go program. + +The syntax of such definitions is to surround each template declaration with a +"define" and "end" action. + +The define action names the template being created by providing a string +constant. Here is a simple example: + + `{{define "T1"}}ONE{{end}} + {{define "T2"}}TWO{{end}} + {{define "T3"}}{{template "T1"}} {{template "T2"}}{{end}} + {{template "T3"}}` + +This defines two templates, T1 and T2, and a third T3 that invokes the other two +when it is executed. Finally it invokes T3. If executed this template will +produce the text + + ONE TWO + +By construction, a template may reside in only one association. If it's +necessary to have a template addressable from multiple associations, the +template definition must be parsed multiple times to create distinct *Template +values, or must be copied with the Clone or AddParseTree method. + +Parse may be called multiple times to assemble the various associated templates; +see the ParseFiles and ParseGlob functions and methods for simple ways to parse +related templates stored in files. + +A template may be executed directly or through ExecuteTemplate, which executes +an associated template identified by name. 
To invoke our example above, we +might write, + + err := tmpl.Execute(os.Stdout, "no data needed") + if err != nil { + log.Fatalf("execution failed: %s", err) + } + +or to invoke a particular template explicitly by name, + + err := tmpl.ExecuteTemplate(os.Stdout, "T2", "no data needed") + if err != nil { + log.Fatalf("execution failed: %s", err) + } + +*/ +package template diff --git a/vendor/github.com/alecthomas/template/exec.go b/vendor/github.com/alecthomas/template/exec.go new file mode 100644 index 00000000000..c3078e5d0c0 --- /dev/null +++ b/vendor/github.com/alecthomas/template/exec.go @@ -0,0 +1,845 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package template + +import ( + "bytes" + "fmt" + "io" + "reflect" + "runtime" + "sort" + "strings" + + "github.com/alecthomas/template/parse" +) + +// state represents the state of an execution. It's not part of the +// template so that multiple executions of the same template +// can execute in parallel. +type state struct { + tmpl *Template + wr io.Writer + node parse.Node // current node, for errors + vars []variable // push-down stack of variable values. +} + +// variable holds the dynamic value of a variable such as $, $x etc. +type variable struct { + name string + value reflect.Value +} + +// push pushes a new variable on the stack. +func (s *state) push(name string, value reflect.Value) { + s.vars = append(s.vars, variable{name, value}) +} + +// mark returns the length of the variable stack. +func (s *state) mark() int { + return len(s.vars) +} + +// pop pops the variable stack up to the mark. +func (s *state) pop(mark int) { + s.vars = s.vars[0:mark] +} + +// setVar overwrites the top-nth variable on the stack. Used by range iterations. +func (s *state) setVar(n int, value reflect.Value) { + s.vars[len(s.vars)-n].value = value +} + +// varValue returns the value of the named variable. +func (s *state) varValue(name string) reflect.Value { + for i := s.mark() - 1; i >= 0; i-- { + if s.vars[i].name == name { + return s.vars[i].value + } + } + s.errorf("undefined variable: %s", name) + return zero +} + +var zero reflect.Value + +// at marks the state to be on node n, for error reporting. +func (s *state) at(node parse.Node) { + s.node = node +} + +// doublePercent returns the string with %'s replaced by %%, if necessary, +// so it can be used safely inside a Printf format string. +func doublePercent(str string) string { + if strings.Contains(str, "%") { + str = strings.Replace(str, "%", "%%", -1) + } + return str +} + +// errorf formats the error and terminates processing. +func (s *state) errorf(format string, args ...interface{}) { + name := doublePercent(s.tmpl.Name()) + if s.node == nil { + format = fmt.Sprintf("template: %s: %s", name, format) + } else { + location, context := s.tmpl.ErrorContext(s.node) + format = fmt.Sprintf("template: %s: executing %q at <%s>: %s", location, name, doublePercent(context), format) + } + panic(fmt.Errorf(format, args...)) +} + +// errRecover is the handler that turns panics into returns from the top +// level of Parse. +func errRecover(errp *error) { + e := recover() + if e != nil { + switch err := e.(type) { + case runtime.Error: + panic(e) + case error: + *errp = err + default: + panic(e) + } + } +} + +// ExecuteTemplate applies the template associated with t that has the given name +// to the specified data object and writes the output to wr. 
+// If an error occurs executing the template or writing its output, +// execution stops, but partial results may already have been written to +// the output writer. +// A template may be executed safely in parallel. +func (t *Template) ExecuteTemplate(wr io.Writer, name string, data interface{}) error { + tmpl := t.tmpl[name] + if tmpl == nil { + return fmt.Errorf("template: no template %q associated with template %q", name, t.name) + } + return tmpl.Execute(wr, data) +} + +// Execute applies a parsed template to the specified data object, +// and writes the output to wr. +// If an error occurs executing the template or writing its output, +// execution stops, but partial results may already have been written to +// the output writer. +// A template may be executed safely in parallel. +func (t *Template) Execute(wr io.Writer, data interface{}) (err error) { + defer errRecover(&err) + value := reflect.ValueOf(data) + state := &state{ + tmpl: t, + wr: wr, + vars: []variable{{"$", value}}, + } + t.init() + if t.Tree == nil || t.Root == nil { + var b bytes.Buffer + for name, tmpl := range t.tmpl { + if tmpl.Tree == nil || tmpl.Root == nil { + continue + } + if b.Len() > 0 { + b.WriteString(", ") + } + fmt.Fprintf(&b, "%q", name) + } + var s string + if b.Len() > 0 { + s = "; defined templates are: " + b.String() + } + state.errorf("%q is an incomplete or empty template%s", t.Name(), s) + } + state.walk(value, t.Root) + return +} + +// Walk functions step through the major pieces of the template structure, +// generating output as they go. +func (s *state) walk(dot reflect.Value, node parse.Node) { + s.at(node) + switch node := node.(type) { + case *parse.ActionNode: + // Do not pop variables so they persist until next end. + // Also, if the action declares variables, don't print the result. + val := s.evalPipeline(dot, node.Pipe) + if len(node.Pipe.Decl) == 0 { + s.printValue(node, val) + } + case *parse.IfNode: + s.walkIfOrWith(parse.NodeIf, dot, node.Pipe, node.List, node.ElseList) + case *parse.ListNode: + for _, node := range node.Nodes { + s.walk(dot, node) + } + case *parse.RangeNode: + s.walkRange(dot, node) + case *parse.TemplateNode: + s.walkTemplate(dot, node) + case *parse.TextNode: + if _, err := s.wr.Write(node.Text); err != nil { + s.errorf("%s", err) + } + case *parse.WithNode: + s.walkIfOrWith(parse.NodeWith, dot, node.Pipe, node.List, node.ElseList) + default: + s.errorf("unknown node: %s", node) + } +} + +// walkIfOrWith walks an 'if' or 'with' node. The two control structures +// are identical in behavior except that 'with' sets dot. +func (s *state) walkIfOrWith(typ parse.NodeType, dot reflect.Value, pipe *parse.PipeNode, list, elseList *parse.ListNode) { + defer s.pop(s.mark()) + val := s.evalPipeline(dot, pipe) + truth, ok := isTrue(val) + if !ok { + s.errorf("if/with can't use %v", val) + } + if truth { + if typ == parse.NodeWith { + s.walk(val, list) + } else { + s.walk(dot, list) + } + } else if elseList != nil { + s.walk(dot, elseList) + } +} + +// isTrue reports whether the value is 'true', in the sense of not the zero of its type, +// and whether the value has a meaningful truth value. +func isTrue(val reflect.Value) (truth, ok bool) { + if !val.IsValid() { + // Something like var x interface{}, never set. It's a form of nil. 
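+		// Report it as a meaningful false rather than an error, so callers
+		// such as walkIfOrWith simply take the else branch.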
+ return false, true + } + switch val.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + truth = val.Len() > 0 + case reflect.Bool: + truth = val.Bool() + case reflect.Complex64, reflect.Complex128: + truth = val.Complex() != 0 + case reflect.Chan, reflect.Func, reflect.Ptr, reflect.Interface: + truth = !val.IsNil() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + truth = val.Int() != 0 + case reflect.Float32, reflect.Float64: + truth = val.Float() != 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + truth = val.Uint() != 0 + case reflect.Struct: + truth = true // Struct values are always true. + default: + return + } + return truth, true +} + +func (s *state) walkRange(dot reflect.Value, r *parse.RangeNode) { + s.at(r) + defer s.pop(s.mark()) + val, _ := indirect(s.evalPipeline(dot, r.Pipe)) + // mark top of stack before any variables in the body are pushed. + mark := s.mark() + oneIteration := func(index, elem reflect.Value) { + // Set top var (lexically the second if there are two) to the element. + if len(r.Pipe.Decl) > 0 { + s.setVar(1, elem) + } + // Set next var (lexically the first if there are two) to the index. + if len(r.Pipe.Decl) > 1 { + s.setVar(2, index) + } + s.walk(elem, r.List) + s.pop(mark) + } + switch val.Kind() { + case reflect.Array, reflect.Slice: + if val.Len() == 0 { + break + } + for i := 0; i < val.Len(); i++ { + oneIteration(reflect.ValueOf(i), val.Index(i)) + } + return + case reflect.Map: + if val.Len() == 0 { + break + } + for _, key := range sortKeys(val.MapKeys()) { + oneIteration(key, val.MapIndex(key)) + } + return + case reflect.Chan: + if val.IsNil() { + break + } + i := 0 + for ; ; i++ { + elem, ok := val.Recv() + if !ok { + break + } + oneIteration(reflect.ValueOf(i), elem) + } + if i == 0 { + break + } + return + case reflect.Invalid: + break // An invalid value is likely a nil map, etc. and acts like an empty map. + default: + s.errorf("range can't iterate over %v", val) + } + if r.ElseList != nil { + s.walk(dot, r.ElseList) + } +} + +func (s *state) walkTemplate(dot reflect.Value, t *parse.TemplateNode) { + s.at(t) + tmpl := s.tmpl.tmpl[t.Name] + if tmpl == nil { + s.errorf("template %q not defined", t.Name) + } + // Variables declared by the pipeline persist. + dot = s.evalPipeline(dot, t.Pipe) + newState := *s + newState.tmpl = tmpl + // No dynamic scoping: template invocations inherit no variables. + newState.vars = []variable{{"$", dot}} + newState.walk(dot, tmpl.Root) +} + +// Eval functions evaluate pipelines, commands, and their elements and extract +// values from the data structure by examining fields, calling methods, and so on. +// The printing of those values happens only through walk functions. + +// evalPipeline returns the value acquired by evaluating a pipeline. If the +// pipeline has a variable declaration, the variable will be pushed on the +// stack. Callers should therefore pop the stack after they are finished +// executing commands depending on the pipeline value. +func (s *state) evalPipeline(dot reflect.Value, pipe *parse.PipeNode) (value reflect.Value) { + if pipe == nil { + return + } + s.at(pipe) + for _, cmd := range pipe.Cmds { + value = s.evalCommand(dot, cmd, value) // previous value is this one's final arg. + // If the object has type interface{}, dig down one level to the thing inside. 
+ if value.Kind() == reflect.Interface && value.Type().NumMethod() == 0 { + value = reflect.ValueOf(value.Interface()) // lovely! + } + } + for _, variable := range pipe.Decl { + s.push(variable.Ident[0], value) + } + return value +} + +func (s *state) notAFunction(args []parse.Node, final reflect.Value) { + if len(args) > 1 || final.IsValid() { + s.errorf("can't give argument to non-function %s", args[0]) + } +} + +func (s *state) evalCommand(dot reflect.Value, cmd *parse.CommandNode, final reflect.Value) reflect.Value { + firstWord := cmd.Args[0] + switch n := firstWord.(type) { + case *parse.FieldNode: + return s.evalFieldNode(dot, n, cmd.Args, final) + case *parse.ChainNode: + return s.evalChainNode(dot, n, cmd.Args, final) + case *parse.IdentifierNode: + // Must be a function. + return s.evalFunction(dot, n, cmd, cmd.Args, final) + case *parse.PipeNode: + // Parenthesized pipeline. The arguments are all inside the pipeline; final is ignored. + return s.evalPipeline(dot, n) + case *parse.VariableNode: + return s.evalVariableNode(dot, n, cmd.Args, final) + } + s.at(firstWord) + s.notAFunction(cmd.Args, final) + switch word := firstWord.(type) { + case *parse.BoolNode: + return reflect.ValueOf(word.True) + case *parse.DotNode: + return dot + case *parse.NilNode: + s.errorf("nil is not a command") + case *parse.NumberNode: + return s.idealConstant(word) + case *parse.StringNode: + return reflect.ValueOf(word.Text) + } + s.errorf("can't evaluate command %q", firstWord) + panic("not reached") +} + +// idealConstant is called to return the value of a number in a context where +// we don't know the type. In that case, the syntax of the number tells us +// its type, and we use Go rules to resolve. Note there is no such thing as +// a uint ideal constant in this situation - the value must be of int type. +func (s *state) idealConstant(constant *parse.NumberNode) reflect.Value { + // These are ideal constants but we don't know the type + // and we have no context. (If it was a method argument, + // we'd know what we need.) The syntax guides us to some extent. + s.at(constant) + switch { + case constant.IsComplex: + return reflect.ValueOf(constant.Complex128) // incontrovertible. + case constant.IsFloat && !isHexConstant(constant.Text) && strings.IndexAny(constant.Text, ".eE") >= 0: + return reflect.ValueOf(constant.Float64) + case constant.IsInt: + n := int(constant.Int64) + if int64(n) != constant.Int64 { + s.errorf("%s overflows int", constant.Text) + } + return reflect.ValueOf(n) + case constant.IsUint: + s.errorf("%s overflows int", constant.Text) + } + return zero +} + +func isHexConstant(s string) bool { + return len(s) > 2 && s[0] == '0' && (s[1] == 'x' || s[1] == 'X') +} + +func (s *state) evalFieldNode(dot reflect.Value, field *parse.FieldNode, args []parse.Node, final reflect.Value) reflect.Value { + s.at(field) + return s.evalFieldChain(dot, dot, field, field.Ident, args, final) +} + +func (s *state) evalChainNode(dot reflect.Value, chain *parse.ChainNode, args []parse.Node, final reflect.Value) reflect.Value { + s.at(chain) + // (pipe).Field1.Field2 has pipe as .Node, fields as .Field. Eval the pipeline, then the fields. 
+ pipe := s.evalArg(dot, nil, chain.Node) + if len(chain.Field) == 0 { + s.errorf("internal error: no fields in evalChainNode") + } + return s.evalFieldChain(dot, pipe, chain, chain.Field, args, final) +} + +func (s *state) evalVariableNode(dot reflect.Value, variable *parse.VariableNode, args []parse.Node, final reflect.Value) reflect.Value { + // $x.Field has $x as the first ident, Field as the second. Eval the var, then the fields. + s.at(variable) + value := s.varValue(variable.Ident[0]) + if len(variable.Ident) == 1 { + s.notAFunction(args, final) + return value + } + return s.evalFieldChain(dot, value, variable, variable.Ident[1:], args, final) +} + +// evalFieldChain evaluates .X.Y.Z possibly followed by arguments. +// dot is the environment in which to evaluate arguments, while +// receiver is the value being walked along the chain. +func (s *state) evalFieldChain(dot, receiver reflect.Value, node parse.Node, ident []string, args []parse.Node, final reflect.Value) reflect.Value { + n := len(ident) + for i := 0; i < n-1; i++ { + receiver = s.evalField(dot, ident[i], node, nil, zero, receiver) + } + // Now if it's a method, it gets the arguments. + return s.evalField(dot, ident[n-1], node, args, final, receiver) +} + +func (s *state) evalFunction(dot reflect.Value, node *parse.IdentifierNode, cmd parse.Node, args []parse.Node, final reflect.Value) reflect.Value { + s.at(node) + name := node.Ident + function, ok := findFunction(name, s.tmpl) + if !ok { + s.errorf("%q is not a defined function", name) + } + return s.evalCall(dot, function, cmd, name, args, final) +} + +// evalField evaluates an expression like (.Field) or (.Field arg1 arg2). +// The 'final' argument represents the return value from the preceding +// value of the pipeline, if any. +func (s *state) evalField(dot reflect.Value, fieldName string, node parse.Node, args []parse.Node, final, receiver reflect.Value) reflect.Value { + if !receiver.IsValid() { + return zero + } + typ := receiver.Type() + receiver, _ = indirect(receiver) + // Unless it's an interface, need to get to a value of type *T to guarantee + // we see all methods of T and *T. + ptr := receiver + if ptr.Kind() != reflect.Interface && ptr.CanAddr() { + ptr = ptr.Addr() + } + if method := ptr.MethodByName(fieldName); method.IsValid() { + return s.evalCall(dot, method, node, fieldName, args, final) + } + hasArgs := len(args) > 1 || final.IsValid() + // It's not a method; must be a field of a struct or an element of a map. The receiver must not be nil. + receiver, isNil := indirect(receiver) + if isNil { + s.errorf("nil pointer evaluating %s.%s", typ, fieldName) + } + switch receiver.Kind() { + case reflect.Struct: + tField, ok := receiver.Type().FieldByName(fieldName) + if ok { + field := receiver.FieldByIndex(tField.Index) + if tField.PkgPath != "" { // field is unexported + s.errorf("%s is an unexported field of struct type %s", fieldName, typ) + } + // If it's a function, we must call it. + if hasArgs { + s.errorf("%s has arguments but cannot be invoked as function", fieldName) + } + return field + } + s.errorf("%s is not a field of struct type %s", fieldName, typ) + case reflect.Map: + // If it's a map, attempt to use the field name as a key. 
+ nameVal := reflect.ValueOf(fieldName) + if nameVal.Type().AssignableTo(receiver.Type().Key()) { + if hasArgs { + s.errorf("%s is not a method but has arguments", fieldName) + } + return receiver.MapIndex(nameVal) + } + } + s.errorf("can't evaluate field %s in type %s", fieldName, typ) + panic("not reached") +} + +var ( + errorType = reflect.TypeOf((*error)(nil)).Elem() + fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem() +) + +// evalCall executes a function or method call. If it's a method, fun already has the receiver bound, so +// it looks just like a function call. The arg list, if non-nil, includes (in the manner of the shell), arg[0] +// as the function itself. +func (s *state) evalCall(dot, fun reflect.Value, node parse.Node, name string, args []parse.Node, final reflect.Value) reflect.Value { + if args != nil { + args = args[1:] // Zeroth arg is function name/node; not passed to function. + } + typ := fun.Type() + numIn := len(args) + if final.IsValid() { + numIn++ + } + numFixed := len(args) + if typ.IsVariadic() { + numFixed = typ.NumIn() - 1 // last arg is the variadic one. + if numIn < numFixed { + s.errorf("wrong number of args for %s: want at least %d got %d", name, typ.NumIn()-1, len(args)) + } + } else if numIn < typ.NumIn()-1 || !typ.IsVariadic() && numIn != typ.NumIn() { + s.errorf("wrong number of args for %s: want %d got %d", name, typ.NumIn(), len(args)) + } + if !goodFunc(typ) { + // TODO: This could still be a confusing error; maybe goodFunc should provide info. + s.errorf("can't call method/function %q with %d results", name, typ.NumOut()) + } + // Build the arg list. + argv := make([]reflect.Value, numIn) + // Args must be evaluated. Fixed args first. + i := 0 + for ; i < numFixed && i < len(args); i++ { + argv[i] = s.evalArg(dot, typ.In(i), args[i]) + } + // Now the ... args. + if typ.IsVariadic() { + argType := typ.In(typ.NumIn() - 1).Elem() // Argument is a slice. + for ; i < len(args); i++ { + argv[i] = s.evalArg(dot, argType, args[i]) + } + } + // Add final value if necessary. + if final.IsValid() { + t := typ.In(typ.NumIn() - 1) + if typ.IsVariadic() { + t = t.Elem() + } + argv[i] = s.validateType(final, t) + } + result := fun.Call(argv) + // If we have an error that is not nil, stop execution and return that error to the caller. + if len(result) == 2 && !result[1].IsNil() { + s.at(node) + s.errorf("error calling %s: %s", name, result[1].Interface().(error)) + } + return result[0] +} + +// canBeNil reports whether an untyped nil can be assigned to the type. See reflect.Zero. +func canBeNil(typ reflect.Type) bool { + switch typ.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return true + } + return false +} + +// validateType guarantees that the value is valid and assignable to the type. +func (s *state) validateType(value reflect.Value, typ reflect.Type) reflect.Value { + if !value.IsValid() { + if typ == nil || canBeNil(typ) { + // An untyped nil interface{}. Accept as a proper nil value. + return reflect.Zero(typ) + } + s.errorf("invalid value; expected %s", typ) + } + if typ != nil && !value.Type().AssignableTo(typ) { + if value.Kind() == reflect.Interface && !value.IsNil() { + value = value.Elem() + if value.Type().AssignableTo(typ) { + return value + } + // fallthrough + } + // Does one dereference or indirection work? We could do more, as we + // do with method receivers, but that gets messy and method receivers + // are much more constrained, so it makes more sense there than here. 
+ // Besides, one is almost always all you need. + switch { + case value.Kind() == reflect.Ptr && value.Type().Elem().AssignableTo(typ): + value = value.Elem() + if !value.IsValid() { + s.errorf("dereference of nil pointer of type %s", typ) + } + case reflect.PtrTo(value.Type()).AssignableTo(typ) && value.CanAddr(): + value = value.Addr() + default: + s.errorf("wrong type for value; expected %s; got %s", typ, value.Type()) + } + } + return value +} + +func (s *state) evalArg(dot reflect.Value, typ reflect.Type, n parse.Node) reflect.Value { + s.at(n) + switch arg := n.(type) { + case *parse.DotNode: + return s.validateType(dot, typ) + case *parse.NilNode: + if canBeNil(typ) { + return reflect.Zero(typ) + } + s.errorf("cannot assign nil to %s", typ) + case *parse.FieldNode: + return s.validateType(s.evalFieldNode(dot, arg, []parse.Node{n}, zero), typ) + case *parse.VariableNode: + return s.validateType(s.evalVariableNode(dot, arg, nil, zero), typ) + case *parse.PipeNode: + return s.validateType(s.evalPipeline(dot, arg), typ) + case *parse.IdentifierNode: + return s.evalFunction(dot, arg, arg, nil, zero) + case *parse.ChainNode: + return s.validateType(s.evalChainNode(dot, arg, nil, zero), typ) + } + switch typ.Kind() { + case reflect.Bool: + return s.evalBool(typ, n) + case reflect.Complex64, reflect.Complex128: + return s.evalComplex(typ, n) + case reflect.Float32, reflect.Float64: + return s.evalFloat(typ, n) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return s.evalInteger(typ, n) + case reflect.Interface: + if typ.NumMethod() == 0 { + return s.evalEmptyInterface(dot, n) + } + case reflect.String: + return s.evalString(typ, n) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return s.evalUnsignedInteger(typ, n) + } + s.errorf("can't handle %s for arg of type %s", n, typ) + panic("not reached") +} + +func (s *state) evalBool(typ reflect.Type, n parse.Node) reflect.Value { + s.at(n) + if n, ok := n.(*parse.BoolNode); ok { + value := reflect.New(typ).Elem() + value.SetBool(n.True) + return value + } + s.errorf("expected bool; found %s", n) + panic("not reached") +} + +func (s *state) evalString(typ reflect.Type, n parse.Node) reflect.Value { + s.at(n) + if n, ok := n.(*parse.StringNode); ok { + value := reflect.New(typ).Elem() + value.SetString(n.Text) + return value + } + s.errorf("expected string; found %s", n) + panic("not reached") +} + +func (s *state) evalInteger(typ reflect.Type, n parse.Node) reflect.Value { + s.at(n) + if n, ok := n.(*parse.NumberNode); ok && n.IsInt { + value := reflect.New(typ).Elem() + value.SetInt(n.Int64) + return value + } + s.errorf("expected integer; found %s", n) + panic("not reached") +} + +func (s *state) evalUnsignedInteger(typ reflect.Type, n parse.Node) reflect.Value { + s.at(n) + if n, ok := n.(*parse.NumberNode); ok && n.IsUint { + value := reflect.New(typ).Elem() + value.SetUint(n.Uint64) + return value + } + s.errorf("expected unsigned integer; found %s", n) + panic("not reached") +} + +func (s *state) evalFloat(typ reflect.Type, n parse.Node) reflect.Value { + s.at(n) + if n, ok := n.(*parse.NumberNode); ok && n.IsFloat { + value := reflect.New(typ).Elem() + value.SetFloat(n.Float64) + return value + } + s.errorf("expected float; found %s", n) + panic("not reached") +} + +func (s *state) evalComplex(typ reflect.Type, n parse.Node) reflect.Value { + if n, ok := n.(*parse.NumberNode); ok && n.IsComplex { + value := reflect.New(typ).Elem() + 
value.SetComplex(n.Complex128) + return value + } + s.errorf("expected complex; found %s", n) + panic("not reached") +} + +func (s *state) evalEmptyInterface(dot reflect.Value, n parse.Node) reflect.Value { + s.at(n) + switch n := n.(type) { + case *parse.BoolNode: + return reflect.ValueOf(n.True) + case *parse.DotNode: + return dot + case *parse.FieldNode: + return s.evalFieldNode(dot, n, nil, zero) + case *parse.IdentifierNode: + return s.evalFunction(dot, n, n, nil, zero) + case *parse.NilNode: + // NilNode is handled in evalArg, the only place that calls here. + s.errorf("evalEmptyInterface: nil (can't happen)") + case *parse.NumberNode: + return s.idealConstant(n) + case *parse.StringNode: + return reflect.ValueOf(n.Text) + case *parse.VariableNode: + return s.evalVariableNode(dot, n, nil, zero) + case *parse.PipeNode: + return s.evalPipeline(dot, n) + } + s.errorf("can't handle assignment of %s to empty interface argument", n) + panic("not reached") +} + +// indirect returns the item at the end of indirection, and a bool to indicate if it's nil. +// We indirect through pointers and empty interfaces (only) because +// non-empty interfaces have methods we might need. +func indirect(v reflect.Value) (rv reflect.Value, isNil bool) { + for ; v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface; v = v.Elem() { + if v.IsNil() { + return v, true + } + if v.Kind() == reflect.Interface && v.NumMethod() > 0 { + break + } + } + return v, false +} + +// printValue writes the textual representation of the value to the output of +// the template. +func (s *state) printValue(n parse.Node, v reflect.Value) { + s.at(n) + iface, ok := printableValue(v) + if !ok { + s.errorf("can't print %s of type %s", n, v.Type()) + } + fmt.Fprint(s.wr, iface) +} + +// printableValue returns the, possibly indirected, interface value inside v that +// is best for a call to formatted printer. +func printableValue(v reflect.Value) (interface{}, bool) { + if v.Kind() == reflect.Ptr { + v, _ = indirect(v) // fmt.Fprint handles nil. + } + if !v.IsValid() { + return "", true + } + + if !v.Type().Implements(errorType) && !v.Type().Implements(fmtStringerType) { + if v.CanAddr() && (reflect.PtrTo(v.Type()).Implements(errorType) || reflect.PtrTo(v.Type()).Implements(fmtStringerType)) { + v = v.Addr() + } else { + switch v.Kind() { + case reflect.Chan, reflect.Func: + return nil, false + } + } + } + return v.Interface(), true +} + +// Types to help sort the keys in a map for reproducible output. + +type rvs []reflect.Value + +func (x rvs) Len() int { return len(x) } +func (x rvs) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +type rvInts struct{ rvs } + +func (x rvInts) Less(i, j int) bool { return x.rvs[i].Int() < x.rvs[j].Int() } + +type rvUints struct{ rvs } + +func (x rvUints) Less(i, j int) bool { return x.rvs[i].Uint() < x.rvs[j].Uint() } + +type rvFloats struct{ rvs } + +func (x rvFloats) Less(i, j int) bool { return x.rvs[i].Float() < x.rvs[j].Float() } + +type rvStrings struct{ rvs } + +func (x rvStrings) Less(i, j int) bool { return x.rvs[i].String() < x.rvs[j].String() } + +// sortKeys sorts (if it can) the slice of reflect.Values, which is a slice of map keys. 
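+// Keys of kinds not handled below are returned in their original,
+// unspecified order, which is why the package docs promise sorted
+// iteration only for basic key types.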
+func sortKeys(v []reflect.Value) []reflect.Value { + if len(v) <= 1 { + return v + } + switch v[0].Kind() { + case reflect.Float32, reflect.Float64: + sort.Sort(rvFloats{v}) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + sort.Sort(rvInts{v}) + case reflect.String: + sort.Sort(rvStrings{v}) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + sort.Sort(rvUints{v}) + } + return v +} diff --git a/vendor/github.com/alecthomas/template/funcs.go b/vendor/github.com/alecthomas/template/funcs.go new file mode 100644 index 00000000000..39ee5ed68fb --- /dev/null +++ b/vendor/github.com/alecthomas/template/funcs.go @@ -0,0 +1,598 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package template + +import ( + "bytes" + "errors" + "fmt" + "io" + "net/url" + "reflect" + "strings" + "unicode" + "unicode/utf8" +) + +// FuncMap is the type of the map defining the mapping from names to functions. +// Each function must have either a single return value, or two return values of +// which the second has type error. In that case, if the second (error) +// return value evaluates to non-nil during execution, execution terminates and +// Execute returns that error. +type FuncMap map[string]interface{} + +var builtins = FuncMap{ + "and": and, + "call": call, + "html": HTMLEscaper, + "index": index, + "js": JSEscaper, + "len": length, + "not": not, + "or": or, + "print": fmt.Sprint, + "printf": fmt.Sprintf, + "println": fmt.Sprintln, + "urlquery": URLQueryEscaper, + + // Comparisons + "eq": eq, // == + "ge": ge, // >= + "gt": gt, // > + "le": le, // <= + "lt": lt, // < + "ne": ne, // != +} + +var builtinFuncs = createValueFuncs(builtins) + +// createValueFuncs turns a FuncMap into a map[string]reflect.Value +func createValueFuncs(funcMap FuncMap) map[string]reflect.Value { + m := make(map[string]reflect.Value) + addValueFuncs(m, funcMap) + return m +} + +// addValueFuncs adds to values the functions in funcs, converting them to reflect.Values. +func addValueFuncs(out map[string]reflect.Value, in FuncMap) { + for name, fn := range in { + v := reflect.ValueOf(fn) + if v.Kind() != reflect.Func { + panic("value for " + name + " not a function") + } + if !goodFunc(v.Type()) { + panic(fmt.Errorf("can't install method/function %q with %d results", name, v.Type().NumOut())) + } + out[name] = v + } +} + +// addFuncs adds to values the functions in funcs. It does no checking of the input - +// call addValueFuncs first. +func addFuncs(out, in FuncMap) { + for name, fn := range in { + out[name] = fn + } +} + +// goodFunc checks that the function or method has the right result signature. +func goodFunc(typ reflect.Type) bool { + // We allow functions with 1 result or 2 results where the second is an error. + switch { + case typ.NumOut() == 1: + return true + case typ.NumOut() == 2 && typ.Out(1) == errorType: + return true + } + return false +} + +// findFunction looks for a function in the template, and global map. +func findFunction(name string, tmpl *Template) (reflect.Value, bool) { + if tmpl != nil && tmpl.common != nil { + if fn := tmpl.execFuncs[name]; fn.IsValid() { + return fn, true + } + } + if fn := builtinFuncs[name]; fn.IsValid() { + return fn, true + } + return reflect.Value{}, false +} + +// Indexing. + +// index returns the result of indexing its first argument by the following +// arguments. 
Thus "index x 1 2 3" is, in Go syntax, x[1][2][3]. Each +// indexed item must be a map, slice, or array. +func index(item interface{}, indices ...interface{}) (interface{}, error) { + v := reflect.ValueOf(item) + for _, i := range indices { + index := reflect.ValueOf(i) + var isNil bool + if v, isNil = indirect(v); isNil { + return nil, fmt.Errorf("index of nil pointer") + } + switch v.Kind() { + case reflect.Array, reflect.Slice, reflect.String: + var x int64 + switch index.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + x = index.Int() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + x = int64(index.Uint()) + default: + return nil, fmt.Errorf("cannot index slice/array with type %s", index.Type()) + } + if x < 0 || x >= int64(v.Len()) { + return nil, fmt.Errorf("index out of range: %d", x) + } + v = v.Index(int(x)) + case reflect.Map: + if !index.IsValid() { + index = reflect.Zero(v.Type().Key()) + } + if !index.Type().AssignableTo(v.Type().Key()) { + return nil, fmt.Errorf("%s is not index type for %s", index.Type(), v.Type()) + } + if x := v.MapIndex(index); x.IsValid() { + v = x + } else { + v = reflect.Zero(v.Type().Elem()) + } + default: + return nil, fmt.Errorf("can't index item of type %s", v.Type()) + } + } + return v.Interface(), nil +} + +// Length + +// length returns the length of the item, with an error if it has no defined length. +func length(item interface{}) (int, error) { + v, isNil := indirect(reflect.ValueOf(item)) + if isNil { + return 0, fmt.Errorf("len of nil pointer") + } + switch v.Kind() { + case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String: + return v.Len(), nil + } + return 0, fmt.Errorf("len of type %s", v.Type()) +} + +// Function invocation + +// call returns the result of evaluating the first argument as a function. +// The function must return 1 result, or 2 results, the second of which is an error. +func call(fn interface{}, args ...interface{}) (interface{}, error) { + v := reflect.ValueOf(fn) + typ := v.Type() + if typ.Kind() != reflect.Func { + return nil, fmt.Errorf("non-function of type %s", typ) + } + if !goodFunc(typ) { + return nil, fmt.Errorf("function called with %d args; should be 1 or 2", typ.NumOut()) + } + numIn := typ.NumIn() + var dddType reflect.Type + if typ.IsVariadic() { + if len(args) < numIn-1 { + return nil, fmt.Errorf("wrong number of args: got %d want at least %d", len(args), numIn-1) + } + dddType = typ.In(numIn - 1).Elem() + } else { + if len(args) != numIn { + return nil, fmt.Errorf("wrong number of args: got %d want %d", len(args), numIn) + } + } + argv := make([]reflect.Value, len(args)) + for i, arg := range args { + value := reflect.ValueOf(arg) + // Compute the expected type. Clumsy because of variadics. + var argType reflect.Type + if !typ.IsVariadic() || i < numIn-1 { + argType = typ.In(i) + } else { + argType = dddType + } + if !value.IsValid() && canBeNil(argType) { + value = reflect.Zero(argType) + } + if !value.Type().AssignableTo(argType) { + return nil, fmt.Errorf("arg %d has type %s; should be %s", i, value.Type(), argType) + } + argv[i] = value + } + result := v.Call(argv) + if len(result) == 2 && !result[1].IsNil() { + return result[0].Interface(), result[1].Interface().(error) + } + return result[0].Interface(), nil +} + +// Boolean logic. 
+ +func truth(a interface{}) bool { + t, _ := isTrue(reflect.ValueOf(a)) + return t +} + +// and computes the Boolean AND of its arguments, returning +// the first false argument it encounters, or the last argument. +func and(arg0 interface{}, args ...interface{}) interface{} { + if !truth(arg0) { + return arg0 + } + for i := range args { + arg0 = args[i] + if !truth(arg0) { + break + } + } + return arg0 +} + +// or computes the Boolean OR of its arguments, returning +// the first true argument it encounters, or the last argument. +func or(arg0 interface{}, args ...interface{}) interface{} { + if truth(arg0) { + return arg0 + } + for i := range args { + arg0 = args[i] + if truth(arg0) { + break + } + } + return arg0 +} + +// not returns the Boolean negation of its argument. +func not(arg interface{}) (truth bool) { + truth, _ = isTrue(reflect.ValueOf(arg)) + return !truth +} + +// Comparison. + +// TODO: Perhaps allow comparison between signed and unsigned integers. + +var ( + errBadComparisonType = errors.New("invalid type for comparison") + errBadComparison = errors.New("incompatible types for comparison") + errNoComparison = errors.New("missing argument for comparison") +) + +type kind int + +const ( + invalidKind kind = iota + boolKind + complexKind + intKind + floatKind + integerKind + stringKind + uintKind +) + +func basicKind(v reflect.Value) (kind, error) { + switch v.Kind() { + case reflect.Bool: + return boolKind, nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return intKind, nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return uintKind, nil + case reflect.Float32, reflect.Float64: + return floatKind, nil + case reflect.Complex64, reflect.Complex128: + return complexKind, nil + case reflect.String: + return stringKind, nil + } + return invalidKind, errBadComparisonType +} + +// eq evaluates the comparison a == b || a == c || ... +func eq(arg1 interface{}, arg2 ...interface{}) (bool, error) { + v1 := reflect.ValueOf(arg1) + k1, err := basicKind(v1) + if err != nil { + return false, err + } + if len(arg2) == 0 { + return false, errNoComparison + } + for _, arg := range arg2 { + v2 := reflect.ValueOf(arg) + k2, err := basicKind(v2) + if err != nil { + return false, err + } + truth := false + if k1 != k2 { + // Special case: Can compare integer values regardless of type's sign. + switch { + case k1 == intKind && k2 == uintKind: + truth = v1.Int() >= 0 && uint64(v1.Int()) == v2.Uint() + case k1 == uintKind && k2 == intKind: + truth = v2.Int() >= 0 && v1.Uint() == uint64(v2.Int()) + default: + return false, errBadComparison + } + } else { + switch k1 { + case boolKind: + truth = v1.Bool() == v2.Bool() + case complexKind: + truth = v1.Complex() == v2.Complex() + case floatKind: + truth = v1.Float() == v2.Float() + case intKind: + truth = v1.Int() == v2.Int() + case stringKind: + truth = v1.String() == v2.String() + case uintKind: + truth = v1.Uint() == v2.Uint() + default: + panic("invalid kind") + } + } + if truth { + return true, nil + } + } + return false, nil +} + +// ne evaluates the comparison a != b. +func ne(arg1, arg2 interface{}) (bool, error) { + // != is the inverse of ==. + equal, err := eq(arg1, arg2) + return !equal, err +} + +// lt evaluates the comparison a < b. 
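+// Like eq, it compares a signed and an unsigned integer by arithmetic value.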
+func lt(arg1, arg2 interface{}) (bool, error) {
+	v1 := reflect.ValueOf(arg1)
+	k1, err := basicKind(v1)
+	if err != nil {
+		return false, err
+	}
+	v2 := reflect.ValueOf(arg2)
+	k2, err := basicKind(v2)
+	if err != nil {
+		return false, err
+	}
+	truth := false
+	if k1 != k2 {
+		// Special case: Can compare integer values regardless of type's sign.
+		switch {
+		case k1 == intKind && k2 == uintKind:
+			truth = v1.Int() < 0 || uint64(v1.Int()) < v2.Uint()
+		case k1 == uintKind && k2 == intKind:
+			truth = v2.Int() >= 0 && v1.Uint() < uint64(v2.Int())
+		default:
+			return false, errBadComparison
+		}
+	} else {
+		switch k1 {
+		case boolKind, complexKind:
+			return false, errBadComparisonType
+		case floatKind:
+			truth = v1.Float() < v2.Float()
+		case intKind:
+			truth = v1.Int() < v2.Int()
+		case stringKind:
+			truth = v1.String() < v2.String()
+		case uintKind:
+			truth = v1.Uint() < v2.Uint()
+		default:
+			panic("invalid kind")
+		}
+	}
+	return truth, nil
+}
+
+// le evaluates the comparison a <= b.
+func le(arg1, arg2 interface{}) (bool, error) {
+	// <= is < or ==.
+	lessThan, err := lt(arg1, arg2)
+	if lessThan || err != nil {
+		return lessThan, err
+	}
+	return eq(arg1, arg2)
+}
+
+// gt evaluates the comparison a > b.
+func gt(arg1, arg2 interface{}) (bool, error) {
+	// > is the inverse of <=.
+	lessOrEqual, err := le(arg1, arg2)
+	if err != nil {
+		return false, err
+	}
+	return !lessOrEqual, nil
+}
+
+// ge evaluates the comparison a >= b.
+func ge(arg1, arg2 interface{}) (bool, error) {
+	// >= is the inverse of <.
+	lessThan, err := lt(arg1, arg2)
+	if err != nil {
+		return false, err
+	}
+	return !lessThan, nil
+}
+
+// HTML escaping.
+
+var (
+	htmlQuot = []byte("&#34;") // shorter than "&quot;"
+	htmlApos = []byte("&#39;") // shorter than "&apos;" and apos was not in HTML until HTML5
+	htmlAmp  = []byte("&amp;")
+	htmlLt   = []byte("&lt;")
+	htmlGt   = []byte("&gt;")
+)
+
+// HTMLEscape writes to w the escaped HTML equivalent of the plain text data b.
+func HTMLEscape(w io.Writer, b []byte) {
+	last := 0
+	for i, c := range b {
+		var html []byte
+		switch c {
+		case '"':
+			html = htmlQuot
+		case '\'':
+			html = htmlApos
+		case '&':
+			html = htmlAmp
+		case '<':
+			html = htmlLt
+		case '>':
+			html = htmlGt
+		default:
+			continue
+		}
+		w.Write(b[last:i])
+		w.Write(html)
+		last = i + 1
+	}
+	w.Write(b[last:])
+}
+
+// HTMLEscapeString returns the escaped HTML equivalent of the plain text data s.
+func HTMLEscapeString(s string) string {
+	// Avoid allocation if we can.
+	if strings.IndexAny(s, `'"&<>`) < 0 {
+		return s
+	}
+	var b bytes.Buffer
+	HTMLEscape(&b, []byte(s))
+	return b.String()
+}
+
+// HTMLEscaper returns the escaped HTML equivalent of the textual
+// representation of its arguments.
+func HTMLEscaper(args ...interface{}) string {
+	return HTMLEscapeString(evalArgs(args))
+}
+
+// JavaScript escaping.
+
+var (
+	jsLowUni = []byte(`\u00`)
+	hex      = []byte("0123456789ABCDEF")
+
+	jsBackslash = []byte(`\\`)
+	jsApos      = []byte(`\'`)
+	jsQuot      = []byte(`\"`)
+	jsLt        = []byte(`\x3C`)
+	jsGt        = []byte(`\x3E`)
+)
+
+// JSEscape writes to w the escaped JavaScript equivalent of the plain text data b.
+func JSEscape(w io.Writer, b []byte) {
+	last := 0
+	for i := 0; i < len(b); i++ {
+		c := b[i]
+
+		if !jsIsSpecial(rune(c)) {
+			// fast path: nothing to do
+			continue
+		}
+		w.Write(b[last:i])
+
+		if c < utf8.RuneSelf {
+			// Quotes, slashes and angle brackets get quoted.
+			// Control characters get written as \u00XX.
+			switch c {
+			case '\\':
+				w.Write(jsBackslash)
+			case '\'':
+				w.Write(jsApos)
+			case '"':
+				w.Write(jsQuot)
+			case '<':
+				w.Write(jsLt)
+			case '>':
+				w.Write(jsGt)
+			default:
+				w.Write(jsLowUni)
+				t, b := c>>4, c&0x0f
+				w.Write(hex[t : t+1])
+				w.Write(hex[b : b+1])
+			}
+		} else {
+			// Unicode rune.
+			r, size := utf8.DecodeRune(b[i:])
+			if unicode.IsPrint(r) {
+				w.Write(b[i : i+size])
+			} else {
+				fmt.Fprintf(w, "\\u%04X", r)
+			}
+			i += size - 1
+		}
+		last = i + 1
+	}
+	w.Write(b[last:])
+}
+
+// JSEscapeString returns the escaped JavaScript equivalent of the plain text data s.
+func JSEscapeString(s string) string {
+	// Avoid allocation if we can.
+	if strings.IndexFunc(s, jsIsSpecial) < 0 {
+		return s
+	}
+	var b bytes.Buffer
+	JSEscape(&b, []byte(s))
+	return b.String()
+}
+
+func jsIsSpecial(r rune) bool {
+	switch r {
+	case '\\', '\'', '"', '<', '>':
+		return true
+	}
+	return r < ' ' || utf8.RuneSelf <= r
+}
+
+// JSEscaper returns the escaped JavaScript equivalent of the textual
+// representation of its arguments.
+func JSEscaper(args ...interface{}) string {
+	return JSEscapeString(evalArgs(args))
+}
+
+// URLQueryEscaper returns the escaped value of the textual representation of
+// its arguments in a form suitable for embedding in a URL query.
+func URLQueryEscaper(args ...interface{}) string {
+	return url.QueryEscape(evalArgs(args))
+}
+
+// evalArgs formats the list of arguments into a string. It is therefore equivalent to
+//	fmt.Sprint(args...)
+// except that each argument is indirected (if a pointer), as required,
+// using the same rules as the default string evaluation during template
+// execution.
+func evalArgs(args []interface{}) string {
+	ok := false
+	var s string
+	// Fast path for simple common case.
+	if len(args) == 1 {
+		s, ok = args[0].(string)
+	}
+	if !ok {
+		for i, arg := range args {
+			a, ok := printableValue(reflect.ValueOf(arg))
+			if ok {
+				args[i] = a
+			} // else let fmt do its thing
+		}
+		s = fmt.Sprint(args...)
+	}
+	return s
+}
diff --git a/vendor/github.com/alecthomas/template/go.mod b/vendor/github.com/alecthomas/template/go.mod
new file mode 100644
index 00000000000..a70670ae21c
--- /dev/null
+++ b/vendor/github.com/alecthomas/template/go.mod
@@ -0,0 +1 @@
+module github.com/alecthomas/template
diff --git a/vendor/github.com/alecthomas/template/helper.go b/vendor/github.com/alecthomas/template/helper.go
new file mode 100644
index 00000000000..3636fb54d69
--- /dev/null
+++ b/vendor/github.com/alecthomas/template/helper.go
@@ -0,0 +1,108 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Helper functions to make constructing templates easier.
+
+package template
+
+import (
+	"fmt"
+	"io/ioutil"
+	"path/filepath"
+)
+
+// Functions and methods to parse templates.
+
+// Must is a helper that wraps a call to a function returning (*Template, error)
+// and panics if the error is non-nil. It is intended for use in variable
+// initializations such as
+//	var t = template.Must(template.New("name").Parse("text"))
+func Must(t *Template, err error) *Template {
+	if err != nil {
+		panic(err)
+	}
+	return t
+}
+
+// ParseFiles creates a new Template and parses the template definitions from
+// the named files. The returned template's name will have the (base) name and
+// (parsed) contents of the first file. There must be at least one file.
+// If an error occurs, parsing stops and the returned *Template is nil.
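+//
+// A typical call (file names are illustrative):
+//	t, err := template.ParseFiles("base.tmpl", "page.tmpl")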
+func ParseFiles(filenames ...string) (*Template, error) { + return parseFiles(nil, filenames...) +} + +// ParseFiles parses the named files and associates the resulting templates with +// t. If an error occurs, parsing stops and the returned template is nil; +// otherwise it is t. There must be at least one file. +func (t *Template) ParseFiles(filenames ...string) (*Template, error) { + return parseFiles(t, filenames...) +} + +// parseFiles is the helper for the method and function. If the argument +// template is nil, it is created from the first file. +func parseFiles(t *Template, filenames ...string) (*Template, error) { + if len(filenames) == 0 { + // Not really a problem, but be consistent. + return nil, fmt.Errorf("template: no files named in call to ParseFiles") + } + for _, filename := range filenames { + b, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + s := string(b) + name := filepath.Base(filename) + // First template becomes return value if not already defined, + // and we use that one for subsequent New calls to associate + // all the templates together. Also, if this file has the same name + // as t, this file becomes the contents of t, so + // t, err := New(name).Funcs(xxx).ParseFiles(name) + // works. Otherwise we create a new template associated with t. + var tmpl *Template + if t == nil { + t = New(name) + } + if name == t.Name() { + tmpl = t + } else { + tmpl = t.New(name) + } + _, err = tmpl.Parse(s) + if err != nil { + return nil, err + } + } + return t, nil +} + +// ParseGlob creates a new Template and parses the template definitions from the +// files identified by the pattern, which must match at least one file. The +// returned template will have the (base) name and (parsed) contents of the +// first file matched by the pattern. ParseGlob is equivalent to calling +// ParseFiles with the list of files matched by the pattern. +func ParseGlob(pattern string) (*Template, error) { + return parseGlob(nil, pattern) +} + +// ParseGlob parses the template definitions in the files identified by the +// pattern and associates the resulting templates with t. The pattern is +// processed by filepath.Glob and must match at least one file. ParseGlob is +// equivalent to calling t.ParseFiles with the list of files matched by the +// pattern. +func (t *Template) ParseGlob(pattern string) (*Template, error) { + return parseGlob(t, pattern) +} + +// parseGlob is the implementation of the function and method ParseGlob. +func parseGlob(t *Template, pattern string) (*Template, error) { + filenames, err := filepath.Glob(pattern) + if err != nil { + return nil, err + } + if len(filenames) == 0 { + return nil, fmt.Errorf("template: pattern matches no files: %#q", pattern) + } + return parseFiles(t, filenames...) +} diff --git a/vendor/github.com/alecthomas/template/parse/lex.go b/vendor/github.com/alecthomas/template/parse/lex.go new file mode 100644 index 00000000000..55f1c051e86 --- /dev/null +++ b/vendor/github.com/alecthomas/template/parse/lex.go @@ -0,0 +1,556 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package parse + +import ( + "fmt" + "strings" + "unicode" + "unicode/utf8" +) + +// item represents a token or text string returned from the scanner. +type item struct { + typ itemType // The type of this item. + pos Pos // The starting position, in bytes, of this item in the input string. + val string // The value of this item. 
+} + +func (i item) String() string { + switch { + case i.typ == itemEOF: + return "EOF" + case i.typ == itemError: + return i.val + case i.typ > itemKeyword: + return fmt.Sprintf("<%s>", i.val) + case len(i.val) > 10: + return fmt.Sprintf("%.10q...", i.val) + } + return fmt.Sprintf("%q", i.val) +} + +// itemType identifies the type of lex items. +type itemType int + +const ( + itemError itemType = iota // error occurred; value is text of error + itemBool // boolean constant + itemChar // printable ASCII character; grab bag for comma etc. + itemCharConstant // character constant + itemComplex // complex constant (1+2i); imaginary is just a number + itemColonEquals // colon-equals (':=') introducing a declaration + itemEOF + itemField // alphanumeric identifier starting with '.' + itemIdentifier // alphanumeric identifier not starting with '.' + itemLeftDelim // left action delimiter + itemLeftParen // '(' inside action + itemNumber // simple number, including imaginary + itemPipe // pipe symbol + itemRawString // raw quoted string (includes quotes) + itemRightDelim // right action delimiter + itemElideNewline // elide newline after right delim + itemRightParen // ')' inside action + itemSpace // run of spaces separating arguments + itemString // quoted string (includes quotes) + itemText // plain text + itemVariable // variable starting with '$', such as '$' or '$1' or '$hello' + // Keywords appear after all the rest. + itemKeyword // used only to delimit the keywords + itemDot // the cursor, spelled '.' + itemDefine // define keyword + itemElse // else keyword + itemEnd // end keyword + itemIf // if keyword + itemNil // the untyped nil constant, easiest to treat as a keyword + itemRange // range keyword + itemTemplate // template keyword + itemWith // with keyword +) + +var key = map[string]itemType{ + ".": itemDot, + "define": itemDefine, + "else": itemElse, + "end": itemEnd, + "if": itemIf, + "range": itemRange, + "nil": itemNil, + "template": itemTemplate, + "with": itemWith, +} + +const eof = -1 + +// stateFn represents the state of the scanner as a function that returns the next state. +type stateFn func(*lexer) stateFn + +// lexer holds the state of the scanner. +type lexer struct { + name string // the name of the input; used only for error reports + input string // the string being scanned + leftDelim string // start of action + rightDelim string // end of action + state stateFn // the next lexing function to enter + pos Pos // current position in the input + start Pos // start position of this item + width Pos // width of last rune read from input + lastPos Pos // position of most recent item returned by nextItem + items chan item // channel of scanned items + parenDepth int // nesting depth of ( ) exprs +} + +// next returns the next rune in the input. +func (l *lexer) next() rune { + if int(l.pos) >= len(l.input) { + l.width = 0 + return eof + } + r, w := utf8.DecodeRuneInString(l.input[l.pos:]) + l.width = Pos(w) + l.pos += l.width + return r +} + +// peek returns but does not consume the next rune in the input. +func (l *lexer) peek() rune { + r := l.next() + l.backup() + return r +} + +// backup steps back one rune. Can only be called once per call of next. +func (l *lexer) backup() { + l.pos -= l.width +} + +// emit passes an item back to the client. +func (l *lexer) emit(t itemType) { + l.items <- item{t, l.start, l.input[l.start:l.pos]} + l.start = l.pos +} + +// ignore skips over the pending input before this point. 
+func (l *lexer) ignore() { + l.start = l.pos +} + +// accept consumes the next rune if it's from the valid set. +func (l *lexer) accept(valid string) bool { + if strings.IndexRune(valid, l.next()) >= 0 { + return true + } + l.backup() + return false +} + +// acceptRun consumes a run of runes from the valid set. +func (l *lexer) acceptRun(valid string) { + for strings.IndexRune(valid, l.next()) >= 0 { + } + l.backup() +} + +// lineNumber reports which line we're on, based on the position of +// the previous item returned by nextItem. Doing it this way +// means we don't have to worry about peek double counting. +func (l *lexer) lineNumber() int { + return 1 + strings.Count(l.input[:l.lastPos], "\n") +} + +// errorf returns an error token and terminates the scan by passing +// back a nil pointer that will be the next state, terminating l.nextItem. +func (l *lexer) errorf(format string, args ...interface{}) stateFn { + l.items <- item{itemError, l.start, fmt.Sprintf(format, args...)} + return nil +} + +// nextItem returns the next item from the input. +func (l *lexer) nextItem() item { + item := <-l.items + l.lastPos = item.pos + return item +} + +// lex creates a new scanner for the input string. +func lex(name, input, left, right string) *lexer { + if left == "" { + left = leftDelim + } + if right == "" { + right = rightDelim + } + l := &lexer{ + name: name, + input: input, + leftDelim: left, + rightDelim: right, + items: make(chan item), + } + go l.run() + return l +} + +// run runs the state machine for the lexer. +func (l *lexer) run() { + for l.state = lexText; l.state != nil; { + l.state = l.state(l) + } +} + +// state functions + +const ( + leftDelim = "{{" + rightDelim = "}}" + leftComment = "/*" + rightComment = "*/" +) + +// lexText scans until an opening action delimiter, "{{". +func lexText(l *lexer) stateFn { + for { + if strings.HasPrefix(l.input[l.pos:], l.leftDelim) { + if l.pos > l.start { + l.emit(itemText) + } + return lexLeftDelim + } + if l.next() == eof { + break + } + } + // Correctly reached EOF. + if l.pos > l.start { + l.emit(itemText) + } + l.emit(itemEOF) + return nil +} + +// lexLeftDelim scans the left delimiter, which is known to be present. +func lexLeftDelim(l *lexer) stateFn { + l.pos += Pos(len(l.leftDelim)) + if strings.HasPrefix(l.input[l.pos:], leftComment) { + return lexComment + } + l.emit(itemLeftDelim) + l.parenDepth = 0 + return lexInsideAction +} + +// lexComment scans a comment. The left comment marker is known to be present. +func lexComment(l *lexer) stateFn { + l.pos += Pos(len(leftComment)) + i := strings.Index(l.input[l.pos:], rightComment) + if i < 0 { + return l.errorf("unclosed comment") + } + l.pos += Pos(i + len(rightComment)) + if !strings.HasPrefix(l.input[l.pos:], l.rightDelim) { + return l.errorf("comment ends before closing delimiter") + + } + l.pos += Pos(len(l.rightDelim)) + l.ignore() + return lexText +} + +// lexRightDelim scans the right delimiter, which is known to be present. +func lexRightDelim(l *lexer) stateFn { + l.pos += Pos(len(l.rightDelim)) + l.emit(itemRightDelim) + if l.peek() == '\\' { + l.pos++ + l.emit(itemElideNewline) + } + return lexText +} + +// lexInsideAction scans the elements inside action delimiters. +func lexInsideAction(l *lexer) stateFn { + // Either number, quoted string, or identifier. + // Spaces separate arguments; runs of spaces turn into itemSpace. + // Pipe symbols separate and are emitted. 
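+	// A right delimiter terminates the action here unless a paren is still
+	// open; in this fork the delimiter may also be followed by "\\", which
+	// asks the parser to elide the following newline (see itemElideNewline).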
+	if strings.HasPrefix(l.input[l.pos:], l.rightDelim+"\\") || strings.HasPrefix(l.input[l.pos:], l.rightDelim) {
+		if l.parenDepth == 0 {
+			return lexRightDelim
+		}
+		return l.errorf("unclosed left paren")
+	}
+	switch r := l.next(); {
+	case r == eof || isEndOfLine(r):
+		return l.errorf("unclosed action")
+	case isSpace(r):
+		return lexSpace
+	case r == ':':
+		if l.next() != '=' {
+			return l.errorf("expected :=")
+		}
+		l.emit(itemColonEquals)
+	case r == '|':
+		l.emit(itemPipe)
+	case r == '"':
+		return lexQuote
+	case r == '`':
+		return lexRawQuote
+	case r == '$':
+		return lexVariable
+	case r == '\'':
+		return lexChar
+	case r == '.':
+		// special look-ahead for ".field" so we don't break l.backup().
+		if l.pos < Pos(len(l.input)) {
+			r := l.input[l.pos]
+			if r < '0' || '9' < r {
+				return lexField
+			}
+		}
+		fallthrough // '.' can start a number.
+	case r == '+' || r == '-' || ('0' <= r && r <= '9'):
+		l.backup()
+		return lexNumber
+	case isAlphaNumeric(r):
+		l.backup()
+		return lexIdentifier
+	case r == '(':
+		l.emit(itemLeftParen)
+		l.parenDepth++
+		return lexInsideAction
+	case r == ')':
+		l.emit(itemRightParen)
+		l.parenDepth--
+		if l.parenDepth < 0 {
+			return l.errorf("unexpected right paren %#U", r)
+		}
+		return lexInsideAction
+	case r <= unicode.MaxASCII && unicode.IsPrint(r):
+		l.emit(itemChar)
+		return lexInsideAction
+	default:
+		return l.errorf("unrecognized character in action: %#U", r)
+	}
+	return lexInsideAction
+}
+
+// lexSpace scans a run of space characters.
+// One space has already been seen.
+func lexSpace(l *lexer) stateFn {
+	for isSpace(l.peek()) {
+		l.next()
+	}
+	l.emit(itemSpace)
+	return lexInsideAction
+}
+
+// lexIdentifier scans an alphanumeric.
+func lexIdentifier(l *lexer) stateFn {
+Loop:
+	for {
+		switch r := l.next(); {
+		case isAlphaNumeric(r):
+			// absorb.
+		default:
+			l.backup()
+			word := l.input[l.start:l.pos]
+			if !l.atTerminator() {
+				return l.errorf("bad character %#U", r)
+			}
+			switch {
+			case key[word] > itemKeyword:
+				l.emit(key[word])
+			case word[0] == '.':
+				l.emit(itemField)
+			case word == "true", word == "false":
+				l.emit(itemBool)
+			default:
+				l.emit(itemIdentifier)
+			}
+			break Loop
+		}
+	}
+	return lexInsideAction
+}
+
+// lexField scans a field: .Alphanumeric.
+// The . has been scanned.
+func lexField(l *lexer) stateFn {
+	return lexFieldOrVariable(l, itemField)
+}
+
+// lexVariable scans a Variable: $Alphanumeric.
+// The $ has been scanned.
+func lexVariable(l *lexer) stateFn {
+	if l.atTerminator() { // Nothing interesting follows -> "$".
+		l.emit(itemVariable)
+		return lexInsideAction
+	}
+	return lexFieldOrVariable(l, itemVariable)
+}
+
+// lexFieldOrVariable scans a field or variable: [.$]Alphanumeric.
+// The . or $ has been scanned.
+func lexFieldOrVariable(l *lexer, typ itemType) stateFn {
+	if l.atTerminator() { // Nothing interesting follows -> "." or "$".
+		if typ == itemVariable {
+			l.emit(itemVariable)
+		} else {
+			l.emit(itemDot)
+		}
+		return lexInsideAction
+	}
+	var r rune
+	for {
+		r = l.next()
+		if !isAlphaNumeric(r) {
+			l.backup()
+			break
+		}
+	}
+	if !l.atTerminator() {
+		return l.errorf("bad character %#U", r)
+	}
+	l.emit(typ)
+	return lexInsideAction
+}
+
+// atTerminator reports whether the input is at valid termination character to
+// appear after an identifier. Breaks .X.Y into two pieces. Also catches cases
+// like "$x+2" not being acceptable without a space, in case we decide one
+// day to implement arithmetic.
+func (l *lexer) atTerminator() bool { + r := l.peek() + if isSpace(r) || isEndOfLine(r) { + return true + } + switch r { + case eof, '.', ',', '|', ':', ')', '(': + return true + } + // Does r start the delimiter? This can be ambiguous (with delim=="//", $x/2 will + // succeed but should fail) but only in extremely rare cases caused by willfully + // bad choice of delimiter. + if rd, _ := utf8.DecodeRuneInString(l.rightDelim); rd == r { + return true + } + return false +} + +// lexChar scans a character constant. The initial quote is already +// scanned. Syntax checking is done by the parser. +func lexChar(l *lexer) stateFn { +Loop: + for { + switch l.next() { + case '\\': + if r := l.next(); r != eof && r != '\n' { + break + } + fallthrough + case eof, '\n': + return l.errorf("unterminated character constant") + case '\'': + break Loop + } + } + l.emit(itemCharConstant) + return lexInsideAction +} + +// lexNumber scans a number: decimal, octal, hex, float, or imaginary. This +// isn't a perfect number scanner - for instance it accepts "." and "0x0.2" +// and "089" - but when it's wrong the input is invalid and the parser (via +// strconv) will notice. +func lexNumber(l *lexer) stateFn { + if !l.scanNumber() { + return l.errorf("bad number syntax: %q", l.input[l.start:l.pos]) + } + if sign := l.peek(); sign == '+' || sign == '-' { + // Complex: 1+2i. No spaces, must end in 'i'. + if !l.scanNumber() || l.input[l.pos-1] != 'i' { + return l.errorf("bad number syntax: %q", l.input[l.start:l.pos]) + } + l.emit(itemComplex) + } else { + l.emit(itemNumber) + } + return lexInsideAction +} + +func (l *lexer) scanNumber() bool { + // Optional leading sign. + l.accept("+-") + // Is it hex? + digits := "0123456789" + if l.accept("0") && l.accept("xX") { + digits = "0123456789abcdefABCDEF" + } + l.acceptRun(digits) + if l.accept(".") { + l.acceptRun(digits) + } + if l.accept("eE") { + l.accept("+-") + l.acceptRun("0123456789") + } + // Is it imaginary? + l.accept("i") + // Next thing mustn't be alphanumeric. + if isAlphaNumeric(l.peek()) { + l.next() + return false + } + return true +} + +// lexQuote scans a quoted string. +func lexQuote(l *lexer) stateFn { +Loop: + for { + switch l.next() { + case '\\': + if r := l.next(); r != eof && r != '\n' { + break + } + fallthrough + case eof, '\n': + return l.errorf("unterminated quoted string") + case '"': + break Loop + } + } + l.emit(itemString) + return lexInsideAction +} + +// lexRawQuote scans a raw quoted string. +func lexRawQuote(l *lexer) stateFn { +Loop: + for { + switch l.next() { + case eof, '\n': + return l.errorf("unterminated raw quoted string") + case '`': + break Loop + } + } + l.emit(itemRawString) + return lexInsideAction +} + +// isSpace reports whether r is a space character. +func isSpace(r rune) bool { + return r == ' ' || r == '\t' +} + +// isEndOfLine reports whether r is an end-of-line character. +func isEndOfLine(r rune) bool { + return r == '\r' || r == '\n' +} + +// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore. +func isAlphaNumeric(r rune) bool { + return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r) +} diff --git a/vendor/github.com/alecthomas/template/parse/node.go b/vendor/github.com/alecthomas/template/parse/node.go new file mode 100644 index 00000000000..55c37f6dbac --- /dev/null +++ b/vendor/github.com/alecthomas/template/parse/node.go @@ -0,0 +1,834 @@ +// Copyright 2011 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Parse nodes. + +package parse + +import ( + "bytes" + "fmt" + "strconv" + "strings" +) + +var textFormat = "%s" // Changed to "%q" in tests for better error messages. + +// A Node is an element in the parse tree. The interface is trivial. +// The interface contains an unexported method so that only +// types local to this package can satisfy it. +type Node interface { + Type() NodeType + String() string + // Copy does a deep copy of the Node and all its components. + // To avoid type assertions, some XxxNodes also have specialized + // CopyXxx methods that return *XxxNode. + Copy() Node + Position() Pos // byte position of start of node in full original input string + // tree returns the containing *Tree. + // It is unexported so all implementations of Node are in this package. + tree() *Tree +} + +// NodeType identifies the type of a parse tree node. +type NodeType int + +// Pos represents a byte position in the original input text from which +// this template was parsed. +type Pos int + +func (p Pos) Position() Pos { + return p +} + +// Type returns itself and provides an easy default implementation +// for embedding in a Node. Embedded in all non-trivial Nodes. +func (t NodeType) Type() NodeType { + return t +} + +const ( + NodeText NodeType = iota // Plain text. + NodeAction // A non-control action such as a field evaluation. + NodeBool // A boolean constant. + NodeChain // A sequence of field accesses. + NodeCommand // An element of a pipeline. + NodeDot // The cursor, dot. + nodeElse // An else action. Not added to tree. + nodeEnd // An end action. Not added to tree. + NodeField // A field or method name. + NodeIdentifier // An identifier; always a function name. + NodeIf // An if action. + NodeList // A list of Nodes. + NodeNil // An untyped nil constant. + NodeNumber // A numerical constant. + NodePipe // A pipeline of commands. + NodeRange // A range action. + NodeString // A string constant. + NodeTemplate // A template invocation action. + NodeVariable // A $ variable. + NodeWith // A with action. +) + +// Nodes. + +// ListNode holds a sequence of nodes. +type ListNode struct { + NodeType + Pos + tr *Tree + Nodes []Node // The element nodes in lexical order. +} + +func (t *Tree) newList(pos Pos) *ListNode { + return &ListNode{tr: t, NodeType: NodeList, Pos: pos} +} + +func (l *ListNode) append(n Node) { + l.Nodes = append(l.Nodes, n) +} + +func (l *ListNode) tree() *Tree { + return l.tr +} + +func (l *ListNode) String() string { + b := new(bytes.Buffer) + for _, n := range l.Nodes { + fmt.Fprint(b, n) + } + return b.String() +} + +func (l *ListNode) CopyList() *ListNode { + if l == nil { + return l + } + n := l.tr.newList(l.Pos) + for _, elem := range l.Nodes { + n.append(elem.Copy()) + } + return n +} + +func (l *ListNode) Copy() Node { + return l.CopyList() +} + +// TextNode holds plain text. +type TextNode struct { + NodeType + Pos + tr *Tree + Text []byte // The text; may span newlines. 
+} + +func (t *Tree) newText(pos Pos, text string) *TextNode { + return &TextNode{tr: t, NodeType: NodeText, Pos: pos, Text: []byte(text)} +} + +func (t *TextNode) String() string { + return fmt.Sprintf(textFormat, t.Text) +} + +func (t *TextNode) tree() *Tree { + return t.tr +} + +func (t *TextNode) Copy() Node { + return &TextNode{tr: t.tr, NodeType: NodeText, Pos: t.Pos, Text: append([]byte{}, t.Text...)} +} + +// PipeNode holds a pipeline with optional declaration +type PipeNode struct { + NodeType + Pos + tr *Tree + Line int // The line number in the input (deprecated; kept for compatibility) + Decl []*VariableNode // Variable declarations in lexical order. + Cmds []*CommandNode // The commands in lexical order. +} + +func (t *Tree) newPipeline(pos Pos, line int, decl []*VariableNode) *PipeNode { + return &PipeNode{tr: t, NodeType: NodePipe, Pos: pos, Line: line, Decl: decl} +} + +func (p *PipeNode) append(command *CommandNode) { + p.Cmds = append(p.Cmds, command) +} + +func (p *PipeNode) String() string { + s := "" + if len(p.Decl) > 0 { + for i, v := range p.Decl { + if i > 0 { + s += ", " + } + s += v.String() + } + s += " := " + } + for i, c := range p.Cmds { + if i > 0 { + s += " | " + } + s += c.String() + } + return s +} + +func (p *PipeNode) tree() *Tree { + return p.tr +} + +func (p *PipeNode) CopyPipe() *PipeNode { + if p == nil { + return p + } + var decl []*VariableNode + for _, d := range p.Decl { + decl = append(decl, d.Copy().(*VariableNode)) + } + n := p.tr.newPipeline(p.Pos, p.Line, decl) + for _, c := range p.Cmds { + n.append(c.Copy().(*CommandNode)) + } + return n +} + +func (p *PipeNode) Copy() Node { + return p.CopyPipe() +} + +// ActionNode holds an action (something bounded by delimiters). +// Control actions have their own nodes; ActionNode represents simple +// ones such as field evaluations and parenthesized pipelines. +type ActionNode struct { + NodeType + Pos + tr *Tree + Line int // The line number in the input (deprecated; kept for compatibility) + Pipe *PipeNode // The pipeline in the action. +} + +func (t *Tree) newAction(pos Pos, line int, pipe *PipeNode) *ActionNode { + return &ActionNode{tr: t, NodeType: NodeAction, Pos: pos, Line: line, Pipe: pipe} +} + +func (a *ActionNode) String() string { + return fmt.Sprintf("{{%s}}", a.Pipe) + +} + +func (a *ActionNode) tree() *Tree { + return a.tr +} + +func (a *ActionNode) Copy() Node { + return a.tr.newAction(a.Pos, a.Line, a.Pipe.CopyPipe()) + +} + +// CommandNode holds a command (a pipeline inside an evaluating action). +type CommandNode struct { + NodeType + Pos + tr *Tree + Args []Node // Arguments in lexical order: Identifier, field, or constant. +} + +func (t *Tree) newCommand(pos Pos) *CommandNode { + return &CommandNode{tr: t, NodeType: NodeCommand, Pos: pos} +} + +func (c *CommandNode) append(arg Node) { + c.Args = append(c.Args, arg) +} + +func (c *CommandNode) String() string { + s := "" + for i, arg := range c.Args { + if i > 0 { + s += " " + } + if arg, ok := arg.(*PipeNode); ok { + s += "(" + arg.String() + ")" + continue + } + s += arg.String() + } + return s +} + +func (c *CommandNode) tree() *Tree { + return c.tr +} + +func (c *CommandNode) Copy() Node { + if c == nil { + return c + } + n := c.tr.newCommand(c.Pos) + for _, c := range c.Args { + n.append(c.Copy()) + } + return n +} + +// IdentifierNode holds an identifier. +type IdentifierNode struct { + NodeType + Pos + tr *Tree + Ident string // The identifier's name. 
+} + +// NewIdentifier returns a new IdentifierNode with the given identifier name. +func NewIdentifier(ident string) *IdentifierNode { + return &IdentifierNode{NodeType: NodeIdentifier, Ident: ident} +} + +// SetPos sets the position. NewIdentifier is a public method so we can't modify its signature. +// Chained for convenience. +// TODO: fix one day? +func (i *IdentifierNode) SetPos(pos Pos) *IdentifierNode { + i.Pos = pos + return i +} + +// SetTree sets the parent tree for the node. NewIdentifier is a public method so we can't modify its signature. +// Chained for convenience. +// TODO: fix one day? +func (i *IdentifierNode) SetTree(t *Tree) *IdentifierNode { + i.tr = t + return i +} + +func (i *IdentifierNode) String() string { + return i.Ident +} + +func (i *IdentifierNode) tree() *Tree { + return i.tr +} + +func (i *IdentifierNode) Copy() Node { + return NewIdentifier(i.Ident).SetTree(i.tr).SetPos(i.Pos) +} + +// VariableNode holds a list of variable names, possibly with chained field +// accesses. The dollar sign is part of the (first) name. +type VariableNode struct { + NodeType + Pos + tr *Tree + Ident []string // Variable name and fields in lexical order. +} + +func (t *Tree) newVariable(pos Pos, ident string) *VariableNode { + return &VariableNode{tr: t, NodeType: NodeVariable, Pos: pos, Ident: strings.Split(ident, ".")} +} + +func (v *VariableNode) String() string { + s := "" + for i, id := range v.Ident { + if i > 0 { + s += "." + } + s += id + } + return s +} + +func (v *VariableNode) tree() *Tree { + return v.tr +} + +func (v *VariableNode) Copy() Node { + return &VariableNode{tr: v.tr, NodeType: NodeVariable, Pos: v.Pos, Ident: append([]string{}, v.Ident...)} +} + +// DotNode holds the special identifier '.'. +type DotNode struct { + NodeType + Pos + tr *Tree +} + +func (t *Tree) newDot(pos Pos) *DotNode { + return &DotNode{tr: t, NodeType: NodeDot, Pos: pos} +} + +func (d *DotNode) Type() NodeType { + // Override method on embedded NodeType for API compatibility. + // TODO: Not really a problem; could change API without effect but + // api tool complains. + return NodeDot +} + +func (d *DotNode) String() string { + return "." +} + +func (d *DotNode) tree() *Tree { + return d.tr +} + +func (d *DotNode) Copy() Node { + return d.tr.newDot(d.Pos) +} + +// NilNode holds the special identifier 'nil' representing an untyped nil constant. +type NilNode struct { + NodeType + Pos + tr *Tree +} + +func (t *Tree) newNil(pos Pos) *NilNode { + return &NilNode{tr: t, NodeType: NodeNil, Pos: pos} +} + +func (n *NilNode) Type() NodeType { + // Override method on embedded NodeType for API compatibility. + // TODO: Not really a problem; could change API without effect but + // api tool complains. + return NodeNil +} + +func (n *NilNode) String() string { + return "nil" +} + +func (n *NilNode) tree() *Tree { + return n.tr +} + +func (n *NilNode) Copy() Node { + return n.tr.newNil(n.Pos) +} + +// FieldNode holds a field (identifier starting with '.'). +// The names may be chained ('.x.y'). +// The period is dropped from each ident. +type FieldNode struct { + NodeType + Pos + tr *Tree + Ident []string // The identifiers in lexical order. +} + +func (t *Tree) newField(pos Pos, ident string) *FieldNode { + return &FieldNode{tr: t, NodeType: NodeField, Pos: pos, Ident: strings.Split(ident[1:], ".")} // [1:] to drop leading period +} + +func (f *FieldNode) String() string { + s := "" + for _, id := range f.Ident { + s += "." 
+ id + } + return s +} + +func (f *FieldNode) tree() *Tree { + return f.tr +} + +func (f *FieldNode) Copy() Node { + return &FieldNode{tr: f.tr, NodeType: NodeField, Pos: f.Pos, Ident: append([]string{}, f.Ident...)} +} + +// ChainNode holds a term followed by a chain of field accesses (identifier starting with '.'). +// The names may be chained ('.x.y'). +// The periods are dropped from each ident. +type ChainNode struct { + NodeType + Pos + tr *Tree + Node Node + Field []string // The identifiers in lexical order. +} + +func (t *Tree) newChain(pos Pos, node Node) *ChainNode { + return &ChainNode{tr: t, NodeType: NodeChain, Pos: pos, Node: node} +} + +// Add adds the named field (which should start with a period) to the end of the chain. +func (c *ChainNode) Add(field string) { + if len(field) == 0 || field[0] != '.' { + panic("no dot in field") + } + field = field[1:] // Remove leading dot. + if field == "" { + panic("empty field") + } + c.Field = append(c.Field, field) +} + +func (c *ChainNode) String() string { + s := c.Node.String() + if _, ok := c.Node.(*PipeNode); ok { + s = "(" + s + ")" + } + for _, field := range c.Field { + s += "." + field + } + return s +} + +func (c *ChainNode) tree() *Tree { + return c.tr +} + +func (c *ChainNode) Copy() Node { + return &ChainNode{tr: c.tr, NodeType: NodeChain, Pos: c.Pos, Node: c.Node, Field: append([]string{}, c.Field...)} +} + +// BoolNode holds a boolean constant. +type BoolNode struct { + NodeType + Pos + tr *Tree + True bool // The value of the boolean constant. +} + +func (t *Tree) newBool(pos Pos, true bool) *BoolNode { + return &BoolNode{tr: t, NodeType: NodeBool, Pos: pos, True: true} +} + +func (b *BoolNode) String() string { + if b.True { + return "true" + } + return "false" +} + +func (b *BoolNode) tree() *Tree { + return b.tr +} + +func (b *BoolNode) Copy() Node { + return b.tr.newBool(b.Pos, b.True) +} + +// NumberNode holds a number: signed or unsigned integer, float, or complex. +// The value is parsed and stored under all the types that can represent the value. +// This simulates in a small amount of code the behavior of Go's ideal constants. +type NumberNode struct { + NodeType + Pos + tr *Tree + IsInt bool // Number has an integral value. + IsUint bool // Number has an unsigned integral value. + IsFloat bool // Number has a floating-point value. + IsComplex bool // Number is complex. + Int64 int64 // The signed integer value. + Uint64 uint64 // The unsigned integer value. + Float64 float64 // The floating-point value. + Complex128 complex128 // The complex value. + Text string // The original textual representation from the input. +} + +func (t *Tree) newNumber(pos Pos, text string, typ itemType) (*NumberNode, error) { + n := &NumberNode{tr: t, NodeType: NodeNumber, Pos: pos, Text: text} + switch typ { + case itemCharConstant: + rune, _, tail, err := strconv.UnquoteChar(text[1:], text[0]) + if err != nil { + return nil, err + } + if tail != "'" { + return nil, fmt.Errorf("malformed character constant: %s", text) + } + n.Int64 = int64(rune) + n.IsInt = true + n.Uint64 = uint64(rune) + n.IsUint = true + n.Float64 = float64(rune) // odd but those are the rules. + n.IsFloat = true + return n, nil + case itemComplex: + // fmt.Sscan can parse the pair, so let it do the work. + if _, err := fmt.Sscan(text, &n.Complex128); err != nil { + return nil, err + } + n.IsComplex = true + n.simplifyComplex() + return n, nil + } + // Imaginary constants can only be complex unless they are zero. 
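+	// (simplifyComplex below folds a zero imaginary part back into the
+	// int/uint/float forms, so "0i" also gains integer value 0 while "2i"
+	// remains complex-only.)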
+ if len(text) > 0 && text[len(text)-1] == 'i' { + f, err := strconv.ParseFloat(text[:len(text)-1], 64) + if err == nil { + n.IsComplex = true + n.Complex128 = complex(0, f) + n.simplifyComplex() + return n, nil + } + } + // Do integer test first so we get 0x123 etc. + u, err := strconv.ParseUint(text, 0, 64) // will fail for -0; fixed below. + if err == nil { + n.IsUint = true + n.Uint64 = u + } + i, err := strconv.ParseInt(text, 0, 64) + if err == nil { + n.IsInt = true + n.Int64 = i + if i == 0 { + n.IsUint = true // in case of -0. + n.Uint64 = u + } + } + // If an integer extraction succeeded, promote the float. + if n.IsInt { + n.IsFloat = true + n.Float64 = float64(n.Int64) + } else if n.IsUint { + n.IsFloat = true + n.Float64 = float64(n.Uint64) + } else { + f, err := strconv.ParseFloat(text, 64) + if err == nil { + n.IsFloat = true + n.Float64 = f + // If a floating-point extraction succeeded, extract the int if needed. + if !n.IsInt && float64(int64(f)) == f { + n.IsInt = true + n.Int64 = int64(f) + } + if !n.IsUint && float64(uint64(f)) == f { + n.IsUint = true + n.Uint64 = uint64(f) + } + } + } + if !n.IsInt && !n.IsUint && !n.IsFloat { + return nil, fmt.Errorf("illegal number syntax: %q", text) + } + return n, nil +} + +// simplifyComplex pulls out any other types that are represented by the complex number. +// These all require that the imaginary part be zero. +func (n *NumberNode) simplifyComplex() { + n.IsFloat = imag(n.Complex128) == 0 + if n.IsFloat { + n.Float64 = real(n.Complex128) + n.IsInt = float64(int64(n.Float64)) == n.Float64 + if n.IsInt { + n.Int64 = int64(n.Float64) + } + n.IsUint = float64(uint64(n.Float64)) == n.Float64 + if n.IsUint { + n.Uint64 = uint64(n.Float64) + } + } +} + +func (n *NumberNode) String() string { + return n.Text +} + +func (n *NumberNode) tree() *Tree { + return n.tr +} + +func (n *NumberNode) Copy() Node { + nn := new(NumberNode) + *nn = *n // Easy, fast, correct. + return nn +} + +// StringNode holds a string constant. The value has been "unquoted". +type StringNode struct { + NodeType + Pos + tr *Tree + Quoted string // The original text of the string, with quotes. + Text string // The string, after quote processing. +} + +func (t *Tree) newString(pos Pos, orig, text string) *StringNode { + return &StringNode{tr: t, NodeType: NodeString, Pos: pos, Quoted: orig, Text: text} +} + +func (s *StringNode) String() string { + return s.Quoted +} + +func (s *StringNode) tree() *Tree { + return s.tr +} + +func (s *StringNode) Copy() Node { + return s.tr.newString(s.Pos, s.Quoted, s.Text) +} + +// endNode represents an {{end}} action. +// It does not appear in the final parse tree. +type endNode struct { + NodeType + Pos + tr *Tree +} + +func (t *Tree) newEnd(pos Pos) *endNode { + return &endNode{tr: t, NodeType: nodeEnd, Pos: pos} +} + +func (e *endNode) String() string { + return "{{end}}" +} + +func (e *endNode) tree() *Tree { + return e.tr +} + +func (e *endNode) Copy() Node { + return e.tr.newEnd(e.Pos) +} + +// elseNode represents an {{else}} action. Does not appear in the final tree. 
+type elseNode struct { + NodeType + Pos + tr *Tree + Line int // The line number in the input (deprecated; kept for compatibility) +} + +func (t *Tree) newElse(pos Pos, line int) *elseNode { + return &elseNode{tr: t, NodeType: nodeElse, Pos: pos, Line: line} +} + +func (e *elseNode) Type() NodeType { + return nodeElse +} + +func (e *elseNode) String() string { + return "{{else}}" +} + +func (e *elseNode) tree() *Tree { + return e.tr +} + +func (e *elseNode) Copy() Node { + return e.tr.newElse(e.Pos, e.Line) +} + +// BranchNode is the common representation of if, range, and with. +type BranchNode struct { + NodeType + Pos + tr *Tree + Line int // The line number in the input (deprecated; kept for compatibility) + Pipe *PipeNode // The pipeline to be evaluated. + List *ListNode // What to execute if the value is non-empty. + ElseList *ListNode // What to execute if the value is empty (nil if absent). +} + +func (b *BranchNode) String() string { + name := "" + switch b.NodeType { + case NodeIf: + name = "if" + case NodeRange: + name = "range" + case NodeWith: + name = "with" + default: + panic("unknown branch type") + } + if b.ElseList != nil { + return fmt.Sprintf("{{%s %s}}%s{{else}}%s{{end}}", name, b.Pipe, b.List, b.ElseList) + } + return fmt.Sprintf("{{%s %s}}%s{{end}}", name, b.Pipe, b.List) +} + +func (b *BranchNode) tree() *Tree { + return b.tr +} + +func (b *BranchNode) Copy() Node { + switch b.NodeType { + case NodeIf: + return b.tr.newIf(b.Pos, b.Line, b.Pipe, b.List, b.ElseList) + case NodeRange: + return b.tr.newRange(b.Pos, b.Line, b.Pipe, b.List, b.ElseList) + case NodeWith: + return b.tr.newWith(b.Pos, b.Line, b.Pipe, b.List, b.ElseList) + default: + panic("unknown branch type") + } +} + +// IfNode represents an {{if}} action and its commands. +type IfNode struct { + BranchNode +} + +func (t *Tree) newIf(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *IfNode { + return &IfNode{BranchNode{tr: t, NodeType: NodeIf, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}} +} + +func (i *IfNode) Copy() Node { + return i.tr.newIf(i.Pos, i.Line, i.Pipe.CopyPipe(), i.List.CopyList(), i.ElseList.CopyList()) +} + +// RangeNode represents a {{range}} action and its commands. +type RangeNode struct { + BranchNode +} + +func (t *Tree) newRange(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *RangeNode { + return &RangeNode{BranchNode{tr: t, NodeType: NodeRange, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}} +} + +func (r *RangeNode) Copy() Node { + return r.tr.newRange(r.Pos, r.Line, r.Pipe.CopyPipe(), r.List.CopyList(), r.ElseList.CopyList()) +} + +// WithNode represents a {{with}} action and its commands. +type WithNode struct { + BranchNode +} + +func (t *Tree) newWith(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *WithNode { + return &WithNode{BranchNode{tr: t, NodeType: NodeWith, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}} +} + +func (w *WithNode) Copy() Node { + return w.tr.newWith(w.Pos, w.Line, w.Pipe.CopyPipe(), w.List.CopyList(), w.ElseList.CopyList()) +} + +// TemplateNode represents a {{template}} action. +type TemplateNode struct { + NodeType + Pos + tr *Tree + Line int // The line number in the input (deprecated; kept for compatibility) + Name string // The name of the template (unquoted). + Pipe *PipeNode // The command to evaluate as dot for the template. 
+} + +func (t *Tree) newTemplate(pos Pos, line int, name string, pipe *PipeNode) *TemplateNode { + return &TemplateNode{tr: t, NodeType: NodeTemplate, Pos: pos, Line: line, Name: name, Pipe: pipe} +} + +func (t *TemplateNode) String() string { + if t.Pipe == nil { + return fmt.Sprintf("{{template %q}}", t.Name) + } + return fmt.Sprintf("{{template %q %s}}", t.Name, t.Pipe) +} + +func (t *TemplateNode) tree() *Tree { + return t.tr +} + +func (t *TemplateNode) Copy() Node { + return t.tr.newTemplate(t.Pos, t.Line, t.Name, t.Pipe.CopyPipe()) +} diff --git a/vendor/github.com/alecthomas/template/parse/parse.go b/vendor/github.com/alecthomas/template/parse/parse.go new file mode 100644 index 00000000000..0d77ade8718 --- /dev/null +++ b/vendor/github.com/alecthomas/template/parse/parse.go @@ -0,0 +1,700 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package parse builds parse trees for templates as defined by text/template +// and html/template. Clients should use those packages to construct templates +// rather than this one, which provides shared internal data structures not +// intended for general use. +package parse + +import ( + "bytes" + "fmt" + "runtime" + "strconv" + "strings" +) + +// Tree is the representation of a single parsed template. +type Tree struct { + Name string // name of the template represented by the tree. + ParseName string // name of the top-level template during parsing, for error messages. + Root *ListNode // top-level root of the tree. + text string // text parsed to create the template (or its parent) + // Parsing only; cleared after parse. + funcs []map[string]interface{} + lex *lexer + token [3]item // three-token lookahead for parser. + peekCount int + vars []string // variables defined at the moment. +} + +// Copy returns a copy of the Tree. Any parsing state is discarded. +func (t *Tree) Copy() *Tree { + if t == nil { + return nil + } + return &Tree{ + Name: t.Name, + ParseName: t.ParseName, + Root: t.Root.CopyList(), + text: t.text, + } +} + +// Parse returns a map from template name to parse.Tree, created by parsing the +// templates described in the argument string. The top-level template will be +// given the specified name. If an error is encountered, parsing stops and an +// empty map is returned with the error. +func Parse(name, text, leftDelim, rightDelim string, funcs ...map[string]interface{}) (treeSet map[string]*Tree, err error) { + treeSet = make(map[string]*Tree) + t := New(name) + t.text = text + _, err = t.Parse(text, leftDelim, rightDelim, treeSet, funcs...) + return +} + +// next returns the next token. +func (t *Tree) next() item { + if t.peekCount > 0 { + t.peekCount-- + } else { + t.token[0] = t.lex.nextItem() + } + return t.token[t.peekCount] +} + +// backup backs the input stream up one token. +func (t *Tree) backup() { + t.peekCount++ +} + +// backup2 backs the input stream up two tokens. +// The zeroth token is already there. +func (t *Tree) backup2(t1 item) { + t.token[1] = t1 + t.peekCount = 2 +} + +// backup3 backs the input stream up three tokens +// The zeroth token is already there. +func (t *Tree) backup3(t2, t1 item) { // Reverse order: we're pushing back. + t.token[1] = t1 + t.token[2] = t2 + t.peekCount = 3 +} + +// peek returns but does not consume the next token. 
+func (t *Tree) peek() item { + if t.peekCount > 0 { + return t.token[t.peekCount-1] + } + t.peekCount = 1 + t.token[0] = t.lex.nextItem() + return t.token[0] +} + +// nextNonSpace returns the next non-space token. +func (t *Tree) nextNonSpace() (token item) { + for { + token = t.next() + if token.typ != itemSpace { + break + } + } + return token +} + +// peekNonSpace returns but does not consume the next non-space token. +func (t *Tree) peekNonSpace() (token item) { + for { + token = t.next() + if token.typ != itemSpace { + break + } + } + t.backup() + return token +} + +// Parsing. + +// New allocates a new parse tree with the given name. +func New(name string, funcs ...map[string]interface{}) *Tree { + return &Tree{ + Name: name, + funcs: funcs, + } +} + +// ErrorContext returns a textual representation of the location of the node in the input text. +// The receiver is only used when the node does not have a pointer to the tree inside, +// which can occur in old code. +func (t *Tree) ErrorContext(n Node) (location, context string) { + pos := int(n.Position()) + tree := n.tree() + if tree == nil { + tree = t + } + text := tree.text[:pos] + byteNum := strings.LastIndex(text, "\n") + if byteNum == -1 { + byteNum = pos // On first line. + } else { + byteNum++ // After the newline. + byteNum = pos - byteNum + } + lineNum := 1 + strings.Count(text, "\n") + context = n.String() + if len(context) > 20 { + context = fmt.Sprintf("%.20s...", context) + } + return fmt.Sprintf("%s:%d:%d", tree.ParseName, lineNum, byteNum), context +} + +// errorf formats the error and terminates processing. +func (t *Tree) errorf(format string, args ...interface{}) { + t.Root = nil + format = fmt.Sprintf("template: %s:%d: %s", t.ParseName, t.lex.lineNumber(), format) + panic(fmt.Errorf(format, args...)) +} + +// error terminates processing. +func (t *Tree) error(err error) { + t.errorf("%s", err) +} + +// expect consumes the next token and guarantees it has the required type. +func (t *Tree) expect(expected itemType, context string) item { + token := t.nextNonSpace() + if token.typ != expected { + t.unexpected(token, context) + } + return token +} + +// expectOneOf consumes the next token and guarantees it has one of the required types. +func (t *Tree) expectOneOf(expected1, expected2 itemType, context string) item { + token := t.nextNonSpace() + if token.typ != expected1 && token.typ != expected2 { + t.unexpected(token, context) + } + return token +} + +// unexpected complains about the token and terminates processing. +func (t *Tree) unexpected(token item, context string) { + t.errorf("unexpected %s in %s", token, context) +} + +// recover is the handler that turns panics into returns from the top level of Parse. +func (t *Tree) recover(errp *error) { + e := recover() + if e != nil { + if _, ok := e.(runtime.Error); ok { + panic(e) + } + if t != nil { + t.stopParse() + } + *errp = e.(error) + } + return +} + +// startParse initializes the parser, using the lexer. +func (t *Tree) startParse(funcs []map[string]interface{}, lex *lexer) { + t.Root = nil + t.lex = lex + t.vars = []string{"$"} + t.funcs = funcs +} + +// stopParse terminates parsing. +func (t *Tree) stopParse() { + t.lex = nil + t.vars = nil + t.funcs = nil +} + +// Parse parses the template definition string to construct a representation of +// the template for execution. If either action delimiter string is empty, the +// default ("{{" or "}}") is used. Embedded template definitions are added to +// the treeSet map. 
+func (t *Tree) Parse(text, leftDelim, rightDelim string, treeSet map[string]*Tree, funcs ...map[string]interface{}) (tree *Tree, err error) { + defer t.recover(&err) + t.ParseName = t.Name + t.startParse(funcs, lex(t.Name, text, leftDelim, rightDelim)) + t.text = text + t.parse(treeSet) + t.add(treeSet) + t.stopParse() + return t, nil +} + +// add adds tree to the treeSet. +func (t *Tree) add(treeSet map[string]*Tree) { + tree := treeSet[t.Name] + if tree == nil || IsEmptyTree(tree.Root) { + treeSet[t.Name] = t + return + } + if !IsEmptyTree(t.Root) { + t.errorf("template: multiple definition of template %q", t.Name) + } +} + +// IsEmptyTree reports whether this tree (node) is empty of everything but space. +func IsEmptyTree(n Node) bool { + switch n := n.(type) { + case nil: + return true + case *ActionNode: + case *IfNode: + case *ListNode: + for _, node := range n.Nodes { + if !IsEmptyTree(node) { + return false + } + } + return true + case *RangeNode: + case *TemplateNode: + case *TextNode: + return len(bytes.TrimSpace(n.Text)) == 0 + case *WithNode: + default: + panic("unknown node: " + n.String()) + } + return false +} + +// parse is the top-level parser for a template, essentially the same +// as itemList except it also parses {{define}} actions. +// It runs to EOF. +func (t *Tree) parse(treeSet map[string]*Tree) (next Node) { + t.Root = t.newList(t.peek().pos) + for t.peek().typ != itemEOF { + if t.peek().typ == itemLeftDelim { + delim := t.next() + if t.nextNonSpace().typ == itemDefine { + newT := New("definition") // name will be updated once we know it. + newT.text = t.text + newT.ParseName = t.ParseName + newT.startParse(t.funcs, t.lex) + newT.parseDefinition(treeSet) + continue + } + t.backup2(delim) + } + n := t.textOrAction() + if n.Type() == nodeEnd { + t.errorf("unexpected %s", n) + } + t.Root.append(n) + } + return nil +} + +// parseDefinition parses a {{define}} ... {{end}} template definition and +// installs the definition in the treeSet map. The "define" keyword has already +// been scanned. +func (t *Tree) parseDefinition(treeSet map[string]*Tree) { + const context = "define clause" + name := t.expectOneOf(itemString, itemRawString, context) + var err error + t.Name, err = strconv.Unquote(name.val) + if err != nil { + t.error(err) + } + t.expect(itemRightDelim, context) + var end Node + t.Root, end = t.itemList() + if end.Type() != nodeEnd { + t.errorf("unexpected %s in %s", end, context) + } + t.add(treeSet) + t.stopParse() +} + +// itemList: +// textOrAction* +// Terminates at {{end}} or {{else}}, returned separately. +func (t *Tree) itemList() (list *ListNode, next Node) { + list = t.newList(t.peekNonSpace().pos) + for t.peekNonSpace().typ != itemEOF { + n := t.textOrAction() + switch n.Type() { + case nodeEnd, nodeElse: + return list, n + } + list.append(n) + } + t.errorf("unexpected EOF") + return +} + +// textOrAction: +// text | action +func (t *Tree) textOrAction() Node { + switch token := t.nextNonSpace(); token.typ { + case itemElideNewline: + return t.elideNewline() + case itemText: + return t.newText(token.pos, token.val) + case itemLeftDelim: + return t.action() + default: + t.unexpected(token, "input") + } + return nil +} + +// elideNewline: +// Remove newlines trailing rightDelim if \\ is present. 
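+//	For example, "{{end}}\" at the end of a line strips the newline(s)
+//	that begin the following text block.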
+func (t *Tree) elideNewline() Node { + token := t.peek() + if token.typ != itemText { + t.unexpected(token, "input") + return nil + } + + t.next() + stripped := strings.TrimLeft(token.val, "\n\r") + diff := len(token.val) - len(stripped) + if diff > 0 { + // This is a bit nasty. We mutate the token in-place to remove + // preceding newlines. + token.pos += Pos(diff) + token.val = stripped + } + return t.newText(token.pos, token.val) +} + +// Action: +// control +// command ("|" command)* +// Left delim is past. Now get actions. +// First word could be a keyword such as range. +func (t *Tree) action() (n Node) { + switch token := t.nextNonSpace(); token.typ { + case itemElse: + return t.elseControl() + case itemEnd: + return t.endControl() + case itemIf: + return t.ifControl() + case itemRange: + return t.rangeControl() + case itemTemplate: + return t.templateControl() + case itemWith: + return t.withControl() + } + t.backup() + // Do not pop variables; they persist until "end". + return t.newAction(t.peek().pos, t.lex.lineNumber(), t.pipeline("command")) +} + +// Pipeline: +// declarations? command ('|' command)* +func (t *Tree) pipeline(context string) (pipe *PipeNode) { + var decl []*VariableNode + pos := t.peekNonSpace().pos + // Are there declarations? + for { + if v := t.peekNonSpace(); v.typ == itemVariable { + t.next() + // Since space is a token, we need 3-token look-ahead here in the worst case: + // in "$x foo" we need to read "foo" (as opposed to ":=") to know that $x is an + // argument variable rather than a declaration. So remember the token + // adjacent to the variable so we can push it back if necessary. + tokenAfterVariable := t.peek() + if next := t.peekNonSpace(); next.typ == itemColonEquals || (next.typ == itemChar && next.val == ",") { + t.nextNonSpace() + variable := t.newVariable(v.pos, v.val) + decl = append(decl, variable) + t.vars = append(t.vars, v.val) + if next.typ == itemChar && next.val == "," { + if context == "range" && len(decl) < 2 { + continue + } + t.errorf("too many declarations in %s", context) + } + } else if tokenAfterVariable.typ == itemSpace { + t.backup3(v, tokenAfterVariable) + } else { + t.backup2(v) + } + } + break + } + pipe = t.newPipeline(pos, t.lex.lineNumber(), decl) + for { + switch token := t.nextNonSpace(); token.typ { + case itemRightDelim, itemRightParen: + if len(pipe.Cmds) == 0 { + t.errorf("missing value for %s", context) + } + if token.typ == itemRightParen { + t.backup() + } + return + case itemBool, itemCharConstant, itemComplex, itemDot, itemField, itemIdentifier, + itemNumber, itemNil, itemRawString, itemString, itemVariable, itemLeftParen: + t.backup() + pipe.append(t.command()) + default: + t.unexpected(token, context) + } + } +} + +func (t *Tree) parseControl(allowElseIf bool, context string) (pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) { + defer t.popVars(len(t.vars)) + line = t.lex.lineNumber() + pipe = t.pipeline(context) + var next Node + list, next = t.itemList() + switch next.Type() { + case nodeEnd: //done + case nodeElse: + if allowElseIf { + // Special case for "else if". If the "else" is followed immediately by an "if", + // the elseControl will have left the "if" token pending. Treat + // {{if a}}_{{else if b}}_{{end}} + // as + // {{if a}}_{{else}}{{if b}}_{{end}}{{end}}. + // To do this, parse the if as usual and stop at it {{end}}; the subsequent{{end}} + // is assumed. This technique works even for long if-else-if chains. + // TODO: Should we allow else-if in with and range? 
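+			// (elseControl leaves the "if" token of an "{{else if" pending
+			// rather than consuming it, which is what the peek below sees.)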
+ if t.peek().typ == itemIf { + t.next() // Consume the "if" token. + elseList = t.newList(next.Position()) + elseList.append(t.ifControl()) + // Do not consume the next item - only one {{end}} required. + break + } + } + elseList, next = t.itemList() + if next.Type() != nodeEnd { + t.errorf("expected end; found %s", next) + } + } + return pipe.Position(), line, pipe, list, elseList +} + +// If: +// {{if pipeline}} itemList {{end}} +// {{if pipeline}} itemList {{else}} itemList {{end}} +// If keyword is past. +func (t *Tree) ifControl() Node { + return t.newIf(t.parseControl(true, "if")) +} + +// Range: +// {{range pipeline}} itemList {{end}} +// {{range pipeline}} itemList {{else}} itemList {{end}} +// Range keyword is past. +func (t *Tree) rangeControl() Node { + return t.newRange(t.parseControl(false, "range")) +} + +// With: +// {{with pipeline}} itemList {{end}} +// {{with pipeline}} itemList {{else}} itemList {{end}} +// If keyword is past. +func (t *Tree) withControl() Node { + return t.newWith(t.parseControl(false, "with")) +} + +// End: +// {{end}} +// End keyword is past. +func (t *Tree) endControl() Node { + return t.newEnd(t.expect(itemRightDelim, "end").pos) +} + +// Else: +// {{else}} +// Else keyword is past. +func (t *Tree) elseControl() Node { + // Special case for "else if". + peek := t.peekNonSpace() + if peek.typ == itemIf { + // We see "{{else if ... " but in effect rewrite it to {{else}}{{if ... ". + return t.newElse(peek.pos, t.lex.lineNumber()) + } + return t.newElse(t.expect(itemRightDelim, "else").pos, t.lex.lineNumber()) +} + +// Template: +// {{template stringValue pipeline}} +// Template keyword is past. The name must be something that can evaluate +// to a string. +func (t *Tree) templateControl() Node { + var name string + token := t.nextNonSpace() + switch token.typ { + case itemString, itemRawString: + s, err := strconv.Unquote(token.val) + if err != nil { + t.error(err) + } + name = s + default: + t.unexpected(token, "template invocation") + } + var pipe *PipeNode + if t.nextNonSpace().typ != itemRightDelim { + t.backup() + // Do not pop variables; they persist until "end". + pipe = t.pipeline("template") + } + return t.newTemplate(token.pos, t.lex.lineNumber(), name, pipe) +} + +// command: +// operand (space operand)* +// space-separated arguments up to a pipeline character or right delimiter. +// we consume the pipe character but leave the right delim to terminate the action. +func (t *Tree) command() *CommandNode { + cmd := t.newCommand(t.peekNonSpace().pos) + for { + t.peekNonSpace() // skip leading spaces. + operand := t.operand() + if operand != nil { + cmd.append(operand) + } + switch token := t.next(); token.typ { + case itemSpace: + continue + case itemError: + t.errorf("%s", token.val) + case itemRightDelim, itemRightParen: + t.backup() + case itemPipe: + default: + t.errorf("unexpected %s in operand; missing space?", token) + } + break + } + if len(cmd.Args) == 0 { + t.errorf("empty command") + } + return cmd +} + +// operand: +// term .Field* +// An operand is a space-separated component of a command, +// a term possibly followed by field accesses. +// A nil return means the next item is not an operand. 
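+// For example, in {{.X.Y}} the term is the field .X and the trailing .Y is
+// folded into a chain, which is then collapsed back into a single field
+// node for compatibility with the original API.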
+func (t *Tree) operand() Node { + node := t.term() + if node == nil { + return nil + } + if t.peek().typ == itemField { + chain := t.newChain(t.peek().pos, node) + for t.peek().typ == itemField { + chain.Add(t.next().val) + } + // Compatibility with original API: If the term is of type NodeField + // or NodeVariable, just put more fields on the original. + // Otherwise, keep the Chain node. + // TODO: Switch to Chains always when we can. + switch node.Type() { + case NodeField: + node = t.newField(chain.Position(), chain.String()) + case NodeVariable: + node = t.newVariable(chain.Position(), chain.String()) + default: + node = chain + } + } + return node +} + +// term: +// literal (number, string, nil, boolean) +// function (identifier) +// . +// .Field +// $ +// '(' pipeline ')' +// A term is a simple "expression". +// A nil return means the next item is not a term. +func (t *Tree) term() Node { + switch token := t.nextNonSpace(); token.typ { + case itemError: + t.errorf("%s", token.val) + case itemIdentifier: + if !t.hasFunction(token.val) { + t.errorf("function %q not defined", token.val) + } + return NewIdentifier(token.val).SetTree(t).SetPos(token.pos) + case itemDot: + return t.newDot(token.pos) + case itemNil: + return t.newNil(token.pos) + case itemVariable: + return t.useVar(token.pos, token.val) + case itemField: + return t.newField(token.pos, token.val) + case itemBool: + return t.newBool(token.pos, token.val == "true") + case itemCharConstant, itemComplex, itemNumber: + number, err := t.newNumber(token.pos, token.val, token.typ) + if err != nil { + t.error(err) + } + return number + case itemLeftParen: + pipe := t.pipeline("parenthesized pipeline") + if token := t.next(); token.typ != itemRightParen { + t.errorf("unclosed right paren: unexpected %s", token) + } + return pipe + case itemString, itemRawString: + s, err := strconv.Unquote(token.val) + if err != nil { + t.error(err) + } + return t.newString(token.pos, token.val, s) + } + t.backup() + return nil +} + +// hasFunction reports if a function name exists in the Tree's maps. +func (t *Tree) hasFunction(name string) bool { + for _, funcMap := range t.funcs { + if funcMap == nil { + continue + } + if funcMap[name] != nil { + return true + } + } + return false +} + +// popVars trims the variable list to the specified length +func (t *Tree) popVars(n int) { + t.vars = t.vars[:n] +} + +// useVar returns a node for a variable reference. It errors if the +// variable is not defined. +func (t *Tree) useVar(pos Pos, name string) Node { + v := t.newVariable(pos, name) + for _, varName := range t.vars { + if varName == v.Ident[0] { + return v + } + } + t.errorf("undefined variable %q", v.Ident[0]) + return nil +} diff --git a/vendor/github.com/alecthomas/template/template.go b/vendor/github.com/alecthomas/template/template.go new file mode 100644 index 00000000000..447ed2abaea --- /dev/null +++ b/vendor/github.com/alecthomas/template/template.go @@ -0,0 +1,218 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package template + +import ( + "fmt" + "reflect" + + "github.com/alecthomas/template/parse" +) + +// common holds the information shared by related templates. +type common struct { + tmpl map[string]*Template + // We use two maps, one for parsing and one for execution. + // This separation makes the API cleaner since it doesn't + // expose reflection to the client. 
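+ // parseFuncs is consulted while parsing, so references to undefined
+ // functions fail early; execFuncs maps each name to the reflect.Value
+ // that execution actually calls.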
+ parseFuncs FuncMap + execFuncs map[string]reflect.Value +} + +// Template is the representation of a parsed template. The *parse.Tree +// field is exported only for use by html/template and should be treated +// as unexported by all other clients. +type Template struct { + name string + *parse.Tree + *common + leftDelim string + rightDelim string +} + +// New allocates a new template with the given name. +func New(name string) *Template { + return &Template{ + name: name, + } +} + +// Name returns the name of the template. +func (t *Template) Name() string { + return t.name +} + +// New allocates a new template associated with the given one and with the same +// delimiters. The association, which is transitive, allows one template to +// invoke another with a {{template}} action. +func (t *Template) New(name string) *Template { + t.init() + return &Template{ + name: name, + common: t.common, + leftDelim: t.leftDelim, + rightDelim: t.rightDelim, + } +} + +func (t *Template) init() { + if t.common == nil { + t.common = new(common) + t.tmpl = make(map[string]*Template) + t.parseFuncs = make(FuncMap) + t.execFuncs = make(map[string]reflect.Value) + } +} + +// Clone returns a duplicate of the template, including all associated +// templates. The actual representation is not copied, but the name space of +// associated templates is, so further calls to Parse in the copy will add +// templates to the copy but not to the original. Clone can be used to prepare +// common templates and use them with variant definitions for other templates +// by adding the variants after the clone is made. +func (t *Template) Clone() (*Template, error) { + nt := t.copy(nil) + nt.init() + nt.tmpl[t.name] = nt + for k, v := range t.tmpl { + if k == t.name { // Already installed. + continue + } + // The associated templates share nt's common structure. + tmpl := v.copy(nt.common) + nt.tmpl[k] = tmpl + } + for k, v := range t.parseFuncs { + nt.parseFuncs[k] = v + } + for k, v := range t.execFuncs { + nt.execFuncs[k] = v + } + return nt, nil +} + +// copy returns a shallow copy of t, with common set to the argument. +func (t *Template) copy(c *common) *Template { + nt := New(t.name) + nt.Tree = t.Tree + nt.common = c + nt.leftDelim = t.leftDelim + nt.rightDelim = t.rightDelim + return nt +} + +// AddParseTree creates a new template with the name and parse tree +// and associates it with t. +func (t *Template) AddParseTree(name string, tree *parse.Tree) (*Template, error) { + if t.common != nil && t.tmpl[name] != nil { + return nil, fmt.Errorf("template: redefinition of template %q", name) + } + nt := t.New(name) + nt.Tree = tree + t.tmpl[name] = nt + return nt, nil +} + +// Templates returns a slice of the templates associated with t, including t +// itself. +func (t *Template) Templates() []*Template { + if t.common == nil { + return nil + } + // Return a slice so we don't expose the map. + m := make([]*Template, 0, len(t.tmpl)) + for _, v := range t.tmpl { + m = append(m, v) + } + return m +} + +// Delims sets the action delimiters to the specified strings, to be used in +// subsequent calls to Parse, ParseFiles, or ParseGlob. Nested template +// definitions will inherit the settings. An empty delimiter stands for the +// corresponding default: {{ or }}. +// The return value is the template, so calls can be chained. +func (t *Template) Delims(left, right string) *Template { + t.leftDelim = left + t.rightDelim = right + return t +} + +// Funcs adds the elements of the argument map to the template's function map. 
+// It panics if a value in the map is not a function with appropriate return
+// type. However, it is legal to overwrite elements of the map. The return
+// value is the template, so calls can be chained.
+func (t *Template) Funcs(funcMap FuncMap) *Template {
+ t.init()
+ addValueFuncs(t.execFuncs, funcMap)
+ addFuncs(t.parseFuncs, funcMap)
+ return t
+}
+
+// Lookup returns the template with the given name that is associated with t,
+// or nil if there is no such template.
+func (t *Template) Lookup(name string) *Template {
+ if t.common == nil {
+ return nil
+ }
+ return t.tmpl[name]
+}
+
+// Parse parses a string into a template. Nested template definitions will be
+// associated with the top-level template t. Parse may be called multiple times
+// to parse definitions of templates to associate with t. It is an error if a
+// resulting template is non-empty (contains content other than template
+// definitions) and would replace a non-empty template with the same name.
+// (In multiple calls to Parse with the same receiver template, only one call
+// can contain text other than space, comments, and template definitions.)
+func (t *Template) Parse(text string) (*Template, error) {
+ t.init()
+ trees, err := parse.Parse(t.name, text, t.leftDelim, t.rightDelim, t.parseFuncs, builtins)
+ if err != nil {
+ return nil, err
+ }
+ // Add the newly parsed trees, including the one for t, into our common structure.
+ for name, tree := range trees {
+ // If the name we parsed is the name of this template, overwrite this template.
+ // The associate method checks it's not a redefinition.
+ tmpl := t
+ if name != t.name {
+ tmpl = t.New(name)
+ }
+ // Even if t == tmpl, we need to install it in the common.tmpl map.
+ if replace, err := t.associate(tmpl, tree); err != nil {
+ return nil, err
+ } else if replace {
+ tmpl.Tree = tree
+ }
+ tmpl.leftDelim = t.leftDelim
+ tmpl.rightDelim = t.rightDelim
+ }
+ return t, nil
+}
+
+// associate installs the new template into the group of templates associated
+// with t. It is an error to reuse a name except to overwrite an empty
+// template. The two are already known to share the common structure.
+// The boolean return value reports whether to store this tree as t.Tree.
+func (t *Template) associate(new *Template, tree *parse.Tree) (bool, error) {
+ if new.common != t.common {
+ panic("internal error: associate not common")
+ }
+ name := new.name
+ if old := t.tmpl[name]; old != nil {
+ oldIsEmpty := parse.IsEmptyTree(old.Root)
+ newIsEmpty := parse.IsEmptyTree(tree.Root)
+ if newIsEmpty {
+ // Whether old is empty or not, new is empty; no reason to replace old.
+ return false, nil + } + if !oldIsEmpty { + return false, fmt.Errorf("template: redefinition of template %q", name) + } + } + t.tmpl[name] = new + return true, nil +} diff --git a/vendor/github.com/alecthomas/units/COPYING b/vendor/github.com/alecthomas/units/COPYING new file mode 100644 index 00000000000..2993ec085d3 --- /dev/null +++ b/vendor/github.com/alecthomas/units/COPYING @@ -0,0 +1,19 @@ +Copyright (C) 2014 Alec Thomas + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/alecthomas/units/README.md b/vendor/github.com/alecthomas/units/README.md new file mode 100644 index 00000000000..bee884e3c1c --- /dev/null +++ b/vendor/github.com/alecthomas/units/README.md @@ -0,0 +1,11 @@ +# Units - Helpful unit multipliers and functions for Go + +The goal of this package is to have functionality similar to the [time](http://golang.org/pkg/time/) package. + +It allows for code like this: + +```go +n, err := ParseBase2Bytes("1KB") +// n == 1024 +n = units.Mebibyte * 512 +``` diff --git a/vendor/github.com/alecthomas/units/bytes.go b/vendor/github.com/alecthomas/units/bytes.go new file mode 100644 index 00000000000..61d0ca479ab --- /dev/null +++ b/vendor/github.com/alecthomas/units/bytes.go @@ -0,0 +1,85 @@ +package units + +// Base2Bytes is the old non-SI power-of-2 byte scale (1024 bytes in a kilobyte, +// etc.). +type Base2Bytes int64 + +// Base-2 byte units. +const ( + Kibibyte Base2Bytes = 1024 + KiB = Kibibyte + Mebibyte = Kibibyte * 1024 + MiB = Mebibyte + Gibibyte = Mebibyte * 1024 + GiB = Gibibyte + Tebibyte = Gibibyte * 1024 + TiB = Tebibyte + Pebibyte = Tebibyte * 1024 + PiB = Pebibyte + Exbibyte = Pebibyte * 1024 + EiB = Exbibyte +) + +var ( + bytesUnitMap = MakeUnitMap("iB", "B", 1024) + oldBytesUnitMap = MakeUnitMap("B", "B", 1024) +) + +// ParseBase2Bytes supports both iB and B in base-2 multipliers. That is, KB +// and KiB are both 1024. +// However "kB", which is the correct SI spelling of 1000 Bytes, is rejected. +func ParseBase2Bytes(s string) (Base2Bytes, error) { + n, err := ParseUnit(s, bytesUnitMap) + if err != nil { + n, err = ParseUnit(s, oldBytesUnitMap) + } + return Base2Bytes(n), err +} + +func (b Base2Bytes) String() string { + return ToString(int64(b), 1024, "iB", "B") +} + +var ( + metricBytesUnitMap = MakeUnitMap("B", "B", 1000) +) + +// MetricBytes are SI byte units (1000 bytes in a kilobyte). +type MetricBytes SI + +// SI base-10 byte units. 
+const ( + Kilobyte MetricBytes = 1000 + KB = Kilobyte + Megabyte = Kilobyte * 1000 + MB = Megabyte + Gigabyte = Megabyte * 1000 + GB = Gigabyte + Terabyte = Gigabyte * 1000 + TB = Terabyte + Petabyte = Terabyte * 1000 + PB = Petabyte + Exabyte = Petabyte * 1000 + EB = Exabyte +) + +// ParseMetricBytes parses base-10 metric byte units. That is, KB is 1000 bytes. +func ParseMetricBytes(s string) (MetricBytes, error) { + n, err := ParseUnit(s, metricBytesUnitMap) + return MetricBytes(n), err +} + +// TODO: represents 1000B as uppercase "KB", while SI standard requires "kB". +func (m MetricBytes) String() string { + return ToString(int64(m), 1000, "B", "B") +} + +// ParseStrictBytes supports both iB and B suffixes for base 2 and metric, +// respectively. That is, KiB represents 1024 and kB, KB represent 1000. +func ParseStrictBytes(s string) (int64, error) { + n, err := ParseUnit(s, bytesUnitMap) + if err != nil { + n, err = ParseUnit(s, metricBytesUnitMap) + } + return int64(n), err +} diff --git a/vendor/github.com/alecthomas/units/doc.go b/vendor/github.com/alecthomas/units/doc.go new file mode 100644 index 00000000000..156ae386723 --- /dev/null +++ b/vendor/github.com/alecthomas/units/doc.go @@ -0,0 +1,13 @@ +// Package units provides helpful unit multipliers and functions for Go. +// +// The goal of this package is to have functionality similar to the time [1] package. +// +// +// [1] http://golang.org/pkg/time/ +// +// It allows for code like this: +// +// n, err := ParseBase2Bytes("1KB") +// // n == 1024 +// n = units.Mebibyte * 512 +package units diff --git a/vendor/github.com/alecthomas/units/go.mod b/vendor/github.com/alecthomas/units/go.mod new file mode 100644 index 00000000000..c7fb91f2b27 --- /dev/null +++ b/vendor/github.com/alecthomas/units/go.mod @@ -0,0 +1,3 @@ +module github.com/alecthomas/units + +require github.com/stretchr/testify v1.4.0 diff --git a/vendor/github.com/alecthomas/units/go.sum b/vendor/github.com/alecthomas/units/go.sum new file mode 100644 index 00000000000..8fdee5854f1 --- /dev/null +++ b/vendor/github.com/alecthomas/units/go.sum @@ -0,0 +1,11 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/alecthomas/units/si.go b/vendor/github.com/alecthomas/units/si.go new file mode 100644 index 00000000000..99b2fa4fcb0 --- /dev/null +++ b/vendor/github.com/alecthomas/units/si.go @@ -0,0 +1,50 @@ +package units + +// SI units. +type SI int64 + +// SI unit multiples. 
+const ( + Kilo SI = 1000 + Mega = Kilo * 1000 + Giga = Mega * 1000 + Tera = Giga * 1000 + Peta = Tera * 1000 + Exa = Peta * 1000 +) + +func MakeUnitMap(suffix, shortSuffix string, scale int64) map[string]float64 { + res := map[string]float64{ + shortSuffix: 1, + // see below for "k" / "K" + "M" + suffix: float64(scale * scale), + "G" + suffix: float64(scale * scale * scale), + "T" + suffix: float64(scale * scale * scale * scale), + "P" + suffix: float64(scale * scale * scale * scale * scale), + "E" + suffix: float64(scale * scale * scale * scale * scale * scale), + } + + // Standard SI prefixes use lowercase "k" for kilo = 1000. + // For compatibility, and to be fool-proof, we accept both "k" and "K" in metric mode. + // + // However, official binary prefixes are always capitalized - "KiB" - + // and we specifically never parse "kB" as 1024B because: + // + // (1) people pedantic enough to use lowercase according to SI unlikely to abuse "k" to mean 1024 :-) + // + // (2) Use of capital K for 1024 was an informal tradition predating IEC prefixes: + // "The binary meaning of the kilobyte for 1024 bytes typically uses the symbol KB, with an + // uppercase letter K." + // -- https://en.wikipedia.org/wiki/Kilobyte#Base_2_(1024_bytes) + // "Capitalization of the letter K became the de facto standard for binary notation, although this + // could not be extended to higher powers, and use of the lowercase k did persist.[13][14][15]" + // -- https://en.wikipedia.org/wiki/Binary_prefix#History + // See also the extensive https://en.wikipedia.org/wiki/Timeline_of_binary_prefixes. + if scale == 1024 { + res["K"+suffix] = float64(scale) + } else { + res["k"+suffix] = float64(scale) + res["K"+suffix] = float64(scale) + } + return res +} diff --git a/vendor/github.com/alecthomas/units/util.go b/vendor/github.com/alecthomas/units/util.go new file mode 100644 index 00000000000..6527e92d164 --- /dev/null +++ b/vendor/github.com/alecthomas/units/util.go @@ -0,0 +1,138 @@ +package units + +import ( + "errors" + "fmt" + "strings" +) + +var ( + siUnits = []string{"", "K", "M", "G", "T", "P", "E"} +) + +func ToString(n int64, scale int64, suffix, baseSuffix string) string { + mn := len(siUnits) + out := make([]string, mn) + for i, m := range siUnits { + if n%scale != 0 || i == 0 && n == 0 { + s := suffix + if i == 0 { + s = baseSuffix + } + out[mn-1-i] = fmt.Sprintf("%d%s%s", n%scale, m, s) + } + n /= scale + if n == 0 { + break + } + } + return strings.Join(out, "") +} + +// Below code ripped straight from http://golang.org/src/pkg/time/format.go?s=33392:33438#L1123 +var errLeadingInt = errors.New("units: bad [0-9]*") // never printed + +// leadingInt consumes the leading [0-9]* from s. +func leadingInt(s string) (x int64, rem string, err error) { + i := 0 + for ; i < len(s); i++ { + c := s[i] + if c < '0' || c > '9' { + break + } + if x >= (1<<63-10)/10 { + // overflow + return 0, "", errLeadingInt + } + x = x*10 + int64(c) - '0' + } + return x, s[i:], nil +} + +func ParseUnit(s string, unitMap map[string]float64) (int64, error) { + // [-+]?([0-9]*(\.[0-9]*)?[a-z]+)+ + orig := s + f := float64(0) + neg := false + + // Consume [-+]? + if s != "" { + c := s[0] + if c == '-' || c == '+' { + neg = c == '-' + s = s[1:] + } + } + // Special case: if all that is left is "0", this is zero. 
+ if s == "0" { + return 0, nil + } + if s == "" { + return 0, errors.New("units: invalid " + orig) + } + for s != "" { + g := float64(0) // this element of the sequence + + var x int64 + var err error + + // The next character must be [0-9.] + if !(s[0] == '.' || ('0' <= s[0] && s[0] <= '9')) { + return 0, errors.New("units: invalid " + orig) + } + // Consume [0-9]* + pl := len(s) + x, s, err = leadingInt(s) + if err != nil { + return 0, errors.New("units: invalid " + orig) + } + g = float64(x) + pre := pl != len(s) // whether we consumed anything before a period + + // Consume (\.[0-9]*)? + post := false + if s != "" && s[0] == '.' { + s = s[1:] + pl := len(s) + x, s, err = leadingInt(s) + if err != nil { + return 0, errors.New("units: invalid " + orig) + } + scale := 1.0 + for n := pl - len(s); n > 0; n-- { + scale *= 10 + } + g += float64(x) / scale + post = pl != len(s) + } + if !pre && !post { + // no digits (e.g. ".s" or "-.s") + return 0, errors.New("units: invalid " + orig) + } + + // Consume unit. + i := 0 + for ; i < len(s); i++ { + c := s[i] + if c == '.' || ('0' <= c && c <= '9') { + break + } + } + u := s[:i] + s = s[i:] + unit, ok := unitMap[u] + if !ok { + return 0, errors.New("units: unknown unit " + u + " in " + orig) + } + + f += g * unit + } + + if neg { + f = -f + } + if f < float64(-1<<63) || f > float64(1<<63-1) { + return 0, errors.New("units: overflow parsing unit") + } + return int64(f), nil +} diff --git a/vendor/github.com/andybalholm/brotli/LICENSE b/vendor/github.com/andybalholm/brotli/LICENSE new file mode 100644 index 00000000000..33b7cdd2dba --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2009, 2010, 2013-2016 by the Brotli Authors. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/andybalholm/brotli/README.md b/vendor/github.com/andybalholm/brotli/README.md new file mode 100644 index 00000000000..1ea7fdb759d --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/README.md @@ -0,0 +1,7 @@ +This package is a brotli compressor and decompressor implemented in Go. +It was translated from the reference implementation (https://github.com/google/brotli) +with the `c2go` tool at https://github.com/andybalholm/c2go. + +I am using it in production with https://github.com/andybalholm/redwood. + +API documentation is found at https://pkg.go.dev/github.com/andybalholm/brotli?tab=doc. 
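The three byte-unit parsers vendored above differ only in which unit maps they consult. A minimal sketch of the differences, using only the exported functions shown in `bytes.go`:

```go
package main

import (
	"fmt"

	"github.com/alecthomas/units"
)

func main() {
	// ParseBase2Bytes: "KiB" and legacy "KB" both mean 1024; SI-style "kB" is rejected.
	b, _ := units.ParseBase2Bytes("1KiB")
	fmt.Println(int64(b)) // 1024

	// ParseMetricBytes: strictly base-10, so "KB" (and "kB") is 1000.
	m, _ := units.ParseMetricBytes("1KB")
	fmt.Println(int64(m)) // 1000

	// ParseStrictBytes: "iB" suffixes are base-2, plain "B" suffixes are
	// base-10, so "1KiB" is 1024 while "1kB" is 1000.
	s, _ := units.ParseStrictBytes("1kB")
	fmt.Println(s) // 1000
}
```

The brotli package that follows is used through a gzip-style streaming API; the README above defers to the pkg.go.dev docs for details. A hedged sketch of a round trip, assuming the package's `NewWriter`/`NewReader` constructors (they are not part of this diff hunk):

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/andybalholm/brotli"
)

func main() {
	// NewWriter/NewReader are the package's gzip-style entry points
	// (documented at the pkg.go.dev link above, not shown in this diff).
	var buf bytes.Buffer
	w := brotli.NewWriter(&buf) // compress into buf
	if _, err := w.Write([]byte("hello, brotli")); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil { // flush remaining compressed output
		panic(err)
	}

	out, err := io.ReadAll(brotli.NewReader(&buf)) // decompress
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // hello, brotli
}
```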
diff --git a/vendor/github.com/andybalholm/brotli/backward_references.go b/vendor/github.com/andybalholm/brotli/backward_references.go new file mode 100644 index 00000000000..008c054d1c0 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/backward_references.go @@ -0,0 +1,185 @@ +package brotli + +import ( + "sync" +) + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Function to find backward reference copies. */ + +func computeDistanceCode(distance uint, max_distance uint, dist_cache []int) uint { + if distance <= max_distance { + var distance_plus_3 uint = distance + 3 + var offset0 uint = distance_plus_3 - uint(dist_cache[0]) + var offset1 uint = distance_plus_3 - uint(dist_cache[1]) + if distance == uint(dist_cache[0]) { + return 0 + } else if distance == uint(dist_cache[1]) { + return 1 + } else if offset0 < 7 { + return (0x9750468 >> (4 * offset0)) & 0xF + } else if offset1 < 7 { + return (0xFDB1ACE >> (4 * offset1)) & 0xF + } else if distance == uint(dist_cache[2]) { + return 2 + } else if distance == uint(dist_cache[3]) { + return 3 + } + } + + return distance + numDistanceShortCodes - 1 +} + +var hasherSearchResultPool sync.Pool + +func createBackwardReferences(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint, params *encoderParams, hasher hasherHandle, dist_cache []int, last_insert_len *uint, commands *[]command, num_literals *uint) { + var max_backward_limit uint = maxBackwardLimit(params.lgwin) + var insert_length uint = *last_insert_len + var pos_end uint = position + num_bytes + var store_end uint + if num_bytes >= hasher.StoreLookahead() { + store_end = position + num_bytes - hasher.StoreLookahead() + 1 + } else { + store_end = position + } + var random_heuristics_window_size uint = literalSpreeLengthForSparseSearch(params) + var apply_random_heuristics uint = position + random_heuristics_window_size + var gap uint = 0 + /* Set maximum distance, see section 9.1. of the spec. */ + + const kMinScore uint = scoreBase + 100 + + /* For speed up heuristics for random data. */ + + /* Minimum score to accept a backward reference. */ + hasher.PrepareDistanceCache(dist_cache) + sr2, _ := hasherSearchResultPool.Get().(*hasherSearchResult) + if sr2 == nil { + sr2 = &hasherSearchResult{} + } + sr, _ := hasherSearchResultPool.Get().(*hasherSearchResult) + if sr == nil { + sr = &hasherSearchResult{} + } + + for position+hasher.HashTypeLength() < pos_end { + var max_length uint = pos_end - position + var max_distance uint = brotli_min_size_t(position, max_backward_limit) + sr.len = 0 + sr.len_code_delta = 0 + sr.distance = 0 + sr.score = kMinScore + hasher.FindLongestMatch(¶ms.dictionary, ringbuffer, ringbuffer_mask, dist_cache, position, max_length, max_distance, gap, params.dist.max_distance, sr) + if sr.score > kMinScore { + /* Found a match. Let's look for something even better ahead. 
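+ This is the standard lazy-matching heuristic: before committing to the
+ match found at the current byte, check the match starting at the next
+ byte, and if it scores at least cost_diff_lazy higher, emit one literal
+ and carry the better match forward instead.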
*/ + var delayed_backward_references_in_row int = 0 + max_length-- + for ; ; max_length-- { + var cost_diff_lazy uint = 175 + if params.quality < minQualityForExtensiveReferenceSearch { + sr2.len = brotli_min_size_t(sr.len-1, max_length) + } else { + sr2.len = 0 + } + sr2.len_code_delta = 0 + sr2.distance = 0 + sr2.score = kMinScore + max_distance = brotli_min_size_t(position+1, max_backward_limit) + hasher.FindLongestMatch(¶ms.dictionary, ringbuffer, ringbuffer_mask, dist_cache, position+1, max_length, max_distance, gap, params.dist.max_distance, sr2) + if sr2.score >= sr.score+cost_diff_lazy { + /* Ok, let's just write one byte for now and start a match from the + next byte. */ + position++ + + insert_length++ + *sr = *sr2 + delayed_backward_references_in_row++ + if delayed_backward_references_in_row < 4 && position+hasher.HashTypeLength() < pos_end { + continue + } + } + + break + } + + apply_random_heuristics = position + 2*sr.len + random_heuristics_window_size + max_distance = brotli_min_size_t(position, max_backward_limit) + { + /* The first 16 codes are special short-codes, + and the minimum offset is 1. */ + var distance_code uint = computeDistanceCode(sr.distance, max_distance+gap, dist_cache) + if (sr.distance <= (max_distance + gap)) && distance_code > 0 { + dist_cache[3] = dist_cache[2] + dist_cache[2] = dist_cache[1] + dist_cache[1] = dist_cache[0] + dist_cache[0] = int(sr.distance) + hasher.PrepareDistanceCache(dist_cache) + } + + *commands = append(*commands, makeCommand(¶ms.dist, insert_length, sr.len, sr.len_code_delta, distance_code)) + } + + *num_literals += insert_length + insert_length = 0 + /* Put the hash keys into the table, if there are enough bytes left. + Depending on the hasher implementation, it can push all positions + in the given range or only a subset of them. + Avoid hash poisoning with RLE data. */ + { + var range_start uint = position + 2 + var range_end uint = brotli_min_size_t(position+sr.len, store_end) + if sr.distance < sr.len>>2 { + range_start = brotli_min_size_t(range_end, brotli_max_size_t(range_start, position+sr.len-(sr.distance<<2))) + } + + hasher.StoreRange(ringbuffer, ringbuffer_mask, range_start, range_end) + } + + position += sr.len + } else { + insert_length++ + position++ + + /* If we have not seen matches for a long time, we can skip some + match lookups. Unsuccessful match lookups are very very expensive + and this kind of a heuristic speeds up compression quite + a lot. */ + if position > apply_random_heuristics { + /* Going through uncompressible data, jump. */ + if position > apply_random_heuristics+4*random_heuristics_window_size { + var kMargin uint = brotli_max_size_t(hasher.StoreLookahead()-1, 4) + /* It is quite a long time since we saw a copy, so we assume + that this data is not compressible, and store hashes less + often. Hashes of non compressible data are less likely to + turn out to be useful in the future, too, so we store less of + them to not to flood out the hash table of good compressible + data. 
*/ + + var pos_jump uint = brotli_min_size_t(position+16, pos_end-kMargin) + for ; position < pos_jump; position += 4 { + hasher.Store(ringbuffer, ringbuffer_mask, position) + insert_length += 4 + } + } else { + var kMargin uint = brotli_max_size_t(hasher.StoreLookahead()-1, 2) + var pos_jump uint = brotli_min_size_t(position+8, pos_end-kMargin) + for ; position < pos_jump; position += 2 { + hasher.Store(ringbuffer, ringbuffer_mask, position) + insert_length += 2 + } + } + } + } + } + + insert_length += pos_end - position + *last_insert_len = insert_length + + hasherSearchResultPool.Put(sr) + hasherSearchResultPool.Put(sr2) +} diff --git a/vendor/github.com/andybalholm/brotli/backward_references_hq.go b/vendor/github.com/andybalholm/brotli/backward_references_hq.go new file mode 100644 index 00000000000..21629c1cdb7 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/backward_references_hq.go @@ -0,0 +1,796 @@ +package brotli + +import "math" + +type zopfliNode struct { + length uint32 + distance uint32 + dcode_insert_length uint32 + u struct { + cost float32 + next uint32 + shortcut uint32 + } +} + +const maxEffectiveDistanceAlphabetSize = 544 + +const kInfinity float32 = 1.7e38 /* ~= 2 ^ 127 */ + +var kDistanceCacheIndex = []uint32{0, 1, 2, 3, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1} + +var kDistanceCacheOffset = []int{0, 0, 0, 0, -1, 1, -2, 2, -3, 3, -1, 1, -2, 2, -3, 3} + +func initZopfliNodes(array []zopfliNode, length uint) { + var stub zopfliNode + var i uint + stub.length = 1 + stub.distance = 0 + stub.dcode_insert_length = 0 + stub.u.cost = kInfinity + for i = 0; i < length; i++ { + array[i] = stub + } +} + +func zopfliNodeCopyLength(self *zopfliNode) uint32 { + return self.length & 0x1FFFFFF +} + +func zopfliNodeLengthCode(self *zopfliNode) uint32 { + var modifier uint32 = self.length >> 25 + return zopfliNodeCopyLength(self) + 9 - modifier +} + +func zopfliNodeCopyDistance(self *zopfliNode) uint32 { + return self.distance +} + +func zopfliNodeDistanceCode(self *zopfliNode) uint32 { + var short_code uint32 = self.dcode_insert_length >> 27 + if short_code == 0 { + return zopfliNodeCopyDistance(self) + numDistanceShortCodes - 1 + } else { + return short_code - 1 + } +} + +func zopfliNodeCommandLength(self *zopfliNode) uint32 { + return zopfliNodeCopyLength(self) + (self.dcode_insert_length & 0x7FFFFFF) +} + +/* Histogram based cost model for zopflification. 
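+ Command, literal and distance costs are estimated Shannon bit counts
+ derived from symbol histograms, so the Zopfli pass can score a candidate
+ parse by summing per-symbol costs along a path through the input.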
*/ +type zopfliCostModel struct { + cost_cmd_ [numCommandSymbols]float32 + cost_dist_ []float32 + distance_histogram_size uint32 + literal_costs_ []float32 + min_cost_cmd_ float32 + num_bytes_ uint +} + +func initZopfliCostModel(self *zopfliCostModel, dist *distanceParams, num_bytes uint) { + var distance_histogram_size uint32 = dist.alphabet_size + if distance_histogram_size > maxEffectiveDistanceAlphabetSize { + distance_histogram_size = maxEffectiveDistanceAlphabetSize + } + + self.num_bytes_ = num_bytes + self.literal_costs_ = make([]float32, (num_bytes + 2)) + self.cost_dist_ = make([]float32, (dist.alphabet_size)) + self.distance_histogram_size = distance_histogram_size +} + +func cleanupZopfliCostModel(self *zopfliCostModel) { + self.literal_costs_ = nil + self.cost_dist_ = nil +} + +func setCost(histogram []uint32, histogram_size uint, literal_histogram bool, cost []float32) { + var sum uint = 0 + var missing_symbol_sum uint + var log2sum float32 + var missing_symbol_cost float32 + var i uint + for i = 0; i < histogram_size; i++ { + sum += uint(histogram[i]) + } + + log2sum = float32(fastLog2(sum)) + missing_symbol_sum = sum + if !literal_histogram { + for i = 0; i < histogram_size; i++ { + if histogram[i] == 0 { + missing_symbol_sum++ + } + } + } + + missing_symbol_cost = float32(fastLog2(missing_symbol_sum)) + 2 + for i = 0; i < histogram_size; i++ { + if histogram[i] == 0 { + cost[i] = missing_symbol_cost + continue + } + + /* Shannon bits for this symbol. */ + cost[i] = log2sum - float32(fastLog2(uint(histogram[i]))) + + /* Cannot be coded with less than 1 bit */ + if cost[i] < 1 { + cost[i] = 1 + } + } +} + +func zopfliCostModelSetFromCommands(self *zopfliCostModel, position uint, ringbuffer []byte, ringbuffer_mask uint, commands []command, last_insert_len uint) { + var histogram_literal [numLiteralSymbols]uint32 + var histogram_cmd [numCommandSymbols]uint32 + var histogram_dist [maxEffectiveDistanceAlphabetSize]uint32 + var cost_literal [numLiteralSymbols]float32 + var pos uint = position - last_insert_len + var min_cost_cmd float32 = kInfinity + var cost_cmd []float32 = self.cost_cmd_[:] + var literal_costs []float32 + + histogram_literal = [numLiteralSymbols]uint32{} + histogram_cmd = [numCommandSymbols]uint32{} + histogram_dist = [maxEffectiveDistanceAlphabetSize]uint32{} + + for i := range commands { + var inslength uint = uint(commands[i].insert_len_) + var copylength uint = uint(commandCopyLen(&commands[i])) + var distcode uint = uint(commands[i].dist_prefix_) & 0x3FF + var cmdcode uint = uint(commands[i].cmd_prefix_) + var j uint + + histogram_cmd[cmdcode]++ + if cmdcode >= 128 { + histogram_dist[distcode]++ + } + + for j = 0; j < inslength; j++ { + histogram_literal[ringbuffer[(pos+j)&ringbuffer_mask]]++ + } + + pos += inslength + copylength + } + + setCost(histogram_literal[:], numLiteralSymbols, true, cost_literal[:]) + setCost(histogram_cmd[:], numCommandSymbols, false, cost_cmd) + setCost(histogram_dist[:], uint(self.distance_histogram_size), false, self.cost_dist_) + + for i := 0; i < numCommandSymbols; i++ { + min_cost_cmd = brotli_min_float(min_cost_cmd, cost_cmd[i]) + } + + self.min_cost_cmd_ = min_cost_cmd + { + literal_costs = self.literal_costs_ + var literal_carry float32 = 0.0 + num_bytes := int(self.num_bytes_) + literal_costs[0] = 0.0 + for i := 0; i < num_bytes; i++ { + literal_carry += cost_literal[ringbuffer[(position+uint(i))&ringbuffer_mask]] + literal_costs[i+1] = literal_costs[i] + literal_carry + literal_carry -= literal_costs[i+1] - 
literal_costs[i] + } + } +} + +func zopfliCostModelSetFromLiteralCosts(self *zopfliCostModel, position uint, ringbuffer []byte, ringbuffer_mask uint) { + var literal_costs []float32 = self.literal_costs_ + var literal_carry float32 = 0.0 + var cost_dist []float32 = self.cost_dist_ + var cost_cmd []float32 = self.cost_cmd_[:] + var num_bytes uint = self.num_bytes_ + var i uint + estimateBitCostsForLiterals(position, num_bytes, ringbuffer_mask, ringbuffer, literal_costs[1:]) + literal_costs[0] = 0.0 + for i = 0; i < num_bytes; i++ { + literal_carry += literal_costs[i+1] + literal_costs[i+1] = literal_costs[i] + literal_carry + literal_carry -= literal_costs[i+1] - literal_costs[i] + } + + for i = 0; i < numCommandSymbols; i++ { + cost_cmd[i] = float32(fastLog2(uint(11 + uint32(i)))) + } + + for i = 0; uint32(i) < self.distance_histogram_size; i++ { + cost_dist[i] = float32(fastLog2(uint(20 + uint32(i)))) + } + + self.min_cost_cmd_ = float32(fastLog2(11)) +} + +func zopfliCostModelGetCommandCost(self *zopfliCostModel, cmdcode uint16) float32 { + return self.cost_cmd_[cmdcode] +} + +func zopfliCostModelGetDistanceCost(self *zopfliCostModel, distcode uint) float32 { + return self.cost_dist_[distcode] +} + +func zopfliCostModelGetLiteralCosts(self *zopfliCostModel, from uint, to uint) float32 { + return self.literal_costs_[to] - self.literal_costs_[from] +} + +func zopfliCostModelGetMinCostCmd(self *zopfliCostModel) float32 { + return self.min_cost_cmd_ +} + +/* REQUIRES: len >= 2, start_pos <= pos */ +/* REQUIRES: cost < kInfinity, nodes[start_pos].cost < kInfinity */ +/* Maintains the "ZopfliNode array invariant". */ +func updateZopfliNode(nodes []zopfliNode, pos uint, start_pos uint, len uint, len_code uint, dist uint, short_code uint, cost float32) { + var next *zopfliNode = &nodes[pos+len] + next.length = uint32(len | (len+9-len_code)<<25) + next.distance = uint32(dist) + next.dcode_insert_length = uint32(short_code<<27 | (pos - start_pos)) + next.u.cost = cost +} + +type posData struct { + pos uint + distance_cache [4]int + costdiff float32 + cost float32 +} + +/* Maintains the smallest 8 cost difference together with their positions */ +type startPosQueue struct { + q_ [8]posData + idx_ uint +} + +func initStartPosQueue(self *startPosQueue) { + self.idx_ = 0 +} + +func startPosQueueSize(self *startPosQueue) uint { + return brotli_min_size_t(self.idx_, 8) +} + +func startPosQueuePush(self *startPosQueue, posdata *posData) { + var offset uint = ^(self.idx_) & 7 + self.idx_++ + var len uint = startPosQueueSize(self) + var i uint + var q []posData = self.q_[:] + q[offset] = *posdata + + /* Restore the sorted order. In the list of |len| items at most |len - 1| + adjacent element comparisons / swaps are required. */ + for i = 1; i < len; i++ { + if q[offset&7].costdiff > q[(offset+1)&7].costdiff { + var tmp posData = q[offset&7] + q[offset&7] = q[(offset+1)&7] + q[(offset+1)&7] = tmp + } + + offset++ + } +} + +func startPosQueueAt(self *startPosQueue, k uint) *posData { + return &self.q_[(k-self.idx_)&7] +} + +/* Returns the minimum possible copy length that can improve the cost of any */ +/* future position. */ +func computeMinimumCopyLength(start_cost float32, nodes []zopfliNode, num_bytes uint, pos uint) uint { + var min_cost float32 = start_cost + var len uint = 2 + var next_len_bucket uint = 4 + /* Compute the minimum possible cost of reaching any future position. 
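+ Copy-length codes cost one extra bit each time the length crosses into
+ the next (doubling) bucket, which is why min_cost is bumped by 1.0 at
+ each bucket boundary below.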
*/ + + var next_len_offset uint = 10 + for pos+len <= num_bytes && nodes[pos+len].u.cost <= min_cost { + /* We already reached (pos + len) with no more cost than the minimum + possible cost of reaching anything from this pos, so there is no point in + looking for lengths <= len. */ + len++ + + if len == next_len_offset { + /* We reached the next copy length code bucket, so we add one more + extra bit to the minimum cost. */ + min_cost += 1.0 + + next_len_offset += next_len_bucket + next_len_bucket *= 2 + } + } + + return uint(len) +} + +/* REQUIRES: nodes[pos].cost < kInfinity + REQUIRES: nodes[0..pos] satisfies that "ZopfliNode array invariant". */ +func computeDistanceShortcut(block_start uint, pos uint, max_backward_limit uint, gap uint, nodes []zopfliNode) uint32 { + var clen uint = uint(zopfliNodeCopyLength(&nodes[pos])) + var ilen uint = uint(nodes[pos].dcode_insert_length & 0x7FFFFFF) + var dist uint = uint(zopfliNodeCopyDistance(&nodes[pos])) + + /* Since |block_start + pos| is the end position of the command, the copy part + starts from |block_start + pos - clen|. Distances that are greater than + this or greater than |max_backward_limit| + |gap| are static dictionary + references, and do not update the last distances. + Also distance code 0 (last distance) does not update the last distances. */ + if pos == 0 { + return 0 + } else if dist+clen <= block_start+pos+gap && dist <= max_backward_limit+gap && zopfliNodeDistanceCode(&nodes[pos]) > 0 { + return uint32(pos) + } else { + return nodes[pos-clen-ilen].u.shortcut + } +} + +/* Fills in dist_cache[0..3] with the last four distances (as defined by + Section 4. of the Spec) that would be used at (block_start + pos) if we + used the shortest path of commands from block_start, computed from + nodes[0..pos]. The last four distances at block_start are in + starting_dist_cache[0..3]. + REQUIRES: nodes[pos].cost < kInfinity + REQUIRES: nodes[0..pos] satisfies that "ZopfliNode array invariant". */ +func computeDistanceCache(pos uint, starting_dist_cache []int, nodes []zopfliNode, dist_cache []int) { + var idx int = 0 + var p uint = uint(nodes[pos].u.shortcut) + for idx < 4 && p > 0 { + var ilen uint = uint(nodes[p].dcode_insert_length & 0x7FFFFFF) + var clen uint = uint(zopfliNodeCopyLength(&nodes[p])) + var dist uint = uint(zopfliNodeCopyDistance(&nodes[p])) + dist_cache[idx] = int(dist) + idx++ + + /* Because of prerequisite, p >= clen + ilen >= 2. */ + p = uint(nodes[p-clen-ilen].u.shortcut) + } + + for ; idx < 4; idx++ { + dist_cache[idx] = starting_dist_cache[0] + starting_dist_cache = starting_dist_cache[1:] + } +} + +/* Maintains "ZopfliNode array invariant" and pushes node to the queue, if it + is eligible. */ +func evaluateNode(block_start uint, pos uint, max_backward_limit uint, gap uint, starting_dist_cache []int, model *zopfliCostModel, queue *startPosQueue, nodes []zopfliNode) { + /* Save cost, because ComputeDistanceCache invalidates it. */ + var node_cost float32 = nodes[pos].u.cost + nodes[pos].u.shortcut = computeDistanceShortcut(block_start, pos, max_backward_limit, gap, nodes) + if node_cost <= zopfliCostModelGetLiteralCosts(model, 0, pos) { + var posdata posData + posdata.pos = pos + posdata.cost = node_cost + posdata.costdiff = node_cost - zopfliCostModelGetLiteralCosts(model, 0, pos) + computeDistanceCache(pos, starting_dist_cache, nodes, posdata.distance_cache[:]) + startPosQueuePush(queue, &posdata) + } +} + +/* Returns longest copy length. 
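+ That is, the longest match registered at this position; the caller uses
+ it to decide how many subsequent positions can be skipped.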
*/ +func updateNodes(num_bytes uint, block_start uint, pos uint, ringbuffer []byte, ringbuffer_mask uint, params *encoderParams, max_backward_limit uint, starting_dist_cache []int, num_matches uint, matches []backwardMatch, model *zopfliCostModel, queue *startPosQueue, nodes []zopfliNode) uint { + var cur_ix uint = block_start + pos + var cur_ix_masked uint = cur_ix & ringbuffer_mask + var max_distance uint = brotli_min_size_t(cur_ix, max_backward_limit) + var max_len uint = num_bytes - pos + var max_zopfli_len uint = maxZopfliLen(params) + var max_iters uint = maxZopfliCandidates(params) + var min_len uint + var result uint = 0 + var k uint + var gap uint = 0 + + evaluateNode(block_start, pos, max_backward_limit, gap, starting_dist_cache, model, queue, nodes) + { + var posdata *posData = startPosQueueAt(queue, 0) + var min_cost float32 = (posdata.cost + zopfliCostModelGetMinCostCmd(model) + zopfliCostModelGetLiteralCosts(model, posdata.pos, pos)) + min_len = computeMinimumCopyLength(min_cost, nodes, num_bytes, pos) + } + + /* Go over the command starting positions in order of increasing cost + difference. */ + for k = 0; k < max_iters && k < startPosQueueSize(queue); k++ { + var posdata *posData = startPosQueueAt(queue, k) + var start uint = posdata.pos + var inscode uint16 = getInsertLengthCode(pos - start) + var start_costdiff float32 = posdata.costdiff + var base_cost float32 = start_costdiff + float32(getInsertExtra(inscode)) + zopfliCostModelGetLiteralCosts(model, 0, pos) + var best_len uint = min_len - 1 + var j uint = 0 + /* Look for last distance matches using the distance cache from this + starting position. */ + for ; j < numDistanceShortCodes && best_len < max_len; j++ { + var idx uint = uint(kDistanceCacheIndex[j]) + var backward uint = uint(posdata.distance_cache[idx] + kDistanceCacheOffset[j]) + var prev_ix uint = cur_ix - backward + var len uint = 0 + var continuation byte = ringbuffer[cur_ix_masked+best_len] + if cur_ix_masked+best_len > ringbuffer_mask { + break + } + + if backward > max_distance+gap { + /* Word dictionary -> ignore. */ + continue + } + + if backward <= max_distance { + /* Regular backward reference. */ + if prev_ix >= cur_ix { + continue + } + + prev_ix &= ringbuffer_mask + if prev_ix+best_len > ringbuffer_mask || continuation != ringbuffer[prev_ix+best_len] { + continue + } + + len = findMatchLengthWithLimit(ringbuffer[prev_ix:], ringbuffer[cur_ix_masked:], max_len) + } else { + continue + } + { + var dist_cost float32 = base_cost + zopfliCostModelGetDistanceCost(model, j) + var l uint + for l = best_len + 1; l <= len; l++ { + var copycode uint16 = getCopyLengthCode(l) + var cmdcode uint16 = combineLengthCodes(inscode, copycode, j == 0) + var tmp float32 + if cmdcode < 128 { + tmp = base_cost + } else { + tmp = dist_cost + } + var cost float32 = tmp + float32(getCopyExtra(copycode)) + zopfliCostModelGetCommandCost(model, cmdcode) + if cost < nodes[pos+l].u.cost { + updateZopfliNode(nodes, pos, start, l, l, backward, j+1, cost) + result = brotli_max_size_t(result, l) + } + + best_len = l + } + } + } + + /* At higher iterations look only for new last distance matches, since + looking only for new command start positions with the same distances + does not help much. */ + if k >= 2 { + continue + } + { + /* Loop through all possible copy lengths at this position. 
*/ + var len uint = min_len + for j = 0; j < num_matches; j++ { + var match backwardMatch = matches[j] + var dist uint = uint(match.distance) + var is_dictionary_match bool = (dist > max_distance+gap) + var dist_code uint = dist + numDistanceShortCodes - 1 + var dist_symbol uint16 + var distextra uint32 + var distnumextra uint32 + var dist_cost float32 + var max_match_len uint + /* We already tried all possible last distance matches, so we can use + normal distance code here. */ + prefixEncodeCopyDistance(dist_code, uint(params.dist.num_direct_distance_codes), uint(params.dist.distance_postfix_bits), &dist_symbol, &distextra) + + distnumextra = uint32(dist_symbol) >> 10 + dist_cost = base_cost + float32(distnumextra) + zopfliCostModelGetDistanceCost(model, uint(dist_symbol)&0x3FF) + + /* Try all copy lengths up until the maximum copy length corresponding + to this distance. If the distance refers to the static dictionary, or + the maximum length is long enough, try only one maximum length. */ + max_match_len = backwardMatchLength(&match) + + if len < max_match_len && (is_dictionary_match || max_match_len > max_zopfli_len) { + len = max_match_len + } + + for ; len <= max_match_len; len++ { + var len_code uint + if is_dictionary_match { + len_code = backwardMatchLengthCode(&match) + } else { + len_code = len + } + var copycode uint16 = getCopyLengthCode(len_code) + var cmdcode uint16 = combineLengthCodes(inscode, copycode, false) + var cost float32 = dist_cost + float32(getCopyExtra(copycode)) + zopfliCostModelGetCommandCost(model, cmdcode) + if cost < nodes[pos+len].u.cost { + updateZopfliNode(nodes, pos, start, uint(len), len_code, dist, 0, cost) + if len > result { + result = len + } + } + } + } + } + } + + return result +} + +func computeShortestPathFromNodes(num_bytes uint, nodes []zopfliNode) uint { + var index uint = num_bytes + var num_commands uint = 0 + for nodes[index].dcode_insert_length&0x7FFFFFF == 0 && nodes[index].length == 1 { + index-- + } + nodes[index].u.next = math.MaxUint32 + for index != 0 { + var len uint = uint(zopfliNodeCommandLength(&nodes[index])) + index -= uint(len) + nodes[index].u.next = uint32(len) + num_commands++ + } + + return num_commands +} + +/* REQUIRES: nodes != NULL and len(nodes) >= num_bytes + 1 */ +func zopfliCreateCommands(num_bytes uint, block_start uint, nodes []zopfliNode, dist_cache []int, last_insert_len *uint, params *encoderParams, commands *[]command, num_literals *uint) { + var max_backward_limit uint = maxBackwardLimit(params.lgwin) + var pos uint = 0 + var offset uint32 = nodes[0].u.next + var i uint + var gap uint = 0 + for i = 0; offset != math.MaxUint32; i++ { + var next *zopfliNode = &nodes[uint32(pos)+offset] + var copy_length uint = uint(zopfliNodeCopyLength(next)) + var insert_length uint = uint(next.dcode_insert_length & 0x7FFFFFF) + pos += insert_length + offset = next.u.next + if i == 0 { + insert_length += *last_insert_len + *last_insert_len = 0 + } + { + var distance uint = uint(zopfliNodeCopyDistance(next)) + var len_code uint = uint(zopfliNodeLengthCode(next)) + var max_distance uint = brotli_min_size_t(block_start+pos, max_backward_limit) + var is_dictionary bool = (distance > max_distance+gap) + var dist_code uint = uint(zopfliNodeDistanceCode(next)) + *commands = append(*commands, makeCommand(¶ms.dist, insert_length, copy_length, int(len_code)-int(copy_length), dist_code)) + + if !is_dictionary && dist_code > 0 { + dist_cache[3] = dist_cache[2] + dist_cache[2] = dist_cache[1] + dist_cache[1] = dist_cache[0] + 
dist_cache[0] = int(distance) + } + } + + *num_literals += insert_length + pos += copy_length + } + + *last_insert_len += num_bytes - pos +} + +func zopfliIterate(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint, params *encoderParams, gap uint, dist_cache []int, model *zopfliCostModel, num_matches []uint32, matches []backwardMatch, nodes []zopfliNode) uint { + var max_backward_limit uint = maxBackwardLimit(params.lgwin) + var max_zopfli_len uint = maxZopfliLen(params) + var queue startPosQueue + var cur_match_pos uint = 0 + var i uint + nodes[0].length = 0 + nodes[0].u.cost = 0 + initStartPosQueue(&queue) + for i = 0; i+3 < num_bytes; i++ { + var skip uint = updateNodes(num_bytes, position, i, ringbuffer, ringbuffer_mask, params, max_backward_limit, dist_cache, uint(num_matches[i]), matches[cur_match_pos:], model, &queue, nodes) + if skip < longCopyQuickStep { + skip = 0 + } + cur_match_pos += uint(num_matches[i]) + if num_matches[i] == 1 && backwardMatchLength(&matches[cur_match_pos-1]) > max_zopfli_len { + skip = brotli_max_size_t(backwardMatchLength(&matches[cur_match_pos-1]), skip) + } + + if skip > 1 { + skip-- + for skip != 0 { + i++ + if i+3 >= num_bytes { + break + } + evaluateNode(position, i, max_backward_limit, gap, dist_cache, model, &queue, nodes) + cur_match_pos += uint(num_matches[i]) + skip-- + } + } + } + + return computeShortestPathFromNodes(num_bytes, nodes) +} + +/* Computes the shortest path of commands from position to at most + position + num_bytes. + + On return, path->size() is the number of commands found and path[i] is the + length of the i-th command (copy length plus insert length). + Note that the sum of the lengths of all commands can be less than num_bytes. + + On return, the nodes[0..num_bytes] array will have the following + "ZopfliNode array invariant": + For each i in [1..num_bytes], if nodes[i].cost < kInfinity, then + (1) nodes[i].copy_length() >= 2 + (2) nodes[i].command_length() <= i and + (3) nodes[i - nodes[i].command_length()].cost < kInfinity + + REQUIRES: nodes != nil and len(nodes) >= num_bytes + 1 */ +func zopfliComputeShortestPath(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint, params *encoderParams, dist_cache []int, hasher *h10, nodes []zopfliNode) uint { + var max_backward_limit uint = maxBackwardLimit(params.lgwin) + var max_zopfli_len uint = maxZopfliLen(params) + var model zopfliCostModel + var queue startPosQueue + var matches [2 * (maxNumMatchesH10 + 64)]backwardMatch + var store_end uint + if num_bytes >= hasher.StoreLookahead() { + store_end = position + num_bytes - hasher.StoreLookahead() + 1 + } else { + store_end = position + } + var i uint + var gap uint = 0 + var lz_matches_offset uint = 0 + nodes[0].length = 0 + nodes[0].u.cost = 0 + initZopfliCostModel(&model, ¶ms.dist, num_bytes) + zopfliCostModelSetFromLiteralCosts(&model, position, ringbuffer, ringbuffer_mask) + initStartPosQueue(&queue) + for i = 0; i+hasher.HashTypeLength()-1 < num_bytes; i++ { + var pos uint = position + i + var max_distance uint = brotli_min_size_t(pos, max_backward_limit) + var skip uint + var num_matches uint + num_matches = findAllMatchesH10(hasher, ¶ms.dictionary, ringbuffer, ringbuffer_mask, pos, num_bytes-i, max_distance, gap, params, matches[lz_matches_offset:]) + if num_matches > 0 && backwardMatchLength(&matches[num_matches-1]) > max_zopfli_len { + matches[0] = matches[num_matches-1] + num_matches = 1 + } + + skip = updateNodes(num_bytes, position, i, ringbuffer, ringbuffer_mask, params, 
max_backward_limit, dist_cache, num_matches, matches[:], &model, &queue, nodes) + if skip < longCopyQuickStep { + skip = 0 + } + if num_matches == 1 && backwardMatchLength(&matches[0]) > max_zopfli_len { + skip = brotli_max_size_t(backwardMatchLength(&matches[0]), skip) + } + + if skip > 1 { + /* Add the tail of the copy to the hasher. */ + hasher.StoreRange(ringbuffer, ringbuffer_mask, pos+1, brotli_min_size_t(pos+skip, store_end)) + + skip-- + for skip != 0 { + i++ + if i+hasher.HashTypeLength()-1 >= num_bytes { + break + } + evaluateNode(position, i, max_backward_limit, gap, dist_cache, &model, &queue, nodes) + skip-- + } + } + } + + cleanupZopfliCostModel(&model) + return computeShortestPathFromNodes(num_bytes, nodes) +} + +func createZopfliBackwardReferences(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint, params *encoderParams, hasher *h10, dist_cache []int, last_insert_len *uint, commands *[]command, num_literals *uint) { + var nodes []zopfliNode + nodes = make([]zopfliNode, (num_bytes + 1)) + initZopfliNodes(nodes, num_bytes+1) + zopfliComputeShortestPath(num_bytes, position, ringbuffer, ringbuffer_mask, params, dist_cache, hasher, nodes) + zopfliCreateCommands(num_bytes, position, nodes, dist_cache, last_insert_len, params, commands, num_literals) + nodes = nil +} + +func createHqZopfliBackwardReferences(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint, params *encoderParams, hasher hasherHandle, dist_cache []int, last_insert_len *uint, commands *[]command, num_literals *uint) { + var max_backward_limit uint = maxBackwardLimit(params.lgwin) + var num_matches []uint32 = make([]uint32, num_bytes) + var matches_size uint = 4 * num_bytes + var store_end uint + if num_bytes >= hasher.StoreLookahead() { + store_end = position + num_bytes - hasher.StoreLookahead() + 1 + } else { + store_end = position + } + var cur_match_pos uint = 0 + var i uint + var orig_num_literals uint + var orig_last_insert_len uint + var orig_dist_cache [4]int + var orig_num_commands int + var model zopfliCostModel + var nodes []zopfliNode + var matches []backwardMatch = make([]backwardMatch, matches_size) + var gap uint = 0 + var shadow_matches uint = 0 + var new_array []backwardMatch + for i = 0; i+hasher.HashTypeLength()-1 < num_bytes; i++ { + var pos uint = position + i + var max_distance uint = brotli_min_size_t(pos, max_backward_limit) + var max_length uint = num_bytes - i + var num_found_matches uint + var cur_match_end uint + var j uint + + /* Ensure that we have enough free slots. 
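+ The matches buffer grows geometrically (doubling until it fits), so the
+ amortized cost of repeated appends stays linear.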
*/ + if matches_size < cur_match_pos+maxNumMatchesH10+shadow_matches { + var new_size uint = matches_size + if new_size == 0 { + new_size = cur_match_pos + maxNumMatchesH10 + shadow_matches + } + + for new_size < cur_match_pos+maxNumMatchesH10+shadow_matches { + new_size *= 2 + } + + new_array = make([]backwardMatch, new_size) + if matches_size != 0 { + copy(new_array, matches[:matches_size]) + } + + matches = new_array + matches_size = new_size + } + + num_found_matches = findAllMatchesH10(hasher.(*h10), ¶ms.dictionary, ringbuffer, ringbuffer_mask, pos, max_length, max_distance, gap, params, matches[cur_match_pos+shadow_matches:]) + cur_match_end = cur_match_pos + num_found_matches + for j = cur_match_pos; j+1 < cur_match_end; j++ { + assert(backwardMatchLength(&matches[j]) <= backwardMatchLength(&matches[j+1])) + } + + num_matches[i] = uint32(num_found_matches) + if num_found_matches > 0 { + var match_len uint = backwardMatchLength(&matches[cur_match_end-1]) + if match_len > maxZopfliLenQuality11 { + var skip uint = match_len - 1 + matches[cur_match_pos] = matches[cur_match_end-1] + cur_match_pos++ + num_matches[i] = 1 + + /* Add the tail of the copy to the hasher. */ + hasher.StoreRange(ringbuffer, ringbuffer_mask, pos+1, brotli_min_size_t(pos+match_len, store_end)) + var pos uint = i + for i := 0; i < int(skip); i++ { + num_matches[pos+1:][i] = 0 + } + i += skip + } else { + cur_match_pos = cur_match_end + } + } + } + + orig_num_literals = *num_literals + orig_last_insert_len = *last_insert_len + copy(orig_dist_cache[:], dist_cache[:4]) + orig_num_commands = len(*commands) + nodes = make([]zopfliNode, (num_bytes + 1)) + initZopfliCostModel(&model, ¶ms.dist, num_bytes) + for i = 0; i < 2; i++ { + initZopfliNodes(nodes, num_bytes+1) + if i == 0 { + zopfliCostModelSetFromLiteralCosts(&model, position, ringbuffer, ringbuffer_mask) + } else { + zopfliCostModelSetFromCommands(&model, position, ringbuffer, ringbuffer_mask, (*commands)[orig_num_commands:], orig_last_insert_len) + } + + *commands = (*commands)[:orig_num_commands] + *num_literals = orig_num_literals + *last_insert_len = orig_last_insert_len + copy(dist_cache, orig_dist_cache[:4]) + zopfliIterate(num_bytes, position, ringbuffer, ringbuffer_mask, params, gap, dist_cache, &model, num_matches, matches, nodes) + zopfliCreateCommands(num_bytes, position, nodes, dist_cache, last_insert_len, params, commands, num_literals) + } + + cleanupZopfliCostModel(&model) + nodes = nil + matches = nil + num_matches = nil +} diff --git a/vendor/github.com/andybalholm/brotli/bit_cost.go b/vendor/github.com/andybalholm/brotli/bit_cost.go new file mode 100644 index 00000000000..0005fc15e63 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/bit_cost.go @@ -0,0 +1,436 @@ +package brotli + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Functions to estimate the bit cost of Huffman trees. 
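[Editor's note: shannonEntropy below computes the information-theoretic lower bound, in total bits, for coding a histogram's symbols: the sum over symbols of count * log2(total/count). A self-contained illustration, independent of this package's helpers (assumes import "math"; the function name is hypothetical):

func entropyBits(counts []uint32) float64 {
	var total float64
	for _, c := range counts {
		total += float64(c)
	}
	var bits float64
	for _, c := range counts {
		if c == 0 {
			continue
		}
		p := float64(c) / total
		bits += float64(c) * -math.Log2(p) // count * -log2(P(symbol))
	}
	return bits
}

// For counts {4, 2, 2}: total = 8, so entropyBits = 4*1 + 2*2 + 2*2 = 12 bits.
]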
*/ +func shannonEntropy(population []uint32, size uint, total *uint) float64 { + var sum uint = 0 + var retval float64 = 0 + var population_end []uint32 = population[size:] + var p uint + for -cap(population) < -cap(population_end) { + p = uint(population[0]) + population = population[1:] + sum += p + retval -= float64(p) * fastLog2(p) + } + + if sum != 0 { + retval += float64(sum) * fastLog2(sum) + } + *total = sum + return retval +} + +func bitsEntropy(population []uint32, size uint) float64 { + var sum uint + var retval float64 = shannonEntropy(population, size, &sum) + if retval < float64(sum) { + /* At least one bit per literal is needed. */ + retval = float64(sum) + } + + return retval +} + +const kOneSymbolHistogramCost float64 = 12 +const kTwoSymbolHistogramCost float64 = 20 +const kThreeSymbolHistogramCost float64 = 28 +const kFourSymbolHistogramCost float64 = 37 + +func populationCostLiteral(histogram *histogramLiteral) float64 { + var data_size uint = histogramDataSizeLiteral() + var count int = 0 + var s [5]uint + var bits float64 = 0.0 + var i uint + if histogram.total_count_ == 0 { + return kOneSymbolHistogramCost + } + + for i = 0; i < data_size; i++ { + if histogram.data_[i] > 0 { + s[count] = i + count++ + if count > 4 { + break + } + } + } + + if count == 1 { + return kOneSymbolHistogramCost + } + + if count == 2 { + return kTwoSymbolHistogramCost + float64(histogram.total_count_) + } + + if count == 3 { + var histo0 uint32 = histogram.data_[s[0]] + var histo1 uint32 = histogram.data_[s[1]] + var histo2 uint32 = histogram.data_[s[2]] + var histomax uint32 = brotli_max_uint32_t(histo0, brotli_max_uint32_t(histo1, histo2)) + return kThreeSymbolHistogramCost + 2*(float64(histo0)+float64(histo1)+float64(histo2)) - float64(histomax) + } + + if count == 4 { + var histo [4]uint32 + var h23 uint32 + var histomax uint32 + for i = 0; i < 4; i++ { + histo[i] = histogram.data_[s[i]] + } + + /* Sort */ + for i = 0; i < 4; i++ { + var j uint + for j = i + 1; j < 4; j++ { + if histo[j] > histo[i] { + var tmp uint32 = histo[j] + histo[j] = histo[i] + histo[i] = tmp + } + } + } + + h23 = histo[2] + histo[3] + histomax = brotli_max_uint32_t(h23, histo[0]) + return kFourSymbolHistogramCost + 3*float64(h23) + 2*(float64(histo[0])+float64(histo[1])) - float64(histomax) + } + { + var max_depth uint = 1 + var depth_histo = [codeLengthCodes]uint32{0} + /* In this loop we compute the entropy of the histogram and simultaneously + build a simplified histogram of the code length codes where we use the + zero repeat code 17, but we don't use the non-zero repeat code 16. */ + + var log2total float64 = fastLog2(histogram.total_count_) + for i = 0; i < data_size; { + if histogram.data_[i] > 0 { + var log2p float64 = log2total - fastLog2(uint(histogram.data_[i])) + /* Compute -log2(P(symbol)) = -log2(count(symbol)/total_count) = + = log2(total_count) - log2(count(symbol)) */ + + var depth uint = uint(log2p + 0.5) + /* Approximate the bit depth by round(-log2(P(symbol))) */ + bits += float64(histogram.data_[i]) * log2p + + if depth > 15 { + depth = 15 + } + + if depth > max_depth { + max_depth = depth + } + + depth_histo[depth]++ + i++ + } else { + var reps uint32 = 1 + /* Compute the run length of zeros and add the appropriate number of 0 + and 17 code length codes to the code length code histogram. 
*/ + + var k uint + for k = i + 1; k < data_size && histogram.data_[k] == 0; k++ { + reps++ + } + + i += uint(reps) + if i == data_size { + /* Don't add any cost for the last zero run, since these are encoded + only implicitly. */ + break + } + + if reps < 3 { + depth_histo[0] += reps + } else { + reps -= 2 + for reps > 0 { + depth_histo[repeatZeroCodeLength]++ + + /* Add the 3 extra bits for the 17 code length code. */ + bits += 3 + + reps >>= 3 + } + } + } + } + + /* Add the estimated encoding cost of the code length code histogram. */ + bits += float64(18 + 2*max_depth) + + /* Add the entropy of the code length code histogram. */ + bits += bitsEntropy(depth_histo[:], codeLengthCodes) + } + + return bits +} + +func populationCostCommand(histogram *histogramCommand) float64 { + var data_size uint = histogramDataSizeCommand() + var count int = 0 + var s [5]uint + var bits float64 = 0.0 + var i uint + if histogram.total_count_ == 0 { + return kOneSymbolHistogramCost + } + + for i = 0; i < data_size; i++ { + if histogram.data_[i] > 0 { + s[count] = i + count++ + if count > 4 { + break + } + } + } + + if count == 1 { + return kOneSymbolHistogramCost + } + + if count == 2 { + return kTwoSymbolHistogramCost + float64(histogram.total_count_) + } + + if count == 3 { + var histo0 uint32 = histogram.data_[s[0]] + var histo1 uint32 = histogram.data_[s[1]] + var histo2 uint32 = histogram.data_[s[2]] + var histomax uint32 = brotli_max_uint32_t(histo0, brotli_max_uint32_t(histo1, histo2)) + return kThreeSymbolHistogramCost + 2*(float64(histo0)+float64(histo1)+float64(histo2)) - float64(histomax) + } + + if count == 4 { + var histo [4]uint32 + var h23 uint32 + var histomax uint32 + for i = 0; i < 4; i++ { + histo[i] = histogram.data_[s[i]] + } + + /* Sort */ + for i = 0; i < 4; i++ { + var j uint + for j = i + 1; j < 4; j++ { + if histo[j] > histo[i] { + var tmp uint32 = histo[j] + histo[j] = histo[i] + histo[i] = tmp + } + } + } + + h23 = histo[2] + histo[3] + histomax = brotli_max_uint32_t(h23, histo[0]) + return kFourSymbolHistogramCost + 3*float64(h23) + 2*(float64(histo[0])+float64(histo[1])) - float64(histomax) + } + { + var max_depth uint = 1 + var depth_histo = [codeLengthCodes]uint32{0} + /* In this loop we compute the entropy of the histogram and simultaneously + build a simplified histogram of the code length codes where we use the + zero repeat code 17, but we don't use the non-zero repeat code 16. */ + + var log2total float64 = fastLog2(histogram.total_count_) + for i = 0; i < data_size; { + if histogram.data_[i] > 0 { + var log2p float64 = log2total - fastLog2(uint(histogram.data_[i])) + /* Compute -log2(P(symbol)) = -log2(count(symbol)/total_count) = + = log2(total_count) - log2(count(symbol)) */ + + var depth uint = uint(log2p + 0.5) + /* Approximate the bit depth by round(-log2(P(symbol))) */ + bits += float64(histogram.data_[i]) * log2p + + if depth > 15 { + depth = 15 + } + + if depth > max_depth { + max_depth = depth + } + + depth_histo[depth]++ + i++ + } else { + var reps uint32 = 1 + /* Compute the run length of zeros and add the appropriate number of 0 + and 17 code length codes to the code length code histogram. */ + + var k uint + for k = i + 1; k < data_size && histogram.data_[k] == 0; k++ { + reps++ + } + + i += uint(reps) + if i == data_size { + /* Don't add any cost for the last zero run, since these are encoded + only implicitly. 
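[Editor's note: the run-length handling above approximates brotli's repeat-zero code 17 (RFC 7932), which skips 3-10 zero code lengths using 3 extra bits, chaining for longer runs. A sketch mirroring the loop above, with a hypothetical name, to make the cost model concrete:

func zeroRunCost(reps uint32) (code17s uint32, extraBits uint32) {
	if reps < 3 {
		return 0, 0 // short runs are emitted as literal zero code lengths
	}
	reps -= 2
	for reps > 0 {
		code17s++
		extraBits += 3 // the 3 extra bits of code 17
		reps >>= 3
	}
	return
}

// zeroRunCost(20) == (2, 6): a run of 20 zeros is modelled as two uses of
// code 17 plus 6 extra bits. A trailing run costs nothing, as noted above,
// because the code-length sequence simply ends.
]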
*/ + break + } + + if reps < 3 { + depth_histo[0] += reps + } else { + reps -= 2 + for reps > 0 { + depth_histo[repeatZeroCodeLength]++ + + /* Add the 3 extra bits for the 17 code length code. */ + bits += 3 + + reps >>= 3 + } + } + } + } + + /* Add the estimated encoding cost of the code length code histogram. */ + bits += float64(18 + 2*max_depth) + + /* Add the entropy of the code length code histogram. */ + bits += bitsEntropy(depth_histo[:], codeLengthCodes) + } + + return bits +} + +func populationCostDistance(histogram *histogramDistance) float64 { + var data_size uint = histogramDataSizeDistance() + var count int = 0 + var s [5]uint + var bits float64 = 0.0 + var i uint + if histogram.total_count_ == 0 { + return kOneSymbolHistogramCost + } + + for i = 0; i < data_size; i++ { + if histogram.data_[i] > 0 { + s[count] = i + count++ + if count > 4 { + break + } + } + } + + if count == 1 { + return kOneSymbolHistogramCost + } + + if count == 2 { + return kTwoSymbolHistogramCost + float64(histogram.total_count_) + } + + if count == 3 { + var histo0 uint32 = histogram.data_[s[0]] + var histo1 uint32 = histogram.data_[s[1]] + var histo2 uint32 = histogram.data_[s[2]] + var histomax uint32 = brotli_max_uint32_t(histo0, brotli_max_uint32_t(histo1, histo2)) + return kThreeSymbolHistogramCost + 2*(float64(histo0)+float64(histo1)+float64(histo2)) - float64(histomax) + } + + if count == 4 { + var histo [4]uint32 + var h23 uint32 + var histomax uint32 + for i = 0; i < 4; i++ { + histo[i] = histogram.data_[s[i]] + } + + /* Sort */ + for i = 0; i < 4; i++ { + var j uint + for j = i + 1; j < 4; j++ { + if histo[j] > histo[i] { + var tmp uint32 = histo[j] + histo[j] = histo[i] + histo[i] = tmp + } + } + } + + h23 = histo[2] + histo[3] + histomax = brotli_max_uint32_t(h23, histo[0]) + return kFourSymbolHistogramCost + 3*float64(h23) + 2*(float64(histo[0])+float64(histo[1])) - float64(histomax) + } + { + var max_depth uint = 1 + var depth_histo = [codeLengthCodes]uint32{0} + /* In this loop we compute the entropy of the histogram and simultaneously + build a simplified histogram of the code length codes where we use the + zero repeat code 17, but we don't use the non-zero repeat code 16. */ + + var log2total float64 = fastLog2(histogram.total_count_) + for i = 0; i < data_size; { + if histogram.data_[i] > 0 { + var log2p float64 = log2total - fastLog2(uint(histogram.data_[i])) + /* Compute -log2(P(symbol)) = -log2(count(symbol)/total_count) = + = log2(total_count) - log2(count(symbol)) */ + + var depth uint = uint(log2p + 0.5) + /* Approximate the bit depth by round(-log2(P(symbol))) */ + bits += float64(histogram.data_[i]) * log2p + + if depth > 15 { + depth = 15 + } + + if depth > max_depth { + max_depth = depth + } + + depth_histo[depth]++ + i++ + } else { + var reps uint32 = 1 + /* Compute the run length of zeros and add the appropriate number of 0 + and 17 code length codes to the code length code histogram. */ + + var k uint + for k = i + 1; k < data_size && histogram.data_[k] == 0; k++ { + reps++ + } + + i += uint(reps) + if i == data_size { + /* Don't add any cost for the last zero run, since these are encoded + only implicitly. */ + break + } + + if reps < 3 { + depth_histo[0] += reps + } else { + reps -= 2 + for reps > 0 { + depth_histo[repeatZeroCodeLength]++ + + /* Add the 3 extra bits for the 17 code length code. */ + bits += 3 + + reps >>= 3 + } + } + } + } + + /* Add the estimated encoding cost of the code length code histogram. 
*/ + bits += float64(18 + 2*max_depth) + + /* Add the entropy of the code length code histogram. */ + bits += bitsEntropy(depth_histo[:], codeLengthCodes) + } + + return bits +} diff --git a/vendor/github.com/andybalholm/brotli/bit_reader.go b/vendor/github.com/andybalholm/brotli/bit_reader.go new file mode 100644 index 00000000000..fba8687c69f --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/bit_reader.go @@ -0,0 +1,266 @@ +package brotli + +import "encoding/binary" + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Bit reading helpers */ + +const shortFillBitWindowRead = (8 >> 1) + +var kBitMask = [33]uint32{ + 0x00000000, + 0x00000001, + 0x00000003, + 0x00000007, + 0x0000000F, + 0x0000001F, + 0x0000003F, + 0x0000007F, + 0x000000FF, + 0x000001FF, + 0x000003FF, + 0x000007FF, + 0x00000FFF, + 0x00001FFF, + 0x00003FFF, + 0x00007FFF, + 0x0000FFFF, + 0x0001FFFF, + 0x0003FFFF, + 0x0007FFFF, + 0x000FFFFF, + 0x001FFFFF, + 0x003FFFFF, + 0x007FFFFF, + 0x00FFFFFF, + 0x01FFFFFF, + 0x03FFFFFF, + 0x07FFFFFF, + 0x0FFFFFFF, + 0x1FFFFFFF, + 0x3FFFFFFF, + 0x7FFFFFFF, + 0xFFFFFFFF, +} + +func bitMask(n uint32) uint32 { + return kBitMask[n] +} + +type bitReader struct { + val_ uint64 + bit_pos_ uint32 + input []byte + input_len uint + byte_pos uint +} + +type bitReaderState struct { + val_ uint64 + bit_pos_ uint32 + input []byte + input_len uint + byte_pos uint +} + +/* Initializes the BrotliBitReader fields. */ + +/* Ensures that accumulator is not empty. + May consume up to sizeof(brotli_reg_t) - 1 bytes of input. + Returns false if data is required but there is no input available. + For BROTLI_ALIGNED_READ this function also prepares bit reader for aligned + reading. */ +func bitReaderSaveState(from *bitReader, to *bitReaderState) { + to.val_ = from.val_ + to.bit_pos_ = from.bit_pos_ + to.input = from.input + to.input_len = from.input_len + to.byte_pos = from.byte_pos +} + +func bitReaderRestoreState(to *bitReader, from *bitReaderState) { + to.val_ = from.val_ + to.bit_pos_ = from.bit_pos_ + to.input = from.input + to.input_len = from.input_len + to.byte_pos = from.byte_pos +} + +func getAvailableBits(br *bitReader) uint32 { + return 64 - br.bit_pos_ +} + +/* Returns amount of unread bytes the bit reader still has buffered from the + BrotliInput, including whole bytes in br->val_. */ +func getRemainingBytes(br *bitReader) uint { + return uint(uint32(br.input_len-br.byte_pos) + (getAvailableBits(br) >> 3)) +} + +/* Checks if there is at least |num| bytes left in the input ring-buffer + (excluding the bits remaining in br->val_). */ +func checkInputAmount(br *bitReader, num uint) bool { + return br.input_len-br.byte_pos >= num +} + +/* Guarantees that there are at least |n_bits| + 1 bits in accumulator. + Precondition: accumulator contains at least 1 bit. + |n_bits| should be in the range [1..24] for regular build. For portable + non-64-bit little-endian build only 16 bits are safe to request. */ +func fillBitWindow(br *bitReader, n_bits uint32) { + if br.bit_pos_ >= 32 { + br.val_ >>= 32 + br.bit_pos_ ^= 32 /* here same as -= 32 because of the if condition */ + br.val_ |= (uint64(binary.LittleEndian.Uint32(br.input[br.byte_pos:]))) << 32 + br.byte_pos += 4 + } +} + +/* Mostly like BrotliFillBitWindow, but guarantees only 16 bits and reads no + more than BROTLI_SHORT_FILL_BIT_WINDOW_READ bytes of input. 
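[Editor's note: in this reader, val_ is a 64-bit accumulator and bit_pos_ counts how many of its low bits have been consumed; fillBitWindow refills only once at least 32 bits are consumed, shifting them out and loading the next 4 input bytes little-endian into the top half. The kBitMask table is simply (1<<n)-1 precomputed to avoid variable shifts on hot paths; an equivalent computation for n <= 32 (hypothetical helper, for illustration only):

func bitMaskEquivalent(n uint32) uint32 {
	return uint32((uint64(1) << n) - 1) // matches kBitMask[n] for n <= 32
}
]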
*/ +func fillBitWindow16(br *bitReader) { + fillBitWindow(br, 17) +} + +/* Tries to pull one byte of input to accumulator. + Returns false if there is no input available. */ +func pullByte(br *bitReader) bool { + if br.byte_pos == br.input_len { + return false + } + + br.val_ >>= 8 + br.val_ |= (uint64(br.input[br.byte_pos])) << 56 + br.bit_pos_ -= 8 + br.byte_pos++ + return true +} + +/* Returns currently available bits. + The number of valid bits could be calculated by BrotliGetAvailableBits. */ +func getBitsUnmasked(br *bitReader) uint64 { + return br.val_ >> br.bit_pos_ +} + +/* Like BrotliGetBits, but does not mask the result. + The result contains at least 16 valid bits. */ +func get16BitsUnmasked(br *bitReader) uint32 { + fillBitWindow(br, 16) + return uint32(getBitsUnmasked(br)) +} + +/* Returns the specified number of bits from |br| without advancing bit + position. */ +func getBits(br *bitReader, n_bits uint32) uint32 { + fillBitWindow(br, n_bits) + return uint32(getBitsUnmasked(br)) & bitMask(n_bits) +} + +/* Tries to peek the specified amount of bits. Returns false, if there + is not enough input. */ +func safeGetBits(br *bitReader, n_bits uint32, val *uint32) bool { + for getAvailableBits(br) < n_bits { + if !pullByte(br) { + return false + } + } + + *val = uint32(getBitsUnmasked(br)) & bitMask(n_bits) + return true +} + +/* Advances the bit pos by |n_bits|. */ +func dropBits(br *bitReader, n_bits uint32) { + br.bit_pos_ += n_bits +} + +func bitReaderUnload(br *bitReader) { + var unused_bytes uint32 = getAvailableBits(br) >> 3 + var unused_bits uint32 = unused_bytes << 3 + br.byte_pos -= uint(unused_bytes) + if unused_bits == 64 { + br.val_ = 0 + } else { + br.val_ <<= unused_bits + } + + br.bit_pos_ += unused_bits +} + +/* Reads the specified number of bits from |br| and advances the bit pos. + Precondition: accumulator MUST contain at least |n_bits|. */ +func takeBits(br *bitReader, n_bits uint32, val *uint32) { + *val = uint32(getBitsUnmasked(br)) & bitMask(n_bits) + dropBits(br, n_bits) +} + +/* Reads the specified number of bits from |br| and advances the bit pos. + Assumes that there is enough input to perform BrotliFillBitWindow. */ +func readBits(br *bitReader, n_bits uint32) uint32 { + var val uint32 + fillBitWindow(br, n_bits) + takeBits(br, n_bits, &val) + return val +} + +/* Tries to read the specified amount of bits. Returns false, if there + is not enough input. |n_bits| MUST be positive. */ +func safeReadBits(br *bitReader, n_bits uint32, val *uint32) bool { + for getAvailableBits(br) < n_bits { + if !pullByte(br) { + return false + } + } + + takeBits(br, n_bits, val) + return true +} + +/* Advances the bit reader position to the next byte boundary and verifies + that any skipped bits are set to zero. */ +func bitReaderJumpToByteBoundary(br *bitReader) bool { + var pad_bits_count uint32 = getAvailableBits(br) & 0x7 + var pad_bits uint32 = 0 + if pad_bits_count != 0 { + takeBits(br, pad_bits_count, &pad_bits) + } + + return pad_bits == 0 +} + +/* Copies remaining input bytes stored in the bit reader to the output. Value + |num| may not be larger than BrotliGetRemainingBytes. The bit reader must be + warmed up again after this. 
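[Editor's note: a minimal usage sketch of this bit reader, built only from functions defined in this file; the wrapper name is hypothetical and not part of the vendored source. safeReadBits is used rather than readBits so the example never over-reads short inputs:

func exampleReadBits(data []byte) (lo uint32, hi uint32, ok bool) {
	var br bitReader
	br.input = data
	br.input_len = uint(len(data))
	initBitReader(&br)
	if !warmupBitReader(&br) {
		return 0, 0, false // no input available
	}
	// Read the low 4 bits of the stream, then the following 8 bits.
	if !safeReadBits(&br, 4, &lo) || !safeReadBits(&br, 8, &hi) {
		return 0, 0, false
	}
	return lo, hi, true
}

// exampleReadBits([]byte{0xAB, 0xCD}) yields lo == 0xB (the low nibble of
// 0xAB) and hi == 0xDA (the high nibble of 0xAB plus the low byte bits of
// 0xCD), since bits are consumed least-significant first, little-endian.
]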
*/ +func copyBytes(dest []byte, br *bitReader, num uint) { + for getAvailableBits(br) >= 8 && num > 0 { + dest[0] = byte(getBitsUnmasked(br)) + dropBits(br, 8) + dest = dest[1:] + num-- + } + + copy(dest, br.input[br.byte_pos:][:num]) + br.byte_pos += num +} + +func initBitReader(br *bitReader) { + br.val_ = 0 + br.bit_pos_ = 64 +} + +func warmupBitReader(br *bitReader) bool { + /* Fixing alignment after unaligned BrotliFillWindow would result accumulator + overflow. If unalignment is caused by BrotliSafeReadBits, then there is + enough space in accumulator to fix alignment. */ + if getAvailableBits(br) == 0 { + if !pullByte(br) { + return false + } + } + + return true +} diff --git a/vendor/github.com/andybalholm/brotli/block_splitter.go b/vendor/github.com/andybalholm/brotli/block_splitter.go new file mode 100644 index 00000000000..978a1314748 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/block_splitter.go @@ -0,0 +1,144 @@ +package brotli + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Block split point selection utilities. */ + +type blockSplit struct { + num_types uint + num_blocks uint + types []byte + lengths []uint32 + types_alloc_size uint + lengths_alloc_size uint +} + +const ( + kMaxLiteralHistograms uint = 100 + kMaxCommandHistograms uint = 50 + kLiteralBlockSwitchCost float64 = 28.1 + kCommandBlockSwitchCost float64 = 13.5 + kDistanceBlockSwitchCost float64 = 14.6 + kLiteralStrideLength uint = 70 + kCommandStrideLength uint = 40 + kSymbolsPerLiteralHistogram uint = 544 + kSymbolsPerCommandHistogram uint = 530 + kSymbolsPerDistanceHistogram uint = 544 + kMinLengthForBlockSplitting uint = 128 + kIterMulForRefining uint = 2 + kMinItersForRefining uint = 100 +) + +func countLiterals(cmds []command) uint { + var total_length uint = 0 + /* Count how many we have. */ + + for i := range cmds { + total_length += uint(cmds[i].insert_len_) + } + + return total_length +} + +func copyLiteralsToByteArray(cmds []command, data []byte, offset uint, mask uint, literals []byte) { + var pos uint = 0 + var from_pos uint = offset & mask + for i := range cmds { + var insert_len uint = uint(cmds[i].insert_len_) + if from_pos+insert_len > mask { + var head_size uint = mask + 1 - from_pos + copy(literals[pos:], data[from_pos:][:head_size]) + from_pos = 0 + pos += head_size + insert_len -= head_size + } + + if insert_len > 0 { + copy(literals[pos:], data[from_pos:][:insert_len]) + pos += insert_len + } + + from_pos = uint((uint32(from_pos+insert_len) + commandCopyLen(&cmds[i])) & uint32(mask)) + } +} + +func myRand(seed *uint32) uint32 { + /* Initial seed should be 7. In this case, loop length is (1 << 29). */ + *seed *= 16807 + + return *seed +} + +func bitCost(count uint) float64 { + if count == 0 { + return -2.0 + } else { + return fastLog2(count) + } +} + +const histogramsPerBatch = 64 + +const clustersPerBatch = 16 + +func initBlockSplit(self *blockSplit) { + self.num_types = 0 + self.num_blocks = 0 + self.types = self.types[:0] + self.lengths = self.lengths[:0] + self.types_alloc_size = 0 + self.lengths_alloc_size = 0 +} + +func splitBlock(cmds []command, data []byte, pos uint, mask uint, params *encoderParams, literal_split *blockSplit, insert_and_copy_split *blockSplit, dist_split *blockSplit) { + { + var literals_count uint = countLiterals(cmds) + var literals []byte = make([]byte, literals_count) + + /* Create a continuous array of literals. 
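[Editor's note: copyLiteralsToByteArray above has to handle ring-buffer wraparound: a read of n bytes starting at from_pos may split into a tail piece (up to the end of the ring) and a head piece (from its start). The same two-step copy with hypothetical names, as a standalone sketch assuming n does not exceed the ring size mask+1:

func ringCopy(dst, ring []byte, from, mask, n uint) {
	from &= mask
	if from+n > mask+1 {
		head := mask + 1 - from // bytes remaining before the ring wraps
		copy(dst, ring[from:][:head])
		copy(dst[head:], ring[:n-head]) // the rest comes from the start
		return
	}
	copy(dst, ring[from:][:n]) // no wrap: one contiguous copy
}
]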
*/ + copyLiteralsToByteArray(cmds, data, pos, mask, literals) + + /* Create the block split on the array of literals. + Literal histograms have alphabet size 256. */ + splitByteVectorLiteral(literals, literals_count, kSymbolsPerLiteralHistogram, kMaxLiteralHistograms, kLiteralStrideLength, kLiteralBlockSwitchCost, params, literal_split) + + literals = nil + } + { + var insert_and_copy_codes []uint16 = make([]uint16, len(cmds)) + /* Compute prefix codes for commands. */ + + for i := range cmds { + insert_and_copy_codes[i] = cmds[i].cmd_prefix_ + } + + /* Create the block split on the array of command prefixes. */ + splitByteVectorCommand(insert_and_copy_codes, kSymbolsPerCommandHistogram, kMaxCommandHistograms, kCommandStrideLength, kCommandBlockSwitchCost, params, insert_and_copy_split) + + /* TODO: reuse for distances? */ + + insert_and_copy_codes = nil + } + { + var distance_prefixes []uint16 = make([]uint16, len(cmds)) + var j uint = 0 + /* Create a continuous array of distance prefixes. */ + + for i := range cmds { + var cmd *command = &cmds[i] + if commandCopyLen(cmd) != 0 && cmd.cmd_prefix_ >= 128 { + distance_prefixes[j] = cmd.dist_prefix_ & 0x3FF + j++ + } + } + + /* Create the block split on the array of distance prefixes. */ + splitByteVectorDistance(distance_prefixes, j, kSymbolsPerDistanceHistogram, kMaxCommandHistograms, kCommandStrideLength, kDistanceBlockSwitchCost, params, dist_split) + + distance_prefixes = nil + } +} diff --git a/vendor/github.com/andybalholm/brotli/block_splitter_command.go b/vendor/github.com/andybalholm/brotli/block_splitter_command.go new file mode 100644 index 00000000000..9dec13e4d90 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/block_splitter_command.go @@ -0,0 +1,434 @@ +package brotli + +import "math" + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +func initialEntropyCodesCommand(data []uint16, length uint, stride uint, num_histograms uint, histograms []histogramCommand) { + var seed uint32 = 7 + var block_length uint = length / num_histograms + var i uint + clearHistogramsCommand(histograms, num_histograms) + for i = 0; i < num_histograms; i++ { + var pos uint = length * i / num_histograms + if i != 0 { + pos += uint(myRand(&seed) % uint32(block_length)) + } + + if pos+stride >= length { + pos = length - stride - 1 + } + + histogramAddVectorCommand(&histograms[i], data[pos:], stride) + } +} + +func randomSampleCommand(seed *uint32, data []uint16, length uint, stride uint, sample *histogramCommand) { + var pos uint = 0 + if stride >= length { + stride = length + } else { + pos = uint(myRand(seed) % uint32(length-stride+1)) + } + + histogramAddVectorCommand(sample, data[pos:], stride) +} + +func refineEntropyCodesCommand(data []uint16, length uint, stride uint, num_histograms uint, histograms []histogramCommand) { + var iters uint = kIterMulForRefining*length/stride + kMinItersForRefining + var seed uint32 = 7 + var iter uint + iters = ((iters + num_histograms - 1) / num_histograms) * num_histograms + for iter = 0; iter < iters; iter++ { + var sample histogramCommand + histogramClearCommand(&sample) + randomSampleCommand(&seed, data, length, stride, &sample) + histogramAddHistogramCommand(&histograms[iter%num_histograms], &sample) + } +} + +/* Assigns a block id from the range [0, num_histograms) to each data element + in data[0..length) and fills in block_id[0..length) with the assigned values. 
+ Returns the number of blocks, i.e. one plus the number of block switches. */ +func findBlocksCommand(data []uint16, length uint, block_switch_bitcost float64, num_histograms uint, histograms []histogramCommand, insert_cost []float64, cost []float64, switch_signal []byte, block_id []byte) uint { + var data_size uint = histogramDataSizeCommand() + var bitmaplen uint = (num_histograms + 7) >> 3 + var num_blocks uint = 1 + var i uint + var j uint + assert(num_histograms <= 256) + if num_histograms <= 1 { + for i = 0; i < length; i++ { + block_id[i] = 0 + } + + return 1 + } + + for i := 0; i < int(data_size*num_histograms); i++ { + insert_cost[i] = 0 + } + for i = 0; i < num_histograms; i++ { + insert_cost[i] = fastLog2(uint(uint32(histograms[i].total_count_))) + } + + for i = data_size; i != 0; { + i-- + for j = 0; j < num_histograms; j++ { + insert_cost[i*num_histograms+j] = insert_cost[j] - bitCost(uint(histograms[j].data_[i])) + } + } + + for i := 0; i < int(num_histograms); i++ { + cost[i] = 0 + } + for i := 0; i < int(length*bitmaplen); i++ { + switch_signal[i] = 0 + } + + /* After each iteration of this loop, cost[k] will contain the difference + between the minimum cost of arriving at the current byte position using + entropy code k, and the minimum cost of arriving at the current byte + position. This difference is capped at the block switch cost, and if it + reaches block switch cost, it means that when we trace back from the last + position, we need to switch here. */ + for i = 0; i < length; i++ { + var byte_ix uint = i + var ix uint = byte_ix * bitmaplen + var insert_cost_ix uint = uint(data[byte_ix]) * num_histograms + var min_cost float64 = 1e99 + var block_switch_cost float64 = block_switch_bitcost + var k uint + for k = 0; k < num_histograms; k++ { + /* We are coding the symbol in data[byte_ix] with entropy code k. */ + cost[k] += insert_cost[insert_cost_ix+k] + + if cost[k] < min_cost { + min_cost = cost[k] + block_id[byte_ix] = byte(k) + } + } + + /* More blocks for the beginning. */ + if byte_ix < 2000 { + block_switch_cost *= 0.77 + 0.07*float64(byte_ix)/2000 + } + + for k = 0; k < num_histograms; k++ { + cost[k] -= min_cost + if cost[k] >= block_switch_cost { + var mask byte = byte(1 << (k & 7)) + cost[k] = block_switch_cost + assert(k>>3 < bitmaplen) + switch_signal[ix+(k>>3)] |= mask + /* Trace back from the last position and switch at the marked places. 
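[Editor's note: this is a Viterbi-style dynamic program with the per-state penalty cost[k] capped at the block switch cost, so only a bitmap of "cap reached" flags needs to be kept per position; the traceback below then walks backwards, switching block ids only where the current id's bit is set. switch_signal is a length x bitmaplen bitmap; a hypothetical accessor, for clarity only, matching the indexing used here:

func switchBitSet(sig []byte, bitmaplen, i uint, k byte) bool {
	// Bit k of row i: byte i*bitmaplen + k/8, bit k%8.
	return sig[i*bitmaplen+uint(k>>3)]&(1<<(k&7)) != 0
}
]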
*/ + } + } + } + { + var byte_ix uint = length - 1 + var ix uint = byte_ix * bitmaplen + var cur_id byte = block_id[byte_ix] + for byte_ix > 0 { + var mask byte = byte(1 << (cur_id & 7)) + assert(uint(cur_id)>>3 < bitmaplen) + byte_ix-- + ix -= bitmaplen + if switch_signal[ix+uint(cur_id>>3)]&mask != 0 { + if cur_id != block_id[byte_ix] { + cur_id = block_id[byte_ix] + num_blocks++ + } + } + + block_id[byte_ix] = cur_id + } + } + + return num_blocks +} + +var remapBlockIdsCommand_kInvalidId uint16 = 256 + +func remapBlockIdsCommand(block_ids []byte, length uint, new_id []uint16, num_histograms uint) uint { + var next_id uint16 = 0 + var i uint + for i = 0; i < num_histograms; i++ { + new_id[i] = remapBlockIdsCommand_kInvalidId + } + + for i = 0; i < length; i++ { + assert(uint(block_ids[i]) < num_histograms) + if new_id[block_ids[i]] == remapBlockIdsCommand_kInvalidId { + new_id[block_ids[i]] = next_id + next_id++ + } + } + + for i = 0; i < length; i++ { + block_ids[i] = byte(new_id[block_ids[i]]) + assert(uint(block_ids[i]) < num_histograms) + } + + assert(uint(next_id) <= num_histograms) + return uint(next_id) +} + +func buildBlockHistogramsCommand(data []uint16, length uint, block_ids []byte, num_histograms uint, histograms []histogramCommand) { + var i uint + clearHistogramsCommand(histograms, num_histograms) + for i = 0; i < length; i++ { + histogramAddCommand(&histograms[block_ids[i]], uint(data[i])) + } +} + +var clusterBlocksCommand_kInvalidIndex uint32 = math.MaxUint32 + +func clusterBlocksCommand(data []uint16, length uint, num_blocks uint, block_ids []byte, split *blockSplit) { + var histogram_symbols []uint32 = make([]uint32, num_blocks) + var block_lengths []uint32 = make([]uint32, num_blocks) + var expected_num_clusters uint = clustersPerBatch * (num_blocks + histogramsPerBatch - 1) / histogramsPerBatch + var all_histograms_size uint = 0 + var all_histograms_capacity uint = expected_num_clusters + var all_histograms []histogramCommand = make([]histogramCommand, all_histograms_capacity) + var cluster_size_size uint = 0 + var cluster_size_capacity uint = expected_num_clusters + var cluster_size []uint32 = make([]uint32, cluster_size_capacity) + var num_clusters uint = 0 + var histograms []histogramCommand = make([]histogramCommand, brotli_min_size_t(num_blocks, histogramsPerBatch)) + var max_num_pairs uint = histogramsPerBatch * histogramsPerBatch / 2 + var pairs_capacity uint = max_num_pairs + 1 + var pairs []histogramPair = make([]histogramPair, pairs_capacity) + var pos uint = 0 + var clusters []uint32 + var num_final_clusters uint + var new_index []uint32 + var i uint + var sizes = [histogramsPerBatch]uint32{0} + var new_clusters = [histogramsPerBatch]uint32{0} + var symbols = [histogramsPerBatch]uint32{0} + var remap = [histogramsPerBatch]uint32{0} + + for i := 0; i < int(num_blocks); i++ { + block_lengths[i] = 0 + } + { + var block_idx uint = 0 + for i = 0; i < length; i++ { + assert(block_idx < num_blocks) + block_lengths[block_idx]++ + if i+1 == length || block_ids[i] != block_ids[i+1] { + block_idx++ + } + } + + assert(block_idx == num_blocks) + } + + for i = 0; i < num_blocks; i += histogramsPerBatch { + var num_to_combine uint = brotli_min_size_t(num_blocks-i, histogramsPerBatch) + var num_new_clusters uint + var j uint + for j = 0; j < num_to_combine; j++ { + var k uint + histogramClearCommand(&histograms[j]) + for k = 0; uint32(k) < block_lengths[i+j]; k++ { + histogramAddCommand(&histograms[j], uint(data[pos])) + pos++ + } + + histograms[j].bit_cost_ = 
populationCostCommand(&histograms[j]) + new_clusters[j] = uint32(j) + symbols[j] = uint32(j) + sizes[j] = 1 + } + + num_new_clusters = histogramCombineCommand(histograms, sizes[:], symbols[:], new_clusters[:], []histogramPair(pairs), num_to_combine, num_to_combine, histogramsPerBatch, max_num_pairs) + if all_histograms_capacity < (all_histograms_size + num_new_clusters) { + var _new_size uint + if all_histograms_capacity == 0 { + _new_size = all_histograms_size + num_new_clusters + } else { + _new_size = all_histograms_capacity + } + var new_array []histogramCommand + for _new_size < (all_histograms_size + num_new_clusters) { + _new_size *= 2 + } + new_array = make([]histogramCommand, _new_size) + if all_histograms_capacity != 0 { + copy(new_array, all_histograms[:all_histograms_capacity]) + } + + all_histograms = new_array + all_histograms_capacity = _new_size + } + + brotli_ensure_capacity_uint32_t(&cluster_size, &cluster_size_capacity, cluster_size_size+num_new_clusters) + for j = 0; j < num_new_clusters; j++ { + all_histograms[all_histograms_size] = histograms[new_clusters[j]] + all_histograms_size++ + cluster_size[cluster_size_size] = sizes[new_clusters[j]] + cluster_size_size++ + remap[new_clusters[j]] = uint32(j) + } + + for j = 0; j < num_to_combine; j++ { + histogram_symbols[i+j] = uint32(num_clusters) + remap[symbols[j]] + } + + num_clusters += num_new_clusters + assert(num_clusters == cluster_size_size) + assert(num_clusters == all_histograms_size) + } + + histograms = nil + + max_num_pairs = brotli_min_size_t(64*num_clusters, (num_clusters/2)*num_clusters) + if pairs_capacity < max_num_pairs+1 { + pairs = nil + pairs = make([]histogramPair, (max_num_pairs + 1)) + } + + clusters = make([]uint32, num_clusters) + for i = 0; i < num_clusters; i++ { + clusters[i] = uint32(i) + } + + num_final_clusters = histogramCombineCommand(all_histograms, cluster_size, histogram_symbols, clusters, pairs, num_clusters, num_blocks, maxNumberOfBlockTypes, max_num_pairs) + pairs = nil + cluster_size = nil + + new_index = make([]uint32, num_clusters) + for i = 0; i < num_clusters; i++ { + new_index[i] = clusterBlocksCommand_kInvalidIndex + } + pos = 0 + { + var next_index uint32 = 0 + for i = 0; i < num_blocks; i++ { + var histo histogramCommand + var j uint + var best_out uint32 + var best_bits float64 + histogramClearCommand(&histo) + for j = 0; uint32(j) < block_lengths[i]; j++ { + histogramAddCommand(&histo, uint(data[pos])) + pos++ + } + + if i == 0 { + best_out = histogram_symbols[0] + } else { + best_out = histogram_symbols[i-1] + } + best_bits = histogramBitCostDistanceCommand(&histo, &all_histograms[best_out]) + for j = 0; j < num_final_clusters; j++ { + var cur_bits float64 = histogramBitCostDistanceCommand(&histo, &all_histograms[clusters[j]]) + if cur_bits < best_bits { + best_bits = cur_bits + best_out = clusters[j] + } + } + + histogram_symbols[i] = best_out + if new_index[best_out] == clusterBlocksCommand_kInvalidIndex { + new_index[best_out] = next_index + next_index++ + } + } + } + + clusters = nil + all_histograms = nil + brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, num_blocks) + brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, num_blocks) + { + var cur_length uint32 = 0 + var block_idx uint = 0 + var max_type byte = 0 + for i = 0; i < num_blocks; i++ { + cur_length += block_lengths[i] + if i+1 == num_blocks || histogram_symbols[i] != histogram_symbols[i+1] { + var id byte = byte(new_index[histogram_symbols[i]]) + 
split.types[block_idx] = id + split.lengths[block_idx] = cur_length + max_type = brotli_max_uint8_t(max_type, id) + cur_length = 0 + block_idx++ + } + } + + split.num_blocks = block_idx + split.num_types = uint(max_type) + 1 + } + + new_index = nil + block_lengths = nil + histogram_symbols = nil +} + +func splitByteVectorCommand(data []uint16, literals_per_histogram uint, max_histograms uint, sampling_stride_length uint, block_switch_cost float64, params *encoderParams, split *blockSplit) { + length := uint(len(data)) + var data_size uint = histogramDataSizeCommand() + var num_histograms uint = length/literals_per_histogram + 1 + var histograms []histogramCommand + if num_histograms > max_histograms { + num_histograms = max_histograms + } + + if length == 0 { + split.num_types = 1 + return + } else if length < kMinLengthForBlockSplitting { + brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, split.num_blocks+1) + brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, split.num_blocks+1) + split.num_types = 1 + split.types[split.num_blocks] = 0 + split.lengths[split.num_blocks] = uint32(length) + split.num_blocks++ + return + } + + histograms = make([]histogramCommand, num_histograms) + + /* Find good entropy codes. */ + initialEntropyCodesCommand(data, length, sampling_stride_length, num_histograms, histograms) + + refineEntropyCodesCommand(data, length, sampling_stride_length, num_histograms, histograms) + { + var block_ids []byte = make([]byte, length) + var num_blocks uint = 0 + var bitmaplen uint = (num_histograms + 7) >> 3 + var insert_cost []float64 = make([]float64, (data_size * num_histograms)) + var cost []float64 = make([]float64, num_histograms) + var switch_signal []byte = make([]byte, (length * bitmaplen)) + var new_id []uint16 = make([]uint16, num_histograms) + var iters uint + if params.quality < hqZopflificationQuality { + iters = 3 + } else { + iters = 10 + } + /* Find a good path through literals with the good entropy codes. */ + + var i uint + for i = 0; i < iters; i++ { + num_blocks = findBlocksCommand(data, length, block_switch_cost, num_histograms, histograms, insert_cost, cost, switch_signal, block_ids) + num_histograms = remapBlockIdsCommand(block_ids, length, new_id, num_histograms) + buildBlockHistogramsCommand(data, length, block_ids, num_histograms, histograms) + } + + insert_cost = nil + cost = nil + switch_signal = nil + new_id = nil + histograms = nil + clusterBlocksCommand(data, length, num_blocks, block_ids, split) + block_ids = nil + } +} diff --git a/vendor/github.com/andybalholm/brotli/block_splitter_distance.go b/vendor/github.com/andybalholm/brotli/block_splitter_distance.go new file mode 100644 index 00000000000..953530d518e --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/block_splitter_distance.go @@ -0,0 +1,433 @@ +package brotli + +import "math" + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. 
+ See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +func initialEntropyCodesDistance(data []uint16, length uint, stride uint, num_histograms uint, histograms []histogramDistance) { + var seed uint32 = 7 + var block_length uint = length / num_histograms + var i uint + clearHistogramsDistance(histograms, num_histograms) + for i = 0; i < num_histograms; i++ { + var pos uint = length * i / num_histograms + if i != 0 { + pos += uint(myRand(&seed) % uint32(block_length)) + } + + if pos+stride >= length { + pos = length - stride - 1 + } + + histogramAddVectorDistance(&histograms[i], data[pos:], stride) + } +} + +func randomSampleDistance(seed *uint32, data []uint16, length uint, stride uint, sample *histogramDistance) { + var pos uint = 0 + if stride >= length { + stride = length + } else { + pos = uint(myRand(seed) % uint32(length-stride+1)) + } + + histogramAddVectorDistance(sample, data[pos:], stride) +} + +func refineEntropyCodesDistance(data []uint16, length uint, stride uint, num_histograms uint, histograms []histogramDistance) { + var iters uint = kIterMulForRefining*length/stride + kMinItersForRefining + var seed uint32 = 7 + var iter uint + iters = ((iters + num_histograms - 1) / num_histograms) * num_histograms + for iter = 0; iter < iters; iter++ { + var sample histogramDistance + histogramClearDistance(&sample) + randomSampleDistance(&seed, data, length, stride, &sample) + histogramAddHistogramDistance(&histograms[iter%num_histograms], &sample) + } +} + +/* Assigns a block id from the range [0, num_histograms) to each data element + in data[0..length) and fills in block_id[0..length) with the assigned values. + Returns the number of blocks, i.e. one plus the number of block switches. */ +func findBlocksDistance(data []uint16, length uint, block_switch_bitcost float64, num_histograms uint, histograms []histogramDistance, insert_cost []float64, cost []float64, switch_signal []byte, block_id []byte) uint { + var data_size uint = histogramDataSizeDistance() + var bitmaplen uint = (num_histograms + 7) >> 3 + var num_blocks uint = 1 + var i uint + var j uint + assert(num_histograms <= 256) + if num_histograms <= 1 { + for i = 0; i < length; i++ { + block_id[i] = 0 + } + + return 1 + } + + for i := 0; i < int(data_size*num_histograms); i++ { + insert_cost[i] = 0 + } + for i = 0; i < num_histograms; i++ { + insert_cost[i] = fastLog2(uint(uint32(histograms[i].total_count_))) + } + + for i = data_size; i != 0; { + i-- + for j = 0; j < num_histograms; j++ { + insert_cost[i*num_histograms+j] = insert_cost[j] - bitCost(uint(histograms[j].data_[i])) + } + } + + for i := 0; i < int(num_histograms); i++ { + cost[i] = 0 + } + for i := 0; i < int(length*bitmaplen); i++ { + switch_signal[i] = 0 + } + + /* After each iteration of this loop, cost[k] will contain the difference + between the minimum cost of arriving at the current byte position using + entropy code k, and the minimum cost of arriving at the current byte + position. This difference is capped at the block switch cost, and if it + reaches block switch cost, it means that when we trace back from the last + position, we need to switch here. */ + for i = 0; i < length; i++ { + var byte_ix uint = i + var ix uint = byte_ix * bitmaplen + var insert_cost_ix uint = uint(data[byte_ix]) * num_histograms + var min_cost float64 = 1e99 + var block_switch_cost float64 = block_switch_bitcost + var k uint + for k = 0; k < num_histograms; k++ { + /* We are coding the symbol in data[byte_ix] with entropy code k. 
*/ + cost[k] += insert_cost[insert_cost_ix+k] + + if cost[k] < min_cost { + min_cost = cost[k] + block_id[byte_ix] = byte(k) + } + } + + /* More blocks for the beginning. */ + if byte_ix < 2000 { + block_switch_cost *= 0.77 + 0.07*float64(byte_ix)/2000 + } + + for k = 0; k < num_histograms; k++ { + cost[k] -= min_cost + if cost[k] >= block_switch_cost { + var mask byte = byte(1 << (k & 7)) + cost[k] = block_switch_cost + assert(k>>3 < bitmaplen) + switch_signal[ix+(k>>3)] |= mask + /* Trace back from the last position and switch at the marked places. */ + } + } + } + { + var byte_ix uint = length - 1 + var ix uint = byte_ix * bitmaplen + var cur_id byte = block_id[byte_ix] + for byte_ix > 0 { + var mask byte = byte(1 << (cur_id & 7)) + assert(uint(cur_id)>>3 < bitmaplen) + byte_ix-- + ix -= bitmaplen + if switch_signal[ix+uint(cur_id>>3)]&mask != 0 { + if cur_id != block_id[byte_ix] { + cur_id = block_id[byte_ix] + num_blocks++ + } + } + + block_id[byte_ix] = cur_id + } + } + + return num_blocks +} + +var remapBlockIdsDistance_kInvalidId uint16 = 256 + +func remapBlockIdsDistance(block_ids []byte, length uint, new_id []uint16, num_histograms uint) uint { + var next_id uint16 = 0 + var i uint + for i = 0; i < num_histograms; i++ { + new_id[i] = remapBlockIdsDistance_kInvalidId + } + + for i = 0; i < length; i++ { + assert(uint(block_ids[i]) < num_histograms) + if new_id[block_ids[i]] == remapBlockIdsDistance_kInvalidId { + new_id[block_ids[i]] = next_id + next_id++ + } + } + + for i = 0; i < length; i++ { + block_ids[i] = byte(new_id[block_ids[i]]) + assert(uint(block_ids[i]) < num_histograms) + } + + assert(uint(next_id) <= num_histograms) + return uint(next_id) +} + +func buildBlockHistogramsDistance(data []uint16, length uint, block_ids []byte, num_histograms uint, histograms []histogramDistance) { + var i uint + clearHistogramsDistance(histograms, num_histograms) + for i = 0; i < length; i++ { + histogramAddDistance(&histograms[block_ids[i]], uint(data[i])) + } +} + +var clusterBlocksDistance_kInvalidIndex uint32 = math.MaxUint32 + +func clusterBlocksDistance(data []uint16, length uint, num_blocks uint, block_ids []byte, split *blockSplit) { + var histogram_symbols []uint32 = make([]uint32, num_blocks) + var block_lengths []uint32 = make([]uint32, num_blocks) + var expected_num_clusters uint = clustersPerBatch * (num_blocks + histogramsPerBatch - 1) / histogramsPerBatch + var all_histograms_size uint = 0 + var all_histograms_capacity uint = expected_num_clusters + var all_histograms []histogramDistance = make([]histogramDistance, all_histograms_capacity) + var cluster_size_size uint = 0 + var cluster_size_capacity uint = expected_num_clusters + var cluster_size []uint32 = make([]uint32, cluster_size_capacity) + var num_clusters uint = 0 + var histograms []histogramDistance = make([]histogramDistance, brotli_min_size_t(num_blocks, histogramsPerBatch)) + var max_num_pairs uint = histogramsPerBatch * histogramsPerBatch / 2 + var pairs_capacity uint = max_num_pairs + 1 + var pairs []histogramPair = make([]histogramPair, pairs_capacity) + var pos uint = 0 + var clusters []uint32 + var num_final_clusters uint + var new_index []uint32 + var i uint + var sizes = [histogramsPerBatch]uint32{0} + var new_clusters = [histogramsPerBatch]uint32{0} + var symbols = [histogramsPerBatch]uint32{0} + var remap = [histogramsPerBatch]uint32{0} + + for i := 0; i < int(num_blocks); i++ { + block_lengths[i] = 0 + } + { + var block_idx uint = 0 + for i = 0; i < length; i++ { + assert(block_idx < num_blocks) + 
block_lengths[block_idx]++ + if i+1 == length || block_ids[i] != block_ids[i+1] { + block_idx++ + } + } + + assert(block_idx == num_blocks) + } + + for i = 0; i < num_blocks; i += histogramsPerBatch { + var num_to_combine uint = brotli_min_size_t(num_blocks-i, histogramsPerBatch) + var num_new_clusters uint + var j uint + for j = 0; j < num_to_combine; j++ { + var k uint + histogramClearDistance(&histograms[j]) + for k = 0; uint32(k) < block_lengths[i+j]; k++ { + histogramAddDistance(&histograms[j], uint(data[pos])) + pos++ + } + + histograms[j].bit_cost_ = populationCostDistance(&histograms[j]) + new_clusters[j] = uint32(j) + symbols[j] = uint32(j) + sizes[j] = 1 + } + + num_new_clusters = histogramCombineDistance(histograms, sizes[:], symbols[:], new_clusters[:], []histogramPair(pairs), num_to_combine, num_to_combine, histogramsPerBatch, max_num_pairs) + if all_histograms_capacity < (all_histograms_size + num_new_clusters) { + var _new_size uint + if all_histograms_capacity == 0 { + _new_size = all_histograms_size + num_new_clusters + } else { + _new_size = all_histograms_capacity + } + var new_array []histogramDistance + for _new_size < (all_histograms_size + num_new_clusters) { + _new_size *= 2 + } + new_array = make([]histogramDistance, _new_size) + if all_histograms_capacity != 0 { + copy(new_array, all_histograms[:all_histograms_capacity]) + } + + all_histograms = new_array + all_histograms_capacity = _new_size + } + + brotli_ensure_capacity_uint32_t(&cluster_size, &cluster_size_capacity, cluster_size_size+num_new_clusters) + for j = 0; j < num_new_clusters; j++ { + all_histograms[all_histograms_size] = histograms[new_clusters[j]] + all_histograms_size++ + cluster_size[cluster_size_size] = sizes[new_clusters[j]] + cluster_size_size++ + remap[new_clusters[j]] = uint32(j) + } + + for j = 0; j < num_to_combine; j++ { + histogram_symbols[i+j] = uint32(num_clusters) + remap[symbols[j]] + } + + num_clusters += num_new_clusters + assert(num_clusters == cluster_size_size) + assert(num_clusters == all_histograms_size) + } + + histograms = nil + + max_num_pairs = brotli_min_size_t(64*num_clusters, (num_clusters/2)*num_clusters) + if pairs_capacity < max_num_pairs+1 { + pairs = nil + pairs = make([]histogramPair, (max_num_pairs + 1)) + } + + clusters = make([]uint32, num_clusters) + for i = 0; i < num_clusters; i++ { + clusters[i] = uint32(i) + } + + num_final_clusters = histogramCombineDistance(all_histograms, cluster_size, histogram_symbols, clusters, pairs, num_clusters, num_blocks, maxNumberOfBlockTypes, max_num_pairs) + pairs = nil + cluster_size = nil + + new_index = make([]uint32, num_clusters) + for i = 0; i < num_clusters; i++ { + new_index[i] = clusterBlocksDistance_kInvalidIndex + } + pos = 0 + { + var next_index uint32 = 0 + for i = 0; i < num_blocks; i++ { + var histo histogramDistance + var j uint + var best_out uint32 + var best_bits float64 + histogramClearDistance(&histo) + for j = 0; uint32(j) < block_lengths[i]; j++ { + histogramAddDistance(&histo, uint(data[pos])) + pos++ + } + + if i == 0 { + best_out = histogram_symbols[0] + } else { + best_out = histogram_symbols[i-1] + } + best_bits = histogramBitCostDistanceDistance(&histo, &all_histograms[best_out]) + for j = 0; j < num_final_clusters; j++ { + var cur_bits float64 = histogramBitCostDistanceDistance(&histo, &all_histograms[clusters[j]]) + if cur_bits < best_bits { + best_bits = cur_bits + best_out = clusters[j] + } + } + + histogram_symbols[i] = best_out + if new_index[best_out] == clusterBlocksDistance_kInvalidIndex { 
+ new_index[best_out] = next_index + next_index++ + } + } + } + + clusters = nil + all_histograms = nil + brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, num_blocks) + brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, num_blocks) + { + var cur_length uint32 = 0 + var block_idx uint = 0 + var max_type byte = 0 + for i = 0; i < num_blocks; i++ { + cur_length += block_lengths[i] + if i+1 == num_blocks || histogram_symbols[i] != histogram_symbols[i+1] { + var id byte = byte(new_index[histogram_symbols[i]]) + split.types[block_idx] = id + split.lengths[block_idx] = cur_length + max_type = brotli_max_uint8_t(max_type, id) + cur_length = 0 + block_idx++ + } + } + + split.num_blocks = block_idx + split.num_types = uint(max_type) + 1 + } + + new_index = nil + block_lengths = nil + histogram_symbols = nil +} + +func splitByteVectorDistance(data []uint16, length uint, literals_per_histogram uint, max_histograms uint, sampling_stride_length uint, block_switch_cost float64, params *encoderParams, split *blockSplit) { + var data_size uint = histogramDataSizeDistance() + var num_histograms uint = length/literals_per_histogram + 1 + var histograms []histogramDistance + if num_histograms > max_histograms { + num_histograms = max_histograms + } + + if length == 0 { + split.num_types = 1 + return + } else if length < kMinLengthForBlockSplitting { + brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, split.num_blocks+1) + brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, split.num_blocks+1) + split.num_types = 1 + split.types[split.num_blocks] = 0 + split.lengths[split.num_blocks] = uint32(length) + split.num_blocks++ + return + } + + histograms = make([]histogramDistance, num_histograms) + + /* Find good entropy codes. */ + initialEntropyCodesDistance(data, length, sampling_stride_length, num_histograms, histograms) + + refineEntropyCodesDistance(data, length, sampling_stride_length, num_histograms, histograms) + { + var block_ids []byte = make([]byte, length) + var num_blocks uint = 0 + var bitmaplen uint = (num_histograms + 7) >> 3 + var insert_cost []float64 = make([]float64, (data_size * num_histograms)) + var cost []float64 = make([]float64, num_histograms) + var switch_signal []byte = make([]byte, (length * bitmaplen)) + var new_id []uint16 = make([]uint16, num_histograms) + var iters uint + if params.quality < hqZopflificationQuality { + iters = 3 + } else { + iters = 10 + } + /* Find a good path through literals with the good entropy codes. */ + + var i uint + for i = 0; i < iters; i++ { + num_blocks = findBlocksDistance(data, length, block_switch_cost, num_histograms, histograms, insert_cost, cost, switch_signal, block_ids) + num_histograms = remapBlockIdsDistance(block_ids, length, new_id, num_histograms) + buildBlockHistogramsDistance(data, length, block_ids, num_histograms, histograms) + } + + insert_cost = nil + cost = nil + switch_signal = nil + new_id = nil + histograms = nil + clusterBlocksDistance(data, length, num_blocks, block_ids, split) + block_ids = nil + } +} diff --git a/vendor/github.com/andybalholm/brotli/block_splitter_literal.go b/vendor/github.com/andybalholm/brotli/block_splitter_literal.go new file mode 100644 index 00000000000..1c895cf3889 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/block_splitter_literal.go @@ -0,0 +1,433 @@ +package brotli + +import "math" + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. 
+ See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +func initialEntropyCodesLiteral(data []byte, length uint, stride uint, num_histograms uint, histograms []histogramLiteral) { + var seed uint32 = 7 + var block_length uint = length / num_histograms + var i uint + clearHistogramsLiteral(histograms, num_histograms) + for i = 0; i < num_histograms; i++ { + var pos uint = length * i / num_histograms + if i != 0 { + pos += uint(myRand(&seed) % uint32(block_length)) + } + + if pos+stride >= length { + pos = length - stride - 1 + } + + histogramAddVectorLiteral(&histograms[i], data[pos:], stride) + } +} + +func randomSampleLiteral(seed *uint32, data []byte, length uint, stride uint, sample *histogramLiteral) { + var pos uint = 0 + if stride >= length { + stride = length + } else { + pos = uint(myRand(seed) % uint32(length-stride+1)) + } + + histogramAddVectorLiteral(sample, data[pos:], stride) +} + +func refineEntropyCodesLiteral(data []byte, length uint, stride uint, num_histograms uint, histograms []histogramLiteral) { + var iters uint = kIterMulForRefining*length/stride + kMinItersForRefining + var seed uint32 = 7 + var iter uint + iters = ((iters + num_histograms - 1) / num_histograms) * num_histograms + for iter = 0; iter < iters; iter++ { + var sample histogramLiteral + histogramClearLiteral(&sample) + randomSampleLiteral(&seed, data, length, stride, &sample) + histogramAddHistogramLiteral(&histograms[iter%num_histograms], &sample) + } +} + +/* Assigns a block id from the range [0, num_histograms) to each data element + in data[0..length) and fills in block_id[0..length) with the assigned values. + Returns the number of blocks, i.e. one plus the number of block switches. */ +func findBlocksLiteral(data []byte, length uint, block_switch_bitcost float64, num_histograms uint, histograms []histogramLiteral, insert_cost []float64, cost []float64, switch_signal []byte, block_id []byte) uint { + var data_size uint = histogramDataSizeLiteral() + var bitmaplen uint = (num_histograms + 7) >> 3 + var num_blocks uint = 1 + var i uint + var j uint + assert(num_histograms <= 256) + if num_histograms <= 1 { + for i = 0; i < length; i++ { + block_id[i] = 0 + } + + return 1 + } + + for i := 0; i < int(data_size*num_histograms); i++ { + insert_cost[i] = 0 + } + for i = 0; i < num_histograms; i++ { + insert_cost[i] = fastLog2(uint(uint32(histograms[i].total_count_))) + } + + for i = data_size; i != 0; { + i-- + for j = 0; j < num_histograms; j++ { + insert_cost[i*num_histograms+j] = insert_cost[j] - bitCost(uint(histograms[j].data_[i])) + } + } + + for i := 0; i < int(num_histograms); i++ { + cost[i] = 0 + } + for i := 0; i < int(length*bitmaplen); i++ { + switch_signal[i] = 0 + } + + /* After each iteration of this loop, cost[k] will contain the difference + between the minimum cost of arriving at the current byte position using + entropy code k, and the minimum cost of arriving at the current byte + position. This difference is capped at the block switch cost, and if it + reaches block switch cost, it means that when we trace back from the last + position, we need to switch here. */ + for i = 0; i < length; i++ { + var byte_ix uint = i + var ix uint = byte_ix * bitmaplen + var insert_cost_ix uint = uint(data[byte_ix]) * num_histograms + var min_cost float64 = 1e99 + var block_switch_cost float64 = block_switch_bitcost + var k uint + for k = 0; k < num_histograms; k++ { + /* We are coding the symbol in data[byte_ix] with entropy code k. 
*/ + cost[k] += insert_cost[insert_cost_ix+k] + + if cost[k] < min_cost { + min_cost = cost[k] + block_id[byte_ix] = byte(k) + } + } + + /* More blocks for the beginning. */ + if byte_ix < 2000 { + block_switch_cost *= 0.77 + 0.07*float64(byte_ix)/2000 + } + + for k = 0; k < num_histograms; k++ { + cost[k] -= min_cost + if cost[k] >= block_switch_cost { + var mask byte = byte(1 << (k & 7)) + cost[k] = block_switch_cost + assert(k>>3 < bitmaplen) + switch_signal[ix+(k>>3)] |= mask + /* Trace back from the last position and switch at the marked places. */ + } + } + } + { + var byte_ix uint = length - 1 + var ix uint = byte_ix * bitmaplen + var cur_id byte = block_id[byte_ix] + for byte_ix > 0 { + var mask byte = byte(1 << (cur_id & 7)) + assert(uint(cur_id)>>3 < bitmaplen) + byte_ix-- + ix -= bitmaplen + if switch_signal[ix+uint(cur_id>>3)]&mask != 0 { + if cur_id != block_id[byte_ix] { + cur_id = block_id[byte_ix] + num_blocks++ + } + } + + block_id[byte_ix] = cur_id + } + } + + return num_blocks +} + +var remapBlockIdsLiteral_kInvalidId uint16 = 256 + +func remapBlockIdsLiteral(block_ids []byte, length uint, new_id []uint16, num_histograms uint) uint { + var next_id uint16 = 0 + var i uint + for i = 0; i < num_histograms; i++ { + new_id[i] = remapBlockIdsLiteral_kInvalidId + } + + for i = 0; i < length; i++ { + assert(uint(block_ids[i]) < num_histograms) + if new_id[block_ids[i]] == remapBlockIdsLiteral_kInvalidId { + new_id[block_ids[i]] = next_id + next_id++ + } + } + + for i = 0; i < length; i++ { + block_ids[i] = byte(new_id[block_ids[i]]) + assert(uint(block_ids[i]) < num_histograms) + } + + assert(uint(next_id) <= num_histograms) + return uint(next_id) +} + +func buildBlockHistogramsLiteral(data []byte, length uint, block_ids []byte, num_histograms uint, histograms []histogramLiteral) { + var i uint + clearHistogramsLiteral(histograms, num_histograms) + for i = 0; i < length; i++ { + histogramAddLiteral(&histograms[block_ids[i]], uint(data[i])) + } +} + +var clusterBlocksLiteral_kInvalidIndex uint32 = math.MaxUint32 + +func clusterBlocksLiteral(data []byte, length uint, num_blocks uint, block_ids []byte, split *blockSplit) { + var histogram_symbols []uint32 = make([]uint32, num_blocks) + var block_lengths []uint32 = make([]uint32, num_blocks) + var expected_num_clusters uint = clustersPerBatch * (num_blocks + histogramsPerBatch - 1) / histogramsPerBatch + var all_histograms_size uint = 0 + var all_histograms_capacity uint = expected_num_clusters + var all_histograms []histogramLiteral = make([]histogramLiteral, all_histograms_capacity) + var cluster_size_size uint = 0 + var cluster_size_capacity uint = expected_num_clusters + var cluster_size []uint32 = make([]uint32, cluster_size_capacity) + var num_clusters uint = 0 + var histograms []histogramLiteral = make([]histogramLiteral, brotli_min_size_t(num_blocks, histogramsPerBatch)) + var max_num_pairs uint = histogramsPerBatch * histogramsPerBatch / 2 + var pairs_capacity uint = max_num_pairs + 1 + var pairs []histogramPair = make([]histogramPair, pairs_capacity) + var pos uint = 0 + var clusters []uint32 + var num_final_clusters uint + var new_index []uint32 + var i uint + var sizes = [histogramsPerBatch]uint32{0} + var new_clusters = [histogramsPerBatch]uint32{0} + var symbols = [histogramsPerBatch]uint32{0} + var remap = [histogramsPerBatch]uint32{0} + + for i := 0; i < int(num_blocks); i++ { + block_lengths[i] = 0 + } + { + var block_idx uint = 0 + for i = 0; i < length; i++ { + assert(block_idx < num_blocks) + 
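+			/* Accumulate the length of the run of positions sharing one block id;
+			   a change of id (or the end of the data) closes the current block. */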
block_lengths[block_idx]++ + if i+1 == length || block_ids[i] != block_ids[i+1] { + block_idx++ + } + } + + assert(block_idx == num_blocks) + } + + for i = 0; i < num_blocks; i += histogramsPerBatch { + var num_to_combine uint = brotli_min_size_t(num_blocks-i, histogramsPerBatch) + var num_new_clusters uint + var j uint + for j = 0; j < num_to_combine; j++ { + var k uint + histogramClearLiteral(&histograms[j]) + for k = 0; uint32(k) < block_lengths[i+j]; k++ { + histogramAddLiteral(&histograms[j], uint(data[pos])) + pos++ + } + + histograms[j].bit_cost_ = populationCostLiteral(&histograms[j]) + new_clusters[j] = uint32(j) + symbols[j] = uint32(j) + sizes[j] = 1 + } + + num_new_clusters = histogramCombineLiteral(histograms, sizes[:], symbols[:], new_clusters[:], []histogramPair(pairs), num_to_combine, num_to_combine, histogramsPerBatch, max_num_pairs) + if all_histograms_capacity < (all_histograms_size + num_new_clusters) { + var _new_size uint + if all_histograms_capacity == 0 { + _new_size = all_histograms_size + num_new_clusters + } else { + _new_size = all_histograms_capacity + } + var new_array []histogramLiteral + for _new_size < (all_histograms_size + num_new_clusters) { + _new_size *= 2 + } + new_array = make([]histogramLiteral, _new_size) + if all_histograms_capacity != 0 { + copy(new_array, all_histograms[:all_histograms_capacity]) + } + + all_histograms = new_array + all_histograms_capacity = _new_size + } + + brotli_ensure_capacity_uint32_t(&cluster_size, &cluster_size_capacity, cluster_size_size+num_new_clusters) + for j = 0; j < num_new_clusters; j++ { + all_histograms[all_histograms_size] = histograms[new_clusters[j]] + all_histograms_size++ + cluster_size[cluster_size_size] = sizes[new_clusters[j]] + cluster_size_size++ + remap[new_clusters[j]] = uint32(j) + } + + for j = 0; j < num_to_combine; j++ { + histogram_symbols[i+j] = uint32(num_clusters) + remap[symbols[j]] + } + + num_clusters += num_new_clusters + assert(num_clusters == cluster_size_size) + assert(num_clusters == all_histograms_size) + } + + histograms = nil + + max_num_pairs = brotli_min_size_t(64*num_clusters, (num_clusters/2)*num_clusters) + if pairs_capacity < max_num_pairs+1 { + pairs = nil + pairs = make([]histogramPair, (max_num_pairs + 1)) + } + + clusters = make([]uint32, num_clusters) + for i = 0; i < num_clusters; i++ { + clusters[i] = uint32(i) + } + + num_final_clusters = histogramCombineLiteral(all_histograms, cluster_size, histogram_symbols, clusters, pairs, num_clusters, num_blocks, maxNumberOfBlockTypes, max_num_pairs) + pairs = nil + cluster_size = nil + + new_index = make([]uint32, num_clusters) + for i = 0; i < num_clusters; i++ { + new_index[i] = clusterBlocksLiteral_kInvalidIndex + } + pos = 0 + { + var next_index uint32 = 0 + for i = 0; i < num_blocks; i++ { + var histo histogramLiteral + var j uint + var best_out uint32 + var best_bits float64 + histogramClearLiteral(&histo) + for j = 0; uint32(j) < block_lengths[i]; j++ { + histogramAddLiteral(&histo, uint(data[pos])) + pos++ + } + + if i == 0 { + best_out = histogram_symbols[0] + } else { + best_out = histogram_symbols[i-1] + } + best_bits = histogramBitCostDistanceLiteral(&histo, &all_histograms[best_out]) + for j = 0; j < num_final_clusters; j++ { + var cur_bits float64 = histogramBitCostDistanceLiteral(&histo, &all_histograms[clusters[j]]) + if cur_bits < best_bits { + best_bits = cur_bits + best_out = clusters[j] + } + } + + histogram_symbols[i] = best_out + if new_index[best_out] == clusterBlocksLiteral_kInvalidIndex { + 
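+				/* First block mapped to this cluster: hand out the next dense
+				   index so the final block types are numbered consecutively. */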
new_index[best_out] = next_index + next_index++ + } + } + } + + clusters = nil + all_histograms = nil + brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, num_blocks) + brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, num_blocks) + { + var cur_length uint32 = 0 + var block_idx uint = 0 + var max_type byte = 0 + for i = 0; i < num_blocks; i++ { + cur_length += block_lengths[i] + if i+1 == num_blocks || histogram_symbols[i] != histogram_symbols[i+1] { + var id byte = byte(new_index[histogram_symbols[i]]) + split.types[block_idx] = id + split.lengths[block_idx] = cur_length + max_type = brotli_max_uint8_t(max_type, id) + cur_length = 0 + block_idx++ + } + } + + split.num_blocks = block_idx + split.num_types = uint(max_type) + 1 + } + + new_index = nil + block_lengths = nil + histogram_symbols = nil +} + +func splitByteVectorLiteral(data []byte, length uint, literals_per_histogram uint, max_histograms uint, sampling_stride_length uint, block_switch_cost float64, params *encoderParams, split *blockSplit) { + var data_size uint = histogramDataSizeLiteral() + var num_histograms uint = length/literals_per_histogram + 1 + var histograms []histogramLiteral + if num_histograms > max_histograms { + num_histograms = max_histograms + } + + if length == 0 { + split.num_types = 1 + return + } else if length < kMinLengthForBlockSplitting { + brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, split.num_blocks+1) + brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, split.num_blocks+1) + split.num_types = 1 + split.types[split.num_blocks] = 0 + split.lengths[split.num_blocks] = uint32(length) + split.num_blocks++ + return + } + + histograms = make([]histogramLiteral, num_histograms) + + /* Find good entropy codes. */ + initialEntropyCodesLiteral(data, length, sampling_stride_length, num_histograms, histograms) + + refineEntropyCodesLiteral(data, length, sampling_stride_length, num_histograms, histograms) + { + var block_ids []byte = make([]byte, length) + var num_blocks uint = 0 + var bitmaplen uint = (num_histograms + 7) >> 3 + var insert_cost []float64 = make([]float64, (data_size * num_histograms)) + var cost []float64 = make([]float64, num_histograms) + var switch_signal []byte = make([]byte, (length * bitmaplen)) + var new_id []uint16 = make([]uint16, num_histograms) + var iters uint + if params.quality < hqZopflificationQuality { + iters = 3 + } else { + iters = 10 + } + /* Find a good path through literals with the good entropy codes. 
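+	   Each pass below reassigns block ids against the current histograms and
+	   then rebuilds the histograms from that assignment, so the segmentation
+	   and the entropy codes refine each other.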
*/ + + var i uint + for i = 0; i < iters; i++ { + num_blocks = findBlocksLiteral(data, length, block_switch_cost, num_histograms, histograms, insert_cost, cost, switch_signal, block_ids) + num_histograms = remapBlockIdsLiteral(block_ids, length, new_id, num_histograms) + buildBlockHistogramsLiteral(data, length, block_ids, num_histograms, histograms) + } + + insert_cost = nil + cost = nil + switch_signal = nil + new_id = nil + histograms = nil + clusterBlocksLiteral(data, length, num_blocks, block_ids, split) + block_ids = nil + } +} diff --git a/vendor/github.com/andybalholm/brotli/brotli_bit_stream.go b/vendor/github.com/andybalholm/brotli/brotli_bit_stream.go new file mode 100644 index 00000000000..7acfb180616 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/brotli_bit_stream.go @@ -0,0 +1,1300 @@ +package brotli + +import ( + "math" + "sync" +) + +const maxHuffmanTreeSize = (2*numCommandSymbols + 1) + +/* The maximum size of Huffman dictionary for distances assuming that + NPOSTFIX = 0 and NDIRECT = 0. */ +const maxSimpleDistanceAlphabetSize = 140 + +/* Represents the range of values belonging to a prefix code: + [offset, offset + 2^nbits) */ +type prefixCodeRange struct { + offset uint32 + nbits uint32 +} + +var kBlockLengthPrefixCode = [numBlockLenSymbols]prefixCodeRange{ + prefixCodeRange{1, 2}, + prefixCodeRange{5, 2}, + prefixCodeRange{9, 2}, + prefixCodeRange{13, 2}, + prefixCodeRange{17, 3}, + prefixCodeRange{25, 3}, + prefixCodeRange{33, 3}, + prefixCodeRange{41, 3}, + prefixCodeRange{49, 4}, + prefixCodeRange{65, 4}, + prefixCodeRange{81, 4}, + prefixCodeRange{97, 4}, + prefixCodeRange{113, 5}, + prefixCodeRange{145, 5}, + prefixCodeRange{177, 5}, + prefixCodeRange{209, 5}, + prefixCodeRange{241, 6}, + prefixCodeRange{305, 6}, + prefixCodeRange{369, 7}, + prefixCodeRange{497, 8}, + prefixCodeRange{753, 9}, + prefixCodeRange{1265, 10}, + prefixCodeRange{2289, 11}, + prefixCodeRange{4337, 12}, + prefixCodeRange{8433, 13}, + prefixCodeRange{16625, 24}, +} + +func blockLengthPrefixCode(len uint32) uint32 { + var code uint32 + if len >= 177 { + if len >= 753 { + code = 20 + } else { + code = 14 + } + } else if len >= 41 { + code = 7 + } else { + code = 0 + } + for code < (numBlockLenSymbols-1) && len >= kBlockLengthPrefixCode[code+1].offset { + code++ + } + return code +} + +func getBlockLengthPrefixCode(len uint32, code *uint, n_extra *uint32, extra *uint32) { + *code = uint(blockLengthPrefixCode(uint32(len))) + *n_extra = kBlockLengthPrefixCode[*code].nbits + *extra = len - kBlockLengthPrefixCode[*code].offset +} + +type blockTypeCodeCalculator struct { + last_type uint + second_last_type uint +} + +func initBlockTypeCodeCalculator(self *blockTypeCodeCalculator) { + self.last_type = 1 + self.second_last_type = 0 +} + +func nextBlockTypeCode(calculator *blockTypeCodeCalculator, type_ byte) uint { + var type_code uint + if uint(type_) == calculator.last_type+1 { + type_code = 1 + } else if uint(type_) == calculator.second_last_type { + type_code = 0 + } else { + type_code = uint(type_) + 2 + } + calculator.second_last_type = calculator.last_type + calculator.last_type = uint(type_) + return type_code +} + +/* |nibblesbits| represents the 2 bits to encode MNIBBLES (0-3) + REQUIRES: length > 0 + REQUIRES: length <= (1 << 24) */ +func encodeMlen(length uint, bits *uint64, numbits *uint, nibblesbits *uint64) { + var lg uint + if length == 1 { + lg = 1 + } else { + lg = uint(log2FloorNonZero(uint(uint32(length-1)))) + 1 + } + var tmp uint + if lg < 16 { + tmp = 16 + } else { + 
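+		/* lg >= 16: round the bit width up to a whole number of nibbles,
+		   i.e. MNIBBLES = (lg + 3) / 4. */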
+		tmp = (lg + 3)
+	}
+	var mnibbles uint = tmp / 4
+	assert(length > 0)
+	assert(length <= 1<<24)
+	assert(lg <= 24)
+	*nibblesbits = uint64(mnibbles) - 4
+	*numbits = mnibbles * 4
+	*bits = uint64(length) - 1
+}
+
+func storeCommandExtra(cmd *command, storage_ix *uint, storage []byte) {
+	var copylen_code uint32 = commandCopyLenCode(cmd)
+	var inscode uint16 = getInsertLengthCode(uint(cmd.insert_len_))
+	var copycode uint16 = getCopyLengthCode(uint(copylen_code))
+	var insnumextra uint32 = getInsertExtra(inscode)
+	var insextraval uint64 = uint64(cmd.insert_len_) - uint64(getInsertBase(inscode))
+	var copyextraval uint64 = uint64(copylen_code) - uint64(getCopyBase(copycode))
+	var bits uint64 = copyextraval<<insnumextra | insextraval
+	writeBits(uint(insnumextra+getCopyExtra(copycode)), bits, storage_ix, storage)
+}
+
+/* Data structure that stores everything that is needed to encode each block
+   switch command. */
+type blockSplitCode struct {
+	type_code_calculator blockTypeCodeCalculator
+	type_depths          [maxBlockTypeSymbols]byte
+	type_bits            [maxBlockTypeSymbols]uint16
+	length_depths        [numBlockLenSymbols]byte
+	length_bits          [numBlockLenSymbols]uint16
+}
+
+/* Stores a number between 0 and 255. */
+func storeVarLenUint8(n uint, storage_ix *uint, storage []byte) {
+	if n == 0 {
+		writeBits(1, 0, storage_ix, storage)
+	} else {
+		var nbits uint = uint(log2FloorNonZero(n))
+		writeBits(1, 1, storage_ix, storage)
+		writeBits(3, uint64(nbits), storage_ix, storage)
+		writeBits(nbits, uint64(n)-(uint64(uint(1))<<nbits), storage_ix, storage)
+	}
+}
+
+/* Stores the compressed meta-block header.
+   REQUIRES: length > 0
+   REQUIRES: length <= (1 << 24) */
+func storeCompressedMetaBlockHeader(is_final_block bool, length uint, storage_ix *uint, storage []byte) {
+	var lenbits uint64
+	var nlenbits uint
+	var nibblesbits uint64
+	var is_final uint64
+	if is_final_block {
+		is_final = 1
+	} else {
+		is_final = 0
+	}
+
+	/* Write ISLAST bit. */
+	writeBits(1, is_final, storage_ix, storage)
+
+	/* Write ISEMPTY bit. */
+	if is_final_block {
+		writeBits(1, 0, storage_ix, storage)
+	}
+
+	encodeMlen(length, &lenbits, &nlenbits, &nibblesbits)
+	writeBits(2, nibblesbits, storage_ix, storage)
+	writeBits(nlenbits, lenbits, storage_ix, storage)
+
+	if !is_final_block {
+		/* Write ISUNCOMPRESSED bit. */
+		writeBits(1, 0, storage_ix, storage)
+	}
+}
+
+/* Stores the uncompressed meta-block header.
+   REQUIRES: length > 0
+   REQUIRES: length <= (1 << 24) */
+func storeUncompressedMetaBlockHeader(length uint, storage_ix *uint, storage []byte) {
+	var lenbits uint64
+	var nlenbits uint
+	var nibblesbits uint64
+
+	/* Write ISLAST bit.
+	   Uncompressed block cannot be the last one, so set to 0. */
+	writeBits(1, 0, storage_ix, storage)
+
+	encodeMlen(length, &lenbits, &nlenbits, &nibblesbits)
+	writeBits(2, nibblesbits, storage_ix, storage)
+	writeBits(nlenbits, lenbits, storage_ix, storage)
+
+	/* Write ISUNCOMPRESSED bit. */
+	writeBits(1, 1, storage_ix, storage)
+}
+
+var storeHuffmanTreeOfHuffmanTreeToBitMask_kStorageOrder = [codeLengthCodes]byte{1, 2, 3, 4, 0, 5, 17, 6, 16, 7, 8, 9, 10, 11, 12, 13, 14, 15}
+
+var storeHuffmanTreeOfHuffmanTreeToBitMask_kHuffmanBitLengthHuffmanCodeSymbols = [6]byte{0, 7, 3, 2, 1, 15}
+var storeHuffmanTreeOfHuffmanTreeToBitMask_kHuffmanBitLengthHuffmanCodeBitLengths = [6]byte{2, 4, 3, 2, 2, 4}
+
+func storeHuffmanTreeOfHuffmanTreeToBitMask(num_codes int, code_length_bitdepth []byte, storage_ix *uint, storage []byte) {
+	var skip_some uint = 0
+	var codes_to_store uint = codeLengthCodes
+	/* The bit lengths of the Huffman code over the code length alphabet
+	   are compressed with the following static Huffman code:
+	     Symbol   Code
+	     ------   ----
+	     0          00
+	     1        1110
+	     2         110
+	     3          01
+	     4          10
+	     5        1111 */
+
+	/* Throw away trailing zeros: */
+	if num_codes > 1 {
+		for ; codes_to_store > 0; codes_to_store-- {
+			if code_length_bitdepth[storeHuffmanTreeOfHuffmanTreeToBitMask_kStorageOrder[codes_to_store-1]] != 0 {
+				break
+			}
+		}
+	}
+
+	if code_length_bitdepth[storeHuffmanTreeOfHuffmanTreeToBitMask_kStorageOrder[0]] == 0 && code_length_bitdepth[storeHuffmanTreeOfHuffmanTreeToBitMask_kStorageOrder[1]] == 0 {
+		skip_some = 2 /* skips two. */
+		if code_length_bitdepth[storeHuffmanTreeOfHuffmanTreeToBitMask_kStorageOrder[2]] == 0 {
+			skip_some = 3 /* skips three.
*/ + } + } + + writeBits(2, uint64(skip_some), storage_ix, storage) + { + var i uint + for i = skip_some; i < codes_to_store; i++ { + var l uint = uint(code_length_bitdepth[storeHuffmanTreeOfHuffmanTreeToBitMask_kStorageOrder[i]]) + writeBits(uint(storeHuffmanTreeOfHuffmanTreeToBitMask_kHuffmanBitLengthHuffmanCodeBitLengths[l]), uint64(storeHuffmanTreeOfHuffmanTreeToBitMask_kHuffmanBitLengthHuffmanCodeSymbols[l]), storage_ix, storage) + } + } +} + +func storeHuffmanTreeToBitMask(huffman_tree_size uint, huffman_tree []byte, huffman_tree_extra_bits []byte, code_length_bitdepth []byte, code_length_bitdepth_symbols []uint16, storage_ix *uint, storage []byte) { + var i uint + for i = 0; i < huffman_tree_size; i++ { + var ix uint = uint(huffman_tree[i]) + writeBits(uint(code_length_bitdepth[ix]), uint64(code_length_bitdepth_symbols[ix]), storage_ix, storage) + + /* Extra bits */ + switch ix { + case repeatPreviousCodeLength: + writeBits(2, uint64(huffman_tree_extra_bits[i]), storage_ix, storage) + + case repeatZeroCodeLength: + writeBits(3, uint64(huffman_tree_extra_bits[i]), storage_ix, storage) + } + } +} + +func storeSimpleHuffmanTree(depths []byte, symbols []uint, num_symbols uint, max_bits uint, storage_ix *uint, storage []byte) { + /* value of 1 indicates a simple Huffman code */ + writeBits(2, 1, storage_ix, storage) + + writeBits(2, uint64(num_symbols)-1, storage_ix, storage) /* NSYM - 1 */ + { + /* Sort */ + var i uint + for i = 0; i < num_symbols; i++ { + var j uint + for j = i + 1; j < num_symbols; j++ { + if depths[symbols[j]] < depths[symbols[i]] { + var tmp uint = symbols[j] + symbols[j] = symbols[i] + symbols[i] = tmp + } + } + } + } + + if num_symbols == 2 { + writeBits(max_bits, uint64(symbols[0]), storage_ix, storage) + writeBits(max_bits, uint64(symbols[1]), storage_ix, storage) + } else if num_symbols == 3 { + writeBits(max_bits, uint64(symbols[0]), storage_ix, storage) + writeBits(max_bits, uint64(symbols[1]), storage_ix, storage) + writeBits(max_bits, uint64(symbols[2]), storage_ix, storage) + } else { + writeBits(max_bits, uint64(symbols[0]), storage_ix, storage) + writeBits(max_bits, uint64(symbols[1]), storage_ix, storage) + writeBits(max_bits, uint64(symbols[2]), storage_ix, storage) + writeBits(max_bits, uint64(symbols[3]), storage_ix, storage) + + /* tree-select */ + var tmp int + if depths[symbols[0]] == 1 { + tmp = 1 + } else { + tmp = 0 + } + writeBits(1, uint64(tmp), storage_ix, storage) + } +} + +/* num = alphabet size + depths = symbol depths */ +func storeHuffmanTree(depths []byte, num uint, tree []huffmanTree, storage_ix *uint, storage []byte) { + var huffman_tree [numCommandSymbols]byte + var huffman_tree_extra_bits [numCommandSymbols]byte + var huffman_tree_size uint = 0 + var code_length_bitdepth = [codeLengthCodes]byte{0} + var code_length_bitdepth_symbols [codeLengthCodes]uint16 + var huffman_tree_histogram = [codeLengthCodes]uint32{0} + var i uint + var num_codes int = 0 + /* Write the Huffman tree into the brotli-representation. + The command alphabet is the largest, so this allocation will fit all + alphabets. */ + + var code uint = 0 + + assert(num <= numCommandSymbols) + + writeHuffmanTree(depths, num, &huffman_tree_size, huffman_tree[:], huffman_tree_extra_bits[:]) + + /* Calculate the statistics of the Huffman tree in brotli-representation. 
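+	   If only one distinct code-length code occurs (num_codes == 1), its bit
+	   depth is cleared again below before the tree body is stored.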
*/ + for i = 0; i < huffman_tree_size; i++ { + huffman_tree_histogram[huffman_tree[i]]++ + } + + for i = 0; i < codeLengthCodes; i++ { + if huffman_tree_histogram[i] != 0 { + if num_codes == 0 { + code = i + num_codes = 1 + } else if num_codes == 1 { + num_codes = 2 + break + } + } + } + + /* Calculate another Huffman tree to use for compressing both the + earlier Huffman tree with. */ + createHuffmanTree(huffman_tree_histogram[:], codeLengthCodes, 5, tree, code_length_bitdepth[:]) + + convertBitDepthsToSymbols(code_length_bitdepth[:], codeLengthCodes, code_length_bitdepth_symbols[:]) + + /* Now, we have all the data, let's start storing it */ + storeHuffmanTreeOfHuffmanTreeToBitMask(num_codes, code_length_bitdepth[:], storage_ix, storage) + + if num_codes == 1 { + code_length_bitdepth[code] = 0 + } + + /* Store the real Huffman tree now. */ + storeHuffmanTreeToBitMask(huffman_tree_size, huffman_tree[:], huffman_tree_extra_bits[:], code_length_bitdepth[:], code_length_bitdepth_symbols[:], storage_ix, storage) +} + +/* Builds a Huffman tree from histogram[0:length] into depth[0:length] and + bits[0:length] and stores the encoded tree to the bit stream. */ +func buildAndStoreHuffmanTree(histogram []uint32, histogram_length uint, alphabet_size uint, tree []huffmanTree, depth []byte, bits []uint16, storage_ix *uint, storage []byte) { + var count uint = 0 + var s4 = [4]uint{0} + var i uint + var max_bits uint = 0 + for i = 0; i < histogram_length; i++ { + if histogram[i] != 0 { + if count < 4 { + s4[count] = i + } else if count > 4 { + break + } + + count++ + } + } + { + var max_bits_counter uint = alphabet_size - 1 + for max_bits_counter != 0 { + max_bits_counter >>= 1 + max_bits++ + } + } + + if count <= 1 { + writeBits(4, 1, storage_ix, storage) + writeBits(max_bits, uint64(s4[0]), storage_ix, storage) + depth[s4[0]] = 0 + bits[s4[0]] = 0 + return + } + + for i := 0; i < int(histogram_length); i++ { + depth[i] = 0 + } + createHuffmanTree(histogram, histogram_length, 15, tree, depth) + convertBitDepthsToSymbols(depth, histogram_length, bits) + + if count <= 4 { + storeSimpleHuffmanTree(depth, s4[:], count, max_bits, storage_ix, storage) + } else { + storeHuffmanTree(depth, histogram_length, tree, storage_ix, storage) + } +} + +func sortHuffmanTree1(v0 huffmanTree, v1 huffmanTree) bool { + return v0.total_count_ < v1.total_count_ +} + +var huffmanTreePool sync.Pool + +func buildAndStoreHuffmanTreeFast(histogram []uint32, histogram_total uint, max_bits uint, depth []byte, bits []uint16, storage_ix *uint, storage []byte) { + var count uint = 0 + var symbols = [4]uint{0} + var length uint = 0 + var total uint = histogram_total + for total != 0 { + if histogram[length] != 0 { + if count < 4 { + symbols[count] = length + } + + count++ + total -= uint(histogram[length]) + } + + length++ + } + + if count <= 1 { + writeBits(4, 1, storage_ix, storage) + writeBits(max_bits, uint64(symbols[0]), storage_ix, storage) + depth[symbols[0]] = 0 + bits[symbols[0]] = 0 + return + } + + for i := 0; i < int(length); i++ { + depth[i] = 0 + } + { + var max_tree_size uint = 2*length + 1 + tree, _ := huffmanTreePool.Get().(*[]huffmanTree) + if tree == nil || cap(*tree) < int(max_tree_size) { + tmp := make([]huffmanTree, max_tree_size) + tree = &tmp + } else { + *tree = (*tree)[:max_tree_size] + } + var count_limit uint32 + for count_limit = 1; ; count_limit *= 2 { + var node int = 0 + var l uint + for l = length; l != 0; { + l-- + if histogram[l] != 0 { + if histogram[l] >= count_limit { + 
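+					/* A leaf with at least count_limit keeps its true count; rarer
+					   symbols are clamped to count_limit in the else branch so the
+					   tree can be packed into the 14-bit depth limit checked by
+					   setDepth below. */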
initHuffmanTree(&(*tree)[node:][0], histogram[l], -1, int16(l)) + } else { + initHuffmanTree(&(*tree)[node:][0], count_limit, -1, int16(l)) + } + + node++ + } + } + { + var n int = node + /* Points to the next leaf node. */ /* Points to the next non-leaf node. */ + var sentinel huffmanTree + var i int = 0 + var j int = n + 1 + var k int + + sortHuffmanTreeItems(*tree, uint(n), huffmanTreeComparator(sortHuffmanTree1)) + + /* The nodes are: + [0, n): the sorted leaf nodes that we start with. + [n]: we add a sentinel here. + [n + 1, 2n): new parent nodes are added here, starting from + (n+1). These are naturally in ascending order. + [2n]: we add a sentinel at the end as well. + There will be (2n+1) elements at the end. */ + initHuffmanTree(&sentinel, math.MaxUint32, -1, -1) + + (*tree)[node] = sentinel + node++ + (*tree)[node] = sentinel + node++ + + for k = n - 1; k > 0; k-- { + var left int + var right int + if (*tree)[i].total_count_ <= (*tree)[j].total_count_ { + left = i + i++ + } else { + left = j + j++ + } + + if (*tree)[i].total_count_ <= (*tree)[j].total_count_ { + right = i + i++ + } else { + right = j + j++ + } + + /* The sentinel node becomes the parent node. */ + (*tree)[node-1].total_count_ = (*tree)[left].total_count_ + (*tree)[right].total_count_ + + (*tree)[node-1].index_left_ = int16(left) + (*tree)[node-1].index_right_or_value_ = int16(right) + + /* Add back the last sentinel node. */ + (*tree)[node] = sentinel + node++ + } + + if setDepth(2*n-1, *tree, depth, 14) { + /* We need to pack the Huffman tree in 14 bits. If this was not + successful, add fake entities to the lowest values and retry. */ + break + } + } + } + + huffmanTreePool.Put(tree) + } + + convertBitDepthsToSymbols(depth, length, bits) + if count <= 4 { + var i uint + + /* value of 1 indicates a simple Huffman code */ + writeBits(2, 1, storage_ix, storage) + + writeBits(2, uint64(count)-1, storage_ix, storage) /* NSYM - 1 */ + + /* Sort */ + for i = 0; i < count; i++ { + var j uint + for j = i + 1; j < count; j++ { + if depth[symbols[j]] < depth[symbols[i]] { + var tmp uint = symbols[j] + symbols[j] = symbols[i] + symbols[i] = tmp + } + } + } + + if count == 2 { + writeBits(max_bits, uint64(symbols[0]), storage_ix, storage) + writeBits(max_bits, uint64(symbols[1]), storage_ix, storage) + } else if count == 3 { + writeBits(max_bits, uint64(symbols[0]), storage_ix, storage) + writeBits(max_bits, uint64(symbols[1]), storage_ix, storage) + writeBits(max_bits, uint64(symbols[2]), storage_ix, storage) + } else { + writeBits(max_bits, uint64(symbols[0]), storage_ix, storage) + writeBits(max_bits, uint64(symbols[1]), storage_ix, storage) + writeBits(max_bits, uint64(symbols[2]), storage_ix, storage) + writeBits(max_bits, uint64(symbols[3]), storage_ix, storage) + + /* tree-select */ + var tmp int + if depth[symbols[0]] == 1 { + tmp = 1 + } else { + tmp = 0 + } + writeBits(1, uint64(tmp), storage_ix, storage) + } + } else { + var previous_value byte = 8 + var i uint + + /* Complex Huffman Tree */ + storeStaticCodeLengthCode(storage_ix, storage) + + /* Actual RLE coding. 
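+		   Zero depths are emitted with dedicated zero-repeat codes; a non-zero
+		   depth is written once, and the rest of its run is either spelled out
+		   literally (fewer than three repeats) or folded into a repeat code.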
*/ + for i = 0; i < length; { + var value byte = depth[i] + var reps uint = 1 + var k uint + for k = i + 1; k < length && depth[k] == value; k++ { + reps++ + } + + i += reps + if value == 0 { + writeBits(uint(kZeroRepsDepth[reps]), kZeroRepsBits[reps], storage_ix, storage) + } else { + if previous_value != value { + writeBits(uint(kCodeLengthDepth[value]), uint64(kCodeLengthBits[value]), storage_ix, storage) + reps-- + } + + if reps < 3 { + for reps != 0 { + reps-- + writeBits(uint(kCodeLengthDepth[value]), uint64(kCodeLengthBits[value]), storage_ix, storage) + } + } else { + reps -= 3 + writeBits(uint(kNonZeroRepsDepth[reps]), kNonZeroRepsBits[reps], storage_ix, storage) + } + + previous_value = value + } + } + } +} + +func indexOf(v []byte, v_size uint, value byte) uint { + var i uint = 0 + for ; i < v_size; i++ { + if v[i] == value { + return i + } + } + + return i +} + +func moveToFront(v []byte, index uint) { + var value byte = v[index] + var i uint + for i = index; i != 0; i-- { + v[i] = v[i-1] + } + + v[0] = value +} + +func moveToFrontTransform(v_in []uint32, v_size uint, v_out []uint32) { + var i uint + var mtf [256]byte + var max_value uint32 + if v_size == 0 { + return + } + + max_value = v_in[0] + for i = 1; i < v_size; i++ { + if v_in[i] > max_value { + max_value = v_in[i] + } + } + + assert(max_value < 256) + for i = 0; uint32(i) <= max_value; i++ { + mtf[i] = byte(i) + } + { + var mtf_size uint = uint(max_value + 1) + for i = 0; i < v_size; i++ { + var index uint = indexOf(mtf[:], mtf_size, byte(v_in[i])) + assert(index < mtf_size) + v_out[i] = uint32(index) + moveToFront(mtf[:], index) + } + } +} + +/* Finds runs of zeros in v[0..in_size) and replaces them with a prefix code of + the run length plus extra bits (lower 9 bits is the prefix code and the rest + are the extra bits). Non-zero values in v[] are shifted by + *max_length_prefix. Will not create prefix codes bigger than the initial + value of *max_run_length_prefix. The prefix code of run length L is simply + Log2Floor(L) and the number of extra bits is the same as the prefix code. 
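+   For example, a run of 10 zeros (assuming 10 < 2 << max prefix) is coded
+   as prefix Log2Floor(10) = 3 with extra bits 10 - 2^3 = 2, packed as
+   3 + 2<<9.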
+*/
+func runLengthCodeZeros(in_size uint, v []uint32, out_size *uint, max_run_length_prefix *uint32) {
+	var max_reps uint32 = 0
+	var i uint
+	var max_prefix uint32
+	for i = 0; i < in_size; {
+		var reps uint32 = 0
+		for ; i < in_size && v[i] != 0; i++ {
+		}
+		for ; i < in_size && v[i] == 0; i++ {
+			reps++
+		}
+
+		max_reps = brotli_max_uint32_t(reps, max_reps)
+	}
+
+	if max_reps > 0 {
+		max_prefix = log2FloorNonZero(uint(max_reps))
+	} else {
+		max_prefix = 0
+	}
+	max_prefix = brotli_min_uint32_t(max_prefix, *max_run_length_prefix)
+	*max_run_length_prefix = max_prefix
+	*out_size = 0
+	for i = 0; i < in_size; {
+		assert(*out_size <= i)
+		if v[i] != 0 {
+			v[*out_size] = v[i] + *max_run_length_prefix
+			i++
+			(*out_size)++
+		} else {
+			var reps uint32 = 1
+			var k uint
+			for k = i + 1; k < in_size && v[k] == 0; k++ {
+				reps++
+			}
+
+			i += uint(reps)
+			for reps != 0 {
+				if reps < 2<<max_prefix {
+					var run_length_prefix uint32 = log2FloorNonZero(uint(reps))
+					var extra_bits uint32 = reps - (1 << run_length_prefix)
+					v[*out_size] = run_length_prefix + extra_bits<<9
+					(*out_size)++
+					break
+				} else {
+					var extra_bits uint32 = (1 << max_prefix) - 1
+					v[*out_size] = max_prefix + extra_bits<<9
+					reps -= (2 << max_prefix) - 1
+					(*out_size)++
+				}
+			}
+		}
+	}
+}
+
+const symbolBits = 9
+
+var encodeContextMap_kSymbolMask uint32 = (1 << symbolBits) - 1
+
+func encodeContextMap(context_map []uint32, context_map_size uint, num_clusters uint, tree []huffmanTree, storage_ix *uint, storage []byte) {
+	var i uint
+	var rle_symbols []uint32
+	var max_run_length_prefix uint32 = 6
+	var num_rle_symbols uint = 0
+	var histogram [maxContextMapSymbols]uint32
+	var depths [maxContextMapSymbols]byte
+	var bits [maxContextMapSymbols]uint16
+
+	storeVarLenUint8(num_clusters-1, storage_ix, storage)
+
+	if num_clusters == 1 {
+		return
+	}
+
+	rle_symbols = make([]uint32, context_map_size)
+	moveToFrontTransform(context_map, context_map_size, rle_symbols)
+	runLengthCodeZeros(context_map_size, rle_symbols, &num_rle_symbols, &max_run_length_prefix)
+	histogram = [maxContextMapSymbols]uint32{}
+	for i = 0; i < num_rle_symbols; i++ {
+		histogram[rle_symbols[i]&encodeContextMap_kSymbolMask]++
+	}
+	{
+		var use_rle bool = (max_run_length_prefix > 0)
+		writeSingleBit(use_rle, storage_ix, storage)
+		if use_rle {
+			writeBits(4, uint64(max_run_length_prefix)-1, storage_ix, storage)
+		}
+	}
+
+	buildAndStoreHuffmanTree(histogram[:], uint(uint32(num_clusters)+max_run_length_prefix), uint(uint32(num_clusters)+max_run_length_prefix), tree, depths[:], bits[:], storage_ix, storage)
+	for i = 0; i < num_rle_symbols; i++ {
+		var rle_symbol uint32 = rle_symbols[i] & encodeContextMap_kSymbolMask
+		var extra_bits_val uint32 = rle_symbols[i] >> symbolBits
+		writeBits(uint(depths[rle_symbol]), uint64(bits[rle_symbol]), storage_ix, storage)
+		if rle_symbol > 0 && rle_symbol <= max_run_length_prefix {
+			writeBits(uint(rle_symbol), uint64(extra_bits_val), storage_ix, storage)
+		}
+	}
+
+	writeBits(1, 1, storage_ix, storage) /* use move-to-front */
+	rle_symbols = nil
+}
+
+/* Stores the block switch command with index block_ix to the bit stream. */
+func storeBlockSwitch(code *blockSplitCode, block_len uint32, block_type byte, is_first_block bool, storage_ix *uint, storage []byte) {
+	var typecode uint = nextBlockTypeCode(&code.type_code_calculator, block_type)
+	var lencode uint
+	var len_nextra uint32
+	var len_extra uint32
+	if !is_first_block {
+		writeBits(uint(code.type_depths[typecode]), uint64(code.type_bits[typecode]), storage_ix, storage)
+	}
+
+	getBlockLengthPrefixCode(block_len, &lencode, &len_nextra, &len_extra)
+
+	writeBits(uint(code.length_depths[lencode]), uint64(code.length_bits[lencode]), storage_ix, storage)
+	writeBits(uint(len_nextra), uint64(len_extra), storage_ix, storage)
+}
+
+/* Builds a BlockSplitCode data structure from the block split given by the
+   vector of block types and block lengths and stores it to the bit stream. */
+func buildAndStoreBlockSplitCode(types []byte, lengths []uint32, num_blocks uint, num_types uint, tree []huffmanTree, code *blockSplitCode, storage_ix *uint, storage []byte) {
+	var type_histo [maxBlockTypeSymbols]uint32
+	var length_histo [numBlockLenSymbols]uint32
+	var i uint
+	var type_code_calculator blockTypeCodeCalculator
+	for i := 0; i < int(num_types+2); i++ {
+		type_histo[i] = 0
+	}
+	length_histo = [numBlockLenSymbols]uint32{}
+	initBlockTypeCodeCalculator(&type_code_calculator)
+	for i = 0; i < num_blocks; i++ {
+		var type_code uint = nextBlockTypeCode(&type_code_calculator, types[i])
+		if i != 0 {
+			type_histo[type_code]++
+		}
+		length_histo[blockLengthPrefixCode(lengths[i])]++
+	}
+
+	storeVarLenUint8(num_types-1, storage_ix, storage)
+	if num_types > 1 { /* TODO: else? could StoreBlockSwitch occur?
*/ + buildAndStoreHuffmanTree(type_histo[0:], num_types+2, num_types+2, tree, code.type_depths[0:], code.type_bits[0:], storage_ix, storage) + buildAndStoreHuffmanTree(length_histo[0:], numBlockLenSymbols, numBlockLenSymbols, tree, code.length_depths[0:], code.length_bits[0:], storage_ix, storage) + storeBlockSwitch(code, lengths[0], types[0], true, storage_ix, storage) + } +} + +/* Stores a context map where the histogram type is always the block type. */ +func storeTrivialContextMap(num_types uint, context_bits uint, tree []huffmanTree, storage_ix *uint, storage []byte) { + storeVarLenUint8(num_types-1, storage_ix, storage) + if num_types > 1 { + var repeat_code uint = context_bits - 1 + var repeat_bits uint = (1 << repeat_code) - 1 + var alphabet_size uint = num_types + repeat_code + var histogram [maxContextMapSymbols]uint32 + var depths [maxContextMapSymbols]byte + var bits [maxContextMapSymbols]uint16 + var i uint + for i := 0; i < int(alphabet_size); i++ { + histogram[i] = 0 + } + + /* Write RLEMAX. */ + writeBits(1, 1, storage_ix, storage) + + writeBits(4, uint64(repeat_code)-1, storage_ix, storage) + histogram[repeat_code] = uint32(num_types) + histogram[0] = 1 + for i = context_bits; i < alphabet_size; i++ { + histogram[i] = 1 + } + + buildAndStoreHuffmanTree(histogram[:], alphabet_size, alphabet_size, tree, depths[:], bits[:], storage_ix, storage) + for i = 0; i < num_types; i++ { + var tmp uint + if i == 0 { + tmp = 0 + } else { + tmp = i + context_bits - 1 + } + var code uint = tmp + writeBits(uint(depths[code]), uint64(bits[code]), storage_ix, storage) + writeBits(uint(depths[repeat_code]), uint64(bits[repeat_code]), storage_ix, storage) + writeBits(repeat_code, uint64(repeat_bits), storage_ix, storage) + } + + /* Write IMTF (inverse-move-to-front) bit. */ + writeBits(1, 1, storage_ix, storage) + } +} + +/* Manages the encoding of one block category (literal, command or distance). */ +type blockEncoder struct { + histogram_length_ uint + num_block_types_ uint + block_types_ []byte + block_lengths_ []uint32 + num_blocks_ uint + block_split_code_ blockSplitCode + block_ix_ uint + block_len_ uint + entropy_ix_ uint + depths_ []byte + bits_ []uint16 +} + +var blockEncoderPool sync.Pool + +func getBlockEncoder(histogram_length uint, num_block_types uint, block_types []byte, block_lengths []uint32, num_blocks uint) *blockEncoder { + self, _ := blockEncoderPool.Get().(*blockEncoder) + + if self != nil { + self.block_ix_ = 0 + self.entropy_ix_ = 0 + self.depths_ = self.depths_[:0] + self.bits_ = self.bits_[:0] + } else { + self = &blockEncoder{} + } + + self.histogram_length_ = histogram_length + self.num_block_types_ = num_block_types + self.block_types_ = block_types + self.block_lengths_ = block_lengths + self.num_blocks_ = num_blocks + initBlockTypeCodeCalculator(&self.block_split_code_.type_code_calculator) + if num_blocks == 0 { + self.block_len_ = 0 + } else { + self.block_len_ = uint(block_lengths[0]) + } + + return self +} + +func cleanupBlockEncoder(self *blockEncoder) { + blockEncoderPool.Put(self) +} + +/* Creates entropy codes of block lengths and block types and stores them + to the bit stream. */ +func buildAndStoreBlockSwitchEntropyCodes(self *blockEncoder, tree []huffmanTree, storage_ix *uint, storage []byte) { + buildAndStoreBlockSplitCode(self.block_types_, self.block_lengths_, self.num_blocks_, self.num_block_types_, tree, &self.block_split_code_, storage_ix, storage) +} + +/* Stores the next symbol with the entropy code of the current block type. 
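+   When the remaining length of the current block reaches zero, the switch
+   command for the next block is emitted before the symbol itself.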
+ Updates the block type and block length at block boundaries. */ +func storeSymbol(self *blockEncoder, symbol uint, storage_ix *uint, storage []byte) { + if self.block_len_ == 0 { + self.block_ix_++ + var block_ix uint = self.block_ix_ + var block_len uint32 = self.block_lengths_[block_ix] + var block_type byte = self.block_types_[block_ix] + self.block_len_ = uint(block_len) + self.entropy_ix_ = uint(block_type) * self.histogram_length_ + storeBlockSwitch(&self.block_split_code_, block_len, block_type, false, storage_ix, storage) + } + + self.block_len_-- + { + var ix uint = self.entropy_ix_ + symbol + writeBits(uint(self.depths_[ix]), uint64(self.bits_[ix]), storage_ix, storage) + } +} + +/* Stores the next symbol with the entropy code of the current block type and + context value. + Updates the block type and block length at block boundaries. */ +func storeSymbolWithContext(self *blockEncoder, symbol uint, context uint, context_map []uint32, storage_ix *uint, storage []byte, context_bits uint) { + if self.block_len_ == 0 { + self.block_ix_++ + var block_ix uint = self.block_ix_ + var block_len uint32 = self.block_lengths_[block_ix] + var block_type byte = self.block_types_[block_ix] + self.block_len_ = uint(block_len) + self.entropy_ix_ = uint(block_type) << context_bits + storeBlockSwitch(&self.block_split_code_, block_len, block_type, false, storage_ix, storage) + } + + self.block_len_-- + { + var histo_ix uint = uint(context_map[self.entropy_ix_+context]) + var ix uint = histo_ix*self.histogram_length_ + symbol + writeBits(uint(self.depths_[ix]), uint64(self.bits_[ix]), storage_ix, storage) + } +} + +func buildAndStoreEntropyCodesLiteral(self *blockEncoder, histograms []histogramLiteral, histograms_size uint, alphabet_size uint, tree []huffmanTree, storage_ix *uint, storage []byte) { + var table_size uint = histograms_size * self.histogram_length_ + if cap(self.depths_) < int(table_size) { + self.depths_ = make([]byte, table_size) + } else { + self.depths_ = self.depths_[:table_size] + } + if cap(self.bits_) < int(table_size) { + self.bits_ = make([]uint16, table_size) + } else { + self.bits_ = self.bits_[:table_size] + } + { + var i uint + for i = 0; i < histograms_size; i++ { + var ix uint = i * self.histogram_length_ + buildAndStoreHuffmanTree(histograms[i].data_[0:], self.histogram_length_, alphabet_size, tree, self.depths_[ix:], self.bits_[ix:], storage_ix, storage) + } + } +} + +func buildAndStoreEntropyCodesCommand(self *blockEncoder, histograms []histogramCommand, histograms_size uint, alphabet_size uint, tree []huffmanTree, storage_ix *uint, storage []byte) { + var table_size uint = histograms_size * self.histogram_length_ + if cap(self.depths_) < int(table_size) { + self.depths_ = make([]byte, table_size) + } else { + self.depths_ = self.depths_[:table_size] + } + if cap(self.bits_) < int(table_size) { + self.bits_ = make([]uint16, table_size) + } else { + self.bits_ = self.bits_[:table_size] + } + { + var i uint + for i = 0; i < histograms_size; i++ { + var ix uint = i * self.histogram_length_ + buildAndStoreHuffmanTree(histograms[i].data_[0:], self.histogram_length_, alphabet_size, tree, self.depths_[ix:], self.bits_[ix:], storage_ix, storage) + } + } +} + +func buildAndStoreEntropyCodesDistance(self *blockEncoder, histograms []histogramDistance, histograms_size uint, alphabet_size uint, tree []huffmanTree, storage_ix *uint, storage []byte) { + var table_size uint = histograms_size * self.histogram_length_ + if cap(self.depths_) < int(table_size) { + self.depths_ = 
make([]byte, table_size) + } else { + self.depths_ = self.depths_[:table_size] + } + if cap(self.bits_) < int(table_size) { + self.bits_ = make([]uint16, table_size) + } else { + self.bits_ = self.bits_[:table_size] + } + { + var i uint + for i = 0; i < histograms_size; i++ { + var ix uint = i * self.histogram_length_ + buildAndStoreHuffmanTree(histograms[i].data_[0:], self.histogram_length_, alphabet_size, tree, self.depths_[ix:], self.bits_[ix:], storage_ix, storage) + } + } +} + +func jumpToByteBoundary(storage_ix *uint, storage []byte) { + *storage_ix = (*storage_ix + 7) &^ 7 + storage[*storage_ix>>3] = 0 +} + +func storeMetaBlock(input []byte, start_pos uint, length uint, mask uint, prev_byte byte, prev_byte2 byte, is_last bool, params *encoderParams, literal_context_mode int, commands []command, mb *metaBlockSplit, storage_ix *uint, storage []byte) { + var pos uint = start_pos + var i uint + var num_distance_symbols uint32 = params.dist.alphabet_size + var num_effective_distance_symbols uint32 = num_distance_symbols + var tree []huffmanTree + var literal_context_lut contextLUT = getContextLUT(literal_context_mode) + var dist *distanceParams = ¶ms.dist + if params.large_window && num_effective_distance_symbols > numHistogramDistanceSymbols { + num_effective_distance_symbols = numHistogramDistanceSymbols + } + + storeCompressedMetaBlockHeader(is_last, length, storage_ix, storage) + + tree = make([]huffmanTree, maxHuffmanTreeSize) + literal_enc := getBlockEncoder(numLiteralSymbols, mb.literal_split.num_types, mb.literal_split.types, mb.literal_split.lengths, mb.literal_split.num_blocks) + command_enc := getBlockEncoder(numCommandSymbols, mb.command_split.num_types, mb.command_split.types, mb.command_split.lengths, mb.command_split.num_blocks) + distance_enc := getBlockEncoder(uint(num_effective_distance_symbols), mb.distance_split.num_types, mb.distance_split.types, mb.distance_split.lengths, mb.distance_split.num_blocks) + + buildAndStoreBlockSwitchEntropyCodes(literal_enc, tree, storage_ix, storage) + buildAndStoreBlockSwitchEntropyCodes(command_enc, tree, storage_ix, storage) + buildAndStoreBlockSwitchEntropyCodes(distance_enc, tree, storage_ix, storage) + + writeBits(2, uint64(dist.distance_postfix_bits), storage_ix, storage) + writeBits(4, uint64(dist.num_direct_distance_codes)>>dist.distance_postfix_bits, storage_ix, storage) + for i = 0; i < mb.literal_split.num_types; i++ { + writeBits(2, uint64(literal_context_mode), storage_ix, storage) + } + + if mb.literal_context_map_size == 0 { + storeTrivialContextMap(mb.literal_histograms_size, literalContextBits, tree, storage_ix, storage) + } else { + encodeContextMap(mb.literal_context_map, mb.literal_context_map_size, mb.literal_histograms_size, tree, storage_ix, storage) + } + + if mb.distance_context_map_size == 0 { + storeTrivialContextMap(mb.distance_histograms_size, distanceContextBits, tree, storage_ix, storage) + } else { + encodeContextMap(mb.distance_context_map, mb.distance_context_map_size, mb.distance_histograms_size, tree, storage_ix, storage) + } + + buildAndStoreEntropyCodesLiteral(literal_enc, mb.literal_histograms, mb.literal_histograms_size, numLiteralSymbols, tree, storage_ix, storage) + buildAndStoreEntropyCodesCommand(command_enc, mb.command_histograms, mb.command_histograms_size, numCommandSymbols, tree, storage_ix, storage) + buildAndStoreEntropyCodesDistance(distance_enc, mb.distance_histograms, mb.distance_histograms_size, uint(num_distance_symbols), tree, storage_ix, storage) + tree = nil + + for _, cmd := 
range commands { + var cmd_code uint = uint(cmd.cmd_prefix_) + storeSymbol(command_enc, cmd_code, storage_ix, storage) + storeCommandExtra(&cmd, storage_ix, storage) + if mb.literal_context_map_size == 0 { + var j uint + for j = uint(cmd.insert_len_); j != 0; j-- { + storeSymbol(literal_enc, uint(input[pos&mask]), storage_ix, storage) + pos++ + } + } else { + var j uint + for j = uint(cmd.insert_len_); j != 0; j-- { + var context uint = uint(getContext(prev_byte, prev_byte2, literal_context_lut)) + var literal byte = input[pos&mask] + storeSymbolWithContext(literal_enc, uint(literal), context, mb.literal_context_map, storage_ix, storage, literalContextBits) + prev_byte2 = prev_byte + prev_byte = literal + pos++ + } + } + + pos += uint(commandCopyLen(&cmd)) + if commandCopyLen(&cmd) != 0 { + prev_byte2 = input[(pos-2)&mask] + prev_byte = input[(pos-1)&mask] + if cmd.cmd_prefix_ >= 128 { + var dist_code uint = uint(cmd.dist_prefix_) & 0x3FF + var distnumextra uint32 = uint32(cmd.dist_prefix_) >> 10 + var distextra uint64 = uint64(cmd.dist_extra_) + if mb.distance_context_map_size == 0 { + storeSymbol(distance_enc, dist_code, storage_ix, storage) + } else { + var context uint = uint(commandDistanceContext(&cmd)) + storeSymbolWithContext(distance_enc, dist_code, context, mb.distance_context_map, storage_ix, storage, distanceContextBits) + } + + writeBits(uint(distnumextra), distextra, storage_ix, storage) + } + } + } + + cleanupBlockEncoder(distance_enc) + cleanupBlockEncoder(command_enc) + cleanupBlockEncoder(literal_enc) + if is_last { + jumpToByteBoundary(storage_ix, storage) + } +} + +func buildHistograms(input []byte, start_pos uint, mask uint, commands []command, lit_histo *histogramLiteral, cmd_histo *histogramCommand, dist_histo *histogramDistance) { + var pos uint = start_pos + for _, cmd := range commands { + var j uint + histogramAddCommand(cmd_histo, uint(cmd.cmd_prefix_)) + for j = uint(cmd.insert_len_); j != 0; j-- { + histogramAddLiteral(lit_histo, uint(input[pos&mask])) + pos++ + } + + pos += uint(commandCopyLen(&cmd)) + if commandCopyLen(&cmd) != 0 && cmd.cmd_prefix_ >= 128 { + histogramAddDistance(dist_histo, uint(cmd.dist_prefix_)&0x3FF) + } + } +} + +func storeDataWithHuffmanCodes(input []byte, start_pos uint, mask uint, commands []command, lit_depth []byte, lit_bits []uint16, cmd_depth []byte, cmd_bits []uint16, dist_depth []byte, dist_bits []uint16, storage_ix *uint, storage []byte) { + var pos uint = start_pos + for _, cmd := range commands { + var cmd_code uint = uint(cmd.cmd_prefix_) + var j uint + writeBits(uint(cmd_depth[cmd_code]), uint64(cmd_bits[cmd_code]), storage_ix, storage) + storeCommandExtra(&cmd, storage_ix, storage) + for j = uint(cmd.insert_len_); j != 0; j-- { + var literal byte = input[pos&mask] + writeBits(uint(lit_depth[literal]), uint64(lit_bits[literal]), storage_ix, storage) + pos++ + } + + pos += uint(commandCopyLen(&cmd)) + if commandCopyLen(&cmd) != 0 && cmd.cmd_prefix_ >= 128 { + var dist_code uint = uint(cmd.dist_prefix_) & 0x3FF + var distnumextra uint32 = uint32(cmd.dist_prefix_) >> 10 + var distextra uint32 = cmd.dist_extra_ + writeBits(uint(dist_depth[dist_code]), uint64(dist_bits[dist_code]), storage_ix, storage) + writeBits(uint(distnumextra), uint64(distextra), storage_ix, storage) + } + } +} + +func storeMetaBlockTrivial(input []byte, start_pos uint, length uint, mask uint, is_last bool, params *encoderParams, commands []command, storage_ix *uint, storage []byte) { + var lit_histo histogramLiteral + var cmd_histo histogramCommand + var 
dist_histo histogramDistance + var lit_depth [numLiteralSymbols]byte + var lit_bits [numLiteralSymbols]uint16 + var cmd_depth [numCommandSymbols]byte + var cmd_bits [numCommandSymbols]uint16 + var dist_depth [maxSimpleDistanceAlphabetSize]byte + var dist_bits [maxSimpleDistanceAlphabetSize]uint16 + var tree []huffmanTree + var num_distance_symbols uint32 = params.dist.alphabet_size + + storeCompressedMetaBlockHeader(is_last, length, storage_ix, storage) + + histogramClearLiteral(&lit_histo) + histogramClearCommand(&cmd_histo) + histogramClearDistance(&dist_histo) + + buildHistograms(input, start_pos, mask, commands, &lit_histo, &cmd_histo, &dist_histo) + + writeBits(13, 0, storage_ix, storage) + + tree = make([]huffmanTree, maxHuffmanTreeSize) + buildAndStoreHuffmanTree(lit_histo.data_[:], numLiteralSymbols, numLiteralSymbols, tree, lit_depth[:], lit_bits[:], storage_ix, storage) + buildAndStoreHuffmanTree(cmd_histo.data_[:], numCommandSymbols, numCommandSymbols, tree, cmd_depth[:], cmd_bits[:], storage_ix, storage) + buildAndStoreHuffmanTree(dist_histo.data_[:], maxSimpleDistanceAlphabetSize, uint(num_distance_symbols), tree, dist_depth[:], dist_bits[:], storage_ix, storage) + tree = nil + storeDataWithHuffmanCodes(input, start_pos, mask, commands, lit_depth[:], lit_bits[:], cmd_depth[:], cmd_bits[:], dist_depth[:], dist_bits[:], storage_ix, storage) + if is_last { + jumpToByteBoundary(storage_ix, storage) + } +} + +func storeMetaBlockFast(input []byte, start_pos uint, length uint, mask uint, is_last bool, params *encoderParams, commands []command, storage_ix *uint, storage []byte) { + var num_distance_symbols uint32 = params.dist.alphabet_size + var distance_alphabet_bits uint32 = log2FloorNonZero(uint(num_distance_symbols-1)) + 1 + + storeCompressedMetaBlockHeader(is_last, length, storage_ix, storage) + + writeBits(13, 0, storage_ix, storage) + + if len(commands) <= 128 { + var histogram = [numLiteralSymbols]uint32{0} + var pos uint = start_pos + var num_literals uint = 0 + var lit_depth [numLiteralSymbols]byte + var lit_bits [numLiteralSymbols]uint16 + for _, cmd := range commands { + var j uint + for j = uint(cmd.insert_len_); j != 0; j-- { + histogram[input[pos&mask]]++ + pos++ + } + + num_literals += uint(cmd.insert_len_) + pos += uint(commandCopyLen(&cmd)) + } + + buildAndStoreHuffmanTreeFast(histogram[:], num_literals, /* max_bits = */ + 8, lit_depth[:], lit_bits[:], storage_ix, storage) + + storeStaticCommandHuffmanTree(storage_ix, storage) + storeStaticDistanceHuffmanTree(storage_ix, storage) + storeDataWithHuffmanCodes(input, start_pos, mask, commands, lit_depth[:], lit_bits[:], kStaticCommandCodeDepth[:], kStaticCommandCodeBits[:], kStaticDistanceCodeDepth[:], kStaticDistanceCodeBits[:], storage_ix, storage) + } else { + var lit_histo histogramLiteral + var cmd_histo histogramCommand + var dist_histo histogramDistance + var lit_depth [numLiteralSymbols]byte + var lit_bits [numLiteralSymbols]uint16 + var cmd_depth [numCommandSymbols]byte + var cmd_bits [numCommandSymbols]uint16 + var dist_depth [maxSimpleDistanceAlphabetSize]byte + var dist_bits [maxSimpleDistanceAlphabetSize]uint16 + histogramClearLiteral(&lit_histo) + histogramClearCommand(&cmd_histo) + histogramClearDistance(&dist_histo) + buildHistograms(input, start_pos, mask, commands, &lit_histo, &cmd_histo, &dist_histo) + buildAndStoreHuffmanTreeFast(lit_histo.data_[:], lit_histo.total_count_, /* max_bits = */ + 8, lit_depth[:], lit_bits[:], storage_ix, storage) + + buildAndStoreHuffmanTreeFast(cmd_histo.data_[:], 
cmd_histo.total_count_, /* max_bits = */ + 10, cmd_depth[:], cmd_bits[:], storage_ix, storage) + + buildAndStoreHuffmanTreeFast(dist_histo.data_[:], dist_histo.total_count_, /* max_bits = */ + uint(distance_alphabet_bits), dist_depth[:], dist_bits[:], storage_ix, storage) + + storeDataWithHuffmanCodes(input, start_pos, mask, commands, lit_depth[:], lit_bits[:], cmd_depth[:], cmd_bits[:], dist_depth[:], dist_bits[:], storage_ix, storage) + } + + if is_last { + jumpToByteBoundary(storage_ix, storage) + } +} + +/* This is for storing uncompressed blocks (simple raw storage of + bytes-as-bytes). */ +func storeUncompressedMetaBlock(is_final_block bool, input []byte, position uint, mask uint, len uint, storage_ix *uint, storage []byte) { + var masked_pos uint = position & mask + storeUncompressedMetaBlockHeader(uint(len), storage_ix, storage) + jumpToByteBoundary(storage_ix, storage) + + if masked_pos+len > mask+1 { + var len1 uint = mask + 1 - masked_pos + copy(storage[*storage_ix>>3:], input[masked_pos:][:len1]) + *storage_ix += len1 << 3 + len -= len1 + masked_pos = 0 + } + + copy(storage[*storage_ix>>3:], input[masked_pos:][:len]) + *storage_ix += uint(len << 3) + + /* We need to clear the next 4 bytes to continue to be + compatible with BrotliWriteBits. */ + writeBitsPrepareStorage(*storage_ix, storage) + + /* Since the uncompressed block itself may not be the final block, add an + empty one after this. */ + if is_final_block { + writeBits(1, 1, storage_ix, storage) /* islast */ + writeBits(1, 1, storage_ix, storage) /* isempty */ + jumpToByteBoundary(storage_ix, storage) + } +} diff --git a/vendor/github.com/andybalholm/brotli/cluster.go b/vendor/github.com/andybalholm/brotli/cluster.go new file mode 100644 index 00000000000..df8a3282245 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/cluster.go @@ -0,0 +1,30 @@ +package brotli + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Functions for clustering similar histograms together. */ + +type histogramPair struct { + idx1 uint32 + idx2 uint32 + cost_combo float64 + cost_diff float64 +} + +func histogramPairIsLess(p1 *histogramPair, p2 *histogramPair) bool { + if p1.cost_diff != p2.cost_diff { + return p1.cost_diff > p2.cost_diff + } + + return (p1.idx2 - p1.idx1) > (p2.idx2 - p2.idx1) +} + +/* Returns entropy reduction of the context map when we combine two clusters. */ +func clusterCostDiff(size_a uint, size_b uint) float64 { + var size_c uint = size_a + size_b + return float64(size_a)*fastLog2(size_a) + float64(size_b)*fastLog2(size_b) - float64(size_c)*fastLog2(size_c) +} diff --git a/vendor/github.com/andybalholm/brotli/cluster_command.go b/vendor/github.com/andybalholm/brotli/cluster_command.go new file mode 100644 index 00000000000..45b569bb2a5 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/cluster_command.go @@ -0,0 +1,164 @@ +package brotli + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Computes the bit cost reduction by combining out[idx1] and out[idx2] and if + it is below a threshold, stores the pair (idx1, idx2) in the *pairs queue. 
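+   Only the front of the queue is kept ordered: pairs[0] always holds the
+   pair whose merge currently yields the largest bit cost reduction.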
*/ +func compareAndPushToQueueCommand(out []histogramCommand, cluster_size []uint32, idx1 uint32, idx2 uint32, max_num_pairs uint, pairs []histogramPair, num_pairs *uint) { + var is_good_pair bool = false + var p histogramPair + p.idx2 = 0 + p.idx1 = p.idx2 + p.cost_combo = 0 + p.cost_diff = p.cost_combo + if idx1 == idx2 { + return + } + + if idx2 < idx1 { + var t uint32 = idx2 + idx2 = idx1 + idx1 = t + } + + p.idx1 = idx1 + p.idx2 = idx2 + p.cost_diff = 0.5 * clusterCostDiff(uint(cluster_size[idx1]), uint(cluster_size[idx2])) + p.cost_diff -= out[idx1].bit_cost_ + p.cost_diff -= out[idx2].bit_cost_ + + if out[idx1].total_count_ == 0 { + p.cost_combo = out[idx2].bit_cost_ + is_good_pair = true + } else if out[idx2].total_count_ == 0 { + p.cost_combo = out[idx1].bit_cost_ + is_good_pair = true + } else { + var threshold float64 + if *num_pairs == 0 { + threshold = 1e99 + } else { + threshold = brotli_max_double(0.0, pairs[0].cost_diff) + } + var combo histogramCommand = out[idx1] + var cost_combo float64 + histogramAddHistogramCommand(&combo, &out[idx2]) + cost_combo = populationCostCommand(&combo) + if cost_combo < threshold-p.cost_diff { + p.cost_combo = cost_combo + is_good_pair = true + } + } + + if is_good_pair { + p.cost_diff += p.cost_combo + if *num_pairs > 0 && histogramPairIsLess(&pairs[0], &p) { + /* Replace the top of the queue if needed. */ + if *num_pairs < max_num_pairs { + pairs[*num_pairs] = pairs[0] + (*num_pairs)++ + } + + pairs[0] = p + } else if *num_pairs < max_num_pairs { + pairs[*num_pairs] = p + (*num_pairs)++ + } + } +} + +func histogramCombineCommand(out []histogramCommand, cluster_size []uint32, symbols []uint32, clusters []uint32, pairs []histogramPair, num_clusters uint, symbols_size uint, max_clusters uint, max_num_pairs uint) uint { + var cost_diff_threshold float64 = 0.0 + var min_cluster_size uint = 1 + var num_pairs uint = 0 + { + /* We maintain a vector of histogram pairs, with the property that the pair + with the maximum bit cost reduction is the first. */ + var idx1 uint + for idx1 = 0; idx1 < num_clusters; idx1++ { + var idx2 uint + for idx2 = idx1 + 1; idx2 < num_clusters; idx2++ { + compareAndPushToQueueCommand(out, cluster_size, clusters[idx1], clusters[idx2], max_num_pairs, pairs[0:], &num_pairs) + } + } + } + + for num_clusters > min_cluster_size { + var best_idx1 uint32 + var best_idx2 uint32 + var i uint + if pairs[0].cost_diff >= cost_diff_threshold { + cost_diff_threshold = 1e99 + min_cluster_size = max_clusters + continue + } + + /* Take the best pair from the top of heap. */ + best_idx1 = pairs[0].idx1 + + best_idx2 = pairs[0].idx2 + histogramAddHistogramCommand(&out[best_idx1], &out[best_idx2]) + out[best_idx1].bit_cost_ = pairs[0].cost_combo + cluster_size[best_idx1] += cluster_size[best_idx2] + for i = 0; i < symbols_size; i++ { + if symbols[i] == best_idx2 { + symbols[i] = best_idx1 + } + } + + for i = 0; i < num_clusters; i++ { + if clusters[i] == best_idx2 { + copy(clusters[i:], clusters[i+1:][:num_clusters-i-1]) + break + } + } + + num_clusters-- + { + /* Remove pairs intersecting the just combined best pair. */ + var copy_to_idx uint = 0 + for i = 0; i < num_pairs; i++ { + var p *histogramPair = &pairs[i] + if p.idx1 == best_idx1 || p.idx2 == best_idx1 || p.idx1 == best_idx2 || p.idx2 == best_idx2 { + /* Remove invalid pair from the queue. */ + continue + } + + if histogramPairIsLess(&pairs[0], p) { + /* Replace the top of the queue if needed. 
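+					   The displaced front pair is written into the compacted slot
+					   instead of being dropped, so no surviving candidate is lost.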
*/ + var front histogramPair = pairs[0] + pairs[0] = *p + pairs[copy_to_idx] = front + } else { + pairs[copy_to_idx] = *p + } + + copy_to_idx++ + } + + num_pairs = copy_to_idx + } + + /* Push new pairs formed with the combined histogram to the heap. */ + for i = 0; i < num_clusters; i++ { + compareAndPushToQueueCommand(out, cluster_size, best_idx1, clusters[i], max_num_pairs, pairs[0:], &num_pairs) + } + } + + return num_clusters +} + +/* What is the bit cost of moving histogram from cur_symbol to candidate. */ +func histogramBitCostDistanceCommand(histogram *histogramCommand, candidate *histogramCommand) float64 { + if histogram.total_count_ == 0 { + return 0.0 + } else { + var tmp histogramCommand = *histogram + histogramAddHistogramCommand(&tmp, candidate) + return populationCostCommand(&tmp) - candidate.bit_cost_ + } +} diff --git a/vendor/github.com/andybalholm/brotli/cluster_distance.go b/vendor/github.com/andybalholm/brotli/cluster_distance.go new file mode 100644 index 00000000000..1aaa86e6ed8 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/cluster_distance.go @@ -0,0 +1,326 @@ +package brotli + +import "math" + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Computes the bit cost reduction by combining out[idx1] and out[idx2] and if + it is below a threshold, stores the pair (idx1, idx2) in the *pairs queue. */ +func compareAndPushToQueueDistance(out []histogramDistance, cluster_size []uint32, idx1 uint32, idx2 uint32, max_num_pairs uint, pairs []histogramPair, num_pairs *uint) { + var is_good_pair bool = false + var p histogramPair + p.idx2 = 0 + p.idx1 = p.idx2 + p.cost_combo = 0 + p.cost_diff = p.cost_combo + if idx1 == idx2 { + return + } + + if idx2 < idx1 { + var t uint32 = idx2 + idx2 = idx1 + idx1 = t + } + + p.idx1 = idx1 + p.idx2 = idx2 + p.cost_diff = 0.5 * clusterCostDiff(uint(cluster_size[idx1]), uint(cluster_size[idx2])) + p.cost_diff -= out[idx1].bit_cost_ + p.cost_diff -= out[idx2].bit_cost_ + + if out[idx1].total_count_ == 0 { + p.cost_combo = out[idx2].bit_cost_ + is_good_pair = true + } else if out[idx2].total_count_ == 0 { + p.cost_combo = out[idx1].bit_cost_ + is_good_pair = true + } else { + var threshold float64 + if *num_pairs == 0 { + threshold = 1e99 + } else { + threshold = brotli_max_double(0.0, pairs[0].cost_diff) + } + var combo histogramDistance = out[idx1] + var cost_combo float64 + histogramAddHistogramDistance(&combo, &out[idx2]) + cost_combo = populationCostDistance(&combo) + if cost_combo < threshold-p.cost_diff { + p.cost_combo = cost_combo + is_good_pair = true + } + } + + if is_good_pair { + p.cost_diff += p.cost_combo + if *num_pairs > 0 && histogramPairIsLess(&pairs[0], &p) { + /* Replace the top of the queue if needed. */ + if *num_pairs < max_num_pairs { + pairs[*num_pairs] = pairs[0] + (*num_pairs)++ + } + + pairs[0] = p + } else if *num_pairs < max_num_pairs { + pairs[*num_pairs] = p + (*num_pairs)++ + } + } +} + +func histogramCombineDistance(out []histogramDistance, cluster_size []uint32, symbols []uint32, clusters []uint32, pairs []histogramPair, num_clusters uint, symbols_size uint, max_clusters uint, max_num_pairs uint) uint { + var cost_diff_threshold float64 = 0.0 + var min_cluster_size uint = 1 + var num_pairs uint = 0 + { + /* We maintain a vector of histogram pairs, with the property that the pair + with the maximum bit cost reduction is the first. 
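+		   The double loop below seeds the queue with every pair of the
+		   initial clusters.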
*/ + var idx1 uint + for idx1 = 0; idx1 < num_clusters; idx1++ { + var idx2 uint + for idx2 = idx1 + 1; idx2 < num_clusters; idx2++ { + compareAndPushToQueueDistance(out, cluster_size, clusters[idx1], clusters[idx2], max_num_pairs, pairs[0:], &num_pairs) + } + } + } + + for num_clusters > min_cluster_size { + var best_idx1 uint32 + var best_idx2 uint32 + var i uint + if pairs[0].cost_diff >= cost_diff_threshold { + cost_diff_threshold = 1e99 + min_cluster_size = max_clusters + continue + } + + /* Take the best pair from the top of heap. */ + best_idx1 = pairs[0].idx1 + + best_idx2 = pairs[0].idx2 + histogramAddHistogramDistance(&out[best_idx1], &out[best_idx2]) + out[best_idx1].bit_cost_ = pairs[0].cost_combo + cluster_size[best_idx1] += cluster_size[best_idx2] + for i = 0; i < symbols_size; i++ { + if symbols[i] == best_idx2 { + symbols[i] = best_idx1 + } + } + + for i = 0; i < num_clusters; i++ { + if clusters[i] == best_idx2 { + copy(clusters[i:], clusters[i+1:][:num_clusters-i-1]) + break + } + } + + num_clusters-- + { + /* Remove pairs intersecting the just combined best pair. */ + var copy_to_idx uint = 0 + for i = 0; i < num_pairs; i++ { + var p *histogramPair = &pairs[i] + if p.idx1 == best_idx1 || p.idx2 == best_idx1 || p.idx1 == best_idx2 || p.idx2 == best_idx2 { + /* Remove invalid pair from the queue. */ + continue + } + + if histogramPairIsLess(&pairs[0], p) { + /* Replace the top of the queue if needed. */ + var front histogramPair = pairs[0] + pairs[0] = *p + pairs[copy_to_idx] = front + } else { + pairs[copy_to_idx] = *p + } + + copy_to_idx++ + } + + num_pairs = copy_to_idx + } + + /* Push new pairs formed with the combined histogram to the heap. */ + for i = 0; i < num_clusters; i++ { + compareAndPushToQueueDistance(out, cluster_size, best_idx1, clusters[i], max_num_pairs, pairs[0:], &num_pairs) + } + } + + return num_clusters +} + +/* What is the bit cost of moving histogram from cur_symbol to candidate. */ +func histogramBitCostDistanceDistance(histogram *histogramDistance, candidate *histogramDistance) float64 { + if histogram.total_count_ == 0 { + return 0.0 + } else { + var tmp histogramDistance = *histogram + histogramAddHistogramDistance(&tmp, candidate) + return populationCostDistance(&tmp) - candidate.bit_cost_ + } +} + +/* Find the best 'out' histogram for each of the 'in' histograms. + When called, clusters[0..num_clusters) contains the unique values from + symbols[0..in_size), but this property is not preserved in this function. + Note: we assume that out[]->bit_cost_ is already up-to-date. */ +func histogramRemapDistance(in []histogramDistance, in_size uint, clusters []uint32, num_clusters uint, out []histogramDistance, symbols []uint32) { + var i uint + for i = 0; i < in_size; i++ { + var best_out uint32 + if i == 0 { + best_out = symbols[0] + } else { + best_out = symbols[i-1] + } + var best_bits float64 = histogramBitCostDistanceDistance(&in[i], &out[best_out]) + var j uint + for j = 0; j < num_clusters; j++ { + var cur_bits float64 = histogramBitCostDistanceDistance(&in[i], &out[clusters[j]]) + if cur_bits < best_bits { + best_bits = cur_bits + best_out = clusters[j] + } + } + + symbols[i] = best_out + } + + /* Recompute each out based on raw and symbols. 
*/ + for i = 0; i < num_clusters; i++ { + histogramClearDistance(&out[clusters[i]]) + } + + for i = 0; i < in_size; i++ { + histogramAddHistogramDistance(&out[symbols[i]], &in[i]) + } +} + +/* Reorders elements of the out[0..length) array and changes values in + symbols[0..length) array in the following way: + * when called, symbols[] contains indexes into out[], and has N unique + values (possibly N < length) + * on return, symbols'[i] = f(symbols[i]) and + out'[symbols'[i]] = out[symbols[i]], for each 0 <= i < length, + where f is a bijection between the range of symbols[] and [0..N), and + the first occurrences of values in symbols'[i] come in consecutive + increasing order. + Returns N, the number of unique values in symbols[]. */ + +var histogramReindexDistance_kInvalidIndex uint32 = math.MaxUint32 + +func histogramReindexDistance(out []histogramDistance, symbols []uint32, length uint) uint { + var new_index []uint32 = make([]uint32, length) + var next_index uint32 + var tmp []histogramDistance + var i uint + for i = 0; i < length; i++ { + new_index[i] = histogramReindexDistance_kInvalidIndex + } + + next_index = 0 + for i = 0; i < length; i++ { + if new_index[symbols[i]] == histogramReindexDistance_kInvalidIndex { + new_index[symbols[i]] = next_index + next_index++ + } + } + + /* TODO: by using idea of "cycle-sort" we can avoid allocation of + tmp and reduce the number of copying by the factor of 2. */ + tmp = make([]histogramDistance, next_index) + + next_index = 0 + for i = 0; i < length; i++ { + if new_index[symbols[i]] == next_index { + tmp[next_index] = out[symbols[i]] + next_index++ + } + + symbols[i] = new_index[symbols[i]] + } + + new_index = nil + for i = 0; uint32(i) < next_index; i++ { + out[i] = tmp[i] + } + + tmp = nil + return uint(next_index) +} + +func clusterHistogramsDistance(in []histogramDistance, in_size uint, max_histograms uint, out []histogramDistance, out_size *uint, histogram_symbols []uint32) { + var cluster_size []uint32 = make([]uint32, in_size) + var clusters []uint32 = make([]uint32, in_size) + var num_clusters uint = 0 + var max_input_histograms uint = 64 + var pairs_capacity uint = max_input_histograms * max_input_histograms / 2 + var pairs []histogramPair = make([]histogramPair, (pairs_capacity + 1)) + var i uint + + /* For the first pass of clustering, we allow all pairs. */ + for i = 0; i < in_size; i++ { + cluster_size[i] = 1 + } + + for i = 0; i < in_size; i++ { + out[i] = in[i] + out[i].bit_cost_ = populationCostDistance(&in[i]) + histogram_symbols[i] = uint32(i) + } + + for i = 0; i < in_size; i += max_input_histograms { + var num_to_combine uint = brotli_min_size_t(in_size-i, max_input_histograms) + var num_new_clusters uint + var j uint + for j = 0; j < num_to_combine; j++ { + clusters[num_clusters+j] = uint32(i + j) + } + + num_new_clusters = histogramCombineDistance(out, cluster_size, histogram_symbols[i:], clusters[num_clusters:], pairs, num_to_combine, num_to_combine, max_histograms, pairs_capacity) + num_clusters += num_new_clusters + } + { + /* For the second pass, we limit the total number of histogram pairs. + After this limit is reached, we only keep searching for the best pair. 
*/ + var max_num_pairs uint = brotli_min_size_t(64*num_clusters, (num_clusters/2)*num_clusters) + if pairs_capacity < (max_num_pairs + 1) { + var _new_size uint + if pairs_capacity == 0 { + _new_size = max_num_pairs + 1 + } else { + _new_size = pairs_capacity + } + var new_array []histogramPair + for _new_size < (max_num_pairs + 1) { + _new_size *= 2 + } + new_array = make([]histogramPair, _new_size) + if pairs_capacity != 0 { + copy(new_array, pairs[:pairs_capacity]) + } + + pairs = new_array + pairs_capacity = _new_size + } + + /* Collapse similar histograms. */ + num_clusters = histogramCombineDistance(out, cluster_size, histogram_symbols, clusters, pairs, num_clusters, in_size, max_histograms, max_num_pairs) + } + + pairs = nil + cluster_size = nil + + /* Find the optimal map from original histograms to the final ones. */ + histogramRemapDistance(in, in_size, clusters, num_clusters, out, histogram_symbols) + + clusters = nil + + /* Convert the context map to a canonical form. */ + *out_size = histogramReindexDistance(out, histogram_symbols, in_size) +} diff --git a/vendor/github.com/andybalholm/brotli/cluster_literal.go b/vendor/github.com/andybalholm/brotli/cluster_literal.go new file mode 100644 index 00000000000..6ba66f31b2c --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/cluster_literal.go @@ -0,0 +1,326 @@ +package brotli + +import "math" + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Computes the bit cost reduction by combining out[idx1] and out[idx2] and if + it is below a threshold, stores the pair (idx1, idx2) in the *pairs queue. */ +func compareAndPushToQueueLiteral(out []histogramLiteral, cluster_size []uint32, idx1 uint32, idx2 uint32, max_num_pairs uint, pairs []histogramPair, num_pairs *uint) { + var is_good_pair bool = false + var p histogramPair + p.idx2 = 0 + p.idx1 = p.idx2 + p.cost_combo = 0 + p.cost_diff = p.cost_combo + if idx1 == idx2 { + return + } + + if idx2 < idx1 { + var t uint32 = idx2 + idx2 = idx1 + idx1 = t + } + + p.idx1 = idx1 + p.idx2 = idx2 + p.cost_diff = 0.5 * clusterCostDiff(uint(cluster_size[idx1]), uint(cluster_size[idx2])) + p.cost_diff -= out[idx1].bit_cost_ + p.cost_diff -= out[idx2].bit_cost_ + + if out[idx1].total_count_ == 0 { + p.cost_combo = out[idx2].bit_cost_ + is_good_pair = true + } else if out[idx2].total_count_ == 0 { + p.cost_combo = out[idx1].bit_cost_ + is_good_pair = true + } else { + var threshold float64 + if *num_pairs == 0 { + threshold = 1e99 + } else { + threshold = brotli_max_double(0.0, pairs[0].cost_diff) + } + var combo histogramLiteral = out[idx1] + var cost_combo float64 + histogramAddHistogramLiteral(&combo, &out[idx2]) + cost_combo = populationCostLiteral(&combo) + if cost_combo < threshold-p.cost_diff { + p.cost_combo = cost_combo + is_good_pair = true + } + } + + if is_good_pair { + p.cost_diff += p.cost_combo + if *num_pairs > 0 && histogramPairIsLess(&pairs[0], &p) { + /* Replace the top of the queue if needed. 
*/ + if *num_pairs < max_num_pairs { + pairs[*num_pairs] = pairs[0] + (*num_pairs)++ + } + + pairs[0] = p + } else if *num_pairs < max_num_pairs { + pairs[*num_pairs] = p + (*num_pairs)++ + } + } +} + +func histogramCombineLiteral(out []histogramLiteral, cluster_size []uint32, symbols []uint32, clusters []uint32, pairs []histogramPair, num_clusters uint, symbols_size uint, max_clusters uint, max_num_pairs uint) uint { + var cost_diff_threshold float64 = 0.0 + var min_cluster_size uint = 1 + var num_pairs uint = 0 + { + /* We maintain a vector of histogram pairs, with the property that the pair + with the maximum bit cost reduction is the first. */ + var idx1 uint + for idx1 = 0; idx1 < num_clusters; idx1++ { + var idx2 uint + for idx2 = idx1 + 1; idx2 < num_clusters; idx2++ { + compareAndPushToQueueLiteral(out, cluster_size, clusters[idx1], clusters[idx2], max_num_pairs, pairs[0:], &num_pairs) + } + } + } + + for num_clusters > min_cluster_size { + var best_idx1 uint32 + var best_idx2 uint32 + var i uint + if pairs[0].cost_diff >= cost_diff_threshold { + cost_diff_threshold = 1e99 + min_cluster_size = max_clusters + continue + } + + /* Take the best pair from the top of heap. */ + best_idx1 = pairs[0].idx1 + + best_idx2 = pairs[0].idx2 + histogramAddHistogramLiteral(&out[best_idx1], &out[best_idx2]) + out[best_idx1].bit_cost_ = pairs[0].cost_combo + cluster_size[best_idx1] += cluster_size[best_idx2] + for i = 0; i < symbols_size; i++ { + if symbols[i] == best_idx2 { + symbols[i] = best_idx1 + } + } + + for i = 0; i < num_clusters; i++ { + if clusters[i] == best_idx2 { + copy(clusters[i:], clusters[i+1:][:num_clusters-i-1]) + break + } + } + + num_clusters-- + { + /* Remove pairs intersecting the just combined best pair. */ + var copy_to_idx uint = 0 + for i = 0; i < num_pairs; i++ { + var p *histogramPair = &pairs[i] + if p.idx1 == best_idx1 || p.idx2 == best_idx1 || p.idx1 == best_idx2 || p.idx2 == best_idx2 { + /* Remove invalid pair from the queue. */ + continue + } + + if histogramPairIsLess(&pairs[0], p) { + /* Replace the top of the queue if needed. */ + var front histogramPair = pairs[0] + pairs[0] = *p + pairs[copy_to_idx] = front + } else { + pairs[copy_to_idx] = *p + } + + copy_to_idx++ + } + + num_pairs = copy_to_idx + } + + /* Push new pairs formed with the combined histogram to the heap. */ + for i = 0; i < num_clusters; i++ { + compareAndPushToQueueLiteral(out, cluster_size, best_idx1, clusters[i], max_num_pairs, pairs[0:], &num_pairs) + } + } + + return num_clusters +} + +/* What is the bit cost of moving histogram from cur_symbol to candidate. */ +func histogramBitCostDistanceLiteral(histogram *histogramLiteral, candidate *histogramLiteral) float64 { + if histogram.total_count_ == 0 { + return 0.0 + } else { + var tmp histogramLiteral = *histogram + histogramAddHistogramLiteral(&tmp, candidate) + return populationCostLiteral(&tmp) - candidate.bit_cost_ + } +} + +/* Find the best 'out' histogram for each of the 'in' histograms. + When called, clusters[0..num_clusters) contains the unique values from + symbols[0..in_size), but this property is not preserved in this function. + Note: we assume that out[]->bit_cost_ is already up-to-date. 
*/ +func histogramRemapLiteral(in []histogramLiteral, in_size uint, clusters []uint32, num_clusters uint, out []histogramLiteral, symbols []uint32) { + var i uint + for i = 0; i < in_size; i++ { + var best_out uint32 + if i == 0 { + best_out = symbols[0] + } else { + best_out = symbols[i-1] + } + var best_bits float64 = histogramBitCostDistanceLiteral(&in[i], &out[best_out]) + var j uint + for j = 0; j < num_clusters; j++ { + var cur_bits float64 = histogramBitCostDistanceLiteral(&in[i], &out[clusters[j]]) + if cur_bits < best_bits { + best_bits = cur_bits + best_out = clusters[j] + } + } + + symbols[i] = best_out + } + + /* Recompute each out based on raw and symbols. */ + for i = 0; i < num_clusters; i++ { + histogramClearLiteral(&out[clusters[i]]) + } + + for i = 0; i < in_size; i++ { + histogramAddHistogramLiteral(&out[symbols[i]], &in[i]) + } +} + +/* Reorders elements of the out[0..length) array and changes values in + symbols[0..length) array in the following way: + * when called, symbols[] contains indexes into out[], and has N unique + values (possibly N < length) + * on return, symbols'[i] = f(symbols[i]) and + out'[symbols'[i]] = out[symbols[i]], for each 0 <= i < length, + where f is a bijection between the range of symbols[] and [0..N), and + the first occurrences of values in symbols'[i] come in consecutive + increasing order. + Returns N, the number of unique values in symbols[]. */ + +var histogramReindexLiteral_kInvalidIndex uint32 = math.MaxUint32 + +func histogramReindexLiteral(out []histogramLiteral, symbols []uint32, length uint) uint { + var new_index []uint32 = make([]uint32, length) + var next_index uint32 + var tmp []histogramLiteral + var i uint + for i = 0; i < length; i++ { + new_index[i] = histogramReindexLiteral_kInvalidIndex + } + + next_index = 0 + for i = 0; i < length; i++ { + if new_index[symbols[i]] == histogramReindexLiteral_kInvalidIndex { + new_index[symbols[i]] = next_index + next_index++ + } + } + + /* TODO: by using idea of "cycle-sort" we can avoid allocation of + tmp and reduce the number of copying by the factor of 2. */ + tmp = make([]histogramLiteral, next_index) + + next_index = 0 + for i = 0; i < length; i++ { + if new_index[symbols[i]] == next_index { + tmp[next_index] = out[symbols[i]] + next_index++ + } + + symbols[i] = new_index[symbols[i]] + } + + new_index = nil + for i = 0; uint32(i) < next_index; i++ { + out[i] = tmp[i] + } + + tmp = nil + return uint(next_index) +} + +func clusterHistogramsLiteral(in []histogramLiteral, in_size uint, max_histograms uint, out []histogramLiteral, out_size *uint, histogram_symbols []uint32) { + var cluster_size []uint32 = make([]uint32, in_size) + var clusters []uint32 = make([]uint32, in_size) + var num_clusters uint = 0 + var max_input_histograms uint = 64 + var pairs_capacity uint = max_input_histograms * max_input_histograms / 2 + var pairs []histogramPair = make([]histogramPair, (pairs_capacity + 1)) + var i uint + + /* For the first pass of clustering, we allow all pairs. 
*/ + for i = 0; i < in_size; i++ { + cluster_size[i] = 1 + } + + for i = 0; i < in_size; i++ { + out[i] = in[i] + out[i].bit_cost_ = populationCostLiteral(&in[i]) + histogram_symbols[i] = uint32(i) + } + + for i = 0; i < in_size; i += max_input_histograms { + var num_to_combine uint = brotli_min_size_t(in_size-i, max_input_histograms) + var num_new_clusters uint + var j uint + for j = 0; j < num_to_combine; j++ { + clusters[num_clusters+j] = uint32(i + j) + } + + num_new_clusters = histogramCombineLiteral(out, cluster_size, histogram_symbols[i:], clusters[num_clusters:], pairs, num_to_combine, num_to_combine, max_histograms, pairs_capacity) + num_clusters += num_new_clusters + } + { + /* For the second pass, we limit the total number of histogram pairs. + After this limit is reached, we only keep searching for the best pair. */ + var max_num_pairs uint = brotli_min_size_t(64*num_clusters, (num_clusters/2)*num_clusters) + if pairs_capacity < (max_num_pairs + 1) { + var _new_size uint + if pairs_capacity == 0 { + _new_size = max_num_pairs + 1 + } else { + _new_size = pairs_capacity + } + var new_array []histogramPair + for _new_size < (max_num_pairs + 1) { + _new_size *= 2 + } + new_array = make([]histogramPair, _new_size) + if pairs_capacity != 0 { + copy(new_array, pairs[:pairs_capacity]) + } + + pairs = new_array + pairs_capacity = _new_size + } + + /* Collapse similar histograms. */ + num_clusters = histogramCombineLiteral(out, cluster_size, histogram_symbols, clusters, pairs, num_clusters, in_size, max_histograms, max_num_pairs) + } + + pairs = nil + cluster_size = nil + + /* Find the optimal map from original histograms to the final ones. */ + histogramRemapLiteral(in, in_size, clusters, num_clusters, out, histogram_symbols) + + clusters = nil + + /* Convert the context map to a canonical form. 
*/ + *out_size = histogramReindexLiteral(out, histogram_symbols, in_size) +} diff --git a/vendor/github.com/andybalholm/brotli/command.go b/vendor/github.com/andybalholm/brotli/command.go new file mode 100644 index 00000000000..b1662a55552 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/command.go @@ -0,0 +1,254 @@ +package brotli + +var kInsBase = []uint32{ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 8, + 10, + 14, + 18, + 26, + 34, + 50, + 66, + 98, + 130, + 194, + 322, + 578, + 1090, + 2114, + 6210, + 22594, +} + +var kInsExtra = []uint32{ + 0, + 0, + 0, + 0, + 0, + 0, + 1, + 1, + 2, + 2, + 3, + 3, + 4, + 4, + 5, + 5, + 6, + 7, + 8, + 9, + 10, + 12, + 14, + 24, +} + +var kCopyBase = []uint32{ + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 12, + 14, + 18, + 22, + 30, + 38, + 54, + 70, + 102, + 134, + 198, + 326, + 582, + 1094, + 2118, +} + +var kCopyExtra = []uint32{ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1, + 1, + 2, + 2, + 3, + 3, + 4, + 4, + 5, + 5, + 6, + 7, + 8, + 9, + 10, + 24, +} + +func getInsertLengthCode(insertlen uint) uint16 { + if insertlen < 6 { + return uint16(insertlen) + } else if insertlen < 130 { + var nbits uint32 = log2FloorNonZero(insertlen-2) - 1 + return uint16((nbits << 1) + uint32((insertlen-2)>>nbits) + 2) + } else if insertlen < 2114 { + return uint16(log2FloorNonZero(insertlen-66) + 10) + } else if insertlen < 6210 { + return 21 + } else if insertlen < 22594 { + return 22 + } else { + return 23 + } +} + +func getCopyLengthCode(copylen uint) uint16 { + if copylen < 10 { + return uint16(copylen - 2) + } else if copylen < 134 { + var nbits uint32 = log2FloorNonZero(copylen-6) - 1 + return uint16((nbits << 1) + uint32((copylen-6)>>nbits) + 4) + } else if copylen < 2118 { + return uint16(log2FloorNonZero(copylen-70) + 12) + } else { + return 23 + } +} + +func combineLengthCodes(inscode uint16, copycode uint16, use_last_distance bool) uint16 { + var bits64 uint16 = uint16(copycode&0x7 | (inscode&0x7)<<3) + if use_last_distance && inscode < 8 && copycode < 16 { + if copycode < 8 { + return bits64 + } else { + return bits64 | 64 + } + } else { + /* Specification: 5 Encoding of ... (last table) */ + /* offset = 2 * index, where index is in range [0..8] */ + var offset uint32 = 2 * ((uint32(copycode) >> 3) + 3*(uint32(inscode)>>3)) + + /* All values in specification are K * 64, + where K = [2, 3, 6, 4, 5, 8, 7, 9, 10], + i + 1 = [1, 2, 3, 4, 5, 6, 7, 8, 9], + K - i - 1 = [1, 1, 3, 0, 0, 2, 0, 1, 2] = D. + All values in D require only 2 bits to encode. + Magic constant is shifted 6 bits left, to avoid final multiplication. */ + offset = (offset << 5) + 0x40 + ((0x520D40 >> offset) & 0xC0) + + return uint16(offset | uint32(bits64)) + } +} + +func getLengthCode(insertlen uint, copylen uint, use_last_distance bool, code *uint16) { + var inscode uint16 = getInsertLengthCode(insertlen) + var copycode uint16 = getCopyLengthCode(copylen) + *code = combineLengthCodes(inscode, copycode, use_last_distance) +} + +func getInsertBase(inscode uint16) uint32 { + return kInsBase[inscode] +} + +func getInsertExtra(inscode uint16) uint32 { + return kInsExtra[inscode] +} + +func getCopyBase(copycode uint16) uint32 { + return kCopyBase[copycode] +} + +func getCopyExtra(copycode uint16) uint32 { + return kCopyExtra[copycode] +} + +type command struct { + insert_len_ uint32 + copy_len_ uint32 + dist_extra_ uint32 + cmd_prefix_ uint16 + dist_prefix_ uint16 +} + +/* distance_code is e.g. 0 for same-as-last short code, or 16 for offset 1. 
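+   A worked check of the packing below: with copylen 10 and
+   copylen_code_delta -1, makeCommand stores the delta byte 0xFF in the top
+   bits of copy_len_; only seven of those bits survive the shift by 25, so
+   commandCopyLenCode re-extends the sign through bit 6 and recovers
+   10 + (-1) = 9.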
*/ +func makeCommand(dist *distanceParams, insertlen uint, copylen uint, copylen_code_delta int, distance_code uint) (cmd command) { + /* Don't rely on signed int representation, use honest casts. */ + var delta uint32 = uint32(byte(int8(copylen_code_delta))) + cmd.insert_len_ = uint32(insertlen) + cmd.copy_len_ = uint32(uint32(copylen) | delta<<25) + + /* The distance prefix and extra bits are stored in this Command as if + npostfix and ndirect were 0, they are only recomputed later after the + clustering if needed. */ + prefixEncodeCopyDistance(distance_code, uint(dist.num_direct_distance_codes), uint(dist.distance_postfix_bits), &cmd.dist_prefix_, &cmd.dist_extra_) + getLengthCode(insertlen, uint(int(copylen)+copylen_code_delta), (cmd.dist_prefix_&0x3FF == 0), &cmd.cmd_prefix_) + + return cmd +} + +func makeInsertCommand(insertlen uint) (cmd command) { + cmd.insert_len_ = uint32(insertlen) + cmd.copy_len_ = 4 << 25 + cmd.dist_extra_ = 0 + cmd.dist_prefix_ = numDistanceShortCodes + getLengthCode(insertlen, 4, false, &cmd.cmd_prefix_) + return cmd +} + +func commandRestoreDistanceCode(self *command, dist *distanceParams) uint32 { + if uint32(self.dist_prefix_&0x3FF) < numDistanceShortCodes+dist.num_direct_distance_codes { + return uint32(self.dist_prefix_) & 0x3FF + } else { + var dcode uint32 = uint32(self.dist_prefix_) & 0x3FF + var nbits uint32 = uint32(self.dist_prefix_) >> 10 + var extra uint32 = self.dist_extra_ + var postfix_mask uint32 = (1 << dist.distance_postfix_bits) - 1 + var hcode uint32 = (dcode - dist.num_direct_distance_codes - numDistanceShortCodes) >> dist.distance_postfix_bits + var lcode uint32 = (dcode - dist.num_direct_distance_codes - numDistanceShortCodes) & postfix_mask + var offset uint32 = ((2 + (hcode & 1)) << nbits) - 4 + return ((offset + extra) << dist.distance_postfix_bits) + lcode + dist.num_direct_distance_codes + numDistanceShortCodes + } +} + +func commandDistanceContext(self *command) uint32 { + var r uint32 = uint32(self.cmd_prefix_) >> 6 + var c uint32 = uint32(self.cmd_prefix_) & 7 + if (r == 0 || r == 2 || r == 4 || r == 7) && (c <= 2) { + return c + } + + return 3 +} + +func commandCopyLen(self *command) uint32 { + return self.copy_len_ & 0x1FFFFFF +} + +func commandCopyLenCode(self *command) uint32 { + var modifier uint32 = self.copy_len_ >> 25 + var delta int32 = int32(int8(byte(modifier | (modifier&0x40)<<1))) + return uint32(int32(self.copy_len_&0x1FFFFFF) + delta) +} diff --git a/vendor/github.com/andybalholm/brotli/compress_fragment.go b/vendor/github.com/andybalholm/brotli/compress_fragment.go new file mode 100644 index 00000000000..c9bd0577056 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/compress_fragment.go @@ -0,0 +1,834 @@ +package brotli + +import "encoding/binary" + +/* Copyright 2015 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Function for fast encoding of an input fragment, independently from the input + history. This function uses one-pass processing: when we find a backward + match, we immediately emit the corresponding command and literal codes to + the bit stream. 
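+   One-pass also means the prefix codes must exist before the data is
+   scanned: literal codes are built from a histogram of the raw input, while
+   the command and distance codes carried over from the previous block are
+   reused and then refreshed for the next one.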
+ + Adapted from the CompressFragment() function in + https://github.com/google/snappy/blob/master/snappy.cc */ + +const maxDistance_compress_fragment = 262128 + +func hash5(p []byte, shift uint) uint32 { + var h uint64 = (binary.LittleEndian.Uint64(p) << 24) * uint64(kHashMul32) + return uint32(h >> shift) +} + +func hashBytesAtOffset5(v uint64, offset int, shift uint) uint32 { + assert(offset >= 0) + assert(offset <= 3) + { + var h uint64 = ((v >> uint(8*offset)) << 24) * uint64(kHashMul32) + return uint32(h >> shift) + } +} + +func isMatch5(p1 []byte, p2 []byte) bool { + return binary.LittleEndian.Uint32(p1) == binary.LittleEndian.Uint32(p2) && + p1[4] == p2[4] +} + +/* Builds a literal prefix code into "depths" and "bits" based on the statistics + of the "input" string and stores it into the bit stream. + Note that the prefix code here is built from the pre-LZ77 input, therefore + we can only approximate the statistics of the actual literal stream. + Moreover, for long inputs we build a histogram from a sample of the input + and thus have to assign a non-zero depth for each literal. + Returns estimated compression ratio millibytes/char for encoding given input + with generated code. */ +func buildAndStoreLiteralPrefixCode(input []byte, input_size uint, depths []byte, bits []uint16, storage_ix *uint, storage []byte) uint { + var histogram = [256]uint32{0} + var histogram_total uint + var i uint + if input_size < 1<<15 { + for i = 0; i < input_size; i++ { + histogram[input[i]]++ + } + + histogram_total = input_size + for i = 0; i < 256; i++ { + /* We weigh the first 11 samples with weight 3 to account for the + balancing effect of the LZ77 phase on the histogram. */ + var adjust uint32 = 2 * brotli_min_uint32_t(histogram[i], 11) + histogram[i] += adjust + histogram_total += uint(adjust) + } + } else { + const kSampleRate uint = 29 + for i = 0; i < input_size; i += kSampleRate { + histogram[input[i]]++ + } + + histogram_total = (input_size + kSampleRate - 1) / kSampleRate + for i = 0; i < 256; i++ { + /* We add 1 to each population count to avoid 0 bit depths (since this is + only a sample and we don't know if the symbol appears or not), and we + weigh the first 11 samples with weight 3 to account for the balancing + effect of the LZ77 phase on the histogram (more frequent symbols are + more likely to be in backward references instead as literals). */ + var adjust uint32 = 1 + 2*brotli_min_uint32_t(histogram[i], 11) + histogram[i] += adjust + histogram_total += uint(adjust) + } + } + + buildAndStoreHuffmanTreeFast(histogram[:], histogram_total, /* max_bits = */ + 8, depths, bits, storage_ix, storage) + { + var literal_ratio uint = 0 + for i = 0; i < 256; i++ { + if histogram[i] != 0 { + literal_ratio += uint(histogram[i] * uint32(depths[i])) + } + } + + /* Estimated encoding ratio, millibytes per symbol. */ + return (literal_ratio * 125) / histogram_total + } +} + +/* Builds a command and distance prefix code (each 64 symbols) into "depth" and + "bits" based on "histogram" and stores it into the bit stream. */ +func buildAndStoreCommandPrefixCode1(histogram []uint32, depth []byte, bits []uint16, storage_ix *uint, storage []byte) { + var tree [129]huffmanTree + var cmd_depth = [numCommandSymbols]byte{0} + /* Tree size for building a tree over 64 symbols is 2 * 64 + 1. 
*/
+
+	var cmd_bits [64]uint16
+
+	createHuffmanTree(histogram, 64, 15, tree[:], depth)
+	createHuffmanTree(histogram[64:], 64, 14, tree[:], depth[64:])
+
+	/* We have to jump through a few hoops here in order to compute
+	   the command bits because the symbols are in a different order than in
+	   the full alphabet. This looks complicated, but having the symbols
+	   in this order in the command bits saves a few branches in the Emit*
+	   functions. */
+	copy(cmd_depth[:], depth[:24])
+
+	copy(cmd_depth[24:][:], depth[40:][:8])
+	copy(cmd_depth[32:][:], depth[24:][:8])
+	copy(cmd_depth[40:][:], depth[48:][:8])
+	copy(cmd_depth[48:][:], depth[32:][:8])
+	copy(cmd_depth[56:][:], depth[56:][:8])
+	convertBitDepthsToSymbols(cmd_depth[:], 64, cmd_bits[:])
+	copy(bits, cmd_bits[:24])
+	copy(bits[24:], cmd_bits[32:][:8])
+	copy(bits[32:], cmd_bits[48:][:8])
+	copy(bits[40:], cmd_bits[24:][:8])
+	copy(bits[48:], cmd_bits[40:][:8])
+	copy(bits[56:], cmd_bits[56:][:8])
+	convertBitDepthsToSymbols(depth[64:], 64, bits[64:])
+	{
+		/* Create the bit length array for the full command alphabet. */
+		var i uint
+		for i := 0; i < int(64); i++ {
+			cmd_depth[i] = 0
+		} /* only 64 first values were used */
+		copy(cmd_depth[:], depth[:8])
+		copy(cmd_depth[64:][:], depth[8:][:8])
+		copy(cmd_depth[128:][:], depth[16:][:8])
+		copy(cmd_depth[192:][:], depth[24:][:8])
+		copy(cmd_depth[384:][:], depth[32:][:8])
+		for i = 0; i < 8; i++ {
+			cmd_depth[128+8*i] = depth[40+i]
+			cmd_depth[256+8*i] = depth[48+i]
+			cmd_depth[448+8*i] = depth[56+i]
+		}
+
+		storeHuffmanTree(cmd_depth[:], numCommandSymbols, tree[:], storage_ix, storage)
+	}
+
+	storeHuffmanTree(depth[64:], 64, tree[:], storage_ix, storage)
+}
+
+/* REQUIRES: insertlen < 6210 */
+func emitInsertLen1(insertlen uint, depth []byte, bits []uint16, histo []uint32, storage_ix *uint, storage []byte) {
+	if insertlen < 6 {
+		var code uint = insertlen + 40
+		writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage)
+		histo[code]++
+	} else if insertlen < 130 {
+		var tail uint = insertlen - 2
+		var nbits uint32 = log2FloorNonZero(tail) - 1
+		var prefix uint = tail >> nbits
+		var inscode uint = uint((nbits << 1) + uint32(prefix) + 42)
+		writeBits(uint(depth[inscode]), uint64(bits[inscode]), storage_ix, storage)
+		writeBits(uint(nbits), uint64(tail)-(uint64(prefix)<<nbits), storage_ix, storage)
+		histo[inscode]++
+	} else if insertlen < 2114 {
+		var tail uint = insertlen - 66
+		var nbits uint32 = log2FloorNonZero(tail)
+		var code uint = uint(nbits + 50)
+		writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage)
+		writeBits(uint(nbits), uint64(tail)-(uint64(uint(1))<<nbits), storage_ix, storage)
+		histo[code]++
+	} else {
+		writeBits(uint(depth[61]), uint64(bits[61]), storage_ix, storage)
+		writeBits(12, uint64(insertlen)-2114, storage_ix, storage)
+		histo[61]++
+	}
+}
+
+func emitLongInsertLen(insertlen uint, depth []byte, bits []uint16, histo []uint32, storage_ix *uint, storage []byte) {
+	if insertlen < 22594 {
+		writeBits(uint(depth[62]), uint64(bits[62]), storage_ix, storage)
+		writeBits(14, uint64(insertlen)-6210, storage_ix, storage)
+		histo[62]++
+	} else {
+		writeBits(uint(depth[63]), uint64(bits[63]), storage_ix, storage)
+		writeBits(24, uint64(insertlen)-22594, storage_ix, storage)
+		histo[63]++
+	}
+}
+
+func emitCopyLen1(copylen uint, depth []byte, bits []uint16, histo []uint32, storage_ix *uint, storage []byte) {
+	if copylen < 10 {
+		var code uint = copylen + 14
+		writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage)
+		histo[code]++
+	} else if copylen < 134 {
+		var tail uint = copylen - 6
+		var nbits uint32 = log2FloorNonZero(tail) - 1
+		var prefix uint = tail >> nbits
+		var code uint = uint((nbits << 1) + uint32(prefix) + 20)
+		writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage)
+		writeBits(uint(nbits), uint64(tail)-(uint64(prefix)<<nbits), storage_ix, storage)
+		histo[code]++
+	} else if copylen < 2118 {
+		var tail uint = copylen - 70
+		var nbits uint32 = log2FloorNonZero(tail)
+		var code uint = uint(nbits + 28)
+		writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage)
+		writeBits(uint(nbits), uint64(tail)-(uint64(uint(1))<<nbits), storage_ix, storage)
+		histo[code]++
+	} else {
+		writeBits(uint(depth[39]), uint64(bits[39]), storage_ix, storage)
+		writeBits(24, uint64(copylen)-2118, storage_ix, storage)
+		histo[39]++
+	}
+}
+
+func emitCopyLenLastDistance1(copylen uint, depth []byte, bits []uint16, histo []uint32, storage_ix *uint, storage []byte) {
+	if copylen < 12 {
+		writeBits(uint(depth[copylen-4]), uint64(bits[copylen-4]), storage_ix, storage)
+		histo[copylen-4]++
+	} else if copylen < 72 {
+		var tail uint = copylen - 8
+		var nbits uint32 = log2FloorNonZero(tail) - 1
+		var prefix uint = tail >> nbits
+		var code uint = uint((nbits << 1) + uint32(prefix) + 4)
+		writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage)
+		writeBits(uint(nbits), uint64(tail)-(uint64(prefix)<<nbits), storage_ix, storage)
+		histo[code]++
+	} else if copylen < 136 {
+		var tail uint = copylen - 8
+		var code uint = (tail >> 5) + 30
+		writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage)
+		writeBits(5, uint64(tail)&31, storage_ix, storage)
+		writeBits(uint(depth[64]), uint64(bits[64]), storage_ix, storage)
+		histo[code]++
+		histo[64]++
+	} else if copylen < 2120 {
+		var tail uint = copylen - 72
+		var nbits uint32 = log2FloorNonZero(tail)
+		var code uint = uint(nbits + 28)
+		writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage)
+		writeBits(uint(nbits), uint64(tail)-(uint64(uint(1))<<nbits), storage_ix, storage)
+		writeBits(uint(depth[64]), uint64(bits[64]), storage_ix, storage)
+		histo[code]++
+		histo[64]++
+	} else {
+		writeBits(uint(depth[39]), uint64(bits[39]), storage_ix, storage)
+		writeBits(24, uint64(copylen)-2120, storage_ix, storage)
+		writeBits(uint(depth[64]), uint64(bits[64]), storage_ix, storage)
+		histo[39]++
+		histo[64]++
+	}
+}
+
+func emitDistance1(distance uint, depth []byte, bits []uint16, histo []uint32, storage_ix *uint, storage []byte) {
+	var d uint = distance + 3
+	var nbits uint32 = log2FloorNonZero(d) - 1
+	var prefix uint = (d >> nbits) & 1
+	var offset uint = (2 + prefix) << nbits
+	var distcode uint = uint(2*(nbits-1) + uint32(prefix) + 80)
+	writeBits(uint(depth[distcode]), uint64(bits[distcode]), storage_ix, storage)
+	writeBits(uint(nbits), uint64(d)-uint64(offset), storage_ix, storage)
+	histo[distcode]++
+}
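+
+/* Editor's sketch, not part of the upstream file: the same arithmetic as
+   emitDistance1 above, pulled out so the code/extra-bits split can be checked
+   in isolation. Assumes distance >= 1, as at the call sites, and inlines
+   log2FloorNonZero as a plain loop. */
+func exampleDistanceCode1(distance uint) (distcode uint, nbits uint32, extra uint64) {
+	var d uint = distance + 3 /* distance >= 1, so d >= 4 and nbits >= 1 */
+	var log uint32
+	for v := d >> 1; v != 0; v >>= 1 {
+		log++ /* log = floor(log2(d)) */
+	}
+	nbits = log - 1
+	var prefix uint = (d >> nbits) & 1
+	var offset uint = (2 + prefix) << nbits
+	distcode = uint(2*(nbits-1) + uint32(prefix) + 80)
+	extra = uint64(d) - uint64(offset)
+	return distcode, nbits, extra /* e.g. distance 1 -> code 80, one zero extra bit */
+}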
+
+func emitLiterals(input []byte, len uint, depth []byte, bits []uint16, storage_ix *uint, storage []byte) {
+	var j uint
+	for j = 0; j < len; j++ {
+		var lit byte = input[j]
+		writeBits(uint(depth[lit]), uint64(bits[lit]), storage_ix, storage)
+	}
+}
+
+/* REQUIRES: len <= 1 << 24. */
+func storeMetaBlockHeader1(len uint, is_uncompressed bool, storage_ix *uint, storage []byte) {
+	var nibbles uint = 6
+
+	/* ISLAST */
+	writeBits(1, 0, storage_ix, storage)
+
+	if len <= 1<<16 {
+		nibbles = 4
+	} else if len <= 1<<20 {
+		nibbles = 5
+	}
+
+	writeBits(2, uint64(nibbles)-4, storage_ix, storage)
+	writeBits(nibbles*4, uint64(len)-1, storage_ix, storage)
+
+	/* ISUNCOMPRESSED */
+	writeSingleBit(is_uncompressed, storage_ix, storage)
+}
+
+func updateBits(n_bits uint, bits uint32, pos uint, array []byte) {
+	for n_bits > 0 {
+		var byte_pos uint = pos >> 3
+		var n_unchanged_bits uint = pos & 7
+		var n_changed_bits uint = brotli_min_size_t(n_bits, 8-n_unchanged_bits)
+		var total_bits uint = n_unchanged_bits + n_changed_bits
+		var mask uint32 = (^((1 << total_bits) - 1)) | ((1 << n_unchanged_bits) - 1)
+		var unchanged_bits uint32 = uint32(array[byte_pos]) & mask
+		var changed_bits uint32 = bits & ((1 << n_changed_bits) - 1)
+		array[byte_pos] = byte(changed_bits<<n_unchanged_bits | unchanged_bits)
+		n_bits -= n_changed_bits
+		bits >>= n_changed_bits
+		pos += n_changed_bits
+	}
+}
+
+func rewindBitPosition1(new_storage_ix uint, storage_ix *uint, storage []byte) {
+	var bitpos uint = new_storage_ix & 7
+	var mask uint = (1 << bitpos) - 1
+	storage[new_storage_ix>>3] &= byte(mask)
+	*storage_ix = new_storage_ix
+}
+
+var shouldMergeBlock_kSampleRate uint = 43
+
+func shouldMergeBlock(data []byte, len uint, depths []byte) bool {
+	var histo = [256]uint{0}
+	var i uint
+	for i = 0; i < len; i += shouldMergeBlock_kSampleRate {
+		histo[data[i]]++
+	}
+	{
+		var total uint = (len + shouldMergeBlock_kSampleRate - 1) / shouldMergeBlock_kSampleRate
+		var r float64 = (fastLog2(total)+0.5)*float64(total) + 200
+		for i = 0; i < 256; i++ {
+			r -= float64(histo[i]) * (float64(depths[i]) + fastLog2(histo[i]))
+		}
+
+		return r >= 0.0
+	}
+}
+
+func shouldUseUncompressedMode(metablock_start []byte, next_emit []byte, insertlen uint, literal_ratio uint) bool {
+	var compressed uint = uint(-cap(next_emit) + cap(metablock_start))
+	if compressed*50 > insertlen {
+		return false
+	} else {
+		return literal_ratio > 980
+	}
+}
+
+func emitUncompressedMetaBlock1(begin []byte, end []byte, storage_ix_start uint, storage_ix *uint, storage []byte) {
+	var len uint = uint(-cap(end) + cap(begin))
+	rewindBitPosition1(storage_ix_start, storage_ix, storage)
+	storeMetaBlockHeader1(uint(len), true, storage_ix, storage)
+	*storage_ix = (*storage_ix + 7) &^ 7
+	copy(storage[*storage_ix>>3:], begin[:len])
+	*storage_ix += uint(len << 3)
+	storage[*storage_ix>>3] = 0
+}
+
+var kCmdHistoSeed = [128]uint32{
+	0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+	0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+	1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1,
+	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+	1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0,
+}
+
+var compressFragmentFastImpl_kFirstBlockSize uint = 3 << 15
+var compressFragmentFastImpl_kMergeBlockSize uint = 1 << 16
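+
+/* Editor's sketch, not part of the upstream file: a minimal check of the
+   updateBits patching above; bit positions are LSB-first within each byte,
+   matching writeBits. */
+func exampleUpdateBits() []byte {
+	buf := []byte{0x00, 0x00}
+	/* Patch an 8-bit value across the byte boundary at bit position 4. */
+	updateBits(8, 0xAB, 4, buf)
+	/* buf is now {0xB0, 0x0A}: the low nibble of 0xAB lands in the high
+	   nibble of buf[0], the high nibble in the low nibble of buf[1]. */
+	return buf
+}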
+ +func compressFragmentFastImpl(in []byte, input_size uint, is_last bool, table []int, table_bits uint, cmd_depth []byte, cmd_bits []uint16, cmd_code_numbits *uint, cmd_code []byte, storage_ix *uint, storage []byte) { + var cmd_histo [128]uint32 + var ip_end int + var next_emit int = 0 + var base_ip int = 0 + var input int = 0 + const kInputMarginBytes uint = windowGap + const kMinMatchLen uint = 5 + var metablock_start int = input + var block_size uint = brotli_min_size_t(input_size, compressFragmentFastImpl_kFirstBlockSize) + var total_block_size uint = block_size + var mlen_storage_ix uint = *storage_ix + 3 + var lit_depth [256]byte + var lit_bits [256]uint16 + var literal_ratio uint + var ip int + var last_distance int + var shift uint = 64 - table_bits + + /* "next_emit" is a pointer to the first byte that is not covered by a + previous copy. Bytes between "next_emit" and the start of the next copy or + the end of the input will be emitted as literal bytes. */ + + /* Save the start of the first block for position and distance computations. + */ + + /* Save the bit position of the MLEN field of the meta-block header, so that + we can update it later if we decide to extend this meta-block. */ + storeMetaBlockHeader1(block_size, false, storage_ix, storage) + + /* No block splits, no contexts. */ + writeBits(13, 0, storage_ix, storage) + + literal_ratio = buildAndStoreLiteralPrefixCode(in[input:], block_size, lit_depth[:], lit_bits[:], storage_ix, storage) + { + /* Store the pre-compressed command and distance prefix codes. */ + var i uint + for i = 0; i+7 < *cmd_code_numbits; i += 8 { + writeBits(8, uint64(cmd_code[i>>3]), storage_ix, storage) + } + } + + writeBits(*cmd_code_numbits&7, uint64(cmd_code[*cmd_code_numbits>>3]), storage_ix, storage) + + /* Initialize the command and distance histograms. We will gather + statistics of command and distance codes during the processing + of this block and use it to update the command and distance + prefix codes for the next block. */ +emit_commands: + copy(cmd_histo[:], kCmdHistoSeed[:]) + + /* "ip" is the input pointer. */ + ip = input + + last_distance = -1 + ip_end = int(uint(input) + block_size) + + if block_size >= kInputMarginBytes { + var len_limit uint = brotli_min_size_t(block_size-kMinMatchLen, input_size-kInputMarginBytes) + var ip_limit int = int(uint(input) + len_limit) + /* For the last block, we need to keep a 16 bytes margin so that we can be + sure that all distances are at most window size - 16. + For all other blocks, we only need to keep a margin of 5 bytes so that + we don't go over the block size with a copy. */ + + var next_hash uint32 + ip++ + for next_hash = hash5(in[ip:], shift); ; { + var skip uint32 = 32 + var next_ip int = ip + /* Step 1: Scan forward in the input looking for a 5-byte-long match. + If we get close to exhausting the input then goto emit_remainder. + + Heuristic match skipping: If 32 bytes are scanned with no matches + found, start looking only at every other byte. If 32 more bytes are + scanned, look at every third byte, etc.. When a match is found, + immediately go back to looking at every byte. This is a small loss + (~5% performance, ~0.1% density) for compressible data due to more + bookkeeping, but for non-compressible data (such as JPEG) it's a huge + win since the compressor quickly "realizes" the data is incompressible + and doesn't bother looking for matches everywhere. + + The "skip" variable keeps track of how many bytes there are since the + last match; dividing it by 32 (i.e. 
right-shifting by five) gives the + number of bytes to move ahead for each iteration. */ + + var candidate int + assert(next_emit < ip) + + trawl: + for { + var hash uint32 = next_hash + var bytes_between_hash_lookups uint32 = skip >> 5 + skip++ + assert(hash == hash5(in[next_ip:], shift)) + ip = next_ip + next_ip = int(uint32(ip) + bytes_between_hash_lookups) + if next_ip > ip_limit { + goto emit_remainder + } + + next_hash = hash5(in[next_ip:], shift) + candidate = ip - last_distance + if isMatch5(in[ip:], in[candidate:]) { + if candidate < ip { + table[hash] = int(ip - base_ip) + break + } + } + + candidate = base_ip + table[hash] + assert(candidate >= base_ip) + assert(candidate < ip) + + table[hash] = int(ip - base_ip) + if isMatch5(in[ip:], in[candidate:]) { + break + } + } + + /* Check copy distance. If candidate is not feasible, continue search. + Checking is done outside of hot loop to reduce overhead. */ + if ip-candidate > maxDistance_compress_fragment { + goto trawl + } + + /* Step 2: Emit the found match together with the literal bytes from + "next_emit" to the bit stream, and then see if we can find a next match + immediately afterwards. Repeat until we find no match for the input + without emitting some literal bytes. */ + { + var base int = ip + /* > 0 */ + var matched uint = 5 + findMatchLengthWithLimit(in[candidate+5:], in[ip+5:], uint(ip_end-ip)-5) + var distance int = int(base - candidate) + /* We have a 5-byte match at ip, and we need to emit bytes in + [next_emit, ip). */ + + var insert uint = uint(base - next_emit) + ip += int(matched) + if insert < 6210 { + emitInsertLen1(insert, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage) + } else if shouldUseUncompressedMode(in[metablock_start:], in[next_emit:], insert, literal_ratio) { + emitUncompressedMetaBlock1(in[metablock_start:], in[base:], mlen_storage_ix-3, storage_ix, storage) + input_size -= uint(base - input) + input = base + next_emit = input + goto next_block + } else { + emitLongInsertLen(insert, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage) + } + + emitLiterals(in[next_emit:], insert, lit_depth[:], lit_bits[:], storage_ix, storage) + if distance == last_distance { + writeBits(uint(cmd_depth[64]), uint64(cmd_bits[64]), storage_ix, storage) + cmd_histo[64]++ + } else { + emitDistance1(uint(distance), cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage) + last_distance = distance + } + + emitCopyLenLastDistance1(matched, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage) + + next_emit = ip + if ip >= ip_limit { + goto emit_remainder + } + + /* We could immediately start working at ip now, but to improve + compression we first update "table" with the hashes of some positions + within the last copy. */ + { + var input_bytes uint64 = binary.LittleEndian.Uint64(in[ip-3:]) + var prev_hash uint32 = hashBytesAtOffset5(input_bytes, 0, shift) + var cur_hash uint32 = hashBytesAtOffset5(input_bytes, 3, shift) + table[prev_hash] = int(ip - base_ip - 3) + prev_hash = hashBytesAtOffset5(input_bytes, 1, shift) + table[prev_hash] = int(ip - base_ip - 2) + prev_hash = hashBytesAtOffset5(input_bytes, 2, shift) + table[prev_hash] = int(ip - base_ip - 1) + + candidate = base_ip + table[cur_hash] + table[cur_hash] = int(ip - base_ip) + } + } + + for isMatch5(in[ip:], in[candidate:]) { + var base int = ip + /* We have a 5-byte match at ip, and no need to emit any literal bytes + prior to ip. 
*/ + + var matched uint = 5 + findMatchLengthWithLimit(in[candidate+5:], in[ip+5:], uint(ip_end-ip)-5) + if ip-candidate > maxDistance_compress_fragment { + break + } + ip += int(matched) + last_distance = int(base - candidate) /* > 0 */ + emitCopyLen1(matched, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage) + emitDistance1(uint(last_distance), cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage) + + next_emit = ip + if ip >= ip_limit { + goto emit_remainder + } + + /* We could immediately start working at ip now, but to improve + compression we first update "table" with the hashes of some positions + within the last copy. */ + { + var input_bytes uint64 = binary.LittleEndian.Uint64(in[ip-3:]) + var prev_hash uint32 = hashBytesAtOffset5(input_bytes, 0, shift) + var cur_hash uint32 = hashBytesAtOffset5(input_bytes, 3, shift) + table[prev_hash] = int(ip - base_ip - 3) + prev_hash = hashBytesAtOffset5(input_bytes, 1, shift) + table[prev_hash] = int(ip - base_ip - 2) + prev_hash = hashBytesAtOffset5(input_bytes, 2, shift) + table[prev_hash] = int(ip - base_ip - 1) + + candidate = base_ip + table[cur_hash] + table[cur_hash] = int(ip - base_ip) + } + } + + ip++ + next_hash = hash5(in[ip:], shift) + } + } + +emit_remainder: + assert(next_emit <= ip_end) + input += int(block_size) + input_size -= block_size + block_size = brotli_min_size_t(input_size, compressFragmentFastImpl_kMergeBlockSize) + + /* Decide if we want to continue this meta-block instead of emitting the + last insert-only command. */ + if input_size > 0 && total_block_size+block_size <= 1<<20 && shouldMergeBlock(in[input:], block_size, lit_depth[:]) { + assert(total_block_size > 1<<16) + + /* Update the size of the current meta-block and continue emitting commands. + We can do this because the current size and the new size both have 5 + nibbles. */ + total_block_size += block_size + + updateBits(20, uint32(total_block_size-1), mlen_storage_ix, storage) + goto emit_commands + } + + /* Emit the remaining bytes as literals. */ + if next_emit < ip_end { + var insert uint = uint(ip_end - next_emit) + if insert < 6210 { + emitInsertLen1(insert, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage) + emitLiterals(in[next_emit:], insert, lit_depth[:], lit_bits[:], storage_ix, storage) + } else if shouldUseUncompressedMode(in[metablock_start:], in[next_emit:], insert, literal_ratio) { + emitUncompressedMetaBlock1(in[metablock_start:], in[ip_end:], mlen_storage_ix-3, storage_ix, storage) + } else { + emitLongInsertLen(insert, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage) + emitLiterals(in[next_emit:], insert, lit_depth[:], lit_bits[:], storage_ix, storage) + } + } + + next_emit = ip_end + + /* If we have more data, write a new meta-block header and prefix codes and + then continue emitting commands. */ +next_block: + if input_size > 0 { + metablock_start = input + block_size = brotli_min_size_t(input_size, compressFragmentFastImpl_kFirstBlockSize) + total_block_size = block_size + + /* Save the bit position of the MLEN field of the meta-block header, so that + we can update it later if we decide to extend this meta-block. */ + mlen_storage_ix = *storage_ix + 3 + + storeMetaBlockHeader1(block_size, false, storage_ix, storage) + + /* No block splits, no contexts. 
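+	   (Editor's reading: the 13 zero bits select one block type for each of
+	   the three categories, NPOSTFIX = NDIRECT = 0, the LSB6 literal context
+	   mode, and the two trivial context maps.)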
*/ + writeBits(13, 0, storage_ix, storage) + + literal_ratio = buildAndStoreLiteralPrefixCode(in[input:], block_size, lit_depth[:], lit_bits[:], storage_ix, storage) + buildAndStoreCommandPrefixCode1(cmd_histo[:], cmd_depth, cmd_bits, storage_ix, storage) + goto emit_commands + } + + if !is_last { + /* If this is not the last block, update the command and distance prefix + codes for the next block and store the compressed forms. */ + cmd_code[0] = 0 + + *cmd_code_numbits = 0 + buildAndStoreCommandPrefixCode1(cmd_histo[:], cmd_depth, cmd_bits, cmd_code_numbits, cmd_code) + } +} + +/* Compresses "input" string to the "*storage" buffer as one or more complete + meta-blocks, and updates the "*storage_ix" bit position. + + If "is_last" is 1, emits an additional empty last meta-block. + + "cmd_depth" and "cmd_bits" contain the command and distance prefix codes + (see comment in encode.h) used for the encoding of this input fragment. + If "is_last" is 0, they are updated to reflect the statistics + of this input fragment, to be used for the encoding of the next fragment. + + "*cmd_code_numbits" is the number of bits of the compressed representation + of the command and distance prefix codes, and "cmd_code" is an array of + at least "(*cmd_code_numbits + 7) >> 3" size that contains the compressed + command and distance prefix codes. If "is_last" is 0, these are also + updated to represent the updated "cmd_depth" and "cmd_bits". + + REQUIRES: "input_size" is greater than zero, or "is_last" is 1. + REQUIRES: "input_size" is less or equal to maximal metablock size (1 << 24). + REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero. + REQUIRES: "table_size" is an odd (9, 11, 13, 15) power of two + OUTPUT: maximal copy distance <= |input_size| + OUTPUT: maximal copy distance <= BROTLI_MAX_BACKWARD_LIMIT(18) */ +func compressFragmentFast(input []byte, input_size uint, is_last bool, table []int, table_size uint, cmd_depth []byte, cmd_bits []uint16, cmd_code_numbits *uint, cmd_code []byte, storage_ix *uint, storage []byte) { + var initial_storage_ix uint = *storage_ix + var table_bits uint = uint(log2FloorNonZero(table_size)) + + if input_size == 0 { + assert(is_last) + writeBits(1, 1, storage_ix, storage) /* islast */ + writeBits(1, 1, storage_ix, storage) /* isempty */ + *storage_ix = (*storage_ix + 7) &^ 7 + return + } + + compressFragmentFastImpl(input, input_size, is_last, table, table_bits, cmd_depth, cmd_bits, cmd_code_numbits, cmd_code, storage_ix, storage) + + /* If output is larger than single uncompressed block, rewrite it. */ + if *storage_ix-initial_storage_ix > 31+(input_size<<3) { + emitUncompressedMetaBlock1(input, input[input_size:], initial_storage_ix, storage_ix, storage) + } + + if is_last { + writeBits(1, 1, storage_ix, storage) /* islast */ + writeBits(1, 1, storage_ix, storage) /* isempty */ + *storage_ix = (*storage_ix + 7) &^ 7 + } +} diff --git a/vendor/github.com/andybalholm/brotli/compress_fragment_two_pass.go b/vendor/github.com/andybalholm/brotli/compress_fragment_two_pass.go new file mode 100644 index 00000000000..172dc7f4607 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/compress_fragment_two_pass.go @@ -0,0 +1,748 @@ +package brotli + +import "encoding/binary" + +/* Copyright 2015 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Function for fast encoding of an input fragment, independently from the input + history. 
This function uses two-pass processing: in the first pass we save + the found backward matches and literal bytes into a buffer, and in the + second pass we emit them into the bit stream using prefix codes built based + on the actual command and literal byte histograms. */ + +const kCompressFragmentTwoPassBlockSize uint = 1 << 17 + +func hash1(p []byte, shift uint, length uint) uint32 { + var h uint64 = (binary.LittleEndian.Uint64(p) << ((8 - length) * 8)) * uint64(kHashMul32) + return uint32(h >> shift) +} + +func hashBytesAtOffset(v uint64, offset uint, shift uint, length uint) uint32 { + assert(offset <= 8-length) + { + var h uint64 = ((v >> (8 * offset)) << ((8 - length) * 8)) * uint64(kHashMul32) + return uint32(h >> shift) + } +} + +func isMatch1(p1 []byte, p2 []byte, length uint) bool { + if binary.LittleEndian.Uint32(p1) != binary.LittleEndian.Uint32(p2) { + return false + } + if length == 4 { + return true + } + return p1[4] == p2[4] && p1[5] == p2[5] +} + +/* Builds a command and distance prefix code (each 64 symbols) into "depth" and + "bits" based on "histogram" and stores it into the bit stream. */ +func buildAndStoreCommandPrefixCode(histogram []uint32, depth []byte, bits []uint16, storage_ix *uint, storage []byte) { + var tree [129]huffmanTree + var cmd_depth = [numCommandSymbols]byte{0} + /* Tree size for building a tree over 64 symbols is 2 * 64 + 1. */ + + var cmd_bits [64]uint16 + createHuffmanTree(histogram, 64, 15, tree[:], depth) + createHuffmanTree(histogram[64:], 64, 14, tree[:], depth[64:]) + + /* We have to jump through a few hoops here in order to compute + the command bits because the symbols are in a different order than in + the full alphabet. This looks complicated, but having the symbols + in this order in the command bits saves a few branches in the Emit* + functions. */ + copy(cmd_depth[:], depth[24:][:24]) + + copy(cmd_depth[24:][:], depth[:8]) + copy(cmd_depth[32:][:], depth[48:][:8]) + copy(cmd_depth[40:][:], depth[8:][:8]) + copy(cmd_depth[48:][:], depth[56:][:8]) + copy(cmd_depth[56:][:], depth[16:][:8]) + convertBitDepthsToSymbols(cmd_depth[:], 64, cmd_bits[:]) + copy(bits, cmd_bits[24:][:8]) + copy(bits[8:], cmd_bits[40:][:8]) + copy(bits[16:], cmd_bits[56:][:8]) + copy(bits[24:], cmd_bits[:24]) + copy(bits[48:], cmd_bits[32:][:8]) + copy(bits[56:], cmd_bits[48:][:8]) + convertBitDepthsToSymbols(depth[64:], 64, bits[64:]) + { + /* Create the bit length array for the full command alphabet. 
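+	   Only the first 64 depth values were populated above; the copies below
+	   scatter them across the full numCommandSymbols-sized alphabet expected
+	   by storeHuffmanTree.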
*/ + var i uint + for i := 0; i < int(64); i++ { + cmd_depth[i] = 0 + } /* only 64 first values were used */ + copy(cmd_depth[:], depth[24:][:8]) + copy(cmd_depth[64:][:], depth[32:][:8]) + copy(cmd_depth[128:][:], depth[40:][:8]) + copy(cmd_depth[192:][:], depth[48:][:8]) + copy(cmd_depth[384:][:], depth[56:][:8]) + for i = 0; i < 8; i++ { + cmd_depth[128+8*i] = depth[i] + cmd_depth[256+8*i] = depth[8+i] + cmd_depth[448+8*i] = depth[16+i] + } + + storeHuffmanTree(cmd_depth[:], numCommandSymbols, tree[:], storage_ix, storage) + } + + storeHuffmanTree(depth[64:], 64, tree[:], storage_ix, storage) +} + +func emitInsertLen(insertlen uint32, commands *[]uint32) { + if insertlen < 6 { + (*commands)[0] = insertlen + } else if insertlen < 130 { + var tail uint32 = insertlen - 2 + var nbits uint32 = log2FloorNonZero(uint(tail)) - 1 + var prefix uint32 = tail >> nbits + var inscode uint32 = (nbits << 1) + prefix + 2 + var extra uint32 = tail - (prefix << nbits) + (*commands)[0] = inscode | extra<<8 + } else if insertlen < 2114 { + var tail uint32 = insertlen - 66 + var nbits uint32 = log2FloorNonZero(uint(tail)) + var code uint32 = nbits + 10 + var extra uint32 = tail - (1 << nbits) + (*commands)[0] = code | extra<<8 + } else if insertlen < 6210 { + var extra uint32 = insertlen - 2114 + (*commands)[0] = 21 | extra<<8 + } else if insertlen < 22594 { + var extra uint32 = insertlen - 6210 + (*commands)[0] = 22 | extra<<8 + } else { + var extra uint32 = insertlen - 22594 + (*commands)[0] = 23 | extra<<8 + } + + *commands = (*commands)[1:] +} + +func emitCopyLen(copylen uint, commands *[]uint32) { + if copylen < 10 { + (*commands)[0] = uint32(copylen + 38) + } else if copylen < 134 { + var tail uint = copylen - 6 + var nbits uint = uint(log2FloorNonZero(tail) - 1) + var prefix uint = tail >> nbits + var code uint = (nbits << 1) + prefix + 44 + var extra uint = tail - (prefix << nbits) + (*commands)[0] = uint32(code | extra<<8) + } else if copylen < 2118 { + var tail uint = copylen - 70 + var nbits uint = uint(log2FloorNonZero(tail)) + var code uint = nbits + 52 + var extra uint = tail - (uint(1) << nbits) + (*commands)[0] = uint32(code | extra<<8) + } else { + var extra uint = copylen - 2118 + (*commands)[0] = uint32(63 | extra<<8) + } + + *commands = (*commands)[1:] +} + +func emitCopyLenLastDistance(copylen uint, commands *[]uint32) { + if copylen < 12 { + (*commands)[0] = uint32(copylen + 20) + *commands = (*commands)[1:] + } else if copylen < 72 { + var tail uint = copylen - 8 + var nbits uint = uint(log2FloorNonZero(tail) - 1) + var prefix uint = tail >> nbits + var code uint = (nbits << 1) + prefix + 28 + var extra uint = tail - (prefix << nbits) + (*commands)[0] = uint32(code | extra<<8) + *commands = (*commands)[1:] + } else if copylen < 136 { + var tail uint = copylen - 8 + var code uint = (tail >> 5) + 54 + var extra uint = tail & 31 + (*commands)[0] = uint32(code | extra<<8) + *commands = (*commands)[1:] + (*commands)[0] = 64 + *commands = (*commands)[1:] + } else if copylen < 2120 { + var tail uint = copylen - 72 + var nbits uint = uint(log2FloorNonZero(tail)) + var code uint = nbits + 52 + var extra uint = tail - (uint(1) << nbits) + (*commands)[0] = uint32(code | extra<<8) + *commands = (*commands)[1:] + (*commands)[0] = 64 + *commands = (*commands)[1:] + } else { + var extra uint = copylen - 2120 + (*commands)[0] = uint32(63 | extra<<8) + *commands = (*commands)[1:] + (*commands)[0] = 64 + *commands = (*commands)[1:] + } +} + +func emitDistance(distance uint32, commands *[]uint32) { + var 
d uint32 = distance + 3 + var nbits uint32 = log2FloorNonZero(uint(d)) - 1 + var prefix uint32 = (d >> nbits) & 1 + var offset uint32 = (2 + prefix) << nbits + var distcode uint32 = 2*(nbits-1) + prefix + 80 + var extra uint32 = d - offset + (*commands)[0] = distcode | extra<<8 + *commands = (*commands)[1:] +} + +/* REQUIRES: len <= 1 << 24. */ +func storeMetaBlockHeader(len uint, is_uncompressed bool, storage_ix *uint, storage []byte) { + var nibbles uint = 6 + + /* ISLAST */ + writeBits(1, 0, storage_ix, storage) + + if len <= 1<<16 { + nibbles = 4 + } else if len <= 1<<20 { + nibbles = 5 + } + + writeBits(2, uint64(nibbles)-4, storage_ix, storage) + writeBits(nibbles*4, uint64(len)-1, storage_ix, storage) + + /* ISUNCOMPRESSED */ + writeSingleBit(is_uncompressed, storage_ix, storage) +} + +func createCommands(input []byte, block_size uint, input_size uint, base_ip_ptr []byte, table []int, table_bits uint, min_match uint, literals *[]byte, commands *[]uint32) { + var ip int = 0 + var shift uint = 64 - table_bits + var ip_end int = int(block_size) + var base_ip int = -cap(base_ip_ptr) + cap(input) + var next_emit int = 0 + var last_distance int = -1 + /* "ip" is the input pointer. */ + + const kInputMarginBytes uint = windowGap + + /* "next_emit" is a pointer to the first byte that is not covered by a + previous copy. Bytes between "next_emit" and the start of the next copy or + the end of the input will be emitted as literal bytes. */ + if block_size >= kInputMarginBytes { + var len_limit uint = brotli_min_size_t(block_size-min_match, input_size-kInputMarginBytes) + var ip_limit int = int(len_limit) + /* For the last block, we need to keep a 16 bytes margin so that we can be + sure that all distances are at most window size - 16. + For all other blocks, we only need to keep a margin of 5 bytes so that + we don't go over the block size with a copy. */ + + var next_hash uint32 + ip++ + for next_hash = hash1(input[ip:], shift, min_match); ; { + var skip uint32 = 32 + var next_ip int = ip + /* Step 1: Scan forward in the input looking for a 6-byte-long match. + If we get close to exhausting the input then goto emit_remainder. + + Heuristic match skipping: If 32 bytes are scanned with no matches + found, start looking only at every other byte. If 32 more bytes are + scanned, look at every third byte, etc.. When a match is found, + immediately go back to looking at every byte. This is a small loss + (~5% performance, ~0.1% density) for compressible data due to more + bookkeeping, but for non-compressible data (such as JPEG) it's a huge + win since the compressor quickly "realizes" the data is incompressible + and doesn't bother looking for matches everywhere. + + The "skip" variable keeps track of how many bytes there are since the + last match; dividing it by 32 (ie. right-shifting by five) gives the + number of bytes to move ahead for each iteration. 
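+	   For example, after 64 consecutive misses skip has grown from 32 to 96,
+	   so each probe advances by 96 >> 5 = 3 bytes.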
*/ + + var candidate int + + assert(next_emit < ip) + + trawl: + for { + var hash uint32 = next_hash + var bytes_between_hash_lookups uint32 = skip >> 5 + skip++ + ip = next_ip + assert(hash == hash1(input[ip:], shift, min_match)) + next_ip = int(uint32(ip) + bytes_between_hash_lookups) + if next_ip > ip_limit { + goto emit_remainder + } + + next_hash = hash1(input[next_ip:], shift, min_match) + candidate = ip - last_distance + if isMatch1(input[ip:], base_ip_ptr[candidate-base_ip:], min_match) { + if candidate < ip { + table[hash] = int(ip - base_ip) + break + } + } + + candidate = base_ip + table[hash] + assert(candidate >= base_ip) + assert(candidate < ip) + + table[hash] = int(ip - base_ip) + if isMatch1(input[ip:], base_ip_ptr[candidate-base_ip:], min_match) { + break + } + } + + /* Check copy distance. If candidate is not feasible, continue search. + Checking is done outside of hot loop to reduce overhead. */ + if ip-candidate > maxDistance_compress_fragment { + goto trawl + } + + /* Step 2: Emit the found match together with the literal bytes from + "next_emit", and then see if we can find a next match immediately + afterwards. Repeat until we find no match for the input + without emitting some literal bytes. */ + { + var base int = ip + /* > 0 */ + var matched uint = min_match + findMatchLengthWithLimit(base_ip_ptr[uint(candidate-base_ip)+min_match:], input[uint(ip)+min_match:], uint(ip_end-ip)-min_match) + var distance int = int(base - candidate) + /* We have a 6-byte match at ip, and we need to emit bytes in + [next_emit, ip). */ + + var insert int = int(base - next_emit) + ip += int(matched) + emitInsertLen(uint32(insert), commands) + copy(*literals, input[next_emit:][:uint(insert)]) + *literals = (*literals)[insert:] + if distance == last_distance { + (*commands)[0] = 64 + *commands = (*commands)[1:] + } else { + emitDistance(uint32(distance), commands) + last_distance = distance + } + + emitCopyLenLastDistance(matched, commands) + + next_emit = ip + if ip >= ip_limit { + goto emit_remainder + } + { + var input_bytes uint64 + var cur_hash uint32 + /* We could immediately start working at ip now, but to improve + compression we first update "table" with the hashes of some + positions within the last copy. 
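+
+       For min_match == 4 that is the three positions ip-3, ip-2 and ip-1,
+       all covered by one 8-byte load; for min_match == 6 it is the five
+       positions ip-5 .. ip-1, split across two loads (this restates the
+       two branches below).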
*/
+
+			var prev_hash uint32
+			if min_match == 4 {
+				input_bytes = binary.LittleEndian.Uint64(input[ip-3:])
+				cur_hash = hashBytesAtOffset(input_bytes, 3, shift, min_match)
+				/* Re-hash positions ip-3, ip-2 and ip-1 (offsets 0..2 of the load). */
+				prev_hash = hashBytesAtOffset(input_bytes, 0, shift, min_match)
+				table[prev_hash] = int(ip - base_ip - 3)
+				prev_hash = hashBytesAtOffset(input_bytes, 1, shift, min_match)
+				table[prev_hash] = int(ip - base_ip - 2)
+				prev_hash = hashBytesAtOffset(input_bytes, 2, shift, min_match)
+				table[prev_hash] = int(ip - base_ip - 1)
+			} else {
+				input_bytes = binary.LittleEndian.Uint64(input[ip-5:])
+				prev_hash = hashBytesAtOffset(input_bytes, 0, shift, min_match)
+				table[prev_hash] = int(ip - base_ip - 5)
+				prev_hash = hashBytesAtOffset(input_bytes, 1, shift, min_match)
+				table[prev_hash] = int(ip - base_ip - 4)
+				prev_hash = hashBytesAtOffset(input_bytes, 2, shift, min_match)
+				table[prev_hash] = int(ip - base_ip - 3)
+				input_bytes = binary.LittleEndian.Uint64(input[ip-2:])
+				cur_hash = hashBytesAtOffset(input_bytes, 2, shift, min_match)
+				prev_hash = hashBytesAtOffset(input_bytes, 0, shift, min_match)
+				table[prev_hash] = int(ip - base_ip - 2)
+				prev_hash = hashBytesAtOffset(input_bytes, 1, shift, min_match)
+				table[prev_hash] = int(ip - base_ip - 1)
+			}
+
+			candidate = base_ip + table[cur_hash]
+			table[cur_hash] = int(ip - base_ip)
+		}
+	}
+
+	for ip-candidate <= maxDistance_compress_fragment && isMatch1(input[ip:], base_ip_ptr[candidate-base_ip:], min_match) {
+		var base int = ip
+		/* We have a 6-byte match at ip, and no need to emit any
+		   literal bytes prior to ip. */
+
+		var matched uint = min_match + findMatchLengthWithLimit(base_ip_ptr[uint(candidate-base_ip)+min_match:], input[uint(ip)+min_match:], uint(ip_end-ip)-min_match)
+		ip += int(matched)
+		last_distance = int(base - candidate) /* > 0 */
+		emitCopyLen(matched, commands)
+		emitDistance(uint32(last_distance), commands)
+
+		next_emit = ip
+		if ip >= ip_limit {
+			goto emit_remainder
+		}
+		{
+			var input_bytes uint64
+			var cur_hash uint32
+			/* We could immediately start working at ip now, but to improve
+			   compression we first update "table" with the hashes of some
+			   positions within the last copy.
*/ + + var prev_hash uint32 + if min_match == 4 { + input_bytes = binary.LittleEndian.Uint64(input[ip-3:]) + cur_hash = hashBytesAtOffset(input_bytes, 3, shift, min_match) + prev_hash = hashBytesAtOffset(input_bytes, 0, shift, min_match) + table[prev_hash] = int(ip - base_ip - 3) + prev_hash = hashBytesAtOffset(input_bytes, 1, shift, min_match) + table[prev_hash] = int(ip - base_ip - 2) + prev_hash = hashBytesAtOffset(input_bytes, 2, shift, min_match) + table[prev_hash] = int(ip - base_ip - 1) + } else { + input_bytes = binary.LittleEndian.Uint64(input[ip-5:]) + prev_hash = hashBytesAtOffset(input_bytes, 0, shift, min_match) + table[prev_hash] = int(ip - base_ip - 5) + prev_hash = hashBytesAtOffset(input_bytes, 1, shift, min_match) + table[prev_hash] = int(ip - base_ip - 4) + prev_hash = hashBytesAtOffset(input_bytes, 2, shift, min_match) + table[prev_hash] = int(ip - base_ip - 3) + input_bytes = binary.LittleEndian.Uint64(input[ip-2:]) + cur_hash = hashBytesAtOffset(input_bytes, 2, shift, min_match) + prev_hash = hashBytesAtOffset(input_bytes, 0, shift, min_match) + table[prev_hash] = int(ip - base_ip - 2) + prev_hash = hashBytesAtOffset(input_bytes, 1, shift, min_match) + table[prev_hash] = int(ip - base_ip - 1) + } + + candidate = base_ip + table[cur_hash] + table[cur_hash] = int(ip - base_ip) + } + } + + ip++ + next_hash = hash1(input[ip:], shift, min_match) + } + } + +emit_remainder: + assert(next_emit <= ip_end) + + /* Emit the remaining bytes as literals. */ + if next_emit < ip_end { + var insert uint32 = uint32(ip_end - next_emit) + emitInsertLen(insert, commands) + copy(*literals, input[next_emit:][:insert]) + *literals = (*literals)[insert:] + } +} + +var storeCommands_kNumExtraBits = [128]uint32{ + 0, + 0, + 0, + 0, + 0, + 0, + 1, + 1, + 2, + 2, + 3, + 3, + 4, + 4, + 5, + 5, + 6, + 7, + 8, + 9, + 10, + 12, + 14, + 24, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1, + 1, + 2, + 2, + 3, + 3, + 4, + 4, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1, + 1, + 2, + 2, + 3, + 3, + 4, + 4, + 5, + 5, + 6, + 7, + 8, + 9, + 10, + 24, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1, + 1, + 2, + 2, + 3, + 3, + 4, + 4, + 5, + 5, + 6, + 6, + 7, + 7, + 8, + 8, + 9, + 9, + 10, + 10, + 11, + 11, + 12, + 12, + 13, + 13, + 14, + 14, + 15, + 15, + 16, + 16, + 17, + 17, + 18, + 18, + 19, + 19, + 20, + 20, + 21, + 21, + 22, + 22, + 23, + 23, + 24, + 24, +} +var storeCommands_kInsertOffset = [24]uint32{ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 8, + 10, + 14, + 18, + 26, + 34, + 50, + 66, + 98, + 130, + 194, + 322, + 578, + 1090, + 2114, + 6210, + 22594, +} + +func storeCommands(literals []byte, num_literals uint, commands []uint32, num_commands uint, storage_ix *uint, storage []byte) { + var lit_depths [256]byte + var lit_bits [256]uint16 + var lit_histo = [256]uint32{0} + var cmd_depths = [128]byte{0} + var cmd_bits = [128]uint16{0} + var cmd_histo = [128]uint32{0} + var i uint + for i = 0; i < num_literals; i++ { + lit_histo[literals[i]]++ + } + + buildAndStoreHuffmanTreeFast(lit_histo[:], num_literals, /* max_bits = */ + 8, lit_depths[:], lit_bits[:], storage_ix, storage) + + for i = 0; i < num_commands; i++ { + var code uint32 = commands[i] & 0xFF + assert(code < 128) + cmd_histo[code]++ + } + + cmd_histo[1] += 1 + cmd_histo[2] += 1 + cmd_histo[64] += 1 + cmd_histo[84] += 1 + buildAndStoreCommandPrefixCode(cmd_histo[:], cmd_depths[:], cmd_bits[:], storage_ix, storage) + + for i = 0; i < num_commands; i++ { + var cmd uint32 = commands[i] + var code uint32 = cmd & 0xFF + var 
extra uint32 = cmd >> 8 + assert(code < 128) + writeBits(uint(cmd_depths[code]), uint64(cmd_bits[code]), storage_ix, storage) + writeBits(uint(storeCommands_kNumExtraBits[code]), uint64(extra), storage_ix, storage) + if code < 24 { + var insert uint32 = storeCommands_kInsertOffset[code] + extra + var j uint32 + for j = 0; j < insert; j++ { + var lit byte = literals[0] + writeBits(uint(lit_depths[lit]), uint64(lit_bits[lit]), storage_ix, storage) + literals = literals[1:] + } + } + } +} + +/* Acceptable loss for uncompressible speedup is 2% */ +const minRatio = 0.98 + +const sampleRate = 43 + +func shouldCompress(input []byte, input_size uint, num_literals uint) bool { + var corpus_size float64 = float64(input_size) + if float64(num_literals) < minRatio*corpus_size { + return true + } else { + var literal_histo = [256]uint32{0} + var max_total_bit_cost float64 = corpus_size * 8 * minRatio / sampleRate + var i uint + for i = 0; i < input_size; i += sampleRate { + literal_histo[input[i]]++ + } + + return bitsEntropy(literal_histo[:], 256) < max_total_bit_cost + } +} + +func rewindBitPosition(new_storage_ix uint, storage_ix *uint, storage []byte) { + var bitpos uint = new_storage_ix & 7 + var mask uint = (1 << bitpos) - 1 + storage[new_storage_ix>>3] &= byte(mask) + *storage_ix = new_storage_ix +} + +func emitUncompressedMetaBlock(input []byte, input_size uint, storage_ix *uint, storage []byte) { + storeMetaBlockHeader(input_size, true, storage_ix, storage) + *storage_ix = (*storage_ix + 7) &^ 7 + copy(storage[*storage_ix>>3:], input[:input_size]) + *storage_ix += input_size << 3 + storage[*storage_ix>>3] = 0 +} + +func compressFragmentTwoPassImpl(input []byte, input_size uint, is_last bool, command_buf []uint32, literal_buf []byte, table []int, table_bits uint, min_match uint, storage_ix *uint, storage []byte) { + /* Save the start of the first block for position and distance computations. + */ + var base_ip []byte = input + + for input_size > 0 { + var block_size uint = brotli_min_size_t(input_size, kCompressFragmentTwoPassBlockSize) + var commands []uint32 = command_buf + var literals []byte = literal_buf + var num_literals uint + createCommands(input, block_size, input_size, base_ip, table, table_bits, min_match, &literals, &commands) + num_literals = uint(-cap(literals) + cap(literal_buf)) + if shouldCompress(input, block_size, num_literals) { + var num_commands uint = uint(-cap(commands) + cap(command_buf)) + storeMetaBlockHeader(block_size, false, storage_ix, storage) + + /* No block splits, no contexts. */ + writeBits(13, 0, storage_ix, storage) + + storeCommands(literal_buf, num_literals, command_buf, num_commands, storage_ix, storage) + } else { + /* Since we did not find many backward references and the entropy of + the data is close to 8 bits, we can simply emit an uncompressed block. + This makes compression speed of uncompressible data about 3x faster. */ + emitUncompressedMetaBlock(input, block_size, storage_ix, storage) + } + + input = input[block_size:] + input_size -= block_size + } +} + +/* Compresses "input" string to the "*storage" buffer as one or more complete + meta-blocks, and updates the "*storage_ix" bit position. + + If "is_last" is 1, emits an additional empty last meta-block. + + REQUIRES: "input_size" is greater than zero, or "is_last" is 1. + REQUIRES: "input_size" is less or equal to maximal metablock size (1 << 24). + REQUIRES: "command_buf" and "literal_buf" point to at least + kCompressFragmentTwoPassBlockSize long arrays. 
+ REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero. + REQUIRES: "table_size" is a power of two + OUTPUT: maximal copy distance <= |input_size| + OUTPUT: maximal copy distance <= BROTLI_MAX_BACKWARD_LIMIT(18) */ +func compressFragmentTwoPass(input []byte, input_size uint, is_last bool, command_buf []uint32, literal_buf []byte, table []int, table_size uint, storage_ix *uint, storage []byte) { + var initial_storage_ix uint = *storage_ix + var table_bits uint = uint(log2FloorNonZero(table_size)) + var min_match uint + if table_bits <= 15 { + min_match = 4 + } else { + min_match = 6 + } + compressFragmentTwoPassImpl(input, input_size, is_last, command_buf, literal_buf, table, table_bits, min_match, storage_ix, storage) + + /* If output is larger than single uncompressed block, rewrite it. */ + if *storage_ix-initial_storage_ix > 31+(input_size<<3) { + rewindBitPosition(initial_storage_ix, storage_ix, storage) + emitUncompressedMetaBlock(input, input_size, storage_ix, storage) + } + + if is_last { + writeBits(1, 1, storage_ix, storage) /* islast */ + writeBits(1, 1, storage_ix, storage) /* isempty */ + *storage_ix = (*storage_ix + 7) &^ 7 + } +} diff --git a/vendor/github.com/andybalholm/brotli/constants.go b/vendor/github.com/andybalholm/brotli/constants.go new file mode 100644 index 00000000000..a880dff789d --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/constants.go @@ -0,0 +1,77 @@ +package brotli + +/* Copyright 2016 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Specification: 7.3. Encoding of the context map */ +const contextMapMaxRle = 16 + +/* Specification: 2. Compressed representation overview */ +const maxNumberOfBlockTypes = 256 + +/* Specification: 3.3. Alphabet sizes: insert-and-copy length */ +const numLiteralSymbols = 256 + +const numCommandSymbols = 704 + +const numBlockLenSymbols = 26 + +const maxContextMapSymbols = (maxNumberOfBlockTypes + contextMapMaxRle) + +const maxBlockTypeSymbols = (maxNumberOfBlockTypes + 2) + +/* Specification: 3.5. Complex prefix codes */ +const repeatPreviousCodeLength = 16 + +const repeatZeroCodeLength = 17 + +const codeLengthCodes = (repeatZeroCodeLength + 1) + +/* "code length of 8 is repeated" */ +const initialRepeatedCodeLength = 8 + +/* "Large Window Brotli" */ +const largeMaxDistanceBits = 62 + +const largeMinWbits = 10 + +const largeMaxWbits = 30 + +/* Specification: 4. Encoding of distances */ +const numDistanceShortCodes = 16 + +const maxNpostfix = 3 + +const maxNdirect = 120 + +const maxDistanceBits = 24 + +func distanceAlphabetSize(NPOSTFIX uint, NDIRECT uint, MAXNBITS uint) uint { + return numDistanceShortCodes + NDIRECT + uint(MAXNBITS<<(NPOSTFIX+1)) +} + +/* numDistanceSymbols == 1128 */ +const numDistanceSymbols = 1128 + +const maxDistance = 0x3FFFFFC + +const maxAllowedDistance = 0x7FFFFFFC + +/* 7.1. Context modes and context ID lookup for literals */ +/* "context IDs for literals are in the range of 0..63" */ +const literalContextBits = 6 + +/* 7.2. Context ID for distances */ +const distanceContextBits = 2 + +/* 9.1. Format of the Stream Header */ +/* Number of slack bytes for window size. Don't confuse + with BROTLI_NUM_DISTANCE_SHORT_CODES. 
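+
+   As an example of the helper below (numbers derived from the formula, not
+   quoted from the spec): for a window of W == 22 bits,
+   maxBackwardLimit(22) == (1 << 22) - 16 == 4194288, i.e. backward
+   references always stop at least windowGap bytes short of the full
+   window.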
*/ +const windowGap = 16 + +func maxBackwardLimit(W uint) uint { + return (uint(1) << W) - windowGap +} diff --git a/vendor/github.com/andybalholm/brotli/context.go b/vendor/github.com/andybalholm/brotli/context.go new file mode 100644 index 00000000000..884ff8a2d69 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/context.go @@ -0,0 +1,2176 @@ +package brotli + +/* Lookup table to map the previous two bytes to a context id. + +There are four different context modeling modes defined here: + contextLSB6: context id is the least significant 6 bits of the last byte, + contextMSB6: context id is the most significant 6 bits of the last byte, + contextUTF8: second-order context model tuned for UTF8-encoded text, + contextSigned: second-order context model tuned for signed integers. + +If |p1| and |p2| are the previous two bytes, and |mode| is current context +mode, we calculate the context as: + + context = ContextLut(mode)[p1] | ContextLut(mode)[p2 + 256]. + +For contextUTF8 mode, if the previous two bytes are ASCII characters +(i.e. < 128), this will be equivalent to + + context = 4 * context1(p1) + context2(p2), + +where context1 is based on the previous byte in the following way: + + 0 : non-ASCII control + 1 : \t, \n, \r + 2 : space + 3 : other punctuation + 4 : " ' + 5 : % + 6 : ( < [ { + 7 : ) > ] } + 8 : , ; : + 9 : . + 10 : = + 11 : number + 12 : upper-case vowel + 13 : upper-case consonant + 14 : lower-case vowel + 15 : lower-case consonant + +and context2 is based on the second last byte: + + 0 : control, space + 1 : punctuation + 2 : upper-case letter, number + 3 : lower-case letter + +If the last byte is ASCII, and the second last byte is not (in a valid UTF8 +stream it will be a continuation byte, value between 128 and 191), the +context is the same as if the second last byte was an ASCII control or space. + +If the last byte is a UTF8 lead byte (value >= 192), then the next byte will +be a continuation byte and the context id is 2 or 3 depending on the LSB of +the last byte and to a lesser extent on the second last byte if it is ASCII. + +If the last byte is a UTF8 continuation byte, the second last byte can be: + - continuation byte: the next byte is probably ASCII or lead byte (assuming + 4-byte UTF8 characters are rare) and the context id is 0 or 1. + - lead byte (192 - 207): next byte is ASCII or lead byte, context is 0 or 1 + - lead byte (208 - 255): next byte is continuation byte, context is 2 or 3 + +The possible value combinations of the previous two bytes, the range of +context ids and the type of the next byte is summarized in the table below: + +|--------\-----------------------------------------------------------------| +| \ Last byte | +| Second \---------------------------------------------------------------| +| last byte \ ASCII | cont. byte | lead byte | +| \ (0-127) | (128-191) | (192-) | +|=============|===================|=====================|==================| +| ASCII | next: ASCII/lead | not valid | next: cont. | +| (0-127) | context: 4 - 63 | | context: 2 - 3 | +|-------------|-------------------|---------------------|------------------| +| cont. byte | next: ASCII/lead | next: ASCII/lead | next: cont. 
| +| (128-191) | context: 4 - 63 | context: 0 - 1 | context: 2 - 3 | +|-------------|-------------------|---------------------|------------------| +| lead byte | not valid | next: ASCII/lead | not valid | +| (192-207) | | context: 0 - 1 | | +|-------------|-------------------|---------------------|------------------| +| lead byte | not valid | next: cont. | not valid | +| (208-) | | context: 2 - 3 | | +|-------------|-------------------|---------------------|------------------| +*/ + +const ( + contextLSB6 = 0 + contextMSB6 = 1 + contextUTF8 = 2 + contextSigned = 3 +) + +/* Common context lookup table for all context modes. */ +var kContextLookup = [2048]byte{ + /* CONTEXT_LSB6, last byte. */ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63, + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63, + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63, + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63, + + /* CONTEXT_LSB6, second last byte, */ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + + /* CONTEXT_MSB6, last byte. 
*/ + 0, + 0, + 0, + 0, + 1, + 1, + 1, + 1, + 2, + 2, + 2, + 2, + 3, + 3, + 3, + 3, + 4, + 4, + 4, + 4, + 5, + 5, + 5, + 5, + 6, + 6, + 6, + 6, + 7, + 7, + 7, + 7, + 8, + 8, + 8, + 8, + 9, + 9, + 9, + 9, + 10, + 10, + 10, + 10, + 11, + 11, + 11, + 11, + 12, + 12, + 12, + 12, + 13, + 13, + 13, + 13, + 14, + 14, + 14, + 14, + 15, + 15, + 15, + 15, + 16, + 16, + 16, + 16, + 17, + 17, + 17, + 17, + 18, + 18, + 18, + 18, + 19, + 19, + 19, + 19, + 20, + 20, + 20, + 20, + 21, + 21, + 21, + 21, + 22, + 22, + 22, + 22, + 23, + 23, + 23, + 23, + 24, + 24, + 24, + 24, + 25, + 25, + 25, + 25, + 26, + 26, + 26, + 26, + 27, + 27, + 27, + 27, + 28, + 28, + 28, + 28, + 29, + 29, + 29, + 29, + 30, + 30, + 30, + 30, + 31, + 31, + 31, + 31, + 32, + 32, + 32, + 32, + 33, + 33, + 33, + 33, + 34, + 34, + 34, + 34, + 35, + 35, + 35, + 35, + 36, + 36, + 36, + 36, + 37, + 37, + 37, + 37, + 38, + 38, + 38, + 38, + 39, + 39, + 39, + 39, + 40, + 40, + 40, + 40, + 41, + 41, + 41, + 41, + 42, + 42, + 42, + 42, + 43, + 43, + 43, + 43, + 44, + 44, + 44, + 44, + 45, + 45, + 45, + 45, + 46, + 46, + 46, + 46, + 47, + 47, + 47, + 47, + 48, + 48, + 48, + 48, + 49, + 49, + 49, + 49, + 50, + 50, + 50, + 50, + 51, + 51, + 51, + 51, + 52, + 52, + 52, + 52, + 53, + 53, + 53, + 53, + 54, + 54, + 54, + 54, + 55, + 55, + 55, + 55, + 56, + 56, + 56, + 56, + 57, + 57, + 57, + 57, + 58, + 58, + 58, + 58, + 59, + 59, + 59, + 59, + 60, + 60, + 60, + 60, + 61, + 61, + 61, + 61, + 62, + 62, + 62, + 62, + 63, + 63, + 63, + 63, + + /* CONTEXT_MSB6, second last byte, */ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + + /* CONTEXT_UTF8, last byte. */ + /* ASCII range. 
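+      (Each value is 4 * context1 from the description above: e.g. space
+      has context1 == 2 and entry 8, while 'e', a lower-case vowel, has
+      context1 == 14 and entry 56.)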
*/ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 4, + 4, + 0, + 0, + 4, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8, + 12, + 16, + 12, + 12, + 20, + 12, + 16, + 24, + 28, + 12, + 12, + 32, + 12, + 36, + 12, + 44, + 44, + 44, + 44, + 44, + 44, + 44, + 44, + 44, + 44, + 32, + 32, + 24, + 40, + 28, + 12, + 12, + 48, + 52, + 52, + 52, + 48, + 52, + 52, + 52, + 48, + 52, + 52, + 52, + 52, + 52, + 48, + 52, + 52, + 52, + 52, + 52, + 48, + 52, + 52, + 52, + 52, + 52, + 24, + 12, + 28, + 12, + 12, + 12, + 56, + 60, + 60, + 60, + 56, + 60, + 60, + 60, + 56, + 60, + 60, + 60, + 60, + 60, + 56, + 60, + 60, + 60, + 60, + 60, + 56, + 60, + 60, + 60, + 60, + 60, + 24, + 12, + 28, + 12, + 0, + + /* UTF8 continuation byte range. */ + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + + /* UTF8 lead byte range. */ + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + + /* CONTEXT_UTF8 second last byte. */ + /* ASCII range. */ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 1, + 1, + 1, + 1, + 1, + 1, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 1, + 1, + 1, + 1, + 0, + + /* UTF8 continuation byte range. */ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + + /* UTF8 lead byte range. */ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + + /* CONTEXT_SIGNED, last byte, same as the above values shifted by 3 bits. 
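+      Combined with the matching second-last-byte block further down, the
+      final context is 8*q(p1) + q(p2), where q() is shorthand (not a
+      function in this file) for the eight-bucket quantization these tables
+      implement: the last-byte entries are multiples of 8 up to 56 and the
+      second-last entries are 0..7, so the bitwise OR in getContext never
+      sees overlapping bits.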
*/ + 0, + 8, + 8, + 8, + 8, + 8, + 8, + 8, + 8, + 8, + 8, + 8, + 8, + 8, + 8, + 8, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 48, + 48, + 48, + 48, + 48, + 48, + 48, + 48, + 48, + 48, + 48, + 48, + 48, + 48, + 48, + 56, + + /* CONTEXT_SIGNED, second last byte. */ + 0, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 7, +} + +type contextLUT []byte + +func getContextLUT(mode int) contextLUT { + return kContextLookup[mode<<9:] +} + +func getContext(p1 byte, p2 byte, lut contextLUT) byte { + return lut[p1] | lut[256+int(p2)] +} diff --git a/vendor/github.com/andybalholm/brotli/decode.go b/vendor/github.com/andybalholm/brotli/decode.go new file mode 100644 index 00000000000..6a73b88af20 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/decode.go @@ -0,0 +1,2586 @@ +package brotli + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. 
+ See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +const ( + decoderResultError = 0 + decoderResultSuccess = 1 + decoderResultNeedsMoreInput = 2 + decoderResultNeedsMoreOutput = 3 +) + +/** + * Error code for detailed logging / production debugging. + * + * See ::BrotliDecoderGetErrorCode and ::BROTLI_LAST_ERROR_CODE. + */ +const ( + decoderNoError = 0 + decoderSuccess = 1 + decoderNeedsMoreInput = 2 + decoderNeedsMoreOutput = 3 + decoderErrorFormatExuberantNibble = -1 + decoderErrorFormatReserved = -2 + decoderErrorFormatExuberantMetaNibble = -3 + decoderErrorFormatSimpleHuffmanAlphabet = -4 + decoderErrorFormatSimpleHuffmanSame = -5 + decoderErrorFormatClSpace = -6 + decoderErrorFormatHuffmanSpace = -7 + decoderErrorFormatContextMapRepeat = -8 + decoderErrorFormatBlockLength1 = -9 + decoderErrorFormatBlockLength2 = -10 + decoderErrorFormatTransform = -11 + decoderErrorFormatDictionary = -12 + decoderErrorFormatWindowBits = -13 + decoderErrorFormatPadding1 = -14 + decoderErrorFormatPadding2 = -15 + decoderErrorFormatDistance = -16 + decoderErrorDictionaryNotSet = -19 + decoderErrorInvalidArguments = -20 + decoderErrorAllocContextModes = -21 + decoderErrorAllocTreeGroups = -22 + decoderErrorAllocContextMap = -25 + decoderErrorAllocRingBuffer1 = -26 + decoderErrorAllocRingBuffer2 = -27 + decoderErrorAllocBlockTypeTrees = -30 + decoderErrorUnreachable = -31 +) + +const huffmanTableBits = 8 + +const huffmanTableMask = 0xFF + +/* We need the slack region for the following reasons: + - doing up to two 16-byte copies for fast backward copying + - inserting transformed dictionary word (5 prefix + 24 base + 8 suffix) */ +const kRingBufferWriteAheadSlack uint32 = 42 + +var kCodeLengthCodeOrder = [codeLengthCodes]byte{1, 2, 3, 4, 0, 5, 17, 6, 16, 7, 8, 9, 10, 11, 12, 13, 14, 15} + +/* Static prefix code for the complex code length code lengths. */ +var kCodeLengthPrefixLength = [16]byte{2, 2, 2, 3, 2, 2, 2, 4, 2, 2, 2, 3, 2, 2, 2, 4} + +var kCodeLengthPrefixValue = [16]byte{0, 4, 3, 2, 0, 4, 3, 1, 0, 4, 3, 2, 0, 4, 3, 5} + +/* Saves error code and converts it to BrotliDecoderResult. */ +func saveErrorCode(s *Reader, e int) int { + s.error_code = int(e) + switch e { + case decoderSuccess: + return decoderResultSuccess + + case decoderNeedsMoreInput: + return decoderResultNeedsMoreInput + + case decoderNeedsMoreOutput: + return decoderResultNeedsMoreOutput + + default: + return decoderResultError + } +} + +/* Decodes WBITS by reading 1 - 7 bits, or 0x11 for "Large Window Brotli". + Precondition: bit-reader accumulator has at least 8 bits. */ +func decodeWindowBits(s *Reader, br *bitReader) int { + var n uint32 + var large_window bool = s.large_window + s.large_window = false + takeBits(br, 1, &n) + if n == 0 { + s.window_bits = 16 + return decoderSuccess + } + + takeBits(br, 3, &n) + if n != 0 { + s.window_bits = 17 + n + return decoderSuccess + } + + takeBits(br, 3, &n) + if n == 1 { + if large_window { + takeBits(br, 1, &n) + if n == 1 { + return decoderErrorFormatWindowBits + } + + s.large_window = true + return decoderSuccess + } else { + return decoderErrorFormatWindowBits + } + } + + if n != 0 { + s.window_bits = 8 + n + return decoderSuccess + } + + s.window_bits = 17 + return decoderSuccess +} + +/* Decodes a number in the range [0..255], by reading 1 - 11 bits. 
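+   The encoding, restated from the state machine below: a leading 0 bit
+   means value 0; otherwise a 3-bit n follows, where n == 0 gives value 1
+   and n > 0 gives value (1 << n) + (n extra bits). E.g. the bits 1,
+   n == 3, extra == 5 decode to 8 + 5 == 13.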
*/ +func decodeVarLenUint8(s *Reader, br *bitReader, value *uint32) int { + var bits uint32 + switch s.substate_decode_uint8 { + case stateDecodeUint8None: + if !safeReadBits(br, 1, &bits) { + return decoderNeedsMoreInput + } + + if bits == 0 { + *value = 0 + return decoderSuccess + } + fallthrough + + /* Fall through. */ + case stateDecodeUint8Short: + if !safeReadBits(br, 3, &bits) { + s.substate_decode_uint8 = stateDecodeUint8Short + return decoderNeedsMoreInput + } + + if bits == 0 { + *value = 1 + s.substate_decode_uint8 = stateDecodeUint8None + return decoderSuccess + } + + /* Use output value as a temporary storage. It MUST be persisted. */ + *value = bits + fallthrough + + /* Fall through. */ + case stateDecodeUint8Long: + if !safeReadBits(br, *value, &bits) { + s.substate_decode_uint8 = stateDecodeUint8Long + return decoderNeedsMoreInput + } + + *value = (1 << *value) + bits + s.substate_decode_uint8 = stateDecodeUint8None + return decoderSuccess + + default: + return decoderErrorUnreachable + } +} + +/* Decodes a metablock length and flags by reading 2 - 31 bits. */ +func decodeMetaBlockLength(s *Reader, br *bitReader) int { + var bits uint32 + var i int + for { + switch s.substate_metablock_header { + case stateMetablockHeaderNone: + if !safeReadBits(br, 1, &bits) { + return decoderNeedsMoreInput + } + + if bits != 0 { + s.is_last_metablock = 1 + } else { + s.is_last_metablock = 0 + } + s.meta_block_remaining_len = 0 + s.is_uncompressed = 0 + s.is_metadata = 0 + if s.is_last_metablock == 0 { + s.substate_metablock_header = stateMetablockHeaderNibbles + break + } + + s.substate_metablock_header = stateMetablockHeaderEmpty + fallthrough + + /* Fall through. */ + case stateMetablockHeaderEmpty: + if !safeReadBits(br, 1, &bits) { + return decoderNeedsMoreInput + } + + if bits != 0 { + s.substate_metablock_header = stateMetablockHeaderNone + return decoderSuccess + } + + s.substate_metablock_header = stateMetablockHeaderNibbles + fallthrough + + /* Fall through. */ + case stateMetablockHeaderNibbles: + if !safeReadBits(br, 2, &bits) { + return decoderNeedsMoreInput + } + + s.size_nibbles = uint(byte(bits + 4)) + s.loop_counter = 0 + if bits == 3 { + s.is_metadata = 1 + s.substate_metablock_header = stateMetablockHeaderReserved + break + } + + s.substate_metablock_header = stateMetablockHeaderSize + fallthrough + + /* Fall through. */ + case stateMetablockHeaderSize: + i = s.loop_counter + + for ; i < int(s.size_nibbles); i++ { + if !safeReadBits(br, 4, &bits) { + s.loop_counter = i + return decoderNeedsMoreInput + } + + if uint(i+1) == s.size_nibbles && s.size_nibbles > 4 && bits == 0 { + return decoderErrorFormatExuberantNibble + } + + s.meta_block_remaining_len |= int(bits << uint(i*4)) + } + + s.substate_metablock_header = stateMetablockHeaderUncompressed + fallthrough + + /* Fall through. */ + case stateMetablockHeaderUncompressed: + if s.is_last_metablock == 0 { + if !safeReadBits(br, 1, &bits) { + return decoderNeedsMoreInput + } + + if bits != 0 { + s.is_uncompressed = 1 + } else { + s.is_uncompressed = 0 + } + } + + s.meta_block_remaining_len++ + s.substate_metablock_header = stateMetablockHeaderNone + return decoderSuccess + + case stateMetablockHeaderReserved: + if !safeReadBits(br, 1, &bits) { + return decoderNeedsMoreInput + } + + if bits != 0 { + return decoderErrorFormatReserved + } + + s.substate_metablock_header = stateMetablockHeaderBytes + fallthrough + + /* Fall through. 
*/ + case stateMetablockHeaderBytes: + if !safeReadBits(br, 2, &bits) { + return decoderNeedsMoreInput + } + + if bits == 0 { + s.substate_metablock_header = stateMetablockHeaderNone + return decoderSuccess + } + + s.size_nibbles = uint(byte(bits)) + s.substate_metablock_header = stateMetablockHeaderMetadata + fallthrough + + /* Fall through. */ + case stateMetablockHeaderMetadata: + i = s.loop_counter + + for ; i < int(s.size_nibbles); i++ { + if !safeReadBits(br, 8, &bits) { + s.loop_counter = i + return decoderNeedsMoreInput + } + + if uint(i+1) == s.size_nibbles && s.size_nibbles > 1 && bits == 0 { + return decoderErrorFormatExuberantMetaNibble + } + + s.meta_block_remaining_len |= int(bits << uint(i*8)) + } + + s.meta_block_remaining_len++ + s.substate_metablock_header = stateMetablockHeaderNone + return decoderSuccess + + default: + return decoderErrorUnreachable + } + } +} + +/* Decodes the Huffman code. + This method doesn't read data from the bit reader, BUT drops the amount of + bits that correspond to the decoded symbol. + bits MUST contain at least 15 (BROTLI_HUFFMAN_MAX_CODE_LENGTH) valid bits. */ +func decodeSymbol(bits uint32, table []huffmanCode, br *bitReader) uint32 { + table = table[bits&huffmanTableMask:] + if table[0].bits > huffmanTableBits { + var nbits uint32 = uint32(table[0].bits) - huffmanTableBits + dropBits(br, huffmanTableBits) + table = table[uint32(table[0].value)+((bits>>huffmanTableBits)&bitMask(nbits)):] + } + + dropBits(br, uint32(table[0].bits)) + return uint32(table[0].value) +} + +/* Reads and decodes the next Huffman code from bit-stream. + This method peeks 16 bits of input and drops 0 - 15 of them. */ +func readSymbol(table []huffmanCode, br *bitReader) uint32 { + return decodeSymbol(get16BitsUnmasked(br), table, br) +} + +/* Same as DecodeSymbol, but it is known that there is less than 15 bits of + input are currently available. */ +func safeDecodeSymbol(table []huffmanCode, br *bitReader, result *uint32) bool { + var val uint32 + var available_bits uint32 = getAvailableBits(br) + if available_bits == 0 { + if table[0].bits == 0 { + *result = uint32(table[0].value) + return true + } + + return false /* No valid bits at all. */ + } + + val = uint32(getBitsUnmasked(br)) + table = table[val&huffmanTableMask:] + if table[0].bits <= huffmanTableBits { + if uint32(table[0].bits) <= available_bits { + dropBits(br, uint32(table[0].bits)) + *result = uint32(table[0].value) + return true + } else { + return false /* Not enough bits for the first level. */ + } + } + + if available_bits <= huffmanTableBits { + return false /* Not enough bits to move to the second level. */ + } + + /* Speculatively drop HUFFMAN_TABLE_BITS. */ + val = (val & bitMask(uint32(table[0].bits))) >> huffmanTableBits + + available_bits -= huffmanTableBits + table = table[uint32(table[0].value)+val:] + if available_bits < uint32(table[0].bits) { + return false /* Not enough bits for the second level. */ + } + + dropBits(br, huffmanTableBits+uint32(table[0].bits)) + *result = uint32(table[0].value) + return true +} + +func safeReadSymbol(table []huffmanCode, br *bitReader, result *uint32) bool { + var val uint32 + if safeGetBits(br, 15, &val) { + *result = decodeSymbol(val, table, br) + return true + } + + return safeDecodeSymbol(table, br, result) +} + +/* Makes a look-up in first level Huffman table. Peeks 8 bits. 
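+   (The root table covers huffmanTableBits == 8 bits of input; for longer
+   codes, table[0].value is the offset of a second-level table and the bits
+   beyond the first eight select the entry there, exactly as decodeSymbol
+   above does.)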
*/ +func preloadSymbol(safe int, table []huffmanCode, br *bitReader, bits *uint32, value *uint32) { + if safe != 0 { + return + } + + table = table[getBits(br, huffmanTableBits):] + *bits = uint32(table[0].bits) + *value = uint32(table[0].value) +} + +/* Decodes the next Huffman code using data prepared by PreloadSymbol. + Reads 0 - 15 bits. Also peeks 8 following bits. */ +func readPreloadedSymbol(table []huffmanCode, br *bitReader, bits *uint32, value *uint32) uint32 { + var result uint32 = *value + var ext []huffmanCode + if *bits > huffmanTableBits { + var val uint32 = get16BitsUnmasked(br) + ext = table[val&huffmanTableMask:][*value:] + var mask uint32 = bitMask((*bits - huffmanTableBits)) + dropBits(br, huffmanTableBits) + ext = ext[(val>>huffmanTableBits)&mask:] + dropBits(br, uint32(ext[0].bits)) + result = uint32(ext[0].value) + } else { + dropBits(br, *bits) + } + + preloadSymbol(0, table, br, bits, value) + return result +} + +func log2Floor(x uint32) uint32 { + var result uint32 = 0 + for x != 0 { + x >>= 1 + result++ + } + + return result +} + +/* Reads (s->symbol + 1) symbols. + Totally 1..4 symbols are read, 1..11 bits each. + The list of symbols MUST NOT contain duplicates. */ +func readSimpleHuffmanSymbols(alphabet_size uint32, max_symbol uint32, s *Reader) int { + var br *bitReader = &s.br + var max_bits uint32 = log2Floor(alphabet_size - 1) + var i uint32 = s.sub_loop_counter + /* max_bits == 1..11; symbol == 0..3; 1..44 bits will be read. */ + + var num_symbols uint32 = s.symbol + for i <= num_symbols { + var v uint32 + if !safeReadBits(br, max_bits, &v) { + s.sub_loop_counter = i + s.substate_huffman = stateHuffmanSimpleRead + return decoderNeedsMoreInput + } + + if v >= max_symbol { + return decoderErrorFormatSimpleHuffmanAlphabet + } + + s.symbols_lists_array[i] = uint16(v) + i++ + } + + for i = 0; i < num_symbols; i++ { + var k uint32 = i + 1 + for ; k <= num_symbols; k++ { + if s.symbols_lists_array[i] == s.symbols_lists_array[k] { + return decoderErrorFormatSimpleHuffmanSame + } + } + } + + return decoderSuccess +} + +/* Process single decoded symbol code length: + A) reset the repeat variable + B) remember code length (if it is not 0) + C) extend corresponding index-chain + D) reduce the Huffman space + E) update the histogram */ +func processSingleCodeLength(code_len uint32, symbol *uint32, repeat *uint32, space *uint32, prev_code_len *uint32, symbol_lists symbolList, code_length_histo []uint16, next_symbol []int) { + *repeat = 0 + if code_len != 0 { /* code_len == 1..15 */ + symbolListPut(symbol_lists, next_symbol[code_len], uint16(*symbol)) + next_symbol[code_len] = int(*symbol) + *prev_code_len = code_len + *space -= 32768 >> code_len + code_length_histo[code_len]++ + } + + (*symbol)++ +} + +/* Process repeated symbol code length. 
+ A) Check if it is the extension of previous repeat sequence; if the decoded + value is not BROTLI_REPEAT_PREVIOUS_CODE_LENGTH, then it is a new + symbol-skip + B) Update repeat variable + C) Check if operation is feasible (fits alphabet) + D) For each symbol do the same operations as in ProcessSingleCodeLength + + PRECONDITION: code_len == BROTLI_REPEAT_PREVIOUS_CODE_LENGTH or + code_len == BROTLI_REPEAT_ZERO_CODE_LENGTH */ +func processRepeatedCodeLength(code_len uint32, repeat_delta uint32, alphabet_size uint32, symbol *uint32, repeat *uint32, space *uint32, prev_code_len *uint32, repeat_code_len *uint32, symbol_lists symbolList, code_length_histo []uint16, next_symbol []int) { + var old_repeat uint32 /* for BROTLI_REPEAT_ZERO_CODE_LENGTH */ /* for BROTLI_REPEAT_ZERO_CODE_LENGTH */ + var extra_bits uint32 = 3 + var new_len uint32 = 0 + if code_len == repeatPreviousCodeLength { + new_len = *prev_code_len + extra_bits = 2 + } + + if *repeat_code_len != new_len { + *repeat = 0 + *repeat_code_len = new_len + } + + old_repeat = *repeat + if *repeat > 0 { + *repeat -= 2 + *repeat <<= extra_bits + } + + *repeat += repeat_delta + 3 + repeat_delta = *repeat - old_repeat + if *symbol+repeat_delta > alphabet_size { + *symbol = alphabet_size + *space = 0xFFFFF + return + } + + if *repeat_code_len != 0 { + var last uint = uint(*symbol + repeat_delta) + var next int = next_symbol[*repeat_code_len] + for { + symbolListPut(symbol_lists, next, uint16(*symbol)) + next = int(*symbol) + (*symbol)++ + if (*symbol) == uint32(last) { + break + } + } + + next_symbol[*repeat_code_len] = next + *space -= repeat_delta << (15 - *repeat_code_len) + code_length_histo[*repeat_code_len] = uint16(uint32(code_length_histo[*repeat_code_len]) + repeat_delta) + } else { + *symbol += repeat_delta + } +} + +/* Reads and decodes symbol codelengths. */ +func readSymbolCodeLengths(alphabet_size uint32, s *Reader) int { + var br *bitReader = &s.br + var symbol uint32 = s.symbol + var repeat uint32 = s.repeat + var space uint32 = s.space + var prev_code_len uint32 = s.prev_code_len + var repeat_code_len uint32 = s.repeat_code_len + var symbol_lists symbolList = s.symbol_lists + var code_length_histo []uint16 = s.code_length_histo[:] + var next_symbol []int = s.next_symbol[:] + if !warmupBitReader(br) { + return decoderNeedsMoreInput + } + var p []huffmanCode + for symbol < alphabet_size && space > 0 { + p = s.table[:] + var code_len uint32 + if !checkInputAmount(br, shortFillBitWindowRead) { + s.symbol = symbol + s.repeat = repeat + s.prev_code_len = prev_code_len + s.repeat_code_len = repeat_code_len + s.space = space + return decoderNeedsMoreInput + } + + fillBitWindow16(br) + p = p[getBitsUnmasked(br)&uint64(bitMask(huffmanMaxCodeLengthCodeLength)):] + dropBits(br, uint32(p[0].bits)) /* Use 1..5 bits. 
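+   (at most huffmanMaxCodeLengthCodeLength == 5 of them, which is why the
+   table lookup above masks only that many bits)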
*/ + code_len = uint32(p[0].value) /* code_len == 0..17 */ + if code_len < repeatPreviousCodeLength { + processSingleCodeLength(code_len, &symbol, &repeat, &space, &prev_code_len, symbol_lists, code_length_histo, next_symbol) /* code_len == 16..17, extra_bits == 2..3 */ + } else { + var extra_bits uint32 + if code_len == repeatPreviousCodeLength { + extra_bits = 2 + } else { + extra_bits = 3 + } + var repeat_delta uint32 = uint32(getBitsUnmasked(br)) & bitMask(extra_bits) + dropBits(br, extra_bits) + processRepeatedCodeLength(code_len, repeat_delta, alphabet_size, &symbol, &repeat, &space, &prev_code_len, &repeat_code_len, symbol_lists, code_length_histo, next_symbol) + } + } + + s.space = space + return decoderSuccess +} + +func safeReadSymbolCodeLengths(alphabet_size uint32, s *Reader) int { + var br *bitReader = &s.br + var get_byte bool = false + var p []huffmanCode + for s.symbol < alphabet_size && s.space > 0 { + p = s.table[:] + var code_len uint32 + var available_bits uint32 + var bits uint32 = 0 + if get_byte && !pullByte(br) { + return decoderNeedsMoreInput + } + get_byte = false + available_bits = getAvailableBits(br) + if available_bits != 0 { + bits = uint32(getBitsUnmasked(br)) + } + + p = p[bits&bitMask(huffmanMaxCodeLengthCodeLength):] + if uint32(p[0].bits) > available_bits { + get_byte = true + continue + } + + code_len = uint32(p[0].value) /* code_len == 0..17 */ + if code_len < repeatPreviousCodeLength { + dropBits(br, uint32(p[0].bits)) + processSingleCodeLength(code_len, &s.symbol, &s.repeat, &s.space, &s.prev_code_len, s.symbol_lists, s.code_length_histo[:], s.next_symbol[:]) /* code_len == 16..17, extra_bits == 2..3 */ + } else { + var extra_bits uint32 = code_len - 14 + var repeat_delta uint32 = (bits >> p[0].bits) & bitMask(extra_bits) + if available_bits < uint32(p[0].bits)+extra_bits { + get_byte = true + continue + } + + dropBits(br, uint32(p[0].bits)+extra_bits) + processRepeatedCodeLength(code_len, repeat_delta, alphabet_size, &s.symbol, &s.repeat, &s.space, &s.prev_code_len, &s.repeat_code_len, s.symbol_lists, s.code_length_histo[:], s.next_symbol[:]) + } + } + + return decoderSuccess +} + +/* Reads and decodes 15..18 codes using static prefix code. + Each code is 2..4 bits long. In total 30..72 bits are used. */ +func readCodeLengthCodeLengths(s *Reader) int { + var br *bitReader = &s.br + var num_codes uint32 = s.repeat + var space uint32 = s.space + var i uint32 = s.sub_loop_counter + for ; i < codeLengthCodes; i++ { + var code_len_idx byte = kCodeLengthCodeOrder[i] + var ix uint32 + var v uint32 + if !safeGetBits(br, 4, &ix) { + var available_bits uint32 = getAvailableBits(br) + if available_bits != 0 { + ix = uint32(getBitsUnmasked(br) & 0xF) + } else { + ix = 0 + } + + if uint32(kCodeLengthPrefixLength[ix]) > available_bits { + s.sub_loop_counter = i + s.repeat = num_codes + s.space = space + s.substate_huffman = stateHuffmanComplex + return decoderNeedsMoreInput + } + } + + v = uint32(kCodeLengthPrefixValue[ix]) + dropBits(br, uint32(kCodeLengthPrefixLength[ix])) + s.code_length_code_lengths[code_len_idx] = byte(v) + if v != 0 { + space = space - (32 >> v) + num_codes++ + s.code_length_histo[v]++ + if space-1 >= 32 { + /* space is 0 or wrapped around. */ + break + } + } + } + + if num_codes != 1 && space != 0 { + return decoderErrorFormatClSpace + } + + return decoderSuccess +} + +/* Decodes the Huffman tables. + There are 2 scenarios: + A) Huffman code contains only few symbols (1..4). 
Those symbols are read + directly; their code lengths are defined by the number of symbols. + For this scenario 4 - 49 bits will be read. + + B) 2-phase decoding: + B.1) Small Huffman table is decoded; it is specified with code lengths + encoded with predefined entropy code. 32 - 74 bits are used. + B.2) Decoded table is used to decode code lengths of symbols in resulting + Huffman table. In worst case 3520 bits are read. */ +func readHuffmanCode(alphabet_size uint32, max_symbol uint32, table []huffmanCode, opt_table_size *uint32, s *Reader) int { + var br *bitReader = &s.br + + /* Unnecessary masking, but might be good for safety. */ + alphabet_size &= 0x7FF + + /* State machine. */ + for { + switch s.substate_huffman { + case stateHuffmanNone: + if !safeReadBits(br, 2, &s.sub_loop_counter) { + return decoderNeedsMoreInput + } + + /* The value is used as follows: + 1 for simple code; + 0 for no skipping, 2 skips 2 code lengths, 3 skips 3 code lengths */ + if s.sub_loop_counter != 1 { + s.space = 32 + s.repeat = 0 /* num_codes */ + var i int + for i = 0; i <= huffmanMaxCodeLengthCodeLength; i++ { + s.code_length_histo[i] = 0 + } + + for i = 0; i < codeLengthCodes; i++ { + s.code_length_code_lengths[i] = 0 + } + + s.substate_huffman = stateHuffmanComplex + continue + } + fallthrough + + /* Read symbols, codes & code lengths directly. */ + case stateHuffmanSimpleSize: + if !safeReadBits(br, 2, &s.symbol) { /* num_symbols */ + s.substate_huffman = stateHuffmanSimpleSize + return decoderNeedsMoreInput + } + + s.sub_loop_counter = 0 + fallthrough + + case stateHuffmanSimpleRead: + { + var result int = readSimpleHuffmanSymbols(alphabet_size, max_symbol, s) + if result != decoderSuccess { + return result + } + } + fallthrough + + case stateHuffmanSimpleBuild: + var table_size uint32 + if s.symbol == 3 { + var bits uint32 + if !safeReadBits(br, 1, &bits) { + s.substate_huffman = stateHuffmanSimpleBuild + return decoderNeedsMoreInput + } + + s.symbol += bits + } + + table_size = buildSimpleHuffmanTable(table, huffmanTableBits, s.symbols_lists_array[:], s.symbol) + if opt_table_size != nil { + *opt_table_size = table_size + } + + s.substate_huffman = stateHuffmanNone + return decoderSuccess + + /* Decode Huffman-coded code lengths. 
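+   While the symbol code lengths are read, s.space tracks the remaining
+   Kraft budget in units of 1/32768: a symbol of length n consumes
+   32768 >> n, and a complete code must drive the budget to exactly zero,
+   which the stateHuffmanLengthSymbols case checks.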
*/ + case stateHuffmanComplex: + { + var i uint32 + var result int = readCodeLengthCodeLengths(s) + if result != decoderSuccess { + return result + } + + buildCodeLengthsHuffmanTable(s.table[:], s.code_length_code_lengths[:], s.code_length_histo[:]) + for i = 0; i < 16; i++ { + s.code_length_histo[i] = 0 + } + + for i = 0; i <= huffmanMaxCodeLength; i++ { + s.next_symbol[i] = int(i) - (huffmanMaxCodeLength + 1) + symbolListPut(s.symbol_lists, s.next_symbol[i], 0xFFFF) + } + + s.symbol = 0 + s.prev_code_len = initialRepeatedCodeLength + s.repeat = 0 + s.repeat_code_len = 0 + s.space = 32768 + s.substate_huffman = stateHuffmanLengthSymbols + } + fallthrough + + case stateHuffmanLengthSymbols: + var table_size uint32 + var result int = readSymbolCodeLengths(max_symbol, s) + if result == decoderNeedsMoreInput { + result = safeReadSymbolCodeLengths(max_symbol, s) + } + + if result != decoderSuccess { + return result + } + + if s.space != 0 { + return decoderErrorFormatHuffmanSpace + } + + table_size = buildHuffmanTable(table, huffmanTableBits, s.symbol_lists, s.code_length_histo[:]) + if opt_table_size != nil { + *opt_table_size = table_size + } + + s.substate_huffman = stateHuffmanNone + return decoderSuccess + + default: + return decoderErrorUnreachable + } + } +} + +/* Decodes a block length by reading 3..39 bits. */ +func readBlockLength(table []huffmanCode, br *bitReader) uint32 { + var code uint32 + var nbits uint32 + code = readSymbol(table, br) + nbits = kBlockLengthPrefixCode[code].nbits /* nbits == 2..24 */ + return kBlockLengthPrefixCode[code].offset + readBits(br, nbits) +} + +/* WARNING: if state is not BROTLI_STATE_READ_BLOCK_LENGTH_NONE, then + reading can't be continued with ReadBlockLength. */ +func safeReadBlockLength(s *Reader, result *uint32, table []huffmanCode, br *bitReader) bool { + var index uint32 + if s.substate_read_block_length == stateReadBlockLengthNone { + if !safeReadSymbol(table, br, &index) { + return false + } + } else { + index = s.block_length_index + } + { + var bits uint32 /* nbits == 2..24 */ + var nbits uint32 = kBlockLengthPrefixCode[index].nbits + if !safeReadBits(br, nbits, &bits) { + s.block_length_index = index + s.substate_read_block_length = stateReadBlockLengthSuffix + return false + } + + *result = kBlockLengthPrefixCode[index].offset + bits + s.substate_read_block_length = stateReadBlockLengthNone + return true + } +} + +/* Transform: + 1) initialize list L with values 0, 1,... 255 + 2) For each input element X: + 2.1) let Y = L[X] + 2.2) remove X-th element from L + 2.3) prepend Y to L + 2.4) append Y to output + + In most cases max(Y) <= 7, so most of L remains intact. + To reduce the cost of initialization, we reuse L, remember the upper bound + of Y values, and reinitialize only first elements in L. + + Most of input values are 0 and 1. To reduce number of branches, we replace + inner for loop with do-while. */ +func inverseMoveToFrontTransform(v []byte, v_len uint32, state *Reader) { + var mtf [256]byte + var i int + for i = 1; i < 256; i++ { + mtf[i] = byte(i) + } + var mtf_1 byte + + /* Transform the input. */ + for i = 0; uint32(i) < v_len; i++ { + var index int = int(v[i]) + var value byte = mtf[index] + v[i] = value + mtf_1 = value + for index >= 1 { + index-- + mtf[index+1] = mtf[index] + } + + mtf[0] = mtf_1 + } +} + +/* Decodes a series of Huffman table using ReadHuffmanCode function. 
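+   All num_htrees tables of a group are decoded back to back into the
+   shared group.codes slice: group.htrees[i] records where table i starts,
+   and s.next advances by the size reported for each decoded table.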
*/ +func huffmanTreeGroupDecode(group *huffmanTreeGroup, s *Reader) int { + if s.substate_tree_group != stateTreeGroupLoop { + s.next = group.codes + s.htree_index = 0 + s.substate_tree_group = stateTreeGroupLoop + } + + for s.htree_index < int(group.num_htrees) { + var table_size uint32 + var result int = readHuffmanCode(uint32(group.alphabet_size), uint32(group.max_symbol), s.next, &table_size, s) + if result != decoderSuccess { + return result + } + group.htrees[s.htree_index] = s.next + s.next = s.next[table_size:] + s.htree_index++ + } + + s.substate_tree_group = stateTreeGroupNone + return decoderSuccess +} + +/* Decodes a context map. + Decoding is done in 4 phases: + 1) Read auxiliary information (6..16 bits) and allocate memory. + In case of trivial context map, decoding is finished at this phase. + 2) Decode Huffman table using ReadHuffmanCode function. + This table will be used for reading context map items. + 3) Read context map items; "0" values could be run-length encoded. + 4) Optionally, apply InverseMoveToFront transform to the resulting map. */ +func decodeContextMap(context_map_size uint32, num_htrees *uint32, context_map_arg *[]byte, s *Reader) int { + var br *bitReader = &s.br + var result int = decoderSuccess + + switch int(s.substate_context_map) { + case stateContextMapNone: + result = decodeVarLenUint8(s, br, num_htrees) + if result != decoderSuccess { + return result + } + + (*num_htrees)++ + s.context_index = 0 + *context_map_arg = make([]byte, uint(context_map_size)) + if *context_map_arg == nil { + return decoderErrorAllocContextMap + } + + if *num_htrees <= 1 { + for i := 0; i < int(context_map_size); i++ { + (*context_map_arg)[i] = 0 + } + return decoderSuccess + } + + s.substate_context_map = stateContextMapReadPrefix + fallthrough + /* Fall through. */ + case stateContextMapReadPrefix: + { + var bits uint32 + + /* In next stage ReadHuffmanCode uses at least 4 bits, so it is safe + to peek 4 bits ahead. */ + if !safeGetBits(br, 5, &bits) { + return decoderNeedsMoreInput + } + + if bits&1 != 0 { /* Use RLE for zeros. */ + s.max_run_length_prefix = (bits >> 1) + 1 + dropBits(br, 5) + } else { + s.max_run_length_prefix = 0 + dropBits(br, 1) + } + + s.substate_context_map = stateContextMapHuffman + } + fallthrough + + /* Fall through. */ + case stateContextMapHuffman: + { + var alphabet_size uint32 = *num_htrees + s.max_run_length_prefix + result = readHuffmanCode(alphabet_size, alphabet_size, s.context_map_table[:], nil, s) + if result != decoderSuccess { + return result + } + s.code = 0xFFFF + s.substate_context_map = stateContextMapDecode + } + fallthrough + + /* Fall through. */ + case stateContextMapDecode: + { + var context_index uint32 = s.context_index + var max_run_length_prefix uint32 = s.max_run_length_prefix + var context_map []byte = *context_map_arg + var code uint32 = s.code + var skip_preamble bool = (code != 0xFFFF) + for context_index < context_map_size || skip_preamble { + if !skip_preamble { + if !safeReadSymbol(s.context_map_table[:], br, &code) { + s.code = 0xFFFF + s.context_index = context_index + return decoderNeedsMoreInput + } + + if code == 0 { + context_map[context_index] = 0 + context_index++ + continue + } + + if code > max_run_length_prefix { + context_map[context_index] = byte(code - max_run_length_prefix) + context_index++ + continue + } + } else { + skip_preamble = false + } + + /* RLE sub-stage. 
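+      A run code k (1 <= k <= max_run_length_prefix) is followed by k extra
+      bits and emits (1 << k) + extra zeros; e.g. k == 3 with extra == 5
+      writes a run of 13 zeros into the context map.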
*/
+			{
+				var reps uint32
+				if !safeReadBits(br, code, &reps) {
+					s.code = code
+					s.context_index = context_index
+					return decoderNeedsMoreInput
+				}
+
+				reps += 1 << code
+				if context_index+reps > context_map_size {
+					return decoderErrorFormatContextMapRepeat
+				}
+
+				for {
+					context_map[context_index] = 0
+					context_index++
+					reps--
+					if reps == 0 {
+						break
+					}
+				}
+			}
+		}
+	}
+	fallthrough
+
+	case stateContextMapTransform:
+		var bits uint32
+		if !safeReadBits(br, 1, &bits) {
+			s.substate_context_map = stateContextMapTransform
+			return decoderNeedsMoreInput
+		}
+
+		if bits != 0 {
+			inverseMoveToFrontTransform(*context_map_arg, context_map_size, s)
+		}
+
+		s.substate_context_map = stateContextMapNone
+		return decoderSuccess
+
+	default:
+		return decoderErrorUnreachable
+	}
+}
+
+/* Decodes a command or literal and updates block type ring-buffer.
+   Reads 3..54 bits. */
+func decodeBlockTypeAndLength(safe int, s *Reader, tree_type int) bool {
+	var max_block_type uint32 = s.num_block_types[tree_type]
+	type_tree := s.block_type_trees[tree_type*huffmanMaxSize258:]
+	len_tree := s.block_len_trees[tree_type*huffmanMaxSize26:]
+	var br *bitReader = &s.br
+	var ringbuffer []uint32 = s.block_type_rb[tree_type*2:]
+	var block_type uint32
+	if max_block_type <= 1 {
+		return false
+	}
+
+	/* Read 0..15 + 3..39 bits. */
+	if safe == 0 {
+		block_type = readSymbol(type_tree, br)
+		s.block_length[tree_type] = readBlockLength(len_tree, br)
+	} else {
+		var memento bitReaderState
+		bitReaderSaveState(br, &memento)
+		if !safeReadSymbol(type_tree, br, &block_type) {
+			return false
+		}
+		if !safeReadBlockLength(s, &s.block_length[tree_type], len_tree, br) {
+			s.substate_read_block_length = stateReadBlockLengthNone
+			bitReaderRestoreState(br, &memento)
+			return false
+		}
+	}
+
+	if block_type == 1 {
+		block_type = ringbuffer[1] + 1
+	} else if block_type == 0 {
+		block_type = ringbuffer[0]
+	} else {
+		block_type -= 2
+	}
+
+	if block_type >= max_block_type {
+		block_type -= max_block_type
+	}
+
+	ringbuffer[0] = ringbuffer[1]
+	ringbuffer[1] = block_type
+	return true
+}
+
+func detectTrivialLiteralBlockTypes(s *Reader) {
+	var i uint
+	for i = 0; i < 8; i++ {
+		s.trivial_literal_contexts[i] = 0
+	}
+	for i = 0; uint32(i) < s.num_block_types[0]; i++ {
+		var offset uint = i << literalContextBits
+		var error uint = 0
+		var sample uint = uint(s.context_map[offset])
+		var j uint
+		for j = 0; j < 1<<literalContextBits; j++ {
+			error |= uint(s.context_map[offset+j]) ^ sample
+		}
+
+		if error == 0 {
+			s.trivial_literal_contexts[i>>5] |= 1 << (i & 31)
+		}
+	}
+}
+
+func prepareLiteralDecoding(s *Reader) {
+	var context_mode byte
+	var trivial uint
+	var block_type uint32 = s.block_type_rb[1]
+	var context_offset uint32 = block_type << literalContextBits
+	s.context_map_slice = s.context_map[context_offset:]
+	trivial = uint(s.trivial_literal_contexts[block_type>>5])
+	s.trivial_literal_context = int((trivial >> (block_type & 31)) & 1)
+	s.literal_htree = []huffmanCode(s.literal_hgroup.htrees[s.context_map_slice[0]])
+	context_mode = s.context_modes[block_type] & 3
+	s.context_lookup = getContextLUT(int(context_mode))
+}
+
+/* Decodes the block type and updates the state for literal context.
+   Reads 3..54 bits. */
+func decodeLiteralBlockSwitchInternal(safe int, s *Reader) bool {
+	if !decodeBlockTypeAndLength(safe, s, 0) {
+		return false
+	}
+
+	prepareLiteralDecoding(s)
+	return true
+}
+
+func decodeLiteralBlockSwitch(s *Reader) {
+	decodeLiteralBlockSwitchInternal(0, s)
+}
+
+func safeDecodeLiteralBlockSwitch(s *Reader) bool {
+	return decodeLiteralBlockSwitchInternal(1, s)
+}
+
+/* Block switch for insert/copy length.
+   Reads 3..54 bits.
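The ring-buffer update rule in decodeBlockTypeAndLength above, isolated as a pure function (editorial sketch; the names are hypothetical):

func nextBlockType(symbol, last, secondLast, numTypes uint32) uint32 {
	var t uint32
	switch symbol {
	case 0:
		t = secondLast // repeat the block type before the last one
	case 1:
		t = last + 1 // advance past the most recent type
	default:
		t = symbol - 2 // symbols >= 2 name a type explicitly
	}
	if t >= numTypes {
		t -= numTypes // wrap; t is always < 2*numTypes at this point
	}
	return t
}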
*/
+func decodeCommandBlockSwitchInternal(safe int, s *Reader) bool {
+	if !decodeBlockTypeAndLength(safe, s, 1) {
+		return false
+	}
+
+	s.htree_command = []huffmanCode(s.insert_copy_hgroup.htrees[s.block_type_rb[3]])
+	return true
+}
+
+func decodeCommandBlockSwitch(s *Reader) {
+	decodeCommandBlockSwitchInternal(0, s)
+}
+
+func safeDecodeCommandBlockSwitch(s *Reader) bool {
+	return decodeCommandBlockSwitchInternal(1, s)
+}
+
+/* Block switch for distance codes.
+   Reads 3..54 bits. */
+func decodeDistanceBlockSwitchInternal(safe int, s *Reader) bool {
+	if !decodeBlockTypeAndLength(safe, s, 2) {
+		return false
+	}
+
+	s.dist_context_map_slice = s.dist_context_map[s.block_type_rb[5]<<distanceContextBits:]
+	s.dist_htree_index = s.dist_context_map_slice[s.distance_context]
+	return true
+}
+
+func decodeDistanceBlockSwitch(s *Reader) {
+	decodeDistanceBlockSwitchInternal(0, s)
+}
+
+func safeDecodeDistanceBlockSwitch(s *Reader) bool {
+	return decodeDistanceBlockSwitchInternal(1, s)
+}
+
+/* Number of bytes in ring-buffer that were not written yet. */
+func unwrittenBytes(s *Reader, wrap bool) uint {
+	var pos uint
+	if wrap && s.pos > s.ringbuffer_size {
+		pos = uint(s.ringbuffer_size)
+	} else {
+		pos = uint(s.pos)
+	}
+	var partial_pos_rb uint = (s.rb_roundtrips * uint(s.ringbuffer_size)) + pos
+	return partial_pos_rb - s.partial_pos_out
+}
+
+/* Dumps output.
+   Returns BROTLI_DECODER_NEEDS_MORE_OUTPUT only if there is more output to push
+   and either ring-buffer is as big as window size, or |force| is true. */
+func writeRingBuffer(s *Reader, available_out *uint, next_out *[]byte, total_out *uint, force bool) int {
+	start := s.ringbuffer[s.partial_pos_out&uint(s.ringbuffer_mask):]
+	var to_write uint = unwrittenBytes(s, true)
+	var num_written uint = *available_out
+	if num_written > to_write {
+		num_written = to_write
+	}
+
+	if s.meta_block_remaining_len < 0 {
+		return decoderErrorFormatBlockLength1
+	}
+
+	if next_out != nil && *next_out == nil {
+		*next_out = start
+	} else {
+		if next_out != nil {
+			copy(*next_out, start[:num_written])
+			*next_out = (*next_out)[num_written:]
+		}
+	}
+
+	*available_out -= num_written
+	s.partial_pos_out += num_written
+	if total_out != nil {
+		*total_out = s.partial_pos_out
+	}
+
+	if num_written < to_write {
+		if s.ringbuffer_size == 1<<s.window_bits || force {
+			return decoderNeedsMoreOutput
+		} else {
+			return decoderSuccess
+		}
+	}
+
+	/* Wrap ring buffer only if it has reached its maximal size. */
+	if s.ringbuffer_size == 1<<s.window_bits && s.pos >= s.ringbuffer_size {
+		s.pos -= s.ringbuffer_size
+		s.rb_roundtrips++
+		if uint(s.pos) != 0 {
+			s.should_wrap_ringbuffer = 1
+		} else {
+			s.should_wrap_ringbuffer = 0
+		}
+	}
+
+	return decoderSuccess
+}
+
+func wrapRingBuffer(s *Reader) {
+	if s.should_wrap_ringbuffer != 0 {
+		copy(s.ringbuffer, s.ringbuffer_end[:uint(s.pos)])
+		s.should_wrap_ringbuffer = 0
+	}
+}
+
+/* Allocates ring-buffer.
+
+   s->ringbuffer_size MUST be updated by BrotliCalculateRingBufferSize before
+   this function is called.
+
+   Last two bytes of ring-buffer are initialized to 0, so context calculation
+   could be done uniformly for the first two and all other positions. */
+func ensureRingBuffer(s *Reader) bool {
+	var old_ringbuffer []byte = s.ringbuffer
+	if s.ringbuffer_size == s.new_ringbuffer_size {
+		return true
+	}
+
+	s.ringbuffer = make([]byte, uint(s.new_ringbuffer_size)+uint(kRingBufferWriteAheadSlack))
+	if s.ringbuffer == nil {
+		/* Restore previous value. */
+		s.ringbuffer = old_ringbuffer
+
+		return false
+	}
+
+	s.ringbuffer[s.new_ringbuffer_size-2] = 0
+	s.ringbuffer[s.new_ringbuffer_size-1] = 0
+
+	if !(old_ringbuffer == nil) {
+		copy(s.ringbuffer, old_ringbuffer[:uint(s.pos)])
+
+		old_ringbuffer = nil
+	}
+
+	s.ringbuffer_size = s.new_ringbuffer_size
+	s.ringbuffer_mask = s.new_ringbuffer_size - 1
+	s.ringbuffer_end = s.ringbuffer[s.ringbuffer_size:]
+
+	return true
+}
+
+func copyUncompressedBlockToOutput(available_out *uint, next_out *[]byte, total_out *uint, s *Reader) int {
+	/* TODO: avoid allocation for single uncompressed block.
*/ + if !ensureRingBuffer(s) { + return decoderErrorAllocRingBuffer1 + } + + /* State machine */ + for { + switch s.substate_uncompressed { + case stateUncompressedNone: + { + var nbytes int = int(getRemainingBytes(&s.br)) + if nbytes > s.meta_block_remaining_len { + nbytes = s.meta_block_remaining_len + } + + if s.pos+nbytes > s.ringbuffer_size { + nbytes = s.ringbuffer_size - s.pos + } + + /* Copy remaining bytes from s->br.buf_ to ring-buffer. */ + copyBytes(s.ringbuffer[s.pos:], &s.br, uint(nbytes)) + + s.pos += nbytes + s.meta_block_remaining_len -= nbytes + if s.pos < 1<>1 >= min_size { + new_ringbuffer_size >>= 1 + } + } + + s.new_ringbuffer_size = new_ringbuffer_size +} + +/* Reads 1..256 2-bit context modes. */ +func readContextModes(s *Reader) int { + var br *bitReader = &s.br + var i int = s.loop_counter + + for i < int(s.num_block_types[0]) { + var bits uint32 + if !safeReadBits(br, 2, &bits) { + s.loop_counter = i + return decoderNeedsMoreInput + } + + s.context_modes[i] = byte(bits) + i++ + } + + return decoderSuccess +} + +func takeDistanceFromRingBuffer(s *Reader) { + if s.distance_code == 0 { + s.dist_rb_idx-- + s.distance_code = s.dist_rb[s.dist_rb_idx&3] + + /* Compensate double distance-ring-buffer roll for dictionary items. */ + s.distance_context = 1 + } else { + var distance_code int = s.distance_code << 1 + const kDistanceShortCodeIndexOffset uint32 = 0xAAAFFF1B + const kDistanceShortCodeValueOffset uint32 = 0xFA5FA500 + var v int = (s.dist_rb_idx + int(kDistanceShortCodeIndexOffset>>uint(distance_code))) & 0x3 + /* kDistanceShortCodeIndexOffset has 2-bit values from LSB: + 3, 2, 1, 0, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2 */ + + /* kDistanceShortCodeValueOffset has 2-bit values from LSB: + -0, 0,-0, 0,-1, 1,-2, 2,-3, 3,-1, 1,-2, 2,-3, 3 */ + s.distance_code = s.dist_rb[v] + + v = int(kDistanceShortCodeValueOffset>>uint(distance_code)) & 0x3 + if distance_code&0x3 != 0 { + s.distance_code += v + } else { + s.distance_code -= v + if s.distance_code <= 0 { + /* A huge distance will cause a () soon. + This is a little faster than failing here. */ + s.distance_code = 0x7FFFFFFF + } + } + } +} + +func safeReadBitsMaybeZero(br *bitReader, n_bits uint32, val *uint32) bool { + if n_bits != 0 { + return safeReadBits(br, n_bits, val) + } else { + *val = 0 + return true + } +} + +/* Precondition: s->distance_code < 0. */ +func readDistanceInternal(safe int, s *Reader, br *bitReader) bool { + var distval int + var memento bitReaderState + var distance_tree []huffmanCode = []huffmanCode(s.distance_hgroup.htrees[s.dist_htree_index]) + if safe == 0 { + s.distance_code = int(readSymbol(distance_tree, br)) + } else { + var code uint32 + bitReaderSaveState(br, &memento) + if !safeReadSymbol(distance_tree, br, &code) { + return false + } + + s.distance_code = int(code) + } + + /* Convert the distance code to the actual distance by possibly + looking up past distances from the s->ringbuffer. */ + s.distance_context = 0 + + if s.distance_code&^0xF == 0 { + takeDistanceFromRingBuffer(s) + s.block_length[2]-- + return true + } + + distval = s.distance_code - int(s.num_direct_distance_codes) + if distval >= 0 { + var nbits uint32 + var postfix int + var offset int + if safe == 0 && (s.distance_postfix_bits == 0) { + nbits = (uint32(distval) >> 1) + 1 + offset = ((2 + (distval & 1)) << nbits) - 4 + s.distance_code = int(s.num_direct_distance_codes) + offset + int(readBits(br, nbits)) + } else { + /* This branch also works well when s->distance_postfix_bits == 0. 
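kDistanceShortCodeIndexOffset and kDistanceShortCodeValueOffset above are 16-entry tables of 2-bit values packed LSB-first into one uint32. An illustrative accessor (hypothetical helper, not in the sources):

func packed2BitEntry(table uint32, i uint) uint32 {
	return (table >> (2 * i)) & 0x3 // entry i occupies bits 2i..2i+1
}

packed2BitEntry(0xAAAFFF1B, 0) == 3 and packed2BitEntry(0xAAAFFF1B, 3) == 0, matching the "3, 2, 1, 0, 3, 3, ..." listing in the comment; the decoder shifts by the pre-doubled distance_code instead of multiplying the index by two.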
*/
+			var bits uint32
+			postfix = distval & s.distance_postfix_mask
+			distval >>= s.distance_postfix_bits
+			nbits = (uint32(distval) >> 1) + 1
+			if safe != 0 {
+				if !safeReadBitsMaybeZero(br, nbits, &bits) {
+					s.distance_code = -1 /* Restore precondition. */
+					bitReaderRestoreState(br, &memento)
+					return false
+				}
+			} else {
+				bits = readBits(br, nbits)
+			}
+
+			offset = ((2 + (distval & 1)) << nbits) - 4
+			s.distance_code = int(s.num_direct_distance_codes) + ((offset + int(bits)) << s.distance_postfix_bits) + postfix
+		}
+	}
+
+	s.distance_code = s.distance_code - numDistanceShortCodes + 1
+	s.block_length[2]--
+	return true
+}
+
+func readDistance(s *Reader, br *bitReader) {
+	readDistanceInternal(0, s, br)
+}
+
+func safeReadDistance(s *Reader, br *bitReader) bool {
+	return readDistanceInternal(1, s, br)
+}
+
+func readCommandInternal(safe int, s *Reader, br *bitReader, insert_length *int) bool {
+	var cmd_code uint32
+	var insert_len_extra uint32 = 0
+	var copy_length uint32
+	var v cmdLutElement
+	var memento bitReaderState
+	if safe == 0 {
+		cmd_code = readSymbol(s.htree_command, br)
+	} else {
+		bitReaderSaveState(br, &memento)
+		if !safeReadSymbol(s.htree_command, br, &cmd_code) {
+			return false
+		}
+	}
+
+	v = kCmdLut[cmd_code]
+	s.distance_code = int(v.distance_code)
+	s.distance_context = int(v.context)
+	s.dist_htree_index = s.dist_context_map_slice[s.distance_context]
+	*insert_length = int(v.insert_len_offset)
+	if safe == 0 {
+		if v.insert_len_extra_bits != 0 {
+			insert_len_extra = readBits(br, uint32(v.insert_len_extra_bits))
+		}
+
+		copy_length = readBits(br, uint32(v.copy_len_extra_bits))
+	} else {
+		if !safeReadBitsMaybeZero(br, uint32(v.insert_len_extra_bits), &insert_len_extra) || !safeReadBitsMaybeZero(br, uint32(v.copy_len_extra_bits), &copy_length) {
+			bitReaderRestoreState(br, &memento)
+			return false
+		}
+	}
+
+	s.copy_length = int(copy_length) + int(v.copy_len_offset)
+	s.block_length[1]--
+	*insert_length += int(insert_len_extra)
+	return true
+}
+
+func readCommand(s *Reader, br *bitReader, insert_length *int) {
+	readCommandInternal(0, s, br, insert_length)
+}
+
+func safeReadCommand(s *Reader, br *bitReader, insert_length *int) bool {
+	return readCommandInternal(1, s, br, insert_length)
+}
+
+func checkInputAmountMaybeSafe(safe int, br *bitReader, num uint) bool {
+	if safe != 0 {
+		return true
+	}
+
+	return checkInputAmount(br, num)
+}
+
+func processCommandsInternal(safe int, s *Reader) int {
+	var pos int = s.pos
+	var i int = s.loop_counter
+	var result int = decoderSuccess
+	var br *bitReader = &s.br
+	var hc []huffmanCode
+
+	if !checkInputAmountMaybeSafe(safe, br, 28) {
+		result = decoderNeedsMoreInput
+		goto saveStateAndReturn
+	}
+
+	if safe == 0 {
+		warmupBitReader(br)
+	}
+
+	/* Jump into state machine.
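For codes past the ring-buffer and direct ranges, the distance is rebuilt from the formula visible above; a worked sketch with npostfix = 0 (editorial, names hypothetical):

func longDistance(distval, ndirect, extra uint32) uint32 {
	nbits := (distval >> 1) + 1 // extra-bit count grows every two codes
	offset := ((2 + (distval & 1)) << nbits) - 4
	return ndirect + offset + extra // extra is read with readBits(nbits)
}

For example, distval = 2 gives nbits = 2 and offset = 4, so the four extra-bit patterns cover distances ndirect+4 .. ndirect+7 (before the final "- numDistanceShortCodes + 1" normalization).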
*/ + if s.state == stateCommandBegin { + goto CommandBegin + } else if s.state == stateCommandInner { + goto CommandInner + } else if s.state == stateCommandPostDecodeLiterals { + goto CommandPostDecodeLiterals + } else if s.state == stateCommandPostWrapCopy { + goto CommandPostWrapCopy + } else { + return decoderErrorUnreachable + } + +CommandBegin: + if safe != 0 { + s.state = stateCommandBegin + } + + if !checkInputAmountMaybeSafe(safe, br, 28) { /* 156 bits + 7 bytes */ + s.state = stateCommandBegin + result = decoderNeedsMoreInput + goto saveStateAndReturn + } + + if s.block_length[1] == 0 { + if safe != 0 { + if !safeDecodeCommandBlockSwitch(s) { + result = decoderNeedsMoreInput + goto saveStateAndReturn + } + } else { + decodeCommandBlockSwitch(s) + } + + goto CommandBegin + } + + /* Read the insert/copy length in the command. */ + if safe != 0 { + if !safeReadCommand(s, br, &i) { + result = decoderNeedsMoreInput + goto saveStateAndReturn + } + } else { + readCommand(s, br, &i) + } + + if i == 0 { + goto CommandPostDecodeLiterals + } + + s.meta_block_remaining_len -= i + +CommandInner: + if safe != 0 { + s.state = stateCommandInner + } + + /* Read the literals in the command. */ + if s.trivial_literal_context != 0 { + var bits uint32 + var value uint32 + preloadSymbol(safe, s.literal_htree, br, &bits, &value) + for { + if !checkInputAmountMaybeSafe(safe, br, 28) { /* 162 bits + 7 bytes */ + s.state = stateCommandInner + result = decoderNeedsMoreInput + goto saveStateAndReturn + } + + if s.block_length[0] == 0 { + if safe != 0 { + if !safeDecodeLiteralBlockSwitch(s) { + result = decoderNeedsMoreInput + goto saveStateAndReturn + } + } else { + decodeLiteralBlockSwitch(s) + } + + preloadSymbol(safe, s.literal_htree, br, &bits, &value) + if s.trivial_literal_context == 0 { + goto CommandInner + } + } + + if safe == 0 { + s.ringbuffer[pos] = byte(readPreloadedSymbol(s.literal_htree, br, &bits, &value)) + } else { + var literal uint32 + if !safeReadSymbol(s.literal_htree, br, &literal) { + result = decoderNeedsMoreInput + goto saveStateAndReturn + } + + s.ringbuffer[pos] = byte(literal) + } + + s.block_length[0]-- + pos++ + if pos == s.ringbuffer_size { + s.state = stateCommandInnerWrite + i-- + goto saveStateAndReturn + } + i-- + if i == 0 { + break + } + } + } else { + var p1 byte = s.ringbuffer[(pos-1)&s.ringbuffer_mask] + var p2 byte = s.ringbuffer[(pos-2)&s.ringbuffer_mask] + for { + var context byte + if !checkInputAmountMaybeSafe(safe, br, 28) { /* 162 bits + 7 bytes */ + s.state = stateCommandInner + result = decoderNeedsMoreInput + goto saveStateAndReturn + } + + if s.block_length[0] == 0 { + if safe != 0 { + if !safeDecodeLiteralBlockSwitch(s) { + result = decoderNeedsMoreInput + goto saveStateAndReturn + } + } else { + decodeLiteralBlockSwitch(s) + } + + if s.trivial_literal_context != 0 { + goto CommandInner + } + } + + context = getContext(p1, p2, s.context_lookup) + hc = []huffmanCode(s.literal_hgroup.htrees[s.context_map_slice[context]]) + p2 = p1 + if safe == 0 { + p1 = byte(readSymbol(hc, br)) + } else { + var literal uint32 + if !safeReadSymbol(hc, br, &literal) { + result = decoderNeedsMoreInput + goto saveStateAndReturn + } + + p1 = byte(literal) + } + + s.ringbuffer[pos] = p1 + s.block_length[0]-- + pos++ + if pos == s.ringbuffer_size { + s.state = stateCommandInnerWrite + i-- + goto saveStateAndReturn + } + i-- + if i == 0 { + break + } + } + } + + if s.meta_block_remaining_len <= 0 { + s.state = stateMetablockDone + goto saveStateAndReturn + } + 
+CommandPostDecodeLiterals: + if safe != 0 { + s.state = stateCommandPostDecodeLiterals + } + + if s.distance_code >= 0 { + /* Implicit distance case. */ + if s.distance_code != 0 { + s.distance_context = 0 + } else { + s.distance_context = 1 + } + + s.dist_rb_idx-- + s.distance_code = s.dist_rb[s.dist_rb_idx&3] + } else { + /* Read distance code in the command, unless it was implicitly zero. */ + if s.block_length[2] == 0 { + if safe != 0 { + if !safeDecodeDistanceBlockSwitch(s) { + result = decoderNeedsMoreInput + goto saveStateAndReturn + } + } else { + decodeDistanceBlockSwitch(s) + } + } + + if safe != 0 { + if !safeReadDistance(s, br) { + result = decoderNeedsMoreInput + goto saveStateAndReturn + } + } else { + readDistance(s, br) + } + } + + if s.max_distance != s.max_backward_distance { + if pos < s.max_backward_distance { + s.max_distance = pos + } else { + s.max_distance = s.max_backward_distance + } + } + + i = s.copy_length + + /* Apply copy of LZ77 back-reference, or static dictionary reference if + the distance is larger than the max LZ77 distance */ + if s.distance_code > s.max_distance { + /* The maximum allowed distance is BROTLI_MAX_ALLOWED_DISTANCE = 0x7FFFFFFC. + With this choice, no signed overflow can occur after decoding + a special distance code (e.g., after adding 3 to the last distance). */ + if s.distance_code > maxAllowedDistance { + return decoderErrorFormatDistance + } + + if i >= minDictionaryWordLength && i <= maxDictionaryWordLength { + var address int = s.distance_code - s.max_distance - 1 + var words *dictionary = s.dictionary + var trans *transforms = s.transforms + var offset int = int(s.dictionary.offsets_by_length[i]) + var shift uint32 = uint32(s.dictionary.size_bits_by_length[i]) + var mask int = int(bitMask(shift)) + var word_idx int = address & mask + var transform_idx int = address >> shift + + /* Compensate double distance-ring-buffer roll. */ + s.dist_rb_idx += s.distance_context + + offset += word_idx * i + if words.data == nil { + return decoderErrorDictionaryNotSet + } + + if transform_idx < int(trans.num_transforms) { + word := words.data[offset:] + var len int = i + if transform_idx == int(trans.cutOffTransforms[0]) { + copy(s.ringbuffer[pos:], word[:uint(len)]) + } else { + len = transformDictionaryWord(s.ringbuffer[pos:], word, int(len), trans, transform_idx) + } + + pos += int(len) + s.meta_block_remaining_len -= int(len) + if pos >= s.ringbuffer_size { + s.state = stateCommandPostWrite1 + goto saveStateAndReturn + } + } else { + return decoderErrorFormatTransform + } + } else { + return decoderErrorFormatDictionary + } + } else { + var src_start int = (pos - s.distance_code) & s.ringbuffer_mask + copy_dst := s.ringbuffer[pos:] + copy_src := s.ringbuffer[src_start:] + var dst_end int = pos + i + var src_end int = src_start + i + + /* Update the recent distances cache. */ + s.dist_rb[s.dist_rb_idx&3] = s.distance_code + + s.dist_rb_idx++ + s.meta_block_remaining_len -= i + + /* There are 32+ bytes of slack in the ring-buffer allocation. + Also, we have 16 short codes, that make these 16 bytes irrelevant + in the ring-buffer. Let's copy over them as a first guess. */ + copy(copy_dst, copy_src[:16]) + + if src_end > pos && dst_end > src_start { + /* Regions intersect. */ + goto CommandPostWrapCopy + } + + if dst_end >= s.ringbuffer_size || src_end >= s.ringbuffer_size { + /* At least one region wraps. 
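Overlap is why the wrap path below copies byte-by-byte: when the distance is shorter than the copy length, later bytes depend on earlier bytes of the same copy. A minimal illustration (editorial; it ignores the ring-buffer mask used by the real loop):

func overlappingCopy(buf []byte, pos, dist, length int) {
	for i := 0; i < length; i++ {
		buf[pos+i] = buf[pos+i-dist] // may read bytes written earlier in this loop
	}
}

e.g. dist = 1 replicates the previous byte `length` times, which a single bulk copy with memmove semantics would not reproduce.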
*/ + goto CommandPostWrapCopy + } + + pos += i + if i > 16 { + if i > 32 { + copy(copy_dst[16:], copy_src[16:][:uint(i-16)]) + } else { + /* This branch covers about 45% cases. + Fixed size short copy allows more compiler optimizations. */ + copy(copy_dst[16:], copy_src[16:][:16]) + } + } + } + + if s.meta_block_remaining_len <= 0 { + /* Next metablock, if any. */ + s.state = stateMetablockDone + + goto saveStateAndReturn + } else { + goto CommandBegin + } +CommandPostWrapCopy: + { + var wrap_guard int = s.ringbuffer_size - pos + for { + i-- + if i < 0 { + break + } + s.ringbuffer[pos] = s.ringbuffer[(pos-s.distance_code)&s.ringbuffer_mask] + pos++ + wrap_guard-- + if wrap_guard == 0 { + s.state = stateCommandPostWrite2 + goto saveStateAndReturn + } + } + } + + if s.meta_block_remaining_len <= 0 { + /* Next metablock, if any. */ + s.state = stateMetablockDone + + goto saveStateAndReturn + } else { + goto CommandBegin + } + +saveStateAndReturn: + s.pos = pos + s.loop_counter = i + return result +} + +func processCommands(s *Reader) int { + return processCommandsInternal(0, s) +} + +func safeProcessCommands(s *Reader) int { + return processCommandsInternal(1, s) +} + +/* Returns the maximum number of distance symbols which can only represent + distances not exceeding BROTLI_MAX_ALLOWED_DISTANCE. */ + +var maxDistanceSymbol_bound = [maxNpostfix + 1]uint32{0, 4, 12, 28} +var maxDistanceSymbol_diff = [maxNpostfix + 1]uint32{73, 126, 228, 424} + +func maxDistanceSymbol(ndirect uint32, npostfix uint32) uint32 { + var postfix uint32 = 1 << npostfix + if ndirect < maxDistanceSymbol_bound[npostfix] { + return ndirect + maxDistanceSymbol_diff[npostfix] + postfix + } else if ndirect > maxDistanceSymbol_bound[npostfix]+postfix { + return ndirect + maxDistanceSymbol_diff[npostfix] + } else { + return maxDistanceSymbol_bound[npostfix] + maxDistanceSymbol_diff[npostfix] + postfix + } +} + +/* Invariant: input stream is never overconsumed: + - invalid input implies that the whole stream is invalid -> any amount of + input could be read and discarded + - when result is "needs more input", then at least one more byte is REQUIRED + to complete decoding; all input data MUST be consumed by decoder, so + client could swap the input buffer + - when result is "needs more output" decoder MUST ensure that it doesn't + hold more than 7 bits in bit reader; this saves client from swapping input + buffer ahead of time + - when result is "success" decoder MUST return all unused data back to input + buffer; this is possible because the invariant is held on enter */ +func decoderDecompressStream(s *Reader, available_in *uint, next_in *[]byte, available_out *uint, next_out *[]byte) int { + var result int = decoderSuccess + var br *bitReader = &s.br + + /* Do not try to process further in a case of unrecoverable error. */ + if int(s.error_code) < 0 { + return decoderResultError + } + + if *available_out != 0 && (next_out == nil || *next_out == nil) { + return saveErrorCode(s, decoderErrorInvalidArguments) + } + + if *available_out == 0 { + next_out = nil + } + if s.buffer_length == 0 { /* Just connect bit reader to input stream. */ + br.input_len = *available_in + br.input = *next_in + br.byte_pos = 0 + } else { + /* At least one byte of input is required. More than one byte of input may + be required to complete the transaction -> reading more data must be + done in a loop -> do it in a main loop. 
*/ + result = decoderNeedsMoreInput + + br.input = s.buffer.u8[:] + br.byte_pos = 0 + } + + /* State machine */ + for { + if result != decoderSuccess { + /* Error, needs more input/output. */ + if result == decoderNeedsMoreInput { + if s.ringbuffer != nil { /* Pro-actively push output. */ + var intermediate_result int = writeRingBuffer(s, available_out, next_out, nil, true) + + /* WriteRingBuffer checks s->meta_block_remaining_len validity. */ + if int(intermediate_result) < 0 { + result = intermediate_result + break + } + } + + if s.buffer_length != 0 { /* Used with internal buffer. */ + if br.byte_pos == br.input_len { + /* Successfully finished read transaction. + Accumulator contains less than 8 bits, because internal buffer + is expanded byte-by-byte until it is enough to complete read. */ + s.buffer_length = 0 + + /* Switch to input stream and restart. */ + result = decoderSuccess + + br.input_len = *available_in + br.input = *next_in + br.byte_pos = 0 + continue + } else if *available_in != 0 { + /* Not enough data in buffer, but can take one more byte from + input stream. */ + result = decoderSuccess + + s.buffer.u8[s.buffer_length] = (*next_in)[0] + s.buffer_length++ + br.input_len = uint(s.buffer_length) + *next_in = (*next_in)[1:] + (*available_in)-- + + /* Retry with more data in buffer. */ + continue + } + + /* Can't finish reading and no more input. */ + break + /* Input stream doesn't contain enough input. */ + } else { + /* Copy tail to internal buffer and return. */ + *next_in = br.input[br.byte_pos:] + + *available_in = br.input_len - br.byte_pos + for *available_in != 0 { + s.buffer.u8[s.buffer_length] = (*next_in)[0] + s.buffer_length++ + *next_in = (*next_in)[1:] + (*available_in)-- + } + + break + } + } + + /* Unreachable. */ + + /* Fail or needs more output. */ + if s.buffer_length != 0 { + /* Just consumed the buffered input and produced some output. Otherwise + it would result in "needs more input". Reset internal buffer. */ + s.buffer_length = 0 + } else { + /* Using input stream in last iteration. When decoder switches to input + stream it has less than 8 bits in accumulator, so it is safe to + return unused accumulator bits there. */ + bitReaderUnload(br) + + *available_in = br.input_len - br.byte_pos + *next_in = br.input[br.byte_pos:] + } + + break + } + + switch s.state { + /* Prepare to the first read. */ + case stateUninited: + if !warmupBitReader(br) { + result = decoderNeedsMoreInput + break + } + + /* Decode window size. */ + result = decodeWindowBits(s, br) /* Reads 1..8 bits. */ + if result != decoderSuccess { + break + } + + if s.large_window { + s.state = stateLargeWindowBits + break + } + + s.state = stateInitialize + + case stateLargeWindowBits: + if !safeReadBits(br, 6, &s.window_bits) { + result = decoderNeedsMoreInput + break + } + + if s.window_bits < largeMinWbits || s.window_bits > largeMaxWbits { + result = decoderErrorFormatWindowBits + break + } + + s.state = stateInitialize + fallthrough + + /* Maximum distance, see section 9.1. of the spec. */ + /* Fall through. */ + case stateInitialize: + s.max_backward_distance = (1 << s.window_bits) - windowGap + + /* Allocate memory for both block_type_trees and block_len_trees. */ + s.block_type_trees = make([]huffmanCode, (3 * (huffmanMaxSize258 + huffmanMaxSize26))) + + if s.block_type_trees == nil { + result = decoderErrorAllocBlockTypeTrees + break + } + + s.block_len_trees = s.block_type_trees[3*huffmanMaxSize258:] + + s.state = stateMetablockBegin + fallthrough + + /* Fall through. 
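Once the window size is known, the reachable history is fixed; a numeric illustration of the max_backward_distance assignment above (assumption: windowGap matches the Brotli format's window gap of 16):

func maxBackwardDistanceFor(windowBits uint) int {
	const windowGap = 16 // assumption: mirrors the package's windowGap constant
	return (1 << windowBits) - windowGap
}

So windowBits = 22 allows back-references up to 4194288 bytes.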
*/ + case stateMetablockBegin: + decoderStateMetablockBegin(s) + + s.state = stateMetablockHeader + fallthrough + + /* Fall through. */ + case stateMetablockHeader: + result = decodeMetaBlockLength(s, br) + /* Reads 2 - 31 bits. */ + if result != decoderSuccess { + break + } + + if s.is_metadata != 0 || s.is_uncompressed != 0 { + if !bitReaderJumpToByteBoundary(br) { + result = decoderErrorFormatPadding1 + break + } + } + + if s.is_metadata != 0 { + s.state = stateMetadata + break + } + + if s.meta_block_remaining_len == 0 { + s.state = stateMetablockDone + break + } + + calculateRingBufferSize(s) + if s.is_uncompressed != 0 { + s.state = stateUncompressed + break + } + + s.loop_counter = 0 + s.state = stateHuffmanCode0 + + case stateUncompressed: + result = copyUncompressedBlockToOutput(available_out, next_out, nil, s) + if result == decoderSuccess { + s.state = stateMetablockDone + } + + case stateMetadata: + for ; s.meta_block_remaining_len > 0; s.meta_block_remaining_len-- { + var bits uint32 + + /* Read one byte and ignore it. */ + if !safeReadBits(br, 8, &bits) { + result = decoderNeedsMoreInput + break + } + } + + if result == decoderSuccess { + s.state = stateMetablockDone + } + + case stateHuffmanCode0: + if s.loop_counter >= 3 { + s.state = stateMetablockHeader2 + break + } + + /* Reads 1..11 bits. */ + result = decodeVarLenUint8(s, br, &s.num_block_types[s.loop_counter]) + + if result != decoderSuccess { + break + } + + s.num_block_types[s.loop_counter]++ + if s.num_block_types[s.loop_counter] < 2 { + s.loop_counter++ + break + } + + s.state = stateHuffmanCode1 + fallthrough + + case stateHuffmanCode1: + { + var alphabet_size uint32 = s.num_block_types[s.loop_counter] + 2 + var tree_offset int = s.loop_counter * huffmanMaxSize258 + result = readHuffmanCode(alphabet_size, alphabet_size, s.block_type_trees[tree_offset:], nil, s) + if result != decoderSuccess { + break + } + s.state = stateHuffmanCode2 + } + fallthrough + + case stateHuffmanCode2: + { + var alphabet_size uint32 = numBlockLenSymbols + var tree_offset int = s.loop_counter * huffmanMaxSize26 + result = readHuffmanCode(alphabet_size, alphabet_size, s.block_len_trees[tree_offset:], nil, s) + if result != decoderSuccess { + break + } + s.state = stateHuffmanCode3 + } + fallthrough + + case stateHuffmanCode3: + var tree_offset int = s.loop_counter * huffmanMaxSize26 + if !safeReadBlockLength(s, &s.block_length[s.loop_counter], s.block_len_trees[tree_offset:], br) { + result = decoderNeedsMoreInput + break + } + + s.loop_counter++ + s.state = stateHuffmanCode0 + + case stateMetablockHeader2: + { + var bits uint32 + if !safeReadBits(br, 6, &bits) { + result = decoderNeedsMoreInput + break + } + + s.distance_postfix_bits = bits & bitMask(2) + bits >>= 2 + s.num_direct_distance_codes = numDistanceShortCodes + (bits << s.distance_postfix_bits) + s.distance_postfix_mask = int(bitMask(s.distance_postfix_bits)) + s.context_modes = make([]byte, uint(s.num_block_types[0])) + if s.context_modes == nil { + result = decoderErrorAllocContextModes + break + } + + s.loop_counter = 0 + s.state = stateContextModes + } + fallthrough + + case stateContextModes: + result = readContextModes(s) + + if result != decoderSuccess { + break + } + + s.state = stateContextMap1 + fallthrough + + case stateContextMap1: + result = decodeContextMap(s.num_block_types[0]<= 3 { + prepareLiteralDecoding(s) + s.dist_context_map_slice = s.dist_context_map + s.htree_command = []huffmanCode(s.insert_copy_hgroup.htrees[0]) + if !ensureRingBuffer(s) { + result = 
decoderErrorAllocRingBuffer2 + break + } + + s.state = stateCommandBegin + } + + case stateCommandBegin, stateCommandInner, stateCommandPostDecodeLiterals, stateCommandPostWrapCopy: + result = processCommands(s) + + if result == decoderNeedsMoreInput { + result = safeProcessCommands(s) + } + + case stateCommandInnerWrite, stateCommandPostWrite1, stateCommandPostWrite2: + result = writeRingBuffer(s, available_out, next_out, nil, false) + + if result != decoderSuccess { + break + } + + wrapRingBuffer(s) + if s.ringbuffer_size == 1<= uint64(block_size) { + return 0 + } + return block_size - uint(delta) +} + +/* Wraps 64-bit input position to 32-bit ring-buffer position preserving + "not-a-first-lap" feature. */ +func wrapPosition(position uint64) uint32 { + var result uint32 = uint32(position) + var gb uint64 = position >> 30 + if gb > 2 { + /* Wrap every 2GiB; The first 3GB are continuous. */ + result = result&((1<<30)-1) | (uint32((gb-1)&1)+1)<<30 + } + + return result +} + +func (s *Writer) getStorage(size int) []byte { + if len(s.storage) < size { + s.storage = make([]byte, size) + } + + return s.storage +} + +func hashTableSize(max_table_size uint, input_size uint) uint { + var htsize uint = 256 + for htsize < max_table_size && htsize < input_size { + htsize <<= 1 + } + + return htsize +} + +func getHashTable(s *Writer, quality int, input_size uint, table_size *uint) []int { + var max_table_size uint = maxHashTableSize(quality) + var htsize uint = hashTableSize(max_table_size, input_size) + /* Use smaller hash table when input.size() is smaller, since we + fill the table, incurring O(hash table size) overhead for + compression, and if the input is short, we won't need that + many hash table entries anyway. */ + + var table []int + assert(max_table_size >= 256) + if quality == fastOnePassCompressionQuality { + /* Only odd shifts are supported by fast-one-pass. */ + if htsize&0xAAAAA == 0 { + htsize <<= 1 + } + } + + if htsize <= uint(len(s.small_table_)) { + table = s.small_table_[:] + } else { + if htsize > s.large_table_size_ { + s.large_table_size_ = htsize + s.large_table_ = nil + s.large_table_ = make([]int, htsize) + } + + table = s.large_table_ + } + + *table_size = htsize + for i := 0; i < int(htsize); i++ { + table[i] = 0 + } + return table +} + +func encodeWindowBits(lgwin int, large_window bool, last_bytes *uint16, last_bytes_bits *byte) { + if large_window { + *last_bytes = uint16((lgwin&0x3F)<<8 | 0x11) + *last_bytes_bits = 14 + } else { + if lgwin == 16 { + *last_bytes = 0 + *last_bytes_bits = 1 + } else if lgwin == 17 { + *last_bytes = 1 + *last_bytes_bits = 7 + } else if lgwin > 17 { + *last_bytes = uint16((lgwin-17)<<1 | 0x01) + *last_bytes_bits = 4 + } else { + *last_bytes = uint16((lgwin-8)<<4 | 0x01) + *last_bytes_bits = 7 + } + } +} + +/* Decide about the context map based on the ability of the prediction + ability of the previous byte UTF8-prefix on the next byte. The + prediction ability is calculated as Shannon entropy. Here we need + Shannon entropy instead of 'BitsEntropy' since the prefix will be + encoded with the remaining 6 bits of the following byte, and + BitsEntropy will assume that symbol to be stored alone using Huffman + coding. 
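wrapPosition above folds a 64-bit stream position into 32 bits while keeping a "not the first lap" marker in the top bits; worked values derived from the code (editorial):

func wrapPositionExamples() [3]uint32 {
	return [3]uint32{
		wrapPosition(5),         // == 5: the first 3GiB pass through unchanged
		wrapPosition(3<<30 + 5), // == 1<<30 + 5: gb=3 lands in the 1GiB band
		wrapPosition(4<<30 + 5), // == 2<<30 + 5: gb=4 lands in the 2GiB band
	}
}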
*/ + +var kStaticContextMapContinuation = [64]uint32{ + 1, 1, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +} +var kStaticContextMapSimpleUTF8 = [64]uint32{ + 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +} + +func chooseContextMap(quality int, bigram_histo []uint32, num_literal_contexts *uint, literal_context_map *[]uint32) { + var monogram_histo = [3]uint32{0} + var two_prefix_histo = [6]uint32{0} + var total uint + var i uint + var dummy uint + var entropy [4]float64 + for i = 0; i < 9; i++ { + monogram_histo[i%3] += bigram_histo[i] + two_prefix_histo[i%6] += bigram_histo[i] + } + + entropy[1] = shannonEntropy(monogram_histo[:], 3, &dummy) + entropy[2] = (shannonEntropy(two_prefix_histo[:], 3, &dummy) + shannonEntropy(two_prefix_histo[3:], 3, &dummy)) + entropy[3] = 0 + for i = 0; i < 3; i++ { + entropy[3] += shannonEntropy(bigram_histo[3*i:], 3, &dummy) + } + + total = uint(monogram_histo[0] + monogram_histo[1] + monogram_histo[2]) + assert(total != 0) + entropy[0] = 1.0 / float64(total) + entropy[1] *= entropy[0] + entropy[2] *= entropy[0] + entropy[3] *= entropy[0] + + if quality < minQualityForHqContextModeling { + /* 3 context models is a bit slower, don't use it at lower qualities. */ + entropy[3] = entropy[1] * 10 + } + + /* If expected savings by symbol are less than 0.2 bits, skip the + context modeling -- in exchange for faster decoding speed. */ + if entropy[1]-entropy[2] < 0.2 && entropy[1]-entropy[3] < 0.2 { + *num_literal_contexts = 1 + } else if entropy[2]-entropy[3] < 0.02 { + *num_literal_contexts = 2 + *literal_context_map = kStaticContextMapSimpleUTF8[:] + } else { + *num_literal_contexts = 3 + *literal_context_map = kStaticContextMapContinuation[:] + } +} + +/* Decide if we want to use a more complex static context map containing 13 + context values, based on the entropy reduction of histograms over the + first 5 bits of literals. */ + +var kStaticContextMapComplexUTF8 = [64]uint32{ + 11, 11, 12, 12, /* 0 special */ + 0, 0, 0, 0, /* 4 lf */ + 1, 1, 9, 9, /* 8 space */ + 2, 2, 2, 2, /* !, first after space/lf and after something else. */ + 1, 1, 1, 1, /* " */ + 8, 3, 3, 3, /* % */ + 1, 1, 1, 1, /* ({[ */ + 2, 2, 2, 2, /* }]) */ + 8, 4, 4, 4, /* :; */ + 8, 7, 4, 4, /* . */ + 8, 0, 0, 0, /* > */ + 3, 3, 3, 3, /* [0..9] */ + 5, 5, 10, 5, /* [A-Z] */ + 5, 5, 10, 5, + 6, 6, 6, 6, /* [a-z] */ + 6, 6, 6, 6, +} + +func shouldUseComplexStaticContextMap(input []byte, start_pos uint, length uint, mask uint, quality int, size_hint uint, num_literal_contexts *uint, literal_context_map *[]uint32) bool { + /* Try the more complex static context map only for long data. */ + if size_hint < 1<<20 { + return false + } else { + var end_pos uint = start_pos + length + var combined_histo = [32]uint32{0} + var context_histo = [13][32]uint32{[32]uint32{0}} + var total uint32 = 0 + var entropy [3]float64 + var dummy uint + var i uint + var utf8_lut contextLUT = getContextLUT(contextUTF8) + /* To make entropy calculations faster and to fit on the stack, we collect + histograms over the 5 most significant bits of literals. One histogram + without context and 13 additional histograms for each context value. 
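Both heuristics here compare Shannon entropies of sampled histograms; a self-contained sketch of the quantity involved (assumption: the package's shannonEntropy reports total bits, which the callers then normalize by 1/total, as the entropy[0] factor above suggests; this hypothetical helper returns bits per symbol directly and needs the "math" import):

func entropyBitsPerSymbol(histo []uint32) float64 {
	var total float64
	for _, c := range histo {
		total += float64(c)
	}
	var h float64
	for _, c := range histo {
		if c == 0 {
			continue // zero counts contribute nothing
		}
		p := float64(c) / total
		h -= p * math.Log2(p)
	}
	return h
}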
*/ + for ; start_pos+64 <= end_pos; start_pos += 4096 { + var stride_end_pos uint = start_pos + 64 + var prev2 byte = input[start_pos&mask] + var prev1 byte = input[(start_pos+1)&mask] + var pos uint + + /* To make the analysis of the data faster we only examine 64 byte long + strides at every 4kB intervals. */ + for pos = start_pos + 2; pos < stride_end_pos; pos++ { + var literal byte = input[pos&mask] + var context byte = byte(kStaticContextMapComplexUTF8[getContext(prev1, prev2, utf8_lut)]) + total++ + combined_histo[literal>>3]++ + context_histo[context][literal>>3]++ + prev2 = prev1 + prev1 = literal + } + } + + entropy[1] = shannonEntropy(combined_histo[:], 32, &dummy) + entropy[2] = 0 + for i = 0; i < 13; i++ { + entropy[2] += shannonEntropy(context_histo[i][0:], 32, &dummy) + } + + entropy[0] = 1.0 / float64(total) + entropy[1] *= entropy[0] + entropy[2] *= entropy[0] + + /* The triggering heuristics below were tuned by compressing the individual + files of the silesia corpus. If we skip this kind of context modeling + for not very well compressible input (i.e. entropy using context modeling + is 60% of maximal entropy) or if expected savings by symbol are less + than 0.2 bits, then in every case when it triggers, the final compression + ratio is improved. Note however that this heuristics might be too strict + for some cases and could be tuned further. */ + if entropy[2] > 3.0 || entropy[1]-entropy[2] < 0.2 { + return false + } else { + *num_literal_contexts = 13 + *literal_context_map = kStaticContextMapComplexUTF8[:] + return true + } + } +} + +func decideOverLiteralContextModeling(input []byte, start_pos uint, length uint, mask uint, quality int, size_hint uint, num_literal_contexts *uint, literal_context_map *[]uint32) { + if quality < minQualityForContextModeling || length < 64 { + return + } else if shouldUseComplexStaticContextMap(input, start_pos, length, mask, quality, size_hint, num_literal_contexts, literal_context_map) { + } else /* Context map was already set, nothing else to do. */ + { + var end_pos uint = start_pos + length + /* Gather bi-gram data of the UTF8 byte prefixes. To make the analysis of + UTF8 data faster we only examine 64 byte long strides at every 4kB + intervals. */ + + var bigram_prefix_histo = [9]uint32{0} + for ; start_pos+64 <= end_pos; start_pos += 4096 { + var lut = [4]int{0, 0, 1, 2} + var stride_end_pos uint = start_pos + 64 + var prev int = lut[input[start_pos&mask]>>6] * 3 + var pos uint + for pos = start_pos + 1; pos < stride_end_pos; pos++ { + var literal byte = input[pos&mask] + bigram_prefix_histo[prev+lut[literal>>6]]++ + prev = lut[literal>>6] * 3 + } + } + + chooseContextMap(quality, bigram_prefix_histo[0:], num_literal_contexts, literal_context_map) + } +} + +func shouldCompress_encode(data []byte, mask uint, last_flush_pos uint64, bytes uint, num_literals uint, num_commands uint) bool { + /* TODO: find more precise minimal block overhead. 
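The sampling loops above touch only a 64-byte stride every 4096 bytes; an editorial helper showing the sampled fraction:

func sampledBytes(length uint) uint {
	var n uint
	for start := uint(0); start+64 <= length; start += 4096 {
		n += 64 // one stride per 4KiB window
	}
	return n // roughly length/64, i.e. about 1.6% of the input
}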
*/ + if bytes <= 2 { + return false + } + if num_commands < (bytes>>8)+2 { + if float64(num_literals) > 0.99*float64(bytes) { + var literal_histo = [256]uint32{0} + const kSampleRate uint32 = 13 + const kMinEntropy float64 = 7.92 + var bit_cost_threshold float64 = float64(bytes) * kMinEntropy / float64(kSampleRate) + var t uint = uint((uint32(bytes) + kSampleRate - 1) / kSampleRate) + var pos uint32 = uint32(last_flush_pos) + var i uint + for i = 0; i < t; i++ { + literal_histo[data[pos&uint32(mask)]]++ + pos += kSampleRate + } + + if bitsEntropy(literal_histo[:], 256) > bit_cost_threshold { + return false + } + } + } + + return true +} + +/* Chooses the literal context mode for a metablock */ +func chooseContextMode(params *encoderParams, data []byte, pos uint, mask uint, length uint) int { + /* We only do the computation for the option of something else than + CONTEXT_UTF8 for the highest qualities */ + if params.quality >= minQualityForHqBlockSplitting && !isMostlyUTF8(data, pos, mask, length, kMinUTF8Ratio) { + return contextSigned + } + + return contextUTF8 +} + +func writeMetaBlockInternal(data []byte, mask uint, last_flush_pos uint64, bytes uint, is_last bool, literal_context_mode int, params *encoderParams, prev_byte byte, prev_byte2 byte, num_literals uint, commands []command, saved_dist_cache []int, dist_cache []int, storage_ix *uint, storage []byte) { + var wrapped_last_flush_pos uint32 = wrapPosition(last_flush_pos) + var last_bytes uint16 + var last_bytes_bits byte + var literal_context_lut contextLUT = getContextLUT(literal_context_mode) + var block_params encoderParams = *params + + if bytes == 0 { + /* Write the ISLAST and ISEMPTY bits. */ + writeBits(2, 3, storage_ix, storage) + + *storage_ix = (*storage_ix + 7) &^ 7 + return + } + + if !shouldCompress_encode(data, mask, last_flush_pos, bytes, num_literals, uint(len(commands))) { + /* Restore the distance cache, as its last update by + CreateBackwardReferences is now unused. */ + copy(dist_cache, saved_dist_cache[:4]) + + storeUncompressedMetaBlock(is_last, data, uint(wrapped_last_flush_pos), mask, bytes, storage_ix, storage) + return + } + + assert(*storage_ix <= 14) + last_bytes = uint16(storage[1])<<8 | uint16(storage[0]) + last_bytes_bits = byte(*storage_ix) + if params.quality <= maxQualityForStaticEntropyCodes { + storeMetaBlockFast(data, uint(wrapped_last_flush_pos), bytes, mask, is_last, params, commands, storage_ix, storage) + } else if params.quality < minQualityForBlockSplit { + storeMetaBlockTrivial(data, uint(wrapped_last_flush_pos), bytes, mask, is_last, params, commands, storage_ix, storage) + } else { + mb := getMetaBlockSplit() + if params.quality < minQualityForHqBlockSplitting { + var num_literal_contexts uint = 1 + var literal_context_map []uint32 = nil + if !params.disable_literal_context_modeling { + decideOverLiteralContextModeling(data, uint(wrapped_last_flush_pos), bytes, mask, params.quality, params.size_hint, &num_literal_contexts, &literal_context_map) + } + + buildMetaBlockGreedy(data, uint(wrapped_last_flush_pos), mask, prev_byte, prev_byte2, literal_context_lut, num_literal_contexts, literal_context_map, commands, mb) + } else { + buildMetaBlock(data, uint(wrapped_last_flush_pos), mask, &block_params, prev_byte, prev_byte2, commands, literal_context_mode, mb) + } + + if params.quality >= minQualityForOptimizeHistograms { + /* The number of distance symbols effectively used for distance + histograms. It might be less than distance alphabet size + for "Large Window Brotli" (32-bit). 
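The shouldCompress_encode gate above samples one literal in 13 and stores the block uncompressed when the sampled entropy approaches 8 bits/byte; the threshold, isolated (constants copied from the code, helper name hypothetical):

func literalEntropyThreshold(bytes uint) float64 {
	const kSampleRate = 13
	const kMinEntropy = 7.92
	return float64(bytes) * kMinEntropy / float64(kSampleRate)
}

bitsEntropy over the sampled histogram is compared against this value, so the effective cut-off is 7.92 bits per sampled byte.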
*/ + var num_effective_dist_codes uint32 = block_params.dist.alphabet_size + if num_effective_dist_codes > numHistogramDistanceSymbols { + num_effective_dist_codes = numHistogramDistanceSymbols + } + + optimizeHistograms(num_effective_dist_codes, mb) + } + + storeMetaBlock(data, uint(wrapped_last_flush_pos), bytes, mask, prev_byte, prev_byte2, is_last, &block_params, literal_context_mode, commands, mb, storage_ix, storage) + freeMetaBlockSplit(mb) + } + + if bytes+4 < *storage_ix>>3 { + /* Restore the distance cache and last byte. */ + copy(dist_cache, saved_dist_cache[:4]) + + storage[0] = byte(last_bytes) + storage[1] = byte(last_bytes >> 8) + *storage_ix = uint(last_bytes_bits) + storeUncompressedMetaBlock(is_last, data, uint(wrapped_last_flush_pos), mask, bytes, storage_ix, storage) + } +} + +func chooseDistanceParams(params *encoderParams) { + var distance_postfix_bits uint32 = 0 + var num_direct_distance_codes uint32 = 0 + + if params.quality >= minQualityForNonzeroDistanceParams { + var ndirect_msb uint32 + if params.mode == modeFont { + distance_postfix_bits = 1 + num_direct_distance_codes = 12 + } else { + distance_postfix_bits = params.dist.distance_postfix_bits + num_direct_distance_codes = params.dist.num_direct_distance_codes + } + + ndirect_msb = (num_direct_distance_codes >> distance_postfix_bits) & 0x0F + if distance_postfix_bits > maxNpostfix || num_direct_distance_codes > maxNdirect || ndirect_msb<>25)), (last_command.dist_prefix_&0x3FF == 0), &last_command.cmd_prefix_) + } +} + +/* + Processes the accumulated input data and writes + the new output meta-block to s.dest, if one has been + created (otherwise the processed input data is buffered internally). + If |is_last| or |force_flush| is true, an output meta-block is + always created. However, until |is_last| is true encoder may retain up + to 7 bits of the last byte of output. To force encoder to dump the remaining + bits use WriteMetadata() to append an empty meta-data block. + Returns false if the size of the input data is larger than + input_block_size(). +*/ +func encodeData(s *Writer, is_last bool, force_flush bool) bool { + var delta uint64 = unprocessedInputSize(s) + var bytes uint32 = uint32(delta) + var wrapped_last_processed_pos uint32 = wrapPosition(s.last_processed_pos_) + var data []byte + var mask uint32 + var literal_context_mode int + + data = s.ringbuffer_.buffer_ + mask = s.ringbuffer_.mask_ + + /* Adding more blocks after "last" block is forbidden. */ + if s.is_last_block_emitted_ { + return false + } + if is_last { + s.is_last_block_emitted_ = true + } + + if delta > uint64(inputBlockSize(s)) { + return false + } + + if s.params.quality == fastTwoPassCompressionQuality { + if s.command_buf_ == nil || cap(s.command_buf_) < int(kCompressFragmentTwoPassBlockSize) { + s.command_buf_ = make([]uint32, kCompressFragmentTwoPassBlockSize) + s.literal_buf_ = make([]byte, kCompressFragmentTwoPassBlockSize) + } else { + s.command_buf_ = s.command_buf_[:kCompressFragmentTwoPassBlockSize] + s.literal_buf_ = s.literal_buf_[:kCompressFragmentTwoPassBlockSize] + } + } + + if s.params.quality == fastOnePassCompressionQuality || s.params.quality == fastTwoPassCompressionQuality { + var storage []byte + var storage_ix uint = uint(s.last_bytes_bits_) + var table_size uint + var table []int + + if delta == 0 && !is_last { + /* We have no new input data and we don't have to finish the stream, so + nothing to do. 
*/ + return true + } + + storage = s.getStorage(int(2*bytes + 503)) + storage[0] = byte(s.last_bytes_) + storage[1] = byte(s.last_bytes_ >> 8) + table = getHashTable(s, s.params.quality, uint(bytes), &table_size) + if s.params.quality == fastOnePassCompressionQuality { + compressFragmentFast(data[wrapped_last_processed_pos&mask:], uint(bytes), is_last, table, table_size, s.cmd_depths_[:], s.cmd_bits_[:], &s.cmd_code_numbits_, s.cmd_code_[:], &storage_ix, storage) + } else { + compressFragmentTwoPass(data[wrapped_last_processed_pos&mask:], uint(bytes), is_last, s.command_buf_, s.literal_buf_, table, table_size, &storage_ix, storage) + } + + s.last_bytes_ = uint16(storage[storage_ix>>3]) + s.last_bytes_bits_ = byte(storage_ix & 7) + updateLastProcessedPos(s) + s.writeOutput(storage[:storage_ix>>3]) + return true + } + { + /* Theoretical max number of commands is 1 per 2 bytes. */ + newsize := len(s.commands) + int(bytes)/2 + 1 + if newsize > cap(s.commands) { + /* Reserve a bit more memory to allow merging with a next block + without reallocation: that would impact speed. */ + newsize += int(bytes/4) + 16 + + new_commands := make([]command, len(s.commands), newsize) + if s.commands != nil { + copy(new_commands, s.commands) + } + + s.commands = new_commands + } + } + + initOrStitchToPreviousBlock(&s.hasher_, data, uint(mask), &s.params, uint(wrapped_last_processed_pos), uint(bytes), is_last) + + literal_context_mode = chooseContextMode(&s.params, data, uint(wrapPosition(s.last_flush_pos_)), uint(mask), uint(s.input_pos_-s.last_flush_pos_)) + + if len(s.commands) != 0 && s.last_insert_len_ == 0 { + extendLastCommand(s, &bytes, &wrapped_last_processed_pos) + } + + if s.params.quality == zopflificationQuality { + assert(s.params.hasher.type_ == 10) + createZopfliBackwardReferences(uint(bytes), uint(wrapped_last_processed_pos), data, uint(mask), &s.params, s.hasher_.(*h10), s.dist_cache_[:], &s.last_insert_len_, &s.commands, &s.num_literals_) + } else if s.params.quality == hqZopflificationQuality { + assert(s.params.hasher.type_ == 10) + createHqZopfliBackwardReferences(uint(bytes), uint(wrapped_last_processed_pos), data, uint(mask), &s.params, s.hasher_, s.dist_cache_[:], &s.last_insert_len_, &s.commands, &s.num_literals_) + } else { + createBackwardReferences(uint(bytes), uint(wrapped_last_processed_pos), data, uint(mask), &s.params, s.hasher_, s.dist_cache_[:], &s.last_insert_len_, &s.commands, &s.num_literals_) + } + { + var max_length uint = maxMetablockSize(&s.params) + var max_literals uint = max_length / 8 + max_commands := int(max_length / 8) + var processed_bytes uint = uint(s.input_pos_ - s.last_flush_pos_) + var next_input_fits_metablock bool = (processed_bytes+inputBlockSize(s) <= max_length) + var should_flush bool = (s.params.quality < minQualityForBlockSplit && s.num_literals_+uint(len(s.commands)) >= maxNumDelayedSymbols) + /* If maximal possible additional block doesn't fit metablock, flush now. */ + /* TODO: Postpone decision until next block arrives? */ + + /* If block splitting is not used, then flush as soon as there is some + amount of commands / literals produced. */ + if !is_last && !force_flush && !should_flush && next_input_fits_metablock && s.num_literals_ < max_literals && len(s.commands) < max_commands { + /* Merge with next input block. Everything will happen later. */ + if updateLastProcessedPos(s) { + hasherReset(s.hasher_) + } + + return true + } + } + + /* Create the last insert-only command. 
*/ + if s.last_insert_len_ > 0 { + s.commands = append(s.commands, makeInsertCommand(s.last_insert_len_)) + s.num_literals_ += s.last_insert_len_ + s.last_insert_len_ = 0 + } + + if !is_last && s.input_pos_ == s.last_flush_pos_ { + /* We have no new input data and we don't have to finish the stream, so + nothing to do. */ + return true + } + + assert(s.input_pos_ >= s.last_flush_pos_) + assert(s.input_pos_ > s.last_flush_pos_ || is_last) + assert(s.input_pos_-s.last_flush_pos_ <= 1<<24) + { + var metablock_size uint32 = uint32(s.input_pos_ - s.last_flush_pos_) + var storage []byte = s.getStorage(int(2*metablock_size + 503)) + var storage_ix uint = uint(s.last_bytes_bits_) + storage[0] = byte(s.last_bytes_) + storage[1] = byte(s.last_bytes_ >> 8) + writeMetaBlockInternal(data, uint(mask), s.last_flush_pos_, uint(metablock_size), is_last, literal_context_mode, &s.params, s.prev_byte_, s.prev_byte2_, s.num_literals_, s.commands, s.saved_dist_cache_[:], s.dist_cache_[:], &storage_ix, storage) + s.last_bytes_ = uint16(storage[storage_ix>>3]) + s.last_bytes_bits_ = byte(storage_ix & 7) + s.last_flush_pos_ = s.input_pos_ + if updateLastProcessedPos(s) { + hasherReset(s.hasher_) + } + + if s.last_flush_pos_ > 0 { + s.prev_byte_ = data[(uint32(s.last_flush_pos_)-1)&mask] + } + + if s.last_flush_pos_ > 1 { + s.prev_byte2_ = data[uint32(s.last_flush_pos_-2)&mask] + } + + s.commands = s.commands[:0] + s.num_literals_ = 0 + + /* Save the state of the distance cache in case we need to restore it for + emitting an uncompressed block. */ + copy(s.saved_dist_cache_[:], s.dist_cache_[:]) + + s.writeOutput(storage[:storage_ix>>3]) + return true + } +} + +/* Dumps remaining output bits and metadata header to |header|. + Returns number of produced bytes. + REQUIRED: |header| should be 8-byte aligned and at least 16 bytes long. + REQUIRED: |block_size| <= (1 << 24). 
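The header that writeMetadataHeader below emits is tiny; an editorial recomputation of its size from the writeBits calls (assuming an empty bit buffer going in; field names follow RFC 7932):

func metadataHeaderBitCount(blockSize uint) uint {
	bits := uint(1 + 2 + 1) // ISLAST=0, MNIBBLES=3 (metadata), reserved=0
	if blockSize == 0 {
		return bits + 2 // just the zero MSKIPBYTES field
	}
	var nbits uint
	if blockSize > 1 {
		for v := blockSize - 1; v > 0; v >>= 1 {
			nbits++ // floor(log2(blockSize-1)) + 1
		}
	}
	nbytes := (nbits + 7) / 8
	return bits + 2 + 8*nbytes
}

e.g. blockSize = 300 needs 9 length bits, so nbytes = 2 and the header is 22 bits, rounded up to 3 bytes by the caller.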
*/ +func writeMetadataHeader(s *Writer, block_size uint, header []byte) uint { + storage_ix := uint(s.last_bytes_bits_) + header[0] = byte(s.last_bytes_) + header[1] = byte(s.last_bytes_ >> 8) + s.last_bytes_ = 0 + s.last_bytes_bits_ = 0 + + writeBits(1, 0, &storage_ix, header) + writeBits(2, 3, &storage_ix, header) + writeBits(1, 0, &storage_ix, header) + if block_size == 0 { + writeBits(2, 0, &storage_ix, header) + } else { + var nbits uint32 + if block_size == 1 { + nbits = 0 + } else { + nbits = log2FloorNonZero(uint(uint32(block_size)-1)) + 1 + } + var nbytes uint32 = (nbits + 7) / 8 + writeBits(2, uint64(nbytes), &storage_ix, header) + writeBits(uint(8*nbytes), uint64(block_size)-1, &storage_ix, header) + } + + return (storage_ix + 7) >> 3 +} + +func injectBytePaddingBlock(s *Writer) { + var seal uint32 = uint32(s.last_bytes_) + var seal_bits uint = uint(s.last_bytes_bits_) + s.last_bytes_ = 0 + s.last_bytes_bits_ = 0 + + /* is_last = 0, data_nibbles = 11, reserved = 0, meta_nibbles = 00 */ + seal |= 0x6 << seal_bits + + seal_bits += 6 + + destination := s.tiny_buf_.u8[:] + + destination[0] = byte(seal) + if seal_bits > 8 { + destination[1] = byte(seal >> 8) + } + if seal_bits > 16 { + destination[2] = byte(seal >> 16) + } + s.writeOutput(destination[:(seal_bits+7)>>3]) +} + +func checkFlushComplete(s *Writer) { + if s.stream_state_ == streamFlushRequested && s.err == nil { + s.stream_state_ = streamProcessing + } +} + +func encoderCompressStreamFast(s *Writer, op int, available_in *uint, next_in *[]byte) bool { + var block_size_limit uint = uint(1) << s.params.lgwin + var buf_size uint = brotli_min_size_t(kCompressFragmentTwoPassBlockSize, brotli_min_size_t(*available_in, block_size_limit)) + var command_buf []uint32 = nil + var literal_buf []byte = nil + if s.params.quality != fastOnePassCompressionQuality && s.params.quality != fastTwoPassCompressionQuality { + return false + } + + if s.params.quality == fastTwoPassCompressionQuality { + if s.command_buf_ == nil || cap(s.command_buf_) < int(buf_size) { + s.command_buf_ = make([]uint32, buf_size) + s.literal_buf_ = make([]byte, buf_size) + } else { + s.command_buf_ = s.command_buf_[:buf_size] + s.literal_buf_ = s.literal_buf_[:buf_size] + } + + command_buf = s.command_buf_ + literal_buf = s.literal_buf_ + } + + for { + if s.stream_state_ == streamFlushRequested && s.last_bytes_bits_ != 0 { + injectBytePaddingBlock(s) + continue + } + + /* Compress block only when stream is not + finished, there is no pending flush request, and there is either + additional input or pending operation. 
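Reading injectBytePaddingBlock above bit-by-bit: the 6-bit seal 0x6 is, LSB first, ISLAST=0, MNIBBLES=11 (an empty metadata block), reserved=0 and a zero MSKIPBYTES, which pushes the stream to a byte boundary without marking it last. The byte assembly, isolated (editorial sketch):

func sealBytes(lastBytes uint32, lastBits uint) []byte {
	seal := lastBytes | 0x6<<lastBits // append the padding-block marker
	n := (lastBits + 6 + 7) >> 3      // whole bytes covering all pending bits
	out := make([]byte, n)
	for i := range out {
		out[i] = byte(seal >> (8 * uint(i)))
	}
	return out
}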
*/ + if s.stream_state_ == streamProcessing && (*available_in != 0 || op != int(operationProcess)) { + var block_size uint = brotli_min_size_t(block_size_limit, *available_in) + var is_last bool = (*available_in == block_size) && (op == int(operationFinish)) + var force_flush bool = (*available_in == block_size) && (op == int(operationFlush)) + var max_out_size uint = 2*block_size + 503 + var storage []byte = nil + var storage_ix uint = uint(s.last_bytes_bits_) + var table_size uint + var table []int + + if force_flush && block_size == 0 { + s.stream_state_ = streamFlushRequested + continue + } + + storage = s.getStorage(int(max_out_size)) + + storage[0] = byte(s.last_bytes_) + storage[1] = byte(s.last_bytes_ >> 8) + table = getHashTable(s, s.params.quality, block_size, &table_size) + + if s.params.quality == fastOnePassCompressionQuality { + compressFragmentFast(*next_in, block_size, is_last, table, table_size, s.cmd_depths_[:], s.cmd_bits_[:], &s.cmd_code_numbits_, s.cmd_code_[:], &storage_ix, storage) + } else { + compressFragmentTwoPass(*next_in, block_size, is_last, command_buf, literal_buf, table, table_size, &storage_ix, storage) + } + + *next_in = (*next_in)[block_size:] + *available_in -= block_size + var out_bytes uint = storage_ix >> 3 + s.writeOutput(storage[:out_bytes]) + + s.last_bytes_ = uint16(storage[storage_ix>>3]) + s.last_bytes_bits_ = byte(storage_ix & 7) + + if force_flush { + s.stream_state_ = streamFlushRequested + } + if is_last { + s.stream_state_ = streamFinished + } + continue + } + + break + } + + checkFlushComplete(s) + return true +} + +func processMetadata(s *Writer, available_in *uint, next_in *[]byte) bool { + if *available_in > 1<<24 { + return false + } + + /* Switch to metadata block workflow, if required. */ + if s.stream_state_ == streamProcessing { + s.remaining_metadata_bytes_ = uint32(*available_in) + s.stream_state_ = streamMetadataHead + } + + if s.stream_state_ != streamMetadataHead && s.stream_state_ != streamMetadataBody { + return false + } + + for { + if s.stream_state_ == streamFlushRequested && s.last_bytes_bits_ != 0 { + injectBytePaddingBlock(s) + continue + } + + if s.input_pos_ != s.last_flush_pos_ { + var result bool = encodeData(s, false, true) + if !result { + return false + } + continue + } + + if s.stream_state_ == streamMetadataHead { + n := writeMetadataHeader(s, uint(s.remaining_metadata_bytes_), s.tiny_buf_.u8[:]) + s.writeOutput(s.tiny_buf_.u8[:n]) + s.stream_state_ = streamMetadataBody + continue + } else { + /* Exit workflow only when there is no more input and no more output. + Otherwise client may continue producing empty metadata blocks. */ + if s.remaining_metadata_bytes_ == 0 { + s.remaining_metadata_bytes_ = math.MaxUint32 + s.stream_state_ = streamProcessing + break + } + + /* This guarantees progress in "TakeOutput" workflow. 
*/ + var c uint32 = brotli_min_uint32_t(s.remaining_metadata_bytes_, 16) + copy(s.tiny_buf_.u8[:], (*next_in)[:c]) + *next_in = (*next_in)[c:] + *available_in -= uint(c) + s.remaining_metadata_bytes_ -= c + s.writeOutput(s.tiny_buf_.u8[:c]) + + continue + } + } + + return true +} + +func updateSizeHint(s *Writer, available_in uint) { + if s.params.size_hint == 0 { + var delta uint64 = unprocessedInputSize(s) + var tail uint64 = uint64(available_in) + var limit uint32 = 1 << 30 + var total uint32 + if (delta >= uint64(limit)) || (tail >= uint64(limit)) || ((delta + tail) >= uint64(limit)) { + total = limit + } else { + total = uint32(delta + tail) + } + + s.params.size_hint = uint(total) + } +} + +func encoderCompressStream(s *Writer, op int, available_in *uint, next_in *[]byte) bool { + if !ensureInitialized(s) { + return false + } + + /* Unfinished metadata block; check requirements. */ + if s.remaining_metadata_bytes_ != math.MaxUint32 { + if uint32(*available_in) != s.remaining_metadata_bytes_ { + return false + } + if op != int(operationEmitMetadata) { + return false + } + } + + if op == int(operationEmitMetadata) { + updateSizeHint(s, 0) /* First data metablock might be emitted here. */ + return processMetadata(s, available_in, next_in) + } + + if s.stream_state_ == streamMetadataHead || s.stream_state_ == streamMetadataBody { + return false + } + + if s.stream_state_ != streamProcessing && *available_in != 0 { + return false + } + + if s.params.quality == fastOnePassCompressionQuality || s.params.quality == fastTwoPassCompressionQuality { + return encoderCompressStreamFast(s, op, available_in, next_in) + } + + for { + var remaining_block_size uint = remainingInputBlockSize(s) + + if remaining_block_size != 0 && *available_in != 0 { + var copy_input_size uint = brotli_min_size_t(remaining_block_size, *available_in) + copyInputToRingBuffer(s, copy_input_size, *next_in) + *next_in = (*next_in)[copy_input_size:] + *available_in -= copy_input_size + continue + } + + if s.stream_state_ == streamFlushRequested && s.last_bytes_bits_ != 0 { + injectBytePaddingBlock(s) + continue + } + + /* Compress data only when stream is not + finished and there is no pending flush request. 
*/ + if s.stream_state_ == streamProcessing { + if remaining_block_size == 0 || op != int(operationProcess) { + var is_last bool = ((*available_in == 0) && op == int(operationFinish)) + var force_flush bool = ((*available_in == 0) && op == int(operationFlush)) + var result bool + updateSizeHint(s, *available_in) + result = encodeData(s, is_last, force_flush) + if !result { + return false + } + if force_flush { + s.stream_state_ = streamFlushRequested + } + if is_last { + s.stream_state_ = streamFinished + } + continue + } + } + + break + } + + checkFlushComplete(s) + return true +} + +func (w *Writer) writeOutput(data []byte) { + if w.err != nil { + return + } + + _, w.err = w.dst.Write(data) + if w.err == nil { + checkFlushComplete(w) + } +} diff --git a/vendor/github.com/andybalholm/brotli/encoder_dict.go b/vendor/github.com/andybalholm/brotli/encoder_dict.go new file mode 100644 index 00000000000..55c051c6238 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/encoder_dict.go @@ -0,0 +1,22 @@ +package brotli + +/* Dictionary data (words and transforms) for 1 possible context */ +type encoderDictionary struct { + words *dictionary + cutoffTransformsCount uint32 + cutoffTransforms uint64 + hash_table []uint16 + buckets []uint16 + dict_words []dictWord +} + +func initEncoderDictionary(dict *encoderDictionary) { + dict.words = getDictionary() + + dict.hash_table = kStaticDictionaryHash[:] + dict.buckets = kStaticDictionaryBuckets[:] + dict.dict_words = kStaticDictionaryWords[:] + + dict.cutoffTransformsCount = kCutoffTransformsCount + dict.cutoffTransforms = kCutoffTransforms +} diff --git a/vendor/github.com/andybalholm/brotli/entropy_encode.go b/vendor/github.com/andybalholm/brotli/entropy_encode.go new file mode 100644 index 00000000000..3f469a3dd94 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/entropy_encode.go @@ -0,0 +1,592 @@ +package brotli + +import "math" + +/* Copyright 2010 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Entropy encoding (Huffman) utilities. */ + +/* A node of a Huffman tree. */ +type huffmanTree struct { + total_count_ uint32 + index_left_ int16 + index_right_or_value_ int16 +} + +func initHuffmanTree(self *huffmanTree, count uint32, left int16, right int16) { + self.total_count_ = count + self.index_left_ = left + self.index_right_or_value_ = right +} + +/* Input size optimized Shell sort. */ +type huffmanTreeComparator func(huffmanTree, huffmanTree) bool + +var sortHuffmanTreeItems_gaps = []uint{132, 57, 23, 10, 4, 1} + +func sortHuffmanTreeItems(items []huffmanTree, n uint, comparator huffmanTreeComparator) { + if n < 13 { + /* Insertion sort. */ + var i uint + for i = 1; i < n; i++ { + var tmp huffmanTree = items[i] + var k uint = i + var j uint = i - 1 + for comparator(tmp, items[j]) { + items[k] = items[j] + k = j + if j == 0 { + break + } + j-- + } + + items[k] = tmp + } + + return + } else { + var g int + if n < 57 { + g = 2 + } else { + g = 0 + } + for ; g < 6; g++ { + var gap uint = sortHuffmanTreeItems_gaps[g] + var i uint + for i = gap; i < n; i++ { + var j uint = i + var tmp huffmanTree = items[i] + for ; j >= gap && comparator(tmp, items[j-gap]); j -= gap { + items[j] = items[j-gap] + } + + items[j] = tmp + } + } + } +} + +/* Returns 1 if assignment of depths succeeded, otherwise 0. 
*/
+func setDepth(p0 int, pool []huffmanTree, depth []byte, max_depth int) bool {
+	var stack [16]int
+	var level int = 0
+	var p int = p0
+	assert(max_depth <= 15)
+	stack[0] = -1
+	for {
+		if pool[p].index_left_ >= 0 {
+			level++
+			if level > max_depth {
+				return false
+			}
+			stack[level] = int(pool[p].index_right_or_value_)
+			p = int(pool[p].index_left_)
+			continue
+		} else {
+			depth[pool[p].index_right_or_value_] = byte(level)
+		}
+
+		for level >= 0 && stack[level] == -1 {
+			level--
+		}
+		if level < 0 {
+			return true
+		}
+		p = stack[level]
+		stack[level] = -1
+	}
+}
+
+/* Sort the root nodes, least popular first. */
+func sortHuffmanTree(v0 huffmanTree, v1 huffmanTree) bool {
+	if v0.total_count_ != v1.total_count_ {
+		return v0.total_count_ < v1.total_count_
+	}
+
+	return v0.index_right_or_value_ > v1.index_right_or_value_
+}
+
+/* This function will create a Huffman tree.
+
+   The catch here is that the tree cannot be arbitrarily deep.
+   Brotli specifies a maximum depth of 15 bits for "code trees"
+   and 7 bits for "code length code trees."
+
+   count_limit is the minimum population count that leaf values are forced
+   up to; it is doubled until the resulting tree fits within the maximum
+   depth requirement.
+
+   This algorithm performs poorly on very long data blocks, especially when
+   population counts exceed 2**tree_limit, but we are not planning to use it
+   with extremely long blocks.
+
+   See http://en.wikipedia.org/wiki/Huffman_coding */
+func createHuffmanTree(data []uint32, length uint, tree_limit int, tree []huffmanTree, depth []byte) {
+	var count_limit uint32
+	var sentinel huffmanTree
+	initHuffmanTree(&sentinel, math.MaxUint32, -1, -1)
+
+	/* For block sizes below 64 kB, we never need to do a second iteration
+	   of this loop. Probably all of our block sizes will be smaller than
+	   that, so this loop is mostly of academic interest. If we actually
+	   needed it, the Katajainen algorithm would be a better choice. */
+	for count_limit = 1; ; count_limit *= 2 {
+		var n uint = 0
+		var i uint
+		var j uint
+		var k uint
+		for i = length; i != 0; {
+			i--
+			if data[i] != 0 {
+				var count uint32 = brotli_max_uint32_t(data[i], count_limit)
+				initHuffmanTree(&tree[n], count, -1, int16(i))
+				n++
+			}
+		}
+
+		if n == 1 {
+			depth[tree[0].index_right_or_value_] = 1 /* Only one element. */
+			break
+		}
+
+		sortHuffmanTreeItems(tree, n, huffmanTreeComparator(sortHuffmanTree))
+
+		/* The nodes are:
+		   [0, n): the sorted leaf nodes that we start with.
+		   [n]: we add a sentinel here.
+		   [n + 1, 2n): new parent nodes are added here, starting from
+		   (n+1). These are naturally in ascending order.
+		   [2n]: we add a sentinel at the end as well.
+		   There will be (2n+1) elements at the end. */
+		tree[n] = sentinel
+
+		tree[n+1] = sentinel
+
+		i = 0     /* Points to the next leaf node. */
+		j = n + 1 /* Points to the next non-leaf node. */
+		for k = n - 1; k != 0; k-- {
+			var left uint
+			var right uint
+			if tree[i].total_count_ <= tree[j].total_count_ {
+				left = i
+				i++
+			} else {
+				left = j
+				j++
+			}
+
+			if tree[i].total_count_ <= tree[j].total_count_ {
+				right = i
+				i++
+			} else {
+				right = j
+				j++
+			}
+			{
+				/* The sentinel node becomes the parent node. */
+				var j_end uint = 2*n - k
+				tree[j_end].total_count_ = tree[left].total_count_ + tree[right].total_count_
+				tree[j_end].index_left_ = int16(left)
+				tree[j_end].index_right_or_value_ = int16(right)
+
+				/* Add back the last sentinel node. */
+				tree[j_end+1] = sentinel
+			}
+		}
+
+		if setDepth(int(2*n-1), tree[0:], depth, tree_limit) {
+			/* We need to pack the Huffman tree in tree_limit bits. If this
+			   was not successful, add fake entities to the lowest values
+			   and retry. */
+			break
+		}
+	}
+}
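// Aside: a minimal driver sketch, not part of the vendored file (the helper
// name is ours), showing how createHuffmanTree is typically invoked. Per the
// node-layout comment above, the scratch slice needs room for 2n+1 nodes, so
// 2*len(histogram)+1 is always sufficient; tree_limit is 15 for code trees
// and 7 for code length code trees.
func buildDepthsSketch(histogram []uint32, treeLimit int) []byte {
	depths := make([]byte, len(histogram))
	scratch := make([]huffmanTree, 2*len(histogram)+1)
	createHuffmanTree(histogram, uint(len(histogram)), treeLimit, scratch, depths)
	return depths // depths[i] is the code length of symbol i; 0 means unused
}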
+
+func reverse(v []byte, start uint, end uint) {
+	end--
+	for start < end {
+		var tmp byte = v[start]
+		v[start] = v[end]
+		v[end] = tmp
+		start++
+		end--
+	}
+}
+
+func writeHuffmanTreeRepetitions(previous_value byte, value byte, repetitions uint, tree_size *uint, tree []byte, extra_bits_data []byte) {
+	assert(repetitions > 0)
+	if previous_value != value {
+		tree[*tree_size] = value
+		extra_bits_data[*tree_size] = 0
+		(*tree_size)++
+		repetitions--
+	}
+
+	if repetitions == 7 {
+		tree[*tree_size] = value
+		extra_bits_data[*tree_size] = 0
+		(*tree_size)++
+		repetitions--
+	}
+
+	if repetitions < 3 {
+		var i uint
+		for i = 0; i < repetitions; i++ {
+			tree[*tree_size] = value
+			extra_bits_data[*tree_size] = 0
+			(*tree_size)++
+		}
+	} else {
+		var start uint = *tree_size
+		repetitions -= 3
+		for {
+			tree[*tree_size] = repeatPreviousCodeLength
+			extra_bits_data[*tree_size] = byte(repetitions & 0x3)
+			(*tree_size)++
+			repetitions >>= 2
+			if repetitions == 0 {
+				break
+			}
+
+			repetitions--
+		}
+
+		reverse(tree, start, *tree_size)
+		reverse(extra_bits_data, start, *tree_size)
+	}
+}
+
+func writeHuffmanTreeRepetitionsZeros(repetitions uint, tree_size *uint, tree []byte, extra_bits_data []byte) {
+	if repetitions == 11 {
+		tree[*tree_size] = 0
+		extra_bits_data[*tree_size] = 0
+		(*tree_size)++
+		repetitions--
+	}
+
+	if repetitions < 3 {
+		var i uint
+		for i = 0; i < repetitions; i++ {
+			tree[*tree_size] = 0
+			extra_bits_data[*tree_size] = 0
+			(*tree_size)++
+		}
+	} else {
+		var start uint = *tree_size
+		repetitions -= 3
+		for {
+			tree[*tree_size] = repeatZeroCodeLength
+			extra_bits_data[*tree_size] = byte(repetitions & 0x7)
+			(*tree_size)++
+			repetitions >>= 3
+			if repetitions == 0 {
+				break
+			}
+
+			repetitions--
+		}
+
+		reverse(tree, start, *tree_size)
+		reverse(extra_bits_data, start, *tree_size)
+	}
+}
+
+/* Changes the population counts so that the subsequent Huffman tree
+   compression, especially its RLE part, is more likely to compress the
+   data efficiently.
+
+   length contains the size of the histogram.
+   counts contains the population counts.
+   good_for_rle is a scratch buffer of at least length bytes. */
+func optimizeHuffmanCountsForRLE(length uint, counts []uint32, good_for_rle []byte) {
+	var nonzero_count uint = 0
+	var stride uint
+	var limit uint
+	var sum uint
+	var streak_limit uint = 1240
+	var i uint
+	/* 1) Let's make the Huffman code more compatible with RLE encoding. */
+	for i = 0; i < length; i++ {
+		if counts[i] != 0 {
+			nonzero_count++
+		}
+	}
+
+	if nonzero_count < 16 {
+		return
+	}
+
+	for length != 0 && counts[length-1] == 0 {
+		length--
+	}
+
+	if length == 0 {
+		return /* All zeros. */
+	}
+
+	/* Now counts[0..length - 1] does not have trailing zeros. */
+	{
+		var nonzeros uint = 0
+		var smallest_nonzero uint32 = 1 << 30
+		for i = 0; i < length; i++ {
+			if counts[i] != 0 {
+				nonzeros++
+				if smallest_nonzero > counts[i] {
+					smallest_nonzero = counts[i]
+				}
+			}
+		}
+
+		if nonzeros < 5 {
+			/* A small histogram will model it well. */
+			return
+		}
+
+		if smallest_nonzero < 4 {
+			var zeros uint = length - nonzeros
+			if zeros < 6 {
+				for i = 1; i < length-1; i++ {
+					if counts[i-1] != 0 && counts[i] == 0 && counts[i+1] != 0 {
+						counts[i] = 1
+					}
+				}
+			}
+		}
+
+		if nonzeros < 28 {
+			return
+		}
+	}
+
+	/* 2) Let's mark all population counts that can already be encoded
+	   with an RLE code. */
+	for i := 0; i < int(length); i++ {
+		good_for_rle[i] = 0
+	}
+	{
+		var symbol uint32 = counts[0]
+		/* Let's not spoil any of the existing good RLE codes. Mark any run
+		   of zeros of length at least 5 as good_for_rle. Mark any run of
+		   non-zeros of length at least 7 as good_for_rle. */
+
+		var step uint = 0
+		for i = 0; i <= length; i++ {
+			if i == length || counts[i] != symbol {
+				if (symbol == 0 && step >= 5) || (symbol != 0 && step >= 7) {
+					var k uint
+					for k = 0; k < step; k++ {
+						good_for_rle[i-k-1] = 1
+					}
+				}
+
+				step = 1
+				if i != length {
+					symbol = counts[i]
+				}
+			} else {
+				step++
+			}
+		}
+	}
+
+	/* 3) Let's replace those population counts that lead to more RLE codes.
+	   Math here is in 24.8 fixed point representation. */
+	stride = 0
+
+	limit = uint(256*(counts[0]+counts[1]+counts[2])/3 + 420)
+	sum = 0
+	for i = 0; i <= length; i++ {
+		if i == length || good_for_rle[i] != 0 || (i != 0 && good_for_rle[i-1] != 0) || (256*counts[i]-uint32(limit)+uint32(streak_limit)) >= uint32(2*streak_limit) {
+			if stride >= 4 || (stride >= 3 && sum == 0) {
+				var k uint
+				var count uint = (sum + stride/2) / stride
+				/* The stride must end; collapse what we have, if we have
+				   enough (4). */
+				if count == 0 {
+					count = 1
+				}
+
+				if sum == 0 {
+					/* Don't let an all-zeros stride be upgraded to ones. */
+					count = 0
+				}
+
+				for k = 0; k < stride; k++ {
+					/* We don't want to change the value at counts[i], which
+					   already belongs to the next stride. Hence the -1. */
+					counts[i-k-1] = uint32(count)
+				}
+			}
+
+			stride = 0
+			sum = 0
+			if i < length-2 {
+				/* All interesting strides have a count of at least 4, */
+				/* at least when non-zero. */
+				limit = uint(256*(counts[i]+counts[i+1]+counts[i+2])/3 + 420)
+			} else if i < length {
+				limit = uint(256 * counts[i])
+			} else {
+				limit = 0
+			}
+		}
+
+		stride++
+		if i != length {
+			sum += uint(counts[i])
+			if stride >= 4 {
+				limit = (256*sum + stride/2) / stride
+			}
+
+			if stride == 4 {
+				limit += 120
+			}
+		}
+	}
+}
+
+func decideOverRLEUse(depth []byte, length uint, use_rle_for_non_zero *bool, use_rle_for_zero *bool) {
+	var total_reps_zero uint = 0
+	var total_reps_non_zero uint = 0
+	var count_reps_zero uint = 1
+	var count_reps_non_zero uint = 1
+	var i uint
+	for i = 0; i < length; {
+		var value byte = depth[i]
+		var reps uint = 1
+		var k uint
+		for k = i + 1; k < length && depth[k] == value; k++ {
+			reps++
+		}
+
+		if reps >= 3 && value == 0 {
+			total_reps_zero += reps
+			count_reps_zero++
+		}
+
+		if reps >= 4 && value != 0 {
+			total_reps_non_zero += reps
+			count_reps_non_zero++
+		}
+
+		i += reps
+	}
+
+	*use_rle_for_non_zero = total_reps_non_zero > count_reps_non_zero*2
+	*use_rle_for_zero = total_reps_zero > count_reps_zero*2
+}
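// Aside: a worked sketch, not part of the vendored file (the helper name is
// ours), of the run-length scheme used by writeHuffmanTreeRepetitionsZeros
// above. A run of n >= 3 zeros is split into base-8 digits of n-3, emitting
// one repeatZeroCodeLength symbol (code 17 in RFC 7932, carrying 3 extra
// bits) per digit and decrementing between digits, exactly as the encoder
// loop does before the digits are reversed into stream order.
func zeroRunDigitsSketch(runLength uint) []byte {
	var digits []byte
	reps := runLength - 3 // callers guarantee runLength >= 3
	for {
		digits = append(digits, byte(reps&7)) // 3 extra bits per symbol
		reps >>= 3
		if reps == 0 {
			break
		}
		reps--
	}
	return digits // e.g. runLength 10 -> [7]; runLength 20 -> [1, 1]
}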
+
+/* Write a Huffman tree from bit depths into the bit-stream representation
+   of a Huffman tree. The generated representation is then compressed once
+   more using a Huffman tree. */
+func writeHuffmanTree(depth []byte, length uint, tree_size *uint, tree []byte, extra_bits_data []byte) {
+	var previous_value byte = initialRepeatedCodeLength
+	var i uint
+	var use_rle_for_non_zero bool = false
+	var use_rle_for_zero bool = false
+	var new_length uint = length
+	/* Throw away trailing zeros. */
+	for i = 0; i < length; i++ {
+		if depth[length-i-1] == 0 {
+			new_length--
+		} else {
+			break
+		}
+	}
+
+	/* First gather statistics on whether it is a good idea to do RLE. */
+	if length > 50 {
+		/* Find RLE coding for longer codes.
+		   Shorter codes seem not to benefit from RLE. */
+		decideOverRLEUse(depth, new_length, &use_rle_for_non_zero, &use_rle_for_zero)
+	}
+
+	/* Actual RLE coding. */
+	for i = 0; i < new_length; {
+		var value byte = depth[i]
+		var reps uint = 1
+		if (value != 0 && use_rle_for_non_zero) || (value == 0 && use_rle_for_zero) {
+			var k uint
+			for k = i + 1; k < new_length && depth[k] == value; k++ {
+				reps++
+			}
+		}
+
+		if value == 0 {
+			writeHuffmanTreeRepetitionsZeros(reps, tree_size, tree, extra_bits_data)
+		} else {
+			writeHuffmanTreeRepetitions(previous_value, value, reps, tree_size, tree, extra_bits_data)
+			previous_value = value
+		}
+
+		i += reps
+	}
+}
+
+var reverseBits_kLut = [16]uint{
+	0x00,
+	0x08,
+	0x04,
+	0x0C,
+	0x02,
+	0x0A,
+	0x06,
+	0x0E,
+	0x01,
+	0x09,
+	0x05,
+	0x0D,
+	0x03,
+	0x0B,
+	0x07,
+	0x0F,
+}
+
+func reverseBits(num_bits uint, bits uint16) uint16 {
+	var retval uint = reverseBits_kLut[bits&0x0F]
+	var i uint
+	for i = 4; i < num_bits; i += 4 {
+		retval <<= 4
+		bits = uint16(bits >> 4)
+		retval |= reverseBits_kLut[bits&0x0F]
+	}
+
+	retval >>= ((0 - num_bits) & 0x03)
+	return uint16(retval)
+}
+
+/* Bit depths take values in 0..15. */
+const maxHuffmanBits = 16
+
+/* Get the actual bit values for a tree of bit depths. */
+func convertBitDepthsToSymbols(depth []byte, len uint, bits []uint16) {
+	var bl_count = [maxHuffmanBits]uint16{0}
+	var next_code [maxHuffmanBits]uint16
+	var i uint
+	/* In Brotli, all bit depths are in [1..15]; a bit depth of 0 means that
+	   the symbol does not exist.
*/ + + var code int = 0 + for i = 0; i < len; i++ { + bl_count[depth[i]]++ + } + + bl_count[0] = 0 + next_code[0] = 0 + for i = 1; i < maxHuffmanBits; i++ { + code = (code + int(bl_count[i-1])) << 1 + next_code[i] = uint16(code) + } + + for i = 0; i < len; i++ { + if depth[i] != 0 { + bits[i] = reverseBits(uint(depth[i]), next_code[depth[i]]) + next_code[depth[i]]++ + } + } +} diff --git a/vendor/github.com/andybalholm/brotli/entropy_encode_static.go b/vendor/github.com/andybalholm/brotli/entropy_encode_static.go new file mode 100644 index 00000000000..5ddf3fcbaef --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/entropy_encode_static.go @@ -0,0 +1,4394 @@ +package brotli + +var kCodeLengthDepth = [18]byte{4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 0, 4, 4} + +var kStaticCommandCodeDepth = [numCommandSymbols]byte{ + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, 
+ 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, +} + +var kStaticDistanceCodeDepth = [64]byte{ + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, +} + +var kCodeLengthBits = [18]uint32{0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 15, 31, 0, 11, 7} + +func storeStaticCodeLengthCode(storage_ix *uint, storage []byte) { + writeBits(40, 0x0000FF55555554, storage_ix, storage) +} + +var kZeroRepsBits = [numCommandSymbols]uint64{ + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000007, + 0x00000017, + 0x00000027, + 0x00000037, + 0x00000047, + 0x00000057, + 0x00000067, + 0x00000077, + 0x00000770, + 0x00000b87, + 0x00001387, + 0x00001b87, + 0x00002387, + 0x00002b87, + 0x00003387, + 0x00003b87, + 0x00000397, + 0x00000b97, + 0x00001397, + 0x00001b97, + 0x00002397, + 0x00002b97, + 0x00003397, + 0x00003b97, + 0x000003a7, + 0x00000ba7, + 0x000013a7, + 0x00001ba7, + 0x000023a7, + 0x00002ba7, + 0x000033a7, + 0x00003ba7, + 0x000003b7, + 0x00000bb7, + 0x000013b7, + 0x00001bb7, + 0x000023b7, + 0x00002bb7, + 0x000033b7, + 0x00003bb7, + 0x000003c7, + 0x00000bc7, + 0x000013c7, + 0x00001bc7, + 0x000023c7, + 0x00002bc7, + 0x000033c7, + 0x00003bc7, + 0x000003d7, + 0x00000bd7, + 0x000013d7, + 0x00001bd7, + 0x000023d7, + 0x00002bd7, + 0x000033d7, + 0x00003bd7, + 0x000003e7, + 0x00000be7, + 0x000013e7, + 0x00001be7, + 0x000023e7, + 0x00002be7, + 0x000033e7, + 0x00003be7, + 0x000003f7, + 0x00000bf7, + 0x000013f7, + 0x00001bf7, + 0x000023f7, + 0x00002bf7, + 0x000033f7, + 0x00003bf7, + 0x0001c387, + 0x0005c387, + 0x0009c387, + 0x000dc387, + 0x0011c387, + 0x0015c387, + 0x0019c387, + 0x001dc387, + 0x0001cb87, + 0x0005cb87, + 0x0009cb87, + 0x000dcb87, + 0x0011cb87, + 0x0015cb87, + 0x0019cb87, + 0x001dcb87, + 0x0001d387, + 0x0005d387, + 0x0009d387, + 0x000dd387, + 0x0011d387, + 0x0015d387, + 0x0019d387, + 0x001dd387, + 0x0001db87, + 0x0005db87, + 0x0009db87, + 0x000ddb87, + 0x0011db87, + 0x0015db87, + 0x0019db87, + 0x001ddb87, + 0x0001e387, + 0x0005e387, + 0x0009e387, + 0x000de387, + 0x0011e387, + 0x0015e387, + 0x0019e387, + 0x001de387, + 0x0001eb87, + 0x0005eb87, + 0x0009eb87, + 0x000deb87, + 0x0011eb87, + 0x0015eb87, + 0x0019eb87, + 0x001deb87, + 0x0001f387, + 0x0005f387, + 0x0009f387, + 0x000df387, + 0x0011f387, + 0x0015f387, + 0x0019f387, + 0x001df387, + 0x0001fb87, + 
0x0005fb87, + 0x0009fb87, + 0x000dfb87, + 0x0011fb87, + 0x0015fb87, + 0x0019fb87, + 0x001dfb87, + 0x0001c397, + 0x0005c397, + 0x0009c397, + 0x000dc397, + 0x0011c397, + 0x0015c397, + 0x0019c397, + 0x001dc397, + 0x0001cb97, + 0x0005cb97, + 0x0009cb97, + 0x000dcb97, + 0x0011cb97, + 0x0015cb97, + 0x0019cb97, + 0x001dcb97, + 0x0001d397, + 0x0005d397, + 0x0009d397, + 0x000dd397, + 0x0011d397, + 0x0015d397, + 0x0019d397, + 0x001dd397, + 0x0001db97, + 0x0005db97, + 0x0009db97, + 0x000ddb97, + 0x0011db97, + 0x0015db97, + 0x0019db97, + 0x001ddb97, + 0x0001e397, + 0x0005e397, + 0x0009e397, + 0x000de397, + 0x0011e397, + 0x0015e397, + 0x0019e397, + 0x001de397, + 0x0001eb97, + 0x0005eb97, + 0x0009eb97, + 0x000deb97, + 0x0011eb97, + 0x0015eb97, + 0x0019eb97, + 0x001deb97, + 0x0001f397, + 0x0005f397, + 0x0009f397, + 0x000df397, + 0x0011f397, + 0x0015f397, + 0x0019f397, + 0x001df397, + 0x0001fb97, + 0x0005fb97, + 0x0009fb97, + 0x000dfb97, + 0x0011fb97, + 0x0015fb97, + 0x0019fb97, + 0x001dfb97, + 0x0001c3a7, + 0x0005c3a7, + 0x0009c3a7, + 0x000dc3a7, + 0x0011c3a7, + 0x0015c3a7, + 0x0019c3a7, + 0x001dc3a7, + 0x0001cba7, + 0x0005cba7, + 0x0009cba7, + 0x000dcba7, + 0x0011cba7, + 0x0015cba7, + 0x0019cba7, + 0x001dcba7, + 0x0001d3a7, + 0x0005d3a7, + 0x0009d3a7, + 0x000dd3a7, + 0x0011d3a7, + 0x0015d3a7, + 0x0019d3a7, + 0x001dd3a7, + 0x0001dba7, + 0x0005dba7, + 0x0009dba7, + 0x000ddba7, + 0x0011dba7, + 0x0015dba7, + 0x0019dba7, + 0x001ddba7, + 0x0001e3a7, + 0x0005e3a7, + 0x0009e3a7, + 0x000de3a7, + 0x0011e3a7, + 0x0015e3a7, + 0x0019e3a7, + 0x001de3a7, + 0x0001eba7, + 0x0005eba7, + 0x0009eba7, + 0x000deba7, + 0x0011eba7, + 0x0015eba7, + 0x0019eba7, + 0x001deba7, + 0x0001f3a7, + 0x0005f3a7, + 0x0009f3a7, + 0x000df3a7, + 0x0011f3a7, + 0x0015f3a7, + 0x0019f3a7, + 0x001df3a7, + 0x0001fba7, + 0x0005fba7, + 0x0009fba7, + 0x000dfba7, + 0x0011fba7, + 0x0015fba7, + 0x0019fba7, + 0x001dfba7, + 0x0001c3b7, + 0x0005c3b7, + 0x0009c3b7, + 0x000dc3b7, + 0x0011c3b7, + 0x0015c3b7, + 0x0019c3b7, + 0x001dc3b7, + 0x0001cbb7, + 0x0005cbb7, + 0x0009cbb7, + 0x000dcbb7, + 0x0011cbb7, + 0x0015cbb7, + 0x0019cbb7, + 0x001dcbb7, + 0x0001d3b7, + 0x0005d3b7, + 0x0009d3b7, + 0x000dd3b7, + 0x0011d3b7, + 0x0015d3b7, + 0x0019d3b7, + 0x001dd3b7, + 0x0001dbb7, + 0x0005dbb7, + 0x0009dbb7, + 0x000ddbb7, + 0x0011dbb7, + 0x0015dbb7, + 0x0019dbb7, + 0x001ddbb7, + 0x0001e3b7, + 0x0005e3b7, + 0x0009e3b7, + 0x000de3b7, + 0x0011e3b7, + 0x0015e3b7, + 0x0019e3b7, + 0x001de3b7, + 0x0001ebb7, + 0x0005ebb7, + 0x0009ebb7, + 0x000debb7, + 0x0011ebb7, + 0x0015ebb7, + 0x0019ebb7, + 0x001debb7, + 0x0001f3b7, + 0x0005f3b7, + 0x0009f3b7, + 0x000df3b7, + 0x0011f3b7, + 0x0015f3b7, + 0x0019f3b7, + 0x001df3b7, + 0x0001fbb7, + 0x0005fbb7, + 0x0009fbb7, + 0x000dfbb7, + 0x0011fbb7, + 0x0015fbb7, + 0x0019fbb7, + 0x001dfbb7, + 0x0001c3c7, + 0x0005c3c7, + 0x0009c3c7, + 0x000dc3c7, + 0x0011c3c7, + 0x0015c3c7, + 0x0019c3c7, + 0x001dc3c7, + 0x0001cbc7, + 0x0005cbc7, + 0x0009cbc7, + 0x000dcbc7, + 0x0011cbc7, + 0x0015cbc7, + 0x0019cbc7, + 0x001dcbc7, + 0x0001d3c7, + 0x0005d3c7, + 0x0009d3c7, + 0x000dd3c7, + 0x0011d3c7, + 0x0015d3c7, + 0x0019d3c7, + 0x001dd3c7, + 0x0001dbc7, + 0x0005dbc7, + 0x0009dbc7, + 0x000ddbc7, + 0x0011dbc7, + 0x0015dbc7, + 0x0019dbc7, + 0x001ddbc7, + 0x0001e3c7, + 0x0005e3c7, + 0x0009e3c7, + 0x000de3c7, + 0x0011e3c7, + 0x0015e3c7, + 0x0019e3c7, + 0x001de3c7, + 0x0001ebc7, + 0x0005ebc7, + 0x0009ebc7, + 0x000debc7, + 0x0011ebc7, + 0x0015ebc7, + 0x0019ebc7, + 0x001debc7, + 0x0001f3c7, + 0x0005f3c7, + 0x0009f3c7, + 0x000df3c7, + 0x0011f3c7, + 0x0015f3c7, + 0x0019f3c7, 
+ 0x001df3c7, + 0x0001fbc7, + 0x0005fbc7, + 0x0009fbc7, + 0x000dfbc7, + 0x0011fbc7, + 0x0015fbc7, + 0x0019fbc7, + 0x001dfbc7, + 0x0001c3d7, + 0x0005c3d7, + 0x0009c3d7, + 0x000dc3d7, + 0x0011c3d7, + 0x0015c3d7, + 0x0019c3d7, + 0x001dc3d7, + 0x0001cbd7, + 0x0005cbd7, + 0x0009cbd7, + 0x000dcbd7, + 0x0011cbd7, + 0x0015cbd7, + 0x0019cbd7, + 0x001dcbd7, + 0x0001d3d7, + 0x0005d3d7, + 0x0009d3d7, + 0x000dd3d7, + 0x0011d3d7, + 0x0015d3d7, + 0x0019d3d7, + 0x001dd3d7, + 0x0001dbd7, + 0x0005dbd7, + 0x0009dbd7, + 0x000ddbd7, + 0x0011dbd7, + 0x0015dbd7, + 0x0019dbd7, + 0x001ddbd7, + 0x0001e3d7, + 0x0005e3d7, + 0x0009e3d7, + 0x000de3d7, + 0x0011e3d7, + 0x0015e3d7, + 0x0019e3d7, + 0x001de3d7, + 0x0001ebd7, + 0x0005ebd7, + 0x0009ebd7, + 0x000debd7, + 0x0011ebd7, + 0x0015ebd7, + 0x0019ebd7, + 0x001debd7, + 0x0001f3d7, + 0x0005f3d7, + 0x0009f3d7, + 0x000df3d7, + 0x0011f3d7, + 0x0015f3d7, + 0x0019f3d7, + 0x001df3d7, + 0x0001fbd7, + 0x0005fbd7, + 0x0009fbd7, + 0x000dfbd7, + 0x0011fbd7, + 0x0015fbd7, + 0x0019fbd7, + 0x001dfbd7, + 0x0001c3e7, + 0x0005c3e7, + 0x0009c3e7, + 0x000dc3e7, + 0x0011c3e7, + 0x0015c3e7, + 0x0019c3e7, + 0x001dc3e7, + 0x0001cbe7, + 0x0005cbe7, + 0x0009cbe7, + 0x000dcbe7, + 0x0011cbe7, + 0x0015cbe7, + 0x0019cbe7, + 0x001dcbe7, + 0x0001d3e7, + 0x0005d3e7, + 0x0009d3e7, + 0x000dd3e7, + 0x0011d3e7, + 0x0015d3e7, + 0x0019d3e7, + 0x001dd3e7, + 0x0001dbe7, + 0x0005dbe7, + 0x0009dbe7, + 0x000ddbe7, + 0x0011dbe7, + 0x0015dbe7, + 0x0019dbe7, + 0x001ddbe7, + 0x0001e3e7, + 0x0005e3e7, + 0x0009e3e7, + 0x000de3e7, + 0x0011e3e7, + 0x0015e3e7, + 0x0019e3e7, + 0x001de3e7, + 0x0001ebe7, + 0x0005ebe7, + 0x0009ebe7, + 0x000debe7, + 0x0011ebe7, + 0x0015ebe7, + 0x0019ebe7, + 0x001debe7, + 0x0001f3e7, + 0x0005f3e7, + 0x0009f3e7, + 0x000df3e7, + 0x0011f3e7, + 0x0015f3e7, + 0x0019f3e7, + 0x001df3e7, + 0x0001fbe7, + 0x0005fbe7, + 0x0009fbe7, + 0x000dfbe7, + 0x0011fbe7, + 0x0015fbe7, + 0x0019fbe7, + 0x001dfbe7, + 0x0001c3f7, + 0x0005c3f7, + 0x0009c3f7, + 0x000dc3f7, + 0x0011c3f7, + 0x0015c3f7, + 0x0019c3f7, + 0x001dc3f7, + 0x0001cbf7, + 0x0005cbf7, + 0x0009cbf7, + 0x000dcbf7, + 0x0011cbf7, + 0x0015cbf7, + 0x0019cbf7, + 0x001dcbf7, + 0x0001d3f7, + 0x0005d3f7, + 0x0009d3f7, + 0x000dd3f7, + 0x0011d3f7, + 0x0015d3f7, + 0x0019d3f7, + 0x001dd3f7, + 0x0001dbf7, + 0x0005dbf7, + 0x0009dbf7, + 0x000ddbf7, + 0x0011dbf7, + 0x0015dbf7, + 0x0019dbf7, + 0x001ddbf7, + 0x0001e3f7, + 0x0005e3f7, + 0x0009e3f7, + 0x000de3f7, + 0x0011e3f7, + 0x0015e3f7, + 0x0019e3f7, + 0x001de3f7, + 0x0001ebf7, + 0x0005ebf7, + 0x0009ebf7, + 0x000debf7, + 0x0011ebf7, + 0x0015ebf7, + 0x0019ebf7, + 0x001debf7, + 0x0001f3f7, + 0x0005f3f7, + 0x0009f3f7, + 0x000df3f7, + 0x0011f3f7, + 0x0015f3f7, + 0x0019f3f7, + 0x001df3f7, + 0x0001fbf7, + 0x0005fbf7, + 0x0009fbf7, + 0x000dfbf7, + 0x0011fbf7, + 0x0015fbf7, + 0x0019fbf7, + 0x001dfbf7, + 0x00e1c387, + 0x02e1c387, + 0x04e1c387, + 0x06e1c387, + 0x08e1c387, + 0x0ae1c387, + 0x0ce1c387, + 0x0ee1c387, + 0x00e5c387, + 0x02e5c387, + 0x04e5c387, + 0x06e5c387, + 0x08e5c387, + 0x0ae5c387, + 0x0ce5c387, + 0x0ee5c387, + 0x00e9c387, + 0x02e9c387, + 0x04e9c387, + 0x06e9c387, + 0x08e9c387, + 0x0ae9c387, + 0x0ce9c387, + 0x0ee9c387, + 0x00edc387, + 0x02edc387, + 0x04edc387, + 0x06edc387, + 0x08edc387, + 0x0aedc387, + 0x0cedc387, + 0x0eedc387, + 0x00f1c387, + 0x02f1c387, + 0x04f1c387, + 0x06f1c387, + 0x08f1c387, + 0x0af1c387, + 0x0cf1c387, + 0x0ef1c387, + 0x00f5c387, + 0x02f5c387, + 0x04f5c387, + 0x06f5c387, + 0x08f5c387, + 0x0af5c387, + 0x0cf5c387, + 0x0ef5c387, + 0x00f9c387, + 0x02f9c387, + 0x04f9c387, + 0x06f9c387, + 
0x08f9c387, + 0x0af9c387, + 0x0cf9c387, + 0x0ef9c387, + 0x00fdc387, + 0x02fdc387, + 0x04fdc387, + 0x06fdc387, + 0x08fdc387, + 0x0afdc387, + 0x0cfdc387, + 0x0efdc387, + 0x00e1cb87, + 0x02e1cb87, + 0x04e1cb87, + 0x06e1cb87, + 0x08e1cb87, + 0x0ae1cb87, + 0x0ce1cb87, + 0x0ee1cb87, + 0x00e5cb87, + 0x02e5cb87, + 0x04e5cb87, + 0x06e5cb87, + 0x08e5cb87, + 0x0ae5cb87, + 0x0ce5cb87, + 0x0ee5cb87, + 0x00e9cb87, + 0x02e9cb87, + 0x04e9cb87, + 0x06e9cb87, + 0x08e9cb87, + 0x0ae9cb87, + 0x0ce9cb87, + 0x0ee9cb87, + 0x00edcb87, + 0x02edcb87, + 0x04edcb87, + 0x06edcb87, + 0x08edcb87, + 0x0aedcb87, + 0x0cedcb87, + 0x0eedcb87, + 0x00f1cb87, + 0x02f1cb87, + 0x04f1cb87, + 0x06f1cb87, + 0x08f1cb87, + 0x0af1cb87, + 0x0cf1cb87, + 0x0ef1cb87, + 0x00f5cb87, + 0x02f5cb87, + 0x04f5cb87, + 0x06f5cb87, + 0x08f5cb87, + 0x0af5cb87, + 0x0cf5cb87, + 0x0ef5cb87, + 0x00f9cb87, + 0x02f9cb87, + 0x04f9cb87, + 0x06f9cb87, + 0x08f9cb87, +} + +var kZeroRepsDepth = [numCommandSymbols]uint32{ + 0, + 4, + 8, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 11, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, 
+ 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, +} + +var kNonZeroRepsBits = [numCommandSymbols]uint64{ + 0x0000000b, + 0x0000001b, + 0x0000002b, + 0x0000003b, + 0x000002cb, + 0x000006cb, + 0x00000acb, + 0x00000ecb, + 0x000002db, + 0x000006db, + 0x00000adb, + 0x00000edb, + 0x000002eb, + 0x000006eb, + 0x00000aeb, + 0x00000eeb, + 0x000002fb, + 0x000006fb, + 0x00000afb, + 0x00000efb, + 0x0000b2cb, + 0x0001b2cb, + 0x0002b2cb, + 0x0003b2cb, + 0x0000b6cb, + 0x0001b6cb, + 0x0002b6cb, + 0x0003b6cb, + 0x0000bacb, + 0x0001bacb, + 0x0002bacb, + 0x0003bacb, + 0x0000becb, + 0x0001becb, + 0x0002becb, + 0x0003becb, + 0x0000b2db, + 0x0001b2db, + 0x0002b2db, + 0x0003b2db, + 0x0000b6db, + 0x0001b6db, + 0x0002b6db, + 0x0003b6db, + 0x0000badb, + 0x0001badb, + 0x0002badb, + 0x0003badb, + 0x0000bedb, + 0x0001bedb, + 0x0002bedb, + 0x0003bedb, + 0x0000b2eb, + 0x0001b2eb, + 0x0002b2eb, + 0x0003b2eb, + 0x0000b6eb, + 0x0001b6eb, + 0x0002b6eb, + 0x0003b6eb, + 0x0000baeb, + 0x0001baeb, + 0x0002baeb, + 0x0003baeb, + 0x0000beeb, + 0x0001beeb, + 0x0002beeb, + 0x0003beeb, + 0x0000b2fb, + 0x0001b2fb, + 0x0002b2fb, + 0x0003b2fb, + 0x0000b6fb, + 0x0001b6fb, + 0x0002b6fb, + 0x0003b6fb, + 0x0000bafb, + 0x0001bafb, + 0x0002bafb, + 0x0003bafb, + 0x0000befb, + 0x0001befb, + 0x0002befb, + 0x0003befb, + 0x002cb2cb, + 0x006cb2cb, + 0x00acb2cb, + 0x00ecb2cb, + 0x002db2cb, + 0x006db2cb, + 0x00adb2cb, + 0x00edb2cb, + 0x002eb2cb, + 0x006eb2cb, + 0x00aeb2cb, + 0x00eeb2cb, + 0x002fb2cb, + 0x006fb2cb, + 0x00afb2cb, + 0x00efb2cb, + 0x002cb6cb, + 0x006cb6cb, + 0x00acb6cb, + 0x00ecb6cb, + 0x002db6cb, + 0x006db6cb, + 0x00adb6cb, + 0x00edb6cb, + 0x002eb6cb, + 0x006eb6cb, + 0x00aeb6cb, + 0x00eeb6cb, + 0x002fb6cb, + 0x006fb6cb, + 0x00afb6cb, + 0x00efb6cb, + 0x002cbacb, + 0x006cbacb, + 0x00acbacb, + 0x00ecbacb, + 0x002dbacb, + 0x006dbacb, + 0x00adbacb, + 0x00edbacb, + 0x002ebacb, + 0x006ebacb, + 0x00aebacb, + 0x00eebacb, + 0x002fbacb, + 0x006fbacb, + 0x00afbacb, + 0x00efbacb, + 0x002cbecb, + 0x006cbecb, + 
0x00acbecb, + 0x00ecbecb, + 0x002dbecb, + 0x006dbecb, + 0x00adbecb, + 0x00edbecb, + 0x002ebecb, + 0x006ebecb, + 0x00aebecb, + 0x00eebecb, + 0x002fbecb, + 0x006fbecb, + 0x00afbecb, + 0x00efbecb, + 0x002cb2db, + 0x006cb2db, + 0x00acb2db, + 0x00ecb2db, + 0x002db2db, + 0x006db2db, + 0x00adb2db, + 0x00edb2db, + 0x002eb2db, + 0x006eb2db, + 0x00aeb2db, + 0x00eeb2db, + 0x002fb2db, + 0x006fb2db, + 0x00afb2db, + 0x00efb2db, + 0x002cb6db, + 0x006cb6db, + 0x00acb6db, + 0x00ecb6db, + 0x002db6db, + 0x006db6db, + 0x00adb6db, + 0x00edb6db, + 0x002eb6db, + 0x006eb6db, + 0x00aeb6db, + 0x00eeb6db, + 0x002fb6db, + 0x006fb6db, + 0x00afb6db, + 0x00efb6db, + 0x002cbadb, + 0x006cbadb, + 0x00acbadb, + 0x00ecbadb, + 0x002dbadb, + 0x006dbadb, + 0x00adbadb, + 0x00edbadb, + 0x002ebadb, + 0x006ebadb, + 0x00aebadb, + 0x00eebadb, + 0x002fbadb, + 0x006fbadb, + 0x00afbadb, + 0x00efbadb, + 0x002cbedb, + 0x006cbedb, + 0x00acbedb, + 0x00ecbedb, + 0x002dbedb, + 0x006dbedb, + 0x00adbedb, + 0x00edbedb, + 0x002ebedb, + 0x006ebedb, + 0x00aebedb, + 0x00eebedb, + 0x002fbedb, + 0x006fbedb, + 0x00afbedb, + 0x00efbedb, + 0x002cb2eb, + 0x006cb2eb, + 0x00acb2eb, + 0x00ecb2eb, + 0x002db2eb, + 0x006db2eb, + 0x00adb2eb, + 0x00edb2eb, + 0x002eb2eb, + 0x006eb2eb, + 0x00aeb2eb, + 0x00eeb2eb, + 0x002fb2eb, + 0x006fb2eb, + 0x00afb2eb, + 0x00efb2eb, + 0x002cb6eb, + 0x006cb6eb, + 0x00acb6eb, + 0x00ecb6eb, + 0x002db6eb, + 0x006db6eb, + 0x00adb6eb, + 0x00edb6eb, + 0x002eb6eb, + 0x006eb6eb, + 0x00aeb6eb, + 0x00eeb6eb, + 0x002fb6eb, + 0x006fb6eb, + 0x00afb6eb, + 0x00efb6eb, + 0x002cbaeb, + 0x006cbaeb, + 0x00acbaeb, + 0x00ecbaeb, + 0x002dbaeb, + 0x006dbaeb, + 0x00adbaeb, + 0x00edbaeb, + 0x002ebaeb, + 0x006ebaeb, + 0x00aebaeb, + 0x00eebaeb, + 0x002fbaeb, + 0x006fbaeb, + 0x00afbaeb, + 0x00efbaeb, + 0x002cbeeb, + 0x006cbeeb, + 0x00acbeeb, + 0x00ecbeeb, + 0x002dbeeb, + 0x006dbeeb, + 0x00adbeeb, + 0x00edbeeb, + 0x002ebeeb, + 0x006ebeeb, + 0x00aebeeb, + 0x00eebeeb, + 0x002fbeeb, + 0x006fbeeb, + 0x00afbeeb, + 0x00efbeeb, + 0x002cb2fb, + 0x006cb2fb, + 0x00acb2fb, + 0x00ecb2fb, + 0x002db2fb, + 0x006db2fb, + 0x00adb2fb, + 0x00edb2fb, + 0x002eb2fb, + 0x006eb2fb, + 0x00aeb2fb, + 0x00eeb2fb, + 0x002fb2fb, + 0x006fb2fb, + 0x00afb2fb, + 0x00efb2fb, + 0x002cb6fb, + 0x006cb6fb, + 0x00acb6fb, + 0x00ecb6fb, + 0x002db6fb, + 0x006db6fb, + 0x00adb6fb, + 0x00edb6fb, + 0x002eb6fb, + 0x006eb6fb, + 0x00aeb6fb, + 0x00eeb6fb, + 0x002fb6fb, + 0x006fb6fb, + 0x00afb6fb, + 0x00efb6fb, + 0x002cbafb, + 0x006cbafb, + 0x00acbafb, + 0x00ecbafb, + 0x002dbafb, + 0x006dbafb, + 0x00adbafb, + 0x00edbafb, + 0x002ebafb, + 0x006ebafb, + 0x00aebafb, + 0x00eebafb, + 0x002fbafb, + 0x006fbafb, + 0x00afbafb, + 0x00efbafb, + 0x002cbefb, + 0x006cbefb, + 0x00acbefb, + 0x00ecbefb, + 0x002dbefb, + 0x006dbefb, + 0x00adbefb, + 0x00edbefb, + 0x002ebefb, + 0x006ebefb, + 0x00aebefb, + 0x00eebefb, + 0x002fbefb, + 0x006fbefb, + 0x00afbefb, + 0x00efbefb, + 0x0b2cb2cb, + 0x1b2cb2cb, + 0x2b2cb2cb, + 0x3b2cb2cb, + 0x0b6cb2cb, + 0x1b6cb2cb, + 0x2b6cb2cb, + 0x3b6cb2cb, + 0x0bacb2cb, + 0x1bacb2cb, + 0x2bacb2cb, + 0x3bacb2cb, + 0x0becb2cb, + 0x1becb2cb, + 0x2becb2cb, + 0x3becb2cb, + 0x0b2db2cb, + 0x1b2db2cb, + 0x2b2db2cb, + 0x3b2db2cb, + 0x0b6db2cb, + 0x1b6db2cb, + 0x2b6db2cb, + 0x3b6db2cb, + 0x0badb2cb, + 0x1badb2cb, + 0x2badb2cb, + 0x3badb2cb, + 0x0bedb2cb, + 0x1bedb2cb, + 0x2bedb2cb, + 0x3bedb2cb, + 0x0b2eb2cb, + 0x1b2eb2cb, + 0x2b2eb2cb, + 0x3b2eb2cb, + 0x0b6eb2cb, + 0x1b6eb2cb, + 0x2b6eb2cb, + 0x3b6eb2cb, + 0x0baeb2cb, + 0x1baeb2cb, + 0x2baeb2cb, + 0x3baeb2cb, + 0x0beeb2cb, + 0x1beeb2cb, + 0x2beeb2cb, + 0x3beeb2cb, 
+ 0x0b2fb2cb, + 0x1b2fb2cb, + 0x2b2fb2cb, + 0x3b2fb2cb, + 0x0b6fb2cb, + 0x1b6fb2cb, + 0x2b6fb2cb, + 0x3b6fb2cb, + 0x0bafb2cb, + 0x1bafb2cb, + 0x2bafb2cb, + 0x3bafb2cb, + 0x0befb2cb, + 0x1befb2cb, + 0x2befb2cb, + 0x3befb2cb, + 0x0b2cb6cb, + 0x1b2cb6cb, + 0x2b2cb6cb, + 0x3b2cb6cb, + 0x0b6cb6cb, + 0x1b6cb6cb, + 0x2b6cb6cb, + 0x3b6cb6cb, + 0x0bacb6cb, + 0x1bacb6cb, + 0x2bacb6cb, + 0x3bacb6cb, + 0x0becb6cb, + 0x1becb6cb, + 0x2becb6cb, + 0x3becb6cb, + 0x0b2db6cb, + 0x1b2db6cb, + 0x2b2db6cb, + 0x3b2db6cb, + 0x0b6db6cb, + 0x1b6db6cb, + 0x2b6db6cb, + 0x3b6db6cb, + 0x0badb6cb, + 0x1badb6cb, + 0x2badb6cb, + 0x3badb6cb, + 0x0bedb6cb, + 0x1bedb6cb, + 0x2bedb6cb, + 0x3bedb6cb, + 0x0b2eb6cb, + 0x1b2eb6cb, + 0x2b2eb6cb, + 0x3b2eb6cb, + 0x0b6eb6cb, + 0x1b6eb6cb, + 0x2b6eb6cb, + 0x3b6eb6cb, + 0x0baeb6cb, + 0x1baeb6cb, + 0x2baeb6cb, + 0x3baeb6cb, + 0x0beeb6cb, + 0x1beeb6cb, + 0x2beeb6cb, + 0x3beeb6cb, + 0x0b2fb6cb, + 0x1b2fb6cb, + 0x2b2fb6cb, + 0x3b2fb6cb, + 0x0b6fb6cb, + 0x1b6fb6cb, + 0x2b6fb6cb, + 0x3b6fb6cb, + 0x0bafb6cb, + 0x1bafb6cb, + 0x2bafb6cb, + 0x3bafb6cb, + 0x0befb6cb, + 0x1befb6cb, + 0x2befb6cb, + 0x3befb6cb, + 0x0b2cbacb, + 0x1b2cbacb, + 0x2b2cbacb, + 0x3b2cbacb, + 0x0b6cbacb, + 0x1b6cbacb, + 0x2b6cbacb, + 0x3b6cbacb, + 0x0bacbacb, + 0x1bacbacb, + 0x2bacbacb, + 0x3bacbacb, + 0x0becbacb, + 0x1becbacb, + 0x2becbacb, + 0x3becbacb, + 0x0b2dbacb, + 0x1b2dbacb, + 0x2b2dbacb, + 0x3b2dbacb, + 0x0b6dbacb, + 0x1b6dbacb, + 0x2b6dbacb, + 0x3b6dbacb, + 0x0badbacb, + 0x1badbacb, + 0x2badbacb, + 0x3badbacb, + 0x0bedbacb, + 0x1bedbacb, + 0x2bedbacb, + 0x3bedbacb, + 0x0b2ebacb, + 0x1b2ebacb, + 0x2b2ebacb, + 0x3b2ebacb, + 0x0b6ebacb, + 0x1b6ebacb, + 0x2b6ebacb, + 0x3b6ebacb, + 0x0baebacb, + 0x1baebacb, + 0x2baebacb, + 0x3baebacb, + 0x0beebacb, + 0x1beebacb, + 0x2beebacb, + 0x3beebacb, + 0x0b2fbacb, + 0x1b2fbacb, + 0x2b2fbacb, + 0x3b2fbacb, + 0x0b6fbacb, + 0x1b6fbacb, + 0x2b6fbacb, + 0x3b6fbacb, + 0x0bafbacb, + 0x1bafbacb, + 0x2bafbacb, + 0x3bafbacb, + 0x0befbacb, + 0x1befbacb, + 0x2befbacb, + 0x3befbacb, + 0x0b2cbecb, + 0x1b2cbecb, + 0x2b2cbecb, + 0x3b2cbecb, + 0x0b6cbecb, + 0x1b6cbecb, + 0x2b6cbecb, + 0x3b6cbecb, + 0x0bacbecb, + 0x1bacbecb, + 0x2bacbecb, + 0x3bacbecb, + 0x0becbecb, + 0x1becbecb, + 0x2becbecb, + 0x3becbecb, + 0x0b2dbecb, + 0x1b2dbecb, + 0x2b2dbecb, + 0x3b2dbecb, + 0x0b6dbecb, + 0x1b6dbecb, + 0x2b6dbecb, + 0x3b6dbecb, + 0x0badbecb, + 0x1badbecb, + 0x2badbecb, + 0x3badbecb, + 0x0bedbecb, + 0x1bedbecb, + 0x2bedbecb, + 0x3bedbecb, + 0x0b2ebecb, + 0x1b2ebecb, + 0x2b2ebecb, + 0x3b2ebecb, + 0x0b6ebecb, + 0x1b6ebecb, + 0x2b6ebecb, + 0x3b6ebecb, + 0x0baebecb, + 0x1baebecb, + 0x2baebecb, + 0x3baebecb, + 0x0beebecb, + 0x1beebecb, + 0x2beebecb, + 0x3beebecb, + 0x0b2fbecb, + 0x1b2fbecb, + 0x2b2fbecb, + 0x3b2fbecb, + 0x0b6fbecb, + 0x1b6fbecb, + 0x2b6fbecb, + 0x3b6fbecb, + 0x0bafbecb, + 0x1bafbecb, + 0x2bafbecb, + 0x3bafbecb, + 0x0befbecb, + 0x1befbecb, + 0x2befbecb, + 0x3befbecb, + 0x0b2cb2db, + 0x1b2cb2db, + 0x2b2cb2db, + 0x3b2cb2db, + 0x0b6cb2db, + 0x1b6cb2db, + 0x2b6cb2db, + 0x3b6cb2db, + 0x0bacb2db, + 0x1bacb2db, + 0x2bacb2db, + 0x3bacb2db, + 0x0becb2db, + 0x1becb2db, + 0x2becb2db, + 0x3becb2db, + 0x0b2db2db, + 0x1b2db2db, + 0x2b2db2db, + 0x3b2db2db, + 0x0b6db2db, + 0x1b6db2db, + 0x2b6db2db, + 0x3b6db2db, + 0x0badb2db, + 0x1badb2db, + 0x2badb2db, + 0x3badb2db, + 0x0bedb2db, + 0x1bedb2db, + 0x2bedb2db, + 0x3bedb2db, + 0x0b2eb2db, + 0x1b2eb2db, + 0x2b2eb2db, + 0x3b2eb2db, + 0x0b6eb2db, + 0x1b6eb2db, + 0x2b6eb2db, + 0x3b6eb2db, + 0x0baeb2db, + 0x1baeb2db, + 0x2baeb2db, + 0x3baeb2db, + 0x0beeb2db, + 
0x1beeb2db, + 0x2beeb2db, + 0x3beeb2db, + 0x0b2fb2db, + 0x1b2fb2db, + 0x2b2fb2db, + 0x3b2fb2db, + 0x0b6fb2db, + 0x1b6fb2db, + 0x2b6fb2db, + 0x3b6fb2db, + 0x0bafb2db, + 0x1bafb2db, + 0x2bafb2db, + 0x3bafb2db, + 0x0befb2db, + 0x1befb2db, + 0x2befb2db, + 0x3befb2db, + 0x0b2cb6db, + 0x1b2cb6db, + 0x2b2cb6db, + 0x3b2cb6db, + 0x0b6cb6db, + 0x1b6cb6db, + 0x2b6cb6db, + 0x3b6cb6db, + 0x0bacb6db, + 0x1bacb6db, + 0x2bacb6db, + 0x3bacb6db, + 0x0becb6db, + 0x1becb6db, + 0x2becb6db, + 0x3becb6db, + 0x0b2db6db, + 0x1b2db6db, + 0x2b2db6db, + 0x3b2db6db, + 0x0b6db6db, + 0x1b6db6db, + 0x2b6db6db, + 0x3b6db6db, + 0x0badb6db, + 0x1badb6db, + 0x2badb6db, + 0x3badb6db, + 0x0bedb6db, + 0x1bedb6db, + 0x2bedb6db, + 0x3bedb6db, + 0x0b2eb6db, + 0x1b2eb6db, + 0x2b2eb6db, + 0x3b2eb6db, + 0x0b6eb6db, + 0x1b6eb6db, + 0x2b6eb6db, + 0x3b6eb6db, + 0x0baeb6db, + 0x1baeb6db, + 0x2baeb6db, + 0x3baeb6db, +} + +var kNonZeroRepsDepth = [numCommandSymbols]uint32{ + 6, + 6, + 6, + 6, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, 
+ 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, +} + +var kStaticCommandCodeBits = [numCommandSymbols]uint16{ + 0, + 256, + 128, + 384, + 64, + 320, + 192, + 448, + 32, + 288, + 160, + 416, + 96, + 352, + 224, + 480, + 16, + 272, + 144, + 400, + 80, + 336, + 208, + 464, + 48, + 304, + 176, + 432, + 112, + 368, + 240, + 496, + 8, + 264, + 136, + 392, + 72, + 328, + 200, + 456, + 40, + 296, + 168, + 424, + 104, + 360, + 232, + 488, + 24, + 280, + 152, + 408, + 88, + 344, + 216, + 472, + 56, + 312, + 184, + 440, + 120, + 376, + 248, + 504, + 4, + 260, + 132, + 388, + 68, + 324, + 196, + 452, + 36, + 292, + 164, + 420, + 100, + 356, + 228, + 484, + 20, + 276, + 148, + 404, + 84, + 340, + 212, + 468, + 52, + 308, + 180, + 436, + 116, + 372, + 244, + 500, + 12, + 268, + 140, + 396, + 76, + 332, + 204, + 460, + 44, + 300, + 172, + 428, + 108, + 364, + 236, + 492, + 28, + 284, + 156, + 412, + 92, + 348, + 220, + 476, + 60, + 316, + 188, + 444, + 124, + 380, + 252, + 508, + 2, + 258, + 130, + 386, + 66, + 322, + 194, + 450, + 34, + 290, + 162, + 418, + 98, + 354, + 226, + 482, + 18, + 274, + 146, + 402, + 82, + 338, + 210, + 466, + 50, + 306, + 178, + 434, + 114, + 370, + 242, + 498, + 10, + 266, + 138, + 394, + 74, + 330, + 202, + 458, + 42, + 298, + 170, + 426, + 106, + 362, + 234, + 490, + 26, + 282, + 154, + 410, + 90, + 346, + 218, + 474, + 58, + 314, + 186, + 442, + 122, + 378, + 250, + 506, + 6, + 262, + 134, + 390, + 70, + 326, + 198, + 454, + 38, + 294, + 166, + 422, + 102, + 358, + 230, + 486, + 22, + 278, + 150, + 406, + 86, + 342, + 214, + 470, + 54, + 310, + 182, + 438, + 118, + 374, + 246, + 502, + 14, + 270, + 142, + 398, + 78, + 334, + 206, + 462, + 46, + 302, + 174, + 430, + 110, + 366, + 238, + 494, + 30, + 286, + 158, + 414, + 94, + 350, + 222, + 478, + 62, + 318, + 190, + 446, + 126, + 382, + 254, + 510, + 1, + 257, + 129, + 385, + 65, + 321, + 193, + 449, + 33, + 289, + 161, + 417, + 97, + 353, + 225, + 481, + 17, + 273, + 145, + 401, + 81, + 337, + 209, 
+ 465, + 49, + 305, + 177, + 433, + 113, + 369, + 241, + 497, + 9, + 265, + 137, + 393, + 73, + 329, + 201, + 457, + 41, + 297, + 169, + 425, + 105, + 361, + 233, + 489, + 25, + 281, + 153, + 409, + 89, + 345, + 217, + 473, + 57, + 313, + 185, + 441, + 121, + 377, + 249, + 505, + 5, + 261, + 133, + 389, + 69, + 325, + 197, + 453, + 37, + 293, + 165, + 421, + 101, + 357, + 229, + 485, + 21, + 277, + 149, + 405, + 85, + 341, + 213, + 469, + 53, + 309, + 181, + 437, + 117, + 373, + 245, + 501, + 13, + 269, + 141, + 397, + 77, + 333, + 205, + 461, + 45, + 301, + 173, + 429, + 109, + 365, + 237, + 493, + 29, + 285, + 157, + 413, + 93, + 349, + 221, + 477, + 61, + 317, + 189, + 445, + 125, + 381, + 253, + 509, + 3, + 259, + 131, + 387, + 67, + 323, + 195, + 451, + 35, + 291, + 163, + 419, + 99, + 355, + 227, + 483, + 19, + 275, + 147, + 403, + 83, + 339, + 211, + 467, + 51, + 307, + 179, + 435, + 115, + 371, + 243, + 499, + 11, + 267, + 139, + 395, + 75, + 331, + 203, + 459, + 43, + 299, + 171, + 427, + 107, + 363, + 235, + 491, + 27, + 283, + 155, + 411, + 91, + 347, + 219, + 475, + 59, + 315, + 187, + 443, + 123, + 379, + 251, + 507, + 7, + 1031, + 519, + 1543, + 263, + 1287, + 775, + 1799, + 135, + 1159, + 647, + 1671, + 391, + 1415, + 903, + 1927, + 71, + 1095, + 583, + 1607, + 327, + 1351, + 839, + 1863, + 199, + 1223, + 711, + 1735, + 455, + 1479, + 967, + 1991, + 39, + 1063, + 551, + 1575, + 295, + 1319, + 807, + 1831, + 167, + 1191, + 679, + 1703, + 423, + 1447, + 935, + 1959, + 103, + 1127, + 615, + 1639, + 359, + 1383, + 871, + 1895, + 231, + 1255, + 743, + 1767, + 487, + 1511, + 999, + 2023, + 23, + 1047, + 535, + 1559, + 279, + 1303, + 791, + 1815, + 151, + 1175, + 663, + 1687, + 407, + 1431, + 919, + 1943, + 87, + 1111, + 599, + 1623, + 343, + 1367, + 855, + 1879, + 215, + 1239, + 727, + 1751, + 471, + 1495, + 983, + 2007, + 55, + 1079, + 567, + 1591, + 311, + 1335, + 823, + 1847, + 183, + 1207, + 695, + 1719, + 439, + 1463, + 951, + 1975, + 119, + 1143, + 631, + 1655, + 375, + 1399, + 887, + 1911, + 247, + 1271, + 759, + 1783, + 503, + 1527, + 1015, + 2039, + 15, + 1039, + 527, + 1551, + 271, + 1295, + 783, + 1807, + 143, + 1167, + 655, + 1679, + 399, + 1423, + 911, + 1935, + 79, + 1103, + 591, + 1615, + 335, + 1359, + 847, + 1871, + 207, + 1231, + 719, + 1743, + 463, + 1487, + 975, + 1999, + 47, + 1071, + 559, + 1583, + 303, + 1327, + 815, + 1839, + 175, + 1199, + 687, + 1711, + 431, + 1455, + 943, + 1967, + 111, + 1135, + 623, + 1647, + 367, + 1391, + 879, + 1903, + 239, + 1263, + 751, + 1775, + 495, + 1519, + 1007, + 2031, + 31, + 1055, + 543, + 1567, + 287, + 1311, + 799, + 1823, + 159, + 1183, + 671, + 1695, + 415, + 1439, + 927, + 1951, + 95, + 1119, + 607, + 1631, + 351, + 1375, + 863, + 1887, + 223, + 1247, + 735, + 1759, + 479, + 1503, + 991, + 2015, + 63, + 1087, + 575, + 1599, + 319, + 1343, + 831, + 1855, + 191, + 1215, + 703, + 1727, + 447, + 1471, + 959, + 1983, + 127, + 1151, + 639, + 1663, + 383, + 1407, + 895, + 1919, + 255, + 1279, + 767, + 1791, + 511, + 1535, + 1023, + 2047, +} + +func storeStaticCommandHuffmanTree(storage_ix *uint, storage []byte) { + writeBits(56, 0x92624416307003, storage_ix, storage) + writeBits(3, 0x00000000, storage_ix, storage) +} + +var kStaticDistanceCodeBits = [64]uint16{ + 0, + 32, + 16, + 48, + 8, + 40, + 24, + 56, + 4, + 36, + 20, + 52, + 12, + 44, + 28, + 60, + 2, + 34, + 18, + 50, + 10, + 42, + 26, + 58, + 6, + 38, + 22, + 54, + 14, + 46, + 30, + 62, + 1, + 33, + 17, + 49, + 9, + 41, + 25, + 57, + 5, + 37, + 21, + 53, + 13, + 45, 
+ 29, + 61, + 3, + 35, + 19, + 51, + 11, + 43, + 27, + 59, + 7, + 39, + 23, + 55, + 15, + 47, + 31, + 63, +} + +func storeStaticDistanceHuffmanTree(storage_ix *uint, storage []byte) { + writeBits(28, 0x0369DC03, storage_ix, storage) +} diff --git a/vendor/github.com/andybalholm/brotli/fast_log.go b/vendor/github.com/andybalholm/brotli/fast_log.go new file mode 100644 index 00000000000..9d6607f7e2f --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/fast_log.go @@ -0,0 +1,290 @@ +package brotli + +import ( + "math" + "math/bits" +) + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Utilities for fast computation of logarithms. */ + +func log2FloorNonZero(n uint) uint32 { + return uint32(bits.Len(n)) - 1 +} + +/* A lookup table for small values of log2(int) to be used in entropy + computation. + + ", ".join(["%.16ff" % x for x in [0.0]+[log2(x) for x in range(1, 256)]]) */ +var kLog2Table = []float32{ + 0.0000000000000000, + 0.0000000000000000, + 1.0000000000000000, + 1.5849625007211563, + 2.0000000000000000, + 2.3219280948873622, + 2.5849625007211561, + 2.8073549220576042, + 3.0000000000000000, + 3.1699250014423126, + 3.3219280948873626, + 3.4594316186372978, + 3.5849625007211565, + 3.7004397181410922, + 3.8073549220576037, + 3.9068905956085187, + 4.0000000000000000, + 4.0874628412503400, + 4.1699250014423122, + 4.2479275134435852, + 4.3219280948873626, + 4.3923174227787607, + 4.4594316186372973, + 4.5235619560570131, + 4.5849625007211570, + 4.6438561897747244, + 4.7004397181410926, + 4.7548875021634691, + 4.8073549220576037, + 4.8579809951275728, + 4.9068905956085187, + 4.9541963103868758, + 5.0000000000000000, + 5.0443941193584534, + 5.0874628412503400, + 5.1292830169449664, + 5.1699250014423122, + 5.2094533656289501, + 5.2479275134435852, + 5.2854022188622487, + 5.3219280948873626, + 5.3575520046180838, + 5.3923174227787607, + 5.4262647547020979, + 5.4594316186372973, + 5.4918530963296748, + 5.5235619560570131, + 5.5545888516776376, + 5.5849625007211570, + 5.6147098441152083, + 5.6438561897747244, + 5.6724253419714961, + 5.7004397181410926, + 5.7279204545631996, + 5.7548875021634691, + 5.7813597135246599, + 5.8073549220576046, + 5.8328900141647422, + 5.8579809951275719, + 5.8826430493618416, + 5.9068905956085187, + 5.9307373375628867, + 5.9541963103868758, + 5.9772799234999168, + 6.0000000000000000, + 6.0223678130284544, + 6.0443941193584534, + 6.0660891904577721, + 6.0874628412503400, + 6.1085244567781700, + 6.1292830169449672, + 6.1497471195046822, + 6.1699250014423122, + 6.1898245588800176, + 6.2094533656289510, + 6.2288186904958804, + 6.2479275134435861, + 6.2667865406949019, + 6.2854022188622487, + 6.3037807481771031, + 6.3219280948873617, + 6.3398500028846252, + 6.3575520046180847, + 6.3750394313469254, + 6.3923174227787598, + 6.4093909361377026, + 6.4262647547020979, + 6.4429434958487288, + 6.4594316186372982, + 6.4757334309663976, + 6.4918530963296748, + 6.5077946401986964, + 6.5235619560570131, + 6.5391588111080319, + 6.5545888516776376, + 6.5698556083309478, + 6.5849625007211561, + 6.5999128421871278, + 6.6147098441152092, + 6.6293566200796095, + 6.6438561897747253, + 6.6582114827517955, + 6.6724253419714952, + 6.6865005271832185, + 6.7004397181410917, + 6.7142455176661224, + 6.7279204545631988, + 6.7414669864011465, + 6.7548875021634691, + 6.7681843247769260, + 6.7813597135246599, + 6.7944158663501062, + 6.8073549220576037, + 6.8201789624151887, + 
6.8328900141647422, + 6.8454900509443757, + 6.8579809951275719, + 6.8703647195834048, + 6.8826430493618416, + 6.8948177633079437, + 6.9068905956085187, + 6.9188632372745955, + 6.9307373375628867, + 6.9425145053392399, + 6.9541963103868758, + 6.9657842846620879, + 6.9772799234999168, + 6.9886846867721664, + 7.0000000000000000, + 7.0112272554232540, + 7.0223678130284544, + 7.0334230015374501, + 7.0443941193584534, + 7.0552824355011898, + 7.0660891904577721, + 7.0768155970508317, + 7.0874628412503400, + 7.0980320829605272, + 7.1085244567781700, + 7.1189410727235076, + 7.1292830169449664, + 7.1395513523987937, + 7.1497471195046822, + 7.1598713367783891, + 7.1699250014423130, + 7.1799090900149345, + 7.1898245588800176, + 7.1996723448363644, + 7.2094533656289492, + 7.2191685204621621, + 7.2288186904958804, + 7.2384047393250794, + 7.2479275134435861, + 7.2573878426926521, + 7.2667865406949019, + 7.2761244052742384, + 7.2854022188622487, + 7.2946207488916270, + 7.3037807481771031, + 7.3128829552843557, + 7.3219280948873617, + 7.3309168781146177, + 7.3398500028846243, + 7.3487281542310781, + 7.3575520046180847, + 7.3663222142458151, + 7.3750394313469254, + 7.3837042924740528, + 7.3923174227787607, + 7.4008794362821844, + 7.4093909361377026, + 7.4178525148858991, + 7.4262647547020979, + 7.4346282276367255, + 7.4429434958487288, + 7.4512111118323299, + 7.4594316186372973, + 7.4676055500829976, + 7.4757334309663976, + 7.4838157772642564, + 7.4918530963296748, + 7.4998458870832057, + 7.5077946401986964, + 7.5156998382840436, + 7.5235619560570131, + 7.5313814605163119, + 7.5391588111080319, + 7.5468944598876373, + 7.5545888516776376, + 7.5622424242210728, + 7.5698556083309478, + 7.5774288280357487, + 7.5849625007211561, + 7.5924570372680806, + 7.5999128421871278, + 7.6073303137496113, + 7.6147098441152075, + 7.6220518194563764, + 7.6293566200796095, + 7.6366246205436488, + 7.6438561897747244, + 7.6510516911789290, + 7.6582114827517955, + 7.6653359171851765, + 7.6724253419714952, + 7.6794800995054464, + 7.6865005271832185, + 7.6934869574993252, + 7.7004397181410926, + 7.7073591320808825, + 7.7142455176661224, + 7.7210991887071856, + 7.7279204545631996, + 7.7347096202258392, + 7.7414669864011465, + 7.7481928495894596, + 7.7548875021634691, + 7.7615512324444795, + 7.7681843247769260, + 7.7747870596011737, + 7.7813597135246608, + 7.7879025593914317, + 7.7944158663501062, + 7.8008998999203047, + 7.8073549220576037, + 7.8137811912170374, + 7.8201789624151887, + 7.8265484872909159, + 7.8328900141647422, + 7.8392037880969445, + 7.8454900509443757, + 7.8517490414160571, + 7.8579809951275719, + 7.8641861446542798, + 7.8703647195834048, + 7.8765169465650002, + 7.8826430493618425, + 7.8887432488982601, + 7.8948177633079446, + 7.9008668079807496, + 7.9068905956085187, + 7.9128893362299619, + 7.9188632372745955, + 7.9248125036057813, + 7.9307373375628867, + 7.9366379390025719, + 7.9425145053392399, + 7.9483672315846778, + 7.9541963103868758, + 7.9600019320680806, + 7.9657842846620870, + 7.9715435539507720, + 7.9772799234999168, + 7.9829935746943104, + 7.9886846867721664, + 7.9943534368588578, +} + +/* Faster logarithm for small integers, with the property of log2(0) == 0. 
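+
+   For illustration (assuming the fallback below): arguments under 256 are
+   served straight from kLog2Table, so fastLog2(0) == 0, fastLog2(2) == 1.0
+   and fastLog2(8) == 3.0 exactly, while larger arguments fall back to
+   math.Log2.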
 */
+func fastLog2(v uint) float64 {
+	if v < uint(len(kLog2Table)) {
+		return float64(kLog2Table[v])
+	}
+
+	return math.Log2(float64(v))
+}
diff --git a/vendor/github.com/andybalholm/brotli/find_match_length.go b/vendor/github.com/andybalholm/brotli/find_match_length.go
new file mode 100644
index 00000000000..09d2ae67268
--- /dev/null
+++ b/vendor/github.com/andybalholm/brotli/find_match_length.go
@@ -0,0 +1,45 @@
+package brotli
+
+import (
+	"encoding/binary"
+	"math/bits"
+	"runtime"
+)
+
+/* Copyright 2010 Google Inc. All Rights Reserved.
+
+   Distributed under MIT license.
+   See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
+*/
+
+/* Function to find maximal matching prefixes of strings. */
+func findMatchLengthWithLimit(s1 []byte, s2 []byte, limit uint) uint {
+	var matched uint = 0
+	_, _ = s1[limit-1], s2[limit-1] // bounds check
+	switch runtime.GOARCH {
+	case "amd64":
+		// Compare 8 bytes at a time.
+		for matched+8 <= limit {
+			w1 := binary.LittleEndian.Uint64(s1[matched:])
+			w2 := binary.LittleEndian.Uint64(s2[matched:])
+			if w1 != w2 {
+				return matched + uint(bits.TrailingZeros64(w1^w2)>>3)
+			}
+			matched += 8
+		}
+	case "386":
+		// Compare 4 bytes at a time.
+		for matched+4 <= limit {
+			w1 := binary.LittleEndian.Uint32(s1[matched:])
+			w2 := binary.LittleEndian.Uint32(s2[matched:])
+			if w1 != w2 {
+				return matched + uint(bits.TrailingZeros32(w1^w2)>>3)
+			}
+			matched += 4
+		}
+	}
+	for matched < limit && s1[matched] == s2[matched] {
+		matched++
+	}
+	return matched
+}
diff --git a/vendor/github.com/andybalholm/brotli/go.mod b/vendor/github.com/andybalholm/brotli/go.mod
new file mode 100644
index 00000000000..1c94232c742
--- /dev/null
+++ b/vendor/github.com/andybalholm/brotli/go.mod
@@ -0,0 +1,5 @@
+module github.com/andybalholm/brotli
+
+go 1.12
+
+retract v1.0.1 // occasional panics and data corruption
diff --git a/vendor/github.com/andybalholm/brotli/go.sum b/vendor/github.com/andybalholm/brotli/go.sum
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/vendor/github.com/andybalholm/brotli/h10.go b/vendor/github.com/andybalholm/brotli/h10.go
new file mode 100644
index 00000000000..5662fbbbb52
--- /dev/null
+++ b/vendor/github.com/andybalholm/brotli/h10.go
@@ -0,0 +1,287 @@
+package brotli
+
+import "encoding/binary"
+
+/* Copyright 2016 Google Inc. All Rights Reserved.
+
+   Distributed under MIT license.
+   See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
+*/
+
+func (*h10) HashTypeLength() uint {
+	return 4
+}
+
+func (*h10) StoreLookahead() uint {
+	return 128
+}
+
+func hashBytesH10(data []byte) uint32 {
+	var h uint32 = binary.LittleEndian.Uint32(data) * kHashMul32
+
+	/* The higher bits contain more mixture from the multiplication,
+	   so we take our results from there. */
+	return h >> (32 - 17)
+}
+
+/* A (forgetful) hash table where each hash bucket contains a binary tree of
+   sequences whose first 4 bytes share the same hash code.
+   Each sequence is 128 bytes long and is identified by its starting
+   position in the input data. The binary tree is sorted by the lexicographic
+   order of the sequences, and it is also a max-heap with respect to the
+   starting positions.
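+
+   For example, with a window of 1 << lgwin bytes the forest allocated below
+   keeps two node slots (left and right child) per window position, and the
+   bucket array has 1 << 17 roots, one per 17-bit hash of a 4-byte prefix.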
 */
+type h10 struct {
+	hasherCommon
+	window_mask_ uint
+	buckets_     [1 << 17]uint32
+	invalid_pos_ uint32
+	forest       []uint32
+}
+
+func (h *h10) Initialize(params *encoderParams) {
+	h.window_mask_ = (1 << params.lgwin) - 1
+	h.invalid_pos_ = uint32(0 - h.window_mask_)
+	var num_nodes uint = uint(1) << params.lgwin
+	h.forest = make([]uint32, 2*num_nodes)
+}
+
+func (h *h10) Prepare(one_shot bool, input_size uint, data []byte) {
+	var invalid_pos uint32 = h.invalid_pos_
+	var i uint32
+	for i = 0; i < 1<<17; i++ {
+		h.buckets_[i] = invalid_pos
+	}
+}
+
+func leftChildIndexH10(self *h10, pos uint) uint {
+	return 2 * (pos & self.window_mask_)
+}
+
+func rightChildIndexH10(self *h10, pos uint) uint {
+	return 2*(pos&self.window_mask_) + 1
+}
+
+/* Stores the hash of the next 4 bytes and, in a single tree traversal,
+   searches the hash bucket's binary tree for matches and re-roots it at the
+   current position.
+
+   If fewer than 128 bytes of data are available, the hash bucket of the
+   current position is searched for matches, but the state of the hash table
+   is not changed, since we cannot know the final sorting order of the
+   current (incomplete) sequence.
+
+   This function must be called with increasing cur_ix positions. */
+func storeAndFindMatchesH10(self *h10, data []byte, cur_ix uint, ring_buffer_mask uint, max_length uint, max_backward uint, best_len *uint, matches []backwardMatch) []backwardMatch {
+	var cur_ix_masked uint = cur_ix & ring_buffer_mask
+	var max_comp_len uint = brotli_min_size_t(max_length, 128)
+	var should_reroot_tree bool = (max_length >= 128)
+	var key uint32 = hashBytesH10(data[cur_ix_masked:])
+	var forest []uint32 = self.forest
+	var prev_ix uint = uint(self.buckets_[key])
+	var node_left uint = leftChildIndexH10(self, cur_ix)
+	var node_right uint = rightChildIndexH10(self, cur_ix)
+	var best_len_left uint = 0
+	var best_len_right uint = 0
+	var depth_remaining uint
+	/* The forest index of the rightmost node of the left subtree of the new
+	   root, updated as we traverse and re-root the tree of the hash bucket. */
+
+	/* The forest index of the leftmost node of the right subtree of the new
+	   root, updated as we traverse and re-root the tree of the hash bucket. */
+
+	/* The match length of the rightmost node of the left subtree of the new
+	   root, updated as we traverse and re-root the tree of the hash bucket. */
+
+	/* The match length of the leftmost node of the right subtree of the new
+	   root, updated as we traverse and re-root the tree of the hash bucket.
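+
+	   (These four notes describe node_left, node_right, best_len_left and
+	   best_len_right declared above; the loop below updates them while it
+	   splits the old tree into the subtrees of the new root.)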
 */
+	if should_reroot_tree {
+		self.buckets_[key] = uint32(cur_ix)
+	}
+
+	for depth_remaining = 64; ; depth_remaining-- {
+		var backward uint = cur_ix - prev_ix
+		var prev_ix_masked uint = prev_ix & ring_buffer_mask
+		if backward == 0 || backward > max_backward || depth_remaining == 0 {
+			if should_reroot_tree {
+				forest[node_left] = self.invalid_pos_
+				forest[node_right] = self.invalid_pos_
+			}
+
+			break
+		}
+		{
+			var cur_len uint = brotli_min_size_t(best_len_left, best_len_right)
+			var len uint
+			assert(cur_len <= 128)
+			len = cur_len + findMatchLengthWithLimit(data[cur_ix_masked+cur_len:], data[prev_ix_masked+cur_len:], max_length-cur_len)
+			if matches != nil && len > *best_len {
+				*best_len = uint(len)
+				initBackwardMatch(&matches[0], backward, uint(len))
+				matches = matches[1:]
+			}
+
+			if len >= max_comp_len {
+				if should_reroot_tree {
+					forest[node_left] = forest[leftChildIndexH10(self, prev_ix)]
+					forest[node_right] = forest[rightChildIndexH10(self, prev_ix)]
+				}
+
+				break
+			}
+
+			if data[cur_ix_masked+len] > data[prev_ix_masked+len] {
+				best_len_left = uint(len)
+				if should_reroot_tree {
+					forest[node_left] = uint32(prev_ix)
+				}
+
+				node_left = rightChildIndexH10(self, prev_ix)
+				prev_ix = uint(forest[node_left])
+			} else {
+				best_len_right = uint(len)
+				if should_reroot_tree {
+					forest[node_right] = uint32(prev_ix)
+				}
+
+				node_right = leftChildIndexH10(self, prev_ix)
+				prev_ix = uint(forest[node_right])
+			}
+		}
+	}
+
+	return matches
+}
+
+/* Finds all backward matches of &data[cur_ix & ring_buffer_mask] up to the
+   length of max_length and stores the position cur_ix in the hash table.
+
+   Returns the number of matches found, and stores the found matches in
+   matches[0] to matches[count-1]. The matches will be sorted by strictly
+   increasing length and (non-strictly) increasing distance.
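+
+   Purely as an illustration, a returned sequence could look like
+   (len 3, dist 7), (len 5, dist 7), (len 9, dist 1020): the lengths grow
+   strictly, while the distances never shrink.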
*/ +func findAllMatchesH10(handle *h10, dictionary *encoderDictionary, data []byte, ring_buffer_mask uint, cur_ix uint, max_length uint, max_backward uint, gap uint, params *encoderParams, matches []backwardMatch) uint { + var orig_matches []backwardMatch = matches + var cur_ix_masked uint = cur_ix & ring_buffer_mask + var best_len uint = 1 + var short_match_max_backward uint + if params.quality != hqZopflificationQuality { + short_match_max_backward = 16 + } else { + short_match_max_backward = 64 + } + var stop uint = cur_ix - short_match_max_backward + var dict_matches [maxStaticDictionaryMatchLen + 1]uint32 + var i uint + if cur_ix < short_match_max_backward { + stop = 0 + } + for i = cur_ix - 1; i > stop && best_len <= 2; i-- { + var prev_ix uint = i + var backward uint = cur_ix - prev_ix + if backward > max_backward { + break + } + + prev_ix &= ring_buffer_mask + if data[cur_ix_masked] != data[prev_ix] || data[cur_ix_masked+1] != data[prev_ix+1] { + continue + } + { + var len uint = findMatchLengthWithLimit(data[prev_ix:], data[cur_ix_masked:], max_length) + if len > best_len { + best_len = uint(len) + initBackwardMatch(&matches[0], backward, uint(len)) + matches = matches[1:] + } + } + } + + if best_len < max_length { + matches = storeAndFindMatchesH10(handle, data, cur_ix, ring_buffer_mask, max_length, max_backward, &best_len, matches) + } + + for i = 0; i <= maxStaticDictionaryMatchLen; i++ { + dict_matches[i] = kInvalidMatch + } + { + var minlen uint = brotli_max_size_t(4, best_len+1) + if findAllStaticDictionaryMatches(dictionary, data[cur_ix_masked:], minlen, max_length, dict_matches[0:]) { + var maxlen uint = brotli_min_size_t(maxStaticDictionaryMatchLen, max_length) + var l uint + for l = minlen; l <= maxlen; l++ { + var dict_id uint32 = dict_matches[l] + if dict_id < kInvalidMatch { + var distance uint = max_backward + gap + uint(dict_id>>5) + 1 + if distance <= params.dist.max_distance { + initDictionaryBackwardMatch(&matches[0], distance, l, uint(dict_id&31)) + matches = matches[1:] + } + } + } + } + } + + return uint(-cap(matches) + cap(orig_matches)) +} + +/* Stores the hash of the next 4 bytes and re-roots the binary tree at the + current sequence, without returning any matches. + REQUIRES: ix + 128 <= end-of-current-block */ +func (h *h10) Store(data []byte, mask uint, ix uint) { + var max_backward uint = h.window_mask_ - windowGap + 1 + /* Maximum distance is window size - 16, see section 9.1. of the spec. */ + storeAndFindMatchesH10(h, data, ix, mask, 128, max_backward, nil, nil) +} + +func (h *h10) StoreRange(data []byte, mask uint, ix_start uint, ix_end uint) { + var i uint = ix_start + var j uint = ix_start + if ix_start+63 <= ix_end { + i = ix_end - 63 + } + + if ix_start+512 <= i { + for ; j < i; j += 8 { + h.Store(data, mask, j) + } + } + + for ; i < ix_end; i++ { + h.Store(data, mask, i) + } +} + +func (h *h10) StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint) { + if num_bytes >= h.HashTypeLength()-1 && position >= 128 { + var i_start uint = position - 128 + 1 + var i_end uint = brotli_min_size_t(position, i_start+num_bytes) + /* Store the last `128 - 1` positions in the hasher. + These could not be calculated before, since they require knowledge + of both the previous and the current block. */ + + var i uint + for i = i_start; i < i_end; i++ { + /* Maximum distance is window size - 16, see section 9.1. of the spec. 
+	   Furthermore, we have to make sure that we don't look further back
+	   from the start of the next block than the window size, otherwise we
+	   could access already overwritten areas of the ring-buffer. */
+			var max_backward uint = h.window_mask_ - brotli_max_size_t(windowGap-1, position-i)
+
+			/* We know that i + 128 <= position + num_bytes, i.e. the
+			   end of the current block, and that we have at least a
+			   128-byte tail in the ring-buffer. */
+			storeAndFindMatchesH10(h, ringbuffer, i, ringbuffer_mask, 128, max_backward, nil, nil)
+		}
+	}
+}
+
+/* MAX_NUM_MATCHES == 64 + MAX_TREE_SEARCH_DEPTH */
+const maxNumMatchesH10 = 128
+
+func (*h10) FindLongestMatch(dictionary *encoderDictionary, data []byte, ring_buffer_mask uint, distance_cache []int, cur_ix uint, max_length uint, max_backward uint, gap uint, max_distance uint, out *hasherSearchResult) {
+	panic("unimplemented")
+}
+
+func (*h10) PrepareDistanceCache(distance_cache []int) {
+	panic("unimplemented")
+}
diff --git a/vendor/github.com/andybalholm/brotli/h5.go b/vendor/github.com/andybalholm/brotli/h5.go
new file mode 100644
index 00000000000..f391b73fdd7
--- /dev/null
+++ b/vendor/github.com/andybalholm/brotli/h5.go
@@ -0,0 +1,214 @@
+package brotli
+
+import "encoding/binary"
+
+/* Copyright 2010 Google Inc. All Rights Reserved.
+
+   Distributed under MIT license.
+   See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
+*/
+
+/* A (forgetful) hash table to the data seen by the compressor, to
+   help create backward references to previous data.
+
+   This is a hash map of fixed size (bucket_size_) to a ring buffer of
+   fixed size (block_size_). The ring buffer contains the last block_size_
+   index positions of the given hash key in the compressed data. */
+func (*h5) HashTypeLength() uint {
+	return 4
+}
+
+func (*h5) StoreLookahead() uint {
+	return 4
+}
+
+/* HashBytes is the function that chooses the bucket to place the address in. */
+func hashBytesH5(data []byte, shift int) uint32 {
+	var h uint32 = binary.LittleEndian.Uint32(data) * kHashMul32
+
+	/* The higher bits contain more mixture from the multiplication,
+	   so we take our results from there. */
+	return uint32(h >> uint(shift))
+}
+
+type h5 struct {
+	hasherCommon
+	bucket_size_ uint
+	block_size_  uint
+	hash_shift_  int
+	block_mask_  uint32
+	num          []uint16
+	buckets      []uint32
+}
+
+func (h *h5) Initialize(params *encoderParams) {
+	h.hash_shift_ = 32 - h.params.bucket_bits
+	h.bucket_size_ = uint(1) << uint(h.params.bucket_bits)
+	h.block_size_ = uint(1) << uint(h.params.block_bits)
+	h.block_mask_ = uint32(h.block_size_ - 1)
+	h.num = make([]uint16, h.bucket_size_)
+	h.buckets = make([]uint32, h.block_size_*h.bucket_size_)
+}
+
+func (h *h5) Prepare(one_shot bool, input_size uint, data []byte) {
+	var num []uint16 = h.num
+	var partial_prepare_threshold uint = h.bucket_size_ >> 6
+	/* Partial preparation is 100 times slower (per socket). */
+	if one_shot && input_size <= partial_prepare_threshold {
+		var i uint
+		for i = 0; i < input_size; i++ {
+			var key uint32 = hashBytesH5(data[i:], h.hash_shift_)
+			num[key] = 0
+		}
+	} else {
+		for i := 0; i < int(h.bucket_size_); i++ {
+			num[i] = 0
+		}
+	}
+}
+
+/* Look at 4 bytes at &data[ix & mask].
+   Compute a hash from these, and store the value of ix at that position.
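+
+   As a sketch of the layout used below: bucket key owns block_size_
+   slots starting at buckets[key << block_bits], and num[key] cycles
+   through them, so the oldest stored position is overwritten first.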
 */
+func (h *h5) Store(data []byte, mask uint, ix uint) {
+	var num []uint16 = h.num
+	var key uint32 = hashBytesH5(data[ix&mask:], h.hash_shift_)
+	var minor_ix uint = uint(num[key]) & uint(h.block_mask_)
+	var offset uint = minor_ix + uint(key<<uint(h.params.block_bits))
+	h.buckets[offset] = uint32(ix)
+	num[key]++
+}
+
+func (h *h5) StoreRange(data []byte, mask uint, ix_start uint, ix_end uint) {
+	var i uint
+	for i = ix_start; i < ix_end; i++ {
+		h.Store(data, mask, i)
+	}
+}
+
+func (h *h5) StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint) {
+	if num_bytes >= h.HashTypeLength()-1 && position >= 3 {
+		/* Prepare the hashes for three last bytes of the last write.
+		   These could not be calculated before, since they require knowledge
+		   of both the previous and the current block. */
+		h.Store(ringbuffer, ringbuffer_mask, position-3)
+		h.Store(ringbuffer, ringbuffer_mask, position-2)
+		h.Store(ringbuffer, ringbuffer_mask, position-1)
+	}
+}
+
+func (h *h5) PrepareDistanceCache(distance_cache []int) {
+	prepareDistanceCache(distance_cache, h.params.num_last_distances_to_check)
+}
+
+/* Find a longest backward match of &data[cur_ix] up to the length of
+   max_length and stores the position cur_ix in the hash table.
+
+   REQUIRES: PrepareDistanceCacheH5 must be invoked for current distance cache
+   values; if this method is invoked repeatedly with the same distance
+   cache values, it is enough to invoke PrepareDistanceCacheH5 once.
+
+   Does not look for matches longer than max_length.
+   Does not look for matches further away than max_backward.
+   Writes the best match into |out|.
+   |out|->score is updated only if a better match is found. */
+func (h *h5) FindLongestMatch(dictionary *encoderDictionary, data []byte, ring_buffer_mask uint, distance_cache []int, cur_ix uint, max_length uint, max_backward uint, gap uint, max_distance uint, out *hasherSearchResult) {
+	var num []uint16 = h.num
+	var buckets []uint32 = h.buckets
+	var cur_ix_masked uint = cur_ix & ring_buffer_mask
+	var min_score uint = out.score
+	var best_score uint = out.score
+	var best_len uint = out.len
+	var i uint
+	var bucket []uint32
+	/* Don't accept a short copy from far away. */
+	out.len = 0
+
+	out.len_code_delta = 0
+
+	/* Try last distance first. */
+	for i = 0; i < uint(h.params.num_last_distances_to_check); i++ {
+		var backward uint = uint(distance_cache[i])
+		var prev_ix uint = uint(cur_ix - backward)
+		if prev_ix >= cur_ix {
+			continue
+		}
+
+		if backward > max_backward {
+			continue
+		}
+
+		prev_ix &= ring_buffer_mask
+
+		if cur_ix_masked+best_len > ring_buffer_mask || prev_ix+best_len > ring_buffer_mask || data[cur_ix_masked+best_len] != data[prev_ix+best_len] {
+			continue
+		}
+		{
+			var len uint = findMatchLengthWithLimit(data[prev_ix:], data[cur_ix_masked:], max_length)
+			if len >= 3 || (len == 2 && i < 2) {
+				/* Comparing for >= 2 does not change the semantics, but just saves for
+				   a few unnecessary binary logarithms in backward reference score,
+				   since we are not interested in such short matches.
 */
+				var score uint = backwardReferenceScoreUsingLastDistance(uint(len))
+				if best_score < score {
+					if i != 0 {
+						score -= backwardReferencePenaltyUsingLastDistance(i)
+					}
+					if best_score < score {
+						best_score = score
+						best_len = uint(len)
+						out.len = best_len
+						out.distance = backward
+						out.score = best_score
+					}
+				}
+			}
+		}
+	}
+	{
+		var key uint32 = hashBytesH5(data[cur_ix_masked:], h.hash_shift_)
+		bucket = buckets[key<<uint(h.params.block_bits):]
+		var down uint
+		if uint(num[key]) > h.block_size_ {
+			down = uint(num[key]) - h.block_size_
+		} else {
+			down = 0
+		}
+		for i = uint(num[key]); i > down; {
+			var prev_ix uint
+			i--
+			prev_ix = uint(bucket[uint32(i)&h.block_mask_])
+			var backward uint = cur_ix - prev_ix
+			if backward > max_backward {
+				break
+			}
+
+			prev_ix &= ring_buffer_mask
+			if cur_ix_masked+best_len > ring_buffer_mask || prev_ix+best_len > ring_buffer_mask || data[cur_ix_masked+best_len] != data[prev_ix+best_len] {
+				continue
+			}
+			{
+				var len uint = findMatchLengthWithLimit(data[prev_ix:], data[cur_ix_masked:], max_length)
+				if len >= 4 {
+					/* Comparing for >= 3 does not change the semantics, but just saves
+					   for a few unnecessary binary logarithms in backward reference
+					   score, since we are not interested in such short matches. */
+					var score uint = backwardReferenceScore(uint(len), backward)
+					if best_score < score {
+						best_score = score
+						best_len = uint(len)
+						out.len = best_len
+						out.distance = backward
+						out.score = best_score
+					}
+				}
+			}
+		}
+
+		bucket[uint32(num[key])&h.block_mask_] = uint32(cur_ix)
+		num[key]++
+	}
+
+	if min_score == out.score {
+		searchInStaticDictionary(dictionary, h, data[cur_ix_masked:], max_length, max_backward+gap, max_distance, out, false)
+	}
+}
diff --git a/vendor/github.com/andybalholm/brotli/h6.go b/vendor/github.com/andybalholm/brotli/h6.go
new file mode 100644
index 00000000000..80bb224aa87
--- /dev/null
+++ b/vendor/github.com/andybalholm/brotli/h6.go
@@ -0,0 +1,216 @@
+package brotli
+
+import "encoding/binary"
+
+/* Copyright 2010 Google Inc. All Rights Reserved.
+
+   Distributed under MIT license.
+   See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
+*/
+
+/* A (forgetful) hash table to the data seen by the compressor, to
+   help create backward references to previous data.
+
+   This is a hash map of fixed size (bucket_size_) to a ring buffer of
+   fixed size (block_size_). The ring buffer contains the last block_size_
+   index positions of the given hash key in the compressed data. */
+func (*h6) HashTypeLength() uint {
+	return 8
+}
+
+func (*h6) StoreLookahead() uint {
+	return 8
+}
+
+/* HashBytes is the function that chooses the bucket to place the address in. */
+func hashBytesH6(data []byte, mask uint64, shift int) uint32 {
+	var h uint64 = (binary.LittleEndian.Uint64(data) & mask) * kHashMul64Long
+
+	/* The higher bits contain more mixture from the multiplication,
+	   so we take our results from there.
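+
+	   For instance, with bucket_bits == 15 the shift below would be
+	   64 - 15 == 49, so the bucket index is simply the top 15 bits of the
+	   64-bit product.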
 */
+	return uint32(h >> uint(shift))
+}
+
+type h6 struct {
+	hasherCommon
+	bucket_size_ uint
+	block_size_  uint
+	hash_shift_  int
+	hash_mask_   uint64
+	block_mask_  uint32
+	num          []uint16
+	buckets      []uint32
+}
+
+func (h *h6) Initialize(params *encoderParams) {
+	h.hash_shift_ = 64 - h.params.bucket_bits
+	h.hash_mask_ = (^(uint64(0))) >> uint(64-8*h.params.hash_len)
+	h.bucket_size_ = uint(1) << uint(h.params.bucket_bits)
+	h.block_size_ = uint(1) << uint(h.params.block_bits)
+	h.block_mask_ = uint32(h.block_size_ - 1)
+	h.num = make([]uint16, h.bucket_size_)
+	h.buckets = make([]uint32, h.block_size_*h.bucket_size_)
+}
+
+func (h *h6) Prepare(one_shot bool, input_size uint, data []byte) {
+	var num []uint16 = h.num
+	var partial_prepare_threshold uint = h.bucket_size_ >> 6
+	/* Partial preparation is 100 times slower (per socket). */
+	if one_shot && input_size <= partial_prepare_threshold {
+		var i uint
+		for i = 0; i < input_size; i++ {
+			var key uint32 = hashBytesH6(data[i:], h.hash_mask_, h.hash_shift_)
+			num[key] = 0
+		}
+	} else {
+		for i := 0; i < int(h.bucket_size_); i++ {
+			num[i] = 0
+		}
+	}
+}
+
+/* Look at 4 bytes at &data[ix & mask].
+   Compute a hash from these, and store the value of ix at that position. */
+func (h *h6) Store(data []byte, mask uint, ix uint) {
+	var num []uint16 = h.num
+	var key uint32 = hashBytesH6(data[ix&mask:], h.hash_mask_, h.hash_shift_)
+	var minor_ix uint = uint(num[key]) & uint(h.block_mask_)
+	var offset uint = minor_ix + uint(key<<uint(h.params.block_bits))
+	h.buckets[offset] = uint32(ix)
+	num[key]++
+}
+
+func (h *h6) StoreRange(data []byte, mask uint, ix_start uint, ix_end uint) {
+	var i uint
+	for i = ix_start; i < ix_end; i++ {
+		h.Store(data, mask, i)
+	}
+}
+
+func (h *h6) StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint) {
+	if num_bytes >= h.HashTypeLength()-1 && position >= 3 {
+		/* Prepare the hashes for three last bytes of the last write.
+		   These could not be calculated before, since they require knowledge
+		   of both the previous and the current block. */
+		h.Store(ringbuffer, ringbuffer_mask, position-3)
+		h.Store(ringbuffer, ringbuffer_mask, position-2)
+		h.Store(ringbuffer, ringbuffer_mask, position-1)
+	}
+}
+
+func (h *h6) PrepareDistanceCache(distance_cache []int) {
+	prepareDistanceCache(distance_cache, h.params.num_last_distances_to_check)
+}
+
+/* Find a longest backward match of &data[cur_ix] up to the length of
+   max_length and stores the position cur_ix in the hash table.
+
+   REQUIRES: PrepareDistanceCacheH6 must be invoked for current distance cache
+   values; if this method is invoked repeatedly with the same distance
+   cache values, it is enough to invoke PrepareDistanceCacheH6 once.
+
+   Does not look for matches longer than max_length.
+   Does not look for matches further away than max_backward.
+   Writes the best match into |out|.
+   |out|->score is updated only if a better match is found. */
+func (h *h6) FindLongestMatch(dictionary *encoderDictionary, data []byte, ring_buffer_mask uint, distance_cache []int, cur_ix uint, max_length uint, max_backward uint, gap uint, max_distance uint, out *hasherSearchResult) {
+	var num []uint16 = h.num
+	var buckets []uint32 = h.buckets
+	var cur_ix_masked uint = cur_ix & ring_buffer_mask
+	var min_score uint = out.score
+	var best_score uint = out.score
+	var best_len uint = out.len
+	var i uint
+	var bucket []uint32
+	/* Don't accept a short copy from far away. */
+	out.len = 0
+
+	out.len_code_delta = 0
+
+	/* Try last distance first.
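+
+	   (The cache holds the most recent distances plus the +/- 1..3
+	   neighbours filled in by prepareDistanceCache, so a handful of cheap
+	   candidates are tried before walking the hash chain below.)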
 */
+	for i = 0; i < uint(h.params.num_last_distances_to_check); i++ {
+		var backward uint = uint(distance_cache[i])
+		var prev_ix uint = uint(cur_ix - backward)
+		if prev_ix >= cur_ix {
+			continue
+		}
+
+		if backward > max_backward {
+			continue
+		}
+
+		prev_ix &= ring_buffer_mask
+
+		if cur_ix_masked+best_len > ring_buffer_mask || prev_ix+best_len > ring_buffer_mask || data[cur_ix_masked+best_len] != data[prev_ix+best_len] {
+			continue
+		}
+		{
+			var len uint = findMatchLengthWithLimit(data[prev_ix:], data[cur_ix_masked:], max_length)
+			if len >= 3 || (len == 2 && i < 2) {
+				/* Comparing for >= 2 does not change the semantics, but just saves for
+				   a few unnecessary binary logarithms in backward reference score,
+				   since we are not interested in such short matches. */
+				var score uint = backwardReferenceScoreUsingLastDistance(uint(len))
+				if best_score < score {
+					if i != 0 {
+						score -= backwardReferencePenaltyUsingLastDistance(i)
+					}
+					if best_score < score {
+						best_score = score
+						best_len = uint(len)
+						out.len = best_len
+						out.distance = backward
+						out.score = best_score
+					}
+				}
+			}
+		}
+	}
+	{
+		var key uint32 = hashBytesH6(data[cur_ix_masked:], h.hash_mask_, h.hash_shift_)
+		bucket = buckets[key<<uint(h.params.block_bits):]
+		var down uint
+		if uint(num[key]) > h.block_size_ {
+			down = uint(num[key]) - h.block_size_
+		} else {
+			down = 0
+		}
+		for i = uint(num[key]); i > down; {
+			var prev_ix uint
+			i--
+			prev_ix = uint(bucket[uint32(i)&h.block_mask_])
+			var backward uint = cur_ix - prev_ix
+			if backward > max_backward {
+				break
+			}
+
+			prev_ix &= ring_buffer_mask
+			if cur_ix_masked+best_len > ring_buffer_mask || prev_ix+best_len > ring_buffer_mask || data[cur_ix_masked+best_len] != data[prev_ix+best_len] {
+				continue
+			}
+			{
+				var len uint = findMatchLengthWithLimit(data[prev_ix:], data[cur_ix_masked:], max_length)
+				if len >= 4 {
+					/* Comparing for >= 3 does not change the semantics, but just saves
+					   for a few unnecessary binary logarithms in backward reference
+					   score, since we are not interested in such short matches.
*/ + var score uint = backwardReferenceScore(uint(len), backward) + if best_score < score { + best_score = score + best_len = uint(len) + out.len = best_len + out.distance = backward + out.score = best_score + } + } + } + } + + bucket[uint32(num[key])&h.block_mask_] = uint32(cur_ix) + num[key]++ + } + + if min_score == out.score { + searchInStaticDictionary(dictionary, h, data[cur_ix_masked:], max_length, max_backward+gap, max_distance, out, false) + } +} diff --git a/vendor/github.com/andybalholm/brotli/hash.go b/vendor/github.com/andybalholm/brotli/hash.go new file mode 100644 index 00000000000..00f812e87ec --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/hash.go @@ -0,0 +1,342 @@ +package brotli + +import ( + "encoding/binary" + "fmt" +) + +type hasherCommon struct { + params hasherParams + is_prepared_ bool + dict_num_lookups uint + dict_num_matches uint +} + +func (h *hasherCommon) Common() *hasherCommon { + return h +} + +type hasherHandle interface { + Common() *hasherCommon + Initialize(params *encoderParams) + Prepare(one_shot bool, input_size uint, data []byte) + StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint) + HashTypeLength() uint + StoreLookahead() uint + PrepareDistanceCache(distance_cache []int) + FindLongestMatch(dictionary *encoderDictionary, data []byte, ring_buffer_mask uint, distance_cache []int, cur_ix uint, max_length uint, max_backward uint, gap uint, max_distance uint, out *hasherSearchResult) + StoreRange(data []byte, mask uint, ix_start uint, ix_end uint) + Store(data []byte, mask uint, ix uint) +} + +const kCutoffTransformsCount uint32 = 10 + +/* 0, 12, 27, 23, 42, 63, 56, 48, 59, 64 */ +/* 0+0, 4+8, 8+19, 12+11, 16+26, 20+43, 24+32, 28+20, 32+27, 36+28 */ +const kCutoffTransforms uint64 = 0x071B520ADA2D3200 + +type hasherSearchResult struct { + len uint + distance uint + score uint + len_code_delta int +} + +/* kHashMul32 multiplier has these properties: + * The multiplier must be odd. Otherwise we may lose the highest bit. + * No long streaks of ones or zeros. + * There is no effort to ensure that it is a prime, the oddity is enough + for this use. + * The number has been tuned heuristically against compression benchmarks. */ +const kHashMul32 uint32 = 0x1E35A7BD + +const kHashMul64 uint64 = 0x1E35A7BD1E35A7BD + +const kHashMul64Long uint64 = 0x1FE35A7BD3579BD3 + +func hash14(data []byte) uint32 { + var h uint32 = binary.LittleEndian.Uint32(data) * kHashMul32 + + /* The higher bits contain more mixture from the multiplication, + so we take our results from there. */ + return h >> (32 - 14) +} + +func prepareDistanceCache(distance_cache []int, num_distances int) { + if num_distances > 4 { + var last_distance int = distance_cache[0] + distance_cache[4] = last_distance - 1 + distance_cache[5] = last_distance + 1 + distance_cache[6] = last_distance - 2 + distance_cache[7] = last_distance + 2 + distance_cache[8] = last_distance - 3 + distance_cache[9] = last_distance + 3 + if num_distances > 10 { + var next_last_distance int = distance_cache[1] + distance_cache[10] = next_last_distance - 1 + distance_cache[11] = next_last_distance + 1 + distance_cache[12] = next_last_distance - 2 + distance_cache[13] = next_last_distance + 2 + distance_cache[14] = next_last_distance - 3 + distance_cache[15] = next_last_distance + 3 + } + } +} + +const literalByteScore = 135 + +const distanceBitPenalty = 30 + +/* Score must be positive after applying maximal penalty. 
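+
+   A quick sanity check: scoreBase is 30 * 8 * 8 == 1920, while the largest
+   penalty subtracted in backwardReferenceScore is distanceBitPenalty times
+   log2FloorNonZero(offset), at most 30 * 63 == 1890 for a 64-bit offset,
+   so the result stays positive.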
*/ +const scoreBase = (distanceBitPenalty * 8 * 8) + +/* Usually, we always choose the longest backward reference. This function + allows for the exception of that rule. + + If we choose a backward reference that is further away, it will + usually be coded with more bits. We approximate this by assuming + log2(distance). If the distance can be expressed in terms of the + last four distances, we use some heuristic constants to estimate + the bits cost. For the first up to four literals we use the bit + cost of the literals from the literal cost model, after that we + use the average bit cost of the cost model. + + This function is used to sometimes discard a longer backward reference + when it is not much longer and the bit cost for encoding it is more + than the saved literals. + + backward_reference_offset MUST be positive. */ +func backwardReferenceScore(copy_length uint, backward_reference_offset uint) uint { + return scoreBase + literalByteScore*uint(copy_length) - distanceBitPenalty*uint(log2FloorNonZero(backward_reference_offset)) +} + +func backwardReferenceScoreUsingLastDistance(copy_length uint) uint { + return literalByteScore*uint(copy_length) + scoreBase + 15 +} + +func backwardReferencePenaltyUsingLastDistance(distance_short_code uint) uint { + return uint(39) + ((0x1CA10 >> (distance_short_code & 0xE)) & 0xE) +} + +func testStaticDictionaryItem(dictionary *encoderDictionary, item uint, data []byte, max_length uint, max_backward uint, max_distance uint, out *hasherSearchResult) bool { + var len uint + var word_idx uint + var offset uint + var matchlen uint + var backward uint + var score uint + len = item & 0x1F + word_idx = item >> 5 + offset = uint(dictionary.words.offsets_by_length[len]) + len*word_idx + if len > max_length { + return false + } + + matchlen = findMatchLengthWithLimit(data, dictionary.words.data[offset:], uint(len)) + if matchlen+uint(dictionary.cutoffTransformsCount) <= len || matchlen == 0 { + return false + } + { + var cut uint = len - matchlen + var transform_id uint = (cut << 2) + uint((dictionary.cutoffTransforms>>(cut*6))&0x3F) + backward = max_backward + 1 + word_idx + (transform_id << dictionary.words.size_bits_by_length[len]) + } + + if backward > max_distance { + return false + } + + score = backwardReferenceScore(matchlen, backward) + if score < out.score { + return false + } + + out.len = matchlen + out.len_code_delta = int(len) - int(matchlen) + out.distance = backward + out.score = score + return true +} + +func searchInStaticDictionary(dictionary *encoderDictionary, handle hasherHandle, data []byte, max_length uint, max_backward uint, max_distance uint, out *hasherSearchResult, shallow bool) { + var key uint + var i uint + var self *hasherCommon = handle.Common() + if self.dict_num_matches < self.dict_num_lookups>>7 { + return + } + + key = uint(hash14(data) << 1) + for i = 0; ; (func() { i++; key++ })() { + var tmp uint + if shallow { + tmp = 1 + } else { + tmp = 2 + } + if i >= tmp { + break + } + var item uint = uint(dictionary.hash_table[key]) + self.dict_num_lookups++ + if item != 0 { + var item_matches bool = testStaticDictionaryItem(dictionary, item, data, max_length, max_backward, max_distance, out) + if item_matches { + self.dict_num_matches++ + } + } + } +} + +type backwardMatch struct { + distance uint32 + length_and_code uint32 +} + +func initBackwardMatch(self *backwardMatch, dist uint, len uint) { + self.distance = uint32(dist) + self.length_and_code = uint32(len << 5) +} + +func initDictionaryBackwardMatch(self *backwardMatch, 
 dist uint, len uint, len_code uint) {
+	self.distance = uint32(dist)
+	var tmp uint
+	if len == len_code {
+		tmp = 0
+	} else {
+		tmp = len_code
+	}
+	self.length_and_code = uint32(len<<5 | tmp)
+}
+
+func backwardMatchLength(self *backwardMatch) uint {
+	return uint(self.length_and_code >> 5)
+}
+
+func backwardMatchLengthCode(self *backwardMatch) uint {
+	var code uint = uint(self.length_and_code) & 31
+	if code != 0 {
+		return code
+	} else {
+		return backwardMatchLength(self)
+	}
+}
+
+func hasherReset(handle hasherHandle) {
+	if handle == nil {
+		return
+	}
+	handle.Common().is_prepared_ = false
+}
+
+func newHasher(typ int) hasherHandle {
+	switch typ {
+	case 2:
+		return &hashLongestMatchQuickly{
+			bucketBits:    16,
+			bucketSweep:   1,
+			hashLen:       5,
+			useDictionary: true,
+		}
+	case 3:
+		return &hashLongestMatchQuickly{
+			bucketBits:    16,
+			bucketSweep:   2,
+			hashLen:       5,
+			useDictionary: false,
+		}
+	case 4:
+		return &hashLongestMatchQuickly{
+			bucketBits:    17,
+			bucketSweep:   4,
+			hashLen:       5,
+			useDictionary: true,
+		}
+	case 5:
+		return new(h5)
+	case 6:
+		return new(h6)
+	case 10:
+		return new(h10)
+	case 35:
+		return &hashComposite{
+			ha: newHasher(3),
+			hb: &hashRolling{jump: 4},
+		}
+	case 40:
+		return &hashForgetfulChain{
+			bucketBits:              15,
+			numBanks:                1,
+			bankBits:                16,
+			numLastDistancesToCheck: 4,
+		}
+	case 41:
+		return &hashForgetfulChain{
+			bucketBits:              15,
+			numBanks:                1,
+			bankBits:                16,
+			numLastDistancesToCheck: 10,
+		}
+	case 42:
+		return &hashForgetfulChain{
+			bucketBits:              15,
+			numBanks:                512,
+			bankBits:                9,
+			numLastDistancesToCheck: 16,
+		}
+	case 54:
+		return &hashLongestMatchQuickly{
+			bucketBits:    20,
+			bucketSweep:   4,
+			hashLen:       7,
+			useDictionary: false,
+		}
+	case 55:
+		return &hashComposite{
+			ha: newHasher(54),
+			hb: &hashRolling{jump: 4},
+		}
+	case 65:
+		return &hashComposite{
+			ha: newHasher(6),
+			hb: &hashRolling{jump: 1},
+		}
+	}
+
+	panic(fmt.Sprintf("unknown hasher type: %d", typ))
+}
+
+func hasherSetup(handle *hasherHandle, params *encoderParams, data []byte, position uint, input_size uint, is_last bool) {
+	var self hasherHandle = nil
+	var common *hasherCommon = nil
+	var one_shot bool = (position == 0 && is_last)
+	if *handle == nil {
+		chooseHasher(params, &params.hasher)
+		self = newHasher(params.hasher.type_)
+
+		*handle = self
+		common = self.Common()
+		common.params = params.hasher
+		self.Initialize(params)
+	}
+
+	self = *handle
+	common = self.Common()
+	if !common.is_prepared_ {
+		self.Prepare(one_shot, input_size, data)
+
+		if position == 0 {
+			common.dict_num_lookups = 0
+			common.dict_num_matches = 0
+		}
+
+		common.is_prepared_ = true
+	}
+}
+
+func initOrStitchToPreviousBlock(handle *hasherHandle, data []byte, mask uint, params *encoderParams, position uint, input_size uint, is_last bool) {
+	var self hasherHandle
+	hasherSetup(handle, params, data, position, input_size, is_last)
+	self = *handle
+	self.StitchToPreviousBlock(input_size, position, data, mask)
+}
diff --git a/vendor/github.com/andybalholm/brotli/hash_composite.go b/vendor/github.com/andybalholm/brotli/hash_composite.go
new file mode 100644
index 00000000000..a65fe2e6a9a
--- /dev/null
+++ b/vendor/github.com/andybalholm/brotli/hash_composite.go
@@ -0,0 +1,93 @@
+package brotli
+
+/* Copyright 2018 Google Inc. All Rights Reserved.
+
+   Distributed under MIT license.
+   See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
+*/
+
+func (h *hashComposite) HashTypeLength() uint {
+	var a uint = h.ha.HashTypeLength()
+	var b uint = h.hb.HashTypeLength()
+	if a > b {
+		return a
+	} else {
+		return b
+	}
+}
+
+func (h *hashComposite) StoreLookahead() uint {
+	var a uint = h.ha.StoreLookahead()
+	var b uint = h.hb.StoreLookahead()
+	if a > b {
+		return a
+	} else {
+		return b
+	}
+}
+
+/* Composite hasher: This hasher allows combining two other hashers, HASHER_A
+   and HASHER_B. */
+type hashComposite struct {
+	hasherCommon
+	ha     hasherHandle
+	hb     hasherHandle
+	params *encoderParams
+}
+
+func (h *hashComposite) Initialize(params *encoderParams) {
+	h.params = params
+}
+
+/* TODO: Initialization of the hashers is deferred to Prepare (and the params
+   are remembered here) because we don't get the one_shot and input_size
+   params here, which are needed to know their memory size. Instead, provide
+   those params to each hasher's Initialize. */
+func (h *hashComposite) Prepare(one_shot bool, input_size uint, data []byte) {
+	if h.ha == nil {
+		var common_a *hasherCommon
+		var common_b *hasherCommon
+
+		common_a = h.ha.Common()
+		common_a.params = h.params.hasher
+		common_a.is_prepared_ = false
+		common_a.dict_num_lookups = 0
+		common_a.dict_num_matches = 0
+		h.ha.Initialize(h.params)
+
+		common_b = h.hb.Common()
+		common_b.params = h.params.hasher
+		common_b.is_prepared_ = false
+		common_b.dict_num_lookups = 0
+		common_b.dict_num_matches = 0
+		h.hb.Initialize(h.params)
+	}
+
+	h.ha.Prepare(one_shot, input_size, data)
+	h.hb.Prepare(one_shot, input_size, data)
+}
+
+func (h *hashComposite) Store(data []byte, mask uint, ix uint) {
+	h.ha.Store(data, mask, ix)
+	h.hb.Store(data, mask, ix)
+}
+
+func (h *hashComposite) StoreRange(data []byte, mask uint, ix_start uint, ix_end uint) {
+	h.ha.StoreRange(data, mask, ix_start, ix_end)
+	h.hb.StoreRange(data, mask, ix_start, ix_end)
+}
+
+func (h *hashComposite) StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ring_buffer_mask uint) {
+	h.ha.StitchToPreviousBlock(num_bytes, position, ringbuffer, ring_buffer_mask)
+	h.hb.StitchToPreviousBlock(num_bytes, position, ringbuffer, ring_buffer_mask)
+}
+
+func (h *hashComposite) PrepareDistanceCache(distance_cache []int) {
+	h.ha.PrepareDistanceCache(distance_cache)
+	h.hb.PrepareDistanceCache(distance_cache)
+}
+
+func (h *hashComposite) FindLongestMatch(dictionary *encoderDictionary, data []byte, ring_buffer_mask uint, distance_cache []int, cur_ix uint, max_length uint, max_backward uint, gap uint, max_distance uint, out *hasherSearchResult) {
+	h.ha.FindLongestMatch(dictionary, data, ring_buffer_mask, distance_cache, cur_ix, max_length, max_backward, gap, max_distance, out)
+	h.hb.FindLongestMatch(dictionary, data, ring_buffer_mask, distance_cache, cur_ix, max_length, max_backward, gap, max_distance, out)
+}
diff --git a/vendor/github.com/andybalholm/brotli/hash_forgetful_chain.go b/vendor/github.com/andybalholm/brotli/hash_forgetful_chain.go
new file mode 100644
index 00000000000..306e46d3dba
--- /dev/null
+++ b/vendor/github.com/andybalholm/brotli/hash_forgetful_chain.go
@@ -0,0 +1,252 @@
+package brotli
+
+import "encoding/binary"
+
+/* Copyright 2016 Google Inc. All Rights Reserved.
+
+   Distributed under MIT license.
+   See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
+*/
+
+func (*hashForgetfulChain) HashTypeLength() uint {
+	return 4
+}
+
+func (*hashForgetfulChain) StoreLookahead() uint {
+	return 4
+}
+
+/* HashBytes is the function that chooses the bucket to place the address in. */
+func (h *hashForgetfulChain) HashBytes(data []byte) uint {
+	var hash uint32 = binary.LittleEndian.Uint32(data) * kHashMul32
+
+	/* The higher bits contain more mixture from the multiplication,
+	   so we take our results from there. */
+	return uint(hash >> (32 - h.bucketBits))
+}
+
+type slot struct {
+	delta uint16
+	next  uint16
+}
+
+/* A (forgetful) hash table to the data seen by the compressor, to
+   help create backward references to previous data.
+
+   Hashes are stored in chains which are bucketed to groups. Groups of chains
+   share a storage "bank". When more than "bank size" chain nodes are added,
+   the oldest nodes are replaced; this way several chains may share a tail. */
+type hashForgetfulChain struct {
+	hasherCommon
+
+	bucketBits              uint
+	numBanks                uint
+	bankBits                uint
+	numLastDistancesToCheck int
+
+	addr          []uint32
+	head          []uint16
+	tiny_hash     [65536]byte
+	banks         [][]slot
+	free_slot_idx []uint16
+	max_hops      uint
+}
+
+func (h *hashForgetfulChain) Initialize(params *encoderParams) {
+	var q uint
+	if params.quality > 6 {
+		q = 7
+	} else {
+		q = 8
+	}
+	h.max_hops = q << uint(params.quality-4)
+
+	bankSize := 1 << h.bankBits
+	bucketSize := 1 << h.bucketBits
+
+	h.addr = make([]uint32, bucketSize)
+	h.head = make([]uint16, bucketSize)
+	h.banks = make([][]slot, h.numBanks)
+	for i := range h.banks {
+		h.banks[i] = make([]slot, bankSize)
+	}
+	h.free_slot_idx = make([]uint16, h.numBanks)
+}
+
+func (h *hashForgetfulChain) Prepare(one_shot bool, input_size uint, data []byte) {
+	var partial_prepare_threshold uint = (1 << h.bucketBits) >> 6
+	/* Partial preparation is 100 times slower (per socket). */
+	if one_shot && input_size <= partial_prepare_threshold {
+		var i uint
+		for i = 0; i < input_size; i++ {
+			var bucket uint = h.HashBytes(data[i:])
+
+			/* See InitEmpty comment. */
+			h.addr[bucket] = 0xCCCCCCCC
+
+			h.head[bucket] = 0xCCCC
+		}
+	} else {
+		/* Fill |addr| array with 0xCCCCCCCC value. Because of wrapping, the
+		   position processed by the hasher never reaches 3GB + 64M; this makes
+		   all new chains terminate after the first node. */
+		for i := range h.addr {
+			h.addr[i] = 0xCCCCCCCC
+		}
+
+		for i := range h.head {
+			h.head[i] = 0
+		}
+	}
+
+	h.tiny_hash = [65536]byte{}
+	for i := range h.free_slot_idx {
+		h.free_slot_idx[i] = 0
+	}
+}
+
+/* Look at 4 bytes at &data[ix & mask]. Compute a hash from these, and prepend
+   node to corresponding chain; also update tiny_hash for current position.
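+
+   Chain links are stored as 16-bit deltas (saturated at 0xFFFF in the code
+   below), so a bank of 1 << bankBits slots can be shared by many buckets;
+   once a slot is recycled, the chains that ran through it are silently cut.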
*/ +func (h *hashForgetfulChain) Store(data []byte, mask uint, ix uint) { + var key uint = h.HashBytes(data[ix&mask:]) + var bank uint = key & (h.numBanks - 1) + idx := uint(h.free_slot_idx[bank]) & ((1 << h.bankBits) - 1) + h.free_slot_idx[bank]++ + var delta uint = ix - uint(h.addr[key]) + h.tiny_hash[uint16(ix)] = byte(key) + if delta > 0xFFFF { + delta = 0xFFFF + } + h.banks[bank][idx].delta = uint16(delta) + h.banks[bank][idx].next = h.head[key] + h.addr[key] = uint32(ix) + h.head[key] = uint16(idx) +} + +func (h *hashForgetfulChain) StoreRange(data []byte, mask uint, ix_start uint, ix_end uint) { + var i uint + for i = ix_start; i < ix_end; i++ { + h.Store(data, mask, i) + } +} + +func (h *hashForgetfulChain) StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ring_buffer_mask uint) { + if num_bytes >= h.HashTypeLength()-1 && position >= 3 { + /* Prepare the hashes for three last bytes of the last write. + These could not be calculated before, since they require knowledge + of both the previous and the current block. */ + h.Store(ringbuffer, ring_buffer_mask, position-3) + h.Store(ringbuffer, ring_buffer_mask, position-2) + h.Store(ringbuffer, ring_buffer_mask, position-1) + } +} + +func (h *hashForgetfulChain) PrepareDistanceCache(distance_cache []int) { + prepareDistanceCache(distance_cache, h.numLastDistancesToCheck) +} + +/* Find a longest backward match of &data[cur_ix] up to the length of + max_length and stores the position cur_ix in the hash table. + + REQUIRES: PrepareDistanceCachehashForgetfulChain must be invoked for current distance cache + values; if this method is invoked repeatedly with the same distance + cache values, it is enough to invoke PrepareDistanceCachehashForgetfulChain once. + + Does not look for matches longer than max_length. + Does not look for matches further away than max_backward. + Writes the best match into |out|. + |out|->score is updated only if a better match is found. */ +func (h *hashForgetfulChain) FindLongestMatch(dictionary *encoderDictionary, data []byte, ring_buffer_mask uint, distance_cache []int, cur_ix uint, max_length uint, max_backward uint, gap uint, max_distance uint, out *hasherSearchResult) { + var cur_ix_masked uint = cur_ix & ring_buffer_mask + var min_score uint = out.score + var best_score uint = out.score + var best_len uint = out.len + var key uint = h.HashBytes(data[cur_ix_masked:]) + var tiny_hash byte = byte(key) + /* Don't accept a short copy from far away. */ + out.len = 0 + + out.len_code_delta = 0 + + /* Try last distance first. */ + for i := 0; i < h.numLastDistancesToCheck; i++ { + var backward uint = uint(distance_cache[i]) + var prev_ix uint = (cur_ix - backward) + + /* For distance code 0 we want to consider 2-byte matches. 
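+
+	   (tiny_hash keeps one byte of the bucket key per position, so for
+	   i > 0 a mismatching byte cheaply rejects a candidate; for i == 0 the
+	   check is skipped, which is what admits the 2-byte matches.)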
*/ + if i > 0 && h.tiny_hash[uint16(prev_ix)] != tiny_hash { + continue + } + if prev_ix >= cur_ix || backward > max_backward { + continue + } + + prev_ix &= ring_buffer_mask + { + var len uint = findMatchLengthWithLimit(data[prev_ix:], data[cur_ix_masked:], max_length) + if len >= 2 { + var score uint = backwardReferenceScoreUsingLastDistance(uint(len)) + if best_score < score { + if i != 0 { + score -= backwardReferencePenaltyUsingLastDistance(uint(i)) + } + if best_score < score { + best_score = score + best_len = uint(len) + out.len = best_len + out.distance = backward + out.score = best_score + } + } + } + } + } + { + var bank uint = key & (h.numBanks - 1) + var backward uint = 0 + var hops uint = h.max_hops + var delta uint = cur_ix - uint(h.addr[key]) + var slot uint = uint(h.head[key]) + for { + tmp6 := hops + hops-- + if tmp6 == 0 { + break + } + var prev_ix uint + var last uint = slot + backward += delta + if backward > max_backward { + break + } + prev_ix = (cur_ix - backward) & ring_buffer_mask + slot = uint(h.banks[bank][last].next) + delta = uint(h.banks[bank][last].delta) + if cur_ix_masked+best_len > ring_buffer_mask || prev_ix+best_len > ring_buffer_mask || data[cur_ix_masked+best_len] != data[prev_ix+best_len] { + continue + } + { + var len uint = findMatchLengthWithLimit(data[prev_ix:], data[cur_ix_masked:], max_length) + if len >= 4 { + /* Comparing for >= 3 does not change the semantics, but just saves + for a few unnecessary binary logarithms in backward reference + score, since we are not interested in such short matches. */ + var score uint = backwardReferenceScore(uint(len), backward) + if best_score < score { + best_score = score + best_len = uint(len) + out.len = best_len + out.distance = backward + out.score = best_score + } + } + } + } + + h.Store(data, ring_buffer_mask, cur_ix) + } + + if out.score == min_score { + searchInStaticDictionary(dictionary, h, data[cur_ix_masked:], max_length, max_backward+gap, max_distance, out, false) + } +} diff --git a/vendor/github.com/andybalholm/brotli/hash_longest_match_quickly.go b/vendor/github.com/andybalholm/brotli/hash_longest_match_quickly.go new file mode 100644 index 00000000000..9375dc15539 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/hash_longest_match_quickly.go @@ -0,0 +1,214 @@ +package brotli + +import "encoding/binary" + +/* Copyright 2010 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* For BUCKET_SWEEP == 1, enabling the dictionary lookup makes compression + a little faster (0.5% - 1%) and it compresses 0.15% better on small text + and HTML inputs. */ + +func (*hashLongestMatchQuickly) HashTypeLength() uint { + return 8 +} + +func (*hashLongestMatchQuickly) StoreLookahead() uint { + return 8 +} + +/* HashBytes is the function that chooses the bucket to place + the address in. The HashLongestMatch and hashLongestMatchQuickly + classes have separate, different implementations of hashing. */ +func (h *hashLongestMatchQuickly) HashBytes(data []byte) uint32 { + var hash uint64 = ((binary.LittleEndian.Uint64(data) << (64 - 8*h.hashLen)) * kHashMul64) + + /* The higher bits contain more mixture from the multiplication, + so we take our results from there. */ + return uint32(hash >> (64 - h.bucketBits)) +} + +/* A (forgetful) hash table to the data seen by the compressor, to + help create backward references to previous data. + + This is a hash map of fixed size (1 << 16). 
Starting from the
+   given index, bucketSweep buckets are used to store values of a key. */
+type hashLongestMatchQuickly struct {
+	hasherCommon
+
+	bucketBits    uint
+	bucketSweep   int
+	hashLen       uint
+	useDictionary bool
+
+	buckets []uint32
+}
+
+func (h *hashLongestMatchQuickly) Initialize(params *encoderParams) {
+	h.buckets = make([]uint32, 1<<h.bucketBits+uint(h.bucketSweep))
+}
+
+func (h *hashLongestMatchQuickly) Prepare(one_shot bool, input_size uint, data []byte) {
+	var partial_prepare_threshold uint = (4 << h.bucketBits) >> 7
+	/* Partial preparation is 100 times slower (per socket). */
+	if one_shot && input_size <= partial_prepare_threshold {
+		var i uint
+		for i = 0; i < input_size; i++ {
+			var key uint32 = h.HashBytes(data[i:])
+			for j := 0; j < h.bucketSweep; j++ {
+				h.buckets[key+uint32(j)] = 0
+			}
+		}
+	} else {
+		/* It is not strictly necessary to fill this buffer here, but
+		   not filling will make the results of the compression stochastic
+		   (but correct). This is because random data would cause the
+		   system to find accidentally good backward references here and there. */
+		for i := range h.buckets {
+			h.buckets[i] = 0
+		}
+	}
+}
+
+/* Look at 5 bytes at &data[ix & mask].
+   Compute a hash from these, and store the value somewhere within
+   [ix .. ix+3]. */
+func (h *hashLongestMatchQuickly) Store(data []byte, mask uint, ix uint) {
+	var key uint32 = h.HashBytes(data[ix&mask:])
+	var off uint32 = uint32(ix>>3) % uint32(h.bucketSweep)
+	/* Wiggle the value with the bucket sweep range. */
+	h.buckets[key+off] = uint32(ix)
+}
+
+func (h *hashLongestMatchQuickly) StoreRange(data []byte, mask uint, ix_start uint, ix_end uint) {
+	var i uint
+	for i = ix_start; i < ix_end; i++ {
+		h.Store(data, mask, i)
+	}
+}
+
+func (h *hashLongestMatchQuickly) StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint) {
+	if num_bytes >= h.HashTypeLength()-1 && position >= 3 {
+		/* Prepare the hashes for three last bytes of the last write.
+		   These could not be calculated before, since they require knowledge
+		   of both the previous and the current block. */
+		h.Store(ringbuffer, ringbuffer_mask, position-3)
+		h.Store(ringbuffer, ringbuffer_mask, position-2)
+		h.Store(ringbuffer, ringbuffer_mask, position-1)
+	}
+}
+
+func (*hashLongestMatchQuickly) PrepareDistanceCache(distance_cache []int) {
+}
+
+/* Find a longest backward match of &data[cur_ix & ring_buffer_mask]
+   up to the length of max_length and stores the position cur_ix in the
+   hash table.
+
+   Does not look for matches longer than max_length.
+   Does not look for matches further away than max_backward.
+   Writes the best match into |out|.
+   |out|->score is updated only if a better match is found.
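+
+   As a sketch of the flow below: the cached distance_cache[0] is tried
+   first, then either the single bucket (bucketSweep == 1) or a small sweep
+   of buckets, and finally the static dictionary when useDictionary is set.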
*/ +func (h *hashLongestMatchQuickly) FindLongestMatch(dictionary *encoderDictionary, data []byte, ring_buffer_mask uint, distance_cache []int, cur_ix uint, max_length uint, max_backward uint, gap uint, max_distance uint, out *hasherSearchResult) { + var best_len_in uint = out.len + var cur_ix_masked uint = cur_ix & ring_buffer_mask + var key uint32 = h.HashBytes(data[cur_ix_masked:]) + var compare_char int = int(data[cur_ix_masked+best_len_in]) + var min_score uint = out.score + var best_score uint = out.score + var best_len uint = best_len_in + var cached_backward uint = uint(distance_cache[0]) + var prev_ix uint = cur_ix - cached_backward + var bucket []uint32 + out.len_code_delta = 0 + if prev_ix < cur_ix { + prev_ix &= uint(uint32(ring_buffer_mask)) + if compare_char == int(data[prev_ix+best_len]) { + var len uint = findMatchLengthWithLimit(data[prev_ix:], data[cur_ix_masked:], max_length) + if len >= 4 { + var score uint = backwardReferenceScoreUsingLastDistance(uint(len)) + if best_score < score { + best_score = score + best_len = uint(len) + out.len = uint(len) + out.distance = cached_backward + out.score = best_score + compare_char = int(data[cur_ix_masked+best_len]) + if h.bucketSweep == 1 { + h.buckets[key] = uint32(cur_ix) + return + } + } + } + } + } + + if h.bucketSweep == 1 { + var backward uint + var len uint + + /* Only one to look for, don't bother to prepare for a loop. */ + prev_ix = uint(h.buckets[key]) + + h.buckets[key] = uint32(cur_ix) + backward = cur_ix - prev_ix + prev_ix &= uint(uint32(ring_buffer_mask)) + if compare_char != int(data[prev_ix+best_len_in]) { + return + } + + if backward == 0 || backward > max_backward { + return + } + + len = findMatchLengthWithLimit(data[prev_ix:], data[cur_ix_masked:], max_length) + if len >= 4 { + var score uint = backwardReferenceScore(uint(len), backward) + if best_score < score { + out.len = uint(len) + out.distance = backward + out.score = score + return + } + } + } else { + bucket = h.buckets[key:] + var i int + prev_ix = uint(bucket[0]) + bucket = bucket[1:] + for i = 0; i < h.bucketSweep; (func() { i++; tmp3 := bucket; bucket = bucket[1:]; prev_ix = uint(tmp3[0]) })() { + var backward uint = cur_ix - prev_ix + var len uint + prev_ix &= uint(uint32(ring_buffer_mask)) + if compare_char != int(data[prev_ix+best_len]) { + continue + } + + if backward == 0 || backward > max_backward { + continue + } + + len = findMatchLengthWithLimit(data[prev_ix:], data[cur_ix_masked:], max_length) + if len >= 4 { + var score uint = backwardReferenceScore(uint(len), backward) + if best_score < score { + best_score = score + best_len = uint(len) + out.len = best_len + out.distance = backward + out.score = score + compare_char = int(data[cur_ix_masked+best_len]) + } + } + } + } + + if h.useDictionary && min_score == out.score { + searchInStaticDictionary(dictionary, h, data[cur_ix_masked:], max_length, max_backward+gap, max_distance, out, true) + } + + h.buckets[key+uint32((cur_ix>>3)%uint(h.bucketSweep))] = uint32(cur_ix) +} diff --git a/vendor/github.com/andybalholm/brotli/hash_rolling.go b/vendor/github.com/andybalholm/brotli/hash_rolling.go new file mode 100644 index 00000000000..6630fc07e4b --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/hash_rolling.go @@ -0,0 +1,168 @@ +package brotli + +/* Copyright 2018 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* NOTE: this hasher does not search in the dictionary. 
It is used as + backup-hasher, the main hasher already searches in it. */ + +const kRollingHashMul32 uint32 = 69069 + +const kInvalidPosHashRolling uint32 = 0xffffffff + +/* This hasher uses a longer forward length, but returning a higher value here + will hurt compression by the main hasher when combined with a composite + hasher. The hasher tests for forward itself instead. */ +func (*hashRolling) HashTypeLength() uint { + return 4 +} + +func (*hashRolling) StoreLookahead() uint { + return 4 +} + +/* Computes a code from a single byte. A lookup table of 256 values could be + used, but simply adding 1 works about as good. */ +func (*hashRolling) HashByte(b byte) uint32 { + return uint32(b) + 1 +} + +func (h *hashRolling) HashRollingFunctionInitial(state uint32, add byte, factor uint32) uint32 { + return uint32(factor*state + h.HashByte(add)) +} + +func (h *hashRolling) HashRollingFunction(state uint32, add byte, rem byte, factor uint32, factor_remove uint32) uint32 { + return uint32(factor*state + h.HashByte(add) - factor_remove*h.HashByte(rem)) +} + +/* Rolling hash for long distance long string matches. Stores one position + per bucket, bucket key is computed over a long region. */ +type hashRolling struct { + hasherCommon + + jump int + + state uint32 + table []uint32 + next_ix uint + factor uint32 + factor_remove uint32 +} + +func (h *hashRolling) Initialize(params *encoderParams) { + h.state = 0 + h.next_ix = 0 + + h.factor = kRollingHashMul32 + + /* Compute the factor of the oldest byte to remove: factor**steps modulo + 0xffffffff (the multiplications rely on 32-bit overflow) */ + h.factor_remove = 1 + + for i := 0; i < 32; i += h.jump { + h.factor_remove *= h.factor + } + + h.table = make([]uint32, 16777216) + for i := 0; i < 16777216; i++ { + h.table[i] = kInvalidPosHashRolling + } +} + +func (h *hashRolling) Prepare(one_shot bool, input_size uint, data []byte) { + /* Too small size, cannot use this hasher. */ + if input_size < 32 { + return + } + h.state = 0 + for i := 0; i < 32; i += h.jump { + h.state = h.HashRollingFunctionInitial(h.state, data[i], h.factor) + } +} + +func (*hashRolling) Store(data []byte, mask uint, ix uint) { +} + +func (*hashRolling) StoreRange(data []byte, mask uint, ix_start uint, ix_end uint) { +} + +func (h *hashRolling) StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ring_buffer_mask uint) { + var position_masked uint + /* In this case we must re-initialize the hasher from scratch from the + current position. */ + + var available uint = num_bytes + if position&uint(h.jump-1) != 0 { + var diff uint = uint(h.jump) - (position & uint(h.jump-1)) + if diff > available { + available = 0 + } else { + available = available - diff + } + position += diff + } + + position_masked = position & ring_buffer_mask + + /* wrapping around ringbuffer not handled. 
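+	   Instead, available is clamped just below so that the Prepare
+	   call never reads past the masked end of the ring buffer.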
*/ + if available > ring_buffer_mask-position_masked { + available = ring_buffer_mask - position_masked + } + + h.Prepare(false, available, ringbuffer[position&ring_buffer_mask:]) + h.next_ix = position +} + +func (*hashRolling) PrepareDistanceCache(distance_cache []int) { +} + +func (h *hashRolling) FindLongestMatch(dictionary *encoderDictionary, data []byte, ring_buffer_mask uint, distance_cache []int, cur_ix uint, max_length uint, max_backward uint, gap uint, max_distance uint, out *hasherSearchResult) { + var cur_ix_masked uint = cur_ix & ring_buffer_mask + var pos uint = h.next_ix + + if cur_ix&uint(h.jump-1) != 0 { + return + } + + /* Not enough lookahead */ + if max_length < 32 { + return + } + + for pos = h.next_ix; pos <= cur_ix; pos += uint(h.jump) { + var code uint32 = h.state & ((16777216 * 64) - 1) + var rem byte = data[pos&ring_buffer_mask] + var add byte = data[(pos+32)&ring_buffer_mask] + var found_ix uint = uint(kInvalidPosHashRolling) + + h.state = h.HashRollingFunction(h.state, add, rem, h.factor, h.factor_remove) + + if code < 16777216 { + found_ix = uint(h.table[code]) + h.table[code] = uint32(pos) + if pos == cur_ix && uint32(found_ix) != kInvalidPosHashRolling { + /* The cast to 32-bit makes backward distances up to 4GB work even + if cur_ix is above 4GB, despite using 32-bit values in the table. */ + var backward uint = uint(uint32(cur_ix - found_ix)) + if backward <= max_backward { + var found_ix_masked uint = found_ix & ring_buffer_mask + var len uint = findMatchLengthWithLimit(data[found_ix_masked:], data[cur_ix_masked:], max_length) + if len >= 4 && len > out.len { + var score uint = backwardReferenceScore(uint(len), backward) + if score > out.score { + out.len = uint(len) + out.distance = backward + out.score = score + out.len_code_delta = 0 + } + } + } + } + } + } + + h.next_ix = cur_ix + uint(h.jump) +} diff --git a/vendor/github.com/andybalholm/brotli/histogram.go b/vendor/github.com/andybalholm/brotli/histogram.go new file mode 100644 index 00000000000..0346622beb3 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/histogram.go @@ -0,0 +1,226 @@ +package brotli + +import "math" + +/* The distance symbols effectively used by "Large Window Brotli" (32-bit). 
*/ +const numHistogramDistanceSymbols = 544 + +type histogramLiteral struct { + data_ [numLiteralSymbols]uint32 + total_count_ uint + bit_cost_ float64 +} + +func histogramClearLiteral(self *histogramLiteral) { + self.data_ = [numLiteralSymbols]uint32{} + self.total_count_ = 0 + self.bit_cost_ = math.MaxFloat64 +} + +func clearHistogramsLiteral(array []histogramLiteral, length uint) { + var i uint + for i = 0; i < length; i++ { + histogramClearLiteral(&array[i:][0]) + } +} + +func histogramAddLiteral(self *histogramLiteral, val uint) { + self.data_[val]++ + self.total_count_++ +} + +func histogramAddVectorLiteral(self *histogramLiteral, p []byte, n uint) { + self.total_count_ += n + n += 1 + for { + n-- + if n == 0 { + break + } + self.data_[p[0]]++ + p = p[1:] + } +} + +func histogramAddHistogramLiteral(self *histogramLiteral, v *histogramLiteral) { + var i uint + self.total_count_ += v.total_count_ + for i = 0; i < numLiteralSymbols; i++ { + self.data_[i] += v.data_[i] + } +} + +func histogramDataSizeLiteral() uint { + return numLiteralSymbols +} + +type histogramCommand struct { + data_ [numCommandSymbols]uint32 + total_count_ uint + bit_cost_ float64 +} + +func histogramClearCommand(self *histogramCommand) { + self.data_ = [numCommandSymbols]uint32{} + self.total_count_ = 0 + self.bit_cost_ = math.MaxFloat64 +} + +func clearHistogramsCommand(array []histogramCommand, length uint) { + var i uint + for i = 0; i < length; i++ { + histogramClearCommand(&array[i:][0]) + } +} + +func histogramAddCommand(self *histogramCommand, val uint) { + self.data_[val]++ + self.total_count_++ +} + +func histogramAddVectorCommand(self *histogramCommand, p []uint16, n uint) { + self.total_count_ += n + n += 1 + for { + n-- + if n == 0 { + break + } + self.data_[p[0]]++ + p = p[1:] + } +} + +func histogramAddHistogramCommand(self *histogramCommand, v *histogramCommand) { + var i uint + self.total_count_ += v.total_count_ + for i = 0; i < numCommandSymbols; i++ { + self.data_[i] += v.data_[i] + } +} + +func histogramDataSizeCommand() uint { + return numCommandSymbols +} + +type histogramDistance struct { + data_ [numDistanceSymbols]uint32 + total_count_ uint + bit_cost_ float64 +} + +func histogramClearDistance(self *histogramDistance) { + self.data_ = [numDistanceSymbols]uint32{} + self.total_count_ = 0 + self.bit_cost_ = math.MaxFloat64 +} + +func clearHistogramsDistance(array []histogramDistance, length uint) { + var i uint + for i = 0; i < length; i++ { + histogramClearDistance(&array[i:][0]) + } +} + +func histogramAddDistance(self *histogramDistance, val uint) { + self.data_[val]++ + self.total_count_++ +} + +func histogramAddVectorDistance(self *histogramDistance, p []uint16, n uint) { + self.total_count_ += n + n += 1 + for { + n-- + if n == 0 { + break + } + self.data_[p[0]]++ + p = p[1:] + } +} + +func histogramAddHistogramDistance(self *histogramDistance, v *histogramDistance) { + var i uint + self.total_count_ += v.total_count_ + for i = 0; i < numDistanceSymbols; i++ { + self.data_[i] += v.data_[i] + } +} + +func histogramDataSizeDistance() uint { + return numDistanceSymbols +} + +type blockSplitIterator struct { + split_ *blockSplit + idx_ uint + type_ uint + length_ uint +} + +func initBlockSplitIterator(self *blockSplitIterator, split *blockSplit) { + self.split_ = split + self.idx_ = 0 + self.type_ = 0 + if len(split.lengths) > 0 { + self.length_ = uint(split.lengths[0]) + } else { + self.length_ = 0 + } +} + +func blockSplitIteratorNext(self *blockSplitIterator) { + if self.length_ == 0 { + 
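+		/* Current block exhausted: step to the next (type, length)
+		   pair of the split. */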
self.idx_++
+		self.type_ = uint(self.split_.types[self.idx_])
+		self.length_ = uint(self.split_.lengths[self.idx_])
+	}
+
+	self.length_--
+}
+
+func buildHistogramsWithContext(cmds []command, literal_split *blockSplit, insert_and_copy_split *blockSplit, dist_split *blockSplit, ringbuffer []byte, start_pos uint, mask uint, prev_byte byte, prev_byte2 byte, context_modes []int, literal_histograms []histogramLiteral, insert_and_copy_histograms []histogramCommand, copy_dist_histograms []histogramDistance) {
+	var pos uint = start_pos
+	var literal_it blockSplitIterator
+	var insert_and_copy_it blockSplitIterator
+	var dist_it blockSplitIterator
+
+	initBlockSplitIterator(&literal_it, literal_split)
+	initBlockSplitIterator(&insert_and_copy_it, insert_and_copy_split)
+	initBlockSplitIterator(&dist_it, dist_split)
+	for i := range cmds {
+		var cmd *command = &cmds[i]
+		var j uint
+		blockSplitIteratorNext(&insert_and_copy_it)
+		histogramAddCommand(&insert_and_copy_histograms[insert_and_copy_it.type_], uint(cmd.cmd_prefix_))
+
+		/* TODO: unwrap iterator blocks. */
+		for j = uint(cmd.insert_len_); j != 0; j-- {
+			var context uint
+			blockSplitIteratorNext(&literal_it)
+			context = literal_it.type_
+			if context_modes != nil {
+				var lut contextLUT = getContextLUT(context_modes[context])
+				context = (context << literalContextBits) + uint(getContext(prev_byte, prev_byte2, lut))
+			}
+
+			histogramAddLiteral(&literal_histograms[context], uint(ringbuffer[pos&mask]))
+			prev_byte2 = prev_byte
+			prev_byte = ringbuffer[pos&mask]
+			pos++
+		}
+
+		pos += uint(commandCopyLen(cmd))
+		if commandCopyLen(cmd) != 0 {
+			prev_byte2 = ringbuffer[(pos-2)&mask]
+			prev_byte = ringbuffer[(pos-1)&mask]
+			if cmd.cmd_prefix_ >= 128 {
+				var context uint
+				blockSplitIteratorNext(&dist_it)
+				context = uint(uint32(dist_it.type_<<distanceContextBits) + commandDistanceContext(cmd))
+				histogramAddDistance(&copy_dist_histograms[context], uint(cmd.dist_prefix_)&0x3FF)
+			}
+		}
+	}
+}
diff --git a/vendor/github.com/andybalholm/brotli/http.go b/vendor/github.com/andybalholm/brotli/http.go
new file mode 100644
--- /dev/null
+++ b/vendor/github.com/andybalholm/brotli/http.go
+package brotli
+
+import (
+	"compress/gzip"
+	"io"
+	"net/http"
+	"strings"
+)
+
+// HTTPCompressor chooses a compression method (brotli, gzip, or none) based
+// on the Accept-Encoding header, sets the Content-Encoding header, and
+// returns a WriteCloser that implements that compression. The Close method
+// must be called before the current HTTP handler returns.
+func HTTPCompressor(w http.ResponseWriter, r *http.Request) io.WriteCloser {
+	if w.Header().Get("Vary") == "" {
+		w.Header().Set("Vary", "Accept-Encoding")
+	}
+
+	encoding := negotiateContentEncoding(r, []string{"br", "gzip"})
+	switch encoding {
+	case "br":
+		w.Header().Set("Content-Encoding", "br")
+		return NewWriter(w)
+	case "gzip":
+		w.Header().Set("Content-Encoding", "gzip")
+		return gzip.NewWriter(w)
+	}
+	return nopCloser{w}
+}
+
+// negotiateContentEncoding returns the best offered content encoding for the
+// request's Accept-Encoding header. If two offers match with equal weight,
+// then the offer earlier in the list is preferred. If no offers are
+// acceptable, then "" is returned.
+func negotiateContentEncoding(r *http.Request, offers []string) string {
+	bestOffer := "identity"
+	bestQ := -1.0
+	specs := parseAccept(r.Header, "Accept-Encoding")
+	for _, offer := range offers {
+		for _, spec := range specs {
+			if spec.Q > bestQ &&
+				(spec.Value == "*" || spec.Value == offer) {
+				bestQ = spec.Q
+				bestOffer = offer
+			}
+		}
+	}
+	if bestQ == 0 {
+		bestOffer = ""
+	}
+	return bestOffer
+}
+
+// acceptSpec describes an Accept* header.
+type acceptSpec struct {
+	Value string
+	Q     float64
+}
+
+// parseAccept parses Accept* headers.
+func parseAccept(header http.Header, key string) (specs []acceptSpec) {
+loop:
+	for _, s := range header[key] {
+		for {
+			var spec acceptSpec
+			spec.Value, s = expectTokenSlash(s)
+			if spec.Value == "" {
+				continue loop
+			}
+			spec.Q = 1.0
+			s = skipSpace(s)
+			if strings.HasPrefix(s, ";") {
+				s = skipSpace(s[1:])
+				if !strings.HasPrefix(s, "q=") {
+					continue loop
+				}
+				spec.Q, s = expectQuality(s[2:])
+				if spec.Q < 0.0 {
+					continue loop
+				}
+			}
+			specs = append(specs, spec)
+			s = skipSpace(s)
+			if !strings.HasPrefix(s, ",") {
+				continue loop
+			}
+			s = skipSpace(s[1:])
+		}
+	}
+	return
+}
+
+func skipSpace(s string) (rest string) {
+	i := 0
+	for ; i < len(s); i++ {
+		if octetTypes[s[i]]&isSpace == 0 {
+			break
+		}
+	}
+	return s[i:]
+}
+
+func expectTokenSlash(s string) (token, rest string) {
+	i := 0
+	for ; i < len(s); i++ {
+		b := s[i]
+		if (octetTypes[b]&isToken == 0) && b != '/' {
+			break
+		}
+	}
+	return s[:i], s[i:]
+}
+
+func expectQuality(s string) (q float64, rest string) {
+	switch {
+	case len(s) == 0:
+		return -1, ""
+	case s[0] == '0':
+		q = 0
+	case s[0] == '1':
+		q = 1
+	default:
+		return -1, ""
+	}
+	s = s[1:]
+	if !strings.HasPrefix(s, ".") {
+		return q, s
+	}
+	s = s[1:]
+	i := 0
+	n := 0
+	d := 1
+	for ; i < len(s); i++ {
+		b := s[i]
+		if b < '0' || b > '9' {
+			break
+		}
+		n = n*10 + int(b) - '0'
+		d *= 10
+	}
+	return q + float64(n)/float64(d), s[i:]
+}
+
+// Octet types from RFC 2616.
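+// Each entry records whether the byte is a token character and/or white
+// space under the grammar spelled out in init below. As an illustration
+// (not from the original source): parseAccept over
+// "Accept-Encoding: gzip;q=0.8, br" yields {Value:"gzip", Q:0.8} and
+// {Value:"br", Q:1}, so negotiateContentEncoding(r, []string{"br", "gzip"})
+// returns "br".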
+var octetTypes [256]octetType
+
+type octetType byte
+
+const (
+	isToken octetType = 1 << iota
+	isSpace
+)
+
+func init() {
+	// OCTET      = <any 8-bit sequence of data>
+	// CHAR       = <any US-ASCII character (octets 0 - 127)>
+	// CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+	// CR         = <US-ASCII CR, carriage return (13)>
+	// LF         = <US-ASCII LF, linefeed (10)>
+	// SP         = <US-ASCII SP, space (32)>
+	// HT         = <US-ASCII HT, horizontal-tab (9)>
+	// <">        = <US-ASCII double-quote mark (34)>
+	// CRLF       = CR LF
+	// LWS        = [CRLF] 1*( SP | HT )
+	// TEXT       = <any OCTET except CTLs, but including LWS>
+	// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
+	//              | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
+	// token      = 1*<any CHAR except CTLs or separators>
+	// qdtext     = <any TEXT except <">>
+
+	for c := 0; c < 256; c++ {
+		var t octetType
+		isCtl := c <= 31 || c == 127
+		isChar := 0 <= c && c <= 127
+		isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c))
+		if strings.ContainsRune(" \t\r\n", rune(c)) {
+			t |= isSpace
+		}
+		if isChar && !isCtl && !isSeparator {
+			t |= isToken
+		}
+		octetTypes[c] = t
+	}
+}
diff --git a/vendor/github.com/andybalholm/brotli/huffman.go b/vendor/github.com/andybalholm/brotli/huffman.go
new file mode 100644
index 00000000000..182f3d2a552
--- /dev/null
+++ b/vendor/github.com/andybalholm/brotli/huffman.go
@@ -0,0 +1,653 @@
+package brotli
+
+/* Copyright 2013 Google Inc. All Rights Reserved.
+
+   Distributed under MIT license.
+   See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
+*/
+
+/* Utilities for building Huffman decoding tables. */
+
+const huffmanMaxCodeLength = 15
+
+/* Maximum possible Huffman table size for an alphabet size of (index * 32),
+   max code length 15 and root table bits 8. */
+var kMaxHuffmanTableSize = []uint16{
+	256,
+	402,
+	436,
+	468,
+	500,
+	534,
+	566,
+	598,
+	630,
+	662,
+	694,
+	726,
+	758,
+	790,
+	822,
+	854,
+	886,
+	920,
+	952,
+	984,
+	1016,
+	1048,
+	1080,
+	1112,
+	1144,
+	1176,
+	1208,
+	1240,
+	1272,
+	1304,
+	1336,
+	1368,
+	1400,
+	1432,
+	1464,
+	1496,
+	1528,
+}
+
+/* BROTLI_NUM_BLOCK_LEN_SYMBOLS == 26 */
+const huffmanMaxSize26 = 396
+
+/* BROTLI_MAX_BLOCK_TYPE_SYMBOLS == 258 */
+const huffmanMaxSize258 = 632
+
+/* BROTLI_MAX_CONTEXT_MAP_SYMBOLS == 272 */
+const huffmanMaxSize272 = 646
+
+const huffmanMaxCodeLengthCodeLength = 5
+
+/* Do not create this struct directly - use the ConstructHuffmanCode
+ * constructor below! */
+type huffmanCode struct {
+	bits  byte
+	value uint16
+}
+
+func constructHuffmanCode(bits byte, value uint16) huffmanCode {
+	var h huffmanCode
+	h.bits = bits
+	h.value = value
+	return h
+}
+
+/* Builds Huffman lookup table assuming code lengths are in symbol order. */
+
+/* Builds Huffman lookup table assuming code lengths are in symbol order.
+   Returns size of resulting table. */
+
+/* Builds a simple Huffman table. The |num_symbols| parameter is to be
+   interpreted as follows: 0 means 1 symbol, 1 means 2 symbols,
+   2 means 3 symbols, 3 means 4 symbols with lengths [2, 2, 2, 2],
+   4 means 4 symbols with lengths [1, 2, 3, 3]. */
+
+/* Contains a collection of Huffman trees with the same alphabet size. */
+/* max_symbol is needed due to simple codes since log2(alphabet_size) could be
+   greater than log2(max_symbol).
*/ +type huffmanTreeGroup struct { + htrees [][]huffmanCode + codes []huffmanCode + alphabet_size uint16 + max_symbol uint16 + num_htrees uint16 +} + +const reverseBitsMax = 8 + +const reverseBitsBase = 0 + +var kReverseBits = [1 << reverseBitsMax]byte{ + 0x00, + 0x80, + 0x40, + 0xC0, + 0x20, + 0xA0, + 0x60, + 0xE0, + 0x10, + 0x90, + 0x50, + 0xD0, + 0x30, + 0xB0, + 0x70, + 0xF0, + 0x08, + 0x88, + 0x48, + 0xC8, + 0x28, + 0xA8, + 0x68, + 0xE8, + 0x18, + 0x98, + 0x58, + 0xD8, + 0x38, + 0xB8, + 0x78, + 0xF8, + 0x04, + 0x84, + 0x44, + 0xC4, + 0x24, + 0xA4, + 0x64, + 0xE4, + 0x14, + 0x94, + 0x54, + 0xD4, + 0x34, + 0xB4, + 0x74, + 0xF4, + 0x0C, + 0x8C, + 0x4C, + 0xCC, + 0x2C, + 0xAC, + 0x6C, + 0xEC, + 0x1C, + 0x9C, + 0x5C, + 0xDC, + 0x3C, + 0xBC, + 0x7C, + 0xFC, + 0x02, + 0x82, + 0x42, + 0xC2, + 0x22, + 0xA2, + 0x62, + 0xE2, + 0x12, + 0x92, + 0x52, + 0xD2, + 0x32, + 0xB2, + 0x72, + 0xF2, + 0x0A, + 0x8A, + 0x4A, + 0xCA, + 0x2A, + 0xAA, + 0x6A, + 0xEA, + 0x1A, + 0x9A, + 0x5A, + 0xDA, + 0x3A, + 0xBA, + 0x7A, + 0xFA, + 0x06, + 0x86, + 0x46, + 0xC6, + 0x26, + 0xA6, + 0x66, + 0xE6, + 0x16, + 0x96, + 0x56, + 0xD6, + 0x36, + 0xB6, + 0x76, + 0xF6, + 0x0E, + 0x8E, + 0x4E, + 0xCE, + 0x2E, + 0xAE, + 0x6E, + 0xEE, + 0x1E, + 0x9E, + 0x5E, + 0xDE, + 0x3E, + 0xBE, + 0x7E, + 0xFE, + 0x01, + 0x81, + 0x41, + 0xC1, + 0x21, + 0xA1, + 0x61, + 0xE1, + 0x11, + 0x91, + 0x51, + 0xD1, + 0x31, + 0xB1, + 0x71, + 0xF1, + 0x09, + 0x89, + 0x49, + 0xC9, + 0x29, + 0xA9, + 0x69, + 0xE9, + 0x19, + 0x99, + 0x59, + 0xD9, + 0x39, + 0xB9, + 0x79, + 0xF9, + 0x05, + 0x85, + 0x45, + 0xC5, + 0x25, + 0xA5, + 0x65, + 0xE5, + 0x15, + 0x95, + 0x55, + 0xD5, + 0x35, + 0xB5, + 0x75, + 0xF5, + 0x0D, + 0x8D, + 0x4D, + 0xCD, + 0x2D, + 0xAD, + 0x6D, + 0xED, + 0x1D, + 0x9D, + 0x5D, + 0xDD, + 0x3D, + 0xBD, + 0x7D, + 0xFD, + 0x03, + 0x83, + 0x43, + 0xC3, + 0x23, + 0xA3, + 0x63, + 0xE3, + 0x13, + 0x93, + 0x53, + 0xD3, + 0x33, + 0xB3, + 0x73, + 0xF3, + 0x0B, + 0x8B, + 0x4B, + 0xCB, + 0x2B, + 0xAB, + 0x6B, + 0xEB, + 0x1B, + 0x9B, + 0x5B, + 0xDB, + 0x3B, + 0xBB, + 0x7B, + 0xFB, + 0x07, + 0x87, + 0x47, + 0xC7, + 0x27, + 0xA7, + 0x67, + 0xE7, + 0x17, + 0x97, + 0x57, + 0xD7, + 0x37, + 0xB7, + 0x77, + 0xF7, + 0x0F, + 0x8F, + 0x4F, + 0xCF, + 0x2F, + 0xAF, + 0x6F, + 0xEF, + 0x1F, + 0x9F, + 0x5F, + 0xDF, + 0x3F, + 0xBF, + 0x7F, + 0xFF, +} + +const reverseBitsLowest = (uint64(1) << (reverseBitsMax - 1 + reverseBitsBase)) + +/* Returns reverse(num >> BROTLI_REVERSE_BITS_BASE, BROTLI_REVERSE_BITS_MAX), + where reverse(value, len) is the bit-wise reversal of the len least + significant bits of value. */ +func reverseBits8(num uint64) uint64 { + return uint64(kReverseBits[num]) +} + +/* Stores code in table[0], table[step], table[2*step], ..., table[end] */ +/* Assumes that end is an integer multiple of step */ +func replicateValue(table []huffmanCode, step int, end int, code huffmanCode) { + for { + end -= step + table[end] = code + if end <= 0 { + break + } + } +} + +/* Returns the table width of the next 2nd level table. |count| is the histogram + of bit lengths for the remaining symbols, |len| is the code length of the + next processed symbol. 
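+
+   Worked example (illustrative, not from the original source): with
+   root_bits = 8, len = 9, count[9] = 1 and count[10] = 2, left starts
+   at 2, drops to 1, doubles to 2 at len = 10, and is then consumed,
+   so the next 2nd level table is 10 - 8 = 2 bits (four entries) wide.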
*/ +func nextTableBitSize(count []uint16, len int, root_bits int) int { + var left int = 1 << uint(len-root_bits) + for len < huffmanMaxCodeLength { + left -= int(count[len]) + if left <= 0 { + break + } + len++ + left <<= 1 + } + + return len - root_bits +} + +func buildCodeLengthsHuffmanTable(table []huffmanCode, code_lengths []byte, count []uint16) { + var code huffmanCode /* current table entry */ /* symbol index in original or sorted table */ /* prefix code */ /* prefix code addend */ /* step size to replicate values in current table */ /* size of current table */ /* symbols sorted by code length */ + var symbol int + var key uint64 + var key_step uint64 + var step int + var table_size int + var sorted [codeLengthCodes]int + var offset [huffmanMaxCodeLengthCodeLength + 1]int + var bits int + var bits_count int + /* offsets in sorted table for each length */ + assert(huffmanMaxCodeLengthCodeLength <= reverseBitsMax) + + /* Generate offsets into sorted symbol table by code length. */ + symbol = -1 + + bits = 1 + var i int + for i = 0; i < huffmanMaxCodeLengthCodeLength; i++ { + symbol += int(count[bits]) + offset[bits] = symbol + bits++ + } + + /* Symbols with code length 0 are placed after all other symbols. */ + offset[0] = codeLengthCodes - 1 + + /* Sort symbols by length, by symbol order within each length. */ + symbol = codeLengthCodes + + for { + var i int + for i = 0; i < 6; i++ { + symbol-- + sorted[offset[code_lengths[symbol]]] = symbol + offset[code_lengths[symbol]]-- + } + if symbol == 0 { + break + } + } + + table_size = 1 << huffmanMaxCodeLengthCodeLength + + /* Special case: all symbols but one have 0 code length. */ + if offset[0] == 0 { + code = constructHuffmanCode(0, uint16(sorted[0])) + for key = 0; key < uint64(table_size); key++ { + table[key] = code + } + + return + } + + /* Fill in table. */ + key = 0 + + key_step = reverseBitsLowest + symbol = 0 + bits = 1 + step = 2 + for { + for bits_count = int(count[bits]); bits_count != 0; bits_count-- { + code = constructHuffmanCode(byte(bits), uint16(sorted[symbol])) + symbol++ + replicateValue(table[reverseBits8(key):], step, table_size, code) + key += key_step + } + + step <<= 1 + key_step >>= 1 + bits++ + if bits > huffmanMaxCodeLengthCodeLength { + break + } + } +} + +func buildHuffmanTable(root_table []huffmanCode, root_bits int, symbol_lists symbolList, count []uint16) uint32 { + var code huffmanCode /* current table entry */ /* next available space in table */ /* current code length */ /* symbol index in original or sorted table */ /* prefix code */ /* prefix code addend */ /* 2nd level table prefix code */ /* 2nd level table prefix code addend */ /* step size to replicate values in current table */ /* key length of current table */ /* size of current table */ /* sum of root table size and 2nd level table sizes */ + var table []huffmanCode + var len int + var symbol int + var key uint64 + var key_step uint64 + var sub_key uint64 + var sub_key_step uint64 + var step int + var table_bits int + var table_size int + var total_size int + var max_length int = -1 + var bits int + var bits_count int + + assert(root_bits <= reverseBitsMax) + assert(huffmanMaxCodeLength-root_bits <= reverseBitsMax) + + for symbolListGet(symbol_lists, max_length) == 0xFFFF { + max_length-- + } + max_length += huffmanMaxCodeLength + 1 + + table = root_table + table_bits = root_bits + table_size = 1 << uint(table_bits) + total_size = table_size + + /* Fill in the root table. 
Reduce the table size to if possible, + and create the repetitions by memcpy. */ + if table_bits > max_length { + table_bits = max_length + table_size = 1 << uint(table_bits) + } + + key = 0 + key_step = reverseBitsLowest + bits = 1 + step = 2 + for { + symbol = bits - (huffmanMaxCodeLength + 1) + for bits_count = int(count[bits]); bits_count != 0; bits_count-- { + symbol = int(symbolListGet(symbol_lists, symbol)) + code = constructHuffmanCode(byte(bits), uint16(symbol)) + replicateValue(table[reverseBits8(key):], step, table_size, code) + key += key_step + } + + step <<= 1 + key_step >>= 1 + bits++ + if bits > table_bits { + break + } + } + + /* If root_bits != table_bits then replicate to fill the remaining slots. */ + for total_size != table_size { + copy(table[table_size:], table[:uint(table_size)]) + table_size <<= 1 + } + + /* Fill in 2nd level tables and add pointers to root table. */ + key_step = reverseBitsLowest >> uint(root_bits-1) + + sub_key = reverseBitsLowest << 1 + sub_key_step = reverseBitsLowest + len = root_bits + 1 + step = 2 + for ; len <= max_length; len++ { + symbol = len - (huffmanMaxCodeLength + 1) + for ; count[len] != 0; count[len]-- { + if sub_key == reverseBitsLowest<<1 { + table = table[table_size:] + table_bits = nextTableBitSize(count, int(len), root_bits) + table_size = 1 << uint(table_bits) + total_size += table_size + sub_key = reverseBits8(key) + key += key_step + root_table[sub_key] = constructHuffmanCode(byte(table_bits+root_bits), uint16(uint64(uint(-cap(table)+cap(root_table)))-sub_key)) + sub_key = 0 + } + + symbol = int(symbolListGet(symbol_lists, symbol)) + code = constructHuffmanCode(byte(len-root_bits), uint16(symbol)) + replicateValue(table[reverseBits8(sub_key):], step, table_size, code) + sub_key += sub_key_step + } + + step <<= 1 + sub_key_step >>= 1 + } + + return uint32(total_size) +} + +func buildSimpleHuffmanTable(table []huffmanCode, root_bits int, val []uint16, num_symbols uint32) uint32 { + var table_size uint32 = 1 + var goal_size uint32 = 1 << uint(root_bits) + switch num_symbols { + case 0: + table[0] = constructHuffmanCode(0, val[0]) + + case 1: + if val[1] > val[0] { + table[0] = constructHuffmanCode(1, val[0]) + table[1] = constructHuffmanCode(1, val[1]) + } else { + table[0] = constructHuffmanCode(1, val[1]) + table[1] = constructHuffmanCode(1, val[0]) + } + + table_size = 2 + + case 2: + table[0] = constructHuffmanCode(1, val[0]) + table[2] = constructHuffmanCode(1, val[0]) + if val[2] > val[1] { + table[1] = constructHuffmanCode(2, val[1]) + table[3] = constructHuffmanCode(2, val[2]) + } else { + table[1] = constructHuffmanCode(2, val[2]) + table[3] = constructHuffmanCode(2, val[1]) + } + + table_size = 4 + + case 3: + var i int + var k int + for i = 0; i < 3; i++ { + for k = i + 1; k < 4; k++ { + if val[k] < val[i] { + var t uint16 = val[k] + val[k] = val[i] + val[i] = t + } + } + } + + table[0] = constructHuffmanCode(2, val[0]) + table[2] = constructHuffmanCode(2, val[1]) + table[1] = constructHuffmanCode(2, val[2]) + table[3] = constructHuffmanCode(2, val[3]) + table_size = 4 + + case 4: + if val[3] < val[2] { + var t uint16 = val[3] + val[3] = val[2] + val[2] = t + } + + table[0] = constructHuffmanCode(1, val[0]) + table[1] = constructHuffmanCode(2, val[1]) + table[2] = constructHuffmanCode(1, val[0]) + table[3] = constructHuffmanCode(3, val[2]) + table[4] = constructHuffmanCode(1, val[0]) + table[5] = constructHuffmanCode(2, val[1]) + table[6] = constructHuffmanCode(1, val[0]) + table[7] = constructHuffmanCode(3, val[3]) 
+ table_size = 8 + } + + for table_size != goal_size { + copy(table[table_size:], table[:uint(table_size)]) + table_size <<= 1 + } + + return goal_size +} diff --git a/vendor/github.com/andybalholm/brotli/literal_cost.go b/vendor/github.com/andybalholm/brotli/literal_cost.go new file mode 100644 index 00000000000..5a9ace94ee0 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/literal_cost.go @@ -0,0 +1,182 @@ +package brotli + +func utf8Position(last uint, c uint, clamp uint) uint { + if c < 128 { + return 0 /* Next one is the 'Byte 1' again. */ + } else if c >= 192 { /* Next one is the 'Byte 2' of utf-8 encoding. */ + return brotli_min_size_t(1, clamp) + } else { + /* Let's decide over the last byte if this ends the sequence. */ + if last < 0xE0 { + return 0 /* Completed two or three byte coding. */ /* Next one is the 'Byte 3' of utf-8 encoding. */ + } else { + return brotli_min_size_t(2, clamp) + } + } +} + +func decideMultiByteStatsLevel(pos uint, len uint, mask uint, data []byte) uint { + var counts = [3]uint{0} /* should be 2, but 1 compresses better. */ + var max_utf8 uint = 1 + var last_c uint = 0 + var i uint + for i = 0; i < len; i++ { + var c uint = uint(data[(pos+i)&mask]) + counts[utf8Position(last_c, c, 2)]++ + last_c = c + } + + if counts[2] < 500 { + max_utf8 = 1 + } + + if counts[1]+counts[2] < 25 { + max_utf8 = 0 + } + + return max_utf8 +} + +func estimateBitCostsForLiteralsUTF8(pos uint, len uint, mask uint, data []byte, cost []float32) { + var max_utf8 uint = decideMultiByteStatsLevel(pos, uint(len), mask, data) + /* Bootstrap histograms. */ + var histogram = [3][256]uint{[256]uint{0}} + var window_half uint = 495 + var in_window uint = brotli_min_size_t(window_half, uint(len)) + var in_window_utf8 = [3]uint{0} + /* max_utf8 is 0 (normal ASCII single byte modeling), + 1 (for 2-byte UTF-8 modeling), or 2 (for 3-byte UTF-8 modeling). */ + + var i uint + { + var last_c uint = 0 + var utf8_pos uint = 0 + for i = 0; i < in_window; i++ { + var c uint = uint(data[(pos+i)&mask]) + histogram[utf8_pos][c]++ + in_window_utf8[utf8_pos]++ + utf8_pos = utf8Position(last_c, c, max_utf8) + last_c = c + } + } + + /* Compute bit costs with sliding window. */ + for i = 0; i < len; i++ { + if i >= window_half { + var c uint + var last_c uint + if i < window_half+1 { + c = 0 + } else { + c = uint(data[(pos+i-window_half-1)&mask]) + } + if i < window_half+2 { + last_c = 0 + } else { + last_c = uint(data[(pos+i-window_half-2)&mask]) + } + /* Remove a byte in the past. */ + + var utf8_pos2 uint = utf8Position(last_c, c, max_utf8) + histogram[utf8_pos2][data[(pos+i-window_half)&mask]]-- + in_window_utf8[utf8_pos2]-- + } + + if i+window_half < len { + var c uint = uint(data[(pos+i+window_half-1)&mask]) + var last_c uint = uint(data[(pos+i+window_half-2)&mask]) + /* Add a byte in the future. 
*/ + + var utf8_pos2 uint = utf8Position(last_c, c, max_utf8) + histogram[utf8_pos2][data[(pos+i+window_half)&mask]]++ + in_window_utf8[utf8_pos2]++ + } + { + var c uint + var last_c uint + if i < 1 { + c = 0 + } else { + c = uint(data[(pos+i-1)&mask]) + } + if i < 2 { + last_c = 0 + } else { + last_c = uint(data[(pos+i-2)&mask]) + } + var utf8_pos uint = utf8Position(last_c, c, max_utf8) + var masked_pos uint = (pos + i) & mask + var histo uint = histogram[utf8_pos][data[masked_pos]] + var lit_cost float64 + if histo == 0 { + histo = 1 + } + + lit_cost = fastLog2(in_window_utf8[utf8_pos]) - fastLog2(histo) + lit_cost += 0.02905 + if lit_cost < 1.0 { + lit_cost *= 0.5 + lit_cost += 0.5 + } + + /* Make the first bytes more expensive -- seems to help, not sure why. + Perhaps because the entropy source is changing its properties + rapidly in the beginning of the file, perhaps because the beginning + of the data is a statistical "anomaly". */ + if i < 2000 { + lit_cost += 0.7 - (float64(2000-i) / 2000.0 * 0.35) + } + + cost[i] = float32(lit_cost) + } + } +} + +func estimateBitCostsForLiterals(pos uint, len uint, mask uint, data []byte, cost []float32) { + if isMostlyUTF8(data, pos, mask, uint(len), kMinUTF8Ratio) { + estimateBitCostsForLiteralsUTF8(pos, uint(len), mask, data, cost) + return + } else { + var histogram = [256]uint{0} + var window_half uint = 2000 + var in_window uint = brotli_min_size_t(window_half, uint(len)) + var i uint + /* Bootstrap histogram. */ + for i = 0; i < in_window; i++ { + histogram[data[(pos+i)&mask]]++ + } + + /* Compute bit costs with sliding window. */ + for i = 0; i < len; i++ { + var histo uint + if i >= window_half { + /* Remove a byte in the past. */ + histogram[data[(pos+i-window_half)&mask]]-- + + in_window-- + } + + if i+window_half < len { + /* Add a byte in the future. */ + histogram[data[(pos+i+window_half)&mask]]++ + + in_window++ + } + + histo = histogram[data[(pos+i)&mask]] + if histo == 0 { + histo = 1 + } + { + var lit_cost float64 = fastLog2(in_window) - fastLog2(histo) + lit_cost += 0.029 + if lit_cost < 1.0 { + lit_cost *= 0.5 + lit_cost += 0.5 + } + + cost[i] = float32(lit_cost) + } + } + } +} diff --git a/vendor/github.com/andybalholm/brotli/memory.go b/vendor/github.com/andybalholm/brotli/memory.go new file mode 100644 index 00000000000..a07c7050a07 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/memory.go @@ -0,0 +1,66 @@ +package brotli + +/* Copyright 2016 Google Inc. All Rights Reserved. + + Distributed under MIT license. 
+ See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* +Dynamically grows array capacity to at least the requested size +T: data type +A: array +C: capacity +R: requested size +*/ +func brotli_ensure_capacity_uint8_t(a *[]byte, c *uint, r uint) { + if *c < r { + var new_size uint = *c + if new_size == 0 { + new_size = r + } + + for new_size < r { + new_size *= 2 + } + + if cap(*a) < int(new_size) { + var new_array []byte = make([]byte, new_size) + if *c != 0 { + copy(new_array, (*a)[:*c]) + } + + *a = new_array + } else { + *a = (*a)[:new_size] + } + + *c = new_size + } +} + +func brotli_ensure_capacity_uint32_t(a *[]uint32, c *uint, r uint) { + var new_array []uint32 + if *c < r { + var new_size uint = *c + if new_size == 0 { + new_size = r + } + + for new_size < r { + new_size *= 2 + } + + if cap(*a) < int(new_size) { + new_array = make([]uint32, new_size) + if *c != 0 { + copy(new_array, (*a)[:*c]) + } + + *a = new_array + } else { + *a = (*a)[:new_size] + } + *c = new_size + } +} diff --git a/vendor/github.com/andybalholm/brotli/metablock.go b/vendor/github.com/andybalholm/brotli/metablock.go new file mode 100644 index 00000000000..3014df8cdf1 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/metablock.go @@ -0,0 +1,574 @@ +package brotli + +import ( + "sync" +) + +/* Copyright 2014 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Algorithms for distributing the literals and commands of a metablock between + block types and contexts. */ + +type metaBlockSplit struct { + literal_split blockSplit + command_split blockSplit + distance_split blockSplit + literal_context_map []uint32 + literal_context_map_size uint + distance_context_map []uint32 + distance_context_map_size uint + literal_histograms []histogramLiteral + literal_histograms_size uint + command_histograms []histogramCommand + command_histograms_size uint + distance_histograms []histogramDistance + distance_histograms_size uint +} + +var metaBlockPool sync.Pool + +func getMetaBlockSplit() *metaBlockSplit { + mb, _ := metaBlockPool.Get().(*metaBlockSplit) + + if mb == nil { + mb = &metaBlockSplit{} + } else { + initBlockSplit(&mb.literal_split) + initBlockSplit(&mb.command_split) + initBlockSplit(&mb.distance_split) + mb.literal_context_map = mb.literal_context_map[:0] + mb.literal_context_map_size = 0 + mb.distance_context_map = mb.distance_context_map[:0] + mb.distance_context_map_size = 0 + mb.literal_histograms = mb.literal_histograms[:0] + mb.command_histograms = mb.command_histograms[:0] + mb.distance_histograms = mb.distance_histograms[:0] + } + return mb +} + +func freeMetaBlockSplit(mb *metaBlockSplit) { + metaBlockPool.Put(mb) +} + +func initDistanceParams(params *encoderParams, npostfix uint32, ndirect uint32) { + var dist_params *distanceParams = ¶ms.dist + var alphabet_size uint32 + var max_distance uint32 + + dist_params.distance_postfix_bits = npostfix + dist_params.num_direct_distance_codes = ndirect + + alphabet_size = uint32(distanceAlphabetSize(uint(npostfix), uint(ndirect), maxDistanceBits)) + max_distance = ndirect + (1 << (maxDistanceBits + npostfix + 2)) - (1 << (npostfix + 2)) + + if params.large_window { + var bound = [maxNpostfix + 1]uint32{0, 4, 12, 28} + var postfix uint32 = 1 << npostfix + alphabet_size = uint32(distanceAlphabetSize(uint(npostfix), uint(ndirect), largeMaxDistanceBits)) + + /* The maximum distance is set so that no distance symbol used can 
encode + a distance larger than BROTLI_MAX_ALLOWED_DISTANCE with all + its extra bits set. */ + if ndirect < bound[npostfix] { + max_distance = maxAllowedDistance - (bound[npostfix] - ndirect) + } else if ndirect >= bound[npostfix]+postfix { + max_distance = (3 << 29) - 4 + (ndirect - bound[npostfix]) + } else { + max_distance = maxAllowedDistance + } + } + + dist_params.alphabet_size = alphabet_size + dist_params.max_distance = uint(max_distance) +} + +func recomputeDistancePrefixes(cmds []command, orig_params *distanceParams, new_params *distanceParams) { + if orig_params.distance_postfix_bits == new_params.distance_postfix_bits && orig_params.num_direct_distance_codes == new_params.num_direct_distance_codes { + return + } + + for i := range cmds { + var cmd *command = &cmds[i] + if commandCopyLen(cmd) != 0 && cmd.cmd_prefix_ >= 128 { + prefixEncodeCopyDistance(uint(commandRestoreDistanceCode(cmd, orig_params)), uint(new_params.num_direct_distance_codes), uint(new_params.distance_postfix_bits), &cmd.dist_prefix_, &cmd.dist_extra_) + } + } +} + +func computeDistanceCost(cmds []command, orig_params *distanceParams, new_params *distanceParams, cost *float64) bool { + var equal_params bool = false + var dist_prefix uint16 + var dist_extra uint32 + var extra_bits float64 = 0.0 + var histo histogramDistance + histogramClearDistance(&histo) + + if orig_params.distance_postfix_bits == new_params.distance_postfix_bits && orig_params.num_direct_distance_codes == new_params.num_direct_distance_codes { + equal_params = true + } + + for i := range cmds { + cmd := &cmds[i] + if commandCopyLen(cmd) != 0 && cmd.cmd_prefix_ >= 128 { + if equal_params { + dist_prefix = cmd.dist_prefix_ + } else { + var distance uint32 = commandRestoreDistanceCode(cmd, orig_params) + if distance > uint32(new_params.max_distance) { + return false + } + + prefixEncodeCopyDistance(uint(distance), uint(new_params.num_direct_distance_codes), uint(new_params.distance_postfix_bits), &dist_prefix, &dist_extra) + } + + histogramAddDistance(&histo, uint(dist_prefix)&0x3FF) + extra_bits += float64(dist_prefix >> 10) + } + } + + *cost = populationCostDistance(&histo) + extra_bits + return true +} + +var buildMetaBlock_kMaxNumberOfHistograms uint = 256 + +func buildMetaBlock(ringbuffer []byte, pos uint, mask uint, params *encoderParams, prev_byte byte, prev_byte2 byte, cmds []command, literal_context_mode int, mb *metaBlockSplit) { + var distance_histograms []histogramDistance + var literal_histograms []histogramLiteral + var literal_context_modes []int = nil + var literal_histograms_size uint + var distance_histograms_size uint + var i uint + var literal_context_multiplier uint = 1 + var npostfix uint32 + var ndirect_msb uint32 = 0 + var check_orig bool = true + var best_dist_cost float64 = 1e99 + var orig_params encoderParams = *params + /* Histogram ids need to fit in one byte. 
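+	   (This is why buildMetaBlock_kMaxNumberOfHistograms above is 256:
+	   the clustered context maps refer to histograms by single-byte ids.)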
*/ + + var new_params encoderParams = *params + + for npostfix = 0; npostfix <= maxNpostfix; npostfix++ { + for ; ndirect_msb < 16; ndirect_msb++ { + var ndirect uint32 = ndirect_msb << npostfix + var skip bool + var dist_cost float64 + initDistanceParams(&new_params, npostfix, ndirect) + if npostfix == orig_params.dist.distance_postfix_bits && ndirect == orig_params.dist.num_direct_distance_codes { + check_orig = false + } + + skip = !computeDistanceCost(cmds, &orig_params.dist, &new_params.dist, &dist_cost) + if skip || (dist_cost > best_dist_cost) { + break + } + + best_dist_cost = dist_cost + params.dist = new_params.dist + } + + if ndirect_msb > 0 { + ndirect_msb-- + } + ndirect_msb /= 2 + } + + if check_orig { + var dist_cost float64 + computeDistanceCost(cmds, &orig_params.dist, &orig_params.dist, &dist_cost) + if dist_cost < best_dist_cost { + /* NB: currently unused; uncomment when more param tuning is added. */ + /* best_dist_cost = dist_cost; */ + params.dist = orig_params.dist + } + } + + recomputeDistancePrefixes(cmds, &orig_params.dist, ¶ms.dist) + + splitBlock(cmds, ringbuffer, pos, mask, params, &mb.literal_split, &mb.command_split, &mb.distance_split) + + if !params.disable_literal_context_modeling { + literal_context_multiplier = 1 << literalContextBits + literal_context_modes = make([]int, (mb.literal_split.num_types)) + for i = 0; i < mb.literal_split.num_types; i++ { + literal_context_modes[i] = literal_context_mode + } + } + + literal_histograms_size = mb.literal_split.num_types * literal_context_multiplier + literal_histograms = make([]histogramLiteral, literal_histograms_size) + clearHistogramsLiteral(literal_histograms, literal_histograms_size) + + distance_histograms_size = mb.distance_split.num_types << distanceContextBits + distance_histograms = make([]histogramDistance, distance_histograms_size) + clearHistogramsDistance(distance_histograms, distance_histograms_size) + + mb.command_histograms_size = mb.command_split.num_types + if cap(mb.command_histograms) < int(mb.command_histograms_size) { + mb.command_histograms = make([]histogramCommand, (mb.command_histograms_size)) + } else { + mb.command_histograms = mb.command_histograms[:mb.command_histograms_size] + } + clearHistogramsCommand(mb.command_histograms, mb.command_histograms_size) + + buildHistogramsWithContext(cmds, &mb.literal_split, &mb.command_split, &mb.distance_split, ringbuffer, pos, mask, prev_byte, prev_byte2, literal_context_modes, literal_histograms, mb.command_histograms, distance_histograms) + literal_context_modes = nil + + mb.literal_context_map_size = mb.literal_split.num_types << literalContextBits + if cap(mb.literal_context_map) < int(mb.literal_context_map_size) { + mb.literal_context_map = make([]uint32, (mb.literal_context_map_size)) + } else { + mb.literal_context_map = mb.literal_context_map[:mb.literal_context_map_size] + } + + mb.literal_histograms_size = mb.literal_context_map_size + if cap(mb.literal_histograms) < int(mb.literal_histograms_size) { + mb.literal_histograms = make([]histogramLiteral, (mb.literal_histograms_size)) + } else { + mb.literal_histograms = mb.literal_histograms[:mb.literal_histograms_size] + } + + clusterHistogramsLiteral(literal_histograms, literal_histograms_size, buildMetaBlock_kMaxNumberOfHistograms, mb.literal_histograms, &mb.literal_histograms_size, mb.literal_context_map) + literal_histograms = nil + + if params.disable_literal_context_modeling { + /* Distribute assignment to all contexts. 
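+		   (With context modeling disabled, literal_context_multiplier is 1,
+		   so every one of the 1<<literalContextBits contexts of block type i
+		   is mapped to the same histogram i.)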
*/
+		for i = mb.literal_split.num_types; i != 0; {
+			var j uint = 0
+			i--
+			for ; j < 1<<literalContextBits; j++ {
+				mb.literal_context_map[(i<<literalContextBits)+j] = uint32(i * literal_context_multiplier)
+			}
+		}
+	}
+
+	mb.distance_context_map_size = mb.distance_split.num_types << distanceContextBits
+	if cap(mb.distance_context_map) < int(mb.distance_context_map_size) {
+		mb.distance_context_map = make([]uint32, (mb.distance_context_map_size))
+	} else {
+		mb.distance_context_map = mb.distance_context_map[:mb.distance_context_map_size]
+	}
+
+	mb.distance_histograms_size = mb.distance_context_map_size
+	if cap(mb.distance_histograms) < int(mb.distance_histograms_size) {
+		mb.distance_histograms = make([]histogramDistance, (mb.distance_histograms_size))
+	} else {
+		mb.distance_histograms = mb.distance_histograms[:mb.distance_histograms_size]
+	}
+
+	clusterHistogramsDistance(distance_histograms, distance_histograms_size, buildMetaBlock_kMaxNumberOfHistograms, mb.distance_histograms, &mb.distance_histograms_size, mb.distance_context_map)
+	distance_histograms = nil
+}
+
+const maxStaticContexts = 13
+
+/* Greedy block splitter for one block category (literal, command or distance).
+   Gathers histograms for all context buckets. */
+type contextBlockSplitter struct {
+	alphabet_size_     uint
+	num_contexts_      uint
+	max_block_types_   uint
+	min_block_size_    uint
+	split_threshold_   float64
+	num_blocks_        uint
+	split_             *blockSplit
+	histograms_        []histogramLiteral
+	histograms_size_   *uint
+	target_block_size_ uint
+	block_size_        uint
+	curr_histogram_ix_ uint
+	last_histogram_ix_ [2]uint
+	last_entropy_      [2 * maxStaticContexts]float64
+	merge_last_count_  uint
+}
+
+func initContextBlockSplitter(self *contextBlockSplitter, alphabet_size uint, num_contexts uint, min_block_size uint, split_threshold float64, num_symbols uint, split *blockSplit, histograms *[]histogramLiteral, histograms_size *uint) {
+	var max_num_blocks uint = num_symbols/min_block_size + 1
+	var max_num_types uint
+	assert(num_contexts <= maxStaticContexts)
+
+	self.alphabet_size_ = alphabet_size
+	self.num_contexts_ = num_contexts
+	self.max_block_types_ = maxNumberOfBlockTypes / num_contexts
+	self.min_block_size_ = min_block_size
+	self.split_threshold_ = split_threshold
+	self.num_blocks_ = 0
+	self.split_ = split
+	self.histograms_size_ = histograms_size
+	self.target_block_size_ = min_block_size
+	self.block_size_ = 0
+	self.curr_histogram_ix_ = 0
+	self.merge_last_count_ = 0
+
+	/* We have to allocate one more histogram than the maximum number of block
+	   types for the current histogram when the meta-block is too big. */
+	max_num_types = brotli_min_size_t(max_num_blocks, self.max_block_types_+1)
+
+	brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, max_num_blocks)
+	brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, max_num_blocks)
+	split.num_blocks = max_num_blocks
+	*histograms_size = max_num_types * num_contexts
+	if histograms == nil || cap(*histograms) < int(*histograms_size) {
+		*histograms = make([]histogramLiteral, (*histograms_size))
+	} else {
+		*histograms = (*histograms)[:*histograms_size]
+	}
+	self.histograms_ = *histograms
+
+	/* Clear only current histogram. */
+	clearHistogramsLiteral(self.histograms_[0:], num_contexts)
+
+	self.last_histogram_ix_[1] = 0
+	self.last_histogram_ix_[0] = self.last_histogram_ix_[1]
+}
+
+/* Does either of three things:
+   (1) emits the current block with a new block type;
+   (2) emits the current block with the type of the second last block;
+   (3) merges the current block with the last block. */
+func contextBlockSplitterFinishBlock(self *contextBlockSplitter, is_final bool) {
+	var split *blockSplit = self.split_
+	var num_contexts uint = self.num_contexts_
+	var last_entropy []float64 = self.last_entropy_[:]
+	var histograms []histogramLiteral = self.histograms_
+
+	if self.block_size_ < self.min_block_size_ {
+		self.block_size_ = self.min_block_size_
+	}
+
+	if self.num_blocks_ == 0 {
+		var i uint
+
+		/* Create first block. */
+		split.lengths[0] = uint32(self.block_size_)
+
+		split.types[0] = 0
+
+		for i = 0; i < num_contexts; i++ {
+			last_entropy[i] = bitsEntropy(histograms[i].data_[:], self.alphabet_size_)
+			last_entropy[num_contexts+i] = last_entropy[i]
+		}
+
+		self.num_blocks_++
+		split.num_types++
+		self.curr_histogram_ix_ += num_contexts
+		if self.curr_histogram_ix_ < *self.histograms_size_ {
+			clearHistogramsLiteral(self.histograms_[self.curr_histogram_ix_:], self.num_contexts_)
+		}
+
+		self.block_size_ = 0
+	} else if self.block_size_ > 0 {
+		var entropy [maxStaticContexts]float64
+		var combined_histo []histogramLiteral = make([]histogramLiteral, (2 * num_contexts))
+		var combined_entropy [2 * maxStaticContexts]float64
+		var diff = [2]float64{0.0}
+		/* Try merging the set of histograms for the current block type with the
+		   respective set of histograms for the last and second last block types.
+		   Decide over the split based on the total reduction of entropy across
+		   all contexts. */
+
+		var i uint
+		for i = 0; i < num_contexts; i++ {
+			var curr_histo_ix uint = self.curr_histogram_ix_ + i
+			var j uint
+			entropy[i] = bitsEntropy(histograms[curr_histo_ix].data_[:], self.alphabet_size_)
+			for j = 0; j < 2; j++ {
+				var jx uint = j*num_contexts + i
+				var last_histogram_ix uint = self.last_histogram_ix_[j] + i
+				combined_histo[jx] = histograms[curr_histo_ix]
+				histogramAddHistogramLiteral(&combined_histo[jx], &histograms[last_histogram_ix])
+				combined_entropy[jx] = bitsEntropy(combined_histo[jx].data_[0:], self.alphabet_size_)
+				diff[j] += combined_entropy[jx] - entropy[i] - last_entropy[jx]
+			}
+		}
+
+		if split.num_types < self.max_block_types_ && diff[0] > self.split_threshold_ && diff[1] > self.split_threshold_ {
+			/* Create new block. */
+			split.lengths[self.num_blocks_] = uint32(self.block_size_)
+
+			split.types[self.num_blocks_] = byte(split.num_types)
+			self.last_histogram_ix_[1] = self.last_histogram_ix_[0]
+			self.last_histogram_ix_[0] = split.num_types * num_contexts
+			for i = 0; i < num_contexts; i++ {
+				last_entropy[num_contexts+i] = last_entropy[i]
+				last_entropy[i] = entropy[i]
+			}
+
+			self.num_blocks_++
+			split.num_types++
+			self.curr_histogram_ix_ += num_contexts
+			if self.curr_histogram_ix_ < *self.histograms_size_ {
+				clearHistogramsLiteral(self.histograms_[self.curr_histogram_ix_:], self.num_contexts_)
+			}
+
+			self.block_size_ = 0
+			self.merge_last_count_ = 0
+			self.target_block_size_ = self.min_block_size_
+		} else if diff[1] < diff[0]-20.0 {
+			split.lengths[self.num_blocks_] = uint32(self.block_size_)
+			split.types[self.num_blocks_] = split.types[self.num_blocks_-2]
+			/* Combine this block with second last block. */
+
+			var tmp uint = self.last_histogram_ix_[0]
+			self.last_histogram_ix_[0] = self.last_histogram_ix_[1]
+			self.last_histogram_ix_[1] = tmp
+			for i = 0; i < num_contexts; i++ {
+				histograms[self.last_histogram_ix_[0]+i] = combined_histo[num_contexts+i]
+				last_entropy[num_contexts+i] = last_entropy[i]
+				last_entropy[i] = combined_entropy[num_contexts+i]
+				histogramClearLiteral(&histograms[self.curr_histogram_ix_+i])
+			}
+
+			self.num_blocks_++
+			self.block_size_ = 0
+			self.merge_last_count_ = 0
+			self.target_block_size_ = self.min_block_size_
+		} else {
+			/* Combine this block with last block. */
+			split.lengths[self.num_blocks_-1] += uint32(self.block_size_)
+
+			for i = 0; i < num_contexts; i++ {
+				histograms[self.last_histogram_ix_[0]+i] = combined_histo[i]
+				last_entropy[i] = combined_entropy[i]
+				if split.num_types == 1 {
+					last_entropy[num_contexts+i] = last_entropy[i]
+				}
+
+				histogramClearLiteral(&histograms[self.curr_histogram_ix_+i])
+			}
+
+			self.block_size_ = 0
+			self.merge_last_count_++
+			if self.merge_last_count_ > 1 {
+				self.target_block_size_ += self.min_block_size_
+			}
+		}
+
+		combined_histo = nil
+	}
+
+	if is_final {
+		*self.histograms_size_ = split.num_types * num_contexts
+		split.num_blocks = self.num_blocks_
+	}
+}
+
+/* Adds the next symbol to the current block type and context. When the
+   current block reaches the target size, decides on merging the block. */
+func contextBlockSplitterAddSymbol(self *contextBlockSplitter, symbol uint, context uint) {
+	histogramAddLiteral(&self.histograms_[self.curr_histogram_ix_+context], symbol)
+	self.block_size_++
+	if self.block_size_ == self.target_block_size_ {
+		contextBlockSplitterFinishBlock(self, false) /* is_final = */
+	}
+}
+
+func mapStaticContexts(num_contexts uint, static_context_map []uint32, mb *metaBlockSplit) {
+	var i uint
+	mb.literal_context_map_size = mb.literal_split.num_types << literalContextBits
+	if cap(mb.literal_context_map) < int(mb.literal_context_map_size) {
+		mb.literal_context_map = make([]uint32, (mb.literal_context_map_size))
+	} else {
+		mb.literal_context_map = mb.literal_context_map[:mb.literal_context_map_size]
+	}
+
+	for i = 0; i < mb.literal_split.num_types; i++ {
+		var offset uint32 = uint32(i * num_contexts)
+		var j uint
+		for j = 0; j < 1<<literalContextBits; j++ {
+			mb.literal_context_map[(i<<literalContextBits)+j] = offset + static_context_map[j]
+		}
+	}
+}
+
+func buildMetaBlockGreedyInternal(ringbuffer []byte, pos uint, mask uint, prev_byte byte, prev_byte2 byte, literal_context_lut contextLUT, num_contexts uint, static_context_map []uint32, commands []command, mb *metaBlockSplit) {
+	var lit_blocks struct {
+		plain blockSplitterLiteral
+		ctx   contextBlockSplitter
+	}
+	var cmd_blocks blockSplitterCommand
+	var dist_blocks blockSplitterDistance
+	var num_literals uint = 0
+	for i := range commands {
+		num_literals += uint(commands[i].insert_len_)
+	}
+
+	if num_contexts == 1 {
+		initBlockSplitterLiteral(&lit_blocks.plain, 256, 512, 400.0, num_literals, &mb.literal_split, &mb.literal_histograms, &mb.literal_histograms_size)
+	} else {
+		initContextBlockSplitter(&lit_blocks.ctx, 256, num_contexts, 512, 400.0, num_literals, &mb.literal_split, &mb.literal_histograms, &mb.literal_histograms_size)
+	}
+
+	initBlockSplitterCommand(&cmd_blocks, numCommandSymbols, 1024, 500.0, uint(len(commands)), &mb.command_split, &mb.command_histograms, &mb.command_histograms_size)
+	initBlockSplitterDistance(&dist_blocks, 64, 512, 100.0, uint(len(commands)), &mb.distance_split, &mb.distance_histograms, &mb.distance_histograms_size)
+
+	for i := range commands {
+		var cmd *command = &commands[i]
+		var j uint
+		blockSplitterAddSymbolCommand(&cmd_blocks, uint(cmd.cmd_prefix_))
+		for j = uint(cmd.insert_len_); j != 0; j-- {
+			var literal byte = ringbuffer[pos&mask]
+			if num_contexts == 1 {
+				blockSplitterAddSymbolLiteral(&lit_blocks.plain, uint(literal))
+			} else {
+				var context uint = uint(getContext(prev_byte, prev_byte2, literal_context_lut))
+				contextBlockSplitterAddSymbol(&lit_blocks.ctx, uint(literal), uint(static_context_map[context]))
+			}
+
+			prev_byte2 = prev_byte
+			prev_byte = literal
+			pos++
+		}
+
+		pos += uint(commandCopyLen(cmd))
+		if commandCopyLen(cmd) != 0 {
+			prev_byte2 = ringbuffer[(pos-2)&mask]
+			prev_byte = ringbuffer[(pos-1)&mask]
+			if cmd.cmd_prefix_ >= 128 {
+				blockSplitterAddSymbolDistance(&dist_blocks, uint(cmd.dist_prefix_)&0x3FF)
+			}
+		}
+	}
+
+	if num_contexts == 1 {
+		blockSplitterFinishBlockLiteral(&lit_blocks.plain, true) /* is_final = */
+	} else {
+		contextBlockSplitterFinishBlock(&lit_blocks.ctx, true) /* is_final = */
+	}
+
+	blockSplitterFinishBlockCommand(&cmd_blocks, true) /* is_final = */
+	blockSplitterFinishBlockDistance(&dist_blocks, true) /* is_final = */
+
+	if num_contexts > 1 {
+		mapStaticContexts(num_contexts, static_context_map, mb)
+	}
+}
+
+func buildMetaBlockGreedy(ringbuffer []byte, pos uint, mask uint, prev_byte byte, prev_byte2 byte, literal_context_lut contextLUT, num_contexts uint, static_context_map []uint32, commands []command, mb *metaBlockSplit) {
+	if num_contexts == 1 {
+		buildMetaBlockGreedyInternal(ringbuffer, pos, mask, prev_byte, prev_byte2, literal_context_lut, 1, nil, commands, mb)
+	} else {
+		buildMetaBlockGreedyInternal(ringbuffer, pos, mask, prev_byte, prev_byte2, literal_context_lut, num_contexts, static_context_map, commands, mb)
+	}
+}
+
+func optimizeHistograms(num_distance_codes uint32, mb *metaBlockSplit) {
+	var good_for_rle [numCommandSymbols]byte
+	var i uint
+	for i = 0; i < mb.literal_histograms_size; i++ {
+		optimizeHuffmanCountsForRLE(256, mb.literal_histograms[i].data_[:], good_for_rle[:])
+	}
+
+	for i = 0; i < mb.command_histograms_size; i++ {
+		optimizeHuffmanCountsForRLE(numCommandSymbols, mb.command_histograms[i].data_[:], good_for_rle[:])
+	}
+
+	for i = 0; i < mb.distance_histograms_size; i++ {
+		optimizeHuffmanCountsForRLE(uint(num_distance_codes), mb.distance_histograms[i].data_[:], good_for_rle[:])
+	}
+}
diff --git a/vendor/github.com/andybalholm/brotli/metablock_command.go b/vendor/github.com/andybalholm/brotli/metablock_command.go
new file mode 100644
index 00000000000..14c7b77135d
--- /dev/null
+++ b/vendor/github.com/andybalholm/brotli/metablock_command.go
@@ -0,0 +1,165 @@
+package brotli
+
+/* Copyright 2015 Google Inc. All Rights Reserved.
+
+   Distributed under MIT license.
+   See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
+*/
+
+/* Greedy block splitter for one block category (literal, command or distance).
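+   This is the command-symbol specialization; metablock_distance.go and
+   metablock_literal.go below repeat the same splitter for the other two
+   symbol categories.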
+ */ +type blockSplitterCommand struct { + alphabet_size_ uint + min_block_size_ uint + split_threshold_ float64 + num_blocks_ uint + split_ *blockSplit + histograms_ []histogramCommand + histograms_size_ *uint + target_block_size_ uint + block_size_ uint + curr_histogram_ix_ uint + last_histogram_ix_ [2]uint + last_entropy_ [2]float64 + merge_last_count_ uint +} + +func initBlockSplitterCommand(self *blockSplitterCommand, alphabet_size uint, min_block_size uint, split_threshold float64, num_symbols uint, split *blockSplit, histograms *[]histogramCommand, histograms_size *uint) { + var max_num_blocks uint = num_symbols/min_block_size + 1 + var max_num_types uint = brotli_min_size_t(max_num_blocks, maxNumberOfBlockTypes+1) + /* We have to allocate one more histogram than the maximum number of block + types for the current histogram when the meta-block is too big. */ + self.alphabet_size_ = alphabet_size + + self.min_block_size_ = min_block_size + self.split_threshold_ = split_threshold + self.num_blocks_ = 0 + self.split_ = split + self.histograms_size_ = histograms_size + self.target_block_size_ = min_block_size + self.block_size_ = 0 + self.curr_histogram_ix_ = 0 + self.merge_last_count_ = 0 + brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, max_num_blocks) + brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, max_num_blocks) + self.split_.num_blocks = max_num_blocks + *histograms_size = max_num_types + if histograms == nil || cap(*histograms) < int(*histograms_size) { + *histograms = make([]histogramCommand, (*histograms_size)) + } else { + *histograms = (*histograms)[:*histograms_size] + } + self.histograms_ = *histograms + + /* Clear only current histogram. */ + histogramClearCommand(&self.histograms_[0]) + + self.last_histogram_ix_[1] = 0 + self.last_histogram_ix_[0] = self.last_histogram_ix_[1] +} + +/* Does either of three things: + (1) emits the current block with a new block type; + (2) emits the current block with the type of the second last block; + (3) merges the current block with the last block. */ +func blockSplitterFinishBlockCommand(self *blockSplitterCommand, is_final bool) { + var split *blockSplit = self.split_ + var last_entropy []float64 = self.last_entropy_[:] + var histograms []histogramCommand = self.histograms_ + self.block_size_ = brotli_max_size_t(self.block_size_, self.min_block_size_) + if self.num_blocks_ == 0 { + /* Create first block. 
*/ + split.lengths[0] = uint32(self.block_size_) + + split.types[0] = 0 + last_entropy[0] = bitsEntropy(histograms[0].data_[:], self.alphabet_size_) + last_entropy[1] = last_entropy[0] + self.num_blocks_++ + split.num_types++ + self.curr_histogram_ix_++ + if self.curr_histogram_ix_ < *self.histograms_size_ { + histogramClearCommand(&histograms[self.curr_histogram_ix_]) + } + self.block_size_ = 0 + } else if self.block_size_ > 0 { + var entropy float64 = bitsEntropy(histograms[self.curr_histogram_ix_].data_[:], self.alphabet_size_) + var combined_histo [2]histogramCommand + var combined_entropy [2]float64 + var diff [2]float64 + var j uint + for j = 0; j < 2; j++ { + var last_histogram_ix uint = self.last_histogram_ix_[j] + combined_histo[j] = histograms[self.curr_histogram_ix_] + histogramAddHistogramCommand(&combined_histo[j], &histograms[last_histogram_ix]) + combined_entropy[j] = bitsEntropy(combined_histo[j].data_[0:], self.alphabet_size_) + diff[j] = combined_entropy[j] - entropy - last_entropy[j] + } + + if split.num_types < maxNumberOfBlockTypes && diff[0] > self.split_threshold_ && diff[1] > self.split_threshold_ { + /* Create new block. */ + split.lengths[self.num_blocks_] = uint32(self.block_size_) + + split.types[self.num_blocks_] = byte(split.num_types) + self.last_histogram_ix_[1] = self.last_histogram_ix_[0] + self.last_histogram_ix_[0] = uint(byte(split.num_types)) + last_entropy[1] = last_entropy[0] + last_entropy[0] = entropy + self.num_blocks_++ + split.num_types++ + self.curr_histogram_ix_++ + if self.curr_histogram_ix_ < *self.histograms_size_ { + histogramClearCommand(&histograms[self.curr_histogram_ix_]) + } + self.block_size_ = 0 + self.merge_last_count_ = 0 + self.target_block_size_ = self.min_block_size_ + } else if diff[1] < diff[0]-20.0 { + split.lengths[self.num_blocks_] = uint32(self.block_size_) + split.types[self.num_blocks_] = split.types[self.num_blocks_-2] + /* Combine this block with second last block. */ + + var tmp uint = self.last_histogram_ix_[0] + self.last_histogram_ix_[0] = self.last_histogram_ix_[1] + self.last_histogram_ix_[1] = tmp + histograms[self.last_histogram_ix_[0]] = combined_histo[1] + last_entropy[1] = last_entropy[0] + last_entropy[0] = combined_entropy[1] + self.num_blocks_++ + self.block_size_ = 0 + histogramClearCommand(&histograms[self.curr_histogram_ix_]) + self.merge_last_count_ = 0 + self.target_block_size_ = self.min_block_size_ + } else { + /* Combine this block with last block. */ + split.lengths[self.num_blocks_-1] += uint32(self.block_size_) + + histograms[self.last_histogram_ix_[0]] = combined_histo[0] + last_entropy[0] = combined_entropy[0] + if split.num_types == 1 { + last_entropy[1] = last_entropy[0] + } + + self.block_size_ = 0 + histogramClearCommand(&histograms[self.curr_histogram_ix_]) + self.merge_last_count_++ + if self.merge_last_count_ > 1 { + self.target_block_size_ += self.min_block_size_ + } + } + } + + if is_final { + *self.histograms_size_ = split.num_types + split.num_blocks = self.num_blocks_ + } +} + +/* Adds the next symbol to the current histogram. When the current histogram + reaches the target size, decides on merging the block. 
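+
+   Typical driver loop (a sketch mirroring buildMetaBlockGreedyInternal
+   above, not an additional API):
+
+     for i := range commands {
+       blockSplitterAddSymbolCommand(&cmd_blocks, uint(commands[i].cmd_prefix_))
+     }
+     blockSplitterFinishBlockCommand(&cmd_blocks, true) /* is_final */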
*/ +func blockSplitterAddSymbolCommand(self *blockSplitterCommand, symbol uint) { + histogramAddCommand(&self.histograms_[self.curr_histogram_ix_], symbol) + self.block_size_++ + if self.block_size_ == self.target_block_size_ { + blockSplitterFinishBlockCommand(self, false) /* is_final = */ + } +} diff --git a/vendor/github.com/andybalholm/brotli/metablock_distance.go b/vendor/github.com/andybalholm/brotli/metablock_distance.go new file mode 100644 index 00000000000..5110a810e96 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/metablock_distance.go @@ -0,0 +1,165 @@ +package brotli + +/* Copyright 2015 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Greedy block splitter for one block category (literal, command or distance). + */ +type blockSplitterDistance struct { + alphabet_size_ uint + min_block_size_ uint + split_threshold_ float64 + num_blocks_ uint + split_ *blockSplit + histograms_ []histogramDistance + histograms_size_ *uint + target_block_size_ uint + block_size_ uint + curr_histogram_ix_ uint + last_histogram_ix_ [2]uint + last_entropy_ [2]float64 + merge_last_count_ uint +} + +func initBlockSplitterDistance(self *blockSplitterDistance, alphabet_size uint, min_block_size uint, split_threshold float64, num_symbols uint, split *blockSplit, histograms *[]histogramDistance, histograms_size *uint) { + var max_num_blocks uint = num_symbols/min_block_size + 1 + var max_num_types uint = brotli_min_size_t(max_num_blocks, maxNumberOfBlockTypes+1) + /* We have to allocate one more histogram than the maximum number of block + types for the current histogram when the meta-block is too big. */ + self.alphabet_size_ = alphabet_size + + self.min_block_size_ = min_block_size + self.split_threshold_ = split_threshold + self.num_blocks_ = 0 + self.split_ = split + self.histograms_size_ = histograms_size + self.target_block_size_ = min_block_size + self.block_size_ = 0 + self.curr_histogram_ix_ = 0 + self.merge_last_count_ = 0 + brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, max_num_blocks) + brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, max_num_blocks) + self.split_.num_blocks = max_num_blocks + *histograms_size = max_num_types + if histograms == nil || cap(*histograms) < int(*histograms_size) { + *histograms = make([]histogramDistance, *histograms_size) + } else { + *histograms = (*histograms)[:*histograms_size] + } + self.histograms_ = *histograms + + /* Clear only current histogram. */ + histogramClearDistance(&self.histograms_[0]) + + self.last_histogram_ix_[1] = 0 + self.last_histogram_ix_[0] = self.last_histogram_ix_[1] +} + +/* Does either of three things: + (1) emits the current block with a new block type; + (2) emits the current block with the type of the second last block; + (3) merges the current block with the last block. */ +func blockSplitterFinishBlockDistance(self *blockSplitterDistance, is_final bool) { + var split *blockSplit = self.split_ + var last_entropy []float64 = self.last_entropy_[:] + var histograms []histogramDistance = self.histograms_ + self.block_size_ = brotli_max_size_t(self.block_size_, self.min_block_size_) + if self.num_blocks_ == 0 { + /* Create first block. 
*/ + split.lengths[0] = uint32(self.block_size_) + + split.types[0] = 0 + last_entropy[0] = bitsEntropy(histograms[0].data_[:], self.alphabet_size_) + last_entropy[1] = last_entropy[0] + self.num_blocks_++ + split.num_types++ + self.curr_histogram_ix_++ + if self.curr_histogram_ix_ < *self.histograms_size_ { + histogramClearDistance(&histograms[self.curr_histogram_ix_]) + } + self.block_size_ = 0 + } else if self.block_size_ > 0 { + var entropy float64 = bitsEntropy(histograms[self.curr_histogram_ix_].data_[:], self.alphabet_size_) + var combined_histo [2]histogramDistance + var combined_entropy [2]float64 + var diff [2]float64 + var j uint + for j = 0; j < 2; j++ { + var last_histogram_ix uint = self.last_histogram_ix_[j] + combined_histo[j] = histograms[self.curr_histogram_ix_] + histogramAddHistogramDistance(&combined_histo[j], &histograms[last_histogram_ix]) + combined_entropy[j] = bitsEntropy(combined_histo[j].data_[0:], self.alphabet_size_) + diff[j] = combined_entropy[j] - entropy - last_entropy[j] + } + + if split.num_types < maxNumberOfBlockTypes && diff[0] > self.split_threshold_ && diff[1] > self.split_threshold_ { + /* Create new block. */ + split.lengths[self.num_blocks_] = uint32(self.block_size_) + + split.types[self.num_blocks_] = byte(split.num_types) + self.last_histogram_ix_[1] = self.last_histogram_ix_[0] + self.last_histogram_ix_[0] = uint(byte(split.num_types)) + last_entropy[1] = last_entropy[0] + last_entropy[0] = entropy + self.num_blocks_++ + split.num_types++ + self.curr_histogram_ix_++ + if self.curr_histogram_ix_ < *self.histograms_size_ { + histogramClearDistance(&histograms[self.curr_histogram_ix_]) + } + self.block_size_ = 0 + self.merge_last_count_ = 0 + self.target_block_size_ = self.min_block_size_ + } else if diff[1] < diff[0]-20.0 { + split.lengths[self.num_blocks_] = uint32(self.block_size_) + split.types[self.num_blocks_] = split.types[self.num_blocks_-2] + /* Combine this block with second last block. */ + + var tmp uint = self.last_histogram_ix_[0] + self.last_histogram_ix_[0] = self.last_histogram_ix_[1] + self.last_histogram_ix_[1] = tmp + histograms[self.last_histogram_ix_[0]] = combined_histo[1] + last_entropy[1] = last_entropy[0] + last_entropy[0] = combined_entropy[1] + self.num_blocks_++ + self.block_size_ = 0 + histogramClearDistance(&histograms[self.curr_histogram_ix_]) + self.merge_last_count_ = 0 + self.target_block_size_ = self.min_block_size_ + } else { + /* Combine this block with last block. */ + split.lengths[self.num_blocks_-1] += uint32(self.block_size_) + + histograms[self.last_histogram_ix_[0]] = combined_histo[0] + last_entropy[0] = combined_entropy[0] + if split.num_types == 1 { + last_entropy[1] = last_entropy[0] + } + + self.block_size_ = 0 + histogramClearDistance(&histograms[self.curr_histogram_ix_]) + self.merge_last_count_++ + if self.merge_last_count_ > 1 { + self.target_block_size_ += self.min_block_size_ + } + } + } + + if is_final { + *self.histograms_size_ = split.num_types + split.num_blocks = self.num_blocks_ + } +} + +/* Adds the next symbol to the current histogram. When the current histogram + reaches the target size, decides on merging the block. 
*/ +func blockSplitterAddSymbolDistance(self *blockSplitterDistance, symbol uint) { + histogramAddDistance(&self.histograms_[self.curr_histogram_ix_], symbol) + self.block_size_++ + if self.block_size_ == self.target_block_size_ { + blockSplitterFinishBlockDistance(self, false) /* is_final = */ + } +} diff --git a/vendor/github.com/andybalholm/brotli/metablock_literal.go b/vendor/github.com/andybalholm/brotli/metablock_literal.go new file mode 100644 index 00000000000..307f8da88f4 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/metablock_literal.go @@ -0,0 +1,165 @@ +package brotli + +/* Copyright 2015 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Greedy block splitter for one block category (literal, command or distance). + */ +type blockSplitterLiteral struct { + alphabet_size_ uint + min_block_size_ uint + split_threshold_ float64 + num_blocks_ uint + split_ *blockSplit + histograms_ []histogramLiteral + histograms_size_ *uint + target_block_size_ uint + block_size_ uint + curr_histogram_ix_ uint + last_histogram_ix_ [2]uint + last_entropy_ [2]float64 + merge_last_count_ uint +} + +func initBlockSplitterLiteral(self *blockSplitterLiteral, alphabet_size uint, min_block_size uint, split_threshold float64, num_symbols uint, split *blockSplit, histograms *[]histogramLiteral, histograms_size *uint) { + var max_num_blocks uint = num_symbols/min_block_size + 1 + var max_num_types uint = brotli_min_size_t(max_num_blocks, maxNumberOfBlockTypes+1) + /* We have to allocate one more histogram than the maximum number of block + types for the current histogram when the meta-block is too big. */ + self.alphabet_size_ = alphabet_size + + self.min_block_size_ = min_block_size + self.split_threshold_ = split_threshold + self.num_blocks_ = 0 + self.split_ = split + self.histograms_size_ = histograms_size + self.target_block_size_ = min_block_size + self.block_size_ = 0 + self.curr_histogram_ix_ = 0 + self.merge_last_count_ = 0 + brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, max_num_blocks) + brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, max_num_blocks) + self.split_.num_blocks = max_num_blocks + *histograms_size = max_num_types + if histograms == nil || cap(*histograms) < int(*histograms_size) { + *histograms = make([]histogramLiteral, *histograms_size) + } else { + *histograms = (*histograms)[:*histograms_size] + } + self.histograms_ = *histograms + + /* Clear only current histogram. */ + histogramClearLiteral(&self.histograms_[0]) + + self.last_histogram_ix_[1] = 0 + self.last_histogram_ix_[0] = self.last_histogram_ix_[1] +} + +/* Does either of three things: + (1) emits the current block with a new block type; + (2) emits the current block with the type of the second last block; + (3) merges the current block with the last block. */ +func blockSplitterFinishBlockLiteral(self *blockSplitterLiteral, is_final bool) { + var split *blockSplit = self.split_ + var last_entropy []float64 = self.last_entropy_[:] + var histograms []histogramLiteral = self.histograms_ + self.block_size_ = brotli_max_size_t(self.block_size_, self.min_block_size_) + if self.num_blocks_ == 0 { + /* Create first block. 
*/ + split.lengths[0] = uint32(self.block_size_) + + split.types[0] = 0 + last_entropy[0] = bitsEntropy(histograms[0].data_[:], self.alphabet_size_) + last_entropy[1] = last_entropy[0] + self.num_blocks_++ + split.num_types++ + self.curr_histogram_ix_++ + if self.curr_histogram_ix_ < *self.histograms_size_ { + histogramClearLiteral(&histograms[self.curr_histogram_ix_]) + } + self.block_size_ = 0 + } else if self.block_size_ > 0 { + var entropy float64 = bitsEntropy(histograms[self.curr_histogram_ix_].data_[:], self.alphabet_size_) + var combined_histo [2]histogramLiteral + var combined_entropy [2]float64 + var diff [2]float64 + var j uint + for j = 0; j < 2; j++ { + var last_histogram_ix uint = self.last_histogram_ix_[j] + combined_histo[j] = histograms[self.curr_histogram_ix_] + histogramAddHistogramLiteral(&combined_histo[j], &histograms[last_histogram_ix]) + combined_entropy[j] = bitsEntropy(combined_histo[j].data_[0:], self.alphabet_size_) + diff[j] = combined_entropy[j] - entropy - last_entropy[j] + } + + if split.num_types < maxNumberOfBlockTypes && diff[0] > self.split_threshold_ && diff[1] > self.split_threshold_ { + /* Create new block. */ + split.lengths[self.num_blocks_] = uint32(self.block_size_) + + split.types[self.num_blocks_] = byte(split.num_types) + self.last_histogram_ix_[1] = self.last_histogram_ix_[0] + self.last_histogram_ix_[0] = uint(byte(split.num_types)) + last_entropy[1] = last_entropy[0] + last_entropy[0] = entropy + self.num_blocks_++ + split.num_types++ + self.curr_histogram_ix_++ + if self.curr_histogram_ix_ < *self.histograms_size_ { + histogramClearLiteral(&histograms[self.curr_histogram_ix_]) + } + self.block_size_ = 0 + self.merge_last_count_ = 0 + self.target_block_size_ = self.min_block_size_ + } else if diff[1] < diff[0]-20.0 { + split.lengths[self.num_blocks_] = uint32(self.block_size_) + split.types[self.num_blocks_] = split.types[self.num_blocks_-2] + /* Combine this block with second last block. */ + + var tmp uint = self.last_histogram_ix_[0] + self.last_histogram_ix_[0] = self.last_histogram_ix_[1] + self.last_histogram_ix_[1] = tmp + histograms[self.last_histogram_ix_[0]] = combined_histo[1] + last_entropy[1] = last_entropy[0] + last_entropy[0] = combined_entropy[1] + self.num_blocks_++ + self.block_size_ = 0 + histogramClearLiteral(&histograms[self.curr_histogram_ix_]) + self.merge_last_count_ = 0 + self.target_block_size_ = self.min_block_size_ + } else { + /* Combine this block with last block. */ + split.lengths[self.num_blocks_-1] += uint32(self.block_size_) + + histograms[self.last_histogram_ix_[0]] = combined_histo[0] + last_entropy[0] = combined_entropy[0] + if split.num_types == 1 { + last_entropy[1] = last_entropy[0] + } + + self.block_size_ = 0 + histogramClearLiteral(&histograms[self.curr_histogram_ix_]) + self.merge_last_count_++ + if self.merge_last_count_ > 1 { + self.target_block_size_ += self.min_block_size_ + } + } + } + + if is_final { + *self.histograms_size_ = split.num_types + split.num_blocks = self.num_blocks_ + } +} + +/* Adds the next symbol to the current histogram. When the current histogram + reaches the target size, decides on merging the block. 
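+   The merge decision itself is made in blockSplitterFinishBlockLiteral
+   above: a new block type is opened only when both entropy deltas diff[0]
+   and diff[1] exceed split_threshold_; otherwise the block is merged into
+   the last block, or into the second-last one when that is cheaper by a
+   margin of 20 bits.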
*/ +func blockSplitterAddSymbolLiteral(self *blockSplitterLiteral, symbol uint) { + histogramAddLiteral(&self.histograms_[self.curr_histogram_ix_], symbol) + self.block_size_++ + if self.block_size_ == self.target_block_size_ { + blockSplitterFinishBlockLiteral(self, false) /* is_final = */ + } +} diff --git a/vendor/github.com/andybalholm/brotli/params.go b/vendor/github.com/andybalholm/brotli/params.go new file mode 100644 index 00000000000..0a4c6875212 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/params.go @@ -0,0 +1,37 @@ +package brotli + +/* Copyright 2017 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Parameters for the Brotli encoder with chosen quality levels. */ +type hasherParams struct { + type_ int + bucket_bits int + block_bits int + hash_len int + num_last_distances_to_check int +} + +type distanceParams struct { + distance_postfix_bits uint32 + num_direct_distance_codes uint32 + alphabet_size uint32 + max_distance uint +} + +/* Encoding parameters */ +type encoderParams struct { + mode int + quality int + lgwin uint + lgblock int + size_hint uint + disable_literal_context_modeling bool + large_window bool + hasher hasherParams + dist distanceParams + dictionary encoderDictionary +} diff --git a/vendor/github.com/andybalholm/brotli/platform.go b/vendor/github.com/andybalholm/brotli/platform.go new file mode 100644 index 00000000000..4ebfb1528ba --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/platform.go @@ -0,0 +1,103 @@ +package brotli + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +func brotli_min_double(a float64, b float64) float64 { + if a < b { + return a + } else { + return b + } +} + +func brotli_max_double(a float64, b float64) float64 { + if a > b { + return a + } else { + return b + } +} + +func brotli_min_float(a float32, b float32) float32 { + if a < b { + return a + } else { + return b + } +} + +func brotli_max_float(a float32, b float32) float32 { + if a > b { + return a + } else { + return b + } +} + +func brotli_min_int(a int, b int) int { + if a < b { + return a + } else { + return b + } +} + +func brotli_max_int(a int, b int) int { + if a > b { + return a + } else { + return b + } +} + +func brotli_min_size_t(a uint, b uint) uint { + if a < b { + return a + } else { + return b + } +} + +func brotli_max_size_t(a uint, b uint) uint { + if a > b { + return a + } else { + return b + } +} + +func brotli_min_uint32_t(a uint32, b uint32) uint32 { + if a < b { + return a + } else { + return b + } +} + +func brotli_max_uint32_t(a uint32, b uint32) uint32 { + if a > b { + return a + } else { + return b + } +} + +func brotli_min_uint8_t(a byte, b byte) byte { + if a < b { + return a + } else { + return b + } +} + +func brotli_max_uint8_t(a byte, b byte) byte { + if a > b { + return a + } else { + return b + } +} diff --git a/vendor/github.com/andybalholm/brotli/prefix.go b/vendor/github.com/andybalholm/brotli/prefix.go new file mode 100644 index 00000000000..484df0d61ec --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/prefix.go @@ -0,0 +1,30 @@ +package brotli + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. 
+ See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Functions for encoding of integers into prefix codes the amount of extra + bits, and the actual values of the extra bits. */ + +/* Here distance_code is an intermediate code, i.e. one of the special codes or + the actual distance increased by BROTLI_NUM_DISTANCE_SHORT_CODES - 1. */ +func prefixEncodeCopyDistance(distance_code uint, num_direct_codes uint, postfix_bits uint, code *uint16, extra_bits *uint32) { + if distance_code < numDistanceShortCodes+num_direct_codes { + *code = uint16(distance_code) + *extra_bits = 0 + return + } else { + var dist uint = (uint(1) << (postfix_bits + 2)) + (distance_code - numDistanceShortCodes - num_direct_codes) + var bucket uint = uint(log2FloorNonZero(dist) - 1) + var postfix_mask uint = (1 << postfix_bits) - 1 + var postfix uint = dist & postfix_mask + var prefix uint = (dist >> bucket) & 1 + var offset uint = (2 + prefix) << bucket + var nbits uint = bucket - postfix_bits + *code = uint16(nbits<<10 | (numDistanceShortCodes + num_direct_codes + ((2*(nbits-1) + prefix) << postfix_bits) + postfix)) + *extra_bits = uint32((dist - offset) >> postfix_bits) + } +} diff --git a/vendor/github.com/andybalholm/brotli/prefix_dec.go b/vendor/github.com/andybalholm/brotli/prefix_dec.go new file mode 100644 index 00000000000..183f0d53fed --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/prefix_dec.go @@ -0,0 +1,723 @@ +package brotli + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +type cmdLutElement struct { + insert_len_extra_bits byte + copy_len_extra_bits byte + distance_code int8 + context byte + insert_len_offset uint16 + copy_len_offset uint16 +} + +var kCmdLut = [numCommandSymbols]cmdLutElement{ + cmdLutElement{0x00, 0x00, 0, 0x00, 0x0000, 0x0002}, + cmdLutElement{0x00, 0x00, 0, 0x01, 0x0000, 0x0003}, + cmdLutElement{0x00, 0x00, 0, 0x02, 0x0000, 0x0004}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0000, 0x0005}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0000, 0x0006}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0000, 0x0007}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0000, 0x0008}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0000, 0x0009}, + cmdLutElement{0x00, 0x00, 0, 0x00, 0x0001, 0x0002}, + cmdLutElement{0x00, 0x00, 0, 0x01, 0x0001, 0x0003}, + cmdLutElement{0x00, 0x00, 0, 0x02, 0x0001, 0x0004}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0001, 0x0005}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0001, 0x0006}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0001, 0x0007}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0001, 0x0008}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0001, 0x0009}, + cmdLutElement{0x00, 0x00, 0, 0x00, 0x0002, 0x0002}, + cmdLutElement{0x00, 0x00, 0, 0x01, 0x0002, 0x0003}, + cmdLutElement{0x00, 0x00, 0, 0x02, 0x0002, 0x0004}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0002, 0x0005}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0002, 0x0006}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0002, 0x0007}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0002, 0x0008}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0002, 0x0009}, + cmdLutElement{0x00, 0x00, 0, 0x00, 0x0003, 0x0002}, + cmdLutElement{0x00, 0x00, 0, 0x01, 0x0003, 0x0003}, + cmdLutElement{0x00, 0x00, 0, 0x02, 0x0003, 0x0004}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0003, 0x0005}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0003, 0x0006}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0003, 0x0007}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0003, 0x0008}, + 
cmdLutElement{0x00, 0x00, 0, 0x03, 0x0003, 0x0009}, + cmdLutElement{0x00, 0x00, 0, 0x00, 0x0004, 0x0002}, + cmdLutElement{0x00, 0x00, 0, 0x01, 0x0004, 0x0003}, + cmdLutElement{0x00, 0x00, 0, 0x02, 0x0004, 0x0004}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0004, 0x0005}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0004, 0x0006}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0004, 0x0007}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0004, 0x0008}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0004, 0x0009}, + cmdLutElement{0x00, 0x00, 0, 0x00, 0x0005, 0x0002}, + cmdLutElement{0x00, 0x00, 0, 0x01, 0x0005, 0x0003}, + cmdLutElement{0x00, 0x00, 0, 0x02, 0x0005, 0x0004}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0005, 0x0005}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0005, 0x0006}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0005, 0x0007}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0005, 0x0008}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0005, 0x0009}, + cmdLutElement{0x01, 0x00, 0, 0x00, 0x0006, 0x0002}, + cmdLutElement{0x01, 0x00, 0, 0x01, 0x0006, 0x0003}, + cmdLutElement{0x01, 0x00, 0, 0x02, 0x0006, 0x0004}, + cmdLutElement{0x01, 0x00, 0, 0x03, 0x0006, 0x0005}, + cmdLutElement{0x01, 0x00, 0, 0x03, 0x0006, 0x0006}, + cmdLutElement{0x01, 0x00, 0, 0x03, 0x0006, 0x0007}, + cmdLutElement{0x01, 0x00, 0, 0x03, 0x0006, 0x0008}, + cmdLutElement{0x01, 0x00, 0, 0x03, 0x0006, 0x0009}, + cmdLutElement{0x01, 0x00, 0, 0x00, 0x0008, 0x0002}, + cmdLutElement{0x01, 0x00, 0, 0x01, 0x0008, 0x0003}, + cmdLutElement{0x01, 0x00, 0, 0x02, 0x0008, 0x0004}, + cmdLutElement{0x01, 0x00, 0, 0x03, 0x0008, 0x0005}, + cmdLutElement{0x01, 0x00, 0, 0x03, 0x0008, 0x0006}, + cmdLutElement{0x01, 0x00, 0, 0x03, 0x0008, 0x0007}, + cmdLutElement{0x01, 0x00, 0, 0x03, 0x0008, 0x0008}, + cmdLutElement{0x01, 0x00, 0, 0x03, 0x0008, 0x0009}, + cmdLutElement{0x00, 0x01, 0, 0x03, 0x0000, 0x000a}, + cmdLutElement{0x00, 0x01, 0, 0x03, 0x0000, 0x000c}, + cmdLutElement{0x00, 0x02, 0, 0x03, 0x0000, 0x000e}, + cmdLutElement{0x00, 0x02, 0, 0x03, 0x0000, 0x0012}, + cmdLutElement{0x00, 0x03, 0, 0x03, 0x0000, 0x0016}, + cmdLutElement{0x00, 0x03, 0, 0x03, 0x0000, 0x001e}, + cmdLutElement{0x00, 0x04, 0, 0x03, 0x0000, 0x0026}, + cmdLutElement{0x00, 0x04, 0, 0x03, 0x0000, 0x0036}, + cmdLutElement{0x00, 0x01, 0, 0x03, 0x0001, 0x000a}, + cmdLutElement{0x00, 0x01, 0, 0x03, 0x0001, 0x000c}, + cmdLutElement{0x00, 0x02, 0, 0x03, 0x0001, 0x000e}, + cmdLutElement{0x00, 0x02, 0, 0x03, 0x0001, 0x0012}, + cmdLutElement{0x00, 0x03, 0, 0x03, 0x0001, 0x0016}, + cmdLutElement{0x00, 0x03, 0, 0x03, 0x0001, 0x001e}, + cmdLutElement{0x00, 0x04, 0, 0x03, 0x0001, 0x0026}, + cmdLutElement{0x00, 0x04, 0, 0x03, 0x0001, 0x0036}, + cmdLutElement{0x00, 0x01, 0, 0x03, 0x0002, 0x000a}, + cmdLutElement{0x00, 0x01, 0, 0x03, 0x0002, 0x000c}, + cmdLutElement{0x00, 0x02, 0, 0x03, 0x0002, 0x000e}, + cmdLutElement{0x00, 0x02, 0, 0x03, 0x0002, 0x0012}, + cmdLutElement{0x00, 0x03, 0, 0x03, 0x0002, 0x0016}, + cmdLutElement{0x00, 0x03, 0, 0x03, 0x0002, 0x001e}, + cmdLutElement{0x00, 0x04, 0, 0x03, 0x0002, 0x0026}, + cmdLutElement{0x00, 0x04, 0, 0x03, 0x0002, 0x0036}, + cmdLutElement{0x00, 0x01, 0, 0x03, 0x0003, 0x000a}, + cmdLutElement{0x00, 0x01, 0, 0x03, 0x0003, 0x000c}, + cmdLutElement{0x00, 0x02, 0, 0x03, 0x0003, 0x000e}, + cmdLutElement{0x00, 0x02, 0, 0x03, 0x0003, 0x0012}, + cmdLutElement{0x00, 0x03, 0, 0x03, 0x0003, 0x0016}, + cmdLutElement{0x00, 0x03, 0, 0x03, 0x0003, 0x001e}, + cmdLutElement{0x00, 0x04, 0, 0x03, 0x0003, 0x0026}, + cmdLutElement{0x00, 0x04, 0, 0x03, 0x0003, 0x0036}, + cmdLutElement{0x00, 0x01, 0, 0x03, 0x0004, 
0x000a}, + cmdLutElement{0x00, 0x01, 0, 0x03, 0x0004, 0x000c}, + cmdLutElement{0x00, 0x02, 0, 0x03, 0x0004, 0x000e}, + cmdLutElement{0x00, 0x02, 0, 0x03, 0x0004, 0x0012}, + cmdLutElement{0x00, 0x03, 0, 0x03, 0x0004, 0x0016}, + cmdLutElement{0x00, 0x03, 0, 0x03, 0x0004, 0x001e}, + cmdLutElement{0x00, 0x04, 0, 0x03, 0x0004, 0x0026}, + cmdLutElement{0x00, 0x04, 0, 0x03, 0x0004, 0x0036}, + cmdLutElement{0x00, 0x01, 0, 0x03, 0x0005, 0x000a}, + cmdLutElement{0x00, 0x01, 0, 0x03, 0x0005, 0x000c}, + cmdLutElement{0x00, 0x02, 0, 0x03, 0x0005, 0x000e}, + cmdLutElement{0x00, 0x02, 0, 0x03, 0x0005, 0x0012}, + cmdLutElement{0x00, 0x03, 0, 0x03, 0x0005, 0x0016}, + cmdLutElement{0x00, 0x03, 0, 0x03, 0x0005, 0x001e}, + cmdLutElement{0x00, 0x04, 0, 0x03, 0x0005, 0x0026}, + cmdLutElement{0x00, 0x04, 0, 0x03, 0x0005, 0x0036}, + cmdLutElement{0x01, 0x01, 0, 0x03, 0x0006, 0x000a}, + cmdLutElement{0x01, 0x01, 0, 0x03, 0x0006, 0x000c}, + cmdLutElement{0x01, 0x02, 0, 0x03, 0x0006, 0x000e}, + cmdLutElement{0x01, 0x02, 0, 0x03, 0x0006, 0x0012}, + cmdLutElement{0x01, 0x03, 0, 0x03, 0x0006, 0x0016}, + cmdLutElement{0x01, 0x03, 0, 0x03, 0x0006, 0x001e}, + cmdLutElement{0x01, 0x04, 0, 0x03, 0x0006, 0x0026}, + cmdLutElement{0x01, 0x04, 0, 0x03, 0x0006, 0x0036}, + cmdLutElement{0x01, 0x01, 0, 0x03, 0x0008, 0x000a}, + cmdLutElement{0x01, 0x01, 0, 0x03, 0x0008, 0x000c}, + cmdLutElement{0x01, 0x02, 0, 0x03, 0x0008, 0x000e}, + cmdLutElement{0x01, 0x02, 0, 0x03, 0x0008, 0x0012}, + cmdLutElement{0x01, 0x03, 0, 0x03, 0x0008, 0x0016}, + cmdLutElement{0x01, 0x03, 0, 0x03, 0x0008, 0x001e}, + cmdLutElement{0x01, 0x04, 0, 0x03, 0x0008, 0x0026}, + cmdLutElement{0x01, 0x04, 0, 0x03, 0x0008, 0x0036}, + cmdLutElement{0x00, 0x00, -1, 0x00, 0x0000, 0x0002}, + cmdLutElement{0x00, 0x00, -1, 0x01, 0x0000, 0x0003}, + cmdLutElement{0x00, 0x00, -1, 0x02, 0x0000, 0x0004}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0000, 0x0005}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0000, 0x0006}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0000, 0x0007}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0000, 0x0008}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0000, 0x0009}, + cmdLutElement{0x00, 0x00, -1, 0x00, 0x0001, 0x0002}, + cmdLutElement{0x00, 0x00, -1, 0x01, 0x0001, 0x0003}, + cmdLutElement{0x00, 0x00, -1, 0x02, 0x0001, 0x0004}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0001, 0x0005}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0001, 0x0006}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0001, 0x0007}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0001, 0x0008}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0001, 0x0009}, + cmdLutElement{0x00, 0x00, -1, 0x00, 0x0002, 0x0002}, + cmdLutElement{0x00, 0x00, -1, 0x01, 0x0002, 0x0003}, + cmdLutElement{0x00, 0x00, -1, 0x02, 0x0002, 0x0004}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0002, 0x0005}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0002, 0x0006}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0002, 0x0007}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0002, 0x0008}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0002, 0x0009}, + cmdLutElement{0x00, 0x00, -1, 0x00, 0x0003, 0x0002}, + cmdLutElement{0x00, 0x00, -1, 0x01, 0x0003, 0x0003}, + cmdLutElement{0x00, 0x00, -1, 0x02, 0x0003, 0x0004}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0003, 0x0005}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0003, 0x0006}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0003, 0x0007}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0003, 0x0008}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0003, 0x0009}, + cmdLutElement{0x00, 0x00, -1, 0x00, 0x0004, 0x0002}, + cmdLutElement{0x00, 0x00, -1, 0x01, 0x0004, 0x0003}, + 
cmdLutElement{0x00, 0x00, -1, 0x02, 0x0004, 0x0004}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0004, 0x0005}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0004, 0x0006}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0004, 0x0007}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0004, 0x0008}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0004, 0x0009}, + cmdLutElement{0x00, 0x00, -1, 0x00, 0x0005, 0x0002}, + cmdLutElement{0x00, 0x00, -1, 0x01, 0x0005, 0x0003}, + cmdLutElement{0x00, 0x00, -1, 0x02, 0x0005, 0x0004}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0005, 0x0005}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0005, 0x0006}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0005, 0x0007}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0005, 0x0008}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0005, 0x0009}, + cmdLutElement{0x01, 0x00, -1, 0x00, 0x0006, 0x0002}, + cmdLutElement{0x01, 0x00, -1, 0x01, 0x0006, 0x0003}, + cmdLutElement{0x01, 0x00, -1, 0x02, 0x0006, 0x0004}, + cmdLutElement{0x01, 0x00, -1, 0x03, 0x0006, 0x0005}, + cmdLutElement{0x01, 0x00, -1, 0x03, 0x0006, 0x0006}, + cmdLutElement{0x01, 0x00, -1, 0x03, 0x0006, 0x0007}, + cmdLutElement{0x01, 0x00, -1, 0x03, 0x0006, 0x0008}, + cmdLutElement{0x01, 0x00, -1, 0x03, 0x0006, 0x0009}, + cmdLutElement{0x01, 0x00, -1, 0x00, 0x0008, 0x0002}, + cmdLutElement{0x01, 0x00, -1, 0x01, 0x0008, 0x0003}, + cmdLutElement{0x01, 0x00, -1, 0x02, 0x0008, 0x0004}, + cmdLutElement{0x01, 0x00, -1, 0x03, 0x0008, 0x0005}, + cmdLutElement{0x01, 0x00, -1, 0x03, 0x0008, 0x0006}, + cmdLutElement{0x01, 0x00, -1, 0x03, 0x0008, 0x0007}, + cmdLutElement{0x01, 0x00, -1, 0x03, 0x0008, 0x0008}, + cmdLutElement{0x01, 0x00, -1, 0x03, 0x0008, 0x0009}, + cmdLutElement{0x00, 0x01, -1, 0x03, 0x0000, 0x000a}, + cmdLutElement{0x00, 0x01, -1, 0x03, 0x0000, 0x000c}, + cmdLutElement{0x00, 0x02, -1, 0x03, 0x0000, 0x000e}, + cmdLutElement{0x00, 0x02, -1, 0x03, 0x0000, 0x0012}, + cmdLutElement{0x00, 0x03, -1, 0x03, 0x0000, 0x0016}, + cmdLutElement{0x00, 0x03, -1, 0x03, 0x0000, 0x001e}, + cmdLutElement{0x00, 0x04, -1, 0x03, 0x0000, 0x0026}, + cmdLutElement{0x00, 0x04, -1, 0x03, 0x0000, 0x0036}, + cmdLutElement{0x00, 0x01, -1, 0x03, 0x0001, 0x000a}, + cmdLutElement{0x00, 0x01, -1, 0x03, 0x0001, 0x000c}, + cmdLutElement{0x00, 0x02, -1, 0x03, 0x0001, 0x000e}, + cmdLutElement{0x00, 0x02, -1, 0x03, 0x0001, 0x0012}, + cmdLutElement{0x00, 0x03, -1, 0x03, 0x0001, 0x0016}, + cmdLutElement{0x00, 0x03, -1, 0x03, 0x0001, 0x001e}, + cmdLutElement{0x00, 0x04, -1, 0x03, 0x0001, 0x0026}, + cmdLutElement{0x00, 0x04, -1, 0x03, 0x0001, 0x0036}, + cmdLutElement{0x00, 0x01, -1, 0x03, 0x0002, 0x000a}, + cmdLutElement{0x00, 0x01, -1, 0x03, 0x0002, 0x000c}, + cmdLutElement{0x00, 0x02, -1, 0x03, 0x0002, 0x000e}, + cmdLutElement{0x00, 0x02, -1, 0x03, 0x0002, 0x0012}, + cmdLutElement{0x00, 0x03, -1, 0x03, 0x0002, 0x0016}, + cmdLutElement{0x00, 0x03, -1, 0x03, 0x0002, 0x001e}, + cmdLutElement{0x00, 0x04, -1, 0x03, 0x0002, 0x0026}, + cmdLutElement{0x00, 0x04, -1, 0x03, 0x0002, 0x0036}, + cmdLutElement{0x00, 0x01, -1, 0x03, 0x0003, 0x000a}, + cmdLutElement{0x00, 0x01, -1, 0x03, 0x0003, 0x000c}, + cmdLutElement{0x00, 0x02, -1, 0x03, 0x0003, 0x000e}, + cmdLutElement{0x00, 0x02, -1, 0x03, 0x0003, 0x0012}, + cmdLutElement{0x00, 0x03, -1, 0x03, 0x0003, 0x0016}, + cmdLutElement{0x00, 0x03, -1, 0x03, 0x0003, 0x001e}, + cmdLutElement{0x00, 0x04, -1, 0x03, 0x0003, 0x0026}, + cmdLutElement{0x00, 0x04, -1, 0x03, 0x0003, 0x0036}, + cmdLutElement{0x00, 0x01, -1, 0x03, 0x0004, 0x000a}, + cmdLutElement{0x00, 0x01, -1, 0x03, 0x0004, 0x000c}, + cmdLutElement{0x00, 0x02, -1, 
0x03, 0x0004, 0x000e}, + cmdLutElement{0x00, 0x02, -1, 0x03, 0x0004, 0x0012}, + cmdLutElement{0x00, 0x03, -1, 0x03, 0x0004, 0x0016}, + cmdLutElement{0x00, 0x03, -1, 0x03, 0x0004, 0x001e}, + cmdLutElement{0x00, 0x04, -1, 0x03, 0x0004, 0x0026}, + cmdLutElement{0x00, 0x04, -1, 0x03, 0x0004, 0x0036}, + cmdLutElement{0x00, 0x01, -1, 0x03, 0x0005, 0x000a}, + cmdLutElement{0x00, 0x01, -1, 0x03, 0x0005, 0x000c}, + cmdLutElement{0x00, 0x02, -1, 0x03, 0x0005, 0x000e}, + cmdLutElement{0x00, 0x02, -1, 0x03, 0x0005, 0x0012}, + cmdLutElement{0x00, 0x03, -1, 0x03, 0x0005, 0x0016}, + cmdLutElement{0x00, 0x03, -1, 0x03, 0x0005, 0x001e}, + cmdLutElement{0x00, 0x04, -1, 0x03, 0x0005, 0x0026}, + cmdLutElement{0x00, 0x04, -1, 0x03, 0x0005, 0x0036}, + cmdLutElement{0x01, 0x01, -1, 0x03, 0x0006, 0x000a}, + cmdLutElement{0x01, 0x01, -1, 0x03, 0x0006, 0x000c}, + cmdLutElement{0x01, 0x02, -1, 0x03, 0x0006, 0x000e}, + cmdLutElement{0x01, 0x02, -1, 0x03, 0x0006, 0x0012}, + cmdLutElement{0x01, 0x03, -1, 0x03, 0x0006, 0x0016}, + cmdLutElement{0x01, 0x03, -1, 0x03, 0x0006, 0x001e}, + cmdLutElement{0x01, 0x04, -1, 0x03, 0x0006, 0x0026}, + cmdLutElement{0x01, 0x04, -1, 0x03, 0x0006, 0x0036}, + cmdLutElement{0x01, 0x01, -1, 0x03, 0x0008, 0x000a}, + cmdLutElement{0x01, 0x01, -1, 0x03, 0x0008, 0x000c}, + cmdLutElement{0x01, 0x02, -1, 0x03, 0x0008, 0x000e}, + cmdLutElement{0x01, 0x02, -1, 0x03, 0x0008, 0x0012}, + cmdLutElement{0x01, 0x03, -1, 0x03, 0x0008, 0x0016}, + cmdLutElement{0x01, 0x03, -1, 0x03, 0x0008, 0x001e}, + cmdLutElement{0x01, 0x04, -1, 0x03, 0x0008, 0x0026}, + cmdLutElement{0x01, 0x04, -1, 0x03, 0x0008, 0x0036}, + cmdLutElement{0x02, 0x00, -1, 0x00, 0x000a, 0x0002}, + cmdLutElement{0x02, 0x00, -1, 0x01, 0x000a, 0x0003}, + cmdLutElement{0x02, 0x00, -1, 0x02, 0x000a, 0x0004}, + cmdLutElement{0x02, 0x00, -1, 0x03, 0x000a, 0x0005}, + cmdLutElement{0x02, 0x00, -1, 0x03, 0x000a, 0x0006}, + cmdLutElement{0x02, 0x00, -1, 0x03, 0x000a, 0x0007}, + cmdLutElement{0x02, 0x00, -1, 0x03, 0x000a, 0x0008}, + cmdLutElement{0x02, 0x00, -1, 0x03, 0x000a, 0x0009}, + cmdLutElement{0x02, 0x00, -1, 0x00, 0x000e, 0x0002}, + cmdLutElement{0x02, 0x00, -1, 0x01, 0x000e, 0x0003}, + cmdLutElement{0x02, 0x00, -1, 0x02, 0x000e, 0x0004}, + cmdLutElement{0x02, 0x00, -1, 0x03, 0x000e, 0x0005}, + cmdLutElement{0x02, 0x00, -1, 0x03, 0x000e, 0x0006}, + cmdLutElement{0x02, 0x00, -1, 0x03, 0x000e, 0x0007}, + cmdLutElement{0x02, 0x00, -1, 0x03, 0x000e, 0x0008}, + cmdLutElement{0x02, 0x00, -1, 0x03, 0x000e, 0x0009}, + cmdLutElement{0x03, 0x00, -1, 0x00, 0x0012, 0x0002}, + cmdLutElement{0x03, 0x00, -1, 0x01, 0x0012, 0x0003}, + cmdLutElement{0x03, 0x00, -1, 0x02, 0x0012, 0x0004}, + cmdLutElement{0x03, 0x00, -1, 0x03, 0x0012, 0x0005}, + cmdLutElement{0x03, 0x00, -1, 0x03, 0x0012, 0x0006}, + cmdLutElement{0x03, 0x00, -1, 0x03, 0x0012, 0x0007}, + cmdLutElement{0x03, 0x00, -1, 0x03, 0x0012, 0x0008}, + cmdLutElement{0x03, 0x00, -1, 0x03, 0x0012, 0x0009}, + cmdLutElement{0x03, 0x00, -1, 0x00, 0x001a, 0x0002}, + cmdLutElement{0x03, 0x00, -1, 0x01, 0x001a, 0x0003}, + cmdLutElement{0x03, 0x00, -1, 0x02, 0x001a, 0x0004}, + cmdLutElement{0x03, 0x00, -1, 0x03, 0x001a, 0x0005}, + cmdLutElement{0x03, 0x00, -1, 0x03, 0x001a, 0x0006}, + cmdLutElement{0x03, 0x00, -1, 0x03, 0x001a, 0x0007}, + cmdLutElement{0x03, 0x00, -1, 0x03, 0x001a, 0x0008}, + cmdLutElement{0x03, 0x00, -1, 0x03, 0x001a, 0x0009}, + cmdLutElement{0x04, 0x00, -1, 0x00, 0x0022, 0x0002}, + cmdLutElement{0x04, 0x00, -1, 0x01, 0x0022, 0x0003}, + cmdLutElement{0x04, 0x00, -1, 0x02, 0x0022, 0x0004}, + 
cmdLutElement{0x04, 0x00, -1, 0x03, 0x0022, 0x0005}, + cmdLutElement{0x04, 0x00, -1, 0x03, 0x0022, 0x0006}, + cmdLutElement{0x04, 0x00, -1, 0x03, 0x0022, 0x0007}, + cmdLutElement{0x04, 0x00, -1, 0x03, 0x0022, 0x0008}, + cmdLutElement{0x04, 0x00, -1, 0x03, 0x0022, 0x0009}, + cmdLutElement{0x04, 0x00, -1, 0x00, 0x0032, 0x0002}, + cmdLutElement{0x04, 0x00, -1, 0x01, 0x0032, 0x0003}, + cmdLutElement{0x04, 0x00, -1, 0x02, 0x0032, 0x0004}, + cmdLutElement{0x04, 0x00, -1, 0x03, 0x0032, 0x0005}, + cmdLutElement{0x04, 0x00, -1, 0x03, 0x0032, 0x0006}, + cmdLutElement{0x04, 0x00, -1, 0x03, 0x0032, 0x0007}, + cmdLutElement{0x04, 0x00, -1, 0x03, 0x0032, 0x0008}, + cmdLutElement{0x04, 0x00, -1, 0x03, 0x0032, 0x0009}, + cmdLutElement{0x05, 0x00, -1, 0x00, 0x0042, 0x0002}, + cmdLutElement{0x05, 0x00, -1, 0x01, 0x0042, 0x0003}, + cmdLutElement{0x05, 0x00, -1, 0x02, 0x0042, 0x0004}, + cmdLutElement{0x05, 0x00, -1, 0x03, 0x0042, 0x0005}, + cmdLutElement{0x05, 0x00, -1, 0x03, 0x0042, 0x0006}, + cmdLutElement{0x05, 0x00, -1, 0x03, 0x0042, 0x0007}, + cmdLutElement{0x05, 0x00, -1, 0x03, 0x0042, 0x0008}, + cmdLutElement{0x05, 0x00, -1, 0x03, 0x0042, 0x0009}, + cmdLutElement{0x05, 0x00, -1, 0x00, 0x0062, 0x0002}, + cmdLutElement{0x05, 0x00, -1, 0x01, 0x0062, 0x0003}, + cmdLutElement{0x05, 0x00, -1, 0x02, 0x0062, 0x0004}, + cmdLutElement{0x05, 0x00, -1, 0x03, 0x0062, 0x0005}, + cmdLutElement{0x05, 0x00, -1, 0x03, 0x0062, 0x0006}, + cmdLutElement{0x05, 0x00, -1, 0x03, 0x0062, 0x0007}, + cmdLutElement{0x05, 0x00, -1, 0x03, 0x0062, 0x0008}, + cmdLutElement{0x05, 0x00, -1, 0x03, 0x0062, 0x0009}, + cmdLutElement{0x02, 0x01, -1, 0x03, 0x000a, 0x000a}, + cmdLutElement{0x02, 0x01, -1, 0x03, 0x000a, 0x000c}, + cmdLutElement{0x02, 0x02, -1, 0x03, 0x000a, 0x000e}, + cmdLutElement{0x02, 0x02, -1, 0x03, 0x000a, 0x0012}, + cmdLutElement{0x02, 0x03, -1, 0x03, 0x000a, 0x0016}, + cmdLutElement{0x02, 0x03, -1, 0x03, 0x000a, 0x001e}, + cmdLutElement{0x02, 0x04, -1, 0x03, 0x000a, 0x0026}, + cmdLutElement{0x02, 0x04, -1, 0x03, 0x000a, 0x0036}, + cmdLutElement{0x02, 0x01, -1, 0x03, 0x000e, 0x000a}, + cmdLutElement{0x02, 0x01, -1, 0x03, 0x000e, 0x000c}, + cmdLutElement{0x02, 0x02, -1, 0x03, 0x000e, 0x000e}, + cmdLutElement{0x02, 0x02, -1, 0x03, 0x000e, 0x0012}, + cmdLutElement{0x02, 0x03, -1, 0x03, 0x000e, 0x0016}, + cmdLutElement{0x02, 0x03, -1, 0x03, 0x000e, 0x001e}, + cmdLutElement{0x02, 0x04, -1, 0x03, 0x000e, 0x0026}, + cmdLutElement{0x02, 0x04, -1, 0x03, 0x000e, 0x0036}, + cmdLutElement{0x03, 0x01, -1, 0x03, 0x0012, 0x000a}, + cmdLutElement{0x03, 0x01, -1, 0x03, 0x0012, 0x000c}, + cmdLutElement{0x03, 0x02, -1, 0x03, 0x0012, 0x000e}, + cmdLutElement{0x03, 0x02, -1, 0x03, 0x0012, 0x0012}, + cmdLutElement{0x03, 0x03, -1, 0x03, 0x0012, 0x0016}, + cmdLutElement{0x03, 0x03, -1, 0x03, 0x0012, 0x001e}, + cmdLutElement{0x03, 0x04, -1, 0x03, 0x0012, 0x0026}, + cmdLutElement{0x03, 0x04, -1, 0x03, 0x0012, 0x0036}, + cmdLutElement{0x03, 0x01, -1, 0x03, 0x001a, 0x000a}, + cmdLutElement{0x03, 0x01, -1, 0x03, 0x001a, 0x000c}, + cmdLutElement{0x03, 0x02, -1, 0x03, 0x001a, 0x000e}, + cmdLutElement{0x03, 0x02, -1, 0x03, 0x001a, 0x0012}, + cmdLutElement{0x03, 0x03, -1, 0x03, 0x001a, 0x0016}, + cmdLutElement{0x03, 0x03, -1, 0x03, 0x001a, 0x001e}, + cmdLutElement{0x03, 0x04, -1, 0x03, 0x001a, 0x0026}, + cmdLutElement{0x03, 0x04, -1, 0x03, 0x001a, 0x0036}, + cmdLutElement{0x04, 0x01, -1, 0x03, 0x0022, 0x000a}, + cmdLutElement{0x04, 0x01, -1, 0x03, 0x0022, 0x000c}, + cmdLutElement{0x04, 0x02, -1, 0x03, 0x0022, 0x000e}, + cmdLutElement{0x04, 0x02, -1, 
0x03, 0x0022, 0x0012}, + cmdLutElement{0x04, 0x03, -1, 0x03, 0x0022, 0x0016}, + cmdLutElement{0x04, 0x03, -1, 0x03, 0x0022, 0x001e}, + cmdLutElement{0x04, 0x04, -1, 0x03, 0x0022, 0x0026}, + cmdLutElement{0x04, 0x04, -1, 0x03, 0x0022, 0x0036}, + cmdLutElement{0x04, 0x01, -1, 0x03, 0x0032, 0x000a}, + cmdLutElement{0x04, 0x01, -1, 0x03, 0x0032, 0x000c}, + cmdLutElement{0x04, 0x02, -1, 0x03, 0x0032, 0x000e}, + cmdLutElement{0x04, 0x02, -1, 0x03, 0x0032, 0x0012}, + cmdLutElement{0x04, 0x03, -1, 0x03, 0x0032, 0x0016}, + cmdLutElement{0x04, 0x03, -1, 0x03, 0x0032, 0x001e}, + cmdLutElement{0x04, 0x04, -1, 0x03, 0x0032, 0x0026}, + cmdLutElement{0x04, 0x04, -1, 0x03, 0x0032, 0x0036}, + cmdLutElement{0x05, 0x01, -1, 0x03, 0x0042, 0x000a}, + cmdLutElement{0x05, 0x01, -1, 0x03, 0x0042, 0x000c}, + cmdLutElement{0x05, 0x02, -1, 0x03, 0x0042, 0x000e}, + cmdLutElement{0x05, 0x02, -1, 0x03, 0x0042, 0x0012}, + cmdLutElement{0x05, 0x03, -1, 0x03, 0x0042, 0x0016}, + cmdLutElement{0x05, 0x03, -1, 0x03, 0x0042, 0x001e}, + cmdLutElement{0x05, 0x04, -1, 0x03, 0x0042, 0x0026}, + cmdLutElement{0x05, 0x04, -1, 0x03, 0x0042, 0x0036}, + cmdLutElement{0x05, 0x01, -1, 0x03, 0x0062, 0x000a}, + cmdLutElement{0x05, 0x01, -1, 0x03, 0x0062, 0x000c}, + cmdLutElement{0x05, 0x02, -1, 0x03, 0x0062, 0x000e}, + cmdLutElement{0x05, 0x02, -1, 0x03, 0x0062, 0x0012}, + cmdLutElement{0x05, 0x03, -1, 0x03, 0x0062, 0x0016}, + cmdLutElement{0x05, 0x03, -1, 0x03, 0x0062, 0x001e}, + cmdLutElement{0x05, 0x04, -1, 0x03, 0x0062, 0x0026}, + cmdLutElement{0x05, 0x04, -1, 0x03, 0x0062, 0x0036}, + cmdLutElement{0x00, 0x05, -1, 0x03, 0x0000, 0x0046}, + cmdLutElement{0x00, 0x05, -1, 0x03, 0x0000, 0x0066}, + cmdLutElement{0x00, 0x06, -1, 0x03, 0x0000, 0x0086}, + cmdLutElement{0x00, 0x07, -1, 0x03, 0x0000, 0x00c6}, + cmdLutElement{0x00, 0x08, -1, 0x03, 0x0000, 0x0146}, + cmdLutElement{0x00, 0x09, -1, 0x03, 0x0000, 0x0246}, + cmdLutElement{0x00, 0x0a, -1, 0x03, 0x0000, 0x0446}, + cmdLutElement{0x00, 0x18, -1, 0x03, 0x0000, 0x0846}, + cmdLutElement{0x00, 0x05, -1, 0x03, 0x0001, 0x0046}, + cmdLutElement{0x00, 0x05, -1, 0x03, 0x0001, 0x0066}, + cmdLutElement{0x00, 0x06, -1, 0x03, 0x0001, 0x0086}, + cmdLutElement{0x00, 0x07, -1, 0x03, 0x0001, 0x00c6}, + cmdLutElement{0x00, 0x08, -1, 0x03, 0x0001, 0x0146}, + cmdLutElement{0x00, 0x09, -1, 0x03, 0x0001, 0x0246}, + cmdLutElement{0x00, 0x0a, -1, 0x03, 0x0001, 0x0446}, + cmdLutElement{0x00, 0x18, -1, 0x03, 0x0001, 0x0846}, + cmdLutElement{0x00, 0x05, -1, 0x03, 0x0002, 0x0046}, + cmdLutElement{0x00, 0x05, -1, 0x03, 0x0002, 0x0066}, + cmdLutElement{0x00, 0x06, -1, 0x03, 0x0002, 0x0086}, + cmdLutElement{0x00, 0x07, -1, 0x03, 0x0002, 0x00c6}, + cmdLutElement{0x00, 0x08, -1, 0x03, 0x0002, 0x0146}, + cmdLutElement{0x00, 0x09, -1, 0x03, 0x0002, 0x0246}, + cmdLutElement{0x00, 0x0a, -1, 0x03, 0x0002, 0x0446}, + cmdLutElement{0x00, 0x18, -1, 0x03, 0x0002, 0x0846}, + cmdLutElement{0x00, 0x05, -1, 0x03, 0x0003, 0x0046}, + cmdLutElement{0x00, 0x05, -1, 0x03, 0x0003, 0x0066}, + cmdLutElement{0x00, 0x06, -1, 0x03, 0x0003, 0x0086}, + cmdLutElement{0x00, 0x07, -1, 0x03, 0x0003, 0x00c6}, + cmdLutElement{0x00, 0x08, -1, 0x03, 0x0003, 0x0146}, + cmdLutElement{0x00, 0x09, -1, 0x03, 0x0003, 0x0246}, + cmdLutElement{0x00, 0x0a, -1, 0x03, 0x0003, 0x0446}, + cmdLutElement{0x00, 0x18, -1, 0x03, 0x0003, 0x0846}, + cmdLutElement{0x00, 0x05, -1, 0x03, 0x0004, 0x0046}, + cmdLutElement{0x00, 0x05, -1, 0x03, 0x0004, 0x0066}, + cmdLutElement{0x00, 0x06, -1, 0x03, 0x0004, 0x0086}, + cmdLutElement{0x00, 0x07, -1, 0x03, 0x0004, 0x00c6}, + 
cmdLutElement{0x00, 0x08, -1, 0x03, 0x0004, 0x0146}, + cmdLutElement{0x00, 0x09, -1, 0x03, 0x0004, 0x0246}, + cmdLutElement{0x00, 0x0a, -1, 0x03, 0x0004, 0x0446}, + cmdLutElement{0x00, 0x18, -1, 0x03, 0x0004, 0x0846}, + cmdLutElement{0x00, 0x05, -1, 0x03, 0x0005, 0x0046}, + cmdLutElement{0x00, 0x05, -1, 0x03, 0x0005, 0x0066}, + cmdLutElement{0x00, 0x06, -1, 0x03, 0x0005, 0x0086}, + cmdLutElement{0x00, 0x07, -1, 0x03, 0x0005, 0x00c6}, + cmdLutElement{0x00, 0x08, -1, 0x03, 0x0005, 0x0146}, + cmdLutElement{0x00, 0x09, -1, 0x03, 0x0005, 0x0246}, + cmdLutElement{0x00, 0x0a, -1, 0x03, 0x0005, 0x0446}, + cmdLutElement{0x00, 0x18, -1, 0x03, 0x0005, 0x0846}, + cmdLutElement{0x01, 0x05, -1, 0x03, 0x0006, 0x0046}, + cmdLutElement{0x01, 0x05, -1, 0x03, 0x0006, 0x0066}, + cmdLutElement{0x01, 0x06, -1, 0x03, 0x0006, 0x0086}, + cmdLutElement{0x01, 0x07, -1, 0x03, 0x0006, 0x00c6}, + cmdLutElement{0x01, 0x08, -1, 0x03, 0x0006, 0x0146}, + cmdLutElement{0x01, 0x09, -1, 0x03, 0x0006, 0x0246}, + cmdLutElement{0x01, 0x0a, -1, 0x03, 0x0006, 0x0446}, + cmdLutElement{0x01, 0x18, -1, 0x03, 0x0006, 0x0846}, + cmdLutElement{0x01, 0x05, -1, 0x03, 0x0008, 0x0046}, + cmdLutElement{0x01, 0x05, -1, 0x03, 0x0008, 0x0066}, + cmdLutElement{0x01, 0x06, -1, 0x03, 0x0008, 0x0086}, + cmdLutElement{0x01, 0x07, -1, 0x03, 0x0008, 0x00c6}, + cmdLutElement{0x01, 0x08, -1, 0x03, 0x0008, 0x0146}, + cmdLutElement{0x01, 0x09, -1, 0x03, 0x0008, 0x0246}, + cmdLutElement{0x01, 0x0a, -1, 0x03, 0x0008, 0x0446}, + cmdLutElement{0x01, 0x18, -1, 0x03, 0x0008, 0x0846}, + cmdLutElement{0x06, 0x00, -1, 0x00, 0x0082, 0x0002}, + cmdLutElement{0x06, 0x00, -1, 0x01, 0x0082, 0x0003}, + cmdLutElement{0x06, 0x00, -1, 0x02, 0x0082, 0x0004}, + cmdLutElement{0x06, 0x00, -1, 0x03, 0x0082, 0x0005}, + cmdLutElement{0x06, 0x00, -1, 0x03, 0x0082, 0x0006}, + cmdLutElement{0x06, 0x00, -1, 0x03, 0x0082, 0x0007}, + cmdLutElement{0x06, 0x00, -1, 0x03, 0x0082, 0x0008}, + cmdLutElement{0x06, 0x00, -1, 0x03, 0x0082, 0x0009}, + cmdLutElement{0x07, 0x00, -1, 0x00, 0x00c2, 0x0002}, + cmdLutElement{0x07, 0x00, -1, 0x01, 0x00c2, 0x0003}, + cmdLutElement{0x07, 0x00, -1, 0x02, 0x00c2, 0x0004}, + cmdLutElement{0x07, 0x00, -1, 0x03, 0x00c2, 0x0005}, + cmdLutElement{0x07, 0x00, -1, 0x03, 0x00c2, 0x0006}, + cmdLutElement{0x07, 0x00, -1, 0x03, 0x00c2, 0x0007}, + cmdLutElement{0x07, 0x00, -1, 0x03, 0x00c2, 0x0008}, + cmdLutElement{0x07, 0x00, -1, 0x03, 0x00c2, 0x0009}, + cmdLutElement{0x08, 0x00, -1, 0x00, 0x0142, 0x0002}, + cmdLutElement{0x08, 0x00, -1, 0x01, 0x0142, 0x0003}, + cmdLutElement{0x08, 0x00, -1, 0x02, 0x0142, 0x0004}, + cmdLutElement{0x08, 0x00, -1, 0x03, 0x0142, 0x0005}, + cmdLutElement{0x08, 0x00, -1, 0x03, 0x0142, 0x0006}, + cmdLutElement{0x08, 0x00, -1, 0x03, 0x0142, 0x0007}, + cmdLutElement{0x08, 0x00, -1, 0x03, 0x0142, 0x0008}, + cmdLutElement{0x08, 0x00, -1, 0x03, 0x0142, 0x0009}, + cmdLutElement{0x09, 0x00, -1, 0x00, 0x0242, 0x0002}, + cmdLutElement{0x09, 0x00, -1, 0x01, 0x0242, 0x0003}, + cmdLutElement{0x09, 0x00, -1, 0x02, 0x0242, 0x0004}, + cmdLutElement{0x09, 0x00, -1, 0x03, 0x0242, 0x0005}, + cmdLutElement{0x09, 0x00, -1, 0x03, 0x0242, 0x0006}, + cmdLutElement{0x09, 0x00, -1, 0x03, 0x0242, 0x0007}, + cmdLutElement{0x09, 0x00, -1, 0x03, 0x0242, 0x0008}, + cmdLutElement{0x09, 0x00, -1, 0x03, 0x0242, 0x0009}, + cmdLutElement{0x0a, 0x00, -1, 0x00, 0x0442, 0x0002}, + cmdLutElement{0x0a, 0x00, -1, 0x01, 0x0442, 0x0003}, + cmdLutElement{0x0a, 0x00, -1, 0x02, 0x0442, 0x0004}, + cmdLutElement{0x0a, 0x00, -1, 0x03, 0x0442, 0x0005}, + cmdLutElement{0x0a, 0x00, -1, 
0x03, 0x0442, 0x0006}, + cmdLutElement{0x0a, 0x00, -1, 0x03, 0x0442, 0x0007}, + cmdLutElement{0x0a, 0x00, -1, 0x03, 0x0442, 0x0008}, + cmdLutElement{0x0a, 0x00, -1, 0x03, 0x0442, 0x0009}, + cmdLutElement{0x0c, 0x00, -1, 0x00, 0x0842, 0x0002}, + cmdLutElement{0x0c, 0x00, -1, 0x01, 0x0842, 0x0003}, + cmdLutElement{0x0c, 0x00, -1, 0x02, 0x0842, 0x0004}, + cmdLutElement{0x0c, 0x00, -1, 0x03, 0x0842, 0x0005}, + cmdLutElement{0x0c, 0x00, -1, 0x03, 0x0842, 0x0006}, + cmdLutElement{0x0c, 0x00, -1, 0x03, 0x0842, 0x0007}, + cmdLutElement{0x0c, 0x00, -1, 0x03, 0x0842, 0x0008}, + cmdLutElement{0x0c, 0x00, -1, 0x03, 0x0842, 0x0009}, + cmdLutElement{0x0e, 0x00, -1, 0x00, 0x1842, 0x0002}, + cmdLutElement{0x0e, 0x00, -1, 0x01, 0x1842, 0x0003}, + cmdLutElement{0x0e, 0x00, -1, 0x02, 0x1842, 0x0004}, + cmdLutElement{0x0e, 0x00, -1, 0x03, 0x1842, 0x0005}, + cmdLutElement{0x0e, 0x00, -1, 0x03, 0x1842, 0x0006}, + cmdLutElement{0x0e, 0x00, -1, 0x03, 0x1842, 0x0007}, + cmdLutElement{0x0e, 0x00, -1, 0x03, 0x1842, 0x0008}, + cmdLutElement{0x0e, 0x00, -1, 0x03, 0x1842, 0x0009}, + cmdLutElement{0x18, 0x00, -1, 0x00, 0x5842, 0x0002}, + cmdLutElement{0x18, 0x00, -1, 0x01, 0x5842, 0x0003}, + cmdLutElement{0x18, 0x00, -1, 0x02, 0x5842, 0x0004}, + cmdLutElement{0x18, 0x00, -1, 0x03, 0x5842, 0x0005}, + cmdLutElement{0x18, 0x00, -1, 0x03, 0x5842, 0x0006}, + cmdLutElement{0x18, 0x00, -1, 0x03, 0x5842, 0x0007}, + cmdLutElement{0x18, 0x00, -1, 0x03, 0x5842, 0x0008}, + cmdLutElement{0x18, 0x00, -1, 0x03, 0x5842, 0x0009}, + cmdLutElement{0x02, 0x05, -1, 0x03, 0x000a, 0x0046}, + cmdLutElement{0x02, 0x05, -1, 0x03, 0x000a, 0x0066}, + cmdLutElement{0x02, 0x06, -1, 0x03, 0x000a, 0x0086}, + cmdLutElement{0x02, 0x07, -1, 0x03, 0x000a, 0x00c6}, + cmdLutElement{0x02, 0x08, -1, 0x03, 0x000a, 0x0146}, + cmdLutElement{0x02, 0x09, -1, 0x03, 0x000a, 0x0246}, + cmdLutElement{0x02, 0x0a, -1, 0x03, 0x000a, 0x0446}, + cmdLutElement{0x02, 0x18, -1, 0x03, 0x000a, 0x0846}, + cmdLutElement{0x02, 0x05, -1, 0x03, 0x000e, 0x0046}, + cmdLutElement{0x02, 0x05, -1, 0x03, 0x000e, 0x0066}, + cmdLutElement{0x02, 0x06, -1, 0x03, 0x000e, 0x0086}, + cmdLutElement{0x02, 0x07, -1, 0x03, 0x000e, 0x00c6}, + cmdLutElement{0x02, 0x08, -1, 0x03, 0x000e, 0x0146}, + cmdLutElement{0x02, 0x09, -1, 0x03, 0x000e, 0x0246}, + cmdLutElement{0x02, 0x0a, -1, 0x03, 0x000e, 0x0446}, + cmdLutElement{0x02, 0x18, -1, 0x03, 0x000e, 0x0846}, + cmdLutElement{0x03, 0x05, -1, 0x03, 0x0012, 0x0046}, + cmdLutElement{0x03, 0x05, -1, 0x03, 0x0012, 0x0066}, + cmdLutElement{0x03, 0x06, -1, 0x03, 0x0012, 0x0086}, + cmdLutElement{0x03, 0x07, -1, 0x03, 0x0012, 0x00c6}, + cmdLutElement{0x03, 0x08, -1, 0x03, 0x0012, 0x0146}, + cmdLutElement{0x03, 0x09, -1, 0x03, 0x0012, 0x0246}, + cmdLutElement{0x03, 0x0a, -1, 0x03, 0x0012, 0x0446}, + cmdLutElement{0x03, 0x18, -1, 0x03, 0x0012, 0x0846}, + cmdLutElement{0x03, 0x05, -1, 0x03, 0x001a, 0x0046}, + cmdLutElement{0x03, 0x05, -1, 0x03, 0x001a, 0x0066}, + cmdLutElement{0x03, 0x06, -1, 0x03, 0x001a, 0x0086}, + cmdLutElement{0x03, 0x07, -1, 0x03, 0x001a, 0x00c6}, + cmdLutElement{0x03, 0x08, -1, 0x03, 0x001a, 0x0146}, + cmdLutElement{0x03, 0x09, -1, 0x03, 0x001a, 0x0246}, + cmdLutElement{0x03, 0x0a, -1, 0x03, 0x001a, 0x0446}, + cmdLutElement{0x03, 0x18, -1, 0x03, 0x001a, 0x0846}, + cmdLutElement{0x04, 0x05, -1, 0x03, 0x0022, 0x0046}, + cmdLutElement{0x04, 0x05, -1, 0x03, 0x0022, 0x0066}, + cmdLutElement{0x04, 0x06, -1, 0x03, 0x0022, 0x0086}, + cmdLutElement{0x04, 0x07, -1, 0x03, 0x0022, 0x00c6}, + cmdLutElement{0x04, 0x08, -1, 0x03, 0x0022, 0x0146}, + 
cmdLutElement{0x04, 0x09, -1, 0x03, 0x0022, 0x0246}, + cmdLutElement{0x04, 0x0a, -1, 0x03, 0x0022, 0x0446}, + cmdLutElement{0x04, 0x18, -1, 0x03, 0x0022, 0x0846}, + cmdLutElement{0x04, 0x05, -1, 0x03, 0x0032, 0x0046}, + cmdLutElement{0x04, 0x05, -1, 0x03, 0x0032, 0x0066}, + cmdLutElement{0x04, 0x06, -1, 0x03, 0x0032, 0x0086}, + cmdLutElement{0x04, 0x07, -1, 0x03, 0x0032, 0x00c6}, + cmdLutElement{0x04, 0x08, -1, 0x03, 0x0032, 0x0146}, + cmdLutElement{0x04, 0x09, -1, 0x03, 0x0032, 0x0246}, + cmdLutElement{0x04, 0x0a, -1, 0x03, 0x0032, 0x0446}, + cmdLutElement{0x04, 0x18, -1, 0x03, 0x0032, 0x0846}, + cmdLutElement{0x05, 0x05, -1, 0x03, 0x0042, 0x0046}, + cmdLutElement{0x05, 0x05, -1, 0x03, 0x0042, 0x0066}, + cmdLutElement{0x05, 0x06, -1, 0x03, 0x0042, 0x0086}, + cmdLutElement{0x05, 0x07, -1, 0x03, 0x0042, 0x00c6}, + cmdLutElement{0x05, 0x08, -1, 0x03, 0x0042, 0x0146}, + cmdLutElement{0x05, 0x09, -1, 0x03, 0x0042, 0x0246}, + cmdLutElement{0x05, 0x0a, -1, 0x03, 0x0042, 0x0446}, + cmdLutElement{0x05, 0x18, -1, 0x03, 0x0042, 0x0846}, + cmdLutElement{0x05, 0x05, -1, 0x03, 0x0062, 0x0046}, + cmdLutElement{0x05, 0x05, -1, 0x03, 0x0062, 0x0066}, + cmdLutElement{0x05, 0x06, -1, 0x03, 0x0062, 0x0086}, + cmdLutElement{0x05, 0x07, -1, 0x03, 0x0062, 0x00c6}, + cmdLutElement{0x05, 0x08, -1, 0x03, 0x0062, 0x0146}, + cmdLutElement{0x05, 0x09, -1, 0x03, 0x0062, 0x0246}, + cmdLutElement{0x05, 0x0a, -1, 0x03, 0x0062, 0x0446}, + cmdLutElement{0x05, 0x18, -1, 0x03, 0x0062, 0x0846}, + cmdLutElement{0x06, 0x01, -1, 0x03, 0x0082, 0x000a}, + cmdLutElement{0x06, 0x01, -1, 0x03, 0x0082, 0x000c}, + cmdLutElement{0x06, 0x02, -1, 0x03, 0x0082, 0x000e}, + cmdLutElement{0x06, 0x02, -1, 0x03, 0x0082, 0x0012}, + cmdLutElement{0x06, 0x03, -1, 0x03, 0x0082, 0x0016}, + cmdLutElement{0x06, 0x03, -1, 0x03, 0x0082, 0x001e}, + cmdLutElement{0x06, 0x04, -1, 0x03, 0x0082, 0x0026}, + cmdLutElement{0x06, 0x04, -1, 0x03, 0x0082, 0x0036}, + cmdLutElement{0x07, 0x01, -1, 0x03, 0x00c2, 0x000a}, + cmdLutElement{0x07, 0x01, -1, 0x03, 0x00c2, 0x000c}, + cmdLutElement{0x07, 0x02, -1, 0x03, 0x00c2, 0x000e}, + cmdLutElement{0x07, 0x02, -1, 0x03, 0x00c2, 0x0012}, + cmdLutElement{0x07, 0x03, -1, 0x03, 0x00c2, 0x0016}, + cmdLutElement{0x07, 0x03, -1, 0x03, 0x00c2, 0x001e}, + cmdLutElement{0x07, 0x04, -1, 0x03, 0x00c2, 0x0026}, + cmdLutElement{0x07, 0x04, -1, 0x03, 0x00c2, 0x0036}, + cmdLutElement{0x08, 0x01, -1, 0x03, 0x0142, 0x000a}, + cmdLutElement{0x08, 0x01, -1, 0x03, 0x0142, 0x000c}, + cmdLutElement{0x08, 0x02, -1, 0x03, 0x0142, 0x000e}, + cmdLutElement{0x08, 0x02, -1, 0x03, 0x0142, 0x0012}, + cmdLutElement{0x08, 0x03, -1, 0x03, 0x0142, 0x0016}, + cmdLutElement{0x08, 0x03, -1, 0x03, 0x0142, 0x001e}, + cmdLutElement{0x08, 0x04, -1, 0x03, 0x0142, 0x0026}, + cmdLutElement{0x08, 0x04, -1, 0x03, 0x0142, 0x0036}, + cmdLutElement{0x09, 0x01, -1, 0x03, 0x0242, 0x000a}, + cmdLutElement{0x09, 0x01, -1, 0x03, 0x0242, 0x000c}, + cmdLutElement{0x09, 0x02, -1, 0x03, 0x0242, 0x000e}, + cmdLutElement{0x09, 0x02, -1, 0x03, 0x0242, 0x0012}, + cmdLutElement{0x09, 0x03, -1, 0x03, 0x0242, 0x0016}, + cmdLutElement{0x09, 0x03, -1, 0x03, 0x0242, 0x001e}, + cmdLutElement{0x09, 0x04, -1, 0x03, 0x0242, 0x0026}, + cmdLutElement{0x09, 0x04, -1, 0x03, 0x0242, 0x0036}, + cmdLutElement{0x0a, 0x01, -1, 0x03, 0x0442, 0x000a}, + cmdLutElement{0x0a, 0x01, -1, 0x03, 0x0442, 0x000c}, + cmdLutElement{0x0a, 0x02, -1, 0x03, 0x0442, 0x000e}, + cmdLutElement{0x0a, 0x02, -1, 0x03, 0x0442, 0x0012}, + cmdLutElement{0x0a, 0x03, -1, 0x03, 0x0442, 0x0016}, + cmdLutElement{0x0a, 0x03, -1, 
0x03, 0x0442, 0x001e}, + cmdLutElement{0x0a, 0x04, -1, 0x03, 0x0442, 0x0026}, + cmdLutElement{0x0a, 0x04, -1, 0x03, 0x0442, 0x0036}, + cmdLutElement{0x0c, 0x01, -1, 0x03, 0x0842, 0x000a}, + cmdLutElement{0x0c, 0x01, -1, 0x03, 0x0842, 0x000c}, + cmdLutElement{0x0c, 0x02, -1, 0x03, 0x0842, 0x000e}, + cmdLutElement{0x0c, 0x02, -1, 0x03, 0x0842, 0x0012}, + cmdLutElement{0x0c, 0x03, -1, 0x03, 0x0842, 0x0016}, + cmdLutElement{0x0c, 0x03, -1, 0x03, 0x0842, 0x001e}, + cmdLutElement{0x0c, 0x04, -1, 0x03, 0x0842, 0x0026}, + cmdLutElement{0x0c, 0x04, -1, 0x03, 0x0842, 0x0036}, + cmdLutElement{0x0e, 0x01, -1, 0x03, 0x1842, 0x000a}, + cmdLutElement{0x0e, 0x01, -1, 0x03, 0x1842, 0x000c}, + cmdLutElement{0x0e, 0x02, -1, 0x03, 0x1842, 0x000e}, + cmdLutElement{0x0e, 0x02, -1, 0x03, 0x1842, 0x0012}, + cmdLutElement{0x0e, 0x03, -1, 0x03, 0x1842, 0x0016}, + cmdLutElement{0x0e, 0x03, -1, 0x03, 0x1842, 0x001e}, + cmdLutElement{0x0e, 0x04, -1, 0x03, 0x1842, 0x0026}, + cmdLutElement{0x0e, 0x04, -1, 0x03, 0x1842, 0x0036}, + cmdLutElement{0x18, 0x01, -1, 0x03, 0x5842, 0x000a}, + cmdLutElement{0x18, 0x01, -1, 0x03, 0x5842, 0x000c}, + cmdLutElement{0x18, 0x02, -1, 0x03, 0x5842, 0x000e}, + cmdLutElement{0x18, 0x02, -1, 0x03, 0x5842, 0x0012}, + cmdLutElement{0x18, 0x03, -1, 0x03, 0x5842, 0x0016}, + cmdLutElement{0x18, 0x03, -1, 0x03, 0x5842, 0x001e}, + cmdLutElement{0x18, 0x04, -1, 0x03, 0x5842, 0x0026}, + cmdLutElement{0x18, 0x04, -1, 0x03, 0x5842, 0x0036}, + cmdLutElement{0x06, 0x05, -1, 0x03, 0x0082, 0x0046}, + cmdLutElement{0x06, 0x05, -1, 0x03, 0x0082, 0x0066}, + cmdLutElement{0x06, 0x06, -1, 0x03, 0x0082, 0x0086}, + cmdLutElement{0x06, 0x07, -1, 0x03, 0x0082, 0x00c6}, + cmdLutElement{0x06, 0x08, -1, 0x03, 0x0082, 0x0146}, + cmdLutElement{0x06, 0x09, -1, 0x03, 0x0082, 0x0246}, + cmdLutElement{0x06, 0x0a, -1, 0x03, 0x0082, 0x0446}, + cmdLutElement{0x06, 0x18, -1, 0x03, 0x0082, 0x0846}, + cmdLutElement{0x07, 0x05, -1, 0x03, 0x00c2, 0x0046}, + cmdLutElement{0x07, 0x05, -1, 0x03, 0x00c2, 0x0066}, + cmdLutElement{0x07, 0x06, -1, 0x03, 0x00c2, 0x0086}, + cmdLutElement{0x07, 0x07, -1, 0x03, 0x00c2, 0x00c6}, + cmdLutElement{0x07, 0x08, -1, 0x03, 0x00c2, 0x0146}, + cmdLutElement{0x07, 0x09, -1, 0x03, 0x00c2, 0x0246}, + cmdLutElement{0x07, 0x0a, -1, 0x03, 0x00c2, 0x0446}, + cmdLutElement{0x07, 0x18, -1, 0x03, 0x00c2, 0x0846}, + cmdLutElement{0x08, 0x05, -1, 0x03, 0x0142, 0x0046}, + cmdLutElement{0x08, 0x05, -1, 0x03, 0x0142, 0x0066}, + cmdLutElement{0x08, 0x06, -1, 0x03, 0x0142, 0x0086}, + cmdLutElement{0x08, 0x07, -1, 0x03, 0x0142, 0x00c6}, + cmdLutElement{0x08, 0x08, -1, 0x03, 0x0142, 0x0146}, + cmdLutElement{0x08, 0x09, -1, 0x03, 0x0142, 0x0246}, + cmdLutElement{0x08, 0x0a, -1, 0x03, 0x0142, 0x0446}, + cmdLutElement{0x08, 0x18, -1, 0x03, 0x0142, 0x0846}, + cmdLutElement{0x09, 0x05, -1, 0x03, 0x0242, 0x0046}, + cmdLutElement{0x09, 0x05, -1, 0x03, 0x0242, 0x0066}, + cmdLutElement{0x09, 0x06, -1, 0x03, 0x0242, 0x0086}, + cmdLutElement{0x09, 0x07, -1, 0x03, 0x0242, 0x00c6}, + cmdLutElement{0x09, 0x08, -1, 0x03, 0x0242, 0x0146}, + cmdLutElement{0x09, 0x09, -1, 0x03, 0x0242, 0x0246}, + cmdLutElement{0x09, 0x0a, -1, 0x03, 0x0242, 0x0446}, + cmdLutElement{0x09, 0x18, -1, 0x03, 0x0242, 0x0846}, + cmdLutElement{0x0a, 0x05, -1, 0x03, 0x0442, 0x0046}, + cmdLutElement{0x0a, 0x05, -1, 0x03, 0x0442, 0x0066}, + cmdLutElement{0x0a, 0x06, -1, 0x03, 0x0442, 0x0086}, + cmdLutElement{0x0a, 0x07, -1, 0x03, 0x0442, 0x00c6}, + cmdLutElement{0x0a, 0x08, -1, 0x03, 0x0442, 0x0146}, + cmdLutElement{0x0a, 0x09, -1, 0x03, 0x0442, 0x0246}, + 
cmdLutElement{0x0a, 0x0a, -1, 0x03, 0x0442, 0x0446}, + cmdLutElement{0x0a, 0x18, -1, 0x03, 0x0442, 0x0846}, + cmdLutElement{0x0c, 0x05, -1, 0x03, 0x0842, 0x0046}, + cmdLutElement{0x0c, 0x05, -1, 0x03, 0x0842, 0x0066}, + cmdLutElement{0x0c, 0x06, -1, 0x03, 0x0842, 0x0086}, + cmdLutElement{0x0c, 0x07, -1, 0x03, 0x0842, 0x00c6}, + cmdLutElement{0x0c, 0x08, -1, 0x03, 0x0842, 0x0146}, + cmdLutElement{0x0c, 0x09, -1, 0x03, 0x0842, 0x0246}, + cmdLutElement{0x0c, 0x0a, -1, 0x03, 0x0842, 0x0446}, + cmdLutElement{0x0c, 0x18, -1, 0x03, 0x0842, 0x0846}, + cmdLutElement{0x0e, 0x05, -1, 0x03, 0x1842, 0x0046}, + cmdLutElement{0x0e, 0x05, -1, 0x03, 0x1842, 0x0066}, + cmdLutElement{0x0e, 0x06, -1, 0x03, 0x1842, 0x0086}, + cmdLutElement{0x0e, 0x07, -1, 0x03, 0x1842, 0x00c6}, + cmdLutElement{0x0e, 0x08, -1, 0x03, 0x1842, 0x0146}, + cmdLutElement{0x0e, 0x09, -1, 0x03, 0x1842, 0x0246}, + cmdLutElement{0x0e, 0x0a, -1, 0x03, 0x1842, 0x0446}, + cmdLutElement{0x0e, 0x18, -1, 0x03, 0x1842, 0x0846}, + cmdLutElement{0x18, 0x05, -1, 0x03, 0x5842, 0x0046}, + cmdLutElement{0x18, 0x05, -1, 0x03, 0x5842, 0x0066}, + cmdLutElement{0x18, 0x06, -1, 0x03, 0x5842, 0x0086}, + cmdLutElement{0x18, 0x07, -1, 0x03, 0x5842, 0x00c6}, + cmdLutElement{0x18, 0x08, -1, 0x03, 0x5842, 0x0146}, + cmdLutElement{0x18, 0x09, -1, 0x03, 0x5842, 0x0246}, + cmdLutElement{0x18, 0x0a, -1, 0x03, 0x5842, 0x0446}, + cmdLutElement{0x18, 0x18, -1, 0x03, 0x5842, 0x0846}, +} diff --git a/vendor/github.com/andybalholm/brotli/quality.go b/vendor/github.com/andybalholm/brotli/quality.go new file mode 100644 index 00000000000..49709a38239 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/quality.go @@ -0,0 +1,196 @@ +package brotli + +const fastOnePassCompressionQuality = 0 + +const fastTwoPassCompressionQuality = 1 + +const zopflificationQuality = 10 + +const hqZopflificationQuality = 11 + +const maxQualityForStaticEntropyCodes = 2 + +const minQualityForBlockSplit = 4 + +const minQualityForNonzeroDistanceParams = 4 + +const minQualityForOptimizeHistograms = 4 + +const minQualityForExtensiveReferenceSearch = 5 + +const minQualityForContextModeling = 5 + +const minQualityForHqContextModeling = 7 + +const minQualityForHqBlockSplitting = 10 + +/* For quality below MIN_QUALITY_FOR_BLOCK_SPLIT there is no block splitting, + so we buffer at most this much literals and commands. */ +const maxNumDelayedSymbols = 0x2FFF + +/* Returns hash-table size for quality levels 0 and 1. */ +func maxHashTableSize(quality int) uint { + if quality == fastOnePassCompressionQuality { + return 1 << 15 + } else { + return 1 << 17 + } +} + +/* The maximum length for which the zopflification uses distinct distances. */ +const maxZopfliLenQuality10 = 150 + +const maxZopfliLenQuality11 = 325 + +/* Do not thoroughly search when a long copy is found. */ +const longCopyQuickStep = 16384 + +func maxZopfliLen(params *encoderParams) uint { + if params.quality <= 10 { + return maxZopfliLenQuality10 + } else { + return maxZopfliLenQuality11 + } +} + +/* Number of best candidates to evaluate to expand Zopfli chain. 
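+   Quality 10 evaluates a single candidate per position while quality 11
+   evaluates five, trading speed for a more thorough match search.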
*/ +func maxZopfliCandidates(params *encoderParams) uint { + if params.quality <= 10 { + return 1 + } else { + return 5 + } +} + +func sanitizeParams(params *encoderParams) { + params.quality = brotli_min_int(maxQuality, brotli_max_int(minQuality, params.quality)) + if params.quality <= maxQualityForStaticEntropyCodes { + params.large_window = false + } + + if params.lgwin < minWindowBits { + params.lgwin = minWindowBits + } else { + var max_lgwin int + if params.large_window { + max_lgwin = largeMaxWindowBits + } else { + max_lgwin = maxWindowBits + } + if params.lgwin > uint(max_lgwin) { + params.lgwin = uint(max_lgwin) + } + } +} + +/* Returns optimized lg_block value. */ +func computeLgBlock(params *encoderParams) int { + var lgblock int = params.lgblock + if params.quality == fastOnePassCompressionQuality || params.quality == fastTwoPassCompressionQuality { + lgblock = int(params.lgwin) + } else if params.quality < minQualityForBlockSplit { + lgblock = 14 + } else if lgblock == 0 { + lgblock = 16 + if params.quality >= 9 && params.lgwin > uint(lgblock) { + lgblock = brotli_min_int(18, int(params.lgwin)) + } + } else { + lgblock = brotli_min_int(maxInputBlockBits, brotli_max_int(minInputBlockBits, lgblock)) + } + + return lgblock +} + +/* Returns log2 of the size of main ring buffer area. + Allocate at least lgwin + 1 bits for the ring buffer so that the newly + added block fits there completely and we still get lgwin bits and at least + read_block_size_bits + 1 bits because the copy tail length needs to be + smaller than ring-buffer size. */ +func computeRbBits(params *encoderParams) int { + return 1 + brotli_max_int(int(params.lgwin), params.lgblock) +} + +func maxMetablockSize(params *encoderParams) uint { + var bits int = brotli_min_int(computeRbBits(params), maxInputBlockBits) + return uint(1) << uint(bits) +} + +/* When searching for backward references and have not seen matches for a long + time, we can skip some match lookups. Unsuccessful match lookups are very + expensive and this kind of a heuristic speeds up compression quite a lot. + At first 8 byte strides are taken and every second byte is put to hasher. + After 4x more literals stride by 16 bytes, every put 4-th byte to hasher. + Applied only to qualities 2 to 9. 
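+   The value returned below is the length of the unmatched literal spree
+   that triggers the sparse search: 64 bytes for qualities below 9 and
+   512 bytes at quality 9.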
*/
+func literalSpreeLengthForSparseSearch(params *encoderParams) uint {
+	if params.quality < 9 {
+		return 64
+	} else {
+		return 512
+	}
+}
+
+func chooseHasher(params *encoderParams, hparams *hasherParams) {
+	if params.quality > 9 {
+		hparams.type_ = 10
+	} else if params.quality == 4 && params.size_hint >= 1<<20 {
+		hparams.type_ = 54
+	} else if params.quality < 5 {
+		hparams.type_ = params.quality
+	} else if params.lgwin <= 16 {
+		if params.quality < 7 {
+			hparams.type_ = 40
+		} else if params.quality < 9 {
+			hparams.type_ = 41
+		} else {
+			hparams.type_ = 42
+		}
+	} else if params.size_hint >= 1<<20 && params.lgwin >= 19 {
+		hparams.type_ = 6
+		hparams.block_bits = params.quality - 1
+		hparams.bucket_bits = 15
+		hparams.hash_len = 5
+		if params.quality < 7 {
+			hparams.num_last_distances_to_check = 4
+		} else if params.quality < 9 {
+			hparams.num_last_distances_to_check = 10
+		} else {
+			hparams.num_last_distances_to_check = 16
+		}
+	} else {
+		hparams.type_ = 5
+		hparams.block_bits = params.quality - 1
+		if params.quality < 7 {
+			hparams.bucket_bits = 14
+		} else {
+			hparams.bucket_bits = 15
+		}
+		if params.quality < 7 {
+			hparams.num_last_distances_to_check = 4
+		} else if params.quality < 9 {
+			hparams.num_last_distances_to_check = 10
+		} else {
+			hparams.num_last_distances_to_check = 16
+		}
+	}
+
+	if params.lgwin > 24 {
+		/* Different hashers for large window brotli: not for qualities <= 2,
+		these are too fast for large window. Not for qualities >= 10: their
+		hasher already works well with large window. So the changes are:
+		H3 --> H35: for quality 3.
+		H54 --> H55: for quality 4 with size hint > 1MB
+		H6 --> H65: for qualities 5, 6, 7, 8, 9. */
+		if hparams.type_ == 3 {
+			hparams.type_ = 35
+		}
+
+		if hparams.type_ == 54 {
+			hparams.type_ = 55
+		}
+
+		if hparams.type_ == 6 {
+			hparams.type_ = 65
+		}
+	}
+}
diff --git a/vendor/github.com/andybalholm/brotli/reader.go b/vendor/github.com/andybalholm/brotli/reader.go
new file mode 100644
index 00000000000..cdc67645a8a
--- /dev/null
+++ b/vendor/github.com/andybalholm/brotli/reader.go
@@ -0,0 +1,102 @@
+package brotli
+
+import (
+	"errors"
+	"io"
+)
+
+type decodeError int
+
+func (err decodeError) Error() string {
+	return "brotli: " + string(decoderErrorString(int(err)))
+}
+
+var errExcessiveInput = errors.New("brotli: excessive input")
+var errInvalidState = errors.New("brotli: invalid state")
+
+// readBufSize is a "good" buffer size that avoids excessive round-trips
+// between C and Go but doesn't waste too much memory on buffering.
+// It is arbitrarily chosen to be equal to the constant used in io.Copy.
+const readBufSize = 32 * 1024
+
+// NewReader creates a new Reader reading from the given reader.
+func NewReader(src io.Reader) *Reader {
+	r := new(Reader)
+	r.Reset(src)
+	return r
+}
+
+// Reset discards the Reader's state and makes it equivalent to the result of
+// its original state from NewReader, but reading from src instead.
+// This permits reusing a Reader rather than allocating a new one.
+// The returned error is always nil.
+func (r *Reader) Reset(src io.Reader) error {
+	decoderStateInit(r)
+	r.src = src
+	if r.buf == nil {
+		r.buf = make([]byte, readBufSize)
+	}
+	return nil
+}
+
+func (r *Reader) Read(p []byte) (n int, err error) {
+	if !decoderHasMoreOutput(r) && len(r.in) == 0 {
+		m, readErr := r.src.Read(r.buf)
+		if m == 0 {
+			// If readErr is `nil`, we just proxy underlying stream behavior.
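+			// A (0, nil) return is permitted by the io.Reader contract and
+			// indicates that nothing happened; the caller may simply retry.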
+ return 0, readErr + } + r.in = r.buf[:m] + } + + if len(p) == 0 { + return 0, nil + } + + for { + var written uint + in_len := uint(len(r.in)) + out_len := uint(len(p)) + in_remaining := in_len + out_remaining := out_len + result := decoderDecompressStream(r, &in_remaining, &r.in, &out_remaining, &p) + written = out_len - out_remaining + n = int(written) + + switch result { + case decoderResultSuccess: + if len(r.in) > 0 { + return n, errExcessiveInput + } + return n, nil + case decoderResultError: + return n, decodeError(decoderGetErrorCode(r)) + case decoderResultNeedsMoreOutput: + if n == 0 { + return 0, io.ErrShortBuffer + } + return n, nil + case decoderNeedsMoreInput: + } + + if len(r.in) != 0 { + return 0, errInvalidState + } + + // Calling r.src.Read may block. Don't block if we have data to return. + if n > 0 { + return n, nil + } + + // Top off the buffer. + encN, err := r.src.Read(r.buf) + if encN == 0 { + // Not enough data to complete decoding. + if err == io.EOF { + return 0, io.ErrUnexpectedEOF + } + return 0, err + } + r.in = r.buf[:encN] + } +} diff --git a/vendor/github.com/andybalholm/brotli/ringbuffer.go b/vendor/github.com/andybalholm/brotli/ringbuffer.go new file mode 100644 index 00000000000..1c8f86feece --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/ringbuffer.go @@ -0,0 +1,134 @@ +package brotli + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* A ringBuffer(window_bits, tail_bits) contains `1 << window_bits' bytes of + data in a circular manner: writing a byte writes it to: + `position() % (1 << window_bits)'. + For convenience, the ringBuffer array contains another copy of the + first `1 << tail_bits' bytes: + buffer_[i] == buffer_[i + (1 << window_bits)], if i < (1 << tail_bits), + and another copy of the last two bytes: + buffer_[-1] == buffer_[(1 << window_bits) - 1] and + buffer_[-2] == buffer_[(1 << window_bits) - 2]. */ +type ringBuffer struct { + size_ uint32 + mask_ uint32 + tail_size_ uint32 + total_size_ uint32 + cur_size_ uint32 + pos_ uint32 + data_ []byte + buffer_ []byte +} + +func ringBufferInit(rb *ringBuffer) { + rb.pos_ = 0 +} + +func ringBufferSetup(params *encoderParams, rb *ringBuffer) { + var window_bits int = computeRbBits(params) + var tail_bits int = params.lgblock + *(*uint32)(&rb.size_) = 1 << uint(window_bits) + *(*uint32)(&rb.mask_) = (1 << uint(window_bits)) - 1 + *(*uint32)(&rb.tail_size_) = 1 << uint(tail_bits) + *(*uint32)(&rb.total_size_) = rb.size_ + rb.tail_size_ +} + +const kSlackForEightByteHashingEverywhere uint = 7 + +/* Allocates or re-allocates data_ to the given length + plus some slack + region before and after. Fills the slack regions with zeros. 
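+
+ For intuition, a hedged sketch of what this layout buys (illustrative
+ indices; see the ringBuffer struct comment above):
+
+   masked := pos & uint(rb.mask_) // == pos % (1 << window_bits)
+   b := rb.buffer_[masked]        // the logical byte at `pos`
+   _ = b
+   // Mirrored tail: buffer_[i] == buffer_[i+size_] for i < tail_size_,
+   // so reads of up to tail_size_ bytes never wrap, and the
+   // kSlackForEightByteHashingEverywhere zero bytes after the data let
+   // hashers load 8 bytes at a time near the end without bounds checks.
+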
*/ +func ringBufferInitBuffer(buflen uint32, rb *ringBuffer) { + var new_data []byte + var i uint + size := 2 + int(buflen) + int(kSlackForEightByteHashingEverywhere) + if cap(rb.data_) < size { + new_data = make([]byte, size) + } else { + new_data = rb.data_[:size] + } + if rb.data_ != nil { + copy(new_data, rb.data_[:2+rb.cur_size_+uint32(kSlackForEightByteHashingEverywhere)]) + } + + rb.data_ = new_data + rb.cur_size_ = buflen + rb.buffer_ = rb.data_[2:] + rb.data_[1] = 0 + rb.data_[0] = rb.data_[1] + for i = 0; i < kSlackForEightByteHashingEverywhere; i++ { + rb.buffer_[rb.cur_size_+uint32(i)] = 0 + } +} + +func ringBufferWriteTail(bytes []byte, n uint, rb *ringBuffer) { + var masked_pos uint = uint(rb.pos_ & rb.mask_) + if uint32(masked_pos) < rb.tail_size_ { + /* Just fill the tail buffer with the beginning data. */ + var p uint = uint(rb.size_ + uint32(masked_pos)) + copy(rb.buffer_[p:], bytes[:brotli_min_size_t(n, uint(rb.tail_size_-uint32(masked_pos)))]) + } +} + +/* Push bytes into the ring buffer. */ +func ringBufferWrite(bytes []byte, n uint, rb *ringBuffer) { + if rb.pos_ == 0 && uint32(n) < rb.tail_size_ { + /* Special case for the first write: to process the first block, we don't + need to allocate the whole ring-buffer and we don't need the tail + either. However, we do this memory usage optimization only if the + first write is less than the tail size, which is also the input block + size, otherwise it is likely that other blocks will follow and we + will need to reallocate to the full size anyway. */ + rb.pos_ = uint32(n) + + ringBufferInitBuffer(rb.pos_, rb) + copy(rb.buffer_, bytes[:n]) + return + } + + if rb.cur_size_ < rb.total_size_ { + /* Lazily allocate the full buffer. */ + ringBufferInitBuffer(rb.total_size_, rb) + + /* Initialize the last two bytes to zero, so that we don't have to worry + later when we copy the last two bytes to the first two positions. */ + rb.buffer_[rb.size_-2] = 0 + + rb.buffer_[rb.size_-1] = 0 + } + { + var masked_pos uint = uint(rb.pos_ & rb.mask_) + + /* The length of the writes is limited so that we do not need to worry + about a write */ + ringBufferWriteTail(bytes, n, rb) + + if uint32(masked_pos+n) <= rb.size_ { + /* A single write fits. */ + copy(rb.buffer_[masked_pos:], bytes[:n]) + } else { + /* Split into two writes. + Copy into the end of the buffer, including the tail buffer. */ + copy(rb.buffer_[masked_pos:], bytes[:brotli_min_size_t(n, uint(rb.total_size_-uint32(masked_pos)))]) + + /* Copy into the beginning of the buffer */ + copy(rb.buffer_, bytes[rb.size_-uint32(masked_pos):][:uint32(n)-(rb.size_-uint32(masked_pos))]) + } + } + { + var not_first_lap bool = rb.pos_&(1<<31) != 0 + var rb_pos_mask uint32 = (1 << 31) - 1 + rb.data_[0] = rb.buffer_[rb.size_-2] + rb.data_[1] = rb.buffer_[rb.size_-1] + rb.pos_ = (rb.pos_ & rb_pos_mask) + uint32(uint32(n)&rb_pos_mask) + if not_first_lap { + /* Wrap, but preserve not-a-first-lap feature. */ + rb.pos_ |= 1 << 31 + } + } +} diff --git a/vendor/github.com/andybalholm/brotli/state.go b/vendor/github.com/andybalholm/brotli/state.go new file mode 100644 index 00000000000..d03348fe807 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/state.go @@ -0,0 +1,295 @@ +package brotli + +import "io" + +/* Copyright 2015 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Brotli state for partial streaming decoding. 
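+
+ All of this state lives in the Reader below and is re-armed by
+ decoderStateInit, which is what lets reader.go's Reset reuse one Reader
+ across streams. A hedged sketch (srcs is an illustrative []io.Reader,
+ each carrying one complete brotli stream):
+
+   r := NewReader(nil)
+   for _, src := range srcs {
+     r.Reset(src) // documented to always return nil
+     if _, err := io.Copy(io.Discard, r); err != nil {
+       // handle a bad stream
+     }
+   }
+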
*/ +const ( + stateUninited = iota + stateLargeWindowBits + stateInitialize + stateMetablockBegin + stateMetablockHeader + stateMetablockHeader2 + stateContextModes + stateCommandBegin + stateCommandInner + stateCommandPostDecodeLiterals + stateCommandPostWrapCopy + stateUncompressed + stateMetadata + stateCommandInnerWrite + stateMetablockDone + stateCommandPostWrite1 + stateCommandPostWrite2 + stateHuffmanCode0 + stateHuffmanCode1 + stateHuffmanCode2 + stateHuffmanCode3 + stateContextMap1 + stateContextMap2 + stateTreeGroup + stateDone +) + +const ( + stateMetablockHeaderNone = iota + stateMetablockHeaderEmpty + stateMetablockHeaderNibbles + stateMetablockHeaderSize + stateMetablockHeaderUncompressed + stateMetablockHeaderReserved + stateMetablockHeaderBytes + stateMetablockHeaderMetadata +) + +const ( + stateUncompressedNone = iota + stateUncompressedWrite +) + +const ( + stateTreeGroupNone = iota + stateTreeGroupLoop +) + +const ( + stateContextMapNone = iota + stateContextMapReadPrefix + stateContextMapHuffman + stateContextMapDecode + stateContextMapTransform +) + +const ( + stateHuffmanNone = iota + stateHuffmanSimpleSize + stateHuffmanSimpleRead + stateHuffmanSimpleBuild + stateHuffmanComplex + stateHuffmanLengthSymbols +) + +const ( + stateDecodeUint8None = iota + stateDecodeUint8Short + stateDecodeUint8Long +) + +const ( + stateReadBlockLengthNone = iota + stateReadBlockLengthSuffix +) + +type Reader struct { + src io.Reader + buf []byte // scratch space for reading from src + in []byte // current chunk to decode; usually aliases buf + + state int + loop_counter int + br bitReader + buffer struct { + u64 uint64 + u8 [8]byte + } + buffer_length uint32 + pos int + max_backward_distance int + max_distance int + ringbuffer_size int + ringbuffer_mask int + dist_rb_idx int + dist_rb [4]int + error_code int + sub_loop_counter uint32 + ringbuffer []byte + ringbuffer_end []byte + htree_command []huffmanCode + context_lookup []byte + context_map_slice []byte + dist_context_map_slice []byte + literal_hgroup huffmanTreeGroup + insert_copy_hgroup huffmanTreeGroup + distance_hgroup huffmanTreeGroup + block_type_trees []huffmanCode + block_len_trees []huffmanCode + trivial_literal_context int + distance_context int + meta_block_remaining_len int + block_length_index uint32 + block_length [3]uint32 + num_block_types [3]uint32 + block_type_rb [6]uint32 + distance_postfix_bits uint32 + num_direct_distance_codes uint32 + distance_postfix_mask int + num_dist_htrees uint32 + dist_context_map []byte + literal_htree []huffmanCode + dist_htree_index byte + repeat_code_len uint32 + prev_code_len uint32 + copy_length int + distance_code int + rb_roundtrips uint + partial_pos_out uint + symbol uint32 + repeat uint32 + space uint32 + table [32]huffmanCode + symbol_lists symbolList + symbols_lists_array [huffmanMaxCodeLength + 1 + numCommandSymbols]uint16 + next_symbol [32]int + code_length_code_lengths [codeLengthCodes]byte + code_length_histo [16]uint16 + htree_index int + next []huffmanCode + context_index uint32 + max_run_length_prefix uint32 + code uint32 + context_map_table [huffmanMaxSize272]huffmanCode + substate_metablock_header int + substate_tree_group int + substate_context_map int + substate_uncompressed int + substate_huffman int + substate_decode_uint8 int + substate_read_block_length int + is_last_metablock uint + is_uncompressed uint + is_metadata uint + should_wrap_ringbuffer uint + canny_ringbuffer_allocation uint + large_window bool + size_nibbles uint + window_bits uint32 + 
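+ // Pending target size for a lazy (re)allocation of the decode ring
+ // buffer (the ringbuffer fields above); cleared in decoderStateInit: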
new_ringbuffer_size int + num_literal_htrees uint32 + context_map []byte + context_modes []byte + dictionary *dictionary + transforms *transforms + trivial_literal_contexts [8]uint32 +} + +func decoderStateInit(s *Reader) bool { + s.error_code = 0 /* BROTLI_DECODER_NO_ERROR */ + + initBitReader(&s.br) + s.state = stateUninited + s.large_window = false + s.substate_metablock_header = stateMetablockHeaderNone + s.substate_tree_group = stateTreeGroupNone + s.substate_context_map = stateContextMapNone + s.substate_uncompressed = stateUncompressedNone + s.substate_huffman = stateHuffmanNone + s.substate_decode_uint8 = stateDecodeUint8None + s.substate_read_block_length = stateReadBlockLengthNone + + s.buffer_length = 0 + s.loop_counter = 0 + s.pos = 0 + s.rb_roundtrips = 0 + s.partial_pos_out = 0 + + s.block_type_trees = nil + s.block_len_trees = nil + s.ringbuffer = nil + s.ringbuffer_size = 0 + s.new_ringbuffer_size = 0 + s.ringbuffer_mask = 0 + + s.context_map = nil + s.context_modes = nil + s.dist_context_map = nil + s.context_map_slice = nil + s.dist_context_map_slice = nil + + s.sub_loop_counter = 0 + + s.literal_hgroup.codes = nil + s.literal_hgroup.htrees = nil + s.insert_copy_hgroup.codes = nil + s.insert_copy_hgroup.htrees = nil + s.distance_hgroup.codes = nil + s.distance_hgroup.htrees = nil + + s.is_last_metablock = 0 + s.is_uncompressed = 0 + s.is_metadata = 0 + s.should_wrap_ringbuffer = 0 + s.canny_ringbuffer_allocation = 1 + + s.window_bits = 0 + s.max_distance = 0 + s.dist_rb[0] = 16 + s.dist_rb[1] = 15 + s.dist_rb[2] = 11 + s.dist_rb[3] = 4 + s.dist_rb_idx = 0 + s.block_type_trees = nil + s.block_len_trees = nil + + s.symbol_lists.storage = s.symbols_lists_array[:] + s.symbol_lists.offset = huffmanMaxCodeLength + 1 + + s.dictionary = getDictionary() + s.transforms = getTransforms() + + return true +} + +func decoderStateMetablockBegin(s *Reader) { + s.meta_block_remaining_len = 0 + s.block_length[0] = 1 << 24 + s.block_length[1] = 1 << 24 + s.block_length[2] = 1 << 24 + s.num_block_types[0] = 1 + s.num_block_types[1] = 1 + s.num_block_types[2] = 1 + s.block_type_rb[0] = 1 + s.block_type_rb[1] = 0 + s.block_type_rb[2] = 1 + s.block_type_rb[3] = 0 + s.block_type_rb[4] = 1 + s.block_type_rb[5] = 0 + s.context_map = nil + s.context_modes = nil + s.dist_context_map = nil + s.context_map_slice = nil + s.literal_htree = nil + s.dist_context_map_slice = nil + s.dist_htree_index = 0 + s.context_lookup = nil + s.literal_hgroup.codes = nil + s.literal_hgroup.htrees = nil + s.insert_copy_hgroup.codes = nil + s.insert_copy_hgroup.htrees = nil + s.distance_hgroup.codes = nil + s.distance_hgroup.htrees = nil +} + +func decoderStateCleanupAfterMetablock(s *Reader) { + s.context_modes = nil + s.context_map = nil + s.dist_context_map = nil + s.literal_hgroup.htrees = nil + s.insert_copy_hgroup.htrees = nil + s.distance_hgroup.htrees = nil +} + +func decoderHuffmanTreeGroupInit(s *Reader, group *huffmanTreeGroup, alphabet_size uint32, max_symbol uint32, ntrees uint32) bool { + var max_table_size uint = uint(kMaxHuffmanTableSize[(alphabet_size+31)>>5]) + group.alphabet_size = uint16(alphabet_size) + group.max_symbol = uint16(max_symbol) + group.num_htrees = uint16(ntrees) + group.htrees = make([][]huffmanCode, ntrees) + group.codes = make([]huffmanCode, (uint(ntrees) * max_table_size)) + return !(group.codes == nil) +} diff --git a/vendor/github.com/andybalholm/brotli/static_dict.go b/vendor/github.com/andybalholm/brotli/static_dict.go new file mode 100644 index 00000000000..bc05566d6f8 --- 
/dev/null +++ b/vendor/github.com/andybalholm/brotli/static_dict.go @@ -0,0 +1,662 @@ +package brotli + +import "encoding/binary" + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Class to model the static dictionary. */ + +const maxStaticDictionaryMatchLen = 37 + +const kInvalidMatch uint32 = 0xFFFFFFF + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ +func hash(data []byte) uint32 { + var h uint32 = binary.LittleEndian.Uint32(data) * kDictHashMul32 + + /* The higher bits contain more mixture from the multiplication, + so we take our results from there. */ + return h >> uint(32-kDictNumBits) +} + +func addMatch(distance uint, len uint, len_code uint, matches []uint32) { + var match uint32 = uint32((distance << 5) + len_code) + matches[len] = brotli_min_uint32_t(matches[len], match) +} + +func dictMatchLength(dict *dictionary, data []byte, id uint, len uint, maxlen uint) uint { + var offset uint = uint(dict.offsets_by_length[len]) + len*id + return findMatchLengthWithLimit(dict.data[offset:], data, brotli_min_size_t(uint(len), maxlen)) +} + +func isMatch(d *dictionary, w dictWord, data []byte, max_length uint) bool { + if uint(w.len) > max_length { + return false + } else { + var offset uint = uint(d.offsets_by_length[w.len]) + uint(w.len)*uint(w.idx) + var dict []byte = d.data[offset:] + if w.transform == 0 { + /* Match against base dictionary word. */ + return findMatchLengthWithLimit(dict, data, uint(w.len)) == uint(w.len) + } else if w.transform == 10 { + /* Match against uppercase first transform. + Note that there are only ASCII uppercase words in the lookup table. */ + return dict[0] >= 'a' && dict[0] <= 'z' && (dict[0]^32) == data[0] && findMatchLengthWithLimit(dict[1:], data[1:], uint(w.len)-1) == uint(w.len-1) + } else { + /* Match against uppercase all transform. + Note that there are only ASCII uppercase words in the lookup table. */ + var i uint + for i = 0; i < uint(w.len); i++ { + if dict[i] >= 'a' && dict[i] <= 'z' { + if (dict[i] ^ 32) != data[i] { + return false + } + } else { + if dict[i] != data[i] { + return false + } + } + } + + return true + } + } +} + +func findAllStaticDictionaryMatches(dict *encoderDictionary, data []byte, min_length uint, max_length uint, matches []uint32) bool { + var has_found_match bool = false + { + var offset uint = uint(dict.buckets[hash(data)]) + var end bool = offset == 0 + for !end { + w := dict.dict_words[offset] + offset++ + var l uint = uint(w.len) & 0x1F + var n uint = uint(1) << dict.words.size_bits_by_length[l] + var id uint = uint(w.idx) + end = !(w.len&0x80 == 0) + w.len = byte(l) + if w.transform == 0 { + var matchlen uint = dictMatchLength(dict.words, data, id, l, max_length) + var s []byte + var minlen uint + var maxlen uint + var len uint + + /* Transform "" + BROTLI_TRANSFORM_IDENTITY + "" */ + if matchlen == l { + addMatch(id, l, l, matches) + has_found_match = true + } + + /* Transforms "" + BROTLI_TRANSFORM_OMIT_LAST_1 + "" and + "" + BROTLI_TRANSFORM_OMIT_LAST_1 + "ing " */ + if matchlen >= l-1 { + addMatch(id+12*n, l-1, l, matches) + if l+2 < max_length && data[l-1] == 'i' && data[l] == 'n' && data[l+1] == 'g' && data[l+2] == ' ' { + addMatch(id+49*n, l+3, l, matches) + } + + has_found_match = true + } + + /* Transform "" + BROTLI_TRANSFORM_OMIT_LAST_# + "" (# = 2 .. 
9) */ + minlen = min_length + + if l > 9 { + minlen = brotli_max_size_t(minlen, l-9) + } + maxlen = brotli_min_size_t(matchlen, l-2) + for len = minlen; len <= maxlen; len++ { + var cut uint = l - len + var transform_id uint = (cut << 2) + uint((dict.cutoffTransforms>>(cut*6))&0x3F) + addMatch(id+transform_id*n, uint(len), l, matches) + has_found_match = true + } + + if matchlen < l || l+6 >= max_length { + continue + } + + s = data[l:] + + /* Transforms "" + BROTLI_TRANSFORM_IDENTITY + */ + if s[0] == ' ' { + addMatch(id+n, l+1, l, matches) + if s[1] == 'a' { + if s[2] == ' ' { + addMatch(id+28*n, l+3, l, matches) + } else if s[2] == 's' { + if s[3] == ' ' { + addMatch(id+46*n, l+4, l, matches) + } + } else if s[2] == 't' { + if s[3] == ' ' { + addMatch(id+60*n, l+4, l, matches) + } + } else if s[2] == 'n' { + if s[3] == 'd' && s[4] == ' ' { + addMatch(id+10*n, l+5, l, matches) + } + } + } else if s[1] == 'b' { + if s[2] == 'y' && s[3] == ' ' { + addMatch(id+38*n, l+4, l, matches) + } + } else if s[1] == 'i' { + if s[2] == 'n' { + if s[3] == ' ' { + addMatch(id+16*n, l+4, l, matches) + } + } else if s[2] == 's' { + if s[3] == ' ' { + addMatch(id+47*n, l+4, l, matches) + } + } + } else if s[1] == 'f' { + if s[2] == 'o' { + if s[3] == 'r' && s[4] == ' ' { + addMatch(id+25*n, l+5, l, matches) + } + } else if s[2] == 'r' { + if s[3] == 'o' && s[4] == 'm' && s[5] == ' ' { + addMatch(id+37*n, l+6, l, matches) + } + } + } else if s[1] == 'o' { + if s[2] == 'f' { + if s[3] == ' ' { + addMatch(id+8*n, l+4, l, matches) + } + } else if s[2] == 'n' { + if s[3] == ' ' { + addMatch(id+45*n, l+4, l, matches) + } + } + } else if s[1] == 'n' { + if s[2] == 'o' && s[3] == 't' && s[4] == ' ' { + addMatch(id+80*n, l+5, l, matches) + } + } else if s[1] == 't' { + if s[2] == 'h' { + if s[3] == 'e' { + if s[4] == ' ' { + addMatch(id+5*n, l+5, l, matches) + } + } else if s[3] == 'a' { + if s[4] == 't' && s[5] == ' ' { + addMatch(id+29*n, l+6, l, matches) + } + } + } else if s[2] == 'o' { + if s[3] == ' ' { + addMatch(id+17*n, l+4, l, matches) + } + } + } else if s[1] == 'w' { + if s[2] == 'i' && s[3] == 't' && s[4] == 'h' && s[5] == ' ' { + addMatch(id+35*n, l+6, l, matches) + } + } + } else if s[0] == '"' { + addMatch(id+19*n, l+1, l, matches) + if s[1] == '>' { + addMatch(id+21*n, l+2, l, matches) + } + } else if s[0] == '.' 
{ + addMatch(id+20*n, l+1, l, matches) + if s[1] == ' ' { + addMatch(id+31*n, l+2, l, matches) + if s[2] == 'T' && s[3] == 'h' { + if s[4] == 'e' { + if s[5] == ' ' { + addMatch(id+43*n, l+6, l, matches) + } + } else if s[4] == 'i' { + if s[5] == 's' && s[6] == ' ' { + addMatch(id+75*n, l+7, l, matches) + } + } + } + } + } else if s[0] == ',' { + addMatch(id+76*n, l+1, l, matches) + if s[1] == ' ' { + addMatch(id+14*n, l+2, l, matches) + } + } else if s[0] == '\n' { + addMatch(id+22*n, l+1, l, matches) + if s[1] == '\t' { + addMatch(id+50*n, l+2, l, matches) + } + } else if s[0] == ']' { + addMatch(id+24*n, l+1, l, matches) + } else if s[0] == '\'' { + addMatch(id+36*n, l+1, l, matches) + } else if s[0] == ':' { + addMatch(id+51*n, l+1, l, matches) + } else if s[0] == '(' { + addMatch(id+57*n, l+1, l, matches) + } else if s[0] == '=' { + if s[1] == '"' { + addMatch(id+70*n, l+2, l, matches) + } else if s[1] == '\'' { + addMatch(id+86*n, l+2, l, matches) + } + } else if s[0] == 'a' { + if s[1] == 'l' && s[2] == ' ' { + addMatch(id+84*n, l+3, l, matches) + } + } else if s[0] == 'e' { + if s[1] == 'd' { + if s[2] == ' ' { + addMatch(id+53*n, l+3, l, matches) + } + } else if s[1] == 'r' { + if s[2] == ' ' { + addMatch(id+82*n, l+3, l, matches) + } + } else if s[1] == 's' { + if s[2] == 't' && s[3] == ' ' { + addMatch(id+95*n, l+4, l, matches) + } + } + } else if s[0] == 'f' { + if s[1] == 'u' && s[2] == 'l' && s[3] == ' ' { + addMatch(id+90*n, l+4, l, matches) + } + } else if s[0] == 'i' { + if s[1] == 'v' { + if s[2] == 'e' && s[3] == ' ' { + addMatch(id+92*n, l+4, l, matches) + } + } else if s[1] == 'z' { + if s[2] == 'e' && s[3] == ' ' { + addMatch(id+100*n, l+4, l, matches) + } + } + } else if s[0] == 'l' { + if s[1] == 'e' { + if s[2] == 's' && s[3] == 's' && s[4] == ' ' { + addMatch(id+93*n, l+5, l, matches) + } + } else if s[1] == 'y' { + if s[2] == ' ' { + addMatch(id+61*n, l+3, l, matches) + } + } + } else if s[0] == 'o' { + if s[1] == 'u' && s[2] == 's' && s[3] == ' ' { + addMatch(id+106*n, l+4, l, matches) + } + } + } else { + var is_all_caps bool = (w.transform != transformUppercaseFirst) + /* Set is_all_caps=0 for BROTLI_TRANSFORM_UPPERCASE_FIRST and + is_all_caps=1 otherwise (BROTLI_TRANSFORM_UPPERCASE_ALL) + transform. */ + + var s []byte + if !isMatch(dict.words, w, data, max_length) { + continue + } + + /* Transform "" + kUppercase{First,All} + "" */ + var tmp int + if is_all_caps { + tmp = 44 + } else { + tmp = 9 + } + addMatch(id+uint(tmp)*n, l, l, matches) + + has_found_match = true + if l+1 >= max_length { + continue + } + + /* Transforms "" + kUppercase{First,All} + */ + s = data[l:] + + if s[0] == ' ' { + var tmp int + if is_all_caps { + tmp = 68 + } else { + tmp = 4 + } + addMatch(id+uint(tmp)*n, l+1, l, matches) + } else if s[0] == '"' { + var tmp int + if is_all_caps { + tmp = 87 + } else { + tmp = 66 + } + addMatch(id+uint(tmp)*n, l+1, l, matches) + if s[1] == '>' { + var tmp int + if is_all_caps { + tmp = 97 + } else { + tmp = 69 + } + addMatch(id+uint(tmp)*n, l+2, l, matches) + } + } else if s[0] == '.' 
{ + var tmp int + if is_all_caps { + tmp = 101 + } else { + tmp = 79 + } + addMatch(id+uint(tmp)*n, l+1, l, matches) + if s[1] == ' ' { + var tmp int + if is_all_caps { + tmp = 114 + } else { + tmp = 88 + } + addMatch(id+uint(tmp)*n, l+2, l, matches) + } + } else if s[0] == ',' { + var tmp int + if is_all_caps { + tmp = 112 + } else { + tmp = 99 + } + addMatch(id+uint(tmp)*n, l+1, l, matches) + if s[1] == ' ' { + var tmp int + if is_all_caps { + tmp = 107 + } else { + tmp = 58 + } + addMatch(id+uint(tmp)*n, l+2, l, matches) + } + } else if s[0] == '\'' { + var tmp int + if is_all_caps { + tmp = 94 + } else { + tmp = 74 + } + addMatch(id+uint(tmp)*n, l+1, l, matches) + } else if s[0] == '(' { + var tmp int + if is_all_caps { + tmp = 113 + } else { + tmp = 78 + } + addMatch(id+uint(tmp)*n, l+1, l, matches) + } else if s[0] == '=' { + if s[1] == '"' { + var tmp int + if is_all_caps { + tmp = 105 + } else { + tmp = 104 + } + addMatch(id+uint(tmp)*n, l+2, l, matches) + } else if s[1] == '\'' { + var tmp int + if is_all_caps { + tmp = 116 + } else { + tmp = 108 + } + addMatch(id+uint(tmp)*n, l+2, l, matches) + } + } + } + } + } + + /* Transforms with prefixes " " and "." */ + if max_length >= 5 && (data[0] == ' ' || data[0] == '.') { + var is_space bool = (data[0] == ' ') + var offset uint = uint(dict.buckets[hash(data[1:])]) + var end bool = offset == 0 + for !end { + w := dict.dict_words[offset] + offset++ + var l uint = uint(w.len) & 0x1F + var n uint = uint(1) << dict.words.size_bits_by_length[l] + var id uint = uint(w.idx) + end = !(w.len&0x80 == 0) + w.len = byte(l) + if w.transform == 0 { + var s []byte + if !isMatch(dict.words, w, data[1:], max_length-1) { + continue + } + + /* Transforms " " + BROTLI_TRANSFORM_IDENTITY + "" and + "." + BROTLI_TRANSFORM_IDENTITY + "" */ + var tmp int + if is_space { + tmp = 6 + } else { + tmp = 32 + } + addMatch(id+uint(tmp)*n, l+1, l, matches) + + has_found_match = true + if l+2 >= max_length { + continue + } + + /* Transforms " " + BROTLI_TRANSFORM_IDENTITY + and + "." + BROTLI_TRANSFORM_IDENTITY + + */ + s = data[l+1:] + + if s[0] == ' ' { + var tmp int + if is_space { + tmp = 2 + } else { + tmp = 77 + } + addMatch(id+uint(tmp)*n, l+2, l, matches) + } else if s[0] == '(' { + var tmp int + if is_space { + tmp = 89 + } else { + tmp = 67 + } + addMatch(id+uint(tmp)*n, l+2, l, matches) + } else if is_space { + if s[0] == ',' { + addMatch(id+103*n, l+2, l, matches) + if s[1] == ' ' { + addMatch(id+33*n, l+3, l, matches) + } + } else if s[0] == '.' { + addMatch(id+71*n, l+2, l, matches) + if s[1] == ' ' { + addMatch(id+52*n, l+3, l, matches) + } + } else if s[0] == '=' { + if s[1] == '"' { + addMatch(id+81*n, l+3, l, matches) + } else if s[1] == '\'' { + addMatch(id+98*n, l+3, l, matches) + } + } + } + } else if is_space { + var is_all_caps bool = (w.transform != transformUppercaseFirst) + /* Set is_all_caps=0 for BROTLI_TRANSFORM_UPPERCASE_FIRST and + is_all_caps=1 otherwise (BROTLI_TRANSFORM_UPPERCASE_ALL) + transform. 
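+
+ Worked example (illustrative words): for dictionary word "time" after
+ the leading ' ' consumed by this branch, UPPERCASE_FIRST matches "Time"
+ and UPPERCASE_ALL matches "TIME"; isMatch above implements the fold:
+
+   dict[0]^32 == data[0] // 't'^32 == 'T'; UPPERCASE_FIRST touches only byte 0
+   dict[i]^32 == data[i] // every ASCII letter i under UPPERCASE_ALL
+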
*/ + + var s []byte + if !isMatch(dict.words, w, data[1:], max_length-1) { + continue + } + + /* Transforms " " + kUppercase{First,All} + "" */ + var tmp int + if is_all_caps { + tmp = 85 + } else { + tmp = 30 + } + addMatch(id+uint(tmp)*n, l+1, l, matches) + + has_found_match = true + if l+2 >= max_length { + continue + } + + /* Transforms " " + kUppercase{First,All} + */ + s = data[l+1:] + + if s[0] == ' ' { + var tmp int + if is_all_caps { + tmp = 83 + } else { + tmp = 15 + } + addMatch(id+uint(tmp)*n, l+2, l, matches) + } else if s[0] == ',' { + if !is_all_caps { + addMatch(id+109*n, l+2, l, matches) + } + + if s[1] == ' ' { + var tmp int + if is_all_caps { + tmp = 111 + } else { + tmp = 65 + } + addMatch(id+uint(tmp)*n, l+3, l, matches) + } + } else if s[0] == '.' { + var tmp int + if is_all_caps { + tmp = 115 + } else { + tmp = 96 + } + addMatch(id+uint(tmp)*n, l+2, l, matches) + if s[1] == ' ' { + var tmp int + if is_all_caps { + tmp = 117 + } else { + tmp = 91 + } + addMatch(id+uint(tmp)*n, l+3, l, matches) + } + } else if s[0] == '=' { + if s[1] == '"' { + var tmp int + if is_all_caps { + tmp = 110 + } else { + tmp = 118 + } + addMatch(id+uint(tmp)*n, l+3, l, matches) + } else if s[1] == '\'' { + var tmp int + if is_all_caps { + tmp = 119 + } else { + tmp = 120 + } + addMatch(id+uint(tmp)*n, l+3, l, matches) + } + } + } + } + } + + if max_length >= 6 { + /* Transforms with prefixes "e ", "s ", ", " and "\xC2\xA0" */ + if (data[1] == ' ' && (data[0] == 'e' || data[0] == 's' || data[0] == ',')) || (data[0] == 0xC2 && data[1] == 0xA0) { + var offset uint = uint(dict.buckets[hash(data[2:])]) + var end bool = offset == 0 + for !end { + w := dict.dict_words[offset] + offset++ + var l uint = uint(w.len) & 0x1F + var n uint = uint(1) << dict.words.size_bits_by_length[l] + var id uint = uint(w.idx) + end = !(w.len&0x80 == 0) + w.len = byte(l) + if w.transform == 0 && isMatch(dict.words, w, data[2:], max_length-2) { + if data[0] == 0xC2 { + addMatch(id+102*n, l+2, l, matches) + has_found_match = true + } else if l+2 < max_length && data[l+2] == ' ' { + var t uint = 13 + if data[0] == 'e' { + t = 18 + } else if data[0] == 's' { + t = 7 + } + addMatch(id+t*n, l+3, l, matches) + has_found_match = true + } + } + } + } + } + + if max_length >= 9 { + /* Transforms with prefixes " the " and ".com/" */ + if (data[0] == ' ' && data[1] == 't' && data[2] == 'h' && data[3] == 'e' && data[4] == ' ') || (data[0] == '.' 
&& data[1] == 'c' && data[2] == 'o' && data[3] == 'm' && data[4] == '/') { + var offset uint = uint(dict.buckets[hash(data[5:])]) + var end bool = offset == 0 + for !end { + w := dict.dict_words[offset] + offset++ + var l uint = uint(w.len) & 0x1F + var n uint = uint(1) << dict.words.size_bits_by_length[l] + var id uint = uint(w.idx) + end = !(w.len&0x80 == 0) + w.len = byte(l) + if w.transform == 0 && isMatch(dict.words, w, data[5:], max_length-5) { + var tmp int + if data[0] == ' ' { + tmp = 41 + } else { + tmp = 72 + } + addMatch(id+uint(tmp)*n, l+5, l, matches) + has_found_match = true + if l+5 < max_length { + var s []byte = data[l+5:] + if data[0] == ' ' { + if l+8 < max_length && s[0] == ' ' && s[1] == 'o' && s[2] == 'f' && s[3] == ' ' { + addMatch(id+62*n, l+9, l, matches) + if l+12 < max_length && s[4] == 't' && s[5] == 'h' && s[6] == 'e' && s[7] == ' ' { + addMatch(id+73*n, l+13, l, matches) + } + } + } + } + } + } + } + } + + return has_found_match +} diff --git a/vendor/github.com/andybalholm/brotli/static_dict_lut.go b/vendor/github.com/andybalholm/brotli/static_dict_lut.go new file mode 100644 index 00000000000..b33963e967a --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/static_dict_lut.go @@ -0,0 +1,75094 @@ +package brotli + +/* Copyright 2017 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Lookup table for static dictionary and transforms. */ + +type dictWord struct { + len byte + transform byte + idx uint16 +} + +const kDictNumBits int = 15 + +const kDictHashMul32 uint32 = 0x1E35A7BD + +var kStaticDictionaryBuckets = [32768]uint16{ + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3, + 6, + 0, + 0, + 0, + 0, + 0, + 20, + 0, + 0, + 0, + 21, + 0, + 22, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 23, + 0, + 0, + 25, + 0, + 29, + 0, + 53, + 0, + 0, + 0, + 0, + 0, + 0, + 55, + 0, + 0, + 0, + 0, + 0, + 0, + 61, + 76, + 0, + 0, + 0, + 94, + 0, + 0, + 0, + 0, + 0, + 0, + 96, + 0, + 97, + 0, + 98, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 99, + 101, + 106, + 108, + 0, + 0, + 0, + 0, + 0, + 110, + 0, + 111, + 112, + 0, + 113, + 118, + 124, + 0, + 0, + 0, + 0, + 0, + 125, + 128, + 0, + 0, + 0, + 0, + 129, + 0, + 0, + 131, + 0, + 0, + 0, + 0, + 0, + 0, + 132, + 0, + 0, + 135, + 0, + 0, + 0, + 137, + 0, + 0, + 0, + 0, + 0, + 138, + 139, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 142, + 143, + 144, + 0, + 0, + 0, + 0, + 0, + 145, + 0, + 0, + 0, + 146, + 149, + 151, + 152, + 0, + 0, + 153, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 154, + 0, + 0, + 0, + 0, + 0, + 0, + 155, + 0, + 0, + 0, + 0, + 160, + 182, + 0, + 0, + 0, + 0, + 0, + 0, + 183, + 0, + 0, + 0, + 188, + 189, + 0, + 0, + 192, + 0, + 0, + 0, + 0, + 0, + 0, + 194, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 197, + 202, + 209, + 0, + 0, + 210, + 0, + 224, + 0, + 0, + 0, + 225, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 231, + 0, + 0, + 0, + 232, + 0, + 240, + 0, + 0, + 242, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 244, + 0, + 0, + 0, + 246, + 0, + 0, + 249, + 251, + 253, + 0, + 0, + 0, + 0, + 0, + 258, + 0, + 0, + 261, + 263, + 0, + 0, + 0, + 267, + 0, + 0, + 268, + 0, + 269, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 271, + 0, + 0, + 0, + 0, + 0, + 0, + 272, + 0, + 273, + 0, + 277, + 0, + 278, + 286, + 0, + 0, + 0, + 0, + 287, + 0, + 289, + 290, + 291, + 0, + 0, + 0, + 295, + 0, + 0, + 296, + 297, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 298, + 0, + 0, + 0, 
+ 299, + 0, + 0, + 305, + 0, + 324, + 0, + 0, + 0, + 0, + 0, + 327, + 0, + 328, + 329, + 0, + 0, + 0, + 0, + 336, + 0, + 0, + 340, + 0, + 341, + 342, + 343, + 0, + 0, + 346, + 0, + 348, + 0, + 0, + 0, + 0, + 0, + 0, + 349, + 351, + 0, + 0, + 355, + 0, + 363, + 0, + 364, + 0, + 368, + 369, + 0, + 370, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 372, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 373, + 0, + 375, + 0, + 0, + 0, + 0, + 376, + 377, + 0, + 0, + 394, + 395, + 396, + 0, + 0, + 398, + 0, + 0, + 0, + 0, + 400, + 0, + 0, + 408, + 0, + 0, + 0, + 0, + 420, + 0, + 0, + 0, + 0, + 0, + 0, + 421, + 0, + 0, + 422, + 423, + 0, + 0, + 429, + 435, + 436, + 442, + 0, + 0, + 443, + 0, + 444, + 445, + 453, + 456, + 0, + 457, + 0, + 0, + 0, + 0, + 0, + 458, + 0, + 0, + 0, + 459, + 0, + 0, + 0, + 460, + 0, + 462, + 463, + 465, + 0, + 0, + 0, + 0, + 0, + 0, + 466, + 469, + 0, + 0, + 0, + 0, + 0, + 0, + 470, + 0, + 0, + 0, + 474, + 0, + 476, + 0, + 0, + 0, + 0, + 483, + 0, + 485, + 0, + 0, + 0, + 486, + 0, + 0, + 488, + 491, + 492, + 0, + 0, + 497, + 499, + 500, + 0, + 501, + 0, + 0, + 0, + 505, + 0, + 0, + 506, + 0, + 0, + 0, + 507, + 0, + 0, + 0, + 509, + 0, + 0, + 0, + 0, + 511, + 512, + 519, + 0, + 0, + 0, + 0, + 0, + 0, + 529, + 530, + 0, + 0, + 0, + 534, + 0, + 0, + 0, + 0, + 543, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 553, + 0, + 0, + 0, + 0, + 557, + 560, + 0, + 0, + 0, + 0, + 0, + 0, + 561, + 0, + 564, + 0, + 0, + 0, + 0, + 0, + 0, + 565, + 566, + 0, + 575, + 0, + 619, + 0, + 620, + 0, + 0, + 623, + 624, + 0, + 0, + 0, + 625, + 0, + 0, + 626, + 627, + 0, + 0, + 628, + 0, + 0, + 0, + 0, + 630, + 0, + 631, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 641, + 0, + 0, + 0, + 0, + 643, + 656, + 668, + 0, + 0, + 0, + 673, + 0, + 0, + 0, + 674, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 682, + 0, + 687, + 0, + 690, + 0, + 693, + 699, + 700, + 0, + 0, + 0, + 0, + 0, + 0, + 704, + 705, + 0, + 0, + 0, + 0, + 707, + 710, + 0, + 711, + 0, + 0, + 0, + 0, + 726, + 0, + 0, + 729, + 0, + 0, + 0, + 730, + 731, + 0, + 0, + 0, + 0, + 0, + 752, + 0, + 0, + 0, + 762, + 0, + 763, + 0, + 0, + 767, + 0, + 0, + 0, + 770, + 774, + 0, + 0, + 775, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 776, + 0, + 0, + 0, + 777, + 783, + 0, + 0, + 0, + 785, + 788, + 0, + 0, + 0, + 0, + 790, + 0, + 0, + 0, + 793, + 0, + 0, + 0, + 0, + 794, + 0, + 0, + 804, + 819, + 821, + 0, + 827, + 0, + 0, + 0, + 834, + 0, + 0, + 835, + 0, + 0, + 0, + 841, + 0, + 844, + 0, + 850, + 851, + 859, + 0, + 860, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 874, + 0, + 876, + 0, + 877, + 890, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 893, + 894, + 898, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 899, + 0, + 0, + 0, + 900, + 904, + 906, + 0, + 0, + 0, + 907, + 0, + 908, + 909, + 0, + 910, + 0, + 0, + 0, + 0, + 911, + 0, + 0, + 0, + 0, + 0, + 916, + 0, + 0, + 0, + 922, + 925, + 0, + 930, + 0, + 934, + 0, + 0, + 0, + 0, + 0, + 943, + 0, + 0, + 944, + 0, + 953, + 954, + 0, + 0, + 0, + 0, + 0, + 0, + 955, + 0, + 962, + 963, + 0, + 0, + 976, + 0, + 0, + 977, + 978, + 979, + 980, + 0, + 981, + 0, + 0, + 0, + 0, + 984, + 0, + 0, + 985, + 0, + 0, + 987, + 989, + 991, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 992, + 0, + 0, + 0, + 993, + 0, + 0, + 0, + 0, + 0, + 0, + 996, + 0, + 0, + 0, + 1000, + 0, + 0, + 0, + 0, + 0, + 1002, + 0, + 0, + 0, + 0, + 1005, + 1007, + 0, + 0, + 0, + 1009, + 0, + 0, + 0, + 1010, + 0, + 0, + 0, + 0, + 0, + 0, + 1011, + 0, + 1012, + 0, + 0, + 0, + 0, + 1014, + 1016, + 0, + 0, + 0, + 1020, 
+ 0, + 1021, + 0, + 0, + 0, + 0, + 1022, + 0, + 0, + 0, + 1024, + 0, + 0, + 0, + 0, + 0, + 0, + 1025, + 0, + 0, + 1026, + 1027, + 0, + 0, + 0, + 0, + 0, + 1031, + 0, + 1033, + 0, + 0, + 0, + 0, + 1034, + 0, + 0, + 0, + 1037, + 1040, + 0, + 0, + 0, + 1042, + 1043, + 0, + 0, + 1053, + 0, + 1054, + 0, + 0, + 1057, + 0, + 0, + 0, + 1058, + 0, + 0, + 1060, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1061, + 0, + 0, + 1062, + 0, + 0, + 0, + 0, + 1063, + 0, + 0, + 0, + 0, + 1064, + 0, + 0, + 0, + 0, + 0, + 1065, + 0, + 0, + 0, + 0, + 1066, + 1067, + 0, + 0, + 0, + 1069, + 1070, + 1072, + 0, + 0, + 0, + 0, + 0, + 0, + 1073, + 0, + 1075, + 0, + 0, + 0, + 0, + 0, + 0, + 1080, + 1084, + 0, + 0, + 0, + 0, + 1088, + 0, + 0, + 0, + 0, + 0, + 0, + 1094, + 0, + 1095, + 0, + 1107, + 0, + 0, + 0, + 1112, + 1114, + 0, + 1119, + 0, + 1122, + 0, + 0, + 1126, + 0, + 1129, + 0, + 1130, + 0, + 0, + 0, + 0, + 0, + 1132, + 0, + 0, + 0, + 0, + 0, + 0, + 1144, + 0, + 0, + 1145, + 1146, + 0, + 1148, + 1149, + 0, + 0, + 1150, + 1151, + 0, + 0, + 0, + 0, + 1152, + 0, + 1153, + 0, + 0, + 0, + 0, + 0, + 1154, + 0, + 1163, + 0, + 0, + 0, + 1164, + 0, + 0, + 0, + 0, + 0, + 1165, + 0, + 1167, + 0, + 1170, + 0, + 0, + 0, + 0, + 0, + 1171, + 1172, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1173, + 1175, + 1177, + 0, + 1186, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1195, + 0, + 0, + 1221, + 0, + 0, + 1224, + 0, + 0, + 1227, + 0, + 0, + 0, + 0, + 0, + 1228, + 1229, + 0, + 0, + 1230, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1231, + 0, + 0, + 0, + 1233, + 0, + 0, + 1243, + 1244, + 1246, + 1248, + 0, + 0, + 0, + 0, + 1254, + 1255, + 1258, + 1259, + 0, + 0, + 0, + 1260, + 0, + 0, + 1261, + 0, + 0, + 0, + 1262, + 1264, + 0, + 0, + 1265, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1266, + 0, + 1267, + 0, + 0, + 0, + 0, + 1273, + 1274, + 1276, + 1289, + 0, + 0, + 1291, + 1292, + 1293, + 0, + 0, + 1294, + 1295, + 1296, + 0, + 0, + 0, + 0, + 1302, + 0, + 1304, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1311, + 1312, + 0, + 1314, + 0, + 1316, + 1320, + 1321, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1322, + 1323, + 1324, + 0, + 1335, + 0, + 1336, + 0, + 0, + 0, + 0, + 1341, + 1342, + 0, + 1346, + 0, + 1357, + 0, + 0, + 0, + 1358, + 1360, + 0, + 0, + 0, + 0, + 0, + 0, + 1361, + 0, + 0, + 0, + 1362, + 1365, + 0, + 1366, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1379, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1386, + 0, + 1388, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1395, + 0, + 0, + 0, + 0, + 1403, + 0, + 1405, + 0, + 0, + 1407, + 0, + 0, + 0, + 0, + 0, + 1408, + 1409, + 0, + 1410, + 0, + 0, + 0, + 1412, + 1413, + 1416, + 0, + 0, + 1429, + 1451, + 0, + 0, + 1454, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1455, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1456, + 0, + 0, + 0, + 0, + 1459, + 1460, + 1461, + 1475, + 0, + 0, + 0, + 0, + 0, + 0, + 1477, + 0, + 1480, + 0, + 1481, + 0, + 0, + 1486, + 0, + 0, + 1495, + 0, + 0, + 0, + 1496, + 0, + 0, + 1498, + 1499, + 1501, + 1520, + 1521, + 0, + 0, + 0, + 1526, + 0, + 0, + 0, + 0, + 1528, + 1529, + 0, + 1533, + 1536, + 0, + 0, + 0, + 1537, + 1538, + 1549, + 0, + 1550, + 1558, + 1559, + 1572, + 0, + 1573, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1575, + 0, + 0, + 0, + 0, + 0, + 1579, + 0, + 1599, + 0, + 1603, + 0, + 1604, + 0, + 1605, + 0, + 0, + 0, + 0, + 0, + 1608, + 1610, + 0, + 0, + 0, + 0, + 1611, + 0, + 1615, + 0, + 1616, + 1618, + 0, + 1619, + 0, + 0, + 1622, + 0, + 0, + 0, + 0, + 1634, + 0, + 0, + 0, + 1635, + 0, + 0, + 0, + 
1641, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1643, + 0, + 0, + 0, + 1650, + 0, + 0, + 1652, + 0, + 0, + 0, + 0, + 0, + 1653, + 0, + 0, + 0, + 1654, + 0, + 0, + 0, + 0, + 1655, + 0, + 1662, + 0, + 0, + 1663, + 1664, + 0, + 0, + 1668, + 0, + 0, + 1669, + 1670, + 0, + 1672, + 1673, + 0, + 0, + 0, + 0, + 0, + 1674, + 0, + 0, + 0, + 1675, + 1676, + 1680, + 0, + 1682, + 0, + 0, + 1687, + 0, + 0, + 0, + 0, + 0, + 1704, + 0, + 0, + 1705, + 0, + 0, + 1721, + 0, + 0, + 0, + 0, + 1734, + 1735, + 0, + 0, + 0, + 0, + 1737, + 0, + 0, + 0, + 0, + 1739, + 0, + 0, + 1740, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1741, + 1743, + 0, + 0, + 0, + 0, + 1745, + 0, + 0, + 0, + 1749, + 0, + 0, + 0, + 1751, + 0, + 0, + 0, + 0, + 0, + 0, + 1760, + 0, + 0, + 0, + 0, + 1765, + 0, + 0, + 0, + 0, + 0, + 1784, + 0, + 1785, + 1787, + 0, + 0, + 0, + 0, + 1788, + 1789, + 0, + 0, + 0, + 0, + 1790, + 1791, + 1793, + 0, + 1798, + 1799, + 0, + 0, + 0, + 0, + 1801, + 0, + 1803, + 1805, + 0, + 0, + 0, + 1806, + 1811, + 0, + 1812, + 1814, + 0, + 1821, + 0, + 0, + 0, + 0, + 0, + 1822, + 1833, + 0, + 0, + 0, + 0, + 0, + 0, + 1848, + 0, + 0, + 0, + 0, + 0, + 0, + 1857, + 0, + 0, + 0, + 1859, + 0, + 0, + 0, + 0, + 1861, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1866, + 0, + 1921, + 1925, + 0, + 0, + 0, + 1929, + 1930, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1931, + 0, + 0, + 0, + 0, + 1932, + 0, + 0, + 0, + 1934, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1946, + 0, + 0, + 1948, + 0, + 0, + 0, + 0, + 1950, + 0, + 1957, + 0, + 1958, + 0, + 0, + 0, + 0, + 0, + 1965, + 1967, + 0, + 0, + 0, + 0, + 1968, + 0, + 1969, + 0, + 1971, + 1972, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1973, + 0, + 0, + 0, + 0, + 1975, + 0, + 0, + 0, + 0, + 1976, + 1979, + 0, + 1982, + 0, + 0, + 0, + 0, + 1984, + 1988, + 0, + 0, + 0, + 0, + 1990, + 2004, + 2008, + 0, + 0, + 0, + 2012, + 2013, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2015, + 0, + 2016, + 2017, + 0, + 0, + 0, + 0, + 2021, + 0, + 0, + 2025, + 0, + 0, + 0, + 0, + 0, + 2029, + 2036, + 2040, + 0, + 2042, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2043, + 0, + 0, + 0, + 0, + 0, + 2045, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2046, + 2047, + 0, + 2048, + 2049, + 0, + 2059, + 0, + 0, + 2063, + 0, + 2064, + 2065, + 0, + 0, + 2066, + 0, + 0, + 0, + 0, + 0, + 0, + 2069, + 0, + 0, + 0, + 0, + 2070, + 0, + 2071, + 0, + 2072, + 0, + 0, + 0, + 0, + 2080, + 2082, + 2083, + 0, + 0, + 0, + 0, + 0, + 2085, + 0, + 2086, + 2088, + 2089, + 2105, + 0, + 0, + 0, + 0, + 2107, + 0, + 0, + 2116, + 2117, + 0, + 2120, + 0, + 0, + 2122, + 0, + 0, + 0, + 0, + 0, + 2123, + 0, + 0, + 2125, + 2127, + 2128, + 0, + 0, + 0, + 2130, + 0, + 0, + 0, + 2137, + 2139, + 2140, + 2141, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2144, + 2145, + 0, + 0, + 2146, + 2149, + 0, + 0, + 0, + 0, + 2150, + 0, + 0, + 2151, + 2158, + 0, + 2159, + 0, + 2160, + 0, + 0, + 0, + 0, + 0, + 0, + 2161, + 2162, + 0, + 0, + 2194, + 2202, + 0, + 0, + 0, + 0, + 0, + 0, + 2205, + 2217, + 0, + 2220, + 0, + 2221, + 0, + 2222, + 2224, + 0, + 0, + 0, + 0, + 2237, + 0, + 0, + 0, + 0, + 0, + 2238, + 0, + 2239, + 2241, + 0, + 0, + 2242, + 0, + 0, + 0, + 0, + 0, + 2243, + 0, + 0, + 0, + 0, + 0, + 0, + 2252, + 0, + 0, + 2253, + 0, + 0, + 0, + 2257, + 2258, + 0, + 0, + 0, + 2260, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2262, + 0, + 2264, + 0, + 0, + 0, + 0, + 0, + 2269, + 2270, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2271, + 0, + 2273, + 0, + 0, + 0, + 0, + 2277, + 0, + 0, + 0, 
+ 0, + 2278, + 0, + 0, + 0, + 0, + 2279, + 0, + 2280, + 0, + 2283, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2287, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2289, + 2290, + 0, + 0, + 0, + 0, + 2291, + 0, + 2292, + 0, + 0, + 0, + 2293, + 2295, + 2296, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2298, + 0, + 0, + 0, + 0, + 0, + 2303, + 0, + 2305, + 0, + 0, + 2306, + 0, + 2307, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2313, + 2314, + 2315, + 2316, + 0, + 0, + 2318, + 0, + 2319, + 0, + 2322, + 0, + 0, + 2323, + 0, + 2324, + 0, + 2326, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2335, + 0, + 2336, + 2338, + 2339, + 0, + 2340, + 0, + 0, + 0, + 2355, + 0, + 2375, + 0, + 2382, + 2386, + 0, + 2387, + 0, + 0, + 2394, + 0, + 0, + 0, + 0, + 2395, + 0, + 2397, + 0, + 0, + 0, + 0, + 0, + 2398, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2399, + 2402, + 2404, + 2408, + 2411, + 0, + 0, + 0, + 2413, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2415, + 0, + 0, + 2416, + 2417, + 2419, + 0, + 2420, + 0, + 0, + 0, + 0, + 0, + 2425, + 0, + 0, + 0, + 2426, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2427, + 2428, + 0, + 2429, + 0, + 0, + 2430, + 2434, + 0, + 2436, + 0, + 0, + 0, + 0, + 0, + 0, + 2441, + 2442, + 0, + 2445, + 0, + 0, + 2446, + 2457, + 0, + 2459, + 0, + 0, + 2462, + 0, + 2464, + 0, + 2477, + 0, + 2478, + 2486, + 0, + 0, + 0, + 2491, + 0, + 0, + 2493, + 0, + 0, + 2494, + 0, + 2495, + 0, + 2513, + 2523, + 0, + 0, + 0, + 0, + 2524, + 0, + 0, + 0, + 0, + 0, + 0, + 2528, + 2529, + 2530, + 0, + 0, + 2531, + 0, + 2533, + 0, + 0, + 2534, + 2535, + 0, + 2536, + 2537, + 0, + 2538, + 0, + 2539, + 2540, + 0, + 0, + 0, + 2545, + 2546, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2548, + 0, + 0, + 2549, + 0, + 2550, + 2555, + 0, + 0, + 0, + 0, + 0, + 2557, + 0, + 2560, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2561, + 0, + 2576, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2577, + 2578, + 0, + 0, + 0, + 2579, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2580, + 0, + 0, + 0, + 0, + 2581, + 0, + 0, + 0, + 0, + 2583, + 0, + 2584, + 0, + 2588, + 2590, + 0, + 0, + 0, + 2591, + 0, + 0, + 0, + 0, + 2593, + 2594, + 0, + 2595, + 0, + 2601, + 2602, + 0, + 0, + 2603, + 0, + 2605, + 0, + 0, + 0, + 2606, + 2607, + 2611, + 0, + 2615, + 0, + 0, + 0, + 2617, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2619, + 0, + 0, + 2620, + 0, + 0, + 0, + 2621, + 0, + 2623, + 0, + 2625, + 0, + 0, + 2628, + 2629, + 0, + 0, + 2635, + 2636, + 2637, + 0, + 0, + 2639, + 0, + 0, + 0, + 2642, + 0, + 0, + 0, + 0, + 2643, + 0, + 2644, + 0, + 2649, + 0, + 0, + 0, + 0, + 0, + 0, + 2655, + 2656, + 0, + 0, + 2657, + 0, + 0, + 0, + 0, + 0, + 2658, + 0, + 0, + 0, + 0, + 0, + 2659, + 0, + 0, + 0, + 0, + 2664, + 2685, + 0, + 2687, + 0, + 2688, + 0, + 0, + 2689, + 0, + 0, + 2694, + 0, + 2695, + 0, + 0, + 2698, + 0, + 2701, + 2706, + 0, + 0, + 0, + 2707, + 0, + 2709, + 2710, + 2711, + 0, + 0, + 0, + 2720, + 2730, + 2735, + 0, + 0, + 0, + 0, + 2738, + 2740, + 0, + 0, + 0, + 0, + 2747, + 0, + 0, + 0, + 0, + 0, + 0, + 2748, + 0, + 0, + 2749, + 0, + 0, + 0, + 0, + 0, + 2750, + 0, + 0, + 2752, + 2754, + 0, + 0, + 0, + 0, + 0, + 2758, + 0, + 0, + 0, + 0, + 2762, + 0, + 0, + 0, + 0, + 2763, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2764, + 2767, + 0, + 0, + 0, + 0, + 2768, + 0, + 0, + 2770, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2771, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2772, + 0, + 0, + 0, + 0, + 0, + 2773, + 2776, + 0, + 0, + 2783, + 0, + 0, + 2784, 
+ 0, + 2789, + 0, + 2790, + 0, + 0, + 0, + 2792, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2793, + 2795, + 0, + 0, + 0, + 0, + 0, + 0, + 2796, + 0, + 0, + 0, + 0, + 0, + 0, + 2797, + 2799, + 0, + 0, + 0, + 0, + 2803, + 0, + 0, + 0, + 0, + 2806, + 0, + 2807, + 2808, + 2817, + 2819, + 0, + 0, + 0, + 0, + 0, + 2821, + 0, + 0, + 0, + 0, + 2822, + 2823, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2824, + 0, + 0, + 2828, + 0, + 2834, + 0, + 0, + 0, + 0, + 0, + 0, + 2836, + 0, + 2838, + 0, + 0, + 2839, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2841, + 0, + 0, + 0, + 2842, + 0, + 0, + 0, + 0, + 0, + 2843, + 2844, + 0, + 0, + 0, + 0, + 2846, + 0, + 0, + 2847, + 0, + 2849, + 0, + 2853, + 0, + 0, + 0, + 0, + 0, + 2857, + 0, + 0, + 0, + 0, + 2858, + 0, + 2859, + 0, + 0, + 2860, + 0, + 2862, + 2868, + 0, + 0, + 0, + 0, + 2875, + 0, + 2876, + 0, + 0, + 2877, + 2878, + 2884, + 2889, + 2890, + 0, + 0, + 2891, + 0, + 0, + 2892, + 0, + 0, + 0, + 2906, + 2912, + 0, + 2913, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2916, + 0, + 2934, + 0, + 0, + 0, + 0, + 0, + 2935, + 0, + 0, + 0, + 0, + 2939, + 0, + 2940, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2941, + 0, + 0, + 0, + 2946, + 0, + 2949, + 0, + 0, + 2950, + 2954, + 2955, + 0, + 0, + 0, + 2959, + 2961, + 0, + 0, + 2962, + 0, + 2963, + 0, + 0, + 0, + 0, + 0, + 0, + 2964, + 2965, + 2966, + 2967, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2969, + 0, + 0, + 0, + 0, + 0, + 2970, + 2975, + 0, + 2982, + 2983, + 2984, + 0, + 0, + 0, + 0, + 0, + 2989, + 0, + 0, + 2990, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2991, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2998, + 0, + 3000, + 3001, + 0, + 0, + 3002, + 0, + 0, + 0, + 3003, + 0, + 0, + 3012, + 0, + 0, + 3022, + 0, + 0, + 3024, + 0, + 0, + 3025, + 3027, + 0, + 0, + 0, + 3030, + 0, + 0, + 0, + 0, + 3034, + 3035, + 0, + 0, + 3036, + 0, + 3039, + 0, + 3049, + 0, + 0, + 3050, + 0, + 0, + 0, + 0, + 0, + 0, + 3051, + 0, + 3053, + 0, + 0, + 0, + 0, + 3057, + 0, + 3058, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3063, + 0, + 0, + 3073, + 3074, + 3078, + 3079, + 0, + 3080, + 3086, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3087, + 0, + 3092, + 0, + 3095, + 0, + 3099, + 0, + 0, + 0, + 3100, + 0, + 3101, + 3102, + 0, + 3122, + 0, + 0, + 0, + 3124, + 0, + 3125, + 0, + 0, + 0, + 0, + 0, + 0, + 3132, + 3134, + 0, + 0, + 3136, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3147, + 0, + 0, + 3149, + 0, + 0, + 0, + 0, + 0, + 3150, + 3151, + 3152, + 0, + 0, + 0, + 0, + 3158, + 0, + 0, + 3160, + 0, + 0, + 3161, + 0, + 0, + 3162, + 0, + 3163, + 3166, + 3168, + 0, + 0, + 3169, + 3170, + 0, + 0, + 3171, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3182, + 0, + 3184, + 0, + 0, + 3188, + 0, + 0, + 3194, + 0, + 0, + 0, + 0, + 0, + 0, + 3204, + 0, + 0, + 0, + 0, + 3209, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3216, + 3217, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3219, + 0, + 0, + 3220, + 3222, + 0, + 3223, + 0, + 0, + 0, + 0, + 3224, + 0, + 3225, + 3226, + 0, + 3228, + 3233, + 0, + 3239, + 3241, + 3242, + 0, + 0, + 3251, + 3252, + 3253, + 3255, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3260, + 0, + 0, + 3261, + 0, + 0, + 0, + 3267, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3271, + 0, + 0, + 0, + 3278, + 0, + 3282, + 0, + 0, + 0, + 3284, + 0, + 0, + 0, + 3285, + 3286, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3287, + 3292, + 0, + 0, + 0, + 0, + 3294, + 3296, + 0, + 0, + 3299, + 3300, + 3301, + 0, + 3302, + 0, + 0, + 0, + 0, + 0, + 3304, + 3306, + 0, + 0, + 0, + 0, + 0, + 0, + 3308, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 
0, + 3311, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3312, + 3314, + 3315, + 0, + 3318, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3319, + 0, + 0, + 0, + 0, + 0, + 3321, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3322, + 0, + 0, + 3324, + 3325, + 0, + 0, + 3326, + 0, + 0, + 3328, + 3329, + 3331, + 0, + 0, + 3335, + 0, + 0, + 3337, + 0, + 3338, + 0, + 0, + 0, + 0, + 3343, + 3347, + 0, + 0, + 0, + 3348, + 0, + 0, + 3351, + 0, + 0, + 0, + 0, + 0, + 0, + 3354, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3355, + 0, + 0, + 3365, + 3366, + 3367, + 0, + 0, + 0, + 0, + 0, + 0, + 3368, + 3369, + 0, + 3370, + 0, + 0, + 3373, + 0, + 0, + 3376, + 0, + 0, + 3377, + 0, + 3379, + 3387, + 0, + 0, + 0, + 0, + 0, + 3390, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3402, + 0, + 3403, + 3436, + 3437, + 3439, + 0, + 0, + 3441, + 0, + 0, + 0, + 3442, + 0, + 0, + 3449, + 0, + 0, + 0, + 3450, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3451, + 0, + 0, + 3452, + 0, + 3453, + 3456, + 0, + 3457, + 0, + 0, + 3458, + 0, + 3459, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3460, + 0, + 0, + 3469, + 3470, + 0, + 0, + 3475, + 0, + 0, + 0, + 3480, + 3487, + 3489, + 0, + 3490, + 0, + 0, + 3491, + 3499, + 0, + 3500, + 0, + 0, + 3501, + 0, + 0, + 0, + 3502, + 0, + 3514, + 0, + 0, + 0, + 3516, + 3517, + 0, + 0, + 0, + 3518, + 0, + 0, + 0, + 0, + 3520, + 3521, + 3522, + 0, + 0, + 3526, + 3530, + 0, + 0, + 0, + 0, + 3531, + 0, + 0, + 0, + 0, + 3536, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3539, + 3541, + 0, + 0, + 3542, + 3544, + 0, + 3547, + 3548, + 0, + 0, + 3550, + 0, + 3553, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3554, + 0, + 3555, + 0, + 3558, + 0, + 3559, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3563, + 0, + 3581, + 0, + 0, + 0, + 3599, + 0, + 0, + 0, + 3600, + 0, + 3601, + 0, + 3602, + 3603, + 0, + 0, + 3606, + 3608, + 0, + 3610, + 3611, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3612, + 3616, + 3619, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3624, + 3628, + 0, + 3629, + 3634, + 3635, + 0, + 0, + 0, + 0, + 0, + 0, + 3636, + 0, + 3637, + 0, + 0, + 3638, + 3651, + 0, + 0, + 0, + 0, + 0, + 0, + 3652, + 3653, + 0, + 0, + 0, + 0, + 3656, + 3657, + 0, + 0, + 0, + 0, + 0, + 3658, + 0, + 0, + 0, + 0, + 3659, + 0, + 3661, + 3663, + 3664, + 0, + 3665, + 0, + 3692, + 0, + 0, + 0, + 3694, + 3696, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3698, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3700, + 0, + 0, + 3701, + 0, + 0, + 0, + 3708, + 3709, + 0, + 0, + 0, + 3711, + 3712, + 0, + 0, + 0, + 0, + 0, + 3723, + 0, + 3724, + 3725, + 0, + 0, + 3726, + 0, + 0, + 0, + 0, + 0, + 0, + 3728, + 3729, + 0, + 3734, + 3735, + 3737, + 0, + 0, + 0, + 3743, + 0, + 3745, + 0, + 0, + 3746, + 0, + 0, + 3747, + 3748, + 0, + 3757, + 0, + 3759, + 3766, + 3767, + 0, + 3768, + 0, + 0, + 0, + 0, + 3769, + 0, + 0, + 3771, + 0, + 3774, + 0, + 0, + 0, + 0, + 0, + 0, + 3775, + 0, + 0, + 0, + 0, + 0, + 0, + 3776, + 0, + 3777, + 3786, + 0, + 3788, + 3789, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3791, + 0, + 3811, + 0, + 0, + 0, + 0, + 0, + 3814, + 3815, + 3816, + 3820, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3821, + 0, + 0, + 3825, + 0, + 0, + 0, + 0, + 3835, + 0, + 0, + 3848, + 3849, + 0, + 0, + 0, + 0, + 3850, + 3851, + 3853, + 0, + 0, + 0, + 0, + 3859, + 0, + 3860, + 3862, + 0, + 0, + 0, + 0, + 0, + 3863, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3873, + 0, + 3874, + 0, + 3875, + 3886, + 0, + 3887, + 0, + 0, + 0, + 0, + 3892, + 3913, + 0, + 3914, + 0, + 0, + 0, + 3925, + 3931, + 0, + 
+ … [several thousand diff-added lines of machine-generated table data elided: single integer entries per line, zeros interleaved with monotonically increasing values from 3934 up through 15289, apparently part of a generated lookup-table array in a vendored Go source file] …
+ 15291, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15296, + 15297, + 0, + 0, + 15304, + 0, + 0, + 0, + 0, + 15306, + 0, + 0, + 0, + 0, + 0, + 0, + 15307, + 15308, + 0, + 15309, + 0, + 0, + 15311, + 0, + 0, + 15312, + 15313, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15314, + 15317, + 0, + 0, + 0, + 15318, + 15319, + 0, + 0, + 0, + 0, + 15320, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15321, + 0, + 0, + 0, + 0, + 0, + 15324, + 0, + 15325, + 15326, + 0, + 15330, + 0, + 0, + 0, + 0, + 15334, + 0, + 15335, + 0, + 15341, + 0, + 0, + 15342, + 0, + 0, + 15343, + 15344, + 0, + 0, + 0, + 0, + 15345, + 0, + 0, + 0, + 0, + 15347, + 0, + 0, + 15348, + 15349, + 15350, + 0, + 15356, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15357, + 0, + 15358, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15359, + 15360, + 15364, + 0, + 15380, + 0, + 0, + 0, + 0, + 0, + 15392, + 0, + 0, + 15393, + 0, + 15395, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15396, + 0, + 0, + 15397, + 15398, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15399, + 0, + 15400, + 0, + 0, + 0, + 15402, + 0, + 15405, + 15410, + 0, + 0, + 0, + 0, + 15411, + 0, + 0, + 0, + 15412, + 0, + 15416, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15428, + 0, + 15435, + 0, + 0, + 15438, + 0, + 0, + 0, + 0, + 15439, + 0, + 0, + 0, + 15440, + 0, + 0, + 0, + 15441, + 15449, + 15451, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15452, + 0, + 0, + 15455, + 0, + 0, + 0, + 15456, + 0, + 0, + 15458, + 0, + 15460, + 15461, + 0, + 0, + 0, + 0, + 0, + 15462, + 15464, + 0, + 15465, + 0, + 0, + 15466, + 0, + 0, + 15467, + 0, + 0, + 0, + 0, + 0, + 15468, + 0, + 0, + 0, + 0, + 15481, + 0, + 0, + 15484, + 0, + 15485, + 15486, + 0, + 0, + 0, + 15487, + 0, + 0, + 0, + 0, + 0, + 15488, + 0, + 15492, + 15498, + 0, + 0, + 0, + 15499, + 0, + 0, + 0, + 15500, + 0, + 15501, + 0, + 0, + 15512, + 0, + 15522, + 0, + 0, + 0, + 15524, + 0, + 15525, + 15526, + 0, + 0, + 15527, + 0, + 0, + 15545, + 15546, + 0, + 15548, + 15552, + 0, + 15553, + 0, + 0, + 0, + 15554, + 0, + 15555, + 0, + 15557, + 15565, + 15573, + 15577, + 15578, + 0, + 15582, + 0, + 15583, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15586, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15588, + 0, + 0, + 0, + 0, + 0, + 15589, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15593, + 15594, + 0, + 0, + 0, + 0, + 15595, + 0, + 0, + 0, + 0, + 0, + 0, + 15596, + 0, + 0, + 0, + 15597, + 0, + 0, + 0, + 0, + 15600, + 0, + 0, + 15601, + 0, + 0, + 0, + 0, + 15602, + 15603, + 0, + 0, + 0, + 0, + 0, + 0, + 15604, + 0, + 15609, + 0, + 0, + 15612, + 0, + 0, + 15613, + 0, + 0, + 15615, + 15617, + 15618, + 0, + 0, + 15620, + 0, + 15636, + 15637, + 0, + 0, + 15649, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15650, + 0, + 0, + 15651, + 0, + 0, + 0, + 15656, + 0, + 15658, + 0, + 0, + 0, + 15664, + 0, + 0, + 15665, + 0, + 0, + 15668, + 0, + 0, + 0, + 0, + 0, + 15669, + 0, + 0, + 15674, + 0, + 0, + 15675, + 0, + 0, + 0, + 0, + 15676, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15677, + 0, + 0, + 0, + 0, + 15678, + 0, + 0, + 0, + 0, + 0, + 15679, + 0, + 0, + 15681, + 0, + 15686, + 0, + 0, + 0, + 0, + 15687, + 0, + 15688, + 0, + 0, + 15690, + 0, + 0, + 0, + 15697, + 0, + 15699, + 15700, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15701, + 0, + 15702, + 15703, + 0, + 15704, + 0, + 15705, + 0, + 15707, + 0, + 15709, + 0, + 15712, + 15716, + 0, + 15717, + 0, + 15718, + 15720, + 0, + 0, + 0, + 0, + 0, + 15724, + 0, + 0, + 0, + 15725, + 
0, + 15726, + 0, + 0, + 0, + 15740, + 0, + 15745, + 15746, + 0, + 0, + 15747, + 0, + 15748, + 0, + 0, + 0, + 0, + 0, + 15749, + 0, + 0, + 0, + 15752, + 0, + 15753, + 0, + 0, + 0, + 0, + 0, + 0, + 15759, + 0, + 0, + 0, + 15765, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15767, + 0, + 0, + 0, + 15771, + 0, + 0, + 15784, + 0, + 0, + 0, + 0, + 15785, + 15790, + 15791, + 0, + 0, + 15792, + 0, + 0, + 0, + 15807, + 0, + 15811, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15818, + 0, + 0, + 0, + 15819, + 0, + 0, + 0, + 0, + 15821, + 0, + 0, + 0, + 0, + 0, + 15822, + 15824, + 0, + 0, + 15827, + 0, + 0, + 15829, + 15831, + 0, + 15832, + 0, + 0, + 15833, + 0, + 15835, + 15838, + 15839, + 15843, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15844, + 0, + 0, + 0, + 0, + 15845, + 15851, + 15856, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15858, + 15860, + 0, + 15861, + 0, + 0, + 0, + 15864, + 0, + 0, + 0, + 0, + 15865, + 0, + 0, + 0, + 0, + 0, + 0, + 15866, + 0, + 15872, + 0, + 0, + 15876, + 0, + 0, + 0, + 0, + 15877, + 15878, + 15883, + 15885, + 0, + 0, + 15888, + 0, + 0, + 0, + 0, + 0, + 15889, + 15890, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15892, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15893, + 0, + 0, + 15894, + 0, + 0, + 0, + 15895, + 0, + 15896, + 15897, + 0, + 15898, + 15901, + 15902, + 0, + 15911, + 15915, + 0, + 15916, + 0, + 15924, + 15935, + 0, + 15937, + 0, + 0, + 0, + 0, + 0, + 15950, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15958, + 0, + 0, + 0, + 15961, + 0, + 0, + 15966, + 0, + 15967, + 0, + 0, + 15977, + 0, + 0, + 15978, + 0, + 0, + 15981, + 15982, + 15983, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15986, + 0, + 0, + 0, + 15990, + 0, + 15991, + 15995, + 15998, + 0, + 15999, + 0, + 16000, + 0, + 0, + 0, + 0, + 16008, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16009, + 16011, + 0, + 16013, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16014, + 0, + 0, + 16015, + 16023, + 16024, + 16025, + 0, + 0, + 16026, + 0, + 16030, + 0, + 16032, + 0, + 16033, + 0, + 0, + 0, + 0, + 0, + 0, + 16035, + 16036, + 16037, + 0, + 0, + 0, + 0, + 0, + 16039, + 0, + 0, + 0, + 0, + 16041, + 0, + 0, + 0, + 0, + 0, + 16043, + 16044, + 0, + 0, + 16047, + 0, + 0, + 0, + 16048, + 0, + 0, + 16049, + 16050, + 16052, + 0, + 0, + 0, + 0, + 0, + 16055, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16056, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16058, + 16060, + 16061, + 0, + 0, + 16063, + 0, + 0, + 16064, + 0, + 0, + 0, + 16067, + 16068, + 0, + 0, + 16069, + 16078, + 0, + 0, + 0, + 16079, + 0, + 0, + 0, + 16080, + 0, + 16081, + 0, + 0, + 0, + 16088, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16089, + 16093, + 0, + 16097, + 0, + 16103, + 0, + 16104, + 16105, + 0, + 0, + 16256, + 0, + 0, + 16259, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16260, + 16261, + 0, + 0, + 16262, + 0, + 0, + 16263, + 0, + 16268, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16269, + 0, + 0, + 16270, + 16273, + 0, + 16274, + 0, + 0, + 0, + 0, + 16275, + 16276, + 16277, + 16280, + 0, + 0, + 0, + 16281, + 16284, + 0, + 0, + 0, + 16286, + 0, + 16289, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16290, + 0, + 0, + 0, + 0, + 16291, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16292, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16293, + 16295, + 16297, + 0, + 16302, + 0, + 16304, + 0, + 16305, + 0, + 16306, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16307, + 16308, + 16312, + 0, + 0, + 0, + 0, + 0, + 0, + 16313, + 16315, + 0, + 16318, + 0, + 0, + 0, + 16321, + 0, + 0, + 0, + 0, + 0, + 
0, + 0, + 16326, + 16333, + 16336, + 0, + 0, + 0, + 0, + 16337, + 16340, + 0, + 0, + 0, + 0, + 0, + 16345, + 0, + 0, + 16346, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16347, + 0, + 0, + 16348, + 0, + 0, + 0, + 0, + 16349, + 0, + 0, + 0, + 16350, + 0, + 16357, + 0, + 0, + 0, + 0, + 16359, + 16360, + 0, + 0, + 0, + 0, + 16362, + 16363, + 16364, + 16365, + 0, + 0, + 16366, + 0, + 0, + 0, + 0, + 16367, + 16368, + 0, + 16369, + 16374, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16376, + 0, + 0, + 0, + 0, + 16378, + 16379, + 0, + 16380, + 0, + 0, + 0, + 16381, + 16383, + 0, + 0, + 0, + 0, + 0, + 16390, + 0, + 0, + 0, + 16399, + 0, + 16402, + 16404, + 16406, + 16407, + 0, + 0, + 0, + 16409, + 16411, + 0, + 0, + 0, + 0, + 16412, + 0, + 16413, + 16415, + 16423, + 0, + 0, + 0, + 0, + 0, + 16424, + 0, + 0, + 0, + 16428, + 16434, + 16435, + 16449, + 0, + 16450, + 16451, + 0, + 0, + 0, + 16453, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16454, + 0, + 0, + 16456, + 16458, + 0, + 0, + 16459, + 0, + 0, + 16460, + 0, + 0, + 0, + 0, + 16462, + 0, + 16463, + 0, + 0, + 16466, + 0, + 0, + 0, + 0, + 0, + 16479, + 0, + 0, + 16480, + 0, + 16481, + 16484, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16485, + 0, + 0, + 0, + 0, + 0, + 0, + 16489, + 0, + 0, + 0, + 0, + 0, + 16491, + 0, + 0, + 16498, + 0, + 0, + 16503, + 0, + 16505, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16506, + 0, + 0, + 0, + 16508, + 16509, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16511, + 16513, + 0, + 0, + 0, + 16516, + 0, + 16517, + 0, + 16519, + 0, + 16529, + 0, + 0, + 16531, + 0, + 0, + 0, + 0, + 0, + 0, + 16534, + 0, + 0, + 16541, + 16542, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16543, + 16547, + 16548, + 0, + 0, + 0, + 16551, + 0, + 16552, + 0, + 0, + 0, + 16553, + 0, + 0, + 16558, + 0, + 0, + 16562, + 16565, + 0, + 0, + 0, + 16570, + 0, + 0, + 0, + 16573, + 16585, + 0, + 0, + 0, + 16586, + 16587, + 16595, + 0, + 16596, + 0, + 16598, + 0, + 0, + 0, + 16600, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16601, + 0, + 0, + 0, + 0, + 16603, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16604, + 16612, + 0, + 0, + 0, + 0, + 16613, + 0, + 16618, + 0, + 0, + 0, + 16640, + 0, + 0, + 16641, + 0, + 0, + 0, + 0, + 0, + 0, + 16645, + 0, + 0, + 0, + 0, + 16646, + 0, + 0, + 0, + 0, + 0, + 0, + 16651, + 0, + 0, + 0, + 0, + 16653, + 16654, + 0, + 0, + 0, + 16655, + 0, + 0, + 16656, + 16667, + 0, + 0, + 0, + 0, + 16671, + 0, + 16672, + 0, + 0, + 0, + 16673, + 0, + 0, + 0, + 0, + 0, + 16676, + 0, + 16686, + 0, + 0, + 0, + 0, + 16689, + 0, + 16690, + 0, + 16692, + 0, + 16693, + 0, + 16694, + 0, + 16696, + 0, + 0, + 0, + 16705, + 0, + 0, + 0, + 0, + 0, + 0, + 16707, + 0, + 0, + 0, + 16709, + 0, + 0, + 0, + 0, + 16711, + 0, + 16712, + 16713, + 0, + 0, + 0, + 16715, + 0, + 0, + 0, + 0, + 16716, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16718, + 16724, + 0, + 0, + 16726, + 16727, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16728, + 0, + 16729, + 0, + 0, + 16730, + 0, + 0, + 0, + 0, + 0, + 16731, + 0, + 0, + 0, + 16732, + 0, + 0, + 0, + 0, + 16734, + 16738, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16743, + 0, + 0, + 16745, + 0, + 0, + 0, + 0, + 0, + 16749, + 0, + 16752, + 0, + 0, + 0, + 0, + 16756, + 0, + 0, + 16758, + 0, + 16759, + 0, + 0, + 0, + 0, + 0, + 16760, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16762, + 0, + 16769, + 0, + 16770, + 0, + 16772, + 0, + 0, + 0, + 16777, + 16780, + 0, + 0, + 0, + 0, + 0, + 0, + 16781, + 0, + 0, + 16782, + 0, + 16784, + 0, + 0, + 16785, + 16787, + 16792, + 0, + 0, + 
16794, + 0, + 0, + 0, + 16798, + 0, + 0, + 16809, + 0, + 0, + 16814, + 16816, + 16817, + 0, + 16819, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16820, + 0, + 0, + 16836, + 16839, + 0, + 0, + 16841, + 16851, + 16857, + 0, + 0, + 16858, + 16859, + 0, + 0, + 16860, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16862, + 0, + 16863, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16864, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16876, + 0, + 16881, + 16882, + 0, + 16885, + 16886, + 0, + 16887, + 0, + 0, + 0, + 16889, + 16891, + 0, + 0, + 0, + 0, + 0, + 16894, + 16895, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16897, + 0, + 16898, + 0, + 0, + 0, + 0, + 0, + 16913, + 0, + 0, + 16924, + 16925, + 16926, + 0, + 0, + 16927, + 0, + 0, + 0, + 16937, + 16938, + 0, + 0, + 0, + 16940, + 16941, + 0, + 0, + 0, + 16942, + 16945, + 0, + 16946, + 16949, + 16950, + 0, + 0, + 0, + 16952, + 16955, + 0, + 0, + 0, + 16965, + 0, + 16969, + 0, + 0, + 16975, + 0, + 0, + 16976, + 0, + 0, + 0, + 0, + 16978, + 0, + 0, + 16981, + 0, + 16983, + 16989, + 0, + 0, + 0, + 0, + 16990, + 0, + 0, + 16991, + 0, + 0, + 0, + 16993, + 0, + 16994, + 16996, + 17000, + 0, + 0, + 0, + 0, + 0, + 17002, + 17004, + 0, + 17006, + 0, + 0, + 17007, + 0, + 0, + 0, + 0, + 17008, + 17013, + 17014, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17021, + 0, + 17031, + 0, + 0, + 0, + 0, + 0, + 17033, + 17036, + 0, + 17038, + 0, + 0, + 17039, + 0, + 17045, + 0, + 0, + 17046, + 17047, + 0, + 0, + 0, + 0, + 17048, + 0, + 17049, + 17050, + 0, + 17051, + 17053, + 0, + 17054, + 0, + 17055, + 0, + 0, + 0, + 0, + 0, + 17063, + 0, + 0, + 17064, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17065, + 0, + 0, + 17068, + 0, + 0, + 0, + 0, + 0, + 17072, + 0, + 0, + 0, + 0, + 0, + 0, + 17073, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17074, + 0, + 17080, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17081, + 17083, + 17084, + 0, + 0, + 0, + 17085, + 0, + 0, + 0, + 0, + 17092, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17093, + 0, + 17095, + 17102, + 0, + 0, + 0, + 0, + 0, + 0, + 17103, + 0, + 0, + 17105, + 0, + 17107, + 0, + 0, + 0, + 0, + 17114, + 0, + 0, + 0, + 0, + 0, + 17115, + 17125, + 17127, + 0, + 0, + 17128, + 0, + 0, + 0, + 17129, + 17130, + 0, + 17131, + 0, + 0, + 0, + 0, + 0, + 17132, + 17135, + 17145, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17146, + 0, + 17147, + 0, + 17148, + 0, + 0, + 0, + 0, + 0, + 0, + 17149, + 17150, + 0, + 17151, + 17153, + 0, + 17155, + 0, + 0, + 0, + 0, + 17163, + 17171, + 0, + 17174, + 0, + 0, + 0, + 0, + 17179, + 0, + 0, + 17182, + 17185, + 0, + 0, + 0, + 0, + 0, + 17186, + 0, + 0, + 17188, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17189, + 17191, + 0, + 17194, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17195, + 17196, + 17203, + 17204, + 0, + 0, + 17205, + 17217, + 0, + 0, + 0, + 0, + 0, + 17218, + 0, + 0, + 0, + 0, + 17219, + 0, + 17220, + 0, + 17221, + 0, + 0, + 17230, + 0, + 0, + 0, + 0, + 0, + 17236, + 0, + 17238, + 17239, + 0, + 0, + 0, + 17241, + 17244, + 0, + 0, + 17245, + 0, + 17248, + 0, + 0, + 17251, + 0, + 17252, + 0, + 0, + 17264, + 0, + 17266, + 0, + 0, + 0, + 17268, + 0, + 0, + 0, + 0, + 17271, + 17272, + 0, + 17273, + 0, + 17295, + 0, + 17302, + 0, + 17305, + 0, + 0, + 0, + 17306, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17308, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17309, + 0, + 17310, + 17313, + 0, + 0, + 0, + 0, + 17314, + 17315, + 0, + 17317, + 0, + 0, + 0, + 
0, + 17318, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17320, + 0, + 0, + 0, + 0, + 0, + 0, + 17334, + 0, + 17344, + 17348, + 0, + 0, + 0, + 17350, + 17351, + 0, + 0, + 17353, + 0, + 0, + 17354, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17355, + 0, + 0, + 0, + 0, + 0, + 0, + 17356, + 17357, + 0, + 0, + 17359, + 0, + 0, + 0, + 17371, + 0, + 17372, + 0, + 0, + 0, + 17393, + 0, + 0, + 0, + 0, + 17394, + 0, + 0, + 0, + 0, + 0, + 17395, + 0, + 0, + 17399, + 0, + 0, + 0, + 17401, + 17417, + 0, + 17418, + 0, + 17419, + 0, + 0, + 0, + 0, + 0, + 17422, + 17423, + 0, + 0, + 0, + 0, + 0, + 17424, + 0, + 0, + 0, + 0, + 0, + 17428, + 17429, + 17433, + 0, + 0, + 0, + 17437, + 0, + 0, + 17441, + 0, + 0, + 17442, + 0, + 0, + 17453, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17454, + 17456, + 17462, + 0, + 0, + 17466, + 0, + 0, + 17468, + 0, + 0, + 17469, + 0, + 0, + 0, + 0, + 17470, + 0, + 17475, + 0, + 0, + 0, + 0, + 0, + 17479, + 0, + 0, + 0, + 17483, + 17484, + 0, + 17485, + 0, + 17486, + 0, + 17491, + 17492, + 0, + 0, + 17493, + 0, + 17494, + 17495, + 0, + 0, + 0, + 17496, + 0, + 0, + 0, + 17497, + 0, + 0, + 0, + 17502, + 0, + 0, + 0, + 0, + 0, + 17503, + 0, + 17505, + 0, + 17507, + 0, + 0, + 0, + 17512, + 17513, + 17514, + 0, + 0, + 17515, + 0, + 0, + 0, + 17519, + 0, + 0, + 0, + 17522, + 0, + 0, + 17523, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17527, + 0, + 0, + 0, + 17528, + 0, + 0, + 0, + 17534, + 0, + 0, + 0, + 0, + 17536, + 0, + 0, + 0, + 17539, + 0, + 17540, + 17543, + 17549, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17556, + 0, + 0, + 17558, + 0, + 17559, + 0, + 0, + 17560, + 0, + 0, + 0, + 17563, + 0, + 0, + 0, + 0, + 0, + 0, + 17564, + 0, + 0, + 17565, + 17566, + 0, + 17567, + 0, + 0, + 0, + 0, + 0, + 0, + 17569, + 17570, + 0, + 17575, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17581, + 0, + 0, + 0, + 17582, + 17583, + 0, + 17586, + 0, + 0, + 17587, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17588, + 0, + 0, + 0, + 0, + 17596, + 17597, + 0, + 0, + 17598, + 17600, + 0, + 0, + 0, + 0, + 0, + 0, + 17601, + 0, + 0, + 0, + 17604, + 0, + 0, + 17605, + 0, + 0, + 17607, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17612, + 0, + 0, + 17618, + 0, + 17621, + 17622, + 0, + 0, + 0, + 0, + 17623, + 0, + 0, + 17624, + 0, + 0, + 17630, + 0, + 0, + 17631, + 17633, + 17634, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17635, + 0, + 0, + 17636, + 0, + 0, + 17637, + 0, + 17638, + 0, + 17640, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17641, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17643, + 0, + 0, + 0, + 0, + 17645, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17646, + 17662, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17663, + 17664, + 0, + 17665, + 17666, + 0, + 0, + 0, + 17669, + 17671, + 17673, + 0, + 17679, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17684, + 0, + 0, + 0, + 17686, + 0, + 17714, + 0, + 0, + 17720, + 17722, + 17726, + 0, + 0, + 17728, + 0, + 0, + 17729, + 0, + 0, + 0, + 17732, + 0, + 17733, + 0, + 17734, + 0, + 0, + 0, + 17735, + 0, + 0, + 0, + 0, + 17737, + 0, + 0, + 0, + 0, + 17739, + 0, + 0, + 0, + 17741, + 17742, + 0, + 0, + 0, + 0, + 17743, + 17744, + 17745, + 0, + 0, + 0, + 17749, + 0, + 17750, + 17751, + 17752, + 17754, + 17761, + 17762, + 0, + 17763, + 0, + 17766, + 0, + 17772, + 0, + 0, + 0, + 0, + 0, + 17775, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17776, + 0, + 0, + 17777, + 0, + 0, + 17778, + 17779, + 0, + 17782, + 17783, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 
17784, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17821, + 0, + 0, + 0, + 17822, + 0, + 0, + 0, + 17823, + 17825, + 0, + 0, + 0, + 0, + 0, + 17826, + 17831, + 17832, + 17833, + 0, + 0, + 17845, + 0, + 0, + 0, + 17846, + 0, + 0, + 0, + 17848, + 17850, + 17854, + 0, + 17855, + 0, + 0, + 17859, + 0, + 0, + 0, + 0, + 0, + 0, + 17860, + 17861, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17870, + 17871, + 0, + 0, + 0, + 0, + 0, + 0, + 17872, + 0, + 0, + 0, + 17879, + 0, + 0, + 0, + 17881, + 17883, + 0, + 17884, + 0, + 17885, + 0, + 0, + 17886, + 0, + 0, + 17887, + 17891, + 17953, + 0, + 0, + 0, + 0, + 17954, + 0, + 0, + 17955, + 0, + 17968, + 0, + 0, + 17972, + 0, + 0, + 0, + 0, + 0, + 17974, + 0, + 0, + 0, + 0, + 17976, + 17978, + 0, + 0, + 17983, + 0, + 0, + 0, + 0, + 18003, + 0, + 0, + 0, + 0, + 0, + 18007, + 0, + 0, + 0, + 0, + 0, + 18009, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18010, + 0, + 0, + 0, + 0, + 0, + 0, + 18012, + 0, + 0, + 18014, + 0, + 0, + 0, + 18015, + 0, + 0, + 0, + 18016, + 0, + 18017, + 0, + 0, + 0, + 18030, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18031, + 0, + 0, + 18036, + 18037, + 18038, + 0, + 0, + 18049, + 18056, + 0, + 18057, + 18058, + 0, + 18059, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18062, + 0, + 0, + 0, + 0, + 18064, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18067, + 0, + 0, + 0, + 18068, + 0, + 0, + 18075, + 0, + 0, + 18078, + 18093, + 18094, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18097, + 0, + 0, + 0, + 0, + 0, + 18098, + 18100, + 0, + 0, + 0, + 18108, + 0, + 18111, + 0, + 0, + 18112, + 0, + 18113, + 0, + 0, + 18115, + 18116, + 0, + 18118, + 0, + 0, + 0, + 0, + 18121, + 0, + 0, + 0, + 0, + 18123, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18124, + 0, + 0, + 0, + 0, + 18125, + 18126, + 0, + 18127, + 0, + 0, + 18128, + 18135, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18150, + 0, + 0, + 0, + 0, + 0, + 18151, + 18152, + 0, + 0, + 18156, + 18164, + 0, + 18166, + 18171, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18172, + 18183, + 0, + 18184, + 0, + 0, + 0, + 0, + 18185, + 0, + 18187, + 0, + 0, + 0, + 0, + 0, + 18188, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18189, + 0, + 0, + 18190, + 0, + 0, + 18191, + 18192, + 0, + 0, + 18194, + 18195, + 18196, + 0, + 0, + 0, + 18197, + 0, + 18203, + 0, + 18204, + 0, + 0, + 0, + 0, + 18205, + 0, + 0, + 0, + 18207, + 18208, + 0, + 0, + 18214, + 0, + 0, + 0, + 18215, + 18216, + 0, + 0, + 0, + 18220, + 0, + 0, + 18222, + 0, + 0, + 0, + 0, + 0, + 18223, + 0, + 18225, + 18231, + 0, + 18234, + 0, + 18235, + 0, + 0, + 0, + 0, + 18240, + 0, + 0, + 18241, + 18242, + 0, + 0, + 0, + 0, + 0, + 18243, + 18251, + 0, + 18253, + 0, + 18254, + 0, + 0, + 0, + 18266, + 0, + 0, + 0, + 0, + 0, + 0, + 18269, + 18270, + 18271, + 18273, + 18281, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18282, + 0, + 18283, + 0, + 18284, + 0, + 0, + 0, + 0, + 0, + 0, + 18285, + 0, + 18287, + 18289, + 0, + 0, + 18290, + 0, + 0, + 0, + 0, + 18308, + 0, + 0, + 0, + 18310, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18311, + 0, + 18312, + 18313, + 0, + 18315, + 0, + 0, + 18316, + 18320, + 0, + 18331, + 0, + 18332, + 0, + 18336, + 0, + 0, + 0, + 0, + 18337, + 0, + 18340, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18341, + 0, + 18344, + 18345, + 0, + 18346, + 0, + 0, + 0, + 0, + 0, + 18348, + 0, + 18351, + 0, + 0, + 18356, + 0, + 0, + 0, + 0, + 0, + 0, + 18357, + 0, + 0, + 0, + 0, + 0, + 18367, + 0, + 0, + 0, + 18368, + 0, + 18369, + 0, + 18370, + 18371, + 0, + 0, + 0, + 
18437, + 18444, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18445, + 18450, + 0, + 0, + 0, + 0, + 18451, + 0, + 18452, + 0, + 0, + 0, + 18453, + 0, + 0, + 0, + 0, + 0, + 18455, + 0, + 0, + 0, + 18456, + 0, + 18457, + 0, + 18460, + 0, + 0, + 18461, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18466, + 0, + 0, + 18467, + 0, + 0, + 0, + 0, + 18473, + 0, + 0, + 0, + 18476, + 0, + 18477, + 0, + 0, + 0, + 18478, + 18479, + 18480, + 0, + 0, + 0, + 18485, + 0, + 0, + 0, + 18486, + 0, + 0, + 0, + 0, + 0, + 0, + 18488, + 18490, + 0, + 0, + 0, + 0, + 0, + 0, + 18491, + 0, + 0, + 0, + 0, + 0, + 18495, + 0, + 0, + 18496, + 0, + 0, + 0, + 0, + 0, + 0, + 18505, + 0, + 18521, + 0, + 18522, + 18523, + 0, + 0, + 0, + 18525, + 18526, + 0, + 0, + 0, + 0, + 0, + 18527, + 0, + 0, + 0, + 0, + 18532, + 18533, + 0, + 18534, + 0, + 0, + 0, + 0, + 0, + 0, + 18535, + 18537, + 0, + 18538, + 0, + 0, + 0, + 0, + 0, + 0, + 18540, + 18541, + 18542, + 18543, + 0, + 18546, + 0, + 0, + 0, + 0, + 18553, + 18556, + 0, + 0, + 18558, + 0, + 0, + 18569, + 18571, + 0, + 0, + 0, + 18572, + 0, + 18574, + 0, + 0, + 0, + 0, + 18586, + 0, + 0, + 0, + 0, + 0, + 18588, + 0, + 0, + 18589, + 0, + 0, + 0, + 0, + 0, + 0, + 18590, + 0, + 18592, + 0, + 0, + 0, + 0, + 18594, + 0, + 0, + 0, + 18596, + 0, + 0, + 18597, + 18598, + 0, + 0, + 18601, + 0, + 0, + 0, + 0, + 18602, + 0, + 0, + 0, + 18603, + 18604, + 0, + 18605, + 0, + 0, + 0, + 0, + 18608, + 0, + 0, + 18611, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18612, + 0, + 18616, + 0, + 0, + 18617, + 18619, + 0, + 0, + 0, + 18628, + 0, + 0, + 0, + 18629, + 0, + 0, + 18630, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18631, + 0, + 18632, + 0, + 0, + 18635, + 18637, + 0, + 0, + 0, + 0, + 0, + 0, + 18641, + 18643, + 18648, + 0, + 18652, + 0, + 0, + 18653, + 0, + 18655, + 18656, + 0, + 0, + 0, + 18657, + 0, + 0, + 18666, + 18674, + 0, + 0, + 0, + 0, + 18677, + 18684, + 18685, + 0, + 0, + 18686, + 0, + 0, + 18690, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18695, + 18696, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18697, + 0, + 0, + 18700, + 0, + 0, + 0, + 0, + 0, + 0, + 18702, + 0, + 18708, + 0, + 0, + 18709, + 0, + 18710, + 0, + 0, + 18711, + 0, + 18714, + 0, + 0, + 18718, + 0, + 0, + 0, + 0, + 0, + 0, + 18719, + 0, + 0, + 18722, + 0, + 18726, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18731, + 0, + 0, + 0, + 0, + 0, + 18739, + 18741, + 0, + 0, + 18742, + 0, + 18743, + 18744, + 18746, + 18748, + 0, + 18752, + 18753, + 0, + 0, + 18754, + 18763, + 0, + 18765, + 0, + 0, + 0, + 18766, + 0, + 0, + 0, + 18769, + 0, + 0, + 0, + 0, + 0, + 18773, + 18778, + 18779, + 18781, + 0, + 0, + 18784, + 18787, + 0, + 18788, + 0, + 18793, + 0, + 0, + 0, + 0, + 0, + 0, + 18795, + 0, + 0, + 18800, + 0, + 0, + 0, + 0, + 0, + 18801, + 18804, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18806, + 0, + 0, + 0, + 18811, + 18815, + 18816, + 0, + 0, + 0, + 0, + 18825, + 0, + 0, + 18827, + 18829, + 0, + 0, + 18830, + 0, + 0, + 0, + 0, + 18831, + 0, + 0, + 18832, + 0, + 0, + 0, + 0, + 18833, + 0, + 18840, + 0, + 18841, + 0, + 18842, + 0, + 0, + 0, + 0, + 18843, + 0, + 18844, + 0, + 0, + 0, + 0, + 0, + 0, + 18845, + 18846, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18848, + 0, + 0, + 0, + 18853, + 18860, + 0, + 0, + 18862, + 18866, + 0, + 0, + 18867, + 18869, + 0, + 0, + 18874, + 18881, + 18891, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18892, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18895, + 0, + 18896, + 0, + 0, + 0, + 
18900, + 0, + 0, + 0, + 18901, + 0, + 18902, + 18915, + 18916, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18919, + 0, + 0, + 0, + 0, + 0, + 18920, + 0, + 0, + 0, + 18921, + 18929, + 0, + 0, + 0, + 0, + 18930, + 0, + 0, + 0, + 0, + 0, + 0, + 18932, + 0, + 0, + 0, + 0, + 18934, + 18942, + 0, + 0, + 0, + 18951, + 18957, + 0, + 0, + 0, + 0, + 18958, + 0, + 0, + 0, + 0, + 18959, + 18960, + 0, + 0, + 18961, + 0, + 0, + 18962, + 0, + 0, + 0, + 0, + 18963, + 18964, + 0, + 0, + 0, + 18965, + 0, + 18967, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18968, + 0, + 18969, + 0, + 18970, + 18973, + 18976, + 0, + 0, + 0, + 0, + 0, + 0, + 18977, + 0, + 0, + 0, + 18981, + 0, + 0, + 0, + 18990, + 0, + 18998, + 0, + 0, + 0, + 0, + 0, + 18999, + 19003, + 0, + 0, + 19005, + 0, + 0, + 0, + 19006, + 0, + 0, + 0, + 0, + 0, + 0, + 19008, + 19011, + 0, + 0, + 19018, + 0, + 0, + 19019, + 0, + 19024, + 0, + 19031, + 19032, + 0, + 19039, + 0, + 19041, + 19050, + 0, + 0, + 0, + 19051, + 19055, + 19056, + 0, + 19059, + 19063, + 19064, + 0, + 0, + 19088, + 0, + 0, + 0, + 19093, + 19094, + 0, + 0, + 0, + 0, + 19095, + 0, + 19096, + 0, + 0, + 0, + 19097, + 0, + 0, + 19098, + 0, + 19099, + 19100, + 0, + 0, + 19103, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19111, + 0, + 0, + 0, + 0, + 0, + 0, + 19112, + 0, + 0, + 0, + 19116, + 19117, + 0, + 19121, + 19122, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19123, + 19124, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19125, + 19126, + 0, + 19128, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19129, + 19130, + 19131, + 19132, + 0, + 0, + 19146, + 0, + 0, + 19147, + 19156, + 19158, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19182, + 19185, + 0, + 0, + 19187, + 0, + 0, + 0, + 19193, + 0, + 0, + 0, + 0, + 0, + 19194, + 0, + 19197, + 0, + 0, + 0, + 0, + 19198, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19202, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19203, + 0, + 19205, + 19210, + 0, + 0, + 0, + 19213, + 0, + 19218, + 0, + 0, + 0, + 19223, + 19229, + 0, + 0, + 19230, + 0, + 0, + 19231, + 19232, + 19233, + 19239, + 0, + 0, + 0, + 0, + 0, + 19240, + 0, + 19248, + 19249, + 0, + 0, + 0, + 0, + 19254, + 0, + 19256, + 19258, + 19259, + 0, + 0, + 19261, + 0, + 19266, + 0, + 0, + 0, + 19272, + 0, + 19278, + 19281, + 19282, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19283, + 0, + 0, + 19284, + 0, + 0, + 19285, + 19287, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19288, + 19291, + 0, + 19292, + 0, + 0, + 0, + 0, + 19297, + 0, + 19298, + 0, + 0, + 0, + 0, + 19302, + 19303, + 0, + 0, + 0, + 0, + 19304, + 19305, + 0, + 0, + 0, + 0, + 19314, + 0, + 0, + 19315, + 0, + 0, + 19321, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19322, + 0, + 19333, + 0, + 19334, + 19335, + 0, + 19336, + 19337, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19346, + 0, + 0, + 19353, + 0, + 19354, + 19362, + 0, + 19366, + 19367, + 0, + 0, + 19369, + 0, + 19375, + 0, + 19377, + 19380, + 19388, + 0, + 0, + 0, + 0, + 0, + 19389, + 19390, + 0, + 0, + 0, + 0, + 19392, + 0, + 0, + 0, + 0, + 0, + 19402, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19412, + 0, + 0, + 19413, + 19422, + 0, + 19424, + 0, + 0, + 0, + 19425, + 0, + 0, + 0, + 19428, + 0, + 0, + 0, + 0, + 19431, + 0, + 0, + 0, + 0, + 0, + 19432, + 0, + 0, + 0, + 0, + 0, + 19448, + 19459, + 0, + 0, + 19461, + 0, + 19462, + 19463, + 0, + 19467, + 19474, + 19482, + 0, + 0, + 0, + 0, 
+ 19494, + 0, + 0, + 0, + 0, + 19501, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19502, + 19504, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19505, + 0, + 0, + 0, + 0, + 19506, + 19507, + 0, + 0, + 0, + 19508, + 0, + 0, + 19511, + 0, + 0, + 19514, + 0, + 19515, + 0, + 19516, + 0, + 19518, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19530, + 0, + 19537, + 19538, + 0, + 19543, + 19546, + 0, + 19547, + 19551, + 0, + 0, + 0, + 0, + 0, + 0, + 19552, + 19553, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19555, + 0, + 0, + 19556, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19560, + 19561, + 0, + 0, + 19562, + 0, + 0, + 0, + 0, + 0, + 0, + 19565, + 19567, + 0, + 19568, + 0, + 0, + 0, + 19569, + 19570, + 0, + 19578, + 0, + 0, + 0, + 0, + 19580, + 0, + 0, + 0, + 0, + 19581, + 19584, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19585, + 19586, + 0, + 0, + 0, + 19587, + 19588, + 0, + 19589, + 0, + 0, + 0, + 0, + 0, + 0, + 19592, + 19593, + 19599, + 0, + 19600, + 0, + 0, + 19604, + 0, + 0, + 19605, + 0, + 19606, + 19608, + 19610, + 0, + 19613, + 19614, + 0, + 0, + 0, + 0, + 0, + 0, + 19616, + 19617, + 0, + 0, + 19618, + 0, + 0, + 19619, + 0, + 0, + 0, + 19620, + 19621, + 19631, + 0, + 0, + 19632, + 19634, + 19636, + 0, + 19643, + 0, + 0, + 19644, + 19658, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19659, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19675, + 19677, + 0, + 0, + 0, + 0, + 19679, + 0, + 19683, + 0, + 19684, + 0, + 0, + 0, + 0, + 0, + 0, + 19687, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19688, + 19689, + 19692, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19695, + 19697, + 0, + 0, + 0, + 0, + 0, + 19698, + 19699, + 0, + 0, + 19700, + 0, + 19702, + 0, + 0, + 19703, + 0, + 0, + 0, + 0, + 0, + 0, + 19704, + 19708, + 0, + 19710, + 0, + 19713, + 0, + 0, + 0, + 19715, + 0, + 0, + 0, + 0, + 19718, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19720, + 0, + 19722, + 0, + 0, + 19725, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19730, + 0, + 0, + 0, + 0, + 0, + 19731, + 0, + 19734, + 19735, + 19739, + 0, + 0, + 19740, + 0, + 19741, + 0, + 0, + 0, + 19746, + 0, + 0, + 19747, + 0, + 19771, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19772, + 19775, + 0, + 0, + 0, + 0, + 0, + 0, + 19778, + 0, + 0, + 0, + 0, + 0, + 19779, + 0, + 0, + 19780, + 19790, + 0, + 19791, + 0, + 0, + 19792, + 0, + 0, + 0, + 19793, + 0, + 0, + 19796, + 19797, + 0, + 0, + 0, + 19799, + 0, + 0, + 0, + 19801, + 0, + 0, + 0, + 0, + 19803, + 0, + 19804, + 0, + 19805, + 0, + 0, + 19807, + 0, + 0, + 0, + 19808, + 0, + 0, + 0, + 0, + 0, + 0, + 19809, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19816, + 0, + 19821, + 0, + 19822, + 19830, + 19831, + 0, + 0, + 0, + 19833, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19838, + 0, + 0, + 0, + 0, + 19839, + 0, + 0, + 19843, + 0, + 0, + 0, + 0, + 19845, + 0, + 0, + 0, + 0, + 19847, + 0, + 0, + 19848, + 0, + 19849, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19851, + 0, + 0, + 0, + 19854, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19864, + 0, + 19865, + 0, + 19866, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19868, + 0, + 0, + 19870, + 0, + 0, + 19871, + 0, + 0, + 19872, + 19873, + 19875, + 0, + 19880, + 19882, + 19884, + 0, + 0, + 19885, + 19886, + 19888, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19890, + 19892, + 19893, + 0, + 0, + 19894, + 0, + 0, + 0, + 19895, + 0, + 19896, + 19902, + 
0, + 0, + 19903, + 0, + 0, + 19905, + 0, + 0, + 0, + 19906, + 0, + 19908, + 0, + 19909, + 19911, + 0, + 0, + 0, + 19913, + 19920, + 0, + 19938, + 19939, + 19940, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19942, + 0, + 19943, + 0, + 19945, + 0, + 0, + 0, + 19951, + 19952, + 19954, + 19960, + 0, + 19965, + 0, + 19971, + 0, + 0, + 0, + 0, + 0, + 19975, + 0, + 19976, + 0, + 19990, + 0, + 0, + 19991, + 0, + 19993, + 0, + 19995, + 0, + 0, + 0, + 19998, + 19999, + 20001, + 0, + 20003, + 20005, + 0, + 20011, + 20012, + 0, + 0, + 0, + 0, + 0, + 0, + 20014, + 0, + 20020, + 0, + 0, + 0, + 0, + 20021, + 0, + 0, + 0, + 0, + 0, + 20023, + 20024, + 0, + 0, + 0, + 0, + 0, + 20025, + 0, + 0, + 20027, + 0, + 0, + 20029, + 0, + 0, + 20032, + 0, + 0, + 0, + 0, + 20044, + 20045, + 0, + 20048, + 20049, + 0, + 0, + 20050, + 0, + 20052, + 0, + 0, + 20054, + 20057, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 20059, + 0, + 0, + 20061, + 0, + 20062, + 0, + 20064, + 0, + 0, + 20066, + 0, + 0, + 20067, + 0, + 0, + 0, + 0, + 20069, + 0, + 0, + 0, + 0, + 0, + 0, + 20070, + 20071, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 20072, + 0, + 0, + 20073, + 20074, + 0, + 0, + 0, + 0, + 0, + 20075, + 0, + 20078, + 0, + 0, + 0, + 0, + 20080, + 0, + 20081, + 0, + 0, + 0, + 0, + 0, + 0, + 20095, + 0, + 20098, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 20107, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 20112, + 0, + 0, + 0, + 20113, + 20114, + 0, + 0, + 0, + 20115, + 20123, + 20124, + 0, + 0, + 0, + 20131, + 20133, + 20134, + 0, + 0, + 0, + 0, + 20136, + 0, + 0, + 20137, + 20138, + 20150, + 0, + 20152, + 0, + 0, + 0, + 20153, + 0, + 0, + 20154, + 0, + 0, + 0, + 20158, + 0, + 20163, + 0, + 0, + 20164, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 20166, + 0, + 20168, + 0, + 20170, + 0, + 20175, + 0, + 0, + 20178, + 0, + 0, + 0, + 0, + 20223, + 0, + 0, + 0, + 0, + 20224, + 0, + 20226, + 0, + 0, + 20230, + 0, + 20231, + 0, + 0, + 0, + 0, + 20232, + 0, + 0, + 20233, + 20234, + 0, + 20244, + 0, + 20247, + 0, + 0, + 0, + 0, + 0, + 0, + 20249, + 0, + 0, + 0, + 20250, + 0, + 0, + 0, + 0, + 20251, + 0, + 20253, + 0, + 20254, + 0, + 0, + 0, + 0, + 20256, + 0, + 0, + 20264, + 0, + 0, + 0, + 0, + 20266, + 0, + 0, + 0, + 20278, + 0, + 0, + 20279, + 20282, + 0, + 0, + 0, + 0, + 0, + 20283, + 0, + 20284, + 0, + 20285, + 0, + 20287, + 20290, + 0, + 0, + 0, + 0, + 20292, + 0, + 0, + 0, + 0, + 20293, + 20297, + 0, + 0, + 0, + 0, + 0, + 0, + 20299, + 0, + 20300, + 20303, + 0, + 0, + 0, + 0, + 0, + 0, + 20307, + 0, + 0, + 20308, + 0, + 20309, + 0, + 20310, + 0, + 0, + 0, + 0, + 0, + 0, + 20312, + 0, + 0, + 0, + 20314, + 0, + 0, + 0, + 0, + 20315, + 20316, + 0, + 20322, + 0, + 0, + 0, + 0, + 0, + 0, + 20339, + 0, + 0, + 0, + 20342, + 0, + 0, + 0, + 0, + 20352, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 20362, + 0, + 0, + 20365, + 0, + 20375, + 20377, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 20378, + 20379, + 0, + 20380, + 0, + 0, + 20381, + 0, + 20382, + 0, + 20383, + 0, + 20388, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 20390, + 20392, + 20393, + 0, + 0, + 20395, + 0, + 0, + 0, + 0, + 0, + 20396, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 20398, + 20415, + 0, + 0, + 0, + 20417, + 0, + 0, + 20420, + 0, + 0, + 20426, + 20428, + 0, + 20431, + 0, + 0, + 20432, + 0, + 20433, + 20434, + 20435, + 0, + 0, + 0, + 0, + 20440, + 0, + 0, + 0, + 0, + 0, + 20442, + 0, + 20443, + 0, + 20446, + 0, + 0, + 0, + 0, + 20448, + 0, + 20451, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 20452, + 20453, + 0, + 0, + 20454, + 0, 
+ 0, + 0, + 0, + 0, + 0, + 20457, + 0, + 20458, + 0, + 0, + 0, + 20465, + 0, + 0, + 0, + 0, + 0, + 20469, + 0, + 0, + 0, + 20473, + 0, + 20476, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 20477, + 0, + 0, + 20485, + 0, + 0, + 20486, + 0, + 0, + 20487, + 0, + 20496, + 0, + 20497, + 0, + 0, + 20498, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 20499, + 20500, + 0, + 20501, + 0, + 0, + 0, + 0, + 0, + 20520, + 20527, + 0, + 20529, + 0, + 0, + 0, + 0, + 20539, + 0, + 0, + 20540, + 0, + 0, + 0, + 20543, + 0, + 0, + 0, + 20546, + 0, + 0, + 0, + 0, + 0, + 20548, + 0, + 0, + 20563, + 0, + 0, + 20564, + 0, + 20566, + 0, + 0, + 0, + 0, + 0, + 20589, + 0, + 0, + 0, + 0, + 20590, + 0, + 0, + 20593, + 20594, + 0, + 0, + 0, + 0, + 20595, + 0, + 20597, + 20598, + 0, + 0, + 0, + 20618, + 20620, + 0, + 0, + 0, + 0, + 20621, + 0, + 0, + 0, + 0, + 20627, + 0, + 0, + 0, + 0, + 0, + 20628, + 0, + 0, + 0, + 20629, + 0, + 20630, + 0, + 0, + 20639, + 0, + 0, + 0, + 0, + 0, + 20707, + 0, + 0, + 20709, + 0, + 0, + 0, + 20713, + 20714, + 0, + 0, + 0, + 0, + 0, + 20724, + 20725, + 0, + 0, + 0, + 0, + 20726, + 20728, + 20729, + 0, + 20733, + 0, + 20734, + 0, + 20735, + 20736, + 0, + 20737, + 0, + 0, + 20744, + 0, + 20745, + 0, + 20748, + 0, + 0, + 20749, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 20750, + 0, + 0, + 0, + 0, + 20754, + 0, + 0, + 0, + 20761, + 0, + 0, + 20763, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 20766, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 20767, + 0, + 0, + 0, + 0, + 20768, + 0, + 20769, + 20777, + 0, + 0, + 0, + 0, + 0, + 0, + 20785, + 0, + 0, + 0, + 20786, + 20795, + 20801, + 0, + 20802, + 0, + 20807, + 0, + 0, + 20808, + 0, + 0, + 20810, + 0, + 0, + 20811, + 0, + 20812, + 0, + 0, + 0, + 0, + 0, + 20813, + 0, + 0, + 20818, + 20820, + 20821, + 0, + 0, + 0, + 20822, + 0, + 20823, + 0, + 0, + 0, + 20826, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 20829, + 20830, + 20831, + 0, + 20832, + 20836, + 0, + 0, + 20839, + 0, + 0, + 20840, + 20842, + 0, + 20843, + 0, + 20844, + 0, + 20854, + 0, + 0, + 0, + 20855, + 0, + 0, + 0, + 0, + 20856, + 0, + 0, + 0, + 20869, + 0, + 0, + 20871, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 20873, + 0, + 0, + 0, + 0, + 0, + 20876, + 0, + 0, + 0, + 0, + 0, + 20880, + 0, + 0, + 20882, + 0, + 0, + 0, + 0, + 20883, + 20884, + 0, + 0, + 20890, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 20891, + 0, + 0, + 0, + 0, + 0, + 20905, + 0, + 20906, + 20910, + 0, + 0, + 20912, + 20915, + 0, + 0, + 0, + 0, + 0, + 20916, + 0, + 20917, + 0, + 20919, + 20920, + 20922, + 0, + 20927, + 0, + 20928, + 20929, + 20930, + 0, + 0, + 20935, + 0, + 0, + 20939, + 0, + 0, + 20941, + 0, + 0, + 0, + 20943, + 0, + 0, + 0, + 20946, + 20947, + 0, + 0, + 0, + 0, + 0, + 20950, + 0, + 20954, + 0, + 0, + 20955, + 20964, + 0, + 0, + 20967, + 0, + 0, + 0, + 0, + 0, + 20973, + 20975, + 0, + 0, + 0, + 20984, + 0, + 20987, + 20988, + 0, + 0, + 0, + 0, + 0, + 20989, + 0, + 0, + 0, + 20995, + 0, + 20998, + 0, + 20999, + 0, + 0, + 0, + 0, + 21000, + 21001, + 0, + 0, + 0, + 0, + 21008, + 0, + 21010, + 0, + 21016, + 0, + 0, + 0, + 21017, + 21018, + 0, + 0, + 0, + 0, + 0, + 21021, + 21026, + 21027, + 21028, + 0, + 0, + 21029, + 0, + 0, + 0, + 0, + 0, + 21030, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21031, + 21032, + 0, + 0, + 0, + 0, + 0, + 21037, + 0, + 0, + 21038, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21039, + 0, + 21041, + 0, + 21046, + 21047, + 0, + 0, + 0, + 21049, + 21053, + 0, + 0, + 21057, + 21064, + 21065, + 0, + 0, + 21066, + 21067, + 0, + 0, + 0, + 
21069, + 0, + 0, + 0, + 21071, + 21072, + 0, + 0, + 21073, + 0, + 21074, + 0, + 0, + 21078, + 0, + 0, + 0, + 0, + 21079, + 0, + 0, + 21080, + 21081, + 0, + 0, + 21086, + 21087, + 0, + 21089, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21091, + 0, + 21093, + 0, + 21094, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21095, + 0, + 0, + 0, + 0, + 0, + 21096, + 0, + 21098, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21099, + 0, + 0, + 21100, + 21101, + 21102, + 0, + 0, + 0, + 0, + 0, + 21103, + 0, + 21104, + 0, + 0, + 0, + 0, + 0, + 21105, + 21108, + 21109, + 0, + 0, + 21112, + 21113, + 0, + 0, + 0, + 0, + 0, + 0, + 21115, + 21122, + 21123, + 0, + 0, + 0, + 0, + 0, + 21125, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21129, + 21131, + 0, + 0, + 21134, + 0, + 0, + 0, + 21137, + 21142, + 0, + 21143, + 0, + 0, + 21144, + 0, + 21145, + 21146, + 0, + 21152, + 21154, + 21155, + 21156, + 0, + 0, + 0, + 21160, + 0, + 0, + 0, + 0, + 0, + 0, + 21161, + 0, + 21164, + 0, + 21166, + 0, + 0, + 0, + 0, + 21170, + 0, + 0, + 0, + 0, + 21171, + 0, + 0, + 21172, + 0, + 21174, + 0, + 21175, + 0, + 0, + 0, + 0, + 0, + 21176, + 21179, + 21188, + 0, + 0, + 0, + 21189, + 0, + 0, + 21190, + 0, + 0, + 0, + 21192, + 0, + 0, + 21193, + 0, + 0, + 0, + 21198, + 0, + 21212, + 0, + 0, + 21213, + 0, + 0, + 0, + 0, + 0, + 0, + 21215, + 21216, + 0, + 0, + 21223, + 21225, + 0, + 21226, + 0, + 0, + 0, + 0, + 21227, + 21228, + 0, + 0, + 21229, + 0, + 0, + 0, + 0, + 21230, + 21236, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21237, + 0, + 0, + 21238, + 21239, + 0, + 0, + 0, + 0, + 21256, + 0, + 0, + 0, + 0, + 0, + 21257, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21259, + 0, + 0, + 0, + 21263, + 0, + 21272, + 0, + 21274, + 0, + 21282, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21283, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21294, + 0, + 0, + 21297, + 0, + 0, + 0, + 0, + 21298, + 0, + 0, + 0, + 21299, + 0, + 21300, + 21302, + 0, + 21316, + 0, + 21318, + 21322, + 21323, + 0, + 21324, + 0, + 21326, + 0, + 0, + 0, + 21327, + 21328, + 0, + 0, + 0, + 21352, + 0, + 0, + 21354, + 21361, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21362, + 0, + 0, + 0, + 21363, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21366, + 0, + 0, + 21367, + 21372, + 21374, + 0, + 0, + 0, + 21375, + 21377, + 0, + 21378, + 0, + 0, + 0, + 21380, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21381, + 0, + 0, + 0, + 0, + 0, + 0, + 21382, + 0, + 21383, + 0, + 0, + 21384, + 0, + 0, + 21385, + 0, + 0, + 0, + 0, + 21389, + 21390, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21397, + 21398, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21399, + 0, + 21400, + 0, + 0, + 0, + 0, + 21402, + 0, + 0, + 0, + 21403, + 21404, + 0, + 21405, + 21406, + 0, + 0, + 0, + 21407, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21408, + 0, + 0, + 0, + 0, + 21409, + 0, + 21421, + 0, + 21422, + 0, + 0, + 0, + 21425, + 21428, + 0, + 0, + 0, + 0, + 21429, + 0, + 0, + 0, + 0, + 0, + 21433, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21434, + 0, + 21443, + 0, + 21444, + 21449, + 0, + 21452, + 0, + 21453, + 21454, + 0, + 0, + 0, + 21457, + 0, + 0, + 21458, + 0, + 0, + 0, + 21460, + 21461, + 0, + 0, + 21464, + 0, + 0, + 0, + 21473, + 21478, + 0, + 0, + 21479, + 0, + 0, + 21481, + 21483, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21484, + 0, + 0, + 21485, + 21486, + 0, + 0, + 21488, + 0, + 0, + 0, + 0, + 0, + 0, + 21523, + 0, + 0, + 21525, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21526, + 0, + 0, + 0, + 0, + 0, + 0, + 21529, + 
21530, + 0, + 0, + 21531, + 0, + 0, + 21533, + 0, + 0, + 21539, + 21564, + 0, + 21567, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21575, + 0, + 0, + 0, + 0, + 21577, + 0, + 0, + 0, + 0, + 0, + 21591, + 0, + 0, + 21604, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21605, + 0, + 21606, + 0, + 0, + 21617, + 21618, + 21619, + 21620, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21623, + 0, + 0, + 0, + 0, + 21631, + 0, + 21635, + 0, + 0, + 0, + 0, + 21639, + 21646, + 21653, + 21662, + 0, + 0, + 21663, + 21664, + 0, + 21666, + 0, + 0, + 21667, + 0, + 21670, + 21672, + 21673, + 0, + 21674, + 21683, + 0, + 0, + 0, + 0, + 0, + 21684, + 0, + 21694, + 0, + 0, + 0, + 0, + 21695, + 21700, + 0, + 21703, + 0, + 21704, + 0, + 0, + 21709, + 0, + 0, + 0, + 21710, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21711, + 0, + 0, + 0, + 21712, + 0, + 21717, + 0, + 21730, + 0, + 0, + 0, + 21731, + 21733, + 0, + 0, + 0, + 0, + 21737, + 21741, + 21742, + 0, + 21747, + 0, + 0, + 0, + 21749, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21750, + 0, + 0, + 0, + 0, + 0, + 21752, + 0, + 0, + 0, + 0, + 21753, + 0, + 0, + 0, + 0, + 0, + 0, + 21755, + 21756, + 0, + 21757, + 0, + 0, + 0, + 0, + 0, + 0, + 21760, + 0, + 0, + 21763, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21764, + 0, + 0, + 21766, + 0, + 0, + 21767, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21773, + 0, + 21774, + 0, + 0, + 21775, + 0, + 0, + 0, + 0, + 21776, + 0, + 0, + 21777, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21780, + 21787, + 21788, + 21791, + 0, + 0, + 0, + 21797, + 0, + 0, + 0, + 0, + 0, + 21805, + 0, + 0, + 0, + 0, + 21806, + 0, + 21807, + 21809, + 0, + 21810, + 21811, + 0, + 21817, + 21819, + 21820, + 0, + 21823, + 0, + 21824, + 0, + 0, + 21825, + 0, + 0, + 21826, + 21832, + 0, + 0, + 0, + 0, + 0, + 21833, + 21848, + 21849, + 0, + 0, + 21867, + 21870, + 21871, + 21873, + 0, + 0, + 0, + 21874, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21875, + 0, + 21878, + 0, + 0, + 0, + 21879, + 0, + 21881, + 21886, + 0, + 0, + 0, + 0, + 21887, + 0, + 0, + 21888, + 21894, + 21895, + 21897, + 0, + 21901, + 0, + 21904, + 0, + 0, + 21906, + 0, + 0, + 0, + 21909, + 21910, + 21911, + 0, + 0, + 21912, + 0, + 0, + 21913, + 21914, + 21915, + 0, + 21919, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21921, + 0, + 0, + 21922, + 21933, + 21939, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21944, + 0, + 0, + 0, + 0, + 0, + 21945, + 0, + 21947, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21949, + 0, + 0, + 0, + 21950, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21951, + 0, + 21952, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21954, + 21957, + 0, + 0, + 0, + 0, + 21958, + 0, + 21959, + 0, + 0, + 0, + 0, + 0, + 0, + 21962, + 21963, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21964, + 21965, + 0, + 0, + 21969, + 21970, + 0, + 0, + 0, + 21974, + 0, + 0, + 21980, + 21981, + 0, + 21982, + 0, + 0, + 0, + 0, + 0, + 21985, + 0, + 21988, + 0, + 21992, + 0, + 21999, + 0, + 0, + 0, + 0, + 0, + 0, + 22001, + 0, + 22002, + 0, + 0, + 0, + 0, + 0, + 0, + 22003, + 0, + 0, + 0, + 0, + 0, + 22004, + 0, + 0, + 0, + 22008, + 0, + 22009, + 22015, + 0, + 0, + 22016, + 0, + 0, + 0, + 22017, + 22019, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22020, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22021, + 22037, + 0, + 22039, + 0, + 0, + 0, + 22040, + 0, + 0, + 0, + 22048, + 22049, + 0, + 0, + 22053, + 22055, + 22056, + 22059, + 0, + 0, + 22060, + 22061, + 0, + 0, + 22064, + 0, + 0, + 0, + 0, + 
22066, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22073, + 0, + 0, + 0, + 22074, + 22075, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22076, + 0, + 0, + 0, + 0, + 22077, + 22084, + 22099, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22104, + 0, + 0, + 22107, + 0, + 22108, + 0, + 22109, + 0, + 22110, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22111, + 22119, + 0, + 22120, + 22122, + 0, + 0, + 0, + 0, + 22125, + 0, + 0, + 0, + 22128, + 22129, + 0, + 0, + 0, + 0, + 0, + 0, + 22141, + 0, + 0, + 0, + 22142, + 0, + 0, + 22144, + 22146, + 0, + 22148, + 22149, + 22151, + 22154, + 0, + 0, + 0, + 22162, + 0, + 0, + 0, + 0, + 22164, + 22177, + 0, + 0, + 0, + 0, + 22179, + 0, + 22182, + 22183, + 0, + 0, + 22184, + 22188, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22190, + 0, + 22194, + 22201, + 0, + 0, + 22208, + 0, + 22209, + 0, + 22212, + 0, + 0, + 22215, + 0, + 22223, + 22231, + 0, + 0, + 22232, + 0, + 22234, + 0, + 0, + 22235, + 22236, + 0, + 22237, + 0, + 22240, + 0, + 0, + 0, + 0, + 0, + 22241, + 0, + 0, + 0, + 22242, + 22246, + 22247, + 0, + 0, + 0, + 22259, + 22268, + 0, + 22269, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22270, + 0, + 0, + 0, + 0, + 22271, + 0, + 22272, + 0, + 22277, + 0, + 0, + 0, + 0, + 0, + 22278, + 22280, + 22283, + 22286, + 0, + 0, + 22287, + 22289, + 0, + 0, + 22290, + 0, + 22293, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22295, + 0, + 22301, + 22302, + 0, + 0, + 0, + 22305, + 0, + 22308, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22315, + 0, + 0, + 0, + 22317, + 0, + 22334, + 0, + 0, + 0, + 22335, + 0, + 0, + 0, + 0, + 0, + 22336, + 0, + 22338, + 22344, + 0, + 22347, + 22349, + 0, + 22350, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22357, + 0, + 0, + 0, + 0, + 0, + 22358, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22359, + 22360, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22361, + 22366, + 0, + 0, + 22369, + 0, + 22370, + 22373, + 0, + 0, + 0, + 0, + 0, + 22375, + 0, + 22377, + 0, + 0, + 0, + 0, + 0, + 22378, + 0, + 0, + 0, + 0, + 22381, + 0, + 0, + 0, + 0, + 22382, + 0, + 22383, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22391, + 0, + 0, + 22392, + 22395, + 22396, + 22402, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22405, + 0, + 0, + 22406, + 0, + 0, + 22408, + 0, + 0, + 22409, + 22410, + 0, + 0, + 0, + 0, + 0, + 0, + 22424, + 0, + 0, + 0, + 0, + 22426, + 0, + 0, + 0, + 22427, + 0, + 22428, + 0, + 22432, + 0, + 22435, + 22442, + 22443, + 0, + 0, + 0, + 0, + 22444, + 0, + 0, + 0, + 0, + 0, + 22446, + 0, + 22454, + 0, + 22455, + 0, + 0, + 0, + 22465, + 0, + 22470, + 0, + 22471, + 0, + 0, + 0, + 0, + 22472, + 22473, + 0, + 22487, + 0, + 0, + 0, + 22488, + 0, + 0, + 0, + 0, + 22489, + 0, + 0, + 22499, + 0, + 0, + 0, + 0, + 0, + 0, + 22514, + 0, + 0, + 22515, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22516, + 0, + 0, + 0, + 22517, + 22520, + 0, + 0, + 0, + 22534, + 0, + 0, + 22535, + 0, + 0, + 22536, + 0, + 22540, + 22553, + 0, + 22555, + 0, + 0, + 0, + 0, + 22561, + 0, + 0, + 22562, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22566, + 0, + 0, + 0, + 0, + 22567, + 22568, + 0, + 0, + 22575, + 0, + 22579, + 0, + 22582, + 22583, + 22585, + 0, + 0, + 0, + 0, + 0, + 22586, + 0, + 0, + 22587, + 0, + 0, + 22590, + 0, + 0, + 0, + 0, + 0, + 22591, + 0, + 22592, + 0, + 0, + 0, + 0, + 0, + 22593, + 0, + 22602, + 0, + 0, + 22604, + 0, + 0, + 22609, + 0, + 0, + 22618, + 0, + 0, + 0, + 0, + 0, + 0, + 22619, + 0, + 22624, + 22625, + 0, + 0, + 22638, + 0, + 0, + 0, + 0, + 0, + 22639, + 0, + 0, + 22640, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22644, + 0, + 22645, + 
22647,
+	0,
+	0,
+	0,
+	0,
+	22652,
+	22653,
[... several thousand further machine-generated entries of this generated lookup table elided: zero (empty) slots interleaved with word indices ascending toward 31703 ...]
+}
+
+var kStaticDictionaryWords = [31705]dictWord{
+	dictWord{0, 0, 0},
+	dictWord{8, 0, 1002},
+	dictWord{136, 0, 1015},
+	dictWord{4, 0, 683},
+	dictWord{4, 10, 325},
+	dictWord{138, 10, 125},
+	dictWord{7, 11, 572},
+	dictWord{9, 11, 592},
+	dictWord{11, 11, 680},
+	dictWord{11, 11, 842},
+	dictWord{11, 11, 924},
+	dictWord{12, 11, 356},
+	dictWord{12, 11, 550},
+	dictWord{13, 11, 317},
+	dictWord{13, 11, 370},
+	dictWord{13, 11, 469},
+	dictWord{13, 11, 471},
+	dictWord{14, 11, 397},
+	dictWord{18, 11, 69},
+	dictWord{146, 11, 145},
[... roughly 31,600 further machine-generated dictWord entries elided; the table continues past this excerpt ...]
+	dictWord{4, 0, 12},
+	dictWord{7, 0, 504},
+	dictWord{7, 0, 522},
+	dictWord{7, 0, 809},
+	dictWord{8, 0, 797},
+	dictWord{141, 0, 88},
dictWord{4, 10, 752}, + dictWord{133, 11, 449}, + dictWord{7, 11, 86}, + dictWord{8, 11, 103}, + dictWord{145, 11, 69}, + dictWord{7, 11, 2028}, + dictWord{138, 11, 641}, + dictWord{5, 0, 528}, + dictWord{6, 11, 1}, + dictWord{142, 11, 2}, + dictWord{134, 0, 861}, + dictWord{10, 0, 294}, + dictWord{4, 10, 227}, + dictWord{5, 10, 159}, + dictWord{5, 10, 409}, + dictWord{7, 10, 80}, + dictWord{10, 10, 479}, + dictWord{ + 12, + 10, + 418, + }, + dictWord{14, 10, 50}, + dictWord{14, 10, 249}, + dictWord{142, 10, 295}, + dictWord{7, 10, 1470}, + dictWord{8, 10, 66}, + dictWord{8, 10, 137}, + dictWord{ + 8, + 10, + 761, + }, + dictWord{9, 10, 638}, + dictWord{11, 10, 80}, + dictWord{11, 10, 212}, + dictWord{11, 10, 368}, + dictWord{11, 10, 418}, + dictWord{12, 10, 8}, + dictWord{ + 13, + 10, + 15, + }, + dictWord{16, 10, 61}, + dictWord{17, 10, 59}, + dictWord{19, 10, 28}, + dictWord{148, 10, 84}, + dictWord{20, 0, 109}, + dictWord{135, 11, 1148}, + dictWord{ + 6, + 11, + 277, + }, + dictWord{7, 11, 1274}, + dictWord{7, 11, 1386}, + dictWord{7, 11, 1392}, + dictWord{12, 11, 129}, + dictWord{146, 11, 87}, + dictWord{6, 11, 187}, + dictWord{7, 11, 39}, + dictWord{7, 11, 1203}, + dictWord{8, 11, 380}, + dictWord{8, 11, 542}, + dictWord{14, 11, 117}, + dictWord{149, 11, 28}, + dictWord{134, 0, 1187}, + dictWord{5, 0, 266}, + dictWord{9, 0, 290}, + dictWord{9, 0, 364}, + dictWord{10, 0, 293}, + dictWord{11, 0, 606}, + dictWord{142, 0, 45}, + dictWord{6, 11, 297}, + dictWord{ + 7, + 11, + 793, + }, + dictWord{139, 11, 938}, + dictWord{4, 0, 50}, + dictWord{6, 0, 594}, + dictWord{9, 0, 121}, + dictWord{10, 0, 49}, + dictWord{10, 0, 412}, + dictWord{139, 0, 834}, + dictWord{136, 0, 748}, + dictWord{7, 11, 464}, + dictWord{8, 11, 438}, + dictWord{11, 11, 105}, + dictWord{11, 11, 363}, + dictWord{12, 11, 231}, + dictWord{ + 14, + 11, + 386, + }, + dictWord{15, 11, 102}, + dictWord{148, 11, 75}, + dictWord{132, 0, 466}, + dictWord{13, 0, 399}, + dictWord{14, 0, 337}, + dictWord{6, 10, 38}, + dictWord{ + 7, + 10, + 1220, + }, + dictWord{8, 10, 185}, + dictWord{8, 10, 256}, + dictWord{9, 10, 22}, + dictWord{9, 10, 331}, + dictWord{10, 10, 738}, + dictWord{11, 10, 205}, + dictWord{ + 11, + 10, + 540, + }, + dictWord{11, 10, 746}, + dictWord{13, 10, 465}, + dictWord{142, 10, 194}, + dictWord{9, 0, 378}, + dictWord{141, 0, 162}, + dictWord{137, 0, 519}, + dictWord{ + 4, + 10, + 159, + }, + dictWord{6, 10, 115}, + dictWord{7, 10, 252}, + dictWord{7, 10, 257}, + dictWord{7, 10, 1928}, + dictWord{8, 10, 69}, + dictWord{9, 10, 384}, + dictWord{ + 10, + 10, + 91, + }, + dictWord{10, 10, 615}, + dictWord{12, 10, 375}, + dictWord{14, 10, 235}, + dictWord{18, 10, 117}, + dictWord{147, 10, 123}, + dictWord{5, 11, 604}, + dictWord{ + 5, + 10, + 911, + }, + dictWord{136, 10, 278}, + dictWord{132, 0, 667}, + dictWord{8, 0, 351}, + dictWord{9, 0, 322}, + dictWord{4, 10, 151}, + dictWord{135, 10, 1567}, + dictWord{134, 0, 902}, + dictWord{133, 10, 990}, + dictWord{12, 0, 180}, + dictWord{5, 10, 194}, + dictWord{7, 10, 1662}, + dictWord{137, 10, 90}, + dictWord{4, 0, 869}, + dictWord{134, 0, 1996}, + dictWord{134, 0, 813}, + dictWord{133, 10, 425}, + dictWord{137, 11, 761}, + dictWord{132, 0, 260}, + dictWord{133, 10, 971}, + dictWord{ + 5, + 11, + 20, + }, + dictWord{6, 11, 298}, + dictWord{7, 11, 659}, + dictWord{7, 11, 1366}, + dictWord{137, 11, 219}, + dictWord{4, 0, 39}, + dictWord{5, 0, 36}, + dictWord{ + 7, + 0, + 1843, + }, + dictWord{8, 0, 407}, + dictWord{11, 0, 144}, + dictWord{140, 0, 523}, + dictWord{4, 0, 510}, 
+ dictWord{10, 0, 587}, + dictWord{139, 10, 752}, + dictWord{7, 0, 29}, + dictWord{7, 0, 66}, + dictWord{7, 0, 1980}, + dictWord{10, 0, 487}, + dictWord{138, 0, 809}, + dictWord{13, 0, 260}, + dictWord{14, 0, 82}, + dictWord{18, 0, 63}, + dictWord{ + 137, + 10, + 662, + }, + dictWord{5, 10, 72}, + dictWord{6, 10, 264}, + dictWord{7, 10, 21}, + dictWord{7, 10, 46}, + dictWord{7, 10, 2013}, + dictWord{8, 10, 215}, + dictWord{ + 8, + 10, + 513, + }, + dictWord{10, 10, 266}, + dictWord{139, 10, 22}, + dictWord{134, 0, 570}, + dictWord{6, 0, 565}, + dictWord{7, 0, 1667}, + dictWord{4, 11, 439}, + dictWord{ + 10, + 10, + 95, + }, + dictWord{11, 10, 603}, + dictWord{12, 11, 242}, + dictWord{13, 10, 443}, + dictWord{14, 10, 160}, + dictWord{143, 10, 4}, + dictWord{134, 0, 1464}, + dictWord{ + 134, + 10, + 431, + }, + dictWord{9, 0, 372}, + dictWord{15, 0, 2}, + dictWord{19, 0, 10}, + dictWord{19, 0, 18}, + dictWord{5, 10, 874}, + dictWord{6, 10, 1677}, + dictWord{143, 10, 0}, + dictWord{132, 0, 787}, + dictWord{6, 0, 380}, + dictWord{12, 0, 399}, + dictWord{21, 0, 19}, + dictWord{7, 10, 939}, + dictWord{7, 10, 1172}, + dictWord{7, 10, 1671}, + dictWord{9, 10, 540}, + dictWord{10, 10, 696}, + dictWord{11, 10, 265}, + dictWord{11, 10, 732}, + dictWord{11, 10, 928}, + dictWord{11, 10, 937}, + dictWord{ + 141, + 10, + 438, + }, + dictWord{137, 0, 200}, + dictWord{132, 11, 233}, + dictWord{132, 0, 516}, + dictWord{134, 11, 577}, + dictWord{132, 0, 844}, + dictWord{11, 0, 887}, + dictWord{14, 0, 365}, + dictWord{142, 0, 375}, + dictWord{132, 11, 482}, + dictWord{8, 0, 821}, + dictWord{140, 0, 44}, + dictWord{7, 0, 1655}, + dictWord{136, 0, 305}, + dictWord{5, 10, 682}, + dictWord{135, 10, 1887}, + dictWord{135, 11, 346}, + dictWord{132, 10, 696}, + dictWord{4, 0, 10}, + dictWord{7, 0, 917}, + dictWord{139, 0, 786}, + dictWord{5, 11, 795}, + dictWord{6, 11, 1741}, + dictWord{8, 11, 417}, + dictWord{137, 11, 782}, + dictWord{4, 0, 1016}, + dictWord{134, 0, 2031}, + dictWord{5, 0, 684}, + dictWord{4, 10, 726}, + dictWord{133, 10, 630}, + dictWord{6, 0, 1021}, + dictWord{134, 0, 1480}, + dictWord{8, 10, 802}, + dictWord{136, 10, 838}, + dictWord{ + 134, + 0, + 27, + }, + dictWord{134, 0, 395}, + dictWord{135, 11, 622}, + dictWord{7, 11, 625}, + dictWord{135, 11, 1750}, + dictWord{4, 11, 203}, + dictWord{135, 11, 1936}, + dictWord{6, 10, 118}, + dictWord{7, 10, 215}, + dictWord{7, 10, 1521}, + dictWord{140, 10, 11}, + dictWord{132, 0, 813}, + dictWord{136, 0, 511}, + dictWord{7, 10, 615}, + dictWord{138, 10, 251}, + dictWord{135, 10, 1044}, + dictWord{145, 0, 56}, + dictWord{133, 10, 225}, + dictWord{6, 0, 342}, + dictWord{6, 0, 496}, + dictWord{8, 0, 275}, + dictWord{137, 0, 206}, + dictWord{4, 0, 909}, + dictWord{133, 0, 940}, + dictWord{132, 0, 891}, + dictWord{7, 11, 311}, + dictWord{9, 11, 308}, + dictWord{ + 140, + 11, + 255, + }, + dictWord{4, 10, 370}, + dictWord{5, 10, 756}, + dictWord{135, 10, 1326}, + dictWord{4, 0, 687}, + dictWord{134, 0, 1596}, + dictWord{134, 0, 1342}, + dictWord{ + 6, + 10, + 1662, + }, + dictWord{7, 10, 48}, + dictWord{8, 10, 771}, + dictWord{10, 10, 116}, + dictWord{13, 10, 104}, + dictWord{14, 10, 105}, + dictWord{14, 10, 184}, + dictWord{15, 10, 168}, + dictWord{19, 10, 92}, + dictWord{148, 10, 68}, + dictWord{138, 10, 209}, + dictWord{4, 11, 400}, + dictWord{5, 11, 267}, + dictWord{135, 11, 232}, + dictWord{151, 11, 12}, + dictWord{6, 0, 41}, + dictWord{141, 0, 160}, + dictWord{141, 11, 314}, + dictWord{134, 0, 1718}, + dictWord{136, 0, 778}, + dictWord{ + 142, + 11, + 
261, + }, + dictWord{134, 0, 1610}, + dictWord{133, 0, 115}, + dictWord{132, 0, 294}, + dictWord{14, 0, 314}, + dictWord{132, 10, 120}, + dictWord{132, 0, 983}, + dictWord{5, 0, 193}, + dictWord{140, 0, 178}, + dictWord{138, 10, 429}, + dictWord{5, 10, 820}, + dictWord{135, 10, 931}, + dictWord{6, 0, 994}, + dictWord{6, 0, 1051}, + dictWord{6, 0, 1439}, + dictWord{7, 0, 174}, + dictWord{133, 11, 732}, + dictWord{4, 11, 100}, + dictWord{7, 11, 679}, + dictWord{8, 11, 313}, + dictWord{138, 10, 199}, + dictWord{6, 10, 151}, + dictWord{6, 10, 1675}, + dictWord{7, 10, 383}, + dictWord{151, 10, 10}, + dictWord{6, 0, 1796}, + dictWord{8, 0, 848}, + dictWord{8, 0, 867}, + dictWord{ + 8, + 0, + 907, + }, + dictWord{10, 0, 855}, + dictWord{140, 0, 703}, + dictWord{140, 0, 221}, + dictWord{4, 0, 122}, + dictWord{5, 0, 796}, + dictWord{5, 0, 952}, + dictWord{6, 0, 1660}, + dictWord{6, 0, 1671}, + dictWord{8, 0, 567}, + dictWord{9, 0, 687}, + dictWord{9, 0, 742}, + dictWord{10, 0, 686}, + dictWord{11, 0, 682}, + dictWord{11, 0, 909}, + dictWord{ + 140, + 0, + 281, + }, + dictWord{5, 11, 362}, + dictWord{5, 11, 443}, + dictWord{6, 11, 318}, + dictWord{7, 11, 1019}, + dictWord{139, 11, 623}, + dictWord{5, 11, 463}, + dictWord{136, 11, 296}, + dictWord{11, 0, 583}, + dictWord{13, 0, 262}, + dictWord{6, 10, 1624}, + dictWord{12, 10, 422}, + dictWord{142, 10, 360}, + dictWord{5, 0, 179}, + dictWord{7, 0, 1095}, + dictWord{135, 0, 1213}, + dictWord{4, 10, 43}, + dictWord{4, 11, 454}, + dictWord{5, 10, 344}, + dictWord{133, 10, 357}, + dictWord{4, 0, 66}, + dictWord{7, 0, 722}, + dictWord{135, 0, 904}, + dictWord{134, 0, 773}, + dictWord{7, 0, 352}, + dictWord{133, 10, 888}, + dictWord{5, 11, 48}, + dictWord{5, 11, 404}, + dictWord{ + 6, + 11, + 557, + }, + dictWord{7, 11, 458}, + dictWord{8, 11, 597}, + dictWord{10, 11, 455}, + dictWord{10, 11, 606}, + dictWord{11, 11, 49}, + dictWord{11, 11, 548}, + dictWord{ + 12, + 11, + 476, + }, + dictWord{13, 11, 18}, + dictWord{141, 11, 450}, + dictWord{134, 11, 418}, + dictWord{132, 10, 711}, + dictWord{5, 11, 442}, + dictWord{ + 135, + 11, + 1984, + }, + dictWord{141, 0, 35}, + dictWord{137, 0, 152}, + dictWord{134, 0, 1197}, + dictWord{135, 11, 1093}, + dictWord{137, 11, 203}, + dictWord{137, 10, 440}, + dictWord{10, 0, 592}, + dictWord{10, 0, 753}, + dictWord{12, 0, 317}, + dictWord{12, 0, 355}, + dictWord{12, 0, 465}, + dictWord{12, 0, 469}, + dictWord{12, 0, 560}, + dictWord{12, 0, 578}, + dictWord{141, 0, 243}, + dictWord{133, 0, 564}, + dictWord{134, 0, 797}, + dictWord{5, 10, 958}, + dictWord{133, 10, 987}, + dictWord{5, 11, 55}, + dictWord{7, 11, 376}, + dictWord{140, 11, 161}, + dictWord{133, 11, 450}, + dictWord{134, 0, 556}, + dictWord{134, 0, 819}, + dictWord{11, 10, 276}, + dictWord{ + 142, + 10, + 293, + }, + dictWord{7, 0, 544}, + dictWord{138, 0, 61}, + dictWord{8, 0, 719}, + dictWord{4, 10, 65}, + dictWord{5, 10, 479}, + dictWord{5, 10, 1004}, + dictWord{7, 10, 1913}, + dictWord{8, 10, 317}, + dictWord{9, 10, 302}, + dictWord{10, 10, 612}, + dictWord{141, 10, 22}, + dictWord{4, 0, 5}, + dictWord{5, 0, 498}, + dictWord{8, 0, 637}, + dictWord{ + 9, + 0, + 521, + }, + dictWord{4, 11, 213}, + dictWord{4, 10, 261}, + dictWord{7, 11, 223}, + dictWord{7, 10, 510}, + dictWord{136, 11, 80}, + dictWord{5, 0, 927}, + dictWord{7, 0, 101}, + dictWord{4, 10, 291}, + dictWord{7, 11, 381}, + dictWord{7, 11, 806}, + dictWord{7, 11, 820}, + dictWord{8, 11, 354}, + dictWord{8, 11, 437}, + dictWord{8, 11, 787}, + dictWord{9, 10, 515}, + dictWord{9, 11, 657}, + 
dictWord{10, 11, 58}, + dictWord{10, 11, 339}, + dictWord{10, 11, 749}, + dictWord{11, 11, 914}, + dictWord{12, 10, 152}, + dictWord{12, 11, 162}, + dictWord{12, 10, 443}, + dictWord{13, 11, 75}, + dictWord{13, 10, 392}, + dictWord{14, 11, 106}, + dictWord{14, 11, 198}, + dictWord{ + 14, + 11, + 320, + }, + dictWord{14, 10, 357}, + dictWord{14, 11, 413}, + dictWord{146, 11, 43}, + dictWord{6, 0, 1153}, + dictWord{7, 0, 1441}, + dictWord{136, 11, 747}, + dictWord{ + 4, + 0, + 893, + }, + dictWord{5, 0, 780}, + dictWord{133, 0, 893}, + dictWord{138, 11, 654}, + dictWord{133, 11, 692}, + dictWord{133, 0, 238}, + dictWord{134, 11, 191}, + dictWord{4, 10, 130}, + dictWord{135, 10, 843}, + dictWord{6, 0, 1296}, + dictWord{5, 10, 42}, + dictWord{5, 10, 879}, + dictWord{7, 10, 245}, + dictWord{7, 10, 324}, + dictWord{ + 7, + 10, + 1532, + }, + dictWord{11, 10, 463}, + dictWord{11, 10, 472}, + dictWord{13, 10, 363}, + dictWord{144, 10, 52}, + dictWord{134, 0, 1729}, + dictWord{6, 0, 1999}, + dictWord{136, 0, 969}, + dictWord{4, 10, 134}, + dictWord{133, 10, 372}, + dictWord{4, 0, 60}, + dictWord{7, 0, 941}, + dictWord{7, 0, 1800}, + dictWord{8, 0, 314}, + dictWord{ + 9, + 0, + 700, + }, + dictWord{139, 0, 487}, + dictWord{134, 0, 1144}, + dictWord{6, 11, 162}, + dictWord{7, 11, 1960}, + dictWord{136, 11, 831}, + dictWord{132, 11, 706}, + dictWord{135, 0, 1147}, + dictWord{138, 11, 426}, + dictWord{138, 11, 89}, + dictWord{7, 0, 1853}, + dictWord{138, 0, 437}, + dictWord{136, 0, 419}, + dictWord{ + 135, + 10, + 1634, + }, + dictWord{133, 0, 828}, + dictWord{5, 0, 806}, + dictWord{7, 0, 176}, + dictWord{7, 0, 178}, + dictWord{7, 0, 1240}, + dictWord{7, 0, 1976}, + dictWord{ + 132, + 10, + 644, + }, + dictWord{135, 11, 1877}, + dictWord{5, 11, 420}, + dictWord{135, 11, 1449}, + dictWord{4, 0, 51}, + dictWord{5, 0, 39}, + dictWord{6, 0, 4}, + dictWord{7, 0, 591}, + dictWord{7, 0, 849}, + dictWord{7, 0, 951}, + dictWord{7, 0, 1613}, + dictWord{7, 0, 1760}, + dictWord{7, 0, 1988}, + dictWord{9, 0, 434}, + dictWord{10, 0, 754}, + dictWord{ + 11, + 0, + 25, + }, + dictWord{139, 0, 37}, + dictWord{10, 11, 57}, + dictWord{138, 11, 277}, + dictWord{135, 10, 540}, + dictWord{132, 11, 204}, + dictWord{135, 0, 159}, + dictWord{139, 11, 231}, + dictWord{133, 0, 902}, + dictWord{7, 0, 928}, + dictWord{7, 11, 366}, + dictWord{9, 11, 287}, + dictWord{12, 11, 199}, + dictWord{12, 11, 556}, + dictWord{140, 11, 577}, + dictWord{6, 10, 623}, + dictWord{136, 10, 789}, + dictWord{4, 10, 908}, + dictWord{5, 10, 359}, + dictWord{5, 10, 508}, + dictWord{6, 10, 1723}, + dictWord{7, 10, 343}, + dictWord{7, 10, 1996}, + dictWord{135, 10, 2026}, + dictWord{134, 0, 270}, + dictWord{4, 10, 341}, + dictWord{135, 10, 480}, + dictWord{ + 5, + 11, + 356, + }, + dictWord{135, 11, 224}, + dictWord{11, 11, 588}, + dictWord{11, 11, 864}, + dictWord{11, 11, 968}, + dictWord{143, 11, 160}, + dictWord{132, 0, 556}, + dictWord{137, 0, 801}, + dictWord{132, 0, 416}, + dictWord{142, 0, 372}, + dictWord{5, 0, 152}, + dictWord{5, 0, 197}, + dictWord{7, 0, 340}, + dictWord{7, 0, 867}, + dictWord{ + 10, + 0, + 548, + }, + dictWord{10, 0, 581}, + dictWord{11, 0, 6}, + dictWord{12, 0, 3}, + dictWord{12, 0, 19}, + dictWord{14, 0, 110}, + dictWord{142, 0, 289}, + dictWord{139, 0, 369}, + dictWord{7, 11, 630}, + dictWord{9, 11, 567}, + dictWord{11, 11, 150}, + dictWord{11, 11, 444}, + dictWord{141, 11, 119}, + dictWord{134, 11, 539}, + dictWord{ + 7, + 10, + 1995, + }, + dictWord{8, 10, 299}, + dictWord{11, 10, 890}, + dictWord{140, 10, 674}, + 
dictWord{7, 0, 34}, + dictWord{7, 0, 190}, + dictWord{8, 0, 28}, + dictWord{8, 0, 141}, + dictWord{8, 0, 444}, + dictWord{8, 0, 811}, + dictWord{9, 0, 468}, + dictWord{11, 0, 334}, + dictWord{12, 0, 24}, + dictWord{12, 0, 386}, + dictWord{140, 0, 576}, + dictWord{ + 133, + 0, + 757, + }, + dictWord{7, 0, 1553}, + dictWord{136, 0, 898}, + dictWord{133, 0, 721}, + dictWord{136, 0, 1012}, + dictWord{4, 0, 789}, + dictWord{5, 0, 647}, + dictWord{ + 135, + 0, + 1102, + }, + dictWord{132, 0, 898}, + dictWord{10, 0, 183}, + dictWord{4, 10, 238}, + dictWord{5, 10, 503}, + dictWord{6, 10, 179}, + dictWord{7, 10, 2003}, + dictWord{ + 8, + 10, + 381, + }, + dictWord{8, 10, 473}, + dictWord{9, 10, 149}, + dictWord{10, 10, 788}, + dictWord{15, 10, 45}, + dictWord{15, 10, 86}, + dictWord{20, 10, 110}, + dictWord{ + 150, + 10, + 57, + }, + dictWord{9, 0, 136}, + dictWord{19, 0, 107}, + dictWord{4, 10, 121}, + dictWord{5, 10, 156}, + dictWord{5, 10, 349}, + dictWord{10, 10, 605}, + dictWord{ + 142, + 10, + 342, + }, + dictWord{4, 11, 235}, + dictWord{135, 11, 255}, + dictWord{4, 11, 194}, + dictWord{5, 11, 584}, + dictWord{6, 11, 384}, + dictWord{7, 11, 583}, + dictWord{ + 10, + 11, + 761, + }, + dictWord{11, 11, 760}, + dictWord{139, 11, 851}, + dictWord{6, 10, 80}, + dictWord{6, 10, 1694}, + dictWord{7, 10, 173}, + dictWord{7, 10, 1974}, + dictWord{ + 9, + 10, + 547, + }, + dictWord{10, 10, 730}, + dictWord{14, 10, 18}, + dictWord{150, 10, 39}, + dictWord{4, 10, 923}, + dictWord{134, 10, 1711}, + dictWord{5, 0, 277}, + dictWord{141, 0, 247}, + dictWord{132, 0, 435}, + dictWord{133, 11, 562}, + dictWord{134, 0, 1311}, + dictWord{5, 11, 191}, + dictWord{137, 11, 271}, + dictWord{ + 132, + 10, + 595, + }, + dictWord{7, 11, 1537}, + dictWord{14, 11, 96}, + dictWord{143, 11, 73}, + dictWord{5, 0, 437}, + dictWord{7, 0, 502}, + dictWord{7, 0, 519}, + dictWord{7, 0, 1122}, + dictWord{7, 0, 1751}, + dictWord{14, 0, 211}, + dictWord{6, 10, 459}, + dictWord{7, 10, 1753}, + dictWord{7, 10, 1805}, + dictWord{8, 10, 658}, + dictWord{9, 10, 1}, + dictWord{11, 10, 959}, + dictWord{141, 10, 446}, + dictWord{6, 0, 814}, + dictWord{4, 11, 470}, + dictWord{5, 11, 473}, + dictWord{6, 11, 153}, + dictWord{7, 11, 1503}, + dictWord{7, 11, 1923}, + dictWord{10, 11, 701}, + dictWord{11, 11, 132}, + dictWord{11, 11, 168}, + dictWord{11, 11, 227}, + dictWord{11, 11, 320}, + dictWord{ + 11, + 11, + 436, + }, + dictWord{11, 11, 525}, + dictWord{11, 11, 855}, + dictWord{12, 11, 41}, + dictWord{12, 11, 286}, + dictWord{13, 11, 103}, + dictWord{13, 11, 284}, + dictWord{ + 14, + 11, + 255, + }, + dictWord{14, 11, 262}, + dictWord{15, 11, 117}, + dictWord{143, 11, 127}, + dictWord{5, 0, 265}, + dictWord{6, 0, 212}, + dictWord{135, 0, 28}, + dictWord{ + 138, + 0, + 750, + }, + dictWord{133, 11, 327}, + dictWord{6, 11, 552}, + dictWord{7, 11, 1754}, + dictWord{137, 11, 604}, + dictWord{134, 0, 2012}, + dictWord{132, 0, 702}, + dictWord{5, 11, 80}, + dictWord{6, 11, 405}, + dictWord{7, 11, 403}, + dictWord{7, 11, 1502}, + dictWord{7, 11, 1626}, + dictWord{8, 11, 456}, + dictWord{9, 11, 487}, + dictWord{9, 11, 853}, + dictWord{9, 11, 889}, + dictWord{10, 11, 309}, + dictWord{11, 11, 721}, + dictWord{11, 11, 994}, + dictWord{12, 11, 430}, + dictWord{ + 141, + 11, + 165, + }, + dictWord{5, 0, 808}, + dictWord{135, 0, 2045}, + dictWord{5, 0, 166}, + dictWord{8, 0, 739}, + dictWord{140, 0, 511}, + dictWord{134, 10, 490}, + dictWord{ + 4, + 11, + 453, + }, + dictWord{5, 11, 887}, + dictWord{6, 11, 535}, + dictWord{8, 11, 6}, + dictWord{136, 11, 
543}, + dictWord{4, 0, 119}, + dictWord{5, 0, 170}, + dictWord{5, 0, 447}, + dictWord{7, 0, 1708}, + dictWord{7, 0, 1889}, + dictWord{9, 0, 357}, + dictWord{9, 0, 719}, + dictWord{12, 0, 486}, + dictWord{140, 0, 596}, + dictWord{137, 0, 500}, + dictWord{ + 7, + 10, + 250, + }, + dictWord{136, 10, 507}, + dictWord{132, 10, 158}, + dictWord{6, 0, 809}, + dictWord{134, 0, 1500}, + dictWord{9, 0, 327}, + dictWord{11, 0, 350}, + dictWord{11, 0, 831}, + dictWord{13, 0, 352}, + dictWord{4, 10, 140}, + dictWord{7, 10, 362}, + dictWord{8, 10, 209}, + dictWord{9, 10, 10}, + dictWord{9, 10, 503}, + dictWord{ + 9, + 10, + 614, + }, + dictWord{10, 10, 689}, + dictWord{11, 10, 327}, + dictWord{11, 10, 725}, + dictWord{12, 10, 252}, + dictWord{12, 10, 583}, + dictWord{13, 10, 192}, + dictWord{14, 10, 269}, + dictWord{14, 10, 356}, + dictWord{148, 10, 50}, + dictWord{135, 11, 741}, + dictWord{4, 0, 450}, + dictWord{7, 0, 1158}, + dictWord{19, 10, 1}, + dictWord{19, 10, 26}, + dictWord{150, 10, 9}, + dictWord{6, 0, 597}, + dictWord{135, 0, 1318}, + dictWord{134, 0, 1602}, + dictWord{6, 10, 228}, + dictWord{7, 10, 1341}, + dictWord{9, 10, 408}, + dictWord{138, 10, 343}, + dictWord{7, 0, 1375}, + dictWord{7, 0, 1466}, + dictWord{138, 0, 331}, + dictWord{132, 0, 754}, + dictWord{ + 132, + 10, + 557, + }, + dictWord{5, 11, 101}, + dictWord{6, 11, 88}, + dictWord{6, 11, 543}, + dictWord{7, 11, 1677}, + dictWord{9, 11, 100}, + dictWord{10, 11, 677}, + dictWord{ + 14, + 11, + 169, + }, + dictWord{14, 11, 302}, + dictWord{14, 11, 313}, + dictWord{15, 11, 48}, + dictWord{143, 11, 84}, + dictWord{134, 0, 1368}, + dictWord{4, 11, 310}, + dictWord{ + 9, + 11, + 795, + }, + dictWord{10, 11, 733}, + dictWord{11, 11, 451}, + dictWord{12, 11, 249}, + dictWord{14, 11, 115}, + dictWord{14, 11, 286}, + dictWord{143, 11, 100}, + dictWord{132, 10, 548}, + dictWord{10, 0, 557}, + dictWord{7, 10, 197}, + dictWord{8, 10, 142}, + dictWord{8, 10, 325}, + dictWord{9, 10, 150}, + dictWord{9, 10, 596}, + dictWord{10, 10, 353}, + dictWord{11, 10, 74}, + dictWord{11, 10, 315}, + dictWord{12, 10, 662}, + dictWord{12, 10, 681}, + dictWord{14, 10, 423}, + dictWord{ + 143, + 10, + 141, + }, + dictWord{133, 11, 587}, + dictWord{5, 0, 850}, + dictWord{136, 0, 799}, + dictWord{10, 0, 908}, + dictWord{12, 0, 701}, + dictWord{12, 0, 757}, + dictWord{ + 142, + 0, + 466, + }, + dictWord{4, 0, 62}, + dictWord{5, 0, 275}, + dictWord{18, 0, 19}, + dictWord{6, 10, 399}, + dictWord{6, 10, 579}, + dictWord{7, 10, 692}, + dictWord{7, 10, 846}, + dictWord{ + 7, + 10, + 1015, + }, + dictWord{7, 10, 1799}, + dictWord{8, 10, 403}, + dictWord{9, 10, 394}, + dictWord{10, 10, 133}, + dictWord{12, 10, 4}, + dictWord{12, 10, 297}, + dictWord{12, 10, 452}, + dictWord{16, 10, 81}, + dictWord{18, 10, 25}, + dictWord{21, 10, 14}, + dictWord{22, 10, 12}, + dictWord{151, 10, 18}, + dictWord{12, 0, 459}, + dictWord{ + 7, + 10, + 1546, + }, + dictWord{11, 10, 299}, + dictWord{142, 10, 407}, + dictWord{132, 10, 177}, + dictWord{132, 11, 498}, + dictWord{7, 11, 217}, + dictWord{ + 8, + 11, + 140, + }, + dictWord{138, 11, 610}, + dictWord{5, 10, 411}, + dictWord{135, 10, 653}, + dictWord{134, 0, 1802}, + dictWord{7, 10, 439}, + dictWord{10, 10, 727}, + dictWord{11, 10, 260}, + dictWord{139, 10, 684}, + dictWord{133, 11, 905}, + dictWord{11, 11, 580}, + dictWord{142, 11, 201}, + dictWord{134, 0, 1397}, + dictWord{ + 5, + 10, + 208, + }, + dictWord{7, 10, 753}, + dictWord{135, 10, 1528}, + dictWord{7, 0, 238}, + dictWord{7, 0, 2033}, + dictWord{8, 0, 120}, + dictWord{8, 0, 
188}, + dictWord{8, 0, 659}, + dictWord{9, 0, 598}, + dictWord{10, 0, 466}, + dictWord{12, 0, 342}, + dictWord{12, 0, 588}, + dictWord{13, 0, 503}, + dictWord{14, 0, 246}, + dictWord{143, 0, 92}, + dictWord{135, 11, 1041}, + dictWord{4, 11, 456}, + dictWord{7, 11, 105}, + dictWord{7, 11, 358}, + dictWord{7, 11, 1637}, + dictWord{8, 11, 643}, + dictWord{139, 11, 483}, + dictWord{6, 0, 1318}, + dictWord{134, 0, 1324}, + dictWord{4, 0, 201}, + dictWord{7, 0, 1744}, + dictWord{8, 0, 602}, + dictWord{11, 0, 247}, + dictWord{11, 0, 826}, + dictWord{17, 0, 65}, + dictWord{133, 10, 242}, + dictWord{8, 0, 164}, + dictWord{146, 0, 62}, + dictWord{133, 10, 953}, + dictWord{139, 10, 802}, + dictWord{133, 0, 615}, + dictWord{7, 11, 1566}, + dictWord{8, 11, 269}, + dictWord{9, 11, 212}, + dictWord{9, 11, 718}, + dictWord{14, 11, 15}, + dictWord{14, 11, 132}, + dictWord{142, 11, 227}, + dictWord{133, 10, 290}, + dictWord{132, 10, 380}, + dictWord{5, 10, 52}, + dictWord{7, 10, 277}, + dictWord{9, 10, 368}, + dictWord{139, 10, 791}, + dictWord{ + 135, + 0, + 1243, + }, + dictWord{133, 11, 539}, + dictWord{11, 11, 919}, + dictWord{141, 11, 409}, + dictWord{136, 0, 968}, + dictWord{133, 11, 470}, + dictWord{134, 0, 882}, + dictWord{132, 0, 907}, + dictWord{5, 0, 100}, + dictWord{10, 0, 329}, + dictWord{12, 0, 416}, + dictWord{149, 0, 29}, + dictWord{10, 10, 138}, + dictWord{139, 10, 476}, + dictWord{5, 10, 725}, + dictWord{5, 10, 727}, + dictWord{6, 11, 91}, + dictWord{7, 11, 435}, + dictWord{135, 10, 1811}, + dictWord{4, 11, 16}, + dictWord{5, 11, 316}, + dictWord{5, 11, 842}, + dictWord{6, 11, 370}, + dictWord{6, 11, 1778}, + dictWord{8, 11, 166}, + dictWord{11, 11, 812}, + dictWord{12, 11, 206}, + dictWord{12, 11, 351}, + dictWord{14, 11, 418}, + dictWord{16, 11, 15}, + dictWord{16, 11, 34}, + dictWord{18, 11, 3}, + dictWord{19, 11, 3}, + dictWord{19, 11, 7}, + dictWord{20, 11, 4}, + dictWord{ + 149, + 11, + 21, + }, + dictWord{132, 0, 176}, + dictWord{5, 0, 636}, + dictWord{5, 0, 998}, + dictWord{7, 0, 9}, + dictWord{7, 0, 1508}, + dictWord{8, 0, 26}, + dictWord{9, 0, 317}, + dictWord{ + 9, + 0, + 358, + }, + dictWord{10, 0, 210}, + dictWord{10, 0, 292}, + dictWord{10, 0, 533}, + dictWord{11, 0, 555}, + dictWord{12, 0, 526}, + dictWord{12, 0, 607}, + dictWord{ + 13, + 0, + 263, + }, + dictWord{13, 0, 459}, + dictWord{142, 0, 271}, + dictWord{6, 0, 256}, + dictWord{8, 0, 265}, + dictWord{4, 10, 38}, + dictWord{7, 10, 307}, + dictWord{7, 10, 999}, + dictWord{7, 10, 1481}, + dictWord{7, 10, 1732}, + dictWord{7, 10, 1738}, + dictWord{9, 10, 414}, + dictWord{11, 10, 316}, + dictWord{12, 10, 52}, + dictWord{13, 10, 420}, + dictWord{147, 10, 100}, + dictWord{135, 10, 1296}, + dictWord{4, 11, 611}, + dictWord{133, 11, 606}, + dictWord{4, 0, 643}, + dictWord{142, 11, 21}, + dictWord{ + 133, + 11, + 715, + }, + dictWord{133, 10, 723}, + dictWord{6, 0, 610}, + dictWord{135, 11, 597}, + dictWord{10, 0, 127}, + dictWord{141, 0, 27}, + dictWord{6, 0, 1995}, + dictWord{ + 6, + 0, + 2001, + }, + dictWord{8, 0, 119}, + dictWord{136, 0, 973}, + dictWord{4, 11, 149}, + dictWord{138, 11, 368}, + dictWord{12, 0, 522}, + dictWord{4, 11, 154}, + dictWord{ + 5, + 10, + 109, + }, + dictWord{6, 10, 1784}, + dictWord{7, 11, 1134}, + dictWord{7, 10, 1895}, + dictWord{8, 11, 105}, + dictWord{12, 10, 296}, + dictWord{140, 10, 302}, + dictWord{4, 11, 31}, + dictWord{6, 11, 429}, + dictWord{7, 11, 962}, + dictWord{9, 11, 458}, + dictWord{139, 11, 691}, + dictWord{10, 0, 553}, + dictWord{11, 0, 876}, + dictWord{13, 0, 193}, + 
dictWord{13, 0, 423}, + dictWord{14, 0, 166}, + dictWord{19, 0, 84}, + dictWord{4, 11, 312}, + dictWord{5, 10, 216}, + dictWord{7, 10, 1879}, + dictWord{ + 9, + 10, + 141, + }, + dictWord{9, 10, 270}, + dictWord{9, 10, 679}, + dictWord{10, 10, 159}, + dictWord{11, 10, 197}, + dictWord{12, 10, 538}, + dictWord{12, 10, 559}, + dictWord{14, 10, 144}, + dictWord{14, 10, 167}, + dictWord{143, 10, 67}, + dictWord{134, 0, 1582}, + dictWord{7, 0, 1578}, + dictWord{135, 11, 1578}, + dictWord{ + 137, + 10, + 81, + }, + dictWord{132, 11, 236}, + dictWord{134, 10, 391}, + dictWord{134, 0, 795}, + dictWord{7, 10, 322}, + dictWord{136, 10, 249}, + dictWord{5, 11, 836}, + dictWord{ + 5, + 11, + 857, + }, + dictWord{6, 11, 1680}, + dictWord{7, 11, 59}, + dictWord{147, 11, 53}, + dictWord{135, 0, 432}, + dictWord{10, 11, 68}, + dictWord{139, 11, 494}, + dictWord{4, 11, 81}, + dictWord{139, 11, 867}, + dictWord{7, 0, 126}, + dictWord{136, 0, 84}, + dictWord{142, 11, 280}, + dictWord{5, 11, 282}, + dictWord{8, 11, 650}, + dictWord{ + 9, + 11, + 295, + }, + dictWord{9, 11, 907}, + dictWord{138, 11, 443}, + dictWord{136, 0, 790}, + dictWord{5, 10, 632}, + dictWord{138, 10, 526}, + dictWord{6, 0, 64}, + dictWord{12, 0, 377}, + dictWord{13, 0, 309}, + dictWord{14, 0, 141}, + dictWord{14, 0, 429}, + dictWord{14, 11, 141}, + dictWord{142, 11, 429}, + dictWord{134, 0, 1529}, + dictWord{6, 0, 321}, + dictWord{7, 0, 1857}, + dictWord{9, 0, 530}, + dictWord{19, 0, 99}, + dictWord{7, 10, 948}, + dictWord{7, 10, 1042}, + dictWord{8, 10, 235}, + dictWord{ + 8, + 10, + 461, + }, + dictWord{9, 10, 453}, + dictWord{10, 10, 354}, + dictWord{145, 10, 77}, + dictWord{7, 0, 1104}, + dictWord{11, 0, 269}, + dictWord{11, 0, 539}, + dictWord{ + 11, + 0, + 627, + }, + dictWord{11, 0, 706}, + dictWord{11, 0, 975}, + dictWord{12, 0, 248}, + dictWord{12, 0, 434}, + dictWord{12, 0, 600}, + dictWord{12, 0, 622}, + dictWord{ + 13, + 0, + 297, + }, + dictWord{13, 0, 485}, + dictWord{14, 0, 69}, + dictWord{14, 0, 409}, + dictWord{143, 0, 108}, + dictWord{4, 10, 362}, + dictWord{7, 10, 52}, + dictWord{7, 10, 303}, + dictWord{10, 11, 70}, + dictWord{12, 11, 26}, + dictWord{14, 11, 17}, + dictWord{14, 11, 178}, + dictWord{15, 11, 34}, + dictWord{149, 11, 12}, + dictWord{11, 0, 977}, + dictWord{141, 0, 507}, + dictWord{9, 0, 34}, + dictWord{139, 0, 484}, + dictWord{5, 10, 196}, + dictWord{6, 10, 486}, + dictWord{7, 10, 212}, + dictWord{8, 10, 309}, + dictWord{136, 10, 346}, + dictWord{6, 0, 1700}, + dictWord{7, 0, 26}, + dictWord{7, 0, 293}, + dictWord{7, 0, 382}, + dictWord{7, 0, 1026}, + dictWord{7, 0, 1087}, + dictWord{ + 7, + 0, + 2027, + }, + dictWord{8, 0, 24}, + dictWord{8, 0, 114}, + dictWord{8, 0, 252}, + dictWord{8, 0, 727}, + dictWord{8, 0, 729}, + dictWord{9, 0, 30}, + dictWord{9, 0, 199}, + dictWord{ + 9, + 0, + 231, + }, + dictWord{9, 0, 251}, + dictWord{9, 0, 334}, + dictWord{9, 0, 361}, + dictWord{9, 0, 712}, + dictWord{10, 0, 55}, + dictWord{10, 0, 60}, + dictWord{10, 0, 232}, + dictWord{ + 10, + 0, + 332, + }, + dictWord{10, 0, 384}, + dictWord{10, 0, 396}, + dictWord{10, 0, 504}, + dictWord{10, 0, 542}, + dictWord{10, 0, 652}, + dictWord{11, 0, 20}, + dictWord{11, 0, 48}, + dictWord{11, 0, 207}, + dictWord{11, 0, 291}, + dictWord{11, 0, 298}, + dictWord{11, 0, 342}, + dictWord{11, 0, 365}, + dictWord{11, 0, 394}, + dictWord{11, 0, 620}, + dictWord{11, 0, 705}, + dictWord{11, 0, 1017}, + dictWord{12, 0, 123}, + dictWord{12, 0, 340}, + dictWord{12, 0, 406}, + dictWord{12, 0, 643}, + dictWord{13, 0, 61}, + dictWord{ + 13, + 0, 
+ 269, + }, + dictWord{13, 0, 311}, + dictWord{13, 0, 319}, + dictWord{13, 0, 486}, + dictWord{14, 0, 234}, + dictWord{15, 0, 62}, + dictWord{15, 0, 85}, + dictWord{16, 0, 71}, + dictWord{18, 0, 119}, + dictWord{20, 0, 105}, + dictWord{135, 10, 1912}, + dictWord{4, 11, 71}, + dictWord{5, 11, 376}, + dictWord{7, 11, 119}, + dictWord{138, 11, 665}, + dictWord{10, 0, 918}, + dictWord{10, 0, 926}, + dictWord{4, 10, 686}, + dictWord{136, 11, 55}, + dictWord{138, 10, 625}, + dictWord{136, 10, 706}, + dictWord{ + 132, + 11, + 479, + }, + dictWord{4, 10, 30}, + dictWord{133, 10, 43}, + dictWord{6, 0, 379}, + dictWord{7, 0, 270}, + dictWord{8, 0, 176}, + dictWord{8, 0, 183}, + dictWord{9, 0, 432}, + dictWord{ + 9, + 0, + 661, + }, + dictWord{12, 0, 247}, + dictWord{12, 0, 617}, + dictWord{18, 0, 125}, + dictWord{7, 11, 607}, + dictWord{8, 11, 99}, + dictWord{152, 11, 4}, + dictWord{ + 5, + 0, + 792, + }, + dictWord{133, 0, 900}, + dictWord{4, 11, 612}, + dictWord{133, 11, 561}, + dictWord{4, 11, 41}, + dictWord{4, 10, 220}, + dictWord{5, 11, 74}, + dictWord{ + 7, + 10, + 1535, + }, + dictWord{7, 11, 1627}, + dictWord{11, 11, 871}, + dictWord{140, 11, 619}, + dictWord{135, 0, 1920}, + dictWord{7, 11, 94}, + dictWord{11, 11, 329}, + dictWord{11, 11, 965}, + dictWord{12, 11, 241}, + dictWord{14, 11, 354}, + dictWord{15, 11, 22}, + dictWord{148, 11, 63}, + dictWord{9, 11, 209}, + dictWord{137, 11, 300}, + dictWord{134, 0, 771}, + dictWord{135, 0, 1979}, + dictWord{4, 0, 901}, + dictWord{133, 0, 776}, + dictWord{142, 0, 254}, + dictWord{133, 11, 98}, + dictWord{ + 9, + 11, + 16, + }, + dictWord{141, 11, 386}, + dictWord{133, 11, 984}, + dictWord{4, 11, 182}, + dictWord{6, 11, 205}, + dictWord{135, 11, 220}, + dictWord{7, 10, 1725}, + dictWord{ + 7, + 10, + 1774, + }, + dictWord{138, 10, 393}, + dictWord{5, 10, 263}, + dictWord{134, 10, 414}, + dictWord{4, 11, 42}, + dictWord{9, 11, 205}, + dictWord{9, 11, 786}, + dictWord{138, 11, 659}, + dictWord{14, 0, 140}, + dictWord{148, 0, 41}, + dictWord{8, 0, 440}, + dictWord{10, 0, 359}, + dictWord{6, 10, 178}, + dictWord{6, 11, 289}, + dictWord{ + 6, + 10, + 1750, + }, + dictWord{7, 11, 1670}, + dictWord{9, 10, 690}, + dictWord{10, 10, 155}, + dictWord{10, 10, 373}, + dictWord{11, 10, 698}, + dictWord{12, 11, 57}, + dictWord{13, 10, 155}, + dictWord{20, 10, 93}, + dictWord{151, 11, 4}, + dictWord{4, 0, 37}, + dictWord{5, 0, 334}, + dictWord{7, 0, 1253}, + dictWord{151, 11, 25}, + dictWord{ + 4, + 0, + 508, + }, + dictWord{4, 11, 635}, + dictWord{5, 10, 97}, + dictWord{137, 10, 393}, + dictWord{139, 11, 533}, + dictWord{4, 0, 640}, + dictWord{133, 0, 513}, + dictWord{ + 134, + 10, + 1639, + }, + dictWord{132, 11, 371}, + dictWord{4, 11, 272}, + dictWord{7, 11, 836}, + dictWord{7, 11, 1651}, + dictWord{145, 11, 89}, + dictWord{5, 11, 825}, + dictWord{6, 11, 444}, + dictWord{6, 11, 1640}, + dictWord{136, 11, 308}, + dictWord{4, 10, 191}, + dictWord{7, 10, 934}, + dictWord{8, 10, 647}, + dictWord{145, 10, 97}, + dictWord{12, 0, 246}, + dictWord{15, 0, 162}, + dictWord{19, 0, 64}, + dictWord{20, 0, 8}, + dictWord{20, 0, 95}, + dictWord{22, 0, 24}, + dictWord{152, 0, 17}, + dictWord{4, 0, 533}, + dictWord{5, 10, 165}, + dictWord{9, 10, 346}, + dictWord{138, 10, 655}, + dictWord{5, 11, 737}, + dictWord{139, 10, 885}, + dictWord{133, 10, 877}, + dictWord{ + 8, + 10, + 128, + }, + dictWord{139, 10, 179}, + dictWord{137, 11, 307}, + dictWord{140, 0, 752}, + dictWord{133, 0, 920}, + dictWord{135, 0, 1048}, + dictWord{5, 0, 153}, + dictWord{ + 6, + 0, + 580, + }, + 
dictWord{6, 10, 1663}, + dictWord{7, 10, 132}, + dictWord{7, 10, 1154}, + dictWord{7, 10, 1415}, + dictWord{7, 10, 1507}, + dictWord{12, 10, 493}, + dictWord{15, 10, 105}, + dictWord{151, 10, 15}, + dictWord{5, 10, 459}, + dictWord{7, 10, 1073}, + dictWord{8, 10, 241}, + dictWord{136, 10, 334}, + dictWord{138, 0, 391}, + dictWord{135, 0, 1952}, + dictWord{133, 11, 525}, + dictWord{8, 11, 641}, + dictWord{11, 11, 388}, + dictWord{140, 11, 580}, + dictWord{142, 0, 126}, + dictWord{ + 134, + 0, + 640, + }, + dictWord{132, 0, 483}, + dictWord{7, 0, 1616}, + dictWord{9, 0, 69}, + dictWord{6, 10, 324}, + dictWord{6, 10, 520}, + dictWord{7, 10, 338}, + dictWord{ + 7, + 10, + 1729, + }, + dictWord{8, 10, 228}, + dictWord{139, 10, 750}, + dictWord{5, 11, 493}, + dictWord{134, 11, 528}, + dictWord{135, 0, 734}, + dictWord{4, 11, 174}, + dictWord{135, 11, 911}, + dictWord{138, 0, 480}, + dictWord{9, 0, 495}, + dictWord{146, 0, 104}, + dictWord{135, 10, 705}, + dictWord{9, 0, 472}, + dictWord{4, 10, 73}, + dictWord{6, 10, 612}, + dictWord{7, 10, 927}, + dictWord{7, 10, 1330}, + dictWord{7, 10, 1822}, + dictWord{8, 10, 217}, + dictWord{9, 10, 765}, + dictWord{9, 10, 766}, + dictWord{10, 10, 408}, + dictWord{11, 10, 51}, + dictWord{11, 10, 793}, + dictWord{12, 10, 266}, + dictWord{15, 10, 158}, + dictWord{20, 10, 89}, + dictWord{150, 10, 32}, + dictWord{7, 11, 548}, + dictWord{137, 11, 58}, + dictWord{4, 11, 32}, + dictWord{5, 11, 215}, + dictWord{6, 11, 269}, + dictWord{7, 11, 1782}, + dictWord{7, 11, 1892}, + dictWord{10, 11, 16}, + dictWord{11, 11, 822}, + dictWord{11, 11, 954}, + dictWord{141, 11, 481}, + dictWord{132, 0, 874}, + dictWord{9, 0, 229}, + dictWord{5, 10, 389}, + dictWord{136, 10, 636}, + dictWord{7, 11, 1749}, + dictWord{136, 11, 477}, + dictWord{134, 0, 948}, + dictWord{5, 11, 308}, + dictWord{135, 11, 1088}, + dictWord{ + 4, + 0, + 748, + }, + dictWord{139, 0, 1009}, + dictWord{136, 10, 21}, + dictWord{6, 0, 555}, + dictWord{135, 0, 485}, + dictWord{5, 11, 126}, + dictWord{8, 11, 297}, + dictWord{ + 9, + 11, + 366, + }, + dictWord{9, 11, 445}, + dictWord{12, 11, 53}, + dictWord{12, 11, 374}, + dictWord{141, 11, 492}, + dictWord{7, 11, 1551}, + dictWord{139, 11, 361}, + dictWord{136, 0, 193}, + dictWord{136, 0, 472}, + dictWord{8, 0, 653}, + dictWord{13, 0, 93}, + dictWord{147, 0, 14}, + dictWord{132, 0, 984}, + dictWord{132, 11, 175}, + dictWord{5, 0, 172}, + dictWord{6, 0, 1971}, + dictWord{132, 11, 685}, + dictWord{149, 11, 8}, + dictWord{133, 11, 797}, + dictWord{13, 0, 83}, + dictWord{5, 10, 189}, + dictWord{ + 7, + 10, + 442, + }, + dictWord{7, 10, 443}, + dictWord{8, 10, 281}, + dictWord{12, 10, 174}, + dictWord{141, 10, 261}, + dictWord{134, 0, 1568}, + dictWord{133, 11, 565}, + dictWord{139, 0, 384}, + dictWord{133, 0, 260}, + dictWord{7, 0, 758}, + dictWord{7, 0, 880}, + dictWord{7, 0, 1359}, + dictWord{9, 0, 164}, + dictWord{9, 0, 167}, + dictWord{ + 10, + 0, + 156, + }, + dictWord{10, 0, 588}, + dictWord{12, 0, 101}, + dictWord{14, 0, 48}, + dictWord{15, 0, 70}, + dictWord{6, 10, 2}, + dictWord{7, 10, 1262}, + dictWord{ + 7, + 10, + 1737, + }, + dictWord{8, 10, 22}, + dictWord{8, 10, 270}, + dictWord{8, 10, 612}, + dictWord{9, 10, 312}, + dictWord{9, 10, 436}, + dictWord{10, 10, 311}, + dictWord{ + 10, + 10, + 623, + }, + dictWord{11, 10, 72}, + dictWord{11, 10, 330}, + dictWord{11, 10, 455}, + dictWord{12, 10, 321}, + dictWord{12, 10, 504}, + dictWord{12, 10, 530}, + dictWord{ + 12, + 10, + 543, + }, + dictWord{13, 10, 17}, + dictWord{13, 10, 156}, + dictWord{13, 10, 
334}, + dictWord{17, 10, 60}, + dictWord{148, 10, 64}, + dictWord{4, 11, 252}, + dictWord{ + 7, + 11, + 1068, + }, + dictWord{10, 11, 434}, + dictWord{11, 11, 228}, + dictWord{11, 11, 426}, + dictWord{13, 11, 231}, + dictWord{18, 11, 106}, + dictWord{148, 11, 87}, + dictWord{7, 10, 354}, + dictWord{10, 10, 410}, + dictWord{139, 10, 815}, + dictWord{6, 0, 367}, + dictWord{7, 10, 670}, + dictWord{7, 10, 1327}, + dictWord{8, 10, 411}, + dictWord{8, 10, 435}, + dictWord{9, 10, 653}, + dictWord{9, 10, 740}, + dictWord{10, 10, 385}, + dictWord{11, 10, 222}, + dictWord{11, 10, 324}, + dictWord{11, 10, 829}, + dictWord{140, 10, 611}, + dictWord{7, 0, 1174}, + dictWord{6, 10, 166}, + dictWord{135, 10, 374}, + dictWord{146, 0, 121}, + dictWord{132, 0, 828}, + dictWord{ + 5, + 11, + 231, + }, + dictWord{138, 11, 509}, + dictWord{7, 11, 601}, + dictWord{9, 11, 277}, + dictWord{9, 11, 674}, + dictWord{10, 11, 178}, + dictWord{10, 11, 257}, + dictWord{ + 10, + 11, + 418, + }, + dictWord{11, 11, 531}, + dictWord{11, 11, 544}, + dictWord{11, 11, 585}, + dictWord{12, 11, 113}, + dictWord{12, 11, 475}, + dictWord{13, 11, 99}, + dictWord{142, 11, 428}, + dictWord{134, 0, 1541}, + dictWord{135, 11, 1779}, + dictWord{5, 0, 343}, + dictWord{134, 10, 398}, + dictWord{135, 10, 50}, + dictWord{ + 135, + 11, + 1683, + }, + dictWord{4, 0, 440}, + dictWord{7, 0, 57}, + dictWord{8, 0, 167}, + dictWord{8, 0, 375}, + dictWord{9, 0, 82}, + dictWord{9, 0, 561}, + dictWord{9, 0, 744}, + dictWord{ + 10, + 0, + 620, + }, + dictWord{137, 11, 744}, + dictWord{134, 0, 926}, + dictWord{6, 10, 517}, + dictWord{7, 10, 1159}, + dictWord{10, 10, 621}, + dictWord{139, 10, 192}, + dictWord{137, 0, 827}, + dictWord{8, 0, 194}, + dictWord{136, 0, 756}, + dictWord{10, 10, 223}, + dictWord{139, 10, 645}, + dictWord{7, 10, 64}, + dictWord{ + 136, + 10, + 245, + }, + dictWord{4, 11, 399}, + dictWord{5, 11, 119}, + dictWord{5, 11, 494}, + dictWord{7, 11, 751}, + dictWord{137, 11, 556}, + dictWord{132, 0, 808}, + dictWord{ + 135, + 0, + 22, + }, + dictWord{7, 10, 1763}, + dictWord{140, 10, 310}, + dictWord{5, 0, 639}, + dictWord{7, 0, 1249}, + dictWord{11, 0, 896}, + dictWord{134, 11, 584}, + dictWord{ + 134, + 0, + 1614, + }, + dictWord{135, 0, 860}, + dictWord{135, 11, 1121}, + dictWord{5, 10, 129}, + dictWord{6, 10, 61}, + dictWord{135, 10, 947}, + dictWord{4, 0, 102}, + dictWord{ + 7, + 0, + 815, + }, + dictWord{7, 0, 1699}, + dictWord{139, 0, 964}, + dictWord{13, 10, 505}, + dictWord{141, 10, 506}, + dictWord{139, 10, 1000}, + dictWord{ + 132, + 11, + 679, + }, + dictWord{132, 0, 899}, + dictWord{132, 0, 569}, + dictWord{5, 11, 694}, + dictWord{137, 11, 714}, + dictWord{136, 0, 795}, + dictWord{6, 0, 2045}, + dictWord{ + 139, + 11, + 7, + }, + dictWord{6, 0, 52}, + dictWord{9, 0, 104}, + dictWord{9, 0, 559}, + dictWord{12, 0, 308}, + dictWord{147, 0, 87}, + dictWord{4, 0, 301}, + dictWord{132, 0, 604}, + dictWord{133, 10, 637}, + dictWord{136, 0, 779}, + dictWord{5, 11, 143}, + dictWord{5, 11, 769}, + dictWord{6, 11, 1760}, + dictWord{7, 11, 682}, + dictWord{7, 11, 1992}, + dictWord{136, 11, 736}, + dictWord{137, 10, 590}, + dictWord{147, 0, 32}, + dictWord{137, 11, 527}, + dictWord{5, 10, 280}, + dictWord{135, 10, 1226}, + dictWord{134, 0, 494}, + dictWord{6, 0, 677}, + dictWord{6, 0, 682}, + dictWord{134, 0, 1044}, + dictWord{133, 10, 281}, + dictWord{135, 10, 1064}, + dictWord{7, 0, 508}, + dictWord{133, 11, 860}, + dictWord{6, 11, 422}, + dictWord{7, 11, 0}, + dictWord{7, 11, 1544}, + dictWord{9, 11, 577}, + dictWord{11, 11, 990}, + 
dictWord{12, 11, 141}, + dictWord{12, 11, 453}, + dictWord{13, 11, 47}, + dictWord{141, 11, 266}, + dictWord{134, 0, 1014}, + dictWord{5, 11, 515}, + dictWord{137, 11, 131}, + dictWord{ + 134, + 0, + 957, + }, + dictWord{132, 11, 646}, + dictWord{6, 0, 310}, + dictWord{7, 0, 1849}, + dictWord{8, 0, 72}, + dictWord{8, 0, 272}, + dictWord{8, 0, 431}, + dictWord{9, 0, 12}, + dictWord{ + 9, + 0, + 376, + }, + dictWord{10, 0, 563}, + dictWord{10, 0, 630}, + dictWord{10, 0, 796}, + dictWord{10, 0, 810}, + dictWord{11, 0, 367}, + dictWord{11, 0, 599}, + dictWord{ + 11, + 0, + 686, + }, + dictWord{140, 0, 672}, + dictWord{7, 0, 570}, + dictWord{4, 11, 396}, + dictWord{7, 10, 120}, + dictWord{7, 11, 728}, + dictWord{8, 10, 489}, + dictWord{9, 11, 117}, + dictWord{9, 10, 319}, + dictWord{10, 10, 820}, + dictWord{11, 10, 1004}, + dictWord{12, 10, 379}, + dictWord{12, 10, 679}, + dictWord{13, 10, 117}, + dictWord{ + 13, + 11, + 202, + }, + dictWord{13, 10, 412}, + dictWord{14, 10, 25}, + dictWord{15, 10, 52}, + dictWord{15, 10, 161}, + dictWord{16, 10, 47}, + dictWord{20, 11, 51}, + dictWord{ + 149, + 10, + 2, + }, + dictWord{6, 11, 121}, + dictWord{6, 11, 124}, + dictWord{6, 11, 357}, + dictWord{7, 11, 1138}, + dictWord{7, 11, 1295}, + dictWord{8, 11, 162}, + dictWord{ + 139, + 11, + 655, + }, + dictWord{8, 0, 449}, + dictWord{4, 10, 937}, + dictWord{5, 10, 801}, + dictWord{136, 11, 449}, + dictWord{139, 11, 958}, + dictWord{6, 0, 181}, + dictWord{ + 7, + 0, + 537, + }, + dictWord{8, 0, 64}, + dictWord{9, 0, 127}, + dictWord{10, 0, 496}, + dictWord{12, 0, 510}, + dictWord{141, 0, 384}, + dictWord{138, 11, 253}, + dictWord{4, 0, 244}, + dictWord{135, 0, 233}, + dictWord{133, 11, 237}, + dictWord{132, 10, 365}, + dictWord{6, 0, 1650}, + dictWord{10, 0, 702}, + dictWord{139, 0, 245}, + dictWord{ + 5, + 10, + 7, + }, + dictWord{139, 10, 774}, + dictWord{13, 0, 463}, + dictWord{20, 0, 49}, + dictWord{13, 11, 463}, + dictWord{148, 11, 49}, + dictWord{4, 10, 734}, + dictWord{ + 5, + 10, + 662, + }, + dictWord{134, 10, 430}, + dictWord{4, 10, 746}, + dictWord{135, 10, 1090}, + dictWord{5, 10, 360}, + dictWord{136, 10, 237}, + dictWord{137, 0, 338}, + dictWord{143, 11, 10}, + dictWord{7, 11, 571}, + dictWord{138, 11, 366}, + dictWord{134, 0, 1279}, + dictWord{9, 11, 513}, + dictWord{10, 11, 22}, + dictWord{10, 11, 39}, + dictWord{12, 11, 122}, + dictWord{140, 11, 187}, + dictWord{133, 0, 896}, + dictWord{146, 0, 178}, + dictWord{134, 0, 695}, + dictWord{137, 0, 808}, + dictWord{ + 134, + 11, + 587, + }, + dictWord{7, 11, 107}, + dictWord{7, 11, 838}, + dictWord{8, 11, 550}, + dictWord{138, 11, 401}, + dictWord{7, 0, 1117}, + dictWord{136, 0, 539}, + dictWord{ + 4, + 10, + 277, + }, + dictWord{5, 10, 608}, + dictWord{6, 10, 493}, + dictWord{7, 10, 457}, + dictWord{140, 10, 384}, + dictWord{133, 11, 768}, + dictWord{12, 0, 257}, + dictWord{ + 7, + 10, + 27, + }, + dictWord{135, 10, 316}, + dictWord{140, 0, 1003}, + dictWord{4, 0, 207}, + dictWord{5, 0, 586}, + dictWord{5, 0, 676}, + dictWord{6, 0, 448}, + dictWord{ + 8, + 0, + 244, + }, + dictWord{11, 0, 1}, + dictWord{13, 0, 3}, + dictWord{16, 0, 54}, + dictWord{17, 0, 4}, + dictWord{18, 0, 13}, + dictWord{133, 10, 552}, + dictWord{4, 10, 401}, + dictWord{ + 137, + 10, + 264, + }, + dictWord{5, 0, 516}, + dictWord{7, 0, 1883}, + dictWord{135, 11, 1883}, + dictWord{12, 0, 960}, + dictWord{132, 11, 894}, + dictWord{5, 0, 4}, + dictWord{ + 5, + 0, + 810, + }, + dictWord{6, 0, 13}, + dictWord{6, 0, 538}, + dictWord{6, 0, 1690}, + dictWord{6, 0, 1726}, + 
dictWord{7, 0, 499}, + dictWord{7, 0, 1819}, + dictWord{8, 0, 148}, + dictWord{ + 8, + 0, + 696, + }, + dictWord{8, 0, 791}, + dictWord{12, 0, 125}, + dictWord{143, 0, 9}, + dictWord{135, 0, 1268}, + dictWord{11, 0, 30}, + dictWord{14, 0, 315}, + dictWord{ + 9, + 10, + 543, + }, + dictWord{10, 10, 524}, + dictWord{12, 10, 524}, + dictWord{16, 10, 18}, + dictWord{20, 10, 26}, + dictWord{148, 10, 65}, + dictWord{6, 0, 748}, + dictWord{ + 4, + 10, + 205, + }, + dictWord{5, 10, 623}, + dictWord{7, 10, 104}, + dictWord{136, 10, 519}, + dictWord{11, 0, 542}, + dictWord{139, 0, 852}, + dictWord{140, 0, 6}, + dictWord{ + 132, + 0, + 848, + }, + dictWord{7, 0, 1385}, + dictWord{11, 0, 582}, + dictWord{11, 0, 650}, + dictWord{11, 0, 901}, + dictWord{11, 0, 949}, + dictWord{12, 0, 232}, + dictWord{12, 0, 236}, + dictWord{13, 0, 413}, + dictWord{13, 0, 501}, + dictWord{18, 0, 116}, + dictWord{7, 10, 579}, + dictWord{9, 10, 41}, + dictWord{9, 10, 244}, + dictWord{9, 10, 669}, + dictWord{10, 10, 5}, + dictWord{11, 10, 861}, + dictWord{11, 10, 951}, + dictWord{139, 10, 980}, + dictWord{4, 0, 945}, + dictWord{6, 0, 1811}, + dictWord{6, 0, 1845}, + dictWord{ + 6, + 0, + 1853, + }, + dictWord{6, 0, 1858}, + dictWord{8, 0, 862}, + dictWord{12, 0, 782}, + dictWord{12, 0, 788}, + dictWord{18, 0, 160}, + dictWord{148, 0, 117}, + dictWord{ + 132, + 10, + 717, + }, + dictWord{4, 0, 925}, + dictWord{5, 0, 803}, + dictWord{8, 0, 698}, + dictWord{138, 0, 828}, + dictWord{134, 0, 1416}, + dictWord{132, 0, 610}, + dictWord{ + 139, + 0, + 992, + }, + dictWord{6, 0, 878}, + dictWord{134, 0, 1477}, + dictWord{135, 0, 1847}, + dictWord{138, 11, 531}, + dictWord{137, 11, 539}, + dictWord{134, 11, 272}, + dictWord{133, 0, 383}, + dictWord{134, 0, 1404}, + dictWord{132, 10, 489}, + dictWord{4, 11, 9}, + dictWord{5, 11, 128}, + dictWord{7, 11, 368}, + dictWord{ + 11, + 11, + 480, + }, + dictWord{148, 11, 3}, + dictWord{136, 0, 986}, + dictWord{9, 0, 660}, + dictWord{138, 0, 347}, + dictWord{135, 10, 892}, + dictWord{136, 11, 682}, + dictWord{ + 7, + 0, + 572, + }, + dictWord{9, 0, 592}, + dictWord{11, 0, 680}, + dictWord{12, 0, 356}, + dictWord{140, 0, 550}, + dictWord{7, 0, 1411}, + dictWord{138, 11, 527}, + dictWord{ + 4, + 11, + 2, + }, + dictWord{7, 11, 545}, + dictWord{135, 11, 894}, + dictWord{137, 10, 473}, + dictWord{11, 0, 64}, + dictWord{7, 11, 481}, + dictWord{7, 10, 819}, + dictWord{9, 10, 26}, + dictWord{9, 10, 392}, + dictWord{9, 11, 792}, + dictWord{10, 10, 152}, + dictWord{10, 10, 226}, + dictWord{12, 10, 276}, + dictWord{12, 10, 426}, + dictWord{ + 12, + 10, + 589, + }, + dictWord{13, 10, 460}, + dictWord{15, 10, 97}, + dictWord{19, 10, 48}, + dictWord{148, 10, 104}, + dictWord{135, 10, 51}, + dictWord{136, 11, 445}, + dictWord{136, 11, 646}, + dictWord{135, 0, 606}, + dictWord{132, 10, 674}, + dictWord{6, 0, 1829}, + dictWord{134, 0, 1830}, + dictWord{132, 10, 770}, + dictWord{ + 5, + 10, + 79, + }, + dictWord{7, 10, 1027}, + dictWord{7, 10, 1477}, + dictWord{139, 10, 52}, + dictWord{5, 11, 530}, + dictWord{142, 11, 113}, + dictWord{134, 10, 1666}, + dictWord{ + 7, + 0, + 748, + }, + dictWord{139, 0, 700}, + dictWord{134, 10, 195}, + dictWord{133, 10, 789}, + dictWord{9, 0, 87}, + dictWord{10, 0, 365}, + dictWord{4, 10, 251}, + dictWord{ + 4, + 10, + 688, + }, + dictWord{7, 10, 513}, + dictWord{135, 10, 1284}, + dictWord{136, 11, 111}, + dictWord{133, 0, 127}, + dictWord{6, 0, 198}, + dictWord{140, 0, 83}, + dictWord{133, 11, 556}, + dictWord{133, 10, 889}, + dictWord{4, 10, 160}, + dictWord{5, 10, 330}, + 
dictWord{7, 10, 1434},
+ dictWord{136, 10, 174},
+ [… several thousand additional machine-generated dictWord entries in this vendored lookup table, one per diff line, elided …]
+ dictWord{147, 10, 119},
+ dictWord{12, 11, 
108}, + dictWord{141, 11, 291}, + dictWord{ + 11, + 0, + 69, + }, + dictWord{12, 0, 105}, + dictWord{12, 0, 117}, + dictWord{13, 0, 213}, + dictWord{14, 0, 13}, + dictWord{14, 0, 62}, + dictWord{14, 0, 177}, + dictWord{14, 0, 421}, + dictWord{15, 0, 19}, + dictWord{146, 0, 141}, + dictWord{137, 0, 309}, + dictWord{11, 11, 278}, + dictWord{142, 11, 73}, + dictWord{7, 0, 608}, + dictWord{7, 0, 976}, + dictWord{9, 0, 146}, + dictWord{10, 0, 206}, + dictWord{10, 0, 596}, + dictWord{13, 0, 218}, + dictWord{142, 0, 153}, + dictWord{133, 10, 332}, + dictWord{6, 10, 261}, + dictWord{ + 8, + 10, + 182, + }, + dictWord{139, 10, 943}, + dictWord{4, 11, 493}, + dictWord{144, 11, 55}, + dictWord{134, 10, 1721}, + dictWord{132, 0, 768}, + dictWord{4, 10, 933}, + dictWord{133, 10, 880}, + dictWord{7, 11, 555}, + dictWord{7, 11, 1316}, + dictWord{7, 11, 1412}, + dictWord{7, 11, 1839}, + dictWord{9, 11, 192}, + dictWord{ + 9, + 11, + 589, + }, + dictWord{11, 11, 241}, + dictWord{11, 11, 676}, + dictWord{11, 11, 811}, + dictWord{11, 11, 891}, + dictWord{12, 11, 140}, + dictWord{12, 11, 346}, + dictWord{ + 12, + 11, + 479, + }, + dictWord{13, 11, 30}, + dictWord{13, 11, 49}, + dictWord{13, 11, 381}, + dictWord{14, 11, 188}, + dictWord{15, 11, 150}, + dictWord{16, 11, 76}, + dictWord{18, 11, 30}, + dictWord{148, 11, 52}, + dictWord{4, 0, 518}, + dictWord{135, 0, 1136}, + dictWord{6, 11, 568}, + dictWord{7, 11, 112}, + dictWord{7, 11, 1804}, + dictWord{8, 11, 362}, + dictWord{8, 11, 410}, + dictWord{8, 11, 830}, + dictWord{9, 11, 514}, + dictWord{11, 11, 649}, + dictWord{142, 11, 157}, + dictWord{135, 11, 673}, + dictWord{8, 0, 689}, + dictWord{137, 0, 863}, + dictWord{4, 0, 18}, + dictWord{7, 0, 145}, + dictWord{7, 0, 444}, + dictWord{7, 0, 1278}, + dictWord{8, 0, 49}, + dictWord{8, 0, 400}, + dictWord{9, 0, 71}, + dictWord{9, 0, 250}, + dictWord{10, 0, 459}, + dictWord{12, 0, 160}, + dictWord{16, 0, 24}, + dictWord{132, 11, 625}, + dictWord{140, 0, 1020}, + dictWord{4, 0, 997}, + dictWord{6, 0, 1946}, + dictWord{6, 0, 1984}, + dictWord{134, 0, 1998}, + dictWord{6, 11, 16}, + dictWord{6, 11, 158}, + dictWord{7, 11, 43}, + dictWord{ + 7, + 11, + 129, + }, + dictWord{7, 11, 181}, + dictWord{8, 11, 276}, + dictWord{8, 11, 377}, + dictWord{10, 11, 523}, + dictWord{11, 11, 816}, + dictWord{12, 11, 455}, + dictWord{ + 13, + 11, + 303, + }, + dictWord{142, 11, 135}, + dictWord{133, 10, 812}, + dictWord{134, 0, 658}, + dictWord{4, 11, 1}, + dictWord{7, 11, 1143}, + dictWord{7, 11, 1463}, + dictWord{8, 11, 61}, + dictWord{9, 11, 207}, + dictWord{9, 11, 390}, + dictWord{9, 11, 467}, + dictWord{139, 11, 836}, + dictWord{150, 11, 26}, + dictWord{140, 0, 106}, + dictWord{6, 0, 1827}, + dictWord{10, 0, 931}, + dictWord{18, 0, 166}, + dictWord{20, 0, 114}, + dictWord{4, 10, 137}, + dictWord{7, 10, 1178}, + dictWord{7, 11, 1319}, + dictWord{135, 10, 1520}, + dictWord{133, 0, 1010}, + dictWord{4, 11, 723}, + dictWord{5, 11, 895}, + dictWord{7, 11, 1031}, + dictWord{8, 11, 199}, + dictWord{8, 11, 340}, + dictWord{9, 11, 153}, + dictWord{9, 11, 215}, + dictWord{10, 11, 21}, + dictWord{10, 11, 59}, + dictWord{10, 11, 80}, + dictWord{10, 11, 224}, + dictWord{11, 11, 229}, + dictWord{11, 11, 652}, + dictWord{12, 11, 192}, + dictWord{13, 11, 146}, + dictWord{142, 11, 91}, + dictWord{132, 11, 295}, + dictWord{6, 11, 619}, + dictWord{ + 7, + 11, + 898, + }, + dictWord{7, 11, 1092}, + dictWord{8, 11, 485}, + dictWord{18, 11, 28}, + dictWord{147, 11, 116}, + dictWord{137, 11, 51}, + dictWord{6, 10, 1661}, + dictWord{ + 7, + 10, + 1975, 
+ }, + dictWord{7, 10, 2009}, + dictWord{135, 10, 2011}, + dictWord{5, 11, 309}, + dictWord{140, 11, 211}, + dictWord{5, 0, 87}, + dictWord{7, 0, 313}, + dictWord{ + 7, + 0, + 1103, + }, + dictWord{10, 0, 208}, + dictWord{10, 0, 582}, + dictWord{11, 0, 389}, + dictWord{11, 0, 813}, + dictWord{12, 0, 385}, + dictWord{13, 0, 286}, + dictWord{ + 14, + 0, + 124, + }, + dictWord{146, 0, 108}, + dictWord{5, 11, 125}, + dictWord{8, 11, 77}, + dictWord{138, 11, 15}, + dictWord{132, 0, 267}, + dictWord{133, 0, 703}, + dictWord{ + 137, + 11, + 155, + }, + dictWord{133, 11, 439}, + dictWord{11, 11, 164}, + dictWord{140, 11, 76}, + dictWord{9, 0, 496}, + dictWord{5, 10, 89}, + dictWord{7, 10, 1915}, + dictWord{ + 9, + 10, + 185, + }, + dictWord{9, 10, 235}, + dictWord{10, 10, 64}, + dictWord{10, 10, 270}, + dictWord{10, 10, 403}, + dictWord{10, 10, 469}, + dictWord{10, 10, 529}, + dictWord{10, 10, 590}, + dictWord{11, 10, 140}, + dictWord{11, 10, 860}, + dictWord{13, 10, 1}, + dictWord{13, 10, 422}, + dictWord{14, 10, 341}, + dictWord{14, 10, 364}, + dictWord{17, 10, 93}, + dictWord{18, 10, 113}, + dictWord{19, 10, 97}, + dictWord{147, 10, 113}, + dictWord{133, 10, 695}, + dictWord{135, 0, 1121}, + dictWord{ + 5, + 10, + 6, + }, + dictWord{6, 10, 183}, + dictWord{7, 10, 680}, + dictWord{7, 10, 978}, + dictWord{7, 10, 1013}, + dictWord{7, 10, 1055}, + dictWord{12, 10, 230}, + dictWord{ + 13, + 10, + 172, + }, + dictWord{146, 10, 29}, + dictWord{4, 11, 8}, + dictWord{7, 11, 1152}, + dictWord{7, 11, 1153}, + dictWord{7, 11, 1715}, + dictWord{9, 11, 374}, + dictWord{ + 10, + 11, + 478, + }, + dictWord{139, 11, 648}, + dictWord{135, 11, 1099}, + dictWord{6, 10, 29}, + dictWord{139, 10, 63}, + dictWord{4, 0, 561}, + dictWord{10, 0, 249}, + dictWord{ + 139, + 0, + 209, + }, + dictWord{132, 0, 760}, + dictWord{7, 11, 799}, + dictWord{138, 11, 511}, + dictWord{136, 11, 87}, + dictWord{9, 0, 154}, + dictWord{140, 0, 485}, + dictWord{136, 0, 255}, + dictWord{132, 0, 323}, + dictWord{140, 0, 419}, + dictWord{132, 10, 311}, + dictWord{134, 10, 1740}, + dictWord{4, 0, 368}, + dictWord{ + 135, + 0, + 641, + }, + dictWord{7, 10, 170}, + dictWord{8, 10, 90}, + dictWord{8, 10, 177}, + dictWord{8, 10, 415}, + dictWord{11, 10, 714}, + dictWord{142, 10, 281}, + dictWord{ + 4, + 11, + 69, + }, + dictWord{5, 11, 122}, + dictWord{9, 11, 656}, + dictWord{138, 11, 464}, + dictWord{5, 11, 849}, + dictWord{134, 11, 1633}, + dictWord{8, 0, 522}, + dictWord{ + 142, + 0, + 328, + }, + dictWord{11, 10, 91}, + dictWord{13, 10, 129}, + dictWord{15, 10, 101}, + dictWord{145, 10, 125}, + dictWord{7, 0, 562}, + dictWord{8, 0, 551}, + dictWord{ + 4, + 10, + 494, + }, + dictWord{6, 10, 74}, + dictWord{7, 10, 44}, + dictWord{11, 11, 499}, + dictWord{12, 10, 17}, + dictWord{15, 10, 5}, + dictWord{148, 10, 11}, + dictWord{4, 10, 276}, + dictWord{133, 10, 296}, + dictWord{9, 0, 92}, + dictWord{147, 0, 91}, + dictWord{4, 10, 7}, + dictWord{5, 10, 90}, + dictWord{5, 10, 158}, + dictWord{6, 10, 542}, + dictWord{ + 7, + 10, + 221, + }, + dictWord{7, 10, 1574}, + dictWord{9, 10, 490}, + dictWord{10, 10, 540}, + dictWord{11, 10, 443}, + dictWord{139, 10, 757}, + dictWord{6, 0, 525}, + dictWord{ + 6, + 0, + 1976, + }, + dictWord{8, 0, 806}, + dictWord{9, 0, 876}, + dictWord{140, 0, 284}, + dictWord{5, 11, 859}, + dictWord{7, 10, 588}, + dictWord{7, 11, 1160}, + dictWord{ + 8, + 11, + 107, + }, + dictWord{9, 10, 175}, + dictWord{9, 11, 291}, + dictWord{9, 11, 439}, + dictWord{10, 10, 530}, + dictWord{10, 11, 663}, + dictWord{11, 11, 609}, + dictWord{ 
+ 140, + 11, + 197, + }, + dictWord{7, 11, 168}, + dictWord{13, 11, 196}, + dictWord{141, 11, 237}, + dictWord{139, 0, 958}, + dictWord{133, 0, 594}, + dictWord{135, 10, 580}, + dictWord{7, 10, 88}, + dictWord{136, 10, 627}, + dictWord{6, 0, 479}, + dictWord{6, 0, 562}, + dictWord{7, 0, 1060}, + dictWord{13, 0, 6}, + dictWord{5, 10, 872}, + dictWord{ + 6, + 10, + 57, + }, + dictWord{7, 10, 471}, + dictWord{9, 10, 447}, + dictWord{137, 10, 454}, + dictWord{136, 11, 413}, + dictWord{145, 11, 19}, + dictWord{4, 11, 117}, + dictWord{ + 6, + 11, + 372, + }, + dictWord{7, 11, 1905}, + dictWord{142, 11, 323}, + dictWord{4, 11, 722}, + dictWord{139, 11, 471}, + dictWord{17, 0, 61}, + dictWord{5, 10, 31}, + dictWord{134, 10, 614}, + dictWord{8, 10, 330}, + dictWord{140, 10, 477}, + dictWord{7, 10, 1200}, + dictWord{138, 10, 460}, + dictWord{6, 10, 424}, + dictWord{ + 135, + 10, + 1866, + }, + dictWord{6, 0, 1641}, + dictWord{136, 0, 820}, + dictWord{6, 0, 1556}, + dictWord{134, 0, 1618}, + dictWord{9, 11, 5}, + dictWord{12, 11, 216}, + dictWord{ + 12, + 11, + 294, + }, + dictWord{12, 11, 298}, + dictWord{12, 11, 400}, + dictWord{12, 11, 518}, + dictWord{13, 11, 229}, + dictWord{143, 11, 139}, + dictWord{15, 11, 155}, + dictWord{144, 11, 79}, + dictWord{4, 0, 302}, + dictWord{135, 0, 1766}, + dictWord{5, 10, 13}, + dictWord{134, 10, 142}, + dictWord{6, 0, 148}, + dictWord{7, 0, 1313}, + dictWord{ + 7, + 10, + 116, + }, + dictWord{8, 10, 322}, + dictWord{8, 10, 755}, + dictWord{9, 10, 548}, + dictWord{10, 10, 714}, + dictWord{11, 10, 884}, + dictWord{141, 10, 324}, + dictWord{137, 0, 676}, + dictWord{9, 11, 88}, + dictWord{139, 11, 270}, + dictWord{5, 11, 12}, + dictWord{7, 11, 375}, + dictWord{137, 11, 438}, + dictWord{134, 0, 1674}, + dictWord{7, 10, 1472}, + dictWord{135, 10, 1554}, + dictWord{11, 0, 178}, + dictWord{7, 10, 1071}, + dictWord{7, 10, 1541}, + dictWord{7, 10, 1767}, + dictWord{ + 7, + 10, + 1806, + }, + dictWord{11, 10, 162}, + dictWord{11, 10, 242}, + dictWord{12, 10, 605}, + dictWord{15, 10, 26}, + dictWord{144, 10, 44}, + dictWord{6, 0, 389}, + dictWord{ + 7, + 0, + 149, + }, + dictWord{9, 0, 142}, + dictWord{138, 0, 94}, + dictWord{140, 11, 71}, + dictWord{145, 10, 115}, + dictWord{6, 0, 8}, + dictWord{7, 0, 1881}, + dictWord{8, 0, 91}, + dictWord{11, 11, 966}, + dictWord{12, 11, 287}, + dictWord{13, 11, 342}, + dictWord{13, 11, 402}, + dictWord{15, 11, 110}, + dictWord{143, 11, 163}, + dictWord{ + 4, + 11, + 258, + }, + dictWord{136, 11, 639}, + dictWord{6, 11, 22}, + dictWord{7, 11, 903}, + dictWord{138, 11, 577}, + dictWord{133, 11, 681}, + dictWord{135, 10, 1111}, + dictWord{135, 11, 1286}, + dictWord{9, 0, 112}, + dictWord{8, 10, 1}, + dictWord{138, 10, 326}, + dictWord{5, 10, 488}, + dictWord{6, 10, 527}, + dictWord{7, 10, 489}, + dictWord{ + 7, + 10, + 1636, + }, + dictWord{8, 10, 121}, + dictWord{8, 10, 144}, + dictWord{8, 10, 359}, + dictWord{9, 10, 193}, + dictWord{9, 10, 241}, + dictWord{9, 10, 336}, + dictWord{ + 9, + 10, + 882, + }, + dictWord{11, 10, 266}, + dictWord{11, 10, 372}, + dictWord{11, 10, 944}, + dictWord{12, 10, 401}, + dictWord{140, 10, 641}, + dictWord{4, 11, 664}, + dictWord{133, 11, 804}, + dictWord{6, 0, 747}, + dictWord{134, 0, 1015}, + dictWord{135, 0, 1746}, + dictWord{9, 10, 31}, + dictWord{10, 10, 244}, + dictWord{ + 10, + 10, + 699, + }, + dictWord{12, 10, 149}, + dictWord{141, 10, 497}, + dictWord{133, 10, 377}, + dictWord{135, 0, 24}, + dictWord{6, 0, 1352}, + dictWord{5, 11, 32}, + dictWord{ + 145, + 10, + 101, + }, + dictWord{7, 0, 1530}, 
+ dictWord{10, 0, 158}, + dictWord{13, 0, 13}, + dictWord{13, 0, 137}, + dictWord{13, 0, 258}, + dictWord{14, 0, 111}, + dictWord{ + 14, + 0, + 225, + }, + dictWord{14, 0, 253}, + dictWord{14, 0, 304}, + dictWord{14, 0, 339}, + dictWord{14, 0, 417}, + dictWord{146, 0, 33}, + dictWord{4, 0, 503}, + dictWord{ + 135, + 0, + 1661, + }, + dictWord{5, 0, 130}, + dictWord{6, 0, 845}, + dictWord{7, 0, 1314}, + dictWord{9, 0, 610}, + dictWord{10, 0, 718}, + dictWord{11, 0, 601}, + dictWord{11, 0, 819}, + dictWord{11, 0, 946}, + dictWord{140, 0, 536}, + dictWord{10, 0, 149}, + dictWord{11, 0, 280}, + dictWord{142, 0, 336}, + dictWord{134, 0, 1401}, + dictWord{ + 135, + 0, + 1946, + }, + dictWord{8, 0, 663}, + dictWord{144, 0, 8}, + dictWord{134, 0, 1607}, + dictWord{135, 10, 2023}, + dictWord{4, 11, 289}, + dictWord{7, 11, 629}, + dictWord{ + 7, + 11, + 1698, + }, + dictWord{7, 11, 1711}, + dictWord{140, 11, 215}, + dictWord{6, 11, 450}, + dictWord{136, 11, 109}, + dictWord{10, 0, 882}, + dictWord{10, 0, 883}, + dictWord{10, 0, 914}, + dictWord{138, 0, 928}, + dictWord{133, 10, 843}, + dictWord{136, 11, 705}, + dictWord{132, 10, 554}, + dictWord{133, 10, 536}, + dictWord{ + 5, + 0, + 417, + }, + dictWord{9, 10, 79}, + dictWord{11, 10, 625}, + dictWord{145, 10, 7}, + dictWord{7, 11, 1238}, + dictWord{142, 11, 37}, + dictWord{4, 0, 392}, + dictWord{ + 135, + 0, + 1597, + }, + dictWord{5, 0, 433}, + dictWord{9, 0, 633}, + dictWord{11, 0, 629}, + dictWord{132, 10, 424}, + dictWord{7, 10, 336}, + dictWord{136, 10, 785}, + dictWord{ + 134, + 11, + 355, + }, + dictWord{6, 0, 234}, + dictWord{7, 0, 769}, + dictWord{9, 0, 18}, + dictWord{138, 0, 358}, + dictWord{4, 10, 896}, + dictWord{134, 10, 1777}, + dictWord{ + 138, + 11, + 323, + }, + dictWord{7, 0, 140}, + dictWord{7, 0, 1950}, + dictWord{8, 0, 680}, + dictWord{11, 0, 817}, + dictWord{147, 0, 88}, + dictWord{7, 0, 1222}, + dictWord{ + 138, + 0, + 386, + }, + dictWord{139, 11, 908}, + dictWord{11, 0, 249}, + dictWord{12, 0, 313}, + dictWord{16, 0, 66}, + dictWord{145, 0, 26}, + dictWord{134, 0, 5}, + dictWord{7, 10, 750}, + dictWord{9, 10, 223}, + dictWord{11, 10, 27}, + dictWord{11, 10, 466}, + dictWord{12, 10, 624}, + dictWord{14, 10, 265}, + dictWord{146, 10, 61}, + dictWord{ + 134, + 11, + 26, + }, + dictWord{134, 0, 1216}, + dictWord{5, 0, 963}, + dictWord{134, 0, 1773}, + dictWord{4, 11, 414}, + dictWord{5, 11, 467}, + dictWord{9, 11, 654}, + dictWord{ + 10, + 11, + 451, + }, + dictWord{12, 11, 59}, + dictWord{141, 11, 375}, + dictWord{135, 11, 17}, + dictWord{4, 10, 603}, + dictWord{133, 10, 661}, + dictWord{4, 10, 11}, + dictWord{ + 6, + 10, + 128, + }, + dictWord{7, 10, 231}, + dictWord{7, 10, 1533}, + dictWord{138, 10, 725}, + dictWord{135, 11, 955}, + dictWord{7, 0, 180}, + dictWord{8, 0, 509}, + dictWord{ + 136, + 0, + 792, + }, + dictWord{132, 10, 476}, + dictWord{132, 0, 1002}, + dictWord{133, 11, 538}, + dictWord{135, 10, 1807}, + dictWord{132, 0, 931}, + dictWord{7, 0, 943}, + dictWord{11, 0, 614}, + dictWord{140, 0, 747}, + dictWord{135, 0, 1837}, + dictWord{9, 10, 20}, + dictWord{10, 10, 324}, + dictWord{10, 10, 807}, + dictWord{ + 139, + 10, + 488, + }, + dictWord{134, 0, 641}, + dictWord{6, 11, 280}, + dictWord{10, 11, 502}, + dictWord{11, 11, 344}, + dictWord{140, 11, 38}, + dictWord{5, 11, 45}, + dictWord{ + 7, + 11, + 1161, + }, + dictWord{11, 11, 448}, + dictWord{11, 11, 880}, + dictWord{13, 11, 139}, + dictWord{13, 11, 407}, + dictWord{15, 11, 16}, + dictWord{17, 11, 95}, + dictWord{ + 18, + 11, + 66, + }, + dictWord{18, 11, 
88}, + dictWord{18, 11, 123}, + dictWord{149, 11, 7}, + dictWord{9, 0, 280}, + dictWord{138, 0, 134}, + dictWord{22, 0, 22}, + dictWord{23, 0, 5}, + dictWord{151, 0, 29}, + dictWord{136, 11, 777}, + dictWord{4, 0, 90}, + dictWord{5, 0, 545}, + dictWord{7, 0, 754}, + dictWord{9, 0, 186}, + dictWord{10, 0, 72}, + dictWord{ + 10, + 0, + 782, + }, + dictWord{11, 0, 577}, + dictWord{11, 0, 610}, + dictWord{11, 0, 960}, + dictWord{12, 0, 354}, + dictWord{12, 0, 362}, + dictWord{12, 0, 595}, + dictWord{ + 4, + 11, + 410, + }, + dictWord{135, 11, 521}, + dictWord{135, 11, 1778}, + dictWord{5, 10, 112}, + dictWord{6, 10, 103}, + dictWord{134, 10, 150}, + dictWord{138, 10, 356}, + dictWord{132, 0, 742}, + dictWord{7, 0, 151}, + dictWord{9, 0, 329}, + dictWord{139, 0, 254}, + dictWord{8, 0, 853}, + dictWord{8, 0, 881}, + dictWord{8, 0, 911}, + dictWord{ + 8, + 0, + 912, + }, + dictWord{10, 0, 872}, + dictWord{12, 0, 741}, + dictWord{12, 0, 742}, + dictWord{152, 0, 18}, + dictWord{4, 11, 573}, + dictWord{136, 11, 655}, + dictWord{ + 6, + 0, + 921, + }, + dictWord{134, 0, 934}, + dictWord{9, 0, 187}, + dictWord{10, 0, 36}, + dictWord{11, 0, 1016}, + dictWord{17, 0, 44}, + dictWord{146, 0, 64}, + dictWord{7, 0, 833}, + dictWord{136, 0, 517}, + dictWord{4, 0, 506}, + dictWord{5, 0, 295}, + dictWord{135, 0, 1680}, + dictWord{4, 10, 708}, + dictWord{8, 10, 15}, + dictWord{9, 10, 50}, + dictWord{ + 9, + 10, + 386, + }, + dictWord{11, 10, 18}, + dictWord{11, 10, 529}, + dictWord{140, 10, 228}, + dictWord{7, 0, 251}, + dictWord{7, 0, 1701}, + dictWord{8, 0, 436}, + dictWord{ + 4, + 10, + 563, + }, + dictWord{7, 10, 592}, + dictWord{7, 10, 637}, + dictWord{7, 10, 770}, + dictWord{8, 10, 463}, + dictWord{9, 10, 60}, + dictWord{9, 10, 335}, + dictWord{9, 10, 904}, + dictWord{10, 10, 73}, + dictWord{11, 10, 434}, + dictWord{12, 10, 585}, + dictWord{13, 10, 331}, + dictWord{18, 10, 110}, + dictWord{148, 10, 60}, + dictWord{ + 132, + 10, + 502, + }, + dictWord{136, 0, 584}, + dictWord{6, 10, 347}, + dictWord{138, 10, 161}, + dictWord{7, 0, 987}, + dictWord{9, 0, 688}, + dictWord{10, 0, 522}, + dictWord{ + 11, + 0, + 788, + }, + dictWord{12, 0, 137}, + dictWord{12, 0, 566}, + dictWord{14, 0, 9}, + dictWord{14, 0, 24}, + dictWord{14, 0, 64}, + dictWord{7, 11, 899}, + dictWord{142, 11, 325}, + dictWord{4, 0, 214}, + dictWord{5, 0, 500}, + dictWord{5, 10, 102}, + dictWord{6, 10, 284}, + dictWord{7, 10, 1079}, + dictWord{7, 10, 1423}, + dictWord{7, 10, 1702}, + dictWord{ + 8, + 10, + 470, + }, + dictWord{9, 10, 554}, + dictWord{9, 10, 723}, + dictWord{139, 10, 333}, + dictWord{7, 10, 246}, + dictWord{135, 10, 840}, + dictWord{6, 10, 10}, + dictWord{ + 8, + 10, + 571, + }, + dictWord{9, 10, 739}, + dictWord{143, 10, 91}, + dictWord{133, 10, 626}, + dictWord{146, 0, 195}, + dictWord{134, 0, 1775}, + dictWord{7, 0, 389}, + dictWord{7, 0, 700}, + dictWord{7, 0, 940}, + dictWord{8, 0, 514}, + dictWord{9, 0, 116}, + dictWord{9, 0, 535}, + dictWord{10, 0, 118}, + dictWord{11, 0, 107}, + dictWord{ + 11, + 0, + 148, + }, + dictWord{11, 0, 922}, + dictWord{12, 0, 254}, + dictWord{12, 0, 421}, + dictWord{142, 0, 238}, + dictWord{5, 10, 18}, + dictWord{6, 10, 526}, + dictWord{13, 10, 24}, + dictWord{13, 10, 110}, + dictWord{19, 10, 5}, + dictWord{147, 10, 44}, + dictWord{132, 0, 743}, + dictWord{11, 0, 292}, + dictWord{4, 10, 309}, + dictWord{5, 10, 462}, + dictWord{7, 10, 970}, + dictWord{135, 10, 1097}, + dictWord{22, 10, 30}, + dictWord{150, 10, 33}, + dictWord{139, 11, 338}, + dictWord{135, 11, 1598}, + dictWord{ + 7, + 0, + 
1283, + }, + dictWord{9, 0, 227}, + dictWord{11, 0, 325}, + dictWord{11, 0, 408}, + dictWord{14, 0, 180}, + dictWord{146, 0, 47}, + dictWord{4, 0, 953}, + dictWord{6, 0, 1805}, + dictWord{6, 0, 1814}, + dictWord{6, 0, 1862}, + dictWord{140, 0, 774}, + dictWord{6, 11, 611}, + dictWord{135, 11, 1733}, + dictWord{135, 11, 1464}, + dictWord{ + 5, + 0, + 81, + }, + dictWord{7, 0, 146}, + dictWord{7, 0, 1342}, + dictWord{8, 0, 53}, + dictWord{8, 0, 561}, + dictWord{8, 0, 694}, + dictWord{8, 0, 754}, + dictWord{9, 0, 115}, + dictWord{ + 9, + 0, + 179, + }, + dictWord{9, 0, 894}, + dictWord{10, 0, 462}, + dictWord{10, 0, 813}, + dictWord{11, 0, 230}, + dictWord{11, 0, 657}, + dictWord{11, 0, 699}, + dictWord{11, 0, 748}, + dictWord{12, 0, 119}, + dictWord{12, 0, 200}, + dictWord{12, 0, 283}, + dictWord{142, 0, 273}, + dictWord{5, 0, 408}, + dictWord{6, 0, 789}, + dictWord{6, 0, 877}, + dictWord{ + 6, + 0, + 1253, + }, + dictWord{6, 0, 1413}, + dictWord{137, 0, 747}, + dictWord{134, 10, 1704}, + dictWord{135, 11, 663}, + dictWord{6, 0, 1910}, + dictWord{6, 0, 1915}, + dictWord{6, 0, 1923}, + dictWord{9, 0, 913}, + dictWord{9, 0, 928}, + dictWord{9, 0, 950}, + dictWord{9, 0, 954}, + dictWord{9, 0, 978}, + dictWord{9, 0, 993}, + dictWord{12, 0, 812}, + dictWord{12, 0, 819}, + dictWord{12, 0, 831}, + dictWord{12, 0, 833}, + dictWord{12, 0, 838}, + dictWord{12, 0, 909}, + dictWord{12, 0, 928}, + dictWord{12, 0, 931}, + dictWord{12, 0, 950}, + dictWord{15, 0, 186}, + dictWord{15, 0, 187}, + dictWord{15, 0, 195}, + dictWord{15, 0, 196}, + dictWord{15, 0, 209}, + dictWord{15, 0, 215}, + dictWord{ + 15, + 0, + 236, + }, + dictWord{15, 0, 241}, + dictWord{15, 0, 249}, + dictWord{15, 0, 253}, + dictWord{18, 0, 180}, + dictWord{18, 0, 221}, + dictWord{18, 0, 224}, + dictWord{ + 18, + 0, + 227, + }, + dictWord{18, 0, 229}, + dictWord{149, 0, 60}, + dictWord{7, 0, 1826}, + dictWord{135, 0, 1938}, + dictWord{11, 0, 490}, + dictWord{18, 0, 143}, + dictWord{ + 5, + 10, + 86, + }, + dictWord{7, 10, 743}, + dictWord{9, 10, 85}, + dictWord{10, 10, 281}, + dictWord{10, 10, 432}, + dictWord{12, 10, 251}, + dictWord{13, 10, 118}, + dictWord{ + 142, + 10, + 378, + }, + dictWord{5, 10, 524}, + dictWord{133, 10, 744}, + dictWord{141, 11, 442}, + dictWord{10, 10, 107}, + dictWord{140, 10, 436}, + dictWord{135, 11, 503}, + dictWord{134, 0, 1162}, + dictWord{132, 10, 927}, + dictWord{7, 0, 30}, + dictWord{8, 0, 86}, + dictWord{8, 0, 315}, + dictWord{8, 0, 700}, + dictWord{9, 0, 576}, + dictWord{ + 9, + 0, + 858, + }, + dictWord{10, 0, 414}, + dictWord{11, 0, 310}, + dictWord{11, 0, 888}, + dictWord{11, 0, 904}, + dictWord{12, 0, 361}, + dictWord{13, 0, 248}, + dictWord{13, 0, 371}, + dictWord{14, 0, 142}, + dictWord{12, 10, 670}, + dictWord{146, 10, 94}, + dictWord{134, 0, 721}, + dictWord{4, 11, 113}, + dictWord{5, 11, 163}, + dictWord{5, 11, 735}, + dictWord{7, 11, 1009}, + dictWord{7, 10, 1149}, + dictWord{9, 11, 9}, + dictWord{9, 10, 156}, + dictWord{9, 11, 771}, + dictWord{12, 11, 90}, + dictWord{13, 11, 138}, + dictWord{13, 11, 410}, + dictWord{143, 11, 128}, + dictWord{138, 0, 839}, + dictWord{133, 10, 778}, + dictWord{137, 0, 617}, + dictWord{133, 10, 502}, + dictWord{ + 8, + 10, + 196, + }, + dictWord{10, 10, 283}, + dictWord{139, 10, 406}, + dictWord{6, 0, 428}, + dictWord{7, 0, 524}, + dictWord{8, 0, 169}, + dictWord{8, 0, 234}, + dictWord{9, 0, 480}, + dictWord{138, 0, 646}, + dictWord{133, 10, 855}, + dictWord{134, 0, 1648}, + dictWord{7, 0, 1205}, + dictWord{138, 0, 637}, + dictWord{7, 0, 1596}, + dictWord{ + 4, 
+ 11, + 935, + }, + dictWord{133, 11, 823}, + dictWord{5, 11, 269}, + dictWord{7, 11, 434}, + dictWord{7, 11, 891}, + dictWord{8, 11, 339}, + dictWord{9, 11, 702}, + dictWord{ + 11, + 11, + 594, + }, + dictWord{11, 11, 718}, + dictWord{145, 11, 100}, + dictWord{7, 11, 878}, + dictWord{9, 11, 485}, + dictWord{141, 11, 264}, + dictWord{4, 0, 266}, + dictWord{ + 8, + 0, + 4, + }, + dictWord{9, 0, 39}, + dictWord{10, 0, 166}, + dictWord{11, 0, 918}, + dictWord{12, 0, 635}, + dictWord{20, 0, 10}, + dictWord{22, 0, 27}, + dictWord{22, 0, 43}, + dictWord{ + 22, + 0, + 52, + }, + dictWord{134, 11, 1713}, + dictWord{7, 10, 1400}, + dictWord{9, 10, 446}, + dictWord{138, 10, 45}, + dictWord{135, 11, 900}, + dictWord{132, 0, 862}, + dictWord{134, 0, 1554}, + dictWord{135, 11, 1033}, + dictWord{19, 0, 16}, + dictWord{147, 11, 16}, + dictWord{135, 11, 1208}, + dictWord{7, 0, 157}, + dictWord{ + 136, + 0, + 279, + }, + dictWord{6, 0, 604}, + dictWord{136, 0, 391}, + dictWord{13, 10, 455}, + dictWord{15, 10, 99}, + dictWord{15, 10, 129}, + dictWord{144, 10, 68}, + dictWord{ + 135, + 10, + 172, + }, + dictWord{7, 0, 945}, + dictWord{11, 0, 713}, + dictWord{139, 0, 744}, + dictWord{4, 0, 973}, + dictWord{10, 0, 877}, + dictWord{10, 0, 937}, + dictWord{ + 10, + 0, + 938, + }, + dictWord{140, 0, 711}, + dictWord{139, 0, 1022}, + dictWord{132, 10, 568}, + dictWord{142, 11, 143}, + dictWord{4, 0, 567}, + dictWord{9, 0, 859}, + dictWord{ + 132, + 10, + 732, + }, + dictWord{7, 0, 1846}, + dictWord{136, 0, 628}, + dictWord{136, 10, 733}, + dictWord{133, 0, 762}, + dictWord{4, 10, 428}, + dictWord{135, 10, 1789}, + dictWord{10, 0, 784}, + dictWord{13, 0, 191}, + dictWord{7, 10, 2015}, + dictWord{140, 10, 665}, + dictWord{133, 0, 298}, + dictWord{7, 0, 633}, + dictWord{7, 0, 905}, + dictWord{7, 0, 909}, + dictWord{7, 0, 1538}, + dictWord{9, 0, 767}, + dictWord{140, 0, 636}, + dictWord{138, 10, 806}, + dictWord{132, 0, 795}, + dictWord{139, 0, 301}, + dictWord{135, 0, 1970}, + dictWord{5, 11, 625}, + dictWord{135, 11, 1617}, + dictWord{135, 11, 275}, + dictWord{7, 11, 37}, + dictWord{8, 11, 425}, + dictWord{ + 8, + 11, + 693, + }, + dictWord{9, 11, 720}, + dictWord{10, 11, 380}, + dictWord{10, 11, 638}, + dictWord{11, 11, 273}, + dictWord{11, 11, 307}, + dictWord{11, 11, 473}, + dictWord{ + 12, + 11, + 61, + }, + dictWord{143, 11, 43}, + dictWord{135, 11, 198}, + dictWord{134, 0, 1236}, + dictWord{7, 0, 369}, + dictWord{12, 0, 644}, + dictWord{12, 0, 645}, + dictWord{144, 0, 90}, + dictWord{19, 0, 15}, + dictWord{149, 0, 27}, + dictWord{6, 0, 71}, + dictWord{7, 0, 845}, + dictWord{8, 0, 160}, + dictWord{9, 0, 318}, + dictWord{6, 10, 1623}, + dictWord{134, 10, 1681}, + dictWord{134, 0, 1447}, + dictWord{134, 0, 1255}, + dictWord{138, 0, 735}, + dictWord{8, 0, 76}, + dictWord{132, 11, 168}, + dictWord{ + 6, + 10, + 1748, + }, + dictWord{8, 10, 715}, + dictWord{9, 10, 802}, + dictWord{10, 10, 46}, + dictWord{10, 10, 819}, + dictWord{13, 10, 308}, + dictWord{14, 10, 351}, + dictWord{14, 10, 363}, + dictWord{146, 10, 67}, + dictWord{135, 11, 91}, + dictWord{6, 0, 474}, + dictWord{4, 10, 63}, + dictWord{133, 10, 347}, + dictWord{133, 10, 749}, + dictWord{138, 0, 841}, + dictWord{133, 10, 366}, + dictWord{6, 0, 836}, + dictWord{132, 11, 225}, + dictWord{135, 0, 1622}, + dictWord{135, 10, 89}, + dictWord{ + 140, + 0, + 735, + }, + dictWord{134, 0, 1601}, + dictWord{138, 11, 145}, + dictWord{6, 0, 1390}, + dictWord{137, 0, 804}, + dictWord{142, 0, 394}, + dictWord{6, 11, 15}, + dictWord{ + 7, + 11, + 70, + }, + dictWord{10, 
11, 240}, + dictWord{147, 11, 93}, + dictWord{6, 0, 96}, + dictWord{135, 0, 1426}, + dictWord{4, 0, 651}, + dictWord{133, 0, 289}, + dictWord{ + 7, + 11, + 956, + }, + dictWord{7, 10, 977}, + dictWord{7, 11, 1157}, + dictWord{7, 11, 1506}, + dictWord{7, 11, 1606}, + dictWord{7, 11, 1615}, + dictWord{7, 11, 1619}, + dictWord{ + 7, + 11, + 1736, + }, + dictWord{7, 11, 1775}, + dictWord{8, 11, 590}, + dictWord{9, 11, 324}, + dictWord{9, 11, 736}, + dictWord{9, 11, 774}, + dictWord{9, 11, 776}, + dictWord{ + 9, + 11, + 784, + }, + dictWord{10, 11, 567}, + dictWord{10, 11, 708}, + dictWord{11, 11, 518}, + dictWord{11, 11, 613}, + dictWord{11, 11, 695}, + dictWord{11, 11, 716}, + dictWord{11, 11, 739}, + dictWord{11, 11, 770}, + dictWord{11, 11, 771}, + dictWord{11, 11, 848}, + dictWord{11, 11, 857}, + dictWord{11, 11, 931}, + dictWord{ + 11, + 11, + 947, + }, + dictWord{12, 11, 326}, + dictWord{12, 11, 387}, + dictWord{12, 11, 484}, + dictWord{12, 11, 528}, + dictWord{12, 11, 552}, + dictWord{12, 11, 613}, + dictWord{ + 13, + 11, + 189, + }, + dictWord{13, 11, 256}, + dictWord{13, 11, 340}, + dictWord{13, 11, 432}, + dictWord{13, 11, 436}, + dictWord{13, 11, 440}, + dictWord{13, 11, 454}, + dictWord{14, 11, 174}, + dictWord{14, 11, 220}, + dictWord{14, 11, 284}, + dictWord{14, 11, 390}, + dictWord{145, 11, 121}, + dictWord{7, 0, 688}, + dictWord{8, 0, 35}, + dictWord{9, 0, 511}, + dictWord{10, 0, 767}, + dictWord{147, 0, 118}, + dictWord{134, 0, 667}, + dictWord{4, 0, 513}, + dictWord{5, 10, 824}, + dictWord{133, 10, 941}, + dictWord{7, 10, 440}, + dictWord{8, 10, 230}, + dictWord{139, 10, 106}, + dictWord{134, 0, 2034}, + dictWord{135, 11, 1399}, + dictWord{143, 11, 66}, + dictWord{ + 135, + 11, + 1529, + }, + dictWord{4, 11, 145}, + dictWord{6, 11, 176}, + dictWord{7, 11, 395}, + dictWord{9, 11, 562}, + dictWord{144, 11, 28}, + dictWord{132, 11, 501}, + dictWord{132, 0, 704}, + dictWord{134, 0, 1524}, + dictWord{7, 0, 1078}, + dictWord{134, 11, 464}, + dictWord{6, 11, 509}, + dictWord{10, 11, 82}, + dictWord{20, 11, 91}, + dictWord{151, 11, 13}, + dictWord{4, 0, 720}, + dictWord{133, 0, 306}, + dictWord{133, 0, 431}, + dictWord{7, 0, 1196}, + dictWord{4, 10, 914}, + dictWord{5, 10, 800}, + dictWord{133, 10, 852}, + dictWord{135, 11, 1189}, + dictWord{10, 0, 54}, + dictWord{141, 10, 115}, + dictWord{7, 10, 564}, + dictWord{142, 10, 168}, + dictWord{ + 5, + 0, + 464, + }, + dictWord{6, 0, 236}, + dictWord{7, 0, 696}, + dictWord{7, 0, 914}, + dictWord{7, 0, 1108}, + dictWord{7, 0, 1448}, + dictWord{9, 0, 15}, + dictWord{9, 0, 564}, + dictWord{ + 10, + 0, + 14, + }, + dictWord{12, 0, 565}, + dictWord{13, 0, 449}, + dictWord{14, 0, 53}, + dictWord{15, 0, 13}, + dictWord{16, 0, 64}, + dictWord{17, 0, 41}, + dictWord{4, 10, 918}, + dictWord{133, 10, 876}, + dictWord{6, 0, 1418}, + dictWord{134, 10, 1764}, + dictWord{4, 10, 92}, + dictWord{133, 10, 274}, + dictWord{134, 0, 907}, + dictWord{ + 4, + 11, + 114, + }, + dictWord{8, 10, 501}, + dictWord{9, 11, 492}, + dictWord{13, 11, 462}, + dictWord{142, 11, 215}, + dictWord{4, 11, 77}, + dictWord{5, 11, 361}, + dictWord{ + 6, + 11, + 139, + }, + dictWord{6, 11, 401}, + dictWord{6, 11, 404}, + dictWord{7, 11, 413}, + dictWord{7, 11, 715}, + dictWord{7, 11, 1716}, + dictWord{11, 11, 279}, + dictWord{ + 12, + 11, + 179, + }, + dictWord{12, 11, 258}, + dictWord{13, 11, 244}, + dictWord{142, 11, 358}, + dictWord{6, 0, 1767}, + dictWord{12, 0, 194}, + dictWord{145, 0, 107}, + dictWord{ + 134, + 11, + 1717, + }, + dictWord{5, 10, 743}, + dictWord{142, 11, 
329}, + dictWord{4, 10, 49}, + dictWord{7, 10, 280}, + dictWord{135, 10, 1633}, + dictWord{5, 0, 840}, + dictWord{7, 11, 1061}, + dictWord{8, 11, 82}, + dictWord{11, 11, 250}, + dictWord{12, 11, 420}, + dictWord{141, 11, 184}, + dictWord{135, 11, 724}, + dictWord{ + 134, + 0, + 900, + }, + dictWord{136, 10, 47}, + dictWord{134, 0, 1436}, + dictWord{144, 11, 0}, + dictWord{6, 0, 675}, + dictWord{7, 0, 1008}, + dictWord{7, 0, 1560}, + dictWord{ + 9, + 0, + 642, + }, + dictWord{11, 0, 236}, + dictWord{14, 0, 193}, + dictWord{5, 10, 272}, + dictWord{5, 10, 908}, + dictWord{5, 10, 942}, + dictWord{8, 10, 197}, + dictWord{9, 10, 47}, + dictWord{11, 10, 538}, + dictWord{139, 10, 742}, + dictWord{4, 0, 68}, + dictWord{5, 0, 628}, + dictWord{5, 0, 634}, + dictWord{6, 0, 386}, + dictWord{7, 0, 794}, + dictWord{ + 8, + 0, + 273, + }, + dictWord{9, 0, 563}, + dictWord{10, 0, 105}, + dictWord{10, 0, 171}, + dictWord{11, 0, 94}, + dictWord{139, 0, 354}, + dictWord{135, 10, 1911}, + dictWord{ + 137, + 10, + 891, + }, + dictWord{4, 0, 95}, + dictWord{6, 0, 1297}, + dictWord{6, 0, 1604}, + dictWord{7, 0, 416}, + dictWord{139, 0, 830}, + dictWord{6, 11, 513}, + dictWord{ + 135, + 11, + 1052, + }, + dictWord{7, 0, 731}, + dictWord{13, 0, 20}, + dictWord{143, 0, 11}, + dictWord{137, 11, 899}, + dictWord{10, 0, 850}, + dictWord{140, 0, 697}, + dictWord{ + 4, + 0, + 662, + }, + dictWord{7, 11, 1417}, + dictWord{12, 11, 382}, + dictWord{17, 11, 48}, + dictWord{152, 11, 12}, + dictWord{133, 0, 736}, + dictWord{132, 0, 861}, + dictWord{ + 4, + 10, + 407, + }, + dictWord{132, 10, 560}, + dictWord{141, 10, 490}, + dictWord{6, 11, 545}, + dictWord{7, 11, 565}, + dictWord{7, 11, 1669}, + dictWord{10, 11, 114}, + dictWord{11, 11, 642}, + dictWord{140, 11, 618}, + dictWord{6, 0, 871}, + dictWord{134, 0, 1000}, + dictWord{5, 0, 864}, + dictWord{10, 0, 648}, + dictWord{11, 0, 671}, + dictWord{15, 0, 46}, + dictWord{133, 11, 5}, + dictWord{133, 0, 928}, + dictWord{11, 0, 90}, + dictWord{13, 0, 7}, + dictWord{4, 10, 475}, + dictWord{11, 10, 35}, + dictWord{ + 13, + 10, + 71, + }, + dictWord{13, 10, 177}, + dictWord{142, 10, 422}, + dictWord{136, 0, 332}, + dictWord{135, 11, 192}, + dictWord{134, 0, 1055}, + dictWord{136, 11, 763}, + dictWord{11, 0, 986}, + dictWord{140, 0, 682}, + dictWord{7, 0, 76}, + dictWord{8, 0, 44}, + dictWord{9, 0, 884}, + dictWord{10, 0, 580}, + dictWord{11, 0, 399}, + dictWord{ + 11, + 0, + 894, + }, + dictWord{143, 0, 122}, + dictWord{135, 11, 1237}, + dictWord{135, 10, 636}, + dictWord{11, 0, 300}, + dictWord{6, 10, 222}, + dictWord{7, 10, 1620}, + dictWord{ + 8, + 10, + 409, + }, + dictWord{137, 10, 693}, + dictWord{4, 11, 87}, + dictWord{5, 11, 250}, + dictWord{10, 11, 601}, + dictWord{13, 11, 298}, + dictWord{13, 11, 353}, + dictWord{141, 11, 376}, + dictWord{5, 0, 518}, + dictWord{10, 0, 340}, + dictWord{11, 0, 175}, + dictWord{149, 0, 16}, + dictWord{140, 0, 771}, + dictWord{6, 0, 1108}, + dictWord{137, 0, 831}, + dictWord{132, 0, 836}, + dictWord{135, 0, 1852}, + dictWord{4, 0, 957}, + dictWord{6, 0, 1804}, + dictWord{8, 0, 842}, + dictWord{8, 0, 843}, + dictWord{ + 8, + 0, + 851, + }, + dictWord{8, 0, 855}, + dictWord{140, 0, 767}, + dictWord{135, 11, 814}, + dictWord{4, 11, 57}, + dictWord{7, 11, 1195}, + dictWord{7, 11, 1438}, + dictWord{ + 7, + 11, + 1548, + }, + dictWord{7, 11, 1835}, + dictWord{7, 11, 1904}, + dictWord{9, 11, 757}, + dictWord{10, 11, 604}, + dictWord{139, 11, 519}, + dictWord{133, 10, 882}, + dictWord{138, 0, 246}, + dictWord{4, 0, 934}, + dictWord{5, 0, 202}, + 
dictWord{8, 0, 610}, + dictWord{7, 11, 1897}, + dictWord{12, 11, 290}, + dictWord{13, 11, 80}, + dictWord{13, 11, 437}, + dictWord{145, 11, 74}, + dictWord{8, 0, 96}, + dictWord{9, 0, 36}, + dictWord{10, 0, 607}, + dictWord{10, 0, 804}, + dictWord{10, 0, 832}, + dictWord{ + 11, + 0, + 423, + }, + dictWord{11, 0, 442}, + dictWord{12, 0, 309}, + dictWord{14, 0, 199}, + dictWord{15, 0, 90}, + dictWord{145, 0, 110}, + dictWord{132, 10, 426}, + dictWord{ + 7, + 0, + 654, + }, + dictWord{8, 0, 240}, + dictWord{6, 10, 58}, + dictWord{7, 10, 745}, + dictWord{7, 10, 1969}, + dictWord{8, 10, 675}, + dictWord{9, 10, 479}, + dictWord{9, 10, 731}, + dictWord{10, 10, 330}, + dictWord{10, 10, 593}, + dictWord{10, 10, 817}, + dictWord{11, 10, 32}, + dictWord{11, 10, 133}, + dictWord{11, 10, 221}, + dictWord{ + 145, + 10, + 68, + }, + dictWord{9, 0, 13}, + dictWord{9, 0, 398}, + dictWord{9, 0, 727}, + dictWord{10, 0, 75}, + dictWord{10, 0, 184}, + dictWord{10, 0, 230}, + dictWord{10, 0, 564}, + dictWord{ + 10, + 0, + 569, + }, + dictWord{11, 0, 973}, + dictWord{12, 0, 70}, + dictWord{12, 0, 189}, + dictWord{13, 0, 57}, + dictWord{141, 0, 257}, + dictWord{4, 11, 209}, + dictWord{ + 135, + 11, + 902, + }, + dictWord{7, 0, 391}, + dictWord{137, 10, 538}, + dictWord{134, 0, 403}, + dictWord{6, 11, 303}, + dictWord{7, 11, 335}, + dictWord{7, 11, 1437}, + dictWord{ + 7, + 11, + 1668, + }, + dictWord{8, 11, 553}, + dictWord{8, 11, 652}, + dictWord{8, 11, 656}, + dictWord{9, 11, 558}, + dictWord{11, 11, 743}, + dictWord{149, 11, 18}, + dictWord{ + 132, + 11, + 559, + }, + dictWord{11, 0, 75}, + dictWord{142, 0, 267}, + dictWord{6, 0, 815}, + dictWord{141, 11, 2}, + dictWord{141, 0, 366}, + dictWord{137, 0, 631}, + dictWord{ + 133, + 11, + 1017, + }, + dictWord{5, 0, 345}, + dictWord{135, 0, 1016}, + dictWord{133, 11, 709}, + dictWord{134, 11, 1745}, + dictWord{133, 10, 566}, + dictWord{7, 0, 952}, + dictWord{6, 10, 48}, + dictWord{9, 10, 139}, + dictWord{10, 10, 399}, + dictWord{11, 10, 469}, + dictWord{12, 10, 634}, + dictWord{141, 10, 223}, + dictWord{ + 133, + 0, + 673, + }, + dictWord{9, 0, 850}, + dictWord{7, 11, 8}, + dictWord{136, 11, 206}, + dictWord{6, 0, 662}, + dictWord{149, 0, 35}, + dictWord{4, 0, 287}, + dictWord{133, 0, 1018}, + dictWord{6, 10, 114}, + dictWord{7, 10, 1224}, + dictWord{7, 10, 1556}, + dictWord{136, 10, 3}, + dictWord{8, 10, 576}, + dictWord{137, 10, 267}, + dictWord{4, 0, 884}, + dictWord{5, 0, 34}, + dictWord{10, 0, 724}, + dictWord{12, 0, 444}, + dictWord{13, 0, 354}, + dictWord{18, 0, 32}, + dictWord{23, 0, 24}, + dictWord{23, 0, 31}, + dictWord{ + 152, + 0, + 5, + }, + dictWord{133, 10, 933}, + dictWord{132, 11, 776}, + dictWord{138, 0, 151}, + dictWord{136, 0, 427}, + dictWord{134, 0, 382}, + dictWord{132, 0, 329}, + dictWord{ + 9, + 0, + 846, + }, + dictWord{10, 0, 827}, + dictWord{138, 11, 33}, + dictWord{9, 0, 279}, + dictWord{10, 0, 407}, + dictWord{14, 0, 84}, + dictWord{22, 0, 18}, + dictWord{ + 135, + 11, + 1297, + }, + dictWord{136, 11, 406}, + dictWord{132, 0, 906}, + dictWord{136, 0, 366}, + dictWord{134, 0, 843}, + dictWord{134, 0, 1443}, + dictWord{135, 0, 1372}, + dictWord{138, 0, 992}, + dictWord{4, 0, 123}, + dictWord{5, 0, 605}, + dictWord{7, 0, 1509}, + dictWord{136, 0, 36}, + dictWord{132, 0, 649}, + dictWord{8, 11, 175}, + dictWord{10, 11, 168}, + dictWord{138, 11, 573}, + dictWord{133, 0, 767}, + dictWord{134, 0, 1018}, + dictWord{135, 11, 1305}, + dictWord{12, 10, 30}, + dictWord{ + 13, + 10, + 148, + }, + dictWord{14, 10, 87}, + dictWord{14, 10, 182}, + 
dictWord{16, 10, 42}, + dictWord{148, 10, 70}, + dictWord{134, 11, 607}, + dictWord{4, 0, 273}, + dictWord{ + 5, + 0, + 658, + }, + dictWord{133, 0, 995}, + dictWord{6, 0, 72}, + dictWord{139, 11, 174}, + dictWord{10, 0, 483}, + dictWord{12, 0, 368}, + dictWord{7, 10, 56}, + dictWord{ + 7, + 10, + 1989, + }, + dictWord{8, 10, 337}, + dictWord{8, 10, 738}, + dictWord{9, 10, 600}, + dictWord{13, 10, 447}, + dictWord{142, 10, 92}, + dictWord{5, 11, 784}, + dictWord{ + 138, + 10, + 666, + }, + dictWord{135, 0, 1345}, + dictWord{139, 11, 882}, + dictWord{134, 0, 1293}, + dictWord{133, 0, 589}, + dictWord{134, 0, 1988}, + dictWord{5, 0, 117}, + dictWord{6, 0, 514}, + dictWord{6, 0, 541}, + dictWord{7, 0, 1164}, + dictWord{7, 0, 1436}, + dictWord{8, 0, 220}, + dictWord{8, 0, 648}, + dictWord{10, 0, 688}, + dictWord{ + 139, + 0, + 560, + }, + dictWord{136, 0, 379}, + dictWord{5, 0, 686}, + dictWord{7, 10, 866}, + dictWord{135, 10, 1163}, + dictWord{132, 10, 328}, + dictWord{9, 11, 14}, + dictWord{ + 9, + 11, + 441, + }, + dictWord{10, 11, 306}, + dictWord{139, 11, 9}, + dictWord{4, 10, 101}, + dictWord{135, 10, 1171}, + dictWord{5, 10, 833}, + dictWord{136, 10, 744}, + dictWord{5, 11, 161}, + dictWord{7, 11, 839}, + dictWord{135, 11, 887}, + dictWord{7, 0, 196}, + dictWord{10, 0, 765}, + dictWord{11, 0, 347}, + dictWord{11, 0, 552}, + dictWord{11, 0, 790}, + dictWord{12, 0, 263}, + dictWord{13, 0, 246}, + dictWord{13, 0, 270}, + dictWord{13, 0, 395}, + dictWord{14, 0, 176}, + dictWord{14, 0, 190}, + dictWord{ + 14, + 0, + 398, + }, + dictWord{14, 0, 412}, + dictWord{15, 0, 32}, + dictWord{15, 0, 63}, + dictWord{16, 0, 88}, + dictWord{147, 0, 105}, + dictWord{6, 10, 9}, + dictWord{6, 10, 397}, + dictWord{7, 10, 53}, + dictWord{7, 10, 1742}, + dictWord{10, 10, 632}, + dictWord{11, 10, 828}, + dictWord{140, 10, 146}, + dictWord{5, 0, 381}, + dictWord{135, 0, 1792}, + dictWord{134, 0, 1452}, + dictWord{135, 11, 429}, + dictWord{8, 0, 367}, + dictWord{10, 0, 760}, + dictWord{14, 0, 79}, + dictWord{20, 0, 17}, + dictWord{152, 0, 0}, + dictWord{7, 0, 616}, + dictWord{138, 0, 413}, + dictWord{11, 10, 417}, + dictWord{12, 10, 223}, + dictWord{140, 10, 265}, + dictWord{7, 11, 1611}, + dictWord{13, 11, 14}, + dictWord{15, 11, 44}, + dictWord{19, 11, 13}, + dictWord{148, 11, 76}, + dictWord{135, 0, 1229}, + dictWord{6, 0, 120}, + dictWord{7, 0, 1188}, + dictWord{7, 0, 1710}, + dictWord{8, 0, 286}, + dictWord{9, 0, 667}, + dictWord{11, 0, 592}, + dictWord{139, 0, 730}, + dictWord{135, 11, 1814}, + dictWord{135, 0, 1146}, + dictWord{4, 10, 186}, + dictWord{5, 10, 157}, + dictWord{8, 10, 168}, + dictWord{138, 10, 6}, + dictWord{4, 0, 352}, + dictWord{135, 0, 687}, + dictWord{4, 0, 192}, + dictWord{5, 0, 49}, + dictWord{ + 6, + 0, + 200, + }, + dictWord{6, 0, 293}, + dictWord{6, 0, 1696}, + dictWord{135, 0, 1151}, + dictWord{133, 10, 875}, + dictWord{5, 10, 773}, + dictWord{5, 10, 991}, + dictWord{ + 6, + 10, + 1635, + }, + dictWord{134, 10, 1788}, + dictWord{7, 10, 111}, + dictWord{136, 10, 581}, + dictWord{6, 0, 935}, + dictWord{134, 0, 1151}, + dictWord{134, 0, 1050}, + dictWord{132, 0, 650}, + dictWord{132, 0, 147}, + dictWord{11, 0, 194}, + dictWord{12, 0, 62}, + dictWord{12, 0, 88}, + dictWord{11, 11, 194}, + dictWord{12, 11, 62}, + dictWord{140, 11, 88}, + dictWord{6, 0, 339}, + dictWord{135, 0, 923}, + dictWord{134, 10, 1747}, + dictWord{7, 11, 643}, + dictWord{136, 11, 236}, + dictWord{ + 133, + 0, + 934, + }, + dictWord{7, 10, 1364}, + dictWord{7, 10, 1907}, + dictWord{141, 10, 158}, + dictWord{132, 10, 
659}, + dictWord{4, 10, 404}, + dictWord{135, 10, 675}, + dictWord{7, 11, 581}, + dictWord{9, 11, 644}, + dictWord{137, 11, 699}, + dictWord{13, 0, 211}, + dictWord{14, 0, 133}, + dictWord{14, 0, 204}, + dictWord{15, 0, 64}, + dictWord{ + 15, + 0, + 69, + }, + dictWord{15, 0, 114}, + dictWord{16, 0, 10}, + dictWord{19, 0, 23}, + dictWord{19, 0, 35}, + dictWord{19, 0, 39}, + dictWord{19, 0, 51}, + dictWord{19, 0, 71}, + dictWord{19, 0, 75}, + dictWord{152, 0, 15}, + dictWord{133, 10, 391}, + dictWord{5, 11, 54}, + dictWord{135, 11, 1513}, + dictWord{7, 0, 222}, + dictWord{8, 0, 341}, + dictWord{ + 5, + 10, + 540, + }, + dictWord{134, 10, 1697}, + dictWord{134, 10, 78}, + dictWord{132, 11, 744}, + dictWord{136, 0, 293}, + dictWord{137, 11, 701}, + dictWord{ + 7, + 11, + 930, + }, + dictWord{10, 11, 402}, + dictWord{10, 11, 476}, + dictWord{13, 11, 452}, + dictWord{18, 11, 55}, + dictWord{147, 11, 104}, + dictWord{132, 0, 637}, + dictWord{133, 10, 460}, + dictWord{8, 11, 50}, + dictWord{137, 11, 624}, + dictWord{132, 11, 572}, + dictWord{134, 0, 1159}, + dictWord{4, 10, 199}, + dictWord{ + 139, + 10, + 34, + }, + dictWord{134, 0, 847}, + dictWord{134, 10, 388}, + dictWord{6, 11, 43}, + dictWord{7, 11, 38}, + dictWord{8, 11, 248}, + dictWord{9, 11, 504}, + dictWord{ + 138, + 11, + 513, + }, + dictWord{9, 0, 683}, + dictWord{4, 10, 511}, + dictWord{6, 10, 608}, + dictWord{9, 10, 333}, + dictWord{10, 10, 602}, + dictWord{11, 10, 441}, + dictWord{ + 11, + 10, + 723, + }, + dictWord{11, 10, 976}, + dictWord{140, 10, 357}, + dictWord{9, 0, 867}, + dictWord{138, 0, 837}, + dictWord{6, 0, 944}, + dictWord{135, 11, 326}, + dictWord{ + 135, + 0, + 1809, + }, + dictWord{5, 10, 938}, + dictWord{7, 11, 783}, + dictWord{136, 10, 707}, + dictWord{133, 11, 766}, + dictWord{133, 11, 363}, + dictWord{6, 0, 170}, + dictWord{7, 0, 1080}, + dictWord{8, 0, 395}, + dictWord{8, 0, 487}, + dictWord{141, 0, 147}, + dictWord{6, 11, 258}, + dictWord{140, 11, 409}, + dictWord{4, 0, 535}, + dictWord{ + 8, + 0, + 618, + }, + dictWord{5, 11, 249}, + dictWord{148, 11, 82}, + dictWord{6, 0, 1379}, + dictWord{149, 11, 15}, + dictWord{135, 0, 1625}, + dictWord{150, 0, 23}, + dictWord{ + 5, + 11, + 393, + }, + dictWord{6, 11, 378}, + dictWord{7, 11, 1981}, + dictWord{9, 11, 32}, + dictWord{9, 11, 591}, + dictWord{10, 11, 685}, + dictWord{10, 11, 741}, + dictWord{ + 142, + 11, + 382, + }, + dictWord{133, 11, 788}, + dictWord{7, 11, 1968}, + dictWord{10, 11, 19}, + dictWord{139, 11, 911}, + dictWord{7, 11, 1401}, + dictWord{ + 135, + 11, + 1476, + }, + dictWord{4, 11, 61}, + dictWord{5, 11, 58}, + dictWord{5, 11, 171}, + dictWord{5, 11, 635}, + dictWord{5, 11, 683}, + dictWord{5, 11, 700}, + dictWord{6, 11, 291}, + dictWord{6, 11, 566}, + dictWord{7, 11, 1650}, + dictWord{11, 11, 523}, + dictWord{12, 11, 273}, + dictWord{12, 11, 303}, + dictWord{15, 11, 39}, + dictWord{ + 143, + 11, + 111, + }, + dictWord{6, 10, 469}, + dictWord{7, 10, 1709}, + dictWord{138, 10, 515}, + dictWord{4, 0, 778}, + dictWord{134, 11, 589}, + dictWord{132, 0, 46}, + dictWord{ + 5, + 0, + 811, + }, + dictWord{6, 0, 1679}, + dictWord{6, 0, 1714}, + dictWord{135, 0, 2032}, + dictWord{7, 0, 1458}, + dictWord{9, 0, 407}, + dictWord{11, 0, 15}, + dictWord{12, 0, 651}, + dictWord{149, 0, 37}, + dictWord{7, 0, 938}, + dictWord{132, 10, 500}, + dictWord{6, 0, 34}, + dictWord{7, 0, 69}, + dictWord{7, 0, 1089}, + dictWord{7, 0, 1281}, + dictWord{ + 8, + 0, + 708, + }, + dictWord{8, 0, 721}, + dictWord{9, 0, 363}, + dictWord{148, 0, 98}, + dictWord{10, 11, 231}, + 
dictWord{147, 11, 124}, + dictWord{7, 11, 726}, + dictWord{ + 152, + 11, + 9, + }, + dictWord{5, 10, 68}, + dictWord{134, 10, 383}, + dictWord{136, 11, 583}, + dictWord{4, 11, 917}, + dictWord{133, 11, 1005}, + dictWord{11, 10, 216}, + dictWord{139, 10, 340}, + dictWord{135, 11, 1675}, + dictWord{8, 0, 441}, + dictWord{10, 0, 314}, + dictWord{143, 0, 3}, + dictWord{132, 11, 919}, + dictWord{4, 10, 337}, + dictWord{6, 10, 353}, + dictWord{7, 10, 1934}, + dictWord{8, 10, 488}, + dictWord{137, 10, 429}, + dictWord{7, 0, 889}, + dictWord{7, 10, 1795}, + dictWord{8, 10, 259}, + dictWord{9, 10, 135}, + dictWord{9, 10, 177}, + dictWord{9, 10, 860}, + dictWord{10, 10, 825}, + dictWord{11, 10, 115}, + dictWord{11, 10, 370}, + dictWord{11, 10, 405}, + dictWord{11, 10, 604}, + dictWord{12, 10, 10}, + dictWord{12, 10, 667}, + dictWord{12, 10, 669}, + dictWord{13, 10, 76}, + dictWord{14, 10, 310}, + dictWord{ + 15, + 10, + 76, + }, + dictWord{15, 10, 147}, + dictWord{148, 10, 23}, + dictWord{4, 10, 15}, + dictWord{4, 11, 255}, + dictWord{5, 10, 22}, + dictWord{5, 11, 302}, + dictWord{6, 11, 132}, + dictWord{6, 10, 244}, + dictWord{7, 10, 40}, + dictWord{7, 11, 128}, + dictWord{7, 10, 200}, + dictWord{7, 11, 283}, + dictWord{7, 10, 906}, + dictWord{7, 10, 1199}, + dictWord{ + 7, + 11, + 1299, + }, + dictWord{9, 10, 616}, + dictWord{10, 11, 52}, + dictWord{10, 11, 514}, + dictWord{10, 10, 716}, + dictWord{11, 10, 635}, + dictWord{11, 10, 801}, + dictWord{11, 11, 925}, + dictWord{12, 10, 458}, + dictWord{13, 11, 92}, + dictWord{142, 11, 309}, + dictWord{132, 0, 462}, + dictWord{137, 11, 173}, + dictWord{ + 135, + 10, + 1735, + }, + dictWord{8, 0, 525}, + dictWord{5, 10, 598}, + dictWord{7, 10, 791}, + dictWord{8, 10, 108}, + dictWord{137, 10, 123}, + dictWord{5, 0, 73}, + dictWord{6, 0, 23}, + dictWord{134, 0, 338}, + dictWord{132, 0, 676}, + dictWord{132, 10, 683}, + dictWord{7, 0, 725}, + dictWord{8, 0, 498}, + dictWord{139, 0, 268}, + dictWord{12, 0, 21}, + dictWord{151, 0, 7}, + dictWord{135, 0, 773}, + dictWord{4, 10, 155}, + dictWord{135, 10, 1689}, + dictWord{4, 0, 164}, + dictWord{5, 0, 730}, + dictWord{5, 10, 151}, + dictWord{ + 5, + 10, + 741, + }, + dictWord{6, 11, 210}, + dictWord{7, 10, 498}, + dictWord{7, 10, 870}, + dictWord{7, 10, 1542}, + dictWord{12, 10, 213}, + dictWord{14, 10, 36}, + dictWord{ + 14, + 10, + 391, + }, + dictWord{17, 10, 111}, + dictWord{18, 10, 6}, + dictWord{18, 10, 46}, + dictWord{18, 10, 151}, + dictWord{19, 10, 36}, + dictWord{20, 10, 32}, + dictWord{ + 20, + 10, + 56, + }, + dictWord{20, 10, 69}, + dictWord{20, 10, 102}, + dictWord{21, 10, 4}, + dictWord{22, 10, 8}, + dictWord{22, 10, 10}, + dictWord{22, 10, 14}, + dictWord{ + 150, + 10, + 31, + }, + dictWord{4, 10, 624}, + dictWord{135, 10, 1752}, + dictWord{4, 0, 583}, + dictWord{9, 0, 936}, + dictWord{15, 0, 214}, + dictWord{18, 0, 199}, + dictWord{24, 0, 26}, + dictWord{134, 11, 588}, + dictWord{7, 0, 1462}, + dictWord{11, 0, 659}, + dictWord{4, 11, 284}, + dictWord{134, 11, 223}, + dictWord{133, 0, 220}, + dictWord{ + 139, + 0, + 803, + }, + dictWord{132, 0, 544}, + dictWord{4, 10, 492}, + dictWord{133, 10, 451}, + dictWord{16, 0, 98}, + dictWord{148, 0, 119}, + dictWord{4, 11, 218}, + dictWord{ + 7, + 11, + 526, + }, + dictWord{143, 11, 137}, + dictWord{135, 10, 835}, + dictWord{4, 11, 270}, + dictWord{5, 11, 192}, + dictWord{6, 11, 332}, + dictWord{7, 11, 1322}, + dictWord{ + 13, + 11, + 9, + }, + dictWord{13, 10, 70}, + dictWord{14, 11, 104}, + dictWord{142, 11, 311}, + dictWord{132, 10, 539}, + 
dictWord{140, 11, 661}, + dictWord{5, 0, 176}, + dictWord{ + 6, + 0, + 437, + }, + dictWord{6, 0, 564}, + dictWord{11, 0, 181}, + dictWord{141, 0, 183}, + dictWord{135, 0, 1192}, + dictWord{6, 10, 113}, + dictWord{135, 10, 436}, + dictWord{136, 10, 718}, + dictWord{135, 10, 520}, + dictWord{135, 0, 1878}, + dictWord{140, 11, 196}, + dictWord{7, 11, 379}, + dictWord{8, 11, 481}, + dictWord{ + 137, + 11, + 377, + }, + dictWord{5, 11, 1003}, + dictWord{6, 11, 149}, + dictWord{137, 11, 746}, + dictWord{8, 11, 262}, + dictWord{9, 11, 627}, + dictWord{10, 11, 18}, + dictWord{ + 11, + 11, + 214, + }, + dictWord{11, 11, 404}, + dictWord{11, 11, 457}, + dictWord{11, 11, 780}, + dictWord{11, 11, 849}, + dictWord{11, 11, 913}, + dictWord{13, 11, 330}, + dictWord{13, 11, 401}, + dictWord{142, 11, 200}, + dictWord{149, 0, 26}, + dictWord{136, 11, 304}, + dictWord{132, 11, 142}, + dictWord{135, 0, 944}, + dictWord{ + 4, + 0, + 790, + }, + dictWord{5, 0, 273}, + dictWord{134, 0, 394}, + dictWord{134, 0, 855}, + dictWord{4, 0, 135}, + dictWord{6, 0, 127}, + dictWord{7, 0, 1185}, + dictWord{7, 0, 1511}, + dictWord{8, 0, 613}, + dictWord{11, 0, 5}, + dictWord{12, 0, 336}, + dictWord{12, 0, 495}, + dictWord{12, 0, 586}, + dictWord{12, 0, 660}, + dictWord{12, 0, 668}, + dictWord{ + 14, + 0, + 385, + }, + dictWord{15, 0, 118}, + dictWord{17, 0, 20}, + dictWord{146, 0, 98}, + dictWord{6, 0, 230}, + dictWord{9, 0, 752}, + dictWord{18, 0, 109}, + dictWord{12, 10, 610}, + dictWord{13, 10, 431}, + dictWord{144, 10, 59}, + dictWord{7, 0, 1954}, + dictWord{135, 11, 925}, + dictWord{4, 11, 471}, + dictWord{5, 11, 51}, + dictWord{6, 11, 602}, + dictWord{8, 11, 484}, + dictWord{10, 11, 195}, + dictWord{140, 11, 159}, + dictWord{132, 10, 307}, + dictWord{136, 11, 688}, + dictWord{132, 11, 697}, + dictWord{ + 7, + 11, + 812, + }, + dictWord{7, 11, 1261}, + dictWord{7, 11, 1360}, + dictWord{9, 11, 632}, + dictWord{140, 11, 352}, + dictWord{5, 0, 162}, + dictWord{8, 0, 68}, + dictWord{ + 133, + 10, + 964, + }, + dictWord{4, 0, 654}, + dictWord{136, 11, 212}, + dictWord{4, 0, 156}, + dictWord{7, 0, 998}, + dictWord{7, 0, 1045}, + dictWord{7, 0, 1860}, + dictWord{9, 0, 48}, + dictWord{9, 0, 692}, + dictWord{11, 0, 419}, + dictWord{139, 0, 602}, + dictWord{133, 11, 221}, + dictWord{4, 11, 373}, + dictWord{5, 11, 283}, + dictWord{6, 11, 480}, + dictWord{135, 11, 609}, + dictWord{142, 11, 216}, + dictWord{132, 0, 240}, + dictWord{6, 11, 192}, + dictWord{9, 11, 793}, + dictWord{145, 11, 55}, + dictWord{ + 4, + 10, + 75, + }, + dictWord{5, 10, 180}, + dictWord{6, 10, 500}, + dictWord{7, 10, 58}, + dictWord{7, 10, 710}, + dictWord{138, 10, 645}, + dictWord{4, 11, 132}, + dictWord{5, 11, 69}, + dictWord{5, 10, 649}, + dictWord{135, 11, 1242}, + dictWord{6, 10, 276}, + dictWord{7, 10, 282}, + dictWord{7, 10, 879}, + dictWord{7, 10, 924}, + dictWord{8, 10, 459}, + dictWord{9, 10, 599}, + dictWord{9, 10, 754}, + dictWord{11, 10, 574}, + dictWord{12, 10, 128}, + dictWord{12, 10, 494}, + dictWord{13, 10, 52}, + dictWord{13, 10, 301}, + dictWord{15, 10, 30}, + dictWord{143, 10, 132}, + dictWord{132, 10, 200}, + dictWord{4, 11, 111}, + dictWord{135, 11, 302}, + dictWord{9, 0, 197}, + dictWord{ + 10, + 0, + 300, + }, + dictWord{12, 0, 473}, + dictWord{13, 0, 90}, + dictWord{141, 0, 405}, + dictWord{132, 11, 767}, + dictWord{6, 11, 42}, + dictWord{7, 11, 1416}, + dictWord{ + 7, + 11, + 1590, + }, + dictWord{7, 11, 2005}, + dictWord{8, 11, 131}, + dictWord{8, 11, 466}, + dictWord{9, 11, 672}, + dictWord{13, 11, 252}, + dictWord{148, 11, 103}, 
+ dictWord{ + 8, + 0, + 958, + }, + dictWord{8, 0, 999}, + dictWord{10, 0, 963}, + dictWord{138, 0, 1001}, + dictWord{135, 10, 1621}, + dictWord{135, 0, 858}, + dictWord{4, 0, 606}, + dictWord{ + 137, + 11, + 444, + }, + dictWord{6, 11, 44}, + dictWord{136, 11, 368}, + dictWord{139, 11, 172}, + dictWord{4, 11, 570}, + dictWord{133, 11, 120}, + dictWord{139, 11, 624}, + dictWord{7, 0, 1978}, + dictWord{8, 0, 676}, + dictWord{6, 10, 225}, + dictWord{137, 10, 211}, + dictWord{7, 0, 972}, + dictWord{11, 0, 102}, + dictWord{136, 10, 687}, + dictWord{6, 11, 227}, + dictWord{135, 11, 1589}, + dictWord{8, 10, 58}, + dictWord{9, 10, 724}, + dictWord{11, 10, 809}, + dictWord{13, 10, 113}, + dictWord{ + 145, + 10, + 72, + }, + dictWord{4, 0, 361}, + dictWord{133, 0, 315}, + dictWord{132, 0, 461}, + dictWord{6, 10, 345}, + dictWord{135, 10, 1247}, + dictWord{132, 0, 472}, + dictWord{ + 8, + 10, + 767, + }, + dictWord{8, 10, 803}, + dictWord{9, 10, 301}, + dictWord{137, 10, 903}, + dictWord{135, 11, 1333}, + dictWord{135, 11, 477}, + dictWord{7, 10, 1949}, + dictWord{136, 10, 674}, + dictWord{6, 0, 905}, + dictWord{138, 0, 747}, + dictWord{133, 0, 155}, + dictWord{134, 10, 259}, + dictWord{7, 0, 163}, + dictWord{8, 0, 319}, + dictWord{9, 0, 402}, + dictWord{10, 0, 24}, + dictWord{10, 0, 681}, + dictWord{11, 0, 200}, + dictWord{12, 0, 253}, + dictWord{12, 0, 410}, + dictWord{142, 0, 219}, + dictWord{ + 5, + 0, + 475, + }, + dictWord{7, 0, 1780}, + dictWord{9, 0, 230}, + dictWord{11, 0, 297}, + dictWord{11, 0, 558}, + dictWord{14, 0, 322}, + dictWord{19, 0, 76}, + dictWord{6, 11, 1667}, + dictWord{7, 11, 2036}, + dictWord{138, 11, 600}, + dictWord{136, 10, 254}, + dictWord{6, 0, 848}, + dictWord{135, 0, 1956}, + dictWord{6, 11, 511}, + dictWord{ + 140, + 11, + 132, + }, + dictWord{5, 11, 568}, + dictWord{6, 11, 138}, + dictWord{135, 11, 1293}, + dictWord{6, 0, 631}, + dictWord{137, 0, 838}, + dictWord{149, 0, 36}, + dictWord{ + 4, + 11, + 565, + }, + dictWord{8, 11, 23}, + dictWord{136, 11, 827}, + dictWord{5, 0, 944}, + dictWord{134, 0, 1769}, + dictWord{4, 0, 144}, + dictWord{6, 0, 842}, + dictWord{ + 6, + 0, + 1400, + }, + dictWord{4, 11, 922}, + dictWord{133, 11, 1023}, + dictWord{133, 10, 248}, + dictWord{9, 10, 800}, + dictWord{10, 10, 693}, + dictWord{11, 10, 482}, + dictWord{11, 10, 734}, + dictWord{139, 10, 789}, + dictWord{7, 11, 1002}, + dictWord{139, 11, 145}, + dictWord{4, 10, 116}, + dictWord{5, 10, 95}, + dictWord{5, 10, 445}, + dictWord{7, 10, 1688}, + dictWord{8, 10, 29}, + dictWord{9, 10, 272}, + dictWord{11, 10, 509}, + dictWord{139, 10, 915}, + dictWord{14, 0, 369}, + dictWord{146, 0, 72}, + dictWord{135, 10, 1641}, + dictWord{132, 11, 740}, + dictWord{133, 10, 543}, + dictWord{140, 11, 116}, + dictWord{6, 0, 247}, + dictWord{9, 0, 555}, + dictWord{ + 5, + 10, + 181, + }, + dictWord{136, 10, 41}, + dictWord{133, 10, 657}, + dictWord{136, 0, 996}, + dictWord{138, 10, 709}, + dictWord{7, 0, 189}, + dictWord{8, 10, 202}, + dictWord{ + 138, + 10, + 536, + }, + dictWord{136, 11, 402}, + dictWord{4, 11, 716}, + dictWord{141, 11, 31}, + dictWord{10, 0, 280}, + dictWord{138, 0, 797}, + dictWord{9, 10, 423}, + dictWord{140, 10, 89}, + dictWord{8, 10, 113}, + dictWord{9, 10, 877}, + dictWord{10, 10, 554}, + dictWord{11, 10, 83}, + dictWord{12, 10, 136}, + dictWord{147, 10, 109}, + dictWord{133, 10, 976}, + dictWord{7, 0, 746}, + dictWord{132, 10, 206}, + dictWord{136, 0, 526}, + dictWord{139, 0, 345}, + dictWord{136, 0, 1017}, + dictWord{ + 8, + 11, + 152, + }, + dictWord{9, 11, 53}, + 
dictWord{9, 11, 268}, + dictWord{9, 11, 901}, + dictWord{10, 11, 518}, + dictWord{10, 11, 829}, + dictWord{11, 11, 188}, + dictWord{ + 13, + 11, + 74, + }, + dictWord{14, 11, 46}, + dictWord{15, 11, 17}, + dictWord{15, 11, 33}, + dictWord{17, 11, 40}, + dictWord{18, 11, 36}, + dictWord{19, 11, 20}, + dictWord{22, 11, 1}, + dictWord{152, 11, 2}, + dictWord{133, 11, 736}, + dictWord{136, 11, 532}, + dictWord{5, 0, 428}, + dictWord{138, 0, 651}, + dictWord{135, 11, 681}, + dictWord{ + 135, + 0, + 1162, + }, + dictWord{7, 0, 327}, + dictWord{13, 0, 230}, + dictWord{17, 0, 113}, + dictWord{8, 10, 226}, + dictWord{10, 10, 537}, + dictWord{11, 10, 570}, + dictWord{ + 11, + 10, + 605, + }, + dictWord{11, 10, 799}, + dictWord{11, 10, 804}, + dictWord{12, 10, 85}, + dictWord{12, 10, 516}, + dictWord{12, 10, 623}, + dictWord{12, 11, 677}, + dictWord{ + 13, + 10, + 361, + }, + dictWord{14, 10, 77}, + dictWord{14, 10, 78}, + dictWord{147, 10, 110}, + dictWord{4, 0, 792}, + dictWord{7, 0, 1717}, + dictWord{10, 0, 546}, + dictWord{ + 132, + 10, + 769, + }, + dictWord{4, 11, 684}, + dictWord{136, 11, 384}, + dictWord{132, 10, 551}, + dictWord{134, 0, 1203}, + dictWord{9, 10, 57}, + dictWord{9, 10, 459}, + dictWord{10, 10, 425}, + dictWord{11, 10, 119}, + dictWord{12, 10, 184}, + dictWord{12, 10, 371}, + dictWord{13, 10, 358}, + dictWord{145, 10, 51}, + dictWord{5, 0, 672}, + dictWord{5, 10, 814}, + dictWord{8, 10, 10}, + dictWord{9, 10, 421}, + dictWord{9, 10, 729}, + dictWord{10, 10, 609}, + dictWord{139, 10, 689}, + dictWord{138, 0, 189}, + dictWord{134, 10, 624}, + dictWord{7, 11, 110}, + dictWord{7, 11, 188}, + dictWord{8, 11, 290}, + dictWord{8, 11, 591}, + dictWord{9, 11, 382}, + dictWord{9, 11, 649}, + dictWord{11, 11, 71}, + dictWord{11, 11, 155}, + dictWord{11, 11, 313}, + dictWord{12, 11, 5}, + dictWord{13, 11, 325}, + dictWord{142, 11, 287}, + dictWord{133, 0, 99}, + dictWord{6, 0, 1053}, + dictWord{135, 0, 298}, + dictWord{7, 11, 360}, + dictWord{7, 11, 425}, + dictWord{9, 11, 66}, + dictWord{9, 11, 278}, + dictWord{138, 11, 644}, + dictWord{4, 0, 397}, + dictWord{136, 0, 555}, + dictWord{137, 10, 269}, + dictWord{132, 10, 528}, + dictWord{4, 11, 900}, + dictWord{133, 11, 861}, + dictWord{ + 6, + 0, + 1157, + }, + dictWord{5, 11, 254}, + dictWord{7, 11, 985}, + dictWord{136, 11, 73}, + dictWord{7, 11, 1959}, + dictWord{136, 11, 683}, + dictWord{12, 0, 398}, + dictWord{ + 20, + 0, + 39, + }, + dictWord{21, 0, 11}, + dictWord{150, 0, 41}, + dictWord{4, 0, 485}, + dictWord{7, 0, 353}, + dictWord{135, 0, 1523}, + dictWord{6, 0, 366}, + dictWord{7, 0, 1384}, + dictWord{135, 0, 1601}, + dictWord{138, 0, 787}, + dictWord{137, 0, 282}, + dictWord{5, 10, 104}, + dictWord{6, 10, 173}, + dictWord{135, 10, 1631}, + dictWord{ + 139, + 11, + 146, + }, + dictWord{4, 0, 157}, + dictWord{133, 0, 471}, + dictWord{134, 0, 941}, + dictWord{132, 11, 725}, + dictWord{7, 0, 1336}, + dictWord{8, 10, 138}, + dictWord{ + 8, + 10, + 342, + }, + dictWord{9, 10, 84}, + dictWord{10, 10, 193}, + dictWord{11, 10, 883}, + dictWord{140, 10, 359}, + dictWord{134, 11, 196}, + dictWord{136, 0, 116}, + dictWord{133, 11, 831}, + dictWord{134, 0, 787}, + dictWord{134, 10, 95}, + dictWord{6, 10, 406}, + dictWord{10, 10, 409}, + dictWord{10, 10, 447}, + dictWord{ + 11, + 10, + 44, + }, + dictWord{140, 10, 100}, + dictWord{5, 0, 160}, + dictWord{7, 0, 363}, + dictWord{7, 0, 589}, + dictWord{10, 0, 170}, + dictWord{141, 0, 55}, + dictWord{134, 0, 1815}, + dictWord{132, 0, 866}, + dictWord{6, 0, 889}, + dictWord{6, 0, 1067}, + 
dictWord{6, 0, 1183}, + dictWord{4, 11, 321}, + dictWord{134, 11, 569}, + dictWord{5, 11, 848}, + dictWord{134, 11, 66}, + dictWord{4, 11, 36}, + dictWord{6, 10, 1636}, + dictWord{7, 11, 1387}, + dictWord{10, 11, 205}, + dictWord{11, 11, 755}, + dictWord{ + 141, + 11, + 271, + }, + dictWord{132, 0, 689}, + dictWord{9, 0, 820}, + dictWord{4, 10, 282}, + dictWord{7, 10, 1034}, + dictWord{11, 10, 398}, + dictWord{11, 10, 634}, + dictWord{ + 12, + 10, + 1, + }, + dictWord{12, 10, 79}, + dictWord{12, 10, 544}, + dictWord{14, 10, 237}, + dictWord{17, 10, 10}, + dictWord{146, 10, 20}, + dictWord{4, 0, 108}, + dictWord{7, 0, 804}, + dictWord{139, 0, 498}, + dictWord{132, 11, 887}, + dictWord{6, 0, 1119}, + dictWord{135, 11, 620}, + dictWord{6, 11, 165}, + dictWord{138, 11, 388}, + dictWord{ + 5, + 0, + 244, + }, + dictWord{5, 10, 499}, + dictWord{6, 10, 476}, + dictWord{7, 10, 600}, + dictWord{7, 10, 888}, + dictWord{135, 10, 1096}, + dictWord{140, 0, 609}, + dictWord{ + 135, + 0, + 1005, + }, + dictWord{4, 0, 412}, + dictWord{133, 0, 581}, + dictWord{4, 11, 719}, + dictWord{135, 11, 155}, + dictWord{7, 10, 296}, + dictWord{7, 10, 596}, + dictWord{ + 8, + 10, + 560, + }, + dictWord{8, 10, 586}, + dictWord{9, 10, 612}, + dictWord{11, 10, 304}, + dictWord{12, 10, 46}, + dictWord{13, 10, 89}, + dictWord{14, 10, 112}, + dictWord{ + 145, + 10, + 122, + }, + dictWord{4, 0, 895}, + dictWord{133, 0, 772}, + dictWord{142, 11, 307}, + dictWord{135, 0, 1898}, + dictWord{4, 0, 926}, + dictWord{133, 0, 983}, + dictWord{4, 11, 353}, + dictWord{6, 11, 146}, + dictWord{6, 11, 1789}, + dictWord{7, 11, 288}, + dictWord{7, 11, 990}, + dictWord{7, 11, 1348}, + dictWord{9, 11, 665}, + dictWord{ + 9, + 11, + 898, + }, + dictWord{11, 11, 893}, + dictWord{142, 11, 212}, + dictWord{132, 0, 538}, + dictWord{133, 11, 532}, + dictWord{6, 0, 294}, + dictWord{7, 0, 1267}, + dictWord{8, 0, 624}, + dictWord{141, 0, 496}, + dictWord{7, 0, 1325}, + dictWord{4, 11, 45}, + dictWord{135, 11, 1257}, + dictWord{138, 0, 301}, + dictWord{9, 0, 298}, + dictWord{12, 0, 291}, + dictWord{13, 0, 276}, + dictWord{14, 0, 6}, + dictWord{17, 0, 18}, + dictWord{21, 0, 32}, + dictWord{7, 10, 1599}, + dictWord{7, 10, 1723}, + dictWord{ + 8, + 10, + 79, + }, + dictWord{8, 10, 106}, + dictWord{8, 10, 190}, + dictWord{8, 10, 302}, + dictWord{8, 10, 383}, + dictWord{8, 10, 713}, + dictWord{9, 10, 119}, + dictWord{9, 10, 233}, + dictWord{9, 10, 419}, + dictWord{9, 10, 471}, + dictWord{10, 10, 181}, + dictWord{10, 10, 406}, + dictWord{11, 10, 57}, + dictWord{11, 10, 85}, + dictWord{11, 10, 120}, + dictWord{11, 10, 177}, + dictWord{11, 10, 296}, + dictWord{11, 10, 382}, + dictWord{11, 10, 454}, + dictWord{11, 10, 758}, + dictWord{11, 10, 999}, + dictWord{ + 12, + 10, + 27, + }, + dictWord{12, 10, 131}, + dictWord{12, 10, 245}, + dictWord{12, 10, 312}, + dictWord{12, 10, 446}, + dictWord{12, 10, 454}, + dictWord{13, 10, 98}, + dictWord{ + 13, + 10, + 426, + }, + dictWord{13, 10, 508}, + dictWord{14, 10, 163}, + dictWord{14, 10, 272}, + dictWord{14, 10, 277}, + dictWord{14, 10, 370}, + dictWord{15, 10, 95}, + dictWord{15, 10, 138}, + dictWord{15, 10, 167}, + dictWord{17, 10, 38}, + dictWord{148, 10, 96}, + dictWord{132, 0, 757}, + dictWord{134, 0, 1263}, + dictWord{4, 0, 820}, + dictWord{134, 10, 1759}, + dictWord{133, 0, 722}, + dictWord{136, 11, 816}, + dictWord{138, 10, 372}, + dictWord{145, 10, 16}, + dictWord{134, 0, 1039}, + dictWord{ + 4, + 0, + 991, + }, + dictWord{134, 0, 2028}, + dictWord{133, 10, 258}, + dictWord{7, 0, 1875}, + dictWord{139, 0, 
124}, + dictWord{6, 11, 559}, + dictWord{6, 11, 1691}, + dictWord{135, 11, 586}, + dictWord{5, 0, 324}, + dictWord{7, 0, 881}, + dictWord{8, 10, 134}, + dictWord{9, 10, 788}, + dictWord{140, 10, 438}, + dictWord{7, 11, 1823}, + dictWord{139, 11, 693}, + dictWord{6, 0, 1348}, + dictWord{134, 0, 1545}, + dictWord{134, 0, 911}, + dictWord{132, 0, 954}, + dictWord{8, 0, 329}, + dictWord{8, 0, 414}, + dictWord{7, 10, 1948}, + dictWord{135, 10, 2004}, + dictWord{5, 0, 517}, + dictWord{6, 10, 439}, + dictWord{7, 10, 780}, + dictWord{135, 10, 1040}, + dictWord{ + 132, + 0, + 816, + }, + dictWord{5, 10, 1}, + dictWord{6, 10, 81}, + dictWord{138, 10, 520}, + dictWord{9, 0, 713}, + dictWord{10, 0, 222}, + dictWord{5, 10, 482}, + dictWord{8, 10, 98}, + dictWord{10, 10, 700}, + dictWord{10, 10, 822}, + dictWord{11, 10, 302}, + dictWord{11, 10, 778}, + dictWord{12, 10, 50}, + dictWord{12, 10, 127}, + dictWord{12, 10, 396}, + dictWord{13, 10, 62}, + dictWord{13, 10, 328}, + dictWord{14, 10, 122}, + dictWord{147, 10, 72}, + dictWord{137, 0, 33}, + dictWord{5, 10, 2}, + dictWord{7, 10, 1494}, + dictWord{136, 10, 589}, + dictWord{6, 10, 512}, + dictWord{7, 10, 797}, + dictWord{8, 10, 253}, + dictWord{9, 10, 77}, + dictWord{10, 10, 1}, + dictWord{10, 11, 108}, + dictWord{10, 10, 129}, + dictWord{10, 10, 225}, + dictWord{11, 11, 116}, + dictWord{11, 10, 118}, + dictWord{11, 10, 226}, + dictWord{11, 10, 251}, + dictWord{ + 11, + 10, + 430, + }, + dictWord{11, 10, 701}, + dictWord{11, 10, 974}, + dictWord{11, 10, 982}, + dictWord{12, 10, 64}, + dictWord{12, 10, 260}, + dictWord{12, 10, 488}, + dictWord{ + 140, + 10, + 690, + }, + dictWord{134, 11, 456}, + dictWord{133, 11, 925}, + dictWord{5, 0, 150}, + dictWord{7, 0, 106}, + dictWord{7, 0, 774}, + dictWord{8, 0, 603}, + dictWord{ + 9, + 0, + 593, + }, + dictWord{9, 0, 634}, + dictWord{10, 0, 44}, + dictWord{10, 0, 173}, + dictWord{11, 0, 462}, + dictWord{11, 0, 515}, + dictWord{13, 0, 216}, + dictWord{13, 0, 288}, + dictWord{142, 0, 400}, + dictWord{137, 10, 347}, + dictWord{5, 0, 748}, + dictWord{134, 0, 553}, + dictWord{12, 0, 108}, + dictWord{141, 0, 291}, + dictWord{7, 0, 420}, + dictWord{4, 10, 12}, + dictWord{7, 10, 522}, + dictWord{7, 10, 809}, + dictWord{8, 10, 797}, + dictWord{141, 10, 88}, + dictWord{6, 11, 193}, + dictWord{7, 11, 240}, + dictWord{ + 7, + 11, + 1682, + }, + dictWord{10, 11, 51}, + dictWord{10, 11, 640}, + dictWord{11, 11, 410}, + dictWord{13, 11, 82}, + dictWord{14, 11, 247}, + dictWord{14, 11, 331}, + dictWord{142, 11, 377}, + dictWord{133, 10, 528}, + dictWord{135, 0, 1777}, + dictWord{4, 0, 493}, + dictWord{144, 0, 55}, + dictWord{136, 11, 633}, + dictWord{ + 139, + 0, + 81, + }, + dictWord{6, 0, 980}, + dictWord{136, 0, 321}, + dictWord{148, 10, 109}, + dictWord{5, 10, 266}, + dictWord{9, 10, 290}, + dictWord{9, 10, 364}, + dictWord{ + 10, + 10, + 293, + }, + dictWord{11, 10, 606}, + dictWord{142, 10, 45}, + dictWord{6, 0, 568}, + dictWord{7, 0, 112}, + dictWord{7, 0, 1804}, + dictWord{8, 0, 362}, + dictWord{8, 0, 410}, + dictWord{8, 0, 830}, + dictWord{9, 0, 514}, + dictWord{11, 0, 649}, + dictWord{142, 0, 157}, + dictWord{4, 0, 74}, + dictWord{6, 0, 510}, + dictWord{6, 10, 594}, + dictWord{ + 9, + 10, + 121, + }, + dictWord{10, 10, 49}, + dictWord{10, 10, 412}, + dictWord{139, 10, 834}, + dictWord{134, 0, 838}, + dictWord{136, 10, 748}, + dictWord{132, 10, 466}, + dictWord{132, 0, 625}, + dictWord{135, 11, 1443}, + dictWord{4, 11, 237}, + dictWord{135, 11, 514}, + dictWord{9, 10, 378}, + dictWord{141, 10, 162}, + dictWord{6, 0, 
16}, + dictWord{6, 0, 158}, + dictWord{7, 0, 43}, + dictWord{7, 0, 129}, + dictWord{7, 0, 181}, + dictWord{8, 0, 276}, + dictWord{8, 0, 377}, + dictWord{10, 0, 523}, + dictWord{ + 11, + 0, + 816, + }, + dictWord{12, 0, 455}, + dictWord{13, 0, 303}, + dictWord{142, 0, 135}, + dictWord{135, 0, 281}, + dictWord{4, 0, 1}, + dictWord{7, 0, 1143}, + dictWord{7, 0, 1463}, + dictWord{8, 0, 61}, + dictWord{9, 0, 207}, + dictWord{9, 0, 390}, + dictWord{9, 0, 467}, + dictWord{139, 0, 836}, + dictWord{6, 11, 392}, + dictWord{7, 11, 65}, + dictWord{ + 135, + 11, + 2019, + }, + dictWord{132, 10, 667}, + dictWord{4, 0, 723}, + dictWord{5, 0, 895}, + dictWord{7, 0, 1031}, + dictWord{8, 0, 199}, + dictWord{8, 0, 340}, + dictWord{9, 0, 153}, + dictWord{9, 0, 215}, + dictWord{10, 0, 21}, + dictWord{10, 0, 59}, + dictWord{10, 0, 80}, + dictWord{10, 0, 224}, + dictWord{10, 0, 838}, + dictWord{11, 0, 229}, + dictWord{ + 11, + 0, + 652, + }, + dictWord{12, 0, 192}, + dictWord{13, 0, 146}, + dictWord{142, 0, 91}, + dictWord{132, 0, 295}, + dictWord{137, 0, 51}, + dictWord{9, 11, 222}, + dictWord{ + 10, + 11, + 43, + }, + dictWord{139, 11, 900}, + dictWord{5, 0, 309}, + dictWord{140, 0, 211}, + dictWord{5, 0, 125}, + dictWord{8, 0, 77}, + dictWord{138, 0, 15}, + dictWord{136, 11, 604}, + dictWord{138, 0, 789}, + dictWord{5, 0, 173}, + dictWord{4, 10, 39}, + dictWord{7, 10, 1843}, + dictWord{8, 10, 407}, + dictWord{11, 10, 144}, + dictWord{140, 10, 523}, + dictWord{138, 11, 265}, + dictWord{133, 0, 439}, + dictWord{132, 10, 510}, + dictWord{7, 0, 648}, + dictWord{7, 0, 874}, + dictWord{11, 0, 164}, + dictWord{12, 0, 76}, + dictWord{18, 0, 9}, + dictWord{7, 10, 1980}, + dictWord{10, 10, 487}, + dictWord{138, 10, 809}, + dictWord{12, 0, 111}, + dictWord{14, 0, 294}, + dictWord{19, 0, 45}, + dictWord{13, 10, 260}, + dictWord{146, 10, 63}, + dictWord{133, 11, 549}, + dictWord{134, 10, 570}, + dictWord{4, 0, 8}, + dictWord{7, 0, 1152}, + dictWord{7, 0, 1153}, + dictWord{7, 0, 1715}, + dictWord{9, 0, 374}, + dictWord{10, 0, 478}, + dictWord{139, 0, 648}, + dictWord{135, 0, 1099}, + dictWord{5, 0, 575}, + dictWord{6, 0, 354}, + dictWord{ + 135, + 0, + 701, + }, + dictWord{7, 11, 36}, + dictWord{8, 11, 201}, + dictWord{136, 11, 605}, + dictWord{4, 10, 787}, + dictWord{136, 11, 156}, + dictWord{6, 0, 518}, + dictWord{ + 149, + 11, + 13, + }, + dictWord{140, 11, 224}, + dictWord{134, 0, 702}, + dictWord{132, 10, 516}, + dictWord{5, 11, 724}, + dictWord{10, 11, 305}, + dictWord{11, 11, 151}, + dictWord{12, 11, 33}, + dictWord{12, 11, 121}, + dictWord{12, 11, 381}, + dictWord{17, 11, 3}, + dictWord{17, 11, 27}, + dictWord{17, 11, 78}, + dictWord{18, 11, 18}, + dictWord{19, 11, 54}, + dictWord{149, 11, 5}, + dictWord{8, 0, 87}, + dictWord{4, 11, 523}, + dictWord{5, 11, 638}, + dictWord{11, 10, 887}, + dictWord{14, 10, 365}, + dictWord{ + 142, + 10, + 375, + }, + dictWord{138, 0, 438}, + dictWord{136, 10, 821}, + dictWord{135, 11, 1908}, + dictWord{6, 11, 242}, + dictWord{7, 11, 227}, + dictWord{7, 11, 1581}, + dictWord{8, 11, 104}, + dictWord{9, 11, 113}, + dictWord{9, 11, 220}, + dictWord{9, 11, 427}, + dictWord{10, 11, 74}, + dictWord{10, 11, 239}, + dictWord{11, 11, 579}, + dictWord{11, 11, 1023}, + dictWord{13, 11, 4}, + dictWord{13, 11, 204}, + dictWord{13, 11, 316}, + dictWord{18, 11, 95}, + dictWord{148, 11, 86}, + dictWord{4, 0, 69}, + dictWord{5, 0, 122}, + dictWord{5, 0, 849}, + dictWord{6, 0, 1633}, + dictWord{9, 0, 656}, + dictWord{138, 0, 464}, + dictWord{7, 0, 1802}, + dictWord{4, 10, 10}, + dictWord{ + 139, + 10, 
+ 786, + }, + dictWord{135, 11, 861}, + dictWord{139, 0, 499}, + dictWord{7, 0, 476}, + dictWord{7, 0, 1592}, + dictWord{138, 0, 87}, + dictWord{133, 10, 684}, + dictWord{ + 4, + 0, + 840, + }, + dictWord{134, 10, 27}, + dictWord{142, 0, 283}, + dictWord{6, 0, 1620}, + dictWord{7, 11, 1328}, + dictWord{136, 11, 494}, + dictWord{5, 0, 859}, + dictWord{ + 7, + 0, + 1160, + }, + dictWord{8, 0, 107}, + dictWord{9, 0, 291}, + dictWord{9, 0, 439}, + dictWord{10, 0, 663}, + dictWord{11, 0, 609}, + dictWord{140, 0, 197}, + dictWord{ + 7, + 11, + 1306, + }, + dictWord{8, 11, 505}, + dictWord{9, 11, 482}, + dictWord{10, 11, 126}, + dictWord{11, 11, 225}, + dictWord{12, 11, 347}, + dictWord{12, 11, 449}, + dictWord{ + 13, + 11, + 19, + }, + dictWord{142, 11, 218}, + dictWord{5, 11, 268}, + dictWord{10, 11, 764}, + dictWord{12, 11, 120}, + dictWord{13, 11, 39}, + dictWord{145, 11, 127}, + dictWord{145, 10, 56}, + dictWord{7, 11, 1672}, + dictWord{10, 11, 472}, + dictWord{11, 11, 189}, + dictWord{143, 11, 51}, + dictWord{6, 10, 342}, + dictWord{6, 10, 496}, + dictWord{8, 10, 275}, + dictWord{137, 10, 206}, + dictWord{133, 0, 600}, + dictWord{4, 0, 117}, + dictWord{6, 0, 372}, + dictWord{7, 0, 1905}, + dictWord{142, 0, 323}, + dictWord{4, 10, 909}, + dictWord{5, 10, 940}, + dictWord{135, 11, 1471}, + dictWord{132, 10, 891}, + dictWord{4, 0, 722}, + dictWord{139, 0, 471}, + dictWord{4, 11, 384}, + dictWord{135, 11, 1022}, + dictWord{132, 10, 687}, + dictWord{9, 0, 5}, + dictWord{12, 0, 216}, + dictWord{12, 0, 294}, + dictWord{12, 0, 298}, + dictWord{12, 0, 400}, + dictWord{12, 0, 518}, + dictWord{13, 0, 229}, + dictWord{143, 0, 139}, + dictWord{135, 11, 1703}, + dictWord{7, 11, 1602}, + dictWord{10, 11, 698}, + dictWord{ + 12, + 11, + 212, + }, + dictWord{141, 11, 307}, + dictWord{6, 10, 41}, + dictWord{141, 10, 160}, + dictWord{135, 11, 1077}, + dictWord{9, 11, 159}, + dictWord{11, 11, 28}, + dictWord{140, 11, 603}, + dictWord{4, 0, 514}, + dictWord{7, 0, 1304}, + dictWord{138, 0, 477}, + dictWord{134, 0, 1774}, + dictWord{9, 0, 88}, + dictWord{139, 0, 270}, + dictWord{5, 0, 12}, + dictWord{7, 0, 375}, + dictWord{9, 0, 438}, + dictWord{134, 10, 1718}, + dictWord{132, 11, 515}, + dictWord{136, 10, 778}, + dictWord{8, 11, 632}, + dictWord{8, 11, 697}, + dictWord{137, 11, 854}, + dictWord{6, 0, 362}, + dictWord{6, 0, 997}, + dictWord{146, 0, 51}, + dictWord{7, 0, 816}, + dictWord{7, 0, 1241}, + dictWord{ + 9, + 0, + 283, + }, + dictWord{9, 0, 520}, + dictWord{10, 0, 213}, + dictWord{10, 0, 307}, + dictWord{10, 0, 463}, + dictWord{10, 0, 671}, + dictWord{10, 0, 746}, + dictWord{11, 0, 401}, + dictWord{11, 0, 794}, + dictWord{12, 0, 517}, + dictWord{18, 0, 107}, + dictWord{147, 0, 115}, + dictWord{133, 10, 115}, + dictWord{150, 11, 28}, + dictWord{4, 11, 136}, + dictWord{133, 11, 551}, + dictWord{142, 10, 314}, + dictWord{132, 0, 258}, + dictWord{6, 0, 22}, + dictWord{7, 0, 903}, + dictWord{7, 0, 1963}, + dictWord{8, 0, 639}, + dictWord{138, 0, 577}, + dictWord{5, 0, 681}, + dictWord{8, 0, 782}, + dictWord{13, 0, 130}, + dictWord{17, 0, 84}, + dictWord{5, 10, 193}, + dictWord{140, 10, 178}, + dictWord{ + 9, + 11, + 17, + }, + dictWord{138, 11, 291}, + dictWord{7, 11, 1287}, + dictWord{9, 11, 44}, + dictWord{10, 11, 552}, + dictWord{10, 11, 642}, + dictWord{11, 11, 839}, + dictWord{12, 11, 274}, + dictWord{12, 11, 275}, + dictWord{12, 11, 372}, + dictWord{13, 11, 91}, + dictWord{142, 11, 125}, + dictWord{135, 10, 174}, + dictWord{4, 0, 664}, + dictWord{5, 0, 804}, + dictWord{139, 0, 1013}, + dictWord{134, 0, 
942}, + dictWord{6, 0, 1349}, + dictWord{6, 0, 1353}, + dictWord{6, 0, 1450}, + dictWord{7, 11, 1518}, + dictWord{139, 11, 694}, + dictWord{11, 0, 356}, + dictWord{4, 10, 122}, + dictWord{5, 10, 796}, + dictWord{5, 10, 952}, + dictWord{6, 10, 1660}, + dictWord{ + 6, + 10, + 1671, + }, + dictWord{8, 10, 567}, + dictWord{9, 10, 687}, + dictWord{9, 10, 742}, + dictWord{10, 10, 686}, + dictWord{11, 10, 682}, + dictWord{140, 10, 281}, + dictWord{ + 5, + 0, + 32, + }, + dictWord{6, 11, 147}, + dictWord{7, 11, 886}, + dictWord{9, 11, 753}, + dictWord{138, 11, 268}, + dictWord{5, 10, 179}, + dictWord{7, 10, 1095}, + dictWord{ + 135, + 10, + 1213, + }, + dictWord{4, 10, 66}, + dictWord{7, 10, 722}, + dictWord{135, 10, 904}, + dictWord{135, 10, 352}, + dictWord{9, 11, 245}, + dictWord{138, 11, 137}, + dictWord{4, 0, 289}, + dictWord{7, 0, 629}, + dictWord{7, 0, 1698}, + dictWord{7, 0, 1711}, + dictWord{12, 0, 215}, + dictWord{133, 11, 414}, + dictWord{6, 0, 1975}, + dictWord{135, 11, 1762}, + dictWord{6, 0, 450}, + dictWord{136, 0, 109}, + dictWord{141, 10, 35}, + dictWord{134, 11, 599}, + dictWord{136, 0, 705}, + dictWord{ + 133, + 0, + 664, + }, + dictWord{134, 11, 1749}, + dictWord{11, 11, 402}, + dictWord{12, 11, 109}, + dictWord{12, 11, 431}, + dictWord{13, 11, 179}, + dictWord{13, 11, 206}, + dictWord{14, 11, 175}, + dictWord{14, 11, 217}, + dictWord{16, 11, 3}, + dictWord{148, 11, 53}, + dictWord{135, 0, 1238}, + dictWord{134, 11, 1627}, + dictWord{ + 132, + 11, + 488, + }, + dictWord{13, 0, 318}, + dictWord{10, 10, 592}, + dictWord{10, 10, 753}, + dictWord{12, 10, 317}, + dictWord{12, 10, 355}, + dictWord{12, 10, 465}, + dictWord{ + 12, + 10, + 469, + }, + dictWord{12, 10, 560}, + dictWord{140, 10, 578}, + dictWord{133, 10, 564}, + dictWord{132, 11, 83}, + dictWord{140, 11, 676}, + dictWord{6, 0, 1872}, + dictWord{6, 0, 1906}, + dictWord{6, 0, 1907}, + dictWord{9, 0, 934}, + dictWord{9, 0, 956}, + dictWord{9, 0, 960}, + dictWord{9, 0, 996}, + dictWord{12, 0, 794}, + dictWord{ + 12, + 0, + 876, + }, + dictWord{12, 0, 880}, + dictWord{12, 0, 918}, + dictWord{15, 0, 230}, + dictWord{18, 0, 234}, + dictWord{18, 0, 238}, + dictWord{21, 0, 38}, + dictWord{149, 0, 62}, + dictWord{134, 10, 556}, + dictWord{134, 11, 278}, + dictWord{137, 0, 103}, + dictWord{7, 10, 544}, + dictWord{8, 10, 719}, + dictWord{138, 10, 61}, + dictWord{ + 4, + 10, + 5, + }, + dictWord{5, 10, 498}, + dictWord{8, 10, 637}, + dictWord{137, 10, 521}, + dictWord{7, 0, 777}, + dictWord{12, 0, 229}, + dictWord{12, 0, 239}, + dictWord{15, 0, 12}, + dictWord{12, 11, 229}, + dictWord{12, 11, 239}, + dictWord{143, 11, 12}, + dictWord{6, 0, 26}, + dictWord{7, 11, 388}, + dictWord{7, 11, 644}, + dictWord{139, 11, 781}, + dictWord{7, 11, 229}, + dictWord{8, 11, 59}, + dictWord{9, 11, 190}, + dictWord{9, 11, 257}, + dictWord{10, 11, 378}, + dictWord{140, 11, 191}, + dictWord{133, 10, 927}, + dictWord{135, 10, 1441}, + dictWord{4, 10, 893}, + dictWord{5, 10, 780}, + dictWord{133, 10, 893}, + dictWord{4, 0, 414}, + dictWord{5, 0, 467}, + dictWord{9, 0, 654}, + dictWord{10, 0, 451}, + dictWord{12, 0, 59}, + dictWord{141, 0, 375}, + dictWord{142, 0, 173}, + dictWord{135, 0, 17}, + dictWord{7, 0, 1350}, + dictWord{133, 10, 238}, + dictWord{135, 0, 955}, + dictWord{4, 0, 960}, + dictWord{10, 0, 887}, + dictWord{12, 0, 753}, + dictWord{18, 0, 161}, + dictWord{18, 0, 162}, + dictWord{152, 0, 19}, + dictWord{136, 11, 344}, + dictWord{6, 10, 1729}, + dictWord{137, 11, 288}, + dictWord{132, 11, 660}, + dictWord{4, 0, 217}, + dictWord{5, 0, 710}, + 
dictWord{7, 0, 760}, + dictWord{7, 0, 1926}, + dictWord{9, 0, 428}, + dictWord{9, 0, 708}, + dictWord{10, 0, 254}, + dictWord{10, 0, 296}, + dictWord{10, 0, 720}, + dictWord{11, 0, 109}, + dictWord{ + 11, + 0, + 255, + }, + dictWord{12, 0, 165}, + dictWord{12, 0, 315}, + dictWord{13, 0, 107}, + dictWord{13, 0, 203}, + dictWord{14, 0, 54}, + dictWord{14, 0, 99}, + dictWord{14, 0, 114}, + dictWord{14, 0, 388}, + dictWord{16, 0, 85}, + dictWord{17, 0, 9}, + dictWord{17, 0, 33}, + dictWord{20, 0, 25}, + dictWord{20, 0, 28}, + dictWord{20, 0, 29}, + dictWord{21, 0, 9}, + dictWord{21, 0, 10}, + dictWord{21, 0, 34}, + dictWord{22, 0, 17}, + dictWord{4, 10, 60}, + dictWord{7, 10, 1800}, + dictWord{8, 10, 314}, + dictWord{9, 10, 700}, + dictWord{ + 139, + 10, + 487, + }, + dictWord{7, 11, 1035}, + dictWord{138, 11, 737}, + dictWord{7, 11, 690}, + dictWord{9, 11, 217}, + dictWord{9, 11, 587}, + dictWord{140, 11, 521}, + dictWord{6, 0, 919}, + dictWord{7, 11, 706}, + dictWord{7, 11, 1058}, + dictWord{138, 11, 538}, + dictWord{7, 10, 1853}, + dictWord{138, 10, 437}, + dictWord{ + 136, + 10, + 419, + }, + dictWord{6, 0, 280}, + dictWord{10, 0, 502}, + dictWord{11, 0, 344}, + dictWord{140, 0, 38}, + dictWord{5, 0, 45}, + dictWord{7, 0, 1161}, + dictWord{11, 0, 448}, + dictWord{11, 0, 880}, + dictWord{13, 0, 139}, + dictWord{13, 0, 407}, + dictWord{15, 0, 16}, + dictWord{17, 0, 95}, + dictWord{18, 0, 66}, + dictWord{18, 0, 88}, + dictWord{ + 18, + 0, + 123, + }, + dictWord{149, 0, 7}, + dictWord{11, 11, 92}, + dictWord{11, 11, 196}, + dictWord{11, 11, 409}, + dictWord{11, 11, 450}, + dictWord{11, 11, 666}, + dictWord{ + 11, + 11, + 777, + }, + dictWord{12, 11, 262}, + dictWord{13, 11, 385}, + dictWord{13, 11, 393}, + dictWord{15, 11, 115}, + dictWord{16, 11, 45}, + dictWord{145, 11, 82}, + dictWord{136, 0, 777}, + dictWord{134, 11, 1744}, + dictWord{4, 0, 410}, + dictWord{7, 0, 521}, + dictWord{133, 10, 828}, + dictWord{134, 0, 673}, + dictWord{7, 0, 1110}, + dictWord{7, 0, 1778}, + dictWord{7, 10, 176}, + dictWord{135, 10, 178}, + dictWord{5, 10, 806}, + dictWord{7, 11, 268}, + dictWord{7, 10, 1976}, + dictWord{ + 136, + 11, + 569, + }, + dictWord{4, 11, 733}, + dictWord{9, 11, 194}, + dictWord{10, 11, 92}, + dictWord{11, 11, 198}, + dictWord{12, 11, 84}, + dictWord{12, 11, 87}, + dictWord{ + 13, + 11, + 128, + }, + dictWord{144, 11, 74}, + dictWord{5, 0, 341}, + dictWord{7, 0, 1129}, + dictWord{11, 0, 414}, + dictWord{4, 10, 51}, + dictWord{6, 10, 4}, + dictWord{7, 10, 591}, + dictWord{7, 10, 849}, + dictWord{7, 10, 951}, + dictWord{7, 10, 1613}, + dictWord{7, 10, 1760}, + dictWord{7, 10, 1988}, + dictWord{9, 10, 434}, + dictWord{10, 10, 754}, + dictWord{11, 10, 25}, + dictWord{139, 10, 37}, + dictWord{133, 10, 902}, + dictWord{135, 10, 928}, + dictWord{135, 0, 787}, + dictWord{132, 0, 436}, + dictWord{ + 134, + 10, + 270, + }, + dictWord{7, 0, 1587}, + dictWord{135, 0, 1707}, + dictWord{6, 0, 377}, + dictWord{7, 0, 1025}, + dictWord{9, 0, 613}, + dictWord{145, 0, 104}, + dictWord{ + 7, + 11, + 982, + }, + dictWord{7, 11, 1361}, + dictWord{10, 11, 32}, + dictWord{143, 11, 56}, + dictWord{139, 0, 96}, + dictWord{132, 0, 451}, + dictWord{132, 10, 416}, + dictWord{ + 142, + 10, + 372, + }, + dictWord{5, 10, 152}, + dictWord{5, 10, 197}, + dictWord{7, 11, 306}, + dictWord{7, 10, 340}, + dictWord{7, 10, 867}, + dictWord{10, 10, 548}, + dictWord{ + 10, + 10, + 581, + }, + dictWord{11, 10, 6}, + dictWord{12, 10, 3}, + dictWord{12, 10, 19}, + dictWord{14, 10, 110}, + dictWord{142, 10, 289}, + dictWord{134, 0, 
680}, + dictWord{ + 134, + 11, + 609, + }, + dictWord{7, 0, 483}, + dictWord{7, 10, 190}, + dictWord{8, 10, 28}, + dictWord{8, 10, 141}, + dictWord{8, 10, 444}, + dictWord{8, 10, 811}, + dictWord{ + 9, + 10, + 468, + }, + dictWord{11, 10, 334}, + dictWord{12, 10, 24}, + dictWord{12, 10, 386}, + dictWord{140, 10, 576}, + dictWord{10, 0, 916}, + dictWord{133, 10, 757}, + dictWord{ + 5, + 10, + 721, + }, + dictWord{135, 10, 1553}, + dictWord{133, 11, 178}, + dictWord{134, 0, 937}, + dictWord{132, 10, 898}, + dictWord{133, 0, 739}, + dictWord{ + 147, + 0, + 82, + }, + dictWord{135, 0, 663}, + dictWord{146, 0, 128}, + dictWord{5, 10, 277}, + dictWord{141, 10, 247}, + dictWord{134, 0, 1087}, + dictWord{132, 10, 435}, + dictWord{ + 6, + 11, + 381, + }, + dictWord{7, 11, 645}, + dictWord{7, 11, 694}, + dictWord{136, 11, 546}, + dictWord{7, 0, 503}, + dictWord{135, 0, 1885}, + dictWord{6, 0, 1965}, + dictWord{ + 8, + 0, + 925, + }, + dictWord{138, 0, 955}, + dictWord{4, 0, 113}, + dictWord{5, 0, 163}, + dictWord{5, 0, 735}, + dictWord{7, 0, 1009}, + dictWord{9, 0, 9}, + dictWord{9, 0, 771}, + dictWord{12, 0, 90}, + dictWord{13, 0, 138}, + dictWord{13, 0, 410}, + dictWord{143, 0, 128}, + dictWord{4, 0, 324}, + dictWord{138, 0, 104}, + dictWord{7, 0, 460}, + dictWord{ + 5, + 10, + 265, + }, + dictWord{134, 10, 212}, + dictWord{133, 11, 105}, + dictWord{7, 11, 261}, + dictWord{7, 11, 1107}, + dictWord{7, 11, 1115}, + dictWord{7, 11, 1354}, + dictWord{7, 11, 1588}, + dictWord{7, 11, 1705}, + dictWord{7, 11, 1902}, + dictWord{9, 11, 465}, + dictWord{10, 11, 248}, + dictWord{10, 11, 349}, + dictWord{10, 11, 647}, + dictWord{11, 11, 527}, + dictWord{11, 11, 660}, + dictWord{11, 11, 669}, + dictWord{12, 11, 529}, + dictWord{141, 11, 305}, + dictWord{5, 11, 438}, + dictWord{ + 9, + 11, + 694, + }, + dictWord{12, 11, 627}, + dictWord{141, 11, 210}, + dictWord{152, 11, 11}, + dictWord{4, 0, 935}, + dictWord{133, 0, 823}, + dictWord{132, 10, 702}, + dictWord{ + 5, + 0, + 269, + }, + dictWord{7, 0, 434}, + dictWord{7, 0, 891}, + dictWord{8, 0, 339}, + dictWord{9, 0, 702}, + dictWord{11, 0, 594}, + dictWord{11, 0, 718}, + dictWord{17, 0, 100}, + dictWord{5, 10, 808}, + dictWord{135, 10, 2045}, + dictWord{7, 0, 1014}, + dictWord{9, 0, 485}, + dictWord{141, 0, 264}, + dictWord{134, 0, 1713}, + dictWord{7, 0, 1810}, + dictWord{11, 0, 866}, + dictWord{12, 0, 103}, + dictWord{13, 0, 495}, + dictWord{140, 11, 233}, + dictWord{4, 0, 423}, + dictWord{10, 0, 949}, + dictWord{138, 0, 1013}, + dictWord{135, 0, 900}, + dictWord{8, 11, 25}, + dictWord{138, 11, 826}, + dictWord{5, 10, 166}, + dictWord{8, 10, 739}, + dictWord{140, 10, 511}, + dictWord{ + 134, + 0, + 2018, + }, + dictWord{7, 11, 1270}, + dictWord{139, 11, 612}, + dictWord{4, 10, 119}, + dictWord{5, 10, 170}, + dictWord{5, 10, 447}, + dictWord{7, 10, 1708}, + dictWord{ + 7, + 10, + 1889, + }, + dictWord{9, 10, 357}, + dictWord{9, 10, 719}, + dictWord{12, 10, 486}, + dictWord{140, 10, 596}, + dictWord{12, 0, 574}, + dictWord{140, 11, 574}, + dictWord{132, 11, 308}, + dictWord{6, 0, 964}, + dictWord{6, 0, 1206}, + dictWord{134, 0, 1302}, + dictWord{4, 10, 450}, + dictWord{135, 10, 1158}, + dictWord{ + 135, + 11, + 150, + }, + dictWord{136, 11, 649}, + dictWord{14, 0, 213}, + dictWord{148, 0, 38}, + dictWord{9, 11, 45}, + dictWord{9, 11, 311}, + dictWord{141, 11, 42}, + dictWord{ + 134, + 11, + 521, + }, + dictWord{7, 10, 1375}, + dictWord{7, 10, 1466}, + dictWord{138, 10, 331}, + dictWord{132, 10, 754}, + dictWord{5, 11, 339}, + dictWord{7, 11, 1442}, + 
dictWord{14, 11, 3}, + dictWord{15, 11, 41}, + dictWord{147, 11, 66}, + dictWord{136, 11, 378}, + dictWord{134, 0, 1022}, + dictWord{5, 10, 850}, + dictWord{136, 10, 799}, + dictWord{142, 0, 143}, + dictWord{135, 0, 2029}, + dictWord{134, 11, 1628}, + dictWord{8, 0, 523}, + dictWord{150, 0, 34}, + dictWord{5, 0, 625}, + dictWord{ + 135, + 0, + 1617, + }, + dictWord{7, 0, 275}, + dictWord{7, 10, 238}, + dictWord{7, 10, 2033}, + dictWord{8, 10, 120}, + dictWord{8, 10, 188}, + dictWord{8, 10, 659}, + dictWord{ + 9, + 10, + 598, + }, + dictWord{10, 10, 466}, + dictWord{12, 10, 342}, + dictWord{12, 10, 588}, + dictWord{13, 10, 503}, + dictWord{14, 10, 246}, + dictWord{143, 10, 92}, + dictWord{ + 7, + 0, + 37, + }, + dictWord{8, 0, 425}, + dictWord{8, 0, 693}, + dictWord{9, 0, 720}, + dictWord{10, 0, 380}, + dictWord{10, 0, 638}, + dictWord{11, 0, 273}, + dictWord{11, 0, 473}, + dictWord{12, 0, 61}, + dictWord{143, 0, 43}, + dictWord{135, 11, 829}, + dictWord{135, 0, 1943}, + dictWord{132, 0, 765}, + dictWord{5, 11, 486}, + dictWord{ + 135, + 11, + 1349, + }, + dictWord{7, 11, 1635}, + dictWord{8, 11, 17}, + dictWord{10, 11, 217}, + dictWord{138, 11, 295}, + dictWord{4, 10, 201}, + dictWord{7, 10, 1744}, + dictWord{ + 8, + 10, + 602, + }, + dictWord{11, 10, 247}, + dictWord{11, 10, 826}, + dictWord{145, 10, 65}, + dictWord{138, 11, 558}, + dictWord{11, 0, 551}, + dictWord{142, 0, 159}, + dictWord{8, 10, 164}, + dictWord{146, 10, 62}, + dictWord{139, 11, 176}, + dictWord{132, 0, 168}, + dictWord{136, 0, 1010}, + dictWord{134, 0, 1994}, + dictWord{ + 135, + 0, + 91, + }, + dictWord{138, 0, 532}, + dictWord{135, 10, 1243}, + dictWord{135, 0, 1884}, + dictWord{132, 10, 907}, + dictWord{5, 10, 100}, + dictWord{10, 10, 329}, + dictWord{12, 10, 416}, + dictWord{149, 10, 29}, + dictWord{134, 11, 447}, + dictWord{132, 10, 176}, + dictWord{5, 10, 636}, + dictWord{5, 10, 998}, + dictWord{7, 10, 9}, + dictWord{7, 10, 1508}, + dictWord{8, 10, 26}, + dictWord{9, 10, 317}, + dictWord{9, 10, 358}, + dictWord{10, 10, 210}, + dictWord{10, 10, 292}, + dictWord{10, 10, 533}, + dictWord{11, 10, 555}, + dictWord{12, 10, 526}, + dictWord{12, 10, 607}, + dictWord{13, 10, 263}, + dictWord{13, 10, 459}, + dictWord{142, 10, 271}, + dictWord{ + 4, + 11, + 609, + }, + dictWord{135, 11, 756}, + dictWord{6, 0, 15}, + dictWord{7, 0, 70}, + dictWord{10, 0, 240}, + dictWord{147, 0, 93}, + dictWord{4, 11, 930}, + dictWord{133, 11, 947}, + dictWord{134, 0, 1227}, + dictWord{134, 0, 1534}, + dictWord{133, 11, 939}, + dictWord{133, 11, 962}, + dictWord{5, 11, 651}, + dictWord{8, 11, 170}, + dictWord{ + 9, + 11, + 61, + }, + dictWord{9, 11, 63}, + dictWord{10, 11, 23}, + dictWord{10, 11, 37}, + dictWord{10, 11, 834}, + dictWord{11, 11, 4}, + dictWord{11, 11, 187}, + dictWord{ + 11, + 11, + 281, + }, + dictWord{11, 11, 503}, + dictWord{11, 11, 677}, + dictWord{12, 11, 96}, + dictWord{12, 11, 130}, + dictWord{12, 11, 244}, + dictWord{14, 11, 5}, + dictWord{ + 14, + 11, + 40, + }, + dictWord{14, 11, 162}, + dictWord{14, 11, 202}, + dictWord{146, 11, 133}, + dictWord{4, 11, 406}, + dictWord{5, 11, 579}, + dictWord{12, 11, 492}, + dictWord{ + 150, + 11, + 15, + }, + dictWord{139, 0, 392}, + dictWord{6, 10, 610}, + dictWord{10, 10, 127}, + dictWord{141, 10, 27}, + dictWord{7, 0, 655}, + dictWord{7, 0, 1844}, + dictWord{ + 136, + 10, + 119, + }, + dictWord{4, 0, 145}, + dictWord{6, 0, 176}, + dictWord{7, 0, 395}, + dictWord{137, 0, 562}, + dictWord{132, 0, 501}, + dictWord{140, 11, 145}, + dictWord{ + 136, + 0, + 1019, + }, + dictWord{134, 
0, 509}, + dictWord{139, 0, 267}, + dictWord{6, 11, 17}, + dictWord{7, 11, 16}, + dictWord{7, 11, 1001}, + dictWord{7, 11, 1982}, + dictWord{ + 9, + 11, + 886, + }, + dictWord{10, 11, 489}, + dictWord{10, 11, 800}, + dictWord{11, 11, 782}, + dictWord{12, 11, 320}, + dictWord{13, 11, 467}, + dictWord{14, 11, 145}, + dictWord{14, 11, 387}, + dictWord{143, 11, 119}, + dictWord{145, 11, 17}, + dictWord{6, 0, 1099}, + dictWord{133, 11, 458}, + dictWord{7, 11, 1983}, + dictWord{8, 11, 0}, + dictWord{8, 11, 171}, + dictWord{9, 11, 120}, + dictWord{9, 11, 732}, + dictWord{10, 11, 473}, + dictWord{11, 11, 656}, + dictWord{11, 11, 998}, + dictWord{18, 11, 0}, + dictWord{18, 11, 2}, + dictWord{147, 11, 21}, + dictWord{12, 11, 427}, + dictWord{146, 11, 38}, + dictWord{10, 0, 948}, + dictWord{138, 0, 968}, + dictWord{7, 10, 126}, + dictWord{136, 10, 84}, + dictWord{136, 10, 790}, + dictWord{4, 0, 114}, + dictWord{9, 0, 492}, + dictWord{13, 0, 462}, + dictWord{142, 0, 215}, + dictWord{6, 10, 64}, + dictWord{12, 10, 377}, + dictWord{141, 10, 309}, + dictWord{4, 0, 77}, + dictWord{5, 0, 361}, + dictWord{6, 0, 139}, + dictWord{6, 0, 401}, + dictWord{6, 0, 404}, + dictWord{ + 7, + 0, + 413, + }, + dictWord{7, 0, 715}, + dictWord{7, 0, 1716}, + dictWord{11, 0, 279}, + dictWord{12, 0, 179}, + dictWord{12, 0, 258}, + dictWord{13, 0, 244}, + dictWord{142, 0, 358}, + dictWord{134, 0, 1717}, + dictWord{7, 0, 772}, + dictWord{7, 0, 1061}, + dictWord{7, 0, 1647}, + dictWord{8, 0, 82}, + dictWord{11, 0, 250}, + dictWord{11, 0, 607}, + dictWord{12, 0, 311}, + dictWord{12, 0, 420}, + dictWord{13, 0, 184}, + dictWord{13, 0, 367}, + dictWord{7, 10, 1104}, + dictWord{11, 10, 269}, + dictWord{11, 10, 539}, + dictWord{11, 10, 627}, + dictWord{11, 10, 706}, + dictWord{11, 10, 975}, + dictWord{12, 10, 248}, + dictWord{12, 10, 434}, + dictWord{12, 10, 600}, + dictWord{ + 12, + 10, + 622, + }, + dictWord{13, 10, 297}, + dictWord{13, 10, 485}, + dictWord{14, 10, 69}, + dictWord{14, 10, 409}, + dictWord{143, 10, 108}, + dictWord{135, 0, 724}, + dictWord{ + 4, + 11, + 512, + }, + dictWord{4, 11, 519}, + dictWord{133, 11, 342}, + dictWord{134, 0, 1133}, + dictWord{145, 11, 29}, + dictWord{11, 10, 977}, + dictWord{141, 10, 507}, + dictWord{6, 0, 841}, + dictWord{6, 0, 1042}, + dictWord{6, 0, 1194}, + dictWord{10, 0, 993}, + dictWord{140, 0, 1021}, + dictWord{6, 11, 31}, + dictWord{7, 11, 491}, + dictWord{7, 11, 530}, + dictWord{8, 11, 592}, + dictWord{9, 10, 34}, + dictWord{11, 11, 53}, + dictWord{11, 10, 484}, + dictWord{11, 11, 779}, + dictWord{12, 11, 167}, + dictWord{12, 11, 411}, + dictWord{14, 11, 14}, + dictWord{14, 11, 136}, + dictWord{15, 11, 72}, + dictWord{16, 11, 17}, + dictWord{144, 11, 72}, + dictWord{4, 0, 1021}, + dictWord{6, 0, 2037}, + dictWord{133, 11, 907}, + dictWord{7, 0, 373}, + dictWord{8, 0, 335}, + dictWord{8, 0, 596}, + dictWord{9, 0, 488}, + dictWord{6, 10, 1700}, + dictWord{ + 7, + 10, + 293, + }, + dictWord{7, 10, 382}, + dictWord{7, 10, 1026}, + dictWord{7, 10, 1087}, + dictWord{7, 10, 2027}, + dictWord{8, 10, 252}, + dictWord{8, 10, 727}, + dictWord{ + 8, + 10, + 729, + }, + dictWord{9, 10, 30}, + dictWord{9, 10, 199}, + dictWord{9, 10, 231}, + dictWord{9, 10, 251}, + dictWord{9, 10, 334}, + dictWord{9, 10, 361}, + dictWord{9, 10, 712}, + dictWord{10, 10, 55}, + dictWord{10, 10, 60}, + dictWord{10, 10, 232}, + dictWord{10, 10, 332}, + dictWord{10, 10, 384}, + dictWord{10, 10, 396}, + dictWord{ + 10, + 10, + 504, + }, + dictWord{10, 10, 542}, + dictWord{10, 10, 652}, + dictWord{11, 10, 20}, + 
dictWord{11, 10, 48}, + dictWord{11, 10, 207}, + dictWord{11, 10, 291}, + dictWord{ + 11, + 10, + 298, + }, + dictWord{11, 10, 342}, + dictWord{11, 10, 365}, + dictWord{11, 10, 394}, + dictWord{11, 10, 620}, + dictWord{11, 10, 705}, + dictWord{11, 10, 1017}, + dictWord{12, 10, 123}, + dictWord{12, 10, 340}, + dictWord{12, 10, 406}, + dictWord{12, 10, 643}, + dictWord{13, 10, 61}, + dictWord{13, 10, 269}, + dictWord{ + 13, + 10, + 311, + }, + dictWord{13, 10, 319}, + dictWord{13, 10, 486}, + dictWord{14, 10, 234}, + dictWord{15, 10, 62}, + dictWord{15, 10, 85}, + dictWord{16, 10, 71}, + dictWord{ + 18, + 10, + 119, + }, + dictWord{148, 10, 105}, + dictWord{150, 0, 37}, + dictWord{4, 11, 208}, + dictWord{5, 11, 106}, + dictWord{6, 11, 531}, + dictWord{8, 11, 408}, + dictWord{ + 9, + 11, + 188, + }, + dictWord{138, 11, 572}, + dictWord{132, 0, 564}, + dictWord{6, 0, 513}, + dictWord{135, 0, 1052}, + dictWord{132, 0, 825}, + dictWord{9, 0, 899}, + dictWord{ + 140, + 11, + 441, + }, + dictWord{134, 0, 778}, + dictWord{133, 11, 379}, + dictWord{7, 0, 1417}, + dictWord{12, 0, 382}, + dictWord{17, 0, 48}, + dictWord{152, 0, 12}, + dictWord{ + 132, + 11, + 241, + }, + dictWord{7, 0, 1116}, + dictWord{6, 10, 379}, + dictWord{7, 10, 270}, + dictWord{8, 10, 176}, + dictWord{8, 10, 183}, + dictWord{9, 10, 432}, + dictWord{ + 9, + 10, + 661, + }, + dictWord{12, 10, 247}, + dictWord{12, 10, 617}, + dictWord{146, 10, 125}, + dictWord{5, 10, 792}, + dictWord{133, 10, 900}, + dictWord{6, 0, 545}, + dictWord{ + 7, + 0, + 565, + }, + dictWord{7, 0, 1669}, + dictWord{10, 0, 114}, + dictWord{11, 0, 642}, + dictWord{140, 0, 618}, + dictWord{133, 0, 5}, + dictWord{138, 11, 7}, + dictWord{ + 132, + 11, + 259, + }, + dictWord{135, 0, 192}, + dictWord{134, 0, 701}, + dictWord{136, 0, 763}, + dictWord{135, 10, 1979}, + dictWord{4, 10, 901}, + dictWord{133, 10, 776}, + dictWord{10, 0, 755}, + dictWord{147, 0, 29}, + dictWord{133, 0, 759}, + dictWord{4, 11, 173}, + dictWord{5, 11, 312}, + dictWord{5, 11, 512}, + dictWord{135, 11, 1285}, + dictWord{7, 11, 1603}, + dictWord{7, 11, 1691}, + dictWord{9, 11, 464}, + dictWord{11, 11, 195}, + dictWord{12, 11, 279}, + dictWord{12, 11, 448}, + dictWord{ + 14, + 11, + 11, + }, + dictWord{147, 11, 102}, + dictWord{7, 0, 370}, + dictWord{7, 0, 1007}, + dictWord{7, 0, 1177}, + dictWord{135, 0, 1565}, + dictWord{135, 0, 1237}, + dictWord{ + 4, + 0, + 87, + }, + dictWord{5, 0, 250}, + dictWord{141, 0, 298}, + dictWord{4, 11, 452}, + dictWord{5, 11, 583}, + dictWord{5, 11, 817}, + dictWord{6, 11, 433}, + dictWord{7, 11, 593}, + dictWord{7, 11, 720}, + dictWord{7, 11, 1378}, + dictWord{8, 11, 161}, + dictWord{9, 11, 284}, + dictWord{10, 11, 313}, + dictWord{139, 11, 886}, + dictWord{4, 11, 547}, + dictWord{135, 11, 1409}, + dictWord{136, 11, 722}, + dictWord{4, 10, 37}, + dictWord{5, 10, 334}, + dictWord{135, 10, 1253}, + dictWord{132, 10, 508}, + dictWord{ + 12, + 0, + 107, + }, + dictWord{146, 0, 31}, + dictWord{8, 11, 420}, + dictWord{139, 11, 193}, + dictWord{135, 0, 814}, + dictWord{135, 11, 409}, + dictWord{140, 0, 991}, + dictWord{4, 0, 57}, + dictWord{7, 0, 1195}, + dictWord{7, 0, 1438}, + dictWord{7, 0, 1548}, + dictWord{7, 0, 1835}, + dictWord{7, 0, 1904}, + dictWord{9, 0, 757}, + dictWord{ + 10, + 0, + 604, + }, + dictWord{139, 0, 519}, + dictWord{132, 0, 540}, + dictWord{138, 11, 308}, + dictWord{132, 10, 533}, + dictWord{136, 0, 608}, + dictWord{144, 11, 65}, + dictWord{4, 0, 1014}, + dictWord{134, 0, 2029}, + dictWord{4, 0, 209}, + dictWord{7, 0, 902}, + dictWord{5, 11, 
1002}, + dictWord{136, 11, 745}, + dictWord{134, 0, 2030}, + dictWord{6, 0, 303}, + dictWord{7, 0, 335}, + dictWord{7, 0, 1437}, + dictWord{7, 0, 1668}, + dictWord{8, 0, 553}, + dictWord{8, 0, 652}, + dictWord{8, 0, 656}, + dictWord{ + 9, + 0, + 558, + }, + dictWord{11, 0, 743}, + dictWord{149, 0, 18}, + dictWord{5, 11, 575}, + dictWord{6, 11, 354}, + dictWord{135, 11, 701}, + dictWord{4, 11, 239}, + dictWord{ + 6, + 11, + 477, + }, + dictWord{7, 11, 1607}, + dictWord{11, 11, 68}, + dictWord{139, 11, 617}, + dictWord{132, 0, 559}, + dictWord{8, 0, 527}, + dictWord{18, 0, 60}, + dictWord{ + 147, + 0, + 24, + }, + dictWord{133, 10, 920}, + dictWord{138, 0, 511}, + dictWord{133, 0, 1017}, + dictWord{133, 0, 675}, + dictWord{138, 10, 391}, + dictWord{11, 0, 156}, + dictWord{135, 10, 1952}, + dictWord{138, 11, 369}, + dictWord{132, 11, 367}, + dictWord{133, 0, 709}, + dictWord{6, 0, 698}, + dictWord{134, 0, 887}, + dictWord{ + 142, + 10, + 126, + }, + dictWord{134, 0, 1745}, + dictWord{132, 10, 483}, + dictWord{13, 11, 299}, + dictWord{142, 11, 75}, + dictWord{133, 0, 714}, + dictWord{7, 0, 8}, + dictWord{ + 136, + 0, + 206, + }, + dictWord{138, 10, 480}, + dictWord{4, 11, 694}, + dictWord{9, 10, 495}, + dictWord{146, 10, 104}, + dictWord{7, 11, 1248}, + dictWord{11, 11, 621}, + dictWord{139, 11, 702}, + dictWord{140, 11, 687}, + dictWord{132, 0, 776}, + dictWord{139, 10, 1009}, + dictWord{135, 0, 1272}, + dictWord{134, 0, 1059}, + dictWord{ + 8, + 10, + 653, + }, + dictWord{13, 10, 93}, + dictWord{147, 10, 14}, + dictWord{135, 11, 213}, + dictWord{136, 0, 406}, + dictWord{133, 10, 172}, + dictWord{132, 0, 947}, + dictWord{8, 0, 175}, + dictWord{10, 0, 168}, + dictWord{138, 0, 573}, + dictWord{132, 0, 870}, + dictWord{6, 0, 1567}, + dictWord{151, 11, 28}, + dictWord{ + 134, + 11, + 472, + }, + dictWord{5, 10, 260}, + dictWord{136, 11, 132}, + dictWord{4, 11, 751}, + dictWord{11, 11, 390}, + dictWord{140, 11, 32}, + dictWord{4, 11, 409}, + dictWord{ + 133, + 11, + 78, + }, + dictWord{12, 0, 554}, + dictWord{6, 11, 473}, + dictWord{145, 11, 105}, + dictWord{133, 0, 784}, + dictWord{8, 0, 908}, + dictWord{136, 11, 306}, + dictWord{139, 0, 882}, + dictWord{6, 0, 358}, + dictWord{7, 0, 1393}, + dictWord{8, 0, 396}, + dictWord{10, 0, 263}, + dictWord{14, 0, 154}, + dictWord{16, 0, 48}, + dictWord{ + 17, + 0, + 8, + }, + dictWord{7, 11, 1759}, + dictWord{8, 11, 396}, + dictWord{10, 11, 263}, + dictWord{14, 11, 154}, + dictWord{16, 11, 48}, + dictWord{145, 11, 8}, + dictWord{ + 13, + 11, + 163, + }, + dictWord{13, 11, 180}, + dictWord{18, 11, 78}, + dictWord{148, 11, 35}, + dictWord{14, 0, 32}, + dictWord{18, 0, 85}, + dictWord{20, 0, 2}, + dictWord{152, 0, 16}, + dictWord{7, 0, 228}, + dictWord{10, 0, 770}, + dictWord{8, 10, 167}, + dictWord{8, 10, 375}, + dictWord{9, 10, 82}, + dictWord{9, 10, 561}, + dictWord{138, 10, 620}, + dictWord{132, 0, 845}, + dictWord{9, 0, 14}, + dictWord{9, 0, 441}, + dictWord{10, 0, 306}, + dictWord{139, 0, 9}, + dictWord{11, 0, 966}, + dictWord{12, 0, 287}, + dictWord{ + 13, + 0, + 342, + }, + dictWord{13, 0, 402}, + dictWord{15, 0, 110}, + dictWord{15, 0, 163}, + dictWord{8, 10, 194}, + dictWord{136, 10, 756}, + dictWord{134, 0, 1578}, + dictWord{ + 4, + 0, + 967, + }, + dictWord{6, 0, 1820}, + dictWord{6, 0, 1847}, + dictWord{140, 0, 716}, + dictWord{136, 0, 594}, + dictWord{7, 0, 1428}, + dictWord{7, 0, 1640}, + dictWord{ + 7, + 0, + 1867, + }, + dictWord{9, 0, 169}, + dictWord{9, 0, 182}, + dictWord{9, 0, 367}, + dictWord{9, 0, 478}, + dictWord{9, 0, 506}, + 
dictWord{9, 0, 551}, + dictWord{9, 0, 557}, + dictWord{ + 9, + 0, + 648, + }, + dictWord{9, 0, 697}, + dictWord{9, 0, 705}, + dictWord{9, 0, 725}, + dictWord{9, 0, 787}, + dictWord{9, 0, 794}, + dictWord{10, 0, 198}, + dictWord{10, 0, 214}, + dictWord{10, 0, 267}, + dictWord{10, 0, 275}, + dictWord{10, 0, 456}, + dictWord{10, 0, 551}, + dictWord{10, 0, 561}, + dictWord{10, 0, 613}, + dictWord{10, 0, 627}, + dictWord{ + 10, + 0, + 668, + }, + dictWord{10, 0, 675}, + dictWord{10, 0, 691}, + dictWord{10, 0, 695}, + dictWord{10, 0, 707}, + dictWord{10, 0, 715}, + dictWord{11, 0, 183}, + dictWord{ + 11, + 0, + 201, + }, + dictWord{11, 0, 244}, + dictWord{11, 0, 262}, + dictWord{11, 0, 352}, + dictWord{11, 0, 439}, + dictWord{11, 0, 493}, + dictWord{11, 0, 572}, + dictWord{11, 0, 591}, + dictWord{11, 0, 608}, + dictWord{11, 0, 611}, + dictWord{11, 0, 646}, + dictWord{11, 0, 674}, + dictWord{11, 0, 711}, + dictWord{11, 0, 751}, + dictWord{11, 0, 761}, + dictWord{11, 0, 776}, + dictWord{11, 0, 785}, + dictWord{11, 0, 850}, + dictWord{11, 0, 853}, + dictWord{11, 0, 862}, + dictWord{11, 0, 865}, + dictWord{11, 0, 868}, + dictWord{ + 11, + 0, + 875, + }, + dictWord{11, 0, 898}, + dictWord{11, 0, 902}, + dictWord{11, 0, 903}, + dictWord{11, 0, 910}, + dictWord{11, 0, 932}, + dictWord{11, 0, 942}, + dictWord{ + 11, + 0, + 957, + }, + dictWord{11, 0, 967}, + dictWord{11, 0, 972}, + dictWord{12, 0, 148}, + dictWord{12, 0, 195}, + dictWord{12, 0, 220}, + dictWord{12, 0, 237}, + dictWord{12, 0, 318}, + dictWord{12, 0, 339}, + dictWord{12, 0, 393}, + dictWord{12, 0, 445}, + dictWord{12, 0, 450}, + dictWord{12, 0, 474}, + dictWord{12, 0, 505}, + dictWord{12, 0, 509}, + dictWord{12, 0, 533}, + dictWord{12, 0, 591}, + dictWord{12, 0, 594}, + dictWord{12, 0, 597}, + dictWord{12, 0, 621}, + dictWord{12, 0, 633}, + dictWord{12, 0, 642}, + dictWord{ + 13, + 0, + 59, + }, + dictWord{13, 0, 60}, + dictWord{13, 0, 145}, + dictWord{13, 0, 239}, + dictWord{13, 0, 250}, + dictWord{13, 0, 329}, + dictWord{13, 0, 344}, + dictWord{13, 0, 365}, + dictWord{13, 0, 372}, + dictWord{13, 0, 387}, + dictWord{13, 0, 403}, + dictWord{13, 0, 414}, + dictWord{13, 0, 456}, + dictWord{13, 0, 470}, + dictWord{13, 0, 478}, + dictWord{13, 0, 483}, + dictWord{13, 0, 489}, + dictWord{14, 0, 55}, + dictWord{14, 0, 57}, + dictWord{14, 0, 81}, + dictWord{14, 0, 90}, + dictWord{14, 0, 148}, + dictWord{ + 14, + 0, + 239, + }, + dictWord{14, 0, 266}, + dictWord{14, 0, 321}, + dictWord{14, 0, 326}, + dictWord{14, 0, 327}, + dictWord{14, 0, 330}, + dictWord{14, 0, 347}, + dictWord{14, 0, 355}, + dictWord{14, 0, 401}, + dictWord{14, 0, 404}, + dictWord{14, 0, 411}, + dictWord{14, 0, 414}, + dictWord{14, 0, 416}, + dictWord{14, 0, 420}, + dictWord{15, 0, 61}, + dictWord{15, 0, 74}, + dictWord{15, 0, 87}, + dictWord{15, 0, 88}, + dictWord{15, 0, 94}, + dictWord{15, 0, 96}, + dictWord{15, 0, 116}, + dictWord{15, 0, 149}, + dictWord{15, 0, 154}, + dictWord{16, 0, 50}, + dictWord{16, 0, 63}, + dictWord{16, 0, 73}, + dictWord{17, 0, 2}, + dictWord{17, 0, 66}, + dictWord{17, 0, 92}, + dictWord{17, 0, 103}, + dictWord{ + 17, + 0, + 112, + }, + dictWord{17, 0, 120}, + dictWord{18, 0, 50}, + dictWord{18, 0, 54}, + dictWord{18, 0, 82}, + dictWord{18, 0, 86}, + dictWord{18, 0, 90}, + dictWord{18, 0, 111}, + dictWord{ + 18, + 0, + 115, + }, + dictWord{18, 0, 156}, + dictWord{19, 0, 40}, + dictWord{19, 0, 79}, + dictWord{20, 0, 78}, + dictWord{21, 0, 22}, + dictWord{135, 11, 883}, + dictWord{5, 0, 161}, + dictWord{135, 0, 839}, + dictWord{4, 0, 782}, + 
dictWord{13, 11, 293}, + dictWord{142, 11, 56}, + dictWord{133, 11, 617}, + dictWord{139, 11, 50}, + dictWord{ + 135, + 10, + 22, + }, + dictWord{145, 0, 64}, + dictWord{5, 10, 639}, + dictWord{7, 10, 1249}, + dictWord{139, 10, 896}, + dictWord{138, 0, 998}, + dictWord{135, 11, 2042}, + dictWord{ + 4, + 11, + 546, + }, + dictWord{142, 11, 233}, + dictWord{6, 0, 1043}, + dictWord{134, 0, 1574}, + dictWord{134, 0, 1496}, + dictWord{4, 10, 102}, + dictWord{7, 10, 815}, + dictWord{7, 10, 1699}, + dictWord{139, 10, 964}, + dictWord{12, 0, 781}, + dictWord{142, 0, 461}, + dictWord{4, 11, 313}, + dictWord{133, 11, 577}, + dictWord{ + 6, + 0, + 639, + }, + dictWord{6, 0, 1114}, + dictWord{137, 0, 817}, + dictWord{8, 11, 184}, + dictWord{141, 11, 433}, + dictWord{7, 0, 1814}, + dictWord{135, 11, 935}, + dictWord{ + 10, + 0, + 997, + }, + dictWord{140, 0, 958}, + dictWord{4, 0, 812}, + dictWord{137, 11, 625}, + dictWord{132, 10, 899}, + dictWord{136, 10, 795}, + dictWord{5, 11, 886}, + dictWord{6, 11, 46}, + dictWord{6, 11, 1790}, + dictWord{7, 11, 14}, + dictWord{7, 11, 732}, + dictWord{7, 11, 1654}, + dictWord{8, 11, 95}, + dictWord{8, 11, 327}, + dictWord{ + 8, + 11, + 616, + }, + dictWord{10, 11, 598}, + dictWord{10, 11, 769}, + dictWord{11, 11, 134}, + dictWord{11, 11, 747}, + dictWord{12, 11, 378}, + dictWord{142, 11, 97}, + dictWord{136, 0, 139}, + dictWord{6, 10, 52}, + dictWord{9, 10, 104}, + dictWord{9, 10, 559}, + dictWord{12, 10, 308}, + dictWord{147, 10, 87}, + dictWord{133, 11, 1021}, + dictWord{132, 10, 604}, + dictWord{132, 10, 301}, + dictWord{136, 10, 779}, + dictWord{7, 0, 643}, + dictWord{136, 0, 236}, + dictWord{132, 11, 153}, + dictWord{ + 134, + 0, + 1172, + }, + dictWord{147, 10, 32}, + dictWord{133, 11, 798}, + dictWord{6, 0, 1338}, + dictWord{132, 11, 587}, + dictWord{6, 11, 598}, + dictWord{7, 11, 42}, + dictWord{ + 8, + 11, + 695, + }, + dictWord{10, 11, 212}, + dictWord{11, 11, 158}, + dictWord{14, 11, 196}, + dictWord{145, 11, 85}, + dictWord{135, 10, 508}, + dictWord{5, 11, 957}, + dictWord{5, 11, 1008}, + dictWord{135, 11, 249}, + dictWord{4, 11, 129}, + dictWord{135, 11, 465}, + dictWord{5, 0, 54}, + dictWord{7, 11, 470}, + dictWord{7, 11, 1057}, + dictWord{7, 11, 1201}, + dictWord{9, 11, 755}, + dictWord{11, 11, 906}, + dictWord{140, 11, 527}, + dictWord{7, 11, 908}, + dictWord{146, 11, 7}, + dictWord{ + 5, + 11, + 148, + }, + dictWord{136, 11, 450}, + dictWord{144, 11, 1}, + dictWord{4, 0, 256}, + dictWord{135, 0, 1488}, + dictWord{9, 0, 351}, + dictWord{6, 10, 310}, + dictWord{ + 7, + 10, + 1849, + }, + dictWord{8, 10, 72}, + dictWord{8, 10, 272}, + dictWord{8, 10, 431}, + dictWord{9, 10, 12}, + dictWord{10, 10, 563}, + dictWord{10, 10, 630}, + dictWord{ + 10, + 10, + 796, + }, + dictWord{10, 10, 810}, + dictWord{11, 10, 367}, + dictWord{11, 10, 599}, + dictWord{11, 10, 686}, + dictWord{140, 10, 672}, + dictWord{6, 0, 1885}, + dictWord{ + 6, + 0, + 1898, + }, + dictWord{6, 0, 1899}, + dictWord{140, 0, 955}, + dictWord{4, 0, 714}, + dictWord{133, 0, 469}, + dictWord{6, 0, 1270}, + dictWord{134, 0, 1456}, + dictWord{132, 0, 744}, + dictWord{6, 0, 313}, + dictWord{7, 10, 537}, + dictWord{8, 10, 64}, + dictWord{9, 10, 127}, + dictWord{10, 10, 496}, + dictWord{12, 10, 510}, + dictWord{141, 10, 384}, + dictWord{4, 11, 217}, + dictWord{4, 10, 244}, + dictWord{5, 11, 710}, + dictWord{7, 10, 233}, + dictWord{7, 11, 1926}, + dictWord{9, 11, 428}, + dictWord{9, 11, 708}, + dictWord{10, 11, 254}, + dictWord{10, 11, 296}, + dictWord{10, 11, 720}, + dictWord{11, 11, 109}, + 
dictWord{11, 11, 255}, + dictWord{12, 11, 165}, + dictWord{12, 11, 315}, + dictWord{13, 11, 107}, + dictWord{13, 11, 203}, + dictWord{14, 11, 54}, + dictWord{14, 11, 99}, + dictWord{14, 11, 114}, + dictWord{ + 14, + 11, + 388, + }, + dictWord{16, 11, 85}, + dictWord{17, 11, 9}, + dictWord{17, 11, 33}, + dictWord{20, 11, 25}, + dictWord{20, 11, 28}, + dictWord{20, 11, 29}, + dictWord{21, 11, 9}, + dictWord{21, 11, 10}, + dictWord{21, 11, 34}, + dictWord{150, 11, 17}, + dictWord{138, 0, 402}, + dictWord{7, 0, 969}, + dictWord{146, 0, 55}, + dictWord{8, 0, 50}, + dictWord{ + 137, + 0, + 624, + }, + dictWord{134, 0, 1355}, + dictWord{132, 0, 572}, + dictWord{134, 10, 1650}, + dictWord{10, 10, 702}, + dictWord{139, 10, 245}, + dictWord{ + 10, + 0, + 847, + }, + dictWord{142, 0, 445}, + dictWord{6, 0, 43}, + dictWord{7, 0, 38}, + dictWord{8, 0, 248}, + dictWord{138, 0, 513}, + dictWord{133, 0, 369}, + dictWord{137, 10, 338}, + dictWord{133, 0, 766}, + dictWord{133, 0, 363}, + dictWord{133, 10, 896}, + dictWord{8, 11, 392}, + dictWord{11, 11, 54}, + dictWord{13, 11, 173}, + dictWord{ + 13, + 11, + 294, + }, + dictWord{148, 11, 7}, + dictWord{134, 0, 678}, + dictWord{7, 11, 1230}, + dictWord{136, 11, 531}, + dictWord{6, 0, 258}, + dictWord{140, 0, 409}, + dictWord{ + 5, + 0, + 249, + }, + dictWord{148, 0, 82}, + dictWord{7, 10, 1117}, + dictWord{136, 10, 539}, + dictWord{5, 0, 393}, + dictWord{6, 0, 378}, + dictWord{7, 0, 1981}, + dictWord{9, 0, 32}, + dictWord{9, 0, 591}, + dictWord{10, 0, 685}, + dictWord{10, 0, 741}, + dictWord{142, 0, 382}, + dictWord{133, 0, 788}, + dictWord{134, 0, 1281}, + dictWord{ + 134, + 0, + 1295, + }, + dictWord{7, 0, 1968}, + dictWord{141, 0, 509}, + dictWord{4, 0, 61}, + dictWord{5, 0, 58}, + dictWord{5, 0, 171}, + dictWord{5, 0, 683}, + dictWord{6, 0, 291}, + dictWord{ + 6, + 0, + 566, + }, + dictWord{7, 0, 1650}, + dictWord{11, 0, 523}, + dictWord{12, 0, 273}, + dictWord{12, 0, 303}, + dictWord{15, 0, 39}, + dictWord{143, 0, 111}, + dictWord{ + 6, + 0, + 706, + }, + dictWord{134, 0, 1283}, + dictWord{134, 0, 589}, + dictWord{135, 11, 1433}, + dictWord{133, 11, 435}, + dictWord{7, 0, 1059}, + dictWord{13, 0, 54}, + dictWord{ + 5, + 10, + 4, + }, + dictWord{5, 10, 810}, + dictWord{6, 10, 13}, + dictWord{6, 10, 538}, + dictWord{6, 10, 1690}, + dictWord{6, 10, 1726}, + dictWord{7, 10, 1819}, + dictWord{ + 8, + 10, + 148, + }, + dictWord{8, 10, 696}, + dictWord{8, 10, 791}, + dictWord{12, 10, 125}, + dictWord{143, 10, 9}, + dictWord{135, 10, 1268}, + dictWord{5, 11, 85}, + dictWord{ + 6, + 11, + 419, + }, + dictWord{7, 11, 134}, + dictWord{7, 11, 305}, + dictWord{7, 11, 361}, + dictWord{7, 11, 1337}, + dictWord{8, 11, 71}, + dictWord{140, 11, 519}, + dictWord{ + 137, + 0, + 824, + }, + dictWord{140, 11, 688}, + dictWord{5, 11, 691}, + dictWord{7, 11, 345}, + dictWord{7, 10, 1385}, + dictWord{9, 11, 94}, + dictWord{11, 10, 582}, + dictWord{ + 11, + 10, + 650, + }, + dictWord{11, 10, 901}, + dictWord{11, 10, 949}, + dictWord{12, 11, 169}, + dictWord{12, 10, 232}, + dictWord{12, 10, 236}, + dictWord{13, 10, 413}, + dictWord{13, 10, 501}, + dictWord{146, 10, 116}, + dictWord{4, 0, 917}, + dictWord{133, 0, 1005}, + dictWord{7, 0, 1598}, + dictWord{5, 11, 183}, + dictWord{6, 11, 582}, + dictWord{9, 11, 344}, + dictWord{10, 11, 679}, + dictWord{140, 11, 435}, + dictWord{4, 10, 925}, + dictWord{5, 10, 803}, + dictWord{8, 10, 698}, + dictWord{ + 138, + 10, + 828, + }, + dictWord{132, 0, 919}, + dictWord{135, 11, 511}, + dictWord{139, 10, 992}, + dictWord{4, 0, 255}, + 
dictWord{5, 0, 302}, + dictWord{6, 0, 132}, + dictWord{ + 7, + 0, + 128, + }, + dictWord{7, 0, 283}, + dictWord{7, 0, 1299}, + dictWord{10, 0, 52}, + dictWord{10, 0, 514}, + dictWord{11, 0, 925}, + dictWord{13, 0, 92}, + dictWord{142, 0, 309}, + dictWord{134, 0, 1369}, + dictWord{135, 10, 1847}, + dictWord{134, 0, 328}, + dictWord{7, 11, 1993}, + dictWord{136, 11, 684}, + dictWord{133, 10, 383}, + dictWord{137, 0, 173}, + dictWord{134, 11, 583}, + dictWord{134, 0, 1411}, + dictWord{19, 0, 65}, + dictWord{5, 11, 704}, + dictWord{8, 11, 357}, + dictWord{10, 11, 745}, + dictWord{14, 11, 426}, + dictWord{17, 11, 94}, + dictWord{147, 11, 57}, + dictWord{9, 10, 660}, + dictWord{138, 10, 347}, + dictWord{4, 11, 179}, + dictWord{5, 11, 198}, + dictWord{133, 11, 697}, + dictWord{7, 11, 347}, + dictWord{7, 11, 971}, + dictWord{8, 11, 181}, + dictWord{138, 11, 711}, + dictWord{141, 0, 442}, + dictWord{ + 11, + 0, + 842, + }, + dictWord{11, 0, 924}, + dictWord{13, 0, 317}, + dictWord{13, 0, 370}, + dictWord{13, 0, 469}, + dictWord{13, 0, 471}, + dictWord{14, 0, 397}, + dictWord{18, 0, 69}, + dictWord{18, 0, 145}, + dictWord{7, 10, 572}, + dictWord{9, 10, 592}, + dictWord{11, 10, 680}, + dictWord{12, 10, 356}, + dictWord{140, 10, 550}, + dictWord{14, 11, 19}, + dictWord{14, 11, 28}, + dictWord{144, 11, 29}, + dictWord{136, 0, 534}, + dictWord{4, 11, 243}, + dictWord{5, 11, 203}, + dictWord{7, 11, 19}, + dictWord{7, 11, 71}, + dictWord{7, 11, 113}, + dictWord{10, 11, 405}, + dictWord{11, 11, 357}, + dictWord{142, 11, 240}, + dictWord{6, 0, 210}, + dictWord{10, 0, 845}, + dictWord{138, 0, 862}, + dictWord{7, 11, 1351}, + dictWord{9, 11, 581}, + dictWord{10, 11, 639}, + dictWord{11, 11, 453}, + dictWord{140, 11, 584}, + dictWord{7, 11, 1450}, + dictWord{ + 139, + 11, + 99, + }, + dictWord{10, 0, 892}, + dictWord{12, 0, 719}, + dictWord{144, 0, 105}, + dictWord{4, 0, 284}, + dictWord{6, 0, 223}, + dictWord{134, 11, 492}, + dictWord{5, 11, 134}, + dictWord{6, 11, 408}, + dictWord{6, 11, 495}, + dictWord{135, 11, 1593}, + dictWord{136, 0, 529}, + dictWord{137, 0, 807}, + dictWord{4, 0, 218}, + dictWord{7, 0, 526}, + dictWord{143, 0, 137}, + dictWord{6, 0, 1444}, + dictWord{142, 11, 4}, + dictWord{132, 11, 665}, + dictWord{4, 0, 270}, + dictWord{5, 0, 192}, + dictWord{6, 0, 332}, + dictWord{7, 0, 1322}, + dictWord{4, 11, 248}, + dictWord{7, 11, 137}, + dictWord{137, 11, 349}, + dictWord{140, 0, 661}, + dictWord{7, 0, 1517}, + dictWord{11, 0, 597}, + dictWord{14, 0, 76}, + dictWord{14, 0, 335}, + dictWord{20, 0, 33}, + dictWord{7, 10, 748}, + dictWord{139, 10, 700}, + dictWord{5, 11, 371}, + dictWord{135, 11, 563}, + dictWord{146, 11, 57}, + dictWord{133, 10, 127}, + dictWord{133, 0, 418}, + dictWord{4, 11, 374}, + dictWord{7, 11, 547}, + dictWord{7, 11, 1700}, + dictWord{7, 11, 1833}, + dictWord{139, 11, 858}, + dictWord{6, 10, 198}, + dictWord{140, 10, 83}, + dictWord{7, 11, 1812}, + dictWord{13, 11, 259}, + dictWord{13, 11, 356}, + dictWord{ + 14, + 11, + 242, + }, + dictWord{147, 11, 114}, + dictWord{7, 0, 379}, + dictWord{8, 0, 481}, + dictWord{9, 0, 377}, + dictWord{5, 10, 276}, + dictWord{6, 10, 55}, + dictWord{ + 135, + 10, + 1369, + }, + dictWord{138, 11, 286}, + dictWord{5, 0, 1003}, + dictWord{6, 0, 149}, + dictWord{6, 10, 1752}, + dictWord{136, 10, 726}, + dictWord{8, 0, 262}, + dictWord{ + 9, + 0, + 627, + }, + dictWord{10, 0, 18}, + dictWord{11, 0, 214}, + dictWord{11, 0, 404}, + dictWord{11, 0, 457}, + dictWord{11, 0, 780}, + dictWord{11, 0, 913}, + dictWord{13, 0, 401}, + dictWord{14, 0, 200}, 
+ dictWord{6, 11, 1647}, + dictWord{7, 11, 1552}, + dictWord{7, 11, 2010}, + dictWord{9, 11, 494}, + dictWord{137, 11, 509}, + dictWord{ + 135, + 0, + 742, + }, + dictWord{136, 0, 304}, + dictWord{132, 0, 142}, + dictWord{133, 10, 764}, + dictWord{6, 10, 309}, + dictWord{7, 10, 331}, + dictWord{138, 10, 550}, + dictWord{135, 10, 1062}, + dictWord{6, 11, 123}, + dictWord{7, 11, 214}, + dictWord{7, 10, 986}, + dictWord{9, 11, 728}, + dictWord{10, 11, 157}, + dictWord{11, 11, 346}, + dictWord{11, 11, 662}, + dictWord{143, 11, 106}, + dictWord{135, 10, 1573}, + dictWord{7, 0, 925}, + dictWord{137, 0, 799}, + dictWord{4, 0, 471}, + dictWord{5, 0, 51}, + dictWord{6, 0, 602}, + dictWord{8, 0, 484}, + dictWord{138, 0, 195}, + dictWord{136, 0, 688}, + dictWord{132, 0, 697}, + dictWord{6, 0, 1169}, + dictWord{6, 0, 1241}, + dictWord{6, 10, 194}, + dictWord{7, 10, 133}, + dictWord{10, 10, 493}, + dictWord{10, 10, 570}, + dictWord{139, 10, 664}, + dictWord{140, 0, 751}, + dictWord{7, 0, 929}, + dictWord{10, 0, 452}, + dictWord{11, 0, 878}, + dictWord{16, 0, 33}, + dictWord{5, 10, 24}, + dictWord{5, 10, 569}, + dictWord{6, 10, 3}, + dictWord{6, 10, 119}, + dictWord{ + 6, + 10, + 143, + }, + dictWord{6, 10, 440}, + dictWord{7, 10, 599}, + dictWord{7, 10, 1686}, + dictWord{7, 10, 1854}, + dictWord{8, 10, 424}, + dictWord{9, 10, 43}, + dictWord{ + 9, + 10, + 584, + }, + dictWord{9, 10, 760}, + dictWord{10, 10, 328}, + dictWord{11, 10, 159}, + dictWord{11, 10, 253}, + dictWord{12, 10, 487}, + dictWord{140, 10, 531}, + dictWord{ + 4, + 11, + 707, + }, + dictWord{13, 11, 106}, + dictWord{18, 11, 49}, + dictWord{147, 11, 41}, + dictWord{5, 0, 221}, + dictWord{5, 11, 588}, + dictWord{134, 11, 393}, + dictWord{134, 0, 1437}, + dictWord{6, 11, 211}, + dictWord{7, 11, 1690}, + dictWord{11, 11, 486}, + dictWord{140, 11, 369}, + dictWord{5, 10, 14}, + dictWord{5, 10, 892}, + dictWord{6, 10, 283}, + dictWord{7, 10, 234}, + dictWord{136, 10, 537}, + dictWord{4, 0, 988}, + dictWord{136, 0, 955}, + dictWord{135, 0, 1251}, + dictWord{4, 10, 126}, + dictWord{8, 10, 635}, + dictWord{147, 10, 34}, + dictWord{4, 10, 316}, + dictWord{135, 10, 1561}, + dictWord{137, 10, 861}, + dictWord{4, 10, 64}, + dictWord{ + 5, + 10, + 352, + }, + dictWord{5, 10, 720}, + dictWord{6, 10, 368}, + dictWord{139, 10, 359}, + dictWord{134, 0, 192}, + dictWord{4, 0, 132}, + dictWord{5, 0, 69}, + dictWord{ + 135, + 0, + 1242, + }, + dictWord{7, 10, 1577}, + dictWord{10, 10, 304}, + dictWord{10, 10, 549}, + dictWord{12, 10, 365}, + dictWord{13, 10, 220}, + dictWord{13, 10, 240}, + dictWord{142, 10, 33}, + dictWord{4, 0, 111}, + dictWord{7, 0, 865}, + dictWord{134, 11, 219}, + dictWord{5, 11, 582}, + dictWord{6, 11, 1646}, + dictWord{7, 11, 99}, + dictWord{ + 7, + 11, + 1962, + }, + dictWord{7, 11, 1986}, + dictWord{8, 11, 515}, + dictWord{8, 11, 773}, + dictWord{9, 11, 23}, + dictWord{9, 11, 491}, + dictWord{12, 11, 620}, + dictWord{ + 14, + 11, + 52, + }, + dictWord{145, 11, 50}, + dictWord{132, 0, 767}, + dictWord{7, 11, 568}, + dictWord{148, 11, 21}, + dictWord{6, 0, 42}, + dictWord{7, 0, 1416}, + dictWord{ + 7, + 0, + 2005, + }, + dictWord{8, 0, 131}, + dictWord{8, 0, 466}, + dictWord{9, 0, 672}, + dictWord{13, 0, 252}, + dictWord{20, 0, 103}, + dictWord{133, 11, 851}, + dictWord{ + 135, + 0, + 1050, + }, + dictWord{6, 10, 175}, + dictWord{137, 10, 289}, + dictWord{5, 10, 432}, + dictWord{133, 10, 913}, + dictWord{6, 0, 44}, + dictWord{136, 0, 368}, + dictWord{ + 135, + 11, + 784, + }, + dictWord{132, 0, 570}, + dictWord{133, 0, 120}, + 
dictWord{139, 10, 595}, + dictWord{140, 0, 29}, + dictWord{6, 0, 227}, + dictWord{135, 0, 1589}, + dictWord{4, 11, 98}, + dictWord{7, 11, 1365}, + dictWord{9, 11, 422}, + dictWord{9, 11, 670}, + dictWord{10, 11, 775}, + dictWord{11, 11, 210}, + dictWord{13, 11, 26}, + dictWord{13, 11, 457}, + dictWord{141, 11, 476}, + dictWord{140, 10, 80}, + dictWord{5, 10, 931}, + dictWord{134, 10, 1698}, + dictWord{133, 0, 522}, + dictWord{ + 134, + 0, + 1120, + }, + dictWord{135, 0, 1529}, + dictWord{12, 0, 739}, + dictWord{14, 0, 448}, + dictWord{142, 0, 467}, + dictWord{11, 10, 526}, + dictWord{11, 10, 939}, + dictWord{141, 10, 290}, + dictWord{5, 10, 774}, + dictWord{6, 10, 1637}, + dictWord{6, 10, 1686}, + dictWord{134, 10, 1751}, + dictWord{6, 0, 1667}, + dictWord{ + 135, + 0, + 2036, + }, + dictWord{7, 10, 1167}, + dictWord{11, 10, 934}, + dictWord{13, 10, 391}, + dictWord{145, 10, 76}, + dictWord{137, 11, 147}, + dictWord{6, 10, 260}, + dictWord{ + 7, + 10, + 1484, + }, + dictWord{11, 11, 821}, + dictWord{12, 11, 110}, + dictWord{12, 11, 153}, + dictWord{18, 11, 41}, + dictWord{150, 11, 19}, + dictWord{6, 0, 511}, + dictWord{12, 0, 132}, + dictWord{134, 10, 573}, + dictWord{5, 0, 568}, + dictWord{6, 0, 138}, + dictWord{135, 0, 1293}, + dictWord{132, 0, 1020}, + dictWord{8, 0, 258}, + dictWord{9, 0, 208}, + dictWord{137, 0, 359}, + dictWord{4, 0, 565}, + dictWord{8, 0, 23}, + dictWord{136, 0, 827}, + dictWord{134, 0, 344}, + dictWord{4, 0, 922}, + dictWord{ + 5, + 0, + 1023, + }, + dictWord{13, 11, 477}, + dictWord{14, 11, 120}, + dictWord{148, 11, 61}, + dictWord{134, 0, 240}, + dictWord{5, 11, 209}, + dictWord{6, 11, 30}, + dictWord{ + 11, + 11, + 56, + }, + dictWord{139, 11, 305}, + dictWord{6, 0, 171}, + dictWord{7, 0, 1002}, + dictWord{7, 0, 1324}, + dictWord{9, 0, 415}, + dictWord{14, 0, 230}, + dictWord{ + 18, + 0, + 68, + }, + dictWord{4, 10, 292}, + dictWord{4, 10, 736}, + dictWord{5, 10, 871}, + dictWord{6, 10, 1689}, + dictWord{7, 10, 1944}, + dictWord{137, 10, 580}, + dictWord{ + 9, + 11, + 635, + }, + dictWord{139, 11, 559}, + dictWord{4, 11, 150}, + dictWord{5, 11, 303}, + dictWord{134, 11, 327}, + dictWord{6, 10, 63}, + dictWord{135, 10, 920}, + dictWord{ + 133, + 10, + 793, + }, + dictWord{8, 11, 192}, + dictWord{10, 11, 78}, + dictWord{10, 11, 555}, + dictWord{11, 11, 308}, + dictWord{13, 11, 359}, + dictWord{147, 11, 95}, + dictWord{135, 11, 786}, + dictWord{135, 11, 1712}, + dictWord{136, 0, 402}, + dictWord{6, 0, 754}, + dictWord{6, 11, 1638}, + dictWord{7, 11, 79}, + dictWord{7, 11, 496}, + dictWord{9, 11, 138}, + dictWord{10, 11, 336}, + dictWord{11, 11, 12}, + dictWord{12, 11, 412}, + dictWord{12, 11, 440}, + dictWord{142, 11, 305}, + dictWord{4, 0, 716}, + dictWord{141, 0, 31}, + dictWord{133, 0, 982}, + dictWord{8, 0, 691}, + dictWord{8, 0, 731}, + dictWord{5, 10, 67}, + dictWord{6, 10, 62}, + dictWord{6, 10, 374}, + dictWord{ + 135, + 10, + 1391, + }, + dictWord{9, 10, 790}, + dictWord{140, 10, 47}, + dictWord{139, 11, 556}, + dictWord{151, 11, 1}, + dictWord{7, 11, 204}, + dictWord{7, 11, 415}, + dictWord{8, 11, 42}, + dictWord{10, 11, 85}, + dictWord{11, 11, 33}, + dictWord{11, 11, 564}, + dictWord{12, 11, 571}, + dictWord{149, 11, 1}, + dictWord{8, 0, 888}, + dictWord{ + 7, + 11, + 610, + }, + dictWord{135, 11, 1501}, + dictWord{4, 10, 391}, + dictWord{135, 10, 1169}, + dictWord{5, 0, 847}, + dictWord{9, 0, 840}, + dictWord{138, 0, 803}, + dictWord{137, 0, 823}, + dictWord{134, 0, 785}, + dictWord{8, 0, 152}, + dictWord{9, 0, 53}, + dictWord{9, 0, 268}, + dictWord{9, 
0, 901}, + dictWord{10, 0, 518}, + dictWord{ + 10, + 0, + 829, + }, + dictWord{11, 0, 188}, + dictWord{13, 0, 74}, + dictWord{14, 0, 46}, + dictWord{15, 0, 17}, + dictWord{15, 0, 33}, + dictWord{17, 0, 40}, + dictWord{18, 0, 36}, + dictWord{ + 19, + 0, + 20, + }, + dictWord{22, 0, 1}, + dictWord{152, 0, 2}, + dictWord{4, 11, 3}, + dictWord{5, 11, 247}, + dictWord{5, 11, 644}, + dictWord{7, 11, 744}, + dictWord{7, 11, 1207}, + dictWord{7, 11, 1225}, + dictWord{7, 11, 1909}, + dictWord{146, 11, 147}, + dictWord{136, 0, 532}, + dictWord{135, 0, 681}, + dictWord{132, 10, 271}, + dictWord{ + 140, + 0, + 314, + }, + dictWord{140, 0, 677}, + dictWord{4, 0, 684}, + dictWord{136, 0, 384}, + dictWord{5, 11, 285}, + dictWord{9, 11, 67}, + dictWord{13, 11, 473}, + dictWord{ + 143, + 11, + 82, + }, + dictWord{4, 10, 253}, + dictWord{5, 10, 544}, + dictWord{7, 10, 300}, + dictWord{137, 10, 340}, + dictWord{7, 0, 110}, + dictWord{7, 0, 447}, + dictWord{8, 0, 290}, + dictWord{8, 0, 591}, + dictWord{9, 0, 382}, + dictWord{9, 0, 649}, + dictWord{11, 0, 71}, + dictWord{11, 0, 155}, + dictWord{11, 0, 313}, + dictWord{12, 0, 5}, + dictWord{13, 0, 325}, + dictWord{142, 0, 287}, + dictWord{134, 0, 1818}, + dictWord{136, 0, 1007}, + dictWord{138, 0, 321}, + dictWord{7, 0, 360}, + dictWord{7, 0, 425}, + dictWord{9, 0, 66}, + dictWord{9, 0, 278}, + dictWord{138, 0, 644}, + dictWord{133, 10, 818}, + dictWord{5, 0, 385}, + dictWord{5, 10, 541}, + dictWord{6, 10, 94}, + dictWord{6, 10, 499}, + dictWord{ + 7, + 10, + 230, + }, + dictWord{139, 10, 321}, + dictWord{4, 10, 920}, + dictWord{5, 10, 25}, + dictWord{5, 10, 790}, + dictWord{6, 10, 457}, + dictWord{7, 10, 853}, + dictWord{ + 136, + 10, + 788, + }, + dictWord{4, 0, 900}, + dictWord{133, 0, 861}, + dictWord{5, 0, 254}, + dictWord{7, 0, 985}, + dictWord{136, 0, 73}, + dictWord{7, 0, 1959}, + dictWord{ + 136, + 0, + 683, + }, + dictWord{134, 10, 1765}, + dictWord{133, 10, 822}, + dictWord{132, 10, 634}, + dictWord{4, 11, 29}, + dictWord{6, 11, 532}, + dictWord{7, 11, 1628}, + dictWord{ + 7, + 11, + 1648, + }, + dictWord{9, 11, 303}, + dictWord{9, 11, 350}, + dictWord{10, 11, 433}, + dictWord{11, 11, 97}, + dictWord{11, 11, 557}, + dictWord{11, 11, 745}, + dictWord{12, 11, 289}, + dictWord{12, 11, 335}, + dictWord{12, 11, 348}, + dictWord{12, 11, 606}, + dictWord{13, 11, 116}, + dictWord{13, 11, 233}, + dictWord{ + 13, + 11, + 466, + }, + dictWord{14, 11, 181}, + dictWord{14, 11, 209}, + dictWord{14, 11, 232}, + dictWord{14, 11, 236}, + dictWord{14, 11, 300}, + dictWord{16, 11, 41}, + dictWord{ + 148, + 11, + 97, + }, + dictWord{19, 0, 86}, + dictWord{6, 10, 36}, + dictWord{7, 10, 658}, + dictWord{136, 10, 454}, + dictWord{135, 11, 1692}, + dictWord{132, 0, 725}, + dictWord{ + 5, + 11, + 501, + }, + dictWord{7, 11, 1704}, + dictWord{9, 11, 553}, + dictWord{11, 11, 520}, + dictWord{12, 11, 557}, + dictWord{141, 11, 249}, + dictWord{134, 0, 196}, + dictWord{133, 0, 831}, + dictWord{136, 0, 723}, + dictWord{7, 0, 1897}, + dictWord{13, 0, 80}, + dictWord{13, 0, 437}, + dictWord{145, 0, 74}, + dictWord{4, 0, 992}, + dictWord{ + 6, + 0, + 627, + }, + dictWord{136, 0, 994}, + dictWord{135, 11, 1294}, + dictWord{132, 10, 104}, + dictWord{5, 0, 848}, + dictWord{6, 0, 66}, + dictWord{136, 0, 764}, + dictWord{ + 4, + 0, + 36, + }, + dictWord{7, 0, 1387}, + dictWord{10, 0, 205}, + dictWord{139, 0, 755}, + dictWord{6, 0, 1046}, + dictWord{134, 0, 1485}, + dictWord{134, 0, 950}, + dictWord{132, 0, 887}, + dictWord{14, 0, 450}, + dictWord{148, 0, 111}, + dictWord{7, 0, 620}, + 
dictWord{7, 0, 831}, + dictWord{9, 10, 542}, + dictWord{9, 10, 566}, + dictWord{ + 138, + 10, + 728, + }, + dictWord{6, 0, 165}, + dictWord{138, 0, 388}, + dictWord{139, 10, 263}, + dictWord{4, 0, 719}, + dictWord{135, 0, 155}, + dictWord{138, 10, 468}, + dictWord{6, 11, 453}, + dictWord{144, 11, 36}, + dictWord{134, 11, 129}, + dictWord{5, 0, 533}, + dictWord{7, 0, 755}, + dictWord{138, 0, 780}, + dictWord{134, 0, 1465}, + dictWord{4, 0, 353}, + dictWord{6, 0, 146}, + dictWord{6, 0, 1789}, + dictWord{7, 0, 427}, + dictWord{7, 0, 990}, + dictWord{7, 0, 1348}, + dictWord{9, 0, 665}, + dictWord{9, 0, 898}, + dictWord{11, 0, 893}, + dictWord{142, 0, 212}, + dictWord{7, 10, 87}, + dictWord{142, 10, 288}, + dictWord{4, 0, 45}, + dictWord{135, 0, 1257}, + dictWord{12, 0, 7}, + dictWord{7, 10, 988}, + dictWord{7, 10, 1939}, + dictWord{9, 10, 64}, + dictWord{9, 10, 502}, + dictWord{12, 10, 34}, + dictWord{13, 10, 12}, + dictWord{13, 10, 234}, + dictWord{147, 10, 77}, + dictWord{4, 0, 607}, + dictWord{5, 11, 60}, + dictWord{6, 11, 504}, + dictWord{7, 11, 614}, + dictWord{7, 11, 1155}, + dictWord{140, 11, 0}, + dictWord{ + 135, + 10, + 141, + }, + dictWord{8, 11, 198}, + dictWord{11, 11, 29}, + dictWord{140, 11, 534}, + dictWord{140, 0, 65}, + dictWord{136, 0, 816}, + dictWord{132, 10, 619}, + dictWord{139, 0, 88}, + dictWord{5, 10, 246}, + dictWord{8, 10, 189}, + dictWord{9, 10, 355}, + dictWord{9, 10, 512}, + dictWord{10, 10, 124}, + dictWord{10, 10, 453}, + dictWord{11, 10, 143}, + dictWord{11, 10, 416}, + dictWord{11, 10, 859}, + dictWord{141, 10, 341}, + dictWord{4, 11, 379}, + dictWord{135, 11, 1397}, + dictWord{ + 4, + 0, + 600, + }, + dictWord{137, 0, 621}, + dictWord{133, 0, 367}, + dictWord{134, 0, 561}, + dictWord{6, 0, 559}, + dictWord{134, 0, 1691}, + dictWord{6, 0, 585}, + dictWord{ + 134, + 11, + 585, + }, + dictWord{135, 11, 1228}, + dictWord{4, 11, 118}, + dictWord{5, 10, 678}, + dictWord{6, 11, 274}, + dictWord{6, 11, 361}, + dictWord{7, 11, 75}, + dictWord{ + 141, + 11, + 441, + }, + dictWord{135, 11, 1818}, + dictWord{137, 11, 841}, + dictWord{5, 0, 573}, + dictWord{6, 0, 287}, + dictWord{7, 10, 862}, + dictWord{7, 10, 1886}, + dictWord{138, 10, 179}, + dictWord{132, 10, 517}, + dictWord{140, 11, 693}, + dictWord{5, 11, 314}, + dictWord{6, 11, 221}, + dictWord{7, 11, 419}, + dictWord{ + 10, + 11, + 650, + }, + dictWord{11, 11, 396}, + dictWord{12, 11, 156}, + dictWord{13, 11, 369}, + dictWord{14, 11, 333}, + dictWord{145, 11, 47}, + dictWord{140, 10, 540}, + dictWord{136, 10, 667}, + dictWord{11, 10, 403}, + dictWord{146, 10, 83}, + dictWord{6, 0, 672}, + dictWord{133, 10, 761}, + dictWord{9, 0, 157}, + dictWord{10, 10, 131}, + dictWord{140, 10, 72}, + dictWord{7, 0, 714}, + dictWord{134, 11, 460}, + dictWord{134, 0, 456}, + dictWord{133, 0, 925}, + dictWord{5, 11, 682}, + dictWord{ + 135, + 11, + 1887, + }, + dictWord{136, 11, 510}, + dictWord{136, 11, 475}, + dictWord{133, 11, 1016}, + dictWord{9, 0, 19}, + dictWord{7, 11, 602}, + dictWord{8, 11, 179}, + dictWord{ + 10, + 11, + 781, + }, + dictWord{140, 11, 126}, + dictWord{6, 11, 329}, + dictWord{138, 11, 111}, + dictWord{6, 0, 822}, + dictWord{134, 0, 1473}, + dictWord{144, 11, 86}, + dictWord{11, 0, 113}, + dictWord{139, 11, 113}, + dictWord{5, 11, 821}, + dictWord{134, 11, 1687}, + dictWord{133, 10, 449}, + dictWord{7, 0, 463}, + dictWord{ + 17, + 0, + 69, + }, + dictWord{136, 10, 103}, + dictWord{7, 10, 2028}, + dictWord{138, 10, 641}, + dictWord{6, 0, 193}, + dictWord{7, 0, 240}, + dictWord{7, 0, 1682}, + dictWord{ + 10, 
+ 0, + 51, + }, + dictWord{10, 0, 640}, + dictWord{11, 0, 410}, + dictWord{13, 0, 82}, + dictWord{14, 0, 247}, + dictWord{14, 0, 331}, + dictWord{142, 0, 377}, + dictWord{6, 0, 471}, + dictWord{11, 0, 411}, + dictWord{142, 0, 2}, + dictWord{5, 11, 71}, + dictWord{7, 11, 1407}, + dictWord{9, 11, 388}, + dictWord{9, 11, 704}, + dictWord{10, 11, 261}, + dictWord{ + 10, + 11, + 619, + }, + dictWord{11, 11, 547}, + dictWord{11, 11, 619}, + dictWord{143, 11, 157}, + dictWord{136, 0, 633}, + dictWord{135, 0, 1148}, + dictWord{6, 0, 554}, + dictWord{7, 0, 1392}, + dictWord{12, 0, 129}, + dictWord{7, 10, 1274}, + dictWord{7, 10, 1386}, + dictWord{7, 11, 2008}, + dictWord{9, 11, 337}, + dictWord{10, 11, 517}, + dictWord{146, 10, 87}, + dictWord{7, 0, 803}, + dictWord{8, 0, 542}, + dictWord{6, 10, 187}, + dictWord{7, 10, 1203}, + dictWord{8, 10, 380}, + dictWord{14, 10, 117}, + dictWord{149, 10, 28}, + dictWord{6, 10, 297}, + dictWord{7, 10, 793}, + dictWord{139, 10, 938}, + dictWord{8, 0, 438}, + dictWord{11, 0, 363}, + dictWord{7, 10, 464}, + dictWord{11, 10, 105}, + dictWord{12, 10, 231}, + dictWord{14, 10, 386}, + dictWord{15, 10, 102}, + dictWord{148, 10, 75}, + dictWord{5, 11, 16}, + dictWord{6, 11, 86}, + dictWord{6, 11, 603}, + dictWord{7, 11, 292}, + dictWord{7, 11, 561}, + dictWord{8, 11, 257}, + dictWord{8, 11, 382}, + dictWord{9, 11, 721}, + dictWord{9, 11, 778}, + dictWord{ + 11, + 11, + 581, + }, + dictWord{140, 11, 466}, + dictWord{6, 0, 717}, + dictWord{4, 11, 486}, + dictWord{133, 11, 491}, + dictWord{132, 0, 875}, + dictWord{132, 11, 72}, + dictWord{6, 11, 265}, + dictWord{135, 11, 847}, + dictWord{4, 0, 237}, + dictWord{135, 0, 514}, + dictWord{6, 0, 392}, + dictWord{7, 0, 65}, + dictWord{135, 0, 2019}, + dictWord{140, 11, 261}, + dictWord{135, 11, 922}, + dictWord{137, 11, 404}, + dictWord{12, 0, 563}, + dictWord{14, 0, 101}, + dictWord{18, 0, 129}, + dictWord{ + 7, + 10, + 1010, + }, + dictWord{11, 10, 733}, + dictWord{11, 10, 759}, + dictWord{13, 10, 34}, + dictWord{146, 10, 45}, + dictWord{7, 10, 1656}, + dictWord{9, 10, 369}, + dictWord{ + 10, + 10, + 338, + }, + dictWord{10, 10, 490}, + dictWord{11, 10, 154}, + dictWord{11, 10, 545}, + dictWord{11, 10, 775}, + dictWord{13, 10, 77}, + dictWord{141, 10, 274}, + dictWord{4, 0, 444}, + dictWord{10, 0, 146}, + dictWord{140, 0, 9}, + dictWord{139, 11, 163}, + dictWord{7, 0, 1260}, + dictWord{135, 0, 1790}, + dictWord{9, 0, 222}, + dictWord{10, 0, 43}, + dictWord{139, 0, 900}, + dictWord{137, 11, 234}, + dictWord{138, 0, 971}, + dictWord{137, 0, 761}, + dictWord{134, 0, 699}, + dictWord{ + 136, + 11, + 434, + }, + dictWord{6, 0, 1116}, + dictWord{7, 0, 1366}, + dictWord{5, 10, 20}, + dictWord{6, 11, 197}, + dictWord{6, 10, 298}, + dictWord{7, 10, 659}, + dictWord{8, 11, 205}, + dictWord{137, 10, 219}, + dictWord{132, 11, 490}, + dictWord{11, 11, 820}, + dictWord{150, 11, 51}, + dictWord{7, 10, 1440}, + dictWord{11, 10, 854}, + dictWord{ + 11, + 10, + 872, + }, + dictWord{11, 10, 921}, + dictWord{12, 10, 551}, + dictWord{13, 10, 472}, + dictWord{142, 10, 367}, + dictWord{140, 11, 13}, + dictWord{132, 0, 829}, + dictWord{12, 0, 242}, + dictWord{132, 10, 439}, + dictWord{136, 10, 669}, + dictWord{6, 0, 593}, + dictWord{6, 11, 452}, + dictWord{7, 11, 312}, + dictWord{ + 138, + 11, + 219, + }, + dictWord{4, 11, 333}, + dictWord{9, 11, 176}, + dictWord{12, 11, 353}, + dictWord{141, 11, 187}, + dictWord{7, 0, 36}, + dictWord{8, 0, 201}, + dictWord{ + 136, + 0, + 605, + }, + dictWord{140, 0, 224}, + dictWord{132, 10, 233}, + dictWord{134, 
0, 1430}, + dictWord{134, 0, 1806}, + dictWord{4, 0, 523}, + dictWord{133, 0, 638}, + dictWord{ + 6, + 0, + 1889, + }, + dictWord{9, 0, 958}, + dictWord{9, 0, 971}, + dictWord{9, 0, 976}, + dictWord{12, 0, 796}, + dictWord{12, 0, 799}, + dictWord{12, 0, 808}, + dictWord{ + 12, + 0, + 835, + }, + dictWord{12, 0, 836}, + dictWord{12, 0, 914}, + dictWord{12, 0, 946}, + dictWord{15, 0, 216}, + dictWord{15, 0, 232}, + dictWord{18, 0, 183}, + dictWord{18, 0, 187}, + dictWord{18, 0, 194}, + dictWord{18, 0, 212}, + dictWord{18, 0, 232}, + dictWord{149, 0, 49}, + dictWord{132, 10, 482}, + dictWord{6, 0, 827}, + dictWord{134, 0, 1434}, + dictWord{135, 10, 346}, + dictWord{134, 0, 2043}, + dictWord{6, 0, 242}, + dictWord{7, 0, 227}, + dictWord{7, 0, 1581}, + dictWord{8, 0, 104}, + dictWord{9, 0, 113}, + dictWord{9, 0, 220}, + dictWord{9, 0, 427}, + dictWord{10, 0, 136}, + dictWord{10, 0, 239}, + dictWord{11, 0, 579}, + dictWord{11, 0, 1023}, + dictWord{13, 0, 4}, + dictWord{ + 13, + 0, + 204, + }, + dictWord{13, 0, 316}, + dictWord{148, 0, 86}, + dictWord{134, 11, 1685}, + dictWord{7, 0, 148}, + dictWord{8, 0, 284}, + dictWord{141, 0, 63}, + dictWord{ + 142, + 0, + 10, + }, + dictWord{135, 11, 584}, + dictWord{134, 0, 1249}, + dictWord{7, 0, 861}, + dictWord{135, 10, 334}, + dictWord{5, 10, 795}, + dictWord{6, 10, 1741}, + dictWord{ + 137, + 11, + 70, + }, + dictWord{132, 0, 807}, + dictWord{7, 11, 135}, + dictWord{8, 11, 7}, + dictWord{8, 11, 62}, + dictWord{9, 11, 243}, + dictWord{10, 11, 658}, + dictWord{ + 10, + 11, + 697, + }, + dictWord{11, 11, 456}, + dictWord{139, 11, 756}, + dictWord{9, 11, 395}, + dictWord{138, 11, 79}, + dictWord{137, 11, 108}, + dictWord{147, 0, 94}, + dictWord{136, 0, 494}, + dictWord{135, 11, 631}, + dictWord{135, 10, 622}, + dictWord{7, 0, 1510}, + dictWord{135, 10, 1750}, + dictWord{4, 10, 203}, + dictWord{ + 135, + 10, + 1936, + }, + dictWord{7, 11, 406}, + dictWord{7, 11, 459}, + dictWord{8, 11, 606}, + dictWord{139, 11, 726}, + dictWord{7, 0, 1306}, + dictWord{8, 0, 505}, + dictWord{ + 9, + 0, + 482, + }, + dictWord{10, 0, 126}, + dictWord{11, 0, 225}, + dictWord{12, 0, 347}, + dictWord{12, 0, 449}, + dictWord{13, 0, 19}, + dictWord{14, 0, 218}, + dictWord{142, 0, 435}, + dictWord{5, 0, 268}, + dictWord{10, 0, 764}, + dictWord{12, 0, 120}, + dictWord{13, 0, 39}, + dictWord{145, 0, 127}, + dictWord{142, 11, 68}, + dictWord{11, 10, 678}, + dictWord{140, 10, 307}, + dictWord{12, 11, 268}, + dictWord{12, 11, 640}, + dictWord{142, 11, 119}, + dictWord{135, 10, 2044}, + dictWord{133, 11, 612}, + dictWord{ + 4, + 11, + 372, + }, + dictWord{7, 11, 482}, + dictWord{8, 11, 158}, + dictWord{9, 11, 602}, + dictWord{9, 11, 615}, + dictWord{10, 11, 245}, + dictWord{10, 11, 678}, + dictWord{ + 10, + 11, + 744, + }, + dictWord{11, 11, 248}, + dictWord{139, 11, 806}, + dictWord{7, 10, 311}, + dictWord{9, 10, 308}, + dictWord{140, 10, 255}, + dictWord{4, 0, 384}, + dictWord{135, 0, 1022}, + dictWord{5, 11, 854}, + dictWord{135, 11, 1991}, + dictWord{135, 10, 1266}, + dictWord{4, 10, 400}, + dictWord{5, 10, 267}, + dictWord{ + 135, + 10, + 232, + }, + dictWord{135, 0, 1703}, + dictWord{9, 0, 159}, + dictWord{11, 0, 661}, + dictWord{140, 0, 603}, + dictWord{4, 0, 964}, + dictWord{14, 0, 438}, + dictWord{ + 14, + 0, + 444, + }, + dictWord{14, 0, 456}, + dictWord{22, 0, 60}, + dictWord{22, 0, 63}, + dictWord{9, 11, 106}, + dictWord{9, 11, 163}, + dictWord{9, 11, 296}, + dictWord{10, 11, 167}, + dictWord{10, 11, 172}, + dictWord{10, 11, 777}, + dictWord{139, 11, 16}, + dictWord{136, 0, 
583}, + dictWord{132, 0, 515}, + dictWord{8, 0, 632}, + dictWord{8, 0, 697}, + dictWord{137, 0, 854}, + dictWord{5, 11, 195}, + dictWord{135, 11, 1685}, + dictWord{6, 0, 1123}, + dictWord{134, 0, 1365}, + dictWord{134, 11, 328}, + dictWord{ + 7, + 11, + 1997, + }, + dictWord{8, 11, 730}, + dictWord{139, 11, 1006}, + dictWord{4, 0, 136}, + dictWord{133, 0, 551}, + dictWord{134, 0, 1782}, + dictWord{7, 0, 1287}, + dictWord{ + 9, + 0, + 44, + }, + dictWord{10, 0, 552}, + dictWord{10, 0, 642}, + dictWord{11, 0, 839}, + dictWord{12, 0, 274}, + dictWord{12, 0, 275}, + dictWord{12, 0, 372}, + dictWord{ + 13, + 0, + 91, + }, + dictWord{142, 0, 125}, + dictWord{5, 11, 751}, + dictWord{11, 11, 797}, + dictWord{140, 11, 203}, + dictWord{133, 0, 732}, + dictWord{7, 0, 679}, + dictWord{ + 8, + 0, + 313, + }, + dictWord{4, 10, 100}, + dictWord{135, 11, 821}, + dictWord{10, 0, 361}, + dictWord{142, 0, 316}, + dictWord{134, 0, 595}, + dictWord{6, 0, 147}, + dictWord{ + 7, + 0, + 886, + }, + dictWord{9, 0, 753}, + dictWord{138, 0, 268}, + dictWord{5, 10, 362}, + dictWord{5, 10, 443}, + dictWord{6, 10, 318}, + dictWord{7, 10, 1019}, + dictWord{ + 139, + 10, + 623, + }, + dictWord{5, 10, 463}, + dictWord{136, 10, 296}, + dictWord{4, 10, 454}, + dictWord{5, 11, 950}, + dictWord{5, 11, 994}, + dictWord{134, 11, 351}, + dictWord{ + 138, + 0, + 137, + }, + dictWord{5, 10, 48}, + dictWord{5, 10, 404}, + dictWord{6, 10, 557}, + dictWord{7, 10, 458}, + dictWord{8, 10, 597}, + dictWord{10, 10, 455}, + dictWord{ + 10, + 10, + 606, + }, + dictWord{11, 10, 49}, + dictWord{11, 10, 548}, + dictWord{12, 10, 476}, + dictWord{13, 10, 18}, + dictWord{141, 10, 450}, + dictWord{133, 0, 414}, + dictWord{ + 135, + 0, + 1762, + }, + dictWord{5, 11, 421}, + dictWord{135, 11, 47}, + dictWord{5, 10, 442}, + dictWord{135, 10, 1984}, + dictWord{134, 0, 599}, + dictWord{134, 0, 1749}, + dictWord{134, 0, 1627}, + dictWord{4, 0, 488}, + dictWord{132, 11, 350}, + dictWord{137, 11, 751}, + dictWord{132, 0, 83}, + dictWord{140, 0, 676}, + dictWord{ + 133, + 11, + 967, + }, + dictWord{7, 0, 1639}, + dictWord{5, 10, 55}, + dictWord{140, 10, 161}, + dictWord{4, 11, 473}, + dictWord{7, 11, 623}, + dictWord{8, 11, 808}, + dictWord{ + 9, + 11, + 871, + }, + dictWord{9, 11, 893}, + dictWord{11, 11, 38}, + dictWord{11, 11, 431}, + dictWord{12, 11, 112}, + dictWord{12, 11, 217}, + dictWord{12, 11, 243}, + dictWord{ + 12, + 11, + 562, + }, + dictWord{12, 11, 683}, + dictWord{13, 11, 141}, + dictWord{13, 11, 197}, + dictWord{13, 11, 227}, + dictWord{13, 11, 406}, + dictWord{13, 11, 487}, + dictWord{14, 11, 156}, + dictWord{14, 11, 203}, + dictWord{14, 11, 224}, + dictWord{14, 11, 256}, + dictWord{18, 11, 58}, + dictWord{150, 11, 0}, + dictWord{ + 133, + 10, + 450, + }, + dictWord{7, 11, 736}, + dictWord{139, 11, 264}, + dictWord{134, 0, 278}, + dictWord{4, 11, 222}, + dictWord{7, 11, 286}, + dictWord{136, 11, 629}, + dictWord{ + 135, + 10, + 869, + }, + dictWord{140, 0, 97}, + dictWord{144, 0, 14}, + dictWord{134, 0, 1085}, + dictWord{4, 10, 213}, + dictWord{7, 10, 223}, + dictWord{136, 10, 80}, + dictWord{ + 7, + 0, + 388, + }, + dictWord{7, 0, 644}, + dictWord{139, 0, 781}, + dictWord{132, 0, 849}, + dictWord{7, 0, 229}, + dictWord{8, 0, 59}, + dictWord{9, 0, 190}, + dictWord{10, 0, 378}, + dictWord{140, 0, 191}, + dictWord{7, 10, 381}, + dictWord{7, 10, 806}, + dictWord{7, 10, 820}, + dictWord{8, 10, 354}, + dictWord{8, 10, 437}, + dictWord{8, 10, 787}, + dictWord{9, 10, 657}, + dictWord{10, 10, 58}, + dictWord{10, 10, 339}, + dictWord{10, 10, 
749}, + dictWord{11, 10, 914}, + dictWord{12, 10, 162}, + dictWord{13, 10, 75}, + dictWord{14, 10, 106}, + dictWord{14, 10, 198}, + dictWord{14, 10, 320}, + dictWord{14, 10, 413}, + dictWord{146, 10, 43}, + dictWord{141, 11, 306}, + dictWord{ + 136, + 10, + 747, + }, + dictWord{134, 0, 1115}, + dictWord{16, 0, 94}, + dictWord{16, 0, 108}, + dictWord{136, 11, 146}, + dictWord{6, 0, 700}, + dictWord{6, 0, 817}, + dictWord{ + 134, + 0, + 1002, + }, + dictWord{133, 10, 692}, + dictWord{4, 11, 465}, + dictWord{135, 11, 1663}, + dictWord{134, 10, 191}, + dictWord{6, 0, 1414}, + dictWord{ + 135, + 11, + 913, + }, + dictWord{132, 0, 660}, + dictWord{7, 0, 1035}, + dictWord{138, 0, 737}, + dictWord{6, 10, 162}, + dictWord{7, 10, 1960}, + dictWord{136, 10, 831}, + dictWord{ + 132, + 10, + 706, + }, + dictWord{7, 0, 690}, + dictWord{9, 0, 217}, + dictWord{9, 0, 587}, + dictWord{140, 0, 521}, + dictWord{138, 10, 426}, + dictWord{135, 10, 1235}, + dictWord{ + 6, + 11, + 82, + }, + dictWord{7, 11, 138}, + dictWord{7, 11, 517}, + dictWord{9, 11, 673}, + dictWord{139, 11, 238}, + dictWord{138, 0, 272}, + dictWord{5, 11, 495}, + dictWord{ + 7, + 11, + 834, + }, + dictWord{9, 11, 733}, + dictWord{139, 11, 378}, + dictWord{134, 0, 1744}, + dictWord{132, 0, 1011}, + dictWord{7, 11, 828}, + dictWord{142, 11, 116}, + dictWord{4, 0, 733}, + dictWord{9, 0, 194}, + dictWord{10, 0, 92}, + dictWord{11, 0, 198}, + dictWord{12, 0, 84}, + dictWord{13, 0, 128}, + dictWord{133, 11, 559}, + dictWord{ + 10, + 0, + 57, + }, + dictWord{10, 0, 277}, + dictWord{6, 11, 21}, + dictWord{6, 11, 1737}, + dictWord{7, 11, 1444}, + dictWord{136, 11, 224}, + dictWord{4, 10, 204}, + dictWord{ + 137, + 10, + 902, + }, + dictWord{136, 10, 833}, + dictWord{11, 0, 348}, + dictWord{12, 0, 99}, + dictWord{18, 0, 1}, + dictWord{18, 0, 11}, + dictWord{19, 0, 4}, + dictWord{7, 10, 366}, + dictWord{9, 10, 287}, + dictWord{12, 10, 199}, + dictWord{12, 10, 556}, + dictWord{140, 10, 577}, + dictWord{6, 0, 1981}, + dictWord{136, 0, 936}, + dictWord{ + 21, + 0, + 33, + }, + dictWord{150, 0, 40}, + dictWord{5, 11, 519}, + dictWord{138, 11, 204}, + dictWord{5, 10, 356}, + dictWord{135, 10, 224}, + dictWord{134, 0, 775}, + dictWord{ + 135, + 0, + 306, + }, + dictWord{7, 10, 630}, + dictWord{9, 10, 567}, + dictWord{11, 10, 150}, + dictWord{11, 10, 444}, + dictWord{141, 10, 119}, + dictWord{5, 0, 979}, + dictWord{ + 134, + 10, + 539, + }, + dictWord{133, 0, 611}, + dictWord{4, 11, 402}, + dictWord{135, 11, 1679}, + dictWord{5, 0, 178}, + dictWord{7, 11, 2}, + dictWord{8, 11, 323}, + dictWord{ + 136, + 11, + 479, + }, + dictWord{5, 11, 59}, + dictWord{135, 11, 672}, + dictWord{4, 0, 1010}, + dictWord{6, 0, 1969}, + dictWord{138, 11, 237}, + dictWord{133, 11, 412}, + dictWord{146, 11, 34}, + dictWord{7, 11, 1740}, + dictWord{146, 11, 48}, + dictWord{134, 0, 664}, + dictWord{139, 10, 814}, + dictWord{4, 11, 85}, + dictWord{ + 135, + 11, + 549, + }, + dictWord{133, 11, 94}, + dictWord{133, 11, 457}, + dictWord{132, 0, 390}, + dictWord{134, 0, 1510}, + dictWord{4, 10, 235}, + dictWord{135, 10, 255}, + dictWord{4, 10, 194}, + dictWord{5, 10, 584}, + dictWord{6, 11, 11}, + dictWord{6, 10, 384}, + dictWord{7, 11, 187}, + dictWord{7, 10, 583}, + dictWord{10, 10, 761}, + dictWord{ + 11, + 10, + 760, + }, + dictWord{139, 10, 851}, + dictWord{4, 11, 522}, + dictWord{139, 11, 802}, + dictWord{135, 0, 493}, + dictWord{10, 11, 776}, + dictWord{13, 11, 345}, + dictWord{142, 11, 425}, + dictWord{146, 0, 37}, + dictWord{4, 11, 52}, + dictWord{135, 11, 661}, + dictWord{134, 
0, 724}, + dictWord{134, 0, 829}, + dictWord{ + 133, + 11, + 520, + }, + dictWord{133, 10, 562}, + dictWord{4, 11, 281}, + dictWord{5, 11, 38}, + dictWord{7, 11, 194}, + dictWord{7, 11, 668}, + dictWord{7, 11, 1893}, + dictWord{ + 137, + 11, + 397, + }, + dictWord{5, 10, 191}, + dictWord{137, 10, 271}, + dictWord{7, 0, 1537}, + dictWord{14, 0, 96}, + dictWord{143, 0, 73}, + dictWord{5, 0, 473}, + dictWord{ + 11, + 0, + 168, + }, + dictWord{4, 10, 470}, + dictWord{6, 10, 153}, + dictWord{7, 10, 1503}, + dictWord{7, 10, 1923}, + dictWord{10, 10, 701}, + dictWord{11, 10, 132}, + dictWord{ + 11, + 10, + 227, + }, + dictWord{11, 10, 320}, + dictWord{11, 10, 436}, + dictWord{11, 10, 525}, + dictWord{11, 10, 855}, + dictWord{12, 10, 41}, + dictWord{12, 10, 286}, + dictWord{13, 10, 103}, + dictWord{13, 10, 284}, + dictWord{14, 10, 255}, + dictWord{14, 10, 262}, + dictWord{15, 10, 117}, + dictWord{143, 10, 127}, + dictWord{ + 133, + 0, + 105, + }, + dictWord{5, 0, 438}, + dictWord{9, 0, 694}, + dictWord{12, 0, 627}, + dictWord{141, 0, 210}, + dictWord{133, 10, 327}, + dictWord{6, 10, 552}, + dictWord{ + 7, + 10, + 1754, + }, + dictWord{137, 10, 604}, + dictWord{134, 0, 1256}, + dictWord{152, 0, 11}, + dictWord{5, 11, 448}, + dictWord{11, 11, 98}, + dictWord{139, 11, 524}, + dictWord{ + 7, + 0, + 1626, + }, + dictWord{5, 10, 80}, + dictWord{6, 10, 405}, + dictWord{7, 10, 403}, + dictWord{7, 10, 1502}, + dictWord{8, 10, 456}, + dictWord{9, 10, 487}, + dictWord{ + 9, + 10, + 853, + }, + dictWord{9, 10, 889}, + dictWord{10, 10, 309}, + dictWord{11, 10, 721}, + dictWord{11, 10, 994}, + dictWord{12, 10, 430}, + dictWord{13, 10, 165}, + dictWord{ + 14, + 11, + 16, + }, + dictWord{146, 11, 44}, + dictWord{132, 0, 779}, + dictWord{8, 0, 25}, + dictWord{138, 0, 826}, + dictWord{4, 10, 453}, + dictWord{5, 10, 887}, + dictWord{ + 6, + 10, + 535, + }, + dictWord{8, 10, 6}, + dictWord{8, 10, 543}, + dictWord{136, 10, 826}, + dictWord{137, 11, 461}, + dictWord{140, 11, 632}, + dictWord{132, 0, 308}, + dictWord{135, 0, 741}, + dictWord{132, 0, 671}, + dictWord{7, 0, 150}, + dictWord{8, 0, 649}, + dictWord{136, 0, 1020}, + dictWord{9, 0, 99}, + dictWord{6, 11, 336}, + dictWord{ + 8, + 11, + 552, + }, + dictWord{9, 11, 285}, + dictWord{10, 11, 99}, + dictWord{139, 11, 568}, + dictWord{134, 0, 521}, + dictWord{5, 0, 339}, + dictWord{14, 0, 3}, + dictWord{ + 15, + 0, + 41, + }, + dictWord{15, 0, 166}, + dictWord{147, 0, 66}, + dictWord{6, 11, 423}, + dictWord{7, 11, 665}, + dictWord{7, 11, 1210}, + dictWord{9, 11, 218}, + dictWord{ + 141, + 11, + 222, + }, + dictWord{6, 0, 543}, + dictWord{5, 10, 101}, + dictWord{5, 11, 256}, + dictWord{6, 10, 88}, + dictWord{7, 10, 1677}, + dictWord{9, 10, 100}, + dictWord{10, 10, 677}, + dictWord{14, 10, 169}, + dictWord{14, 10, 302}, + dictWord{14, 10, 313}, + dictWord{15, 10, 48}, + dictWord{143, 10, 84}, + dictWord{4, 10, 310}, + dictWord{ + 7, + 10, + 708, + }, + dictWord{7, 10, 996}, + dictWord{9, 10, 795}, + dictWord{10, 10, 390}, + dictWord{10, 10, 733}, + dictWord{11, 10, 451}, + dictWord{12, 10, 249}, + dictWord{ + 14, + 10, + 115, + }, + dictWord{14, 10, 286}, + dictWord{143, 10, 100}, + dictWord{133, 10, 587}, + dictWord{13, 11, 417}, + dictWord{14, 11, 129}, + dictWord{143, 11, 15}, + dictWord{134, 0, 1358}, + dictWord{136, 11, 554}, + dictWord{132, 10, 498}, + dictWord{7, 10, 217}, + dictWord{8, 10, 140}, + dictWord{138, 10, 610}, + dictWord{ + 135, + 11, + 989, + }, + dictWord{135, 11, 634}, + dictWord{6, 0, 155}, + dictWord{140, 0, 234}, + dictWord{135, 11, 462}, + 
dictWord{132, 11, 618}, + dictWord{ + 134, + 0, + 1628, + }, + dictWord{132, 0, 766}, + dictWord{4, 11, 339}, + dictWord{5, 10, 905}, + dictWord{135, 11, 259}, + dictWord{135, 0, 829}, + dictWord{4, 11, 759}, + dictWord{ + 141, + 11, + 169, + }, + dictWord{7, 0, 1445}, + dictWord{4, 10, 456}, + dictWord{7, 10, 358}, + dictWord{7, 10, 1637}, + dictWord{8, 10, 643}, + dictWord{139, 10, 483}, + dictWord{ + 5, + 0, + 486, + }, + dictWord{135, 0, 1349}, + dictWord{5, 11, 688}, + dictWord{135, 11, 712}, + dictWord{7, 0, 1635}, + dictWord{8, 0, 17}, + dictWord{10, 0, 217}, + dictWord{ + 10, + 0, + 295, + }, + dictWord{12, 0, 2}, + dictWord{140, 11, 2}, + dictWord{138, 0, 558}, + dictWord{150, 10, 56}, + dictWord{4, 11, 278}, + dictWord{5, 11, 465}, + dictWord{ + 135, + 11, + 1367, + }, + dictWord{136, 11, 482}, + dictWord{133, 10, 535}, + dictWord{6, 0, 1362}, + dictWord{6, 0, 1461}, + dictWord{10, 11, 274}, + dictWord{10, 11, 625}, + dictWord{139, 11, 530}, + dictWord{5, 0, 599}, + dictWord{5, 11, 336}, + dictWord{6, 11, 341}, + dictWord{6, 11, 478}, + dictWord{6, 11, 1763}, + dictWord{136, 11, 386}, + dictWord{7, 10, 1748}, + dictWord{137, 11, 151}, + dictWord{134, 0, 1376}, + dictWord{133, 10, 539}, + dictWord{135, 11, 73}, + dictWord{135, 11, 1971}, + dictWord{139, 11, 283}, + dictWord{9, 0, 93}, + dictWord{139, 0, 474}, + dictWord{6, 10, 91}, + dictWord{135, 10, 435}, + dictWord{6, 0, 447}, + dictWord{5, 11, 396}, + dictWord{134, 11, 501}, + dictWord{4, 10, 16}, + dictWord{5, 10, 316}, + dictWord{5, 10, 842}, + dictWord{6, 10, 370}, + dictWord{6, 10, 1778}, + dictWord{8, 10, 166}, + dictWord{11, 10, 812}, + dictWord{12, 10, 206}, + dictWord{12, 10, 351}, + dictWord{14, 10, 418}, + dictWord{16, 10, 15}, + dictWord{16, 10, 34}, + dictWord{18, 10, 3}, + dictWord{19, 10, 3}, + dictWord{19, 10, 7}, + dictWord{20, 10, 4}, + dictWord{149, 10, 21}, + dictWord{7, 0, 577}, + dictWord{7, 0, 1432}, + dictWord{9, 0, 475}, + dictWord{9, 0, 505}, + dictWord{9, 0, 526}, + dictWord{9, 0, 609}, + dictWord{9, 0, 689}, + dictWord{9, 0, 726}, + dictWord{9, 0, 735}, + dictWord{9, 0, 738}, + dictWord{10, 0, 556}, + dictWord{ + 10, + 0, + 674, + }, + dictWord{10, 0, 684}, + dictWord{11, 0, 89}, + dictWord{11, 0, 202}, + dictWord{11, 0, 272}, + dictWord{11, 0, 380}, + dictWord{11, 0, 415}, + dictWord{11, 0, 505}, + dictWord{11, 0, 537}, + dictWord{11, 0, 550}, + dictWord{11, 0, 562}, + dictWord{11, 0, 640}, + dictWord{11, 0, 667}, + dictWord{11, 0, 688}, + dictWord{11, 0, 847}, + dictWord{11, 0, 927}, + dictWord{11, 0, 930}, + dictWord{11, 0, 940}, + dictWord{12, 0, 144}, + dictWord{12, 0, 325}, + dictWord{12, 0, 329}, + dictWord{12, 0, 389}, + dictWord{ + 12, + 0, + 403, + }, + dictWord{12, 0, 451}, + dictWord{12, 0, 515}, + dictWord{12, 0, 604}, + dictWord{12, 0, 616}, + dictWord{12, 0, 626}, + dictWord{13, 0, 66}, + dictWord{ + 13, + 0, + 131, + }, + dictWord{13, 0, 167}, + dictWord{13, 0, 236}, + dictWord{13, 0, 368}, + dictWord{13, 0, 411}, + dictWord{13, 0, 434}, + dictWord{13, 0, 453}, + dictWord{13, 0, 461}, + dictWord{13, 0, 474}, + dictWord{14, 0, 59}, + dictWord{14, 0, 60}, + dictWord{14, 0, 139}, + dictWord{14, 0, 152}, + dictWord{14, 0, 276}, + dictWord{14, 0, 353}, + dictWord{ + 14, + 0, + 402, + }, + dictWord{15, 0, 28}, + dictWord{15, 0, 81}, + dictWord{15, 0, 123}, + dictWord{15, 0, 152}, + dictWord{18, 0, 136}, + dictWord{148, 0, 88}, + dictWord{ + 4, + 11, + 929, + }, + dictWord{133, 11, 799}, + dictWord{136, 11, 46}, + dictWord{142, 0, 307}, + dictWord{4, 0, 609}, + dictWord{7, 0, 756}, + 
dictWord{9, 0, 544}, + dictWord{ + 11, + 0, + 413, + }, + dictWord{144, 0, 25}, + dictWord{10, 0, 687}, + dictWord{7, 10, 619}, + dictWord{10, 10, 547}, + dictWord{11, 10, 122}, + dictWord{140, 10, 601}, + dictWord{ + 4, + 0, + 930, + }, + dictWord{133, 0, 947}, + dictWord{133, 0, 939}, + dictWord{142, 0, 21}, + dictWord{4, 11, 892}, + dictWord{133, 11, 770}, + dictWord{133, 0, 962}, + dictWord{ + 5, + 0, + 651, + }, + dictWord{8, 0, 170}, + dictWord{9, 0, 61}, + dictWord{9, 0, 63}, + dictWord{10, 0, 23}, + dictWord{10, 0, 37}, + dictWord{10, 0, 834}, + dictWord{11, 0, 4}, + dictWord{ + 11, + 0, + 187, + }, + dictWord{11, 0, 281}, + dictWord{11, 0, 503}, + dictWord{11, 0, 677}, + dictWord{12, 0, 96}, + dictWord{12, 0, 130}, + dictWord{12, 0, 244}, + dictWord{14, 0, 5}, + dictWord{14, 0, 40}, + dictWord{14, 0, 162}, + dictWord{14, 0, 202}, + dictWord{146, 0, 133}, + dictWord{4, 0, 406}, + dictWord{5, 0, 579}, + dictWord{12, 0, 492}, + dictWord{ + 150, + 0, + 15, + }, + dictWord{135, 11, 158}, + dictWord{135, 0, 597}, + dictWord{132, 0, 981}, + dictWord{132, 10, 888}, + dictWord{4, 10, 149}, + dictWord{138, 10, 368}, + dictWord{132, 0, 545}, + dictWord{4, 10, 154}, + dictWord{7, 10, 1134}, + dictWord{136, 10, 105}, + dictWord{135, 11, 2001}, + dictWord{134, 0, 1558}, + dictWord{ + 4, + 10, + 31, + }, + dictWord{6, 10, 429}, + dictWord{7, 10, 962}, + dictWord{9, 10, 458}, + dictWord{139, 10, 691}, + dictWord{132, 10, 312}, + dictWord{135, 10, 1642}, + dictWord{ + 6, + 0, + 17, + }, + dictWord{6, 0, 1304}, + dictWord{7, 0, 16}, + dictWord{7, 0, 1001}, + dictWord{9, 0, 886}, + dictWord{10, 0, 489}, + dictWord{10, 0, 800}, + dictWord{11, 0, 782}, + dictWord{12, 0, 320}, + dictWord{13, 0, 467}, + dictWord{14, 0, 145}, + dictWord{14, 0, 387}, + dictWord{143, 0, 119}, + dictWord{135, 0, 1982}, + dictWord{17, 0, 17}, + dictWord{7, 11, 1461}, + dictWord{140, 11, 91}, + dictWord{4, 10, 236}, + dictWord{132, 11, 602}, + dictWord{138, 0, 907}, + dictWord{136, 0, 110}, + dictWord{7, 0, 272}, + dictWord{19, 0, 53}, + dictWord{5, 10, 836}, + dictWord{5, 10, 857}, + dictWord{134, 10, 1680}, + dictWord{5, 0, 458}, + dictWord{7, 11, 1218}, + dictWord{136, 11, 303}, + dictWord{7, 0, 1983}, + dictWord{8, 0, 0}, + dictWord{8, 0, 171}, + dictWord{9, 0, 120}, + dictWord{9, 0, 732}, + dictWord{10, 0, 473}, + dictWord{11, 0, 656}, + dictWord{ + 11, + 0, + 998, + }, + dictWord{18, 0, 0}, + dictWord{18, 0, 2}, + dictWord{19, 0, 21}, + dictWord{10, 10, 68}, + dictWord{139, 10, 494}, + dictWord{137, 11, 662}, + dictWord{4, 11, 13}, + dictWord{5, 11, 567}, + dictWord{7, 11, 1498}, + dictWord{9, 11, 124}, + dictWord{11, 11, 521}, + dictWord{140, 11, 405}, + dictWord{4, 10, 81}, + dictWord{139, 10, 867}, + dictWord{135, 11, 1006}, + dictWord{7, 11, 800}, + dictWord{7, 11, 1783}, + dictWord{138, 11, 12}, + dictWord{9, 0, 295}, + dictWord{10, 0, 443}, + dictWord{ + 5, + 10, + 282, + }, + dictWord{8, 10, 650}, + dictWord{137, 10, 907}, + dictWord{132, 11, 735}, + dictWord{4, 11, 170}, + dictWord{4, 10, 775}, + dictWord{135, 11, 323}, + dictWord{ + 6, + 0, + 1844, + }, + dictWord{10, 0, 924}, + dictWord{11, 11, 844}, + dictWord{12, 11, 104}, + dictWord{140, 11, 625}, + dictWord{5, 11, 304}, + dictWord{7, 11, 1403}, + dictWord{140, 11, 498}, + dictWord{134, 0, 1232}, + dictWord{4, 0, 519}, + dictWord{10, 0, 70}, + dictWord{12, 0, 26}, + dictWord{14, 0, 17}, + dictWord{14, 0, 178}, + dictWord{ + 15, + 0, + 34, + }, + dictWord{149, 0, 12}, + dictWord{132, 0, 993}, + dictWord{4, 11, 148}, + dictWord{133, 11, 742}, + dictWord{6, 0, 
31}, + dictWord{7, 0, 491}, + dictWord{7, 0, 530}, + dictWord{8, 0, 592}, + dictWord{11, 0, 53}, + dictWord{11, 0, 779}, + dictWord{12, 0, 167}, + dictWord{12, 0, 411}, + dictWord{14, 0, 14}, + dictWord{14, 0, 136}, + dictWord{ + 15, + 0, + 72, + }, + dictWord{16, 0, 17}, + dictWord{144, 0, 72}, + dictWord{133, 0, 907}, + dictWord{134, 0, 733}, + dictWord{133, 11, 111}, + dictWord{4, 10, 71}, + dictWord{ + 5, + 10, + 376, + }, + dictWord{7, 10, 119}, + dictWord{138, 10, 665}, + dictWord{136, 0, 55}, + dictWord{8, 0, 430}, + dictWord{136, 11, 430}, + dictWord{4, 0, 208}, + dictWord{ + 5, + 0, + 106, + }, + dictWord{6, 0, 531}, + dictWord{8, 0, 408}, + dictWord{9, 0, 188}, + dictWord{138, 0, 572}, + dictWord{12, 0, 56}, + dictWord{11, 10, 827}, + dictWord{14, 10, 34}, + dictWord{143, 10, 148}, + dictWord{134, 0, 1693}, + dictWord{133, 11, 444}, + dictWord{132, 10, 479}, + dictWord{140, 0, 441}, + dictWord{9, 0, 449}, + dictWord{ + 10, + 0, + 192, + }, + dictWord{138, 0, 740}, + dictWord{134, 0, 928}, + dictWord{4, 0, 241}, + dictWord{7, 10, 607}, + dictWord{136, 10, 99}, + dictWord{8, 11, 123}, + dictWord{ + 15, + 11, + 6, + }, + dictWord{144, 11, 7}, + dictWord{6, 11, 285}, + dictWord{8, 11, 654}, + dictWord{11, 11, 749}, + dictWord{12, 11, 190}, + dictWord{12, 11, 327}, + dictWord{ + 13, + 11, + 120, + }, + dictWord{13, 11, 121}, + dictWord{13, 11, 327}, + dictWord{15, 11, 47}, + dictWord{146, 11, 40}, + dictWord{4, 10, 41}, + dictWord{5, 10, 74}, + dictWord{ + 7, + 10, + 1627, + }, + dictWord{11, 10, 871}, + dictWord{140, 10, 619}, + dictWord{7, 0, 1525}, + dictWord{11, 10, 329}, + dictWord{11, 10, 965}, + dictWord{12, 10, 241}, + dictWord{14, 10, 354}, + dictWord{15, 10, 22}, + dictWord{148, 10, 63}, + dictWord{132, 0, 259}, + dictWord{135, 11, 183}, + dictWord{9, 10, 209}, + dictWord{ + 137, + 10, + 300, + }, + dictWord{5, 11, 937}, + dictWord{135, 11, 100}, + dictWord{133, 10, 98}, + dictWord{4, 0, 173}, + dictWord{5, 0, 312}, + dictWord{5, 0, 512}, + dictWord{ + 135, + 0, + 1285, + }, + dictWord{141, 0, 185}, + dictWord{7, 0, 1603}, + dictWord{7, 0, 1691}, + dictWord{9, 0, 464}, + dictWord{11, 0, 195}, + dictWord{12, 0, 279}, + dictWord{ + 12, + 0, + 448, + }, + dictWord{14, 0, 11}, + dictWord{147, 0, 102}, + dictWord{135, 0, 1113}, + dictWord{133, 10, 984}, + dictWord{4, 0, 452}, + dictWord{5, 0, 583}, + dictWord{ + 135, + 0, + 720, + }, + dictWord{4, 0, 547}, + dictWord{5, 0, 817}, + dictWord{6, 0, 433}, + dictWord{7, 0, 593}, + dictWord{7, 0, 1378}, + dictWord{8, 0, 161}, + dictWord{9, 0, 284}, + dictWord{ + 10, + 0, + 313, + }, + dictWord{139, 0, 886}, + dictWord{8, 0, 722}, + dictWord{4, 10, 182}, + dictWord{6, 10, 205}, + dictWord{135, 10, 220}, + dictWord{150, 0, 13}, + dictWord{ + 4, + 10, + 42, + }, + dictWord{9, 10, 205}, + dictWord{9, 10, 786}, + dictWord{138, 10, 659}, + dictWord{6, 0, 289}, + dictWord{7, 0, 1670}, + dictWord{12, 0, 57}, + dictWord{151, 0, 4}, + dictWord{132, 10, 635}, + dictWord{14, 0, 43}, + dictWord{146, 0, 21}, + dictWord{139, 10, 533}, + dictWord{135, 0, 1694}, + dictWord{8, 0, 420}, + dictWord{ + 139, + 0, + 193, + }, + dictWord{135, 0, 409}, + dictWord{132, 10, 371}, + dictWord{4, 10, 272}, + dictWord{135, 10, 836}, + dictWord{5, 10, 825}, + dictWord{134, 10, 1640}, + dictWord{5, 11, 251}, + dictWord{5, 11, 956}, + dictWord{8, 11, 268}, + dictWord{9, 11, 214}, + dictWord{146, 11, 142}, + dictWord{138, 0, 308}, + dictWord{6, 0, 1863}, + dictWord{141, 11, 37}, + dictWord{137, 10, 879}, + dictWord{7, 10, 317}, + dictWord{135, 10, 569}, + dictWord{132, 
11, 294}, + dictWord{134, 0, 790}, + dictWord{ + 5, + 0, + 1002, + }, + dictWord{136, 0, 745}, + dictWord{5, 11, 346}, + dictWord{5, 11, 711}, + dictWord{136, 11, 390}, + dictWord{135, 0, 289}, + dictWord{5, 0, 504}, + dictWord{ + 11, + 0, + 68, + }, + dictWord{137, 10, 307}, + dictWord{4, 0, 239}, + dictWord{6, 0, 477}, + dictWord{7, 0, 1607}, + dictWord{139, 0, 617}, + dictWord{149, 0, 13}, + dictWord{ + 133, + 0, + 609, + }, + dictWord{133, 11, 624}, + dictWord{5, 11, 783}, + dictWord{7, 11, 1998}, + dictWord{135, 11, 2047}, + dictWord{133, 10, 525}, + dictWord{132, 0, 367}, + dictWord{132, 11, 594}, + dictWord{6, 0, 528}, + dictWord{133, 10, 493}, + dictWord{4, 10, 174}, + dictWord{135, 10, 911}, + dictWord{8, 10, 417}, + dictWord{ + 137, + 10, + 782, + }, + dictWord{132, 0, 694}, + dictWord{7, 0, 548}, + dictWord{137, 0, 58}, + dictWord{4, 10, 32}, + dictWord{5, 10, 215}, + dictWord{6, 10, 269}, + dictWord{7, 10, 1782}, + dictWord{7, 10, 1892}, + dictWord{10, 10, 16}, + dictWord{11, 10, 822}, + dictWord{11, 10, 954}, + dictWord{141, 10, 481}, + dictWord{140, 0, 687}, + dictWord{ + 7, + 0, + 1749, + }, + dictWord{136, 10, 477}, + dictWord{132, 11, 569}, + dictWord{133, 10, 308}, + dictWord{135, 10, 1088}, + dictWord{4, 0, 661}, + dictWord{138, 0, 1004}, + dictWord{5, 11, 37}, + dictWord{6, 11, 39}, + dictWord{6, 11, 451}, + dictWord{7, 11, 218}, + dictWord{7, 11, 667}, + dictWord{7, 11, 1166}, + dictWord{7, 11, 1687}, + dictWord{8, 11, 662}, + dictWord{144, 11, 2}, + dictWord{9, 0, 445}, + dictWord{12, 0, 53}, + dictWord{13, 0, 492}, + dictWord{5, 10, 126}, + dictWord{8, 10, 297}, + dictWord{ + 9, + 10, + 366, + }, + dictWord{140, 10, 374}, + dictWord{7, 10, 1551}, + dictWord{139, 10, 361}, + dictWord{148, 0, 74}, + dictWord{134, 11, 508}, + dictWord{135, 0, 213}, + dictWord{132, 10, 175}, + dictWord{132, 10, 685}, + dictWord{6, 0, 760}, + dictWord{6, 0, 834}, + dictWord{134, 0, 1248}, + dictWord{7, 11, 453}, + dictWord{7, 11, 635}, + dictWord{7, 11, 796}, + dictWord{8, 11, 331}, + dictWord{9, 11, 328}, + dictWord{9, 11, 330}, + dictWord{9, 11, 865}, + dictWord{10, 11, 119}, + dictWord{10, 11, 235}, + dictWord{11, 11, 111}, + dictWord{11, 11, 129}, + dictWord{11, 11, 240}, + dictWord{12, 11, 31}, + dictWord{12, 11, 66}, + dictWord{12, 11, 222}, + dictWord{12, 11, 269}, + dictWord{12, 11, 599}, + dictWord{12, 11, 689}, + dictWord{13, 11, 186}, + dictWord{13, 11, 364}, + dictWord{142, 11, 345}, + dictWord{7, 0, 1672}, + dictWord{ + 139, + 0, + 189, + }, + dictWord{133, 10, 797}, + dictWord{133, 10, 565}, + dictWord{6, 0, 1548}, + dictWord{6, 11, 98}, + dictWord{7, 11, 585}, + dictWord{135, 11, 702}, + dictWord{ + 9, + 0, + 968, + }, + dictWord{15, 0, 192}, + dictWord{149, 0, 56}, + dictWord{4, 10, 252}, + dictWord{6, 11, 37}, + dictWord{7, 11, 299}, + dictWord{7, 10, 1068}, + dictWord{ + 7, + 11, + 1666, + }, + dictWord{8, 11, 195}, + dictWord{8, 11, 316}, + dictWord{9, 11, 178}, + dictWord{9, 11, 276}, + dictWord{9, 11, 339}, + dictWord{9, 11, 536}, + dictWord{ + 10, + 11, + 102, + }, + dictWord{10, 11, 362}, + dictWord{10, 10, 434}, + dictWord{10, 11, 785}, + dictWord{11, 11, 55}, + dictWord{11, 11, 149}, + dictWord{11, 10, 228}, + dictWord{ + 11, + 10, + 426, + }, + dictWord{11, 11, 773}, + dictWord{13, 10, 231}, + dictWord{13, 11, 416}, + dictWord{13, 11, 419}, + dictWord{14, 11, 38}, + dictWord{14, 11, 41}, + dictWord{14, 11, 210}, + dictWord{18, 10, 106}, + dictWord{148, 10, 87}, + dictWord{4, 0, 751}, + dictWord{11, 0, 390}, + dictWord{140, 0, 32}, + dictWord{4, 0, 409}, + 
dictWord{133, 0, 78}, + dictWord{11, 11, 458}, + dictWord{12, 11, 15}, + dictWord{140, 11, 432}, + dictWord{7, 0, 1602}, + dictWord{10, 0, 257}, + dictWord{10, 0, 698}, + dictWord{11, 0, 544}, + dictWord{11, 0, 585}, + dictWord{12, 0, 212}, + dictWord{13, 0, 307}, + dictWord{5, 10, 231}, + dictWord{7, 10, 601}, + dictWord{9, 10, 277}, + dictWord{ + 9, + 10, + 674, + }, + dictWord{10, 10, 178}, + dictWord{10, 10, 418}, + dictWord{10, 10, 509}, + dictWord{11, 10, 531}, + dictWord{12, 10, 113}, + dictWord{12, 10, 475}, + dictWord{13, 10, 99}, + dictWord{142, 10, 428}, + dictWord{6, 0, 473}, + dictWord{145, 0, 105}, + dictWord{6, 0, 1949}, + dictWord{15, 0, 156}, + dictWord{133, 11, 645}, + dictWord{7, 10, 1591}, + dictWord{144, 10, 43}, + dictWord{135, 0, 1779}, + dictWord{135, 10, 1683}, + dictWord{4, 11, 290}, + dictWord{135, 11, 1356}, + dictWord{134, 0, 763}, + dictWord{6, 11, 70}, + dictWord{7, 11, 1292}, + dictWord{10, 11, 762}, + dictWord{139, 11, 288}, + dictWord{142, 0, 29}, + dictWord{140, 11, 428}, + dictWord{7, 0, 883}, + dictWord{7, 11, 131}, + dictWord{7, 11, 422}, + dictWord{8, 11, 210}, + dictWord{140, 11, 573}, + dictWord{134, 0, 488}, + dictWord{4, 10, 399}, + dictWord{5, 10, 119}, + dictWord{5, 10, 494}, + dictWord{7, 10, 751}, + dictWord{137, 10, 556}, + dictWord{133, 0, 617}, + dictWord{132, 11, 936}, + dictWord{ + 139, + 0, + 50, + }, + dictWord{7, 0, 1518}, + dictWord{139, 0, 694}, + dictWord{137, 0, 785}, + dictWord{4, 0, 546}, + dictWord{135, 0, 2042}, + dictWord{7, 11, 716}, + dictWord{ + 13, + 11, + 97, + }, + dictWord{141, 11, 251}, + dictWord{132, 11, 653}, + dictWord{145, 0, 22}, + dictWord{134, 0, 1016}, + dictWord{4, 0, 313}, + dictWord{133, 0, 577}, + dictWord{ + 136, + 11, + 657, + }, + dictWord{8, 0, 184}, + dictWord{141, 0, 433}, + dictWord{135, 0, 935}, + dictWord{6, 0, 720}, + dictWord{9, 0, 114}, + dictWord{146, 11, 80}, + dictWord{ + 12, + 0, + 186, + }, + dictWord{12, 0, 292}, + dictWord{14, 0, 100}, + dictWord{18, 0, 70}, + dictWord{7, 10, 594}, + dictWord{7, 10, 851}, + dictWord{7, 10, 1858}, + dictWord{ + 9, + 10, + 411, + }, + dictWord{9, 10, 574}, + dictWord{9, 10, 666}, + dictWord{9, 10, 737}, + dictWord{10, 10, 346}, + dictWord{10, 10, 712}, + dictWord{11, 10, 246}, + dictWord{ + 11, + 10, + 432, + }, + dictWord{11, 10, 517}, + dictWord{11, 10, 647}, + dictWord{11, 10, 679}, + dictWord{11, 10, 727}, + dictWord{12, 10, 304}, + dictWord{12, 10, 305}, + dictWord{12, 10, 323}, + dictWord{12, 10, 483}, + dictWord{12, 10, 572}, + dictWord{12, 10, 593}, + dictWord{12, 10, 602}, + dictWord{13, 10, 95}, + dictWord{13, 10, 101}, + dictWord{13, 10, 171}, + dictWord{13, 10, 315}, + dictWord{13, 10, 378}, + dictWord{13, 10, 425}, + dictWord{13, 10, 475}, + dictWord{14, 10, 63}, + dictWord{ + 14, + 10, + 380, + }, + dictWord{14, 10, 384}, + dictWord{15, 10, 133}, + dictWord{18, 10, 112}, + dictWord{148, 10, 72}, + dictWord{135, 10, 1093}, + dictWord{135, 11, 1836}, + dictWord{132, 10, 679}, + dictWord{137, 10, 203}, + dictWord{11, 0, 402}, + dictWord{12, 0, 109}, + dictWord{12, 0, 431}, + dictWord{13, 0, 179}, + dictWord{13, 0, 206}, + dictWord{14, 0, 217}, + dictWord{16, 0, 3}, + dictWord{148, 0, 53}, + dictWord{7, 11, 1368}, + dictWord{8, 11, 232}, + dictWord{8, 11, 361}, + dictWord{10, 11, 682}, + dictWord{138, 11, 742}, + dictWord{137, 10, 714}, + dictWord{5, 0, 886}, + dictWord{6, 0, 46}, + dictWord{6, 0, 1790}, + dictWord{7, 0, 14}, + dictWord{7, 0, 732}, + dictWord{ + 7, + 0, + 1654, + }, + dictWord{8, 0, 95}, + dictWord{8, 0, 327}, + dictWord{8, 0, 
616}, + dictWord{9, 0, 892}, + dictWord{10, 0, 598}, + dictWord{10, 0, 769}, + dictWord{11, 0, 134}, + dictWord{11, 0, 747}, + dictWord{12, 0, 378}, + dictWord{14, 0, 97}, + dictWord{137, 11, 534}, + dictWord{4, 0, 969}, + dictWord{136, 10, 825}, + dictWord{137, 11, 27}, + dictWord{6, 0, 727}, + dictWord{142, 11, 12}, + dictWord{133, 0, 1021}, + dictWord{134, 0, 1190}, + dictWord{134, 11, 1657}, + dictWord{5, 10, 143}, + dictWord{ + 5, + 10, + 769, + }, + dictWord{6, 10, 1760}, + dictWord{7, 10, 682}, + dictWord{7, 10, 1992}, + dictWord{136, 10, 736}, + dictWord{132, 0, 153}, + dictWord{135, 11, 127}, + dictWord{133, 0, 798}, + dictWord{132, 0, 587}, + dictWord{6, 0, 598}, + dictWord{7, 0, 42}, + dictWord{8, 0, 695}, + dictWord{10, 0, 212}, + dictWord{11, 0, 158}, + dictWord{ + 14, + 0, + 196, + }, + dictWord{145, 0, 85}, + dictWord{133, 10, 860}, + dictWord{6, 0, 1929}, + dictWord{134, 0, 1933}, + dictWord{5, 0, 957}, + dictWord{5, 0, 1008}, + dictWord{ + 9, + 0, + 577, + }, + dictWord{12, 0, 141}, + dictWord{6, 10, 422}, + dictWord{7, 10, 0}, + dictWord{7, 10, 1544}, + dictWord{8, 11, 364}, + dictWord{11, 10, 990}, + dictWord{ + 12, + 10, + 453, + }, + dictWord{13, 10, 47}, + dictWord{141, 10, 266}, + dictWord{134, 0, 1319}, + dictWord{4, 0, 129}, + dictWord{135, 0, 465}, + dictWord{7, 0, 470}, + dictWord{ + 7, + 0, + 1057, + }, + dictWord{7, 0, 1201}, + dictWord{9, 0, 755}, + dictWord{11, 0, 906}, + dictWord{140, 0, 527}, + dictWord{7, 0, 908}, + dictWord{146, 0, 7}, + dictWord{5, 0, 148}, + dictWord{136, 0, 450}, + dictWord{5, 10, 515}, + dictWord{137, 10, 131}, + dictWord{7, 10, 1605}, + dictWord{11, 10, 962}, + dictWord{146, 10, 139}, + dictWord{ + 132, + 10, + 646, + }, + dictWord{134, 0, 1166}, + dictWord{4, 10, 396}, + dictWord{7, 10, 728}, + dictWord{9, 10, 117}, + dictWord{13, 10, 202}, + dictWord{148, 10, 51}, + dictWord{ + 6, + 10, + 121, + }, + dictWord{6, 10, 124}, + dictWord{6, 10, 357}, + dictWord{7, 10, 1138}, + dictWord{7, 10, 1295}, + dictWord{8, 10, 162}, + dictWord{139, 10, 655}, + dictWord{14, 0, 374}, + dictWord{142, 11, 374}, + dictWord{138, 0, 253}, + dictWord{139, 0, 1003}, + dictWord{5, 11, 909}, + dictWord{9, 11, 849}, + dictWord{ + 138, + 11, + 805, + }, + dictWord{133, 10, 237}, + dictWord{7, 11, 525}, + dictWord{7, 11, 1579}, + dictWord{8, 11, 497}, + dictWord{136, 11, 573}, + dictWord{137, 0, 46}, + dictWord{ + 132, + 0, + 879, + }, + dictWord{134, 0, 806}, + dictWord{135, 0, 1868}, + dictWord{6, 0, 1837}, + dictWord{134, 0, 1846}, + dictWord{6, 0, 730}, + dictWord{134, 0, 881}, + dictWord{7, 0, 965}, + dictWord{7, 0, 1460}, + dictWord{7, 0, 1604}, + dictWord{7, 11, 193}, + dictWord{7, 11, 397}, + dictWord{7, 11, 1105}, + dictWord{8, 11, 124}, + dictWord{ + 8, + 11, + 619, + }, + dictWord{9, 11, 305}, + dictWord{10, 11, 264}, + dictWord{11, 11, 40}, + dictWord{12, 11, 349}, + dictWord{13, 11, 134}, + dictWord{13, 11, 295}, + dictWord{14, 11, 155}, + dictWord{15, 11, 120}, + dictWord{146, 11, 105}, + dictWord{136, 0, 506}, + dictWord{143, 0, 10}, + dictWord{4, 11, 262}, + dictWord{7, 11, 342}, + dictWord{7, 10, 571}, + dictWord{7, 10, 1877}, + dictWord{10, 10, 366}, + dictWord{141, 11, 23}, + dictWord{133, 11, 641}, + dictWord{10, 0, 22}, + dictWord{9, 10, 513}, + dictWord{10, 10, 39}, + dictWord{12, 10, 122}, + dictWord{140, 10, 187}, + dictWord{135, 11, 1431}, + dictWord{150, 11, 49}, + dictWord{4, 11, 99}, + dictWord{ + 6, + 11, + 250, + }, + dictWord{6, 11, 346}, + dictWord{8, 11, 127}, + dictWord{138, 11, 81}, + dictWord{6, 0, 2014}, + dictWord{8, 0, 
928}, + dictWord{10, 0, 960}, + dictWord{10, 0, 979}, + dictWord{140, 0, 996}, + dictWord{134, 0, 296}, + dictWord{132, 11, 915}, + dictWord{5, 11, 75}, + dictWord{9, 11, 517}, + dictWord{10, 11, 470}, + dictWord{ + 12, + 11, + 155, + }, + dictWord{141, 11, 224}, + dictWord{137, 10, 873}, + dictWord{4, 0, 854}, + dictWord{140, 11, 18}, + dictWord{134, 0, 587}, + dictWord{7, 10, 107}, + dictWord{ + 7, + 10, + 838, + }, + dictWord{8, 10, 550}, + dictWord{138, 10, 401}, + dictWord{11, 0, 636}, + dictWord{15, 0, 145}, + dictWord{17, 0, 34}, + dictWord{19, 0, 50}, + dictWord{ + 23, + 0, + 20, + }, + dictWord{11, 10, 588}, + dictWord{11, 10, 864}, + dictWord{11, 10, 968}, + dictWord{143, 10, 160}, + dictWord{135, 11, 216}, + dictWord{7, 0, 982}, + dictWord{ + 10, + 0, + 32, + }, + dictWord{143, 0, 56}, + dictWord{133, 10, 768}, + dictWord{133, 11, 954}, + dictWord{6, 11, 304}, + dictWord{7, 11, 1114}, + dictWord{8, 11, 418}, + dictWord{ + 10, + 11, + 345, + }, + dictWord{11, 11, 341}, + dictWord{11, 11, 675}, + dictWord{141, 11, 40}, + dictWord{9, 11, 410}, + dictWord{139, 11, 425}, + dictWord{136, 0, 941}, + dictWord{5, 0, 435}, + dictWord{132, 10, 894}, + dictWord{5, 0, 85}, + dictWord{6, 0, 419}, + dictWord{7, 0, 134}, + dictWord{7, 0, 305}, + dictWord{7, 0, 361}, + dictWord{ + 7, + 0, + 1337, + }, + dictWord{8, 0, 71}, + dictWord{140, 0, 519}, + dictWord{140, 0, 688}, + dictWord{135, 0, 740}, + dictWord{5, 0, 691}, + dictWord{7, 0, 345}, + dictWord{9, 0, 94}, + dictWord{140, 0, 169}, + dictWord{5, 0, 183}, + dictWord{6, 0, 582}, + dictWord{10, 0, 679}, + dictWord{140, 0, 435}, + dictWord{134, 11, 14}, + dictWord{6, 0, 945}, + dictWord{135, 0, 511}, + dictWord{134, 11, 1708}, + dictWord{5, 11, 113}, + dictWord{6, 11, 243}, + dictWord{7, 11, 1865}, + dictWord{11, 11, 161}, + dictWord{16, 11, 37}, + dictWord{145, 11, 99}, + dictWord{132, 11, 274}, + dictWord{137, 0, 539}, + dictWord{7, 0, 1993}, + dictWord{8, 0, 684}, + dictWord{134, 10, 272}, + dictWord{ + 6, + 0, + 659, + }, + dictWord{134, 0, 982}, + dictWord{4, 10, 9}, + dictWord{5, 10, 128}, + dictWord{7, 10, 368}, + dictWord{11, 10, 480}, + dictWord{148, 10, 3}, + dictWord{ + 134, + 0, + 583, + }, + dictWord{132, 0, 803}, + dictWord{133, 0, 704}, + dictWord{4, 0, 179}, + dictWord{5, 0, 198}, + dictWord{133, 0, 697}, + dictWord{7, 0, 347}, + dictWord{7, 0, 971}, + dictWord{8, 0, 181}, + dictWord{10, 0, 711}, + dictWord{135, 11, 166}, + dictWord{136, 10, 682}, + dictWord{4, 10, 2}, + dictWord{7, 10, 545}, + dictWord{7, 10, 894}, + dictWord{136, 11, 521}, + dictWord{135, 0, 481}, + dictWord{132, 0, 243}, + dictWord{5, 0, 203}, + dictWord{7, 0, 19}, + dictWord{7, 0, 71}, + dictWord{7, 0, 113}, + dictWord{ + 10, + 0, + 405, + }, + dictWord{11, 0, 357}, + dictWord{142, 0, 240}, + dictWord{5, 11, 725}, + dictWord{5, 11, 727}, + dictWord{135, 11, 1811}, + dictWord{6, 0, 826}, + dictWord{ + 137, + 11, + 304, + }, + dictWord{7, 0, 1450}, + dictWord{139, 0, 99}, + dictWord{133, 11, 654}, + dictWord{134, 0, 492}, + dictWord{5, 0, 134}, + dictWord{6, 0, 408}, + dictWord{ + 6, + 0, + 495, + }, + dictWord{7, 0, 1593}, + dictWord{6, 11, 273}, + dictWord{10, 11, 188}, + dictWord{13, 11, 377}, + dictWord{146, 11, 77}, + dictWord{9, 10, 769}, + dictWord{ + 140, + 10, + 185, + }, + dictWord{135, 11, 410}, + dictWord{142, 0, 4}, + dictWord{4, 0, 665}, + dictWord{134, 11, 1785}, + dictWord{4, 0, 248}, + dictWord{7, 0, 137}, + dictWord{ + 137, + 0, + 349, + }, + dictWord{5, 10, 530}, + dictWord{142, 10, 113}, + dictWord{7, 0, 1270}, + dictWord{139, 0, 612}, + 
[auto-generated vendored data, continued: several thousand dictWord{…} entries of a static dictionary lookup table (machine-generated; not human-editable), added verbatim by this patch]
1153}, + dictWord{7, 10, 1715}, + dictWord{9, 10, 374}, + dictWord{10, 10, 478}, + dictWord{139, 10, 648}, + dictWord{4, 11, 785}, + dictWord{133, 11, 368}, + dictWord{135, 10, 1099}, + dictWord{135, 11, 860}, + dictWord{5, 11, 980}, + dictWord{134, 11, 1754}, + dictWord{134, 0, 1258}, + dictWord{ + 6, + 0, + 1058, + }, + dictWord{6, 0, 1359}, + dictWord{7, 11, 536}, + dictWord{7, 11, 1331}, + dictWord{136, 11, 143}, + dictWord{4, 0, 656}, + dictWord{135, 0, 779}, + dictWord{136, 10, 87}, + dictWord{5, 11, 19}, + dictWord{6, 11, 533}, + dictWord{146, 11, 126}, + dictWord{7, 0, 144}, + dictWord{138, 10, 438}, + dictWord{5, 11, 395}, + dictWord{5, 11, 951}, + dictWord{134, 11, 1776}, + dictWord{135, 0, 1373}, + dictWord{7, 0, 554}, + dictWord{7, 0, 605}, + dictWord{141, 0, 10}, + dictWord{4, 10, 69}, + dictWord{ + 5, + 10, + 122, + }, + dictWord{9, 10, 656}, + dictWord{138, 10, 464}, + dictWord{5, 10, 849}, + dictWord{134, 10, 1633}, + dictWord{5, 0, 838}, + dictWord{5, 0, 841}, + dictWord{134, 0, 1649}, + dictWord{133, 0, 1012}, + dictWord{139, 10, 499}, + dictWord{7, 10, 476}, + dictWord{7, 10, 1592}, + dictWord{138, 10, 87}, + dictWord{ + 6, + 0, + 251, + }, + dictWord{7, 0, 365}, + dictWord{7, 0, 1357}, + dictWord{7, 0, 1497}, + dictWord{8, 0, 154}, + dictWord{141, 0, 281}, + dictWord{132, 11, 441}, + dictWord{ + 132, + 11, + 695, + }, + dictWord{7, 11, 497}, + dictWord{9, 11, 387}, + dictWord{147, 11, 81}, + dictWord{133, 0, 340}, + dictWord{14, 10, 283}, + dictWord{142, 11, 283}, + dictWord{ + 134, + 0, + 810, + }, + dictWord{135, 11, 1894}, + dictWord{139, 0, 495}, + dictWord{5, 11, 284}, + dictWord{6, 11, 49}, + dictWord{6, 11, 350}, + dictWord{7, 11, 1}, + dictWord{ + 7, + 11, + 377, + }, + dictWord{7, 11, 1693}, + dictWord{8, 11, 18}, + dictWord{8, 11, 678}, + dictWord{9, 11, 161}, + dictWord{9, 11, 585}, + dictWord{9, 11, 671}, + dictWord{ + 9, + 11, + 839, + }, + dictWord{11, 11, 912}, + dictWord{141, 11, 427}, + dictWord{5, 10, 859}, + dictWord{7, 10, 1160}, + dictWord{8, 10, 107}, + dictWord{9, 10, 291}, + dictWord{ + 9, + 10, + 439, + }, + dictWord{10, 10, 663}, + dictWord{11, 10, 609}, + dictWord{140, 10, 197}, + dictWord{8, 0, 261}, + dictWord{9, 0, 144}, + dictWord{9, 0, 466}, + dictWord{ + 10, + 0, + 370, + }, + dictWord{12, 0, 470}, + dictWord{13, 0, 144}, + dictWord{142, 0, 348}, + dictWord{137, 0, 897}, + dictWord{6, 0, 248}, + dictWord{9, 0, 546}, + dictWord{10, 0, 535}, + dictWord{11, 0, 681}, + dictWord{141, 0, 135}, + dictWord{4, 0, 358}, + dictWord{135, 0, 1496}, + dictWord{134, 0, 567}, + dictWord{136, 0, 445}, + dictWord{ + 4, + 10, + 117, + }, + dictWord{6, 10, 372}, + dictWord{7, 10, 1905}, + dictWord{142, 10, 323}, + dictWord{4, 10, 722}, + dictWord{139, 10, 471}, + dictWord{6, 0, 697}, + dictWord{ + 134, + 0, + 996, + }, + dictWord{7, 11, 2007}, + dictWord{9, 11, 101}, + dictWord{9, 11, 450}, + dictWord{10, 11, 66}, + dictWord{10, 11, 842}, + dictWord{11, 11, 536}, + dictWord{ + 140, + 11, + 587, + }, + dictWord{132, 0, 577}, + dictWord{134, 0, 1336}, + dictWord{9, 10, 5}, + dictWord{12, 10, 216}, + dictWord{12, 10, 294}, + dictWord{12, 10, 298}, + dictWord{12, 10, 400}, + dictWord{12, 10, 518}, + dictWord{13, 10, 229}, + dictWord{143, 10, 139}, + dictWord{6, 0, 174}, + dictWord{138, 0, 917}, + dictWord{ + 134, + 10, + 1774, + }, + dictWord{5, 10, 12}, + dictWord{7, 10, 375}, + dictWord{9, 10, 88}, + dictWord{9, 10, 438}, + dictWord{11, 11, 62}, + dictWord{139, 10, 270}, + dictWord{ + 134, + 11, + 1766, + }, + dictWord{6, 11, 0}, + dictWord{7, 11, 84}, + 
dictWord{7, 10, 816}, + dictWord{7, 10, 1241}, + dictWord{9, 10, 283}, + dictWord{9, 10, 520}, + dictWord{10, 10, 213}, + dictWord{10, 10, 307}, + dictWord{10, 10, 463}, + dictWord{10, 10, 671}, + dictWord{10, 10, 746}, + dictWord{11, 10, 401}, + dictWord{11, 10, 794}, + dictWord{ + 11, + 11, + 895, + }, + dictWord{12, 10, 517}, + dictWord{17, 11, 11}, + dictWord{18, 10, 107}, + dictWord{147, 10, 115}, + dictWord{5, 0, 878}, + dictWord{133, 0, 972}, + dictWord{ + 6, + 11, + 1665, + }, + dictWord{7, 11, 256}, + dictWord{7, 11, 1388}, + dictWord{138, 11, 499}, + dictWord{4, 10, 258}, + dictWord{136, 10, 639}, + dictWord{4, 11, 22}, + dictWord{5, 11, 10}, + dictWord{6, 10, 22}, + dictWord{7, 11, 848}, + dictWord{7, 10, 903}, + dictWord{7, 10, 1963}, + dictWord{8, 11, 97}, + dictWord{138, 10, 577}, + dictWord{ + 5, + 10, + 681, + }, + dictWord{136, 10, 782}, + dictWord{133, 11, 481}, + dictWord{132, 0, 351}, + dictWord{4, 10, 664}, + dictWord{5, 10, 804}, + dictWord{139, 10, 1013}, + dictWord{6, 11, 134}, + dictWord{7, 11, 437}, + dictWord{7, 11, 959}, + dictWord{9, 11, 37}, + dictWord{14, 11, 285}, + dictWord{14, 11, 371}, + dictWord{144, 11, 60}, + dictWord{7, 11, 486}, + dictWord{8, 11, 155}, + dictWord{11, 11, 93}, + dictWord{140, 11, 164}, + dictWord{132, 0, 286}, + dictWord{7, 0, 438}, + dictWord{7, 0, 627}, + dictWord{7, 0, 1516}, + dictWord{8, 0, 40}, + dictWord{9, 0, 56}, + dictWord{9, 0, 294}, + dictWord{10, 0, 30}, + dictWord{11, 0, 969}, + dictWord{11, 0, 995}, + dictWord{146, 0, 148}, + dictWord{5, 11, 591}, + dictWord{135, 11, 337}, + dictWord{134, 0, 1950}, + dictWord{133, 10, 32}, + dictWord{138, 11, 500}, + dictWord{5, 11, 380}, + dictWord{ + 5, + 11, + 650, + }, + dictWord{136, 11, 310}, + dictWord{4, 11, 364}, + dictWord{7, 11, 1156}, + dictWord{7, 11, 1187}, + dictWord{137, 11, 409}, + dictWord{4, 0, 738}, + dictWord{134, 11, 482}, + dictWord{4, 11, 781}, + dictWord{6, 11, 487}, + dictWord{7, 11, 926}, + dictWord{8, 11, 263}, + dictWord{139, 11, 500}, + dictWord{135, 11, 418}, + dictWord{6, 0, 2047}, + dictWord{10, 0, 969}, + dictWord{4, 10, 289}, + dictWord{7, 10, 629}, + dictWord{7, 10, 1698}, + dictWord{7, 10, 1711}, + dictWord{ + 140, + 10, + 215, + }, + dictWord{6, 10, 450}, + dictWord{136, 10, 109}, + dictWord{134, 0, 818}, + dictWord{136, 10, 705}, + dictWord{133, 0, 866}, + dictWord{4, 11, 94}, + dictWord{ + 135, + 11, + 1265, + }, + dictWord{132, 11, 417}, + dictWord{134, 0, 1467}, + dictWord{135, 10, 1238}, + dictWord{4, 0, 972}, + dictWord{6, 0, 1851}, + dictWord{ + 134, + 0, + 1857, + }, + dictWord{134, 0, 355}, + dictWord{133, 0, 116}, + dictWord{132, 0, 457}, + dictWord{135, 11, 1411}, + dictWord{4, 11, 408}, + dictWord{4, 11, 741}, + dictWord{135, 11, 500}, + dictWord{134, 10, 26}, + dictWord{142, 11, 137}, + dictWord{5, 0, 527}, + dictWord{6, 0, 189}, + dictWord{7, 0, 859}, + dictWord{136, 0, 267}, + dictWord{11, 0, 104}, + dictWord{11, 0, 554}, + dictWord{15, 0, 60}, + dictWord{143, 0, 125}, + dictWord{134, 0, 1613}, + dictWord{4, 10, 414}, + dictWord{5, 10, 467}, + dictWord{ + 9, + 10, + 654, + }, + dictWord{10, 10, 451}, + dictWord{12, 10, 59}, + dictWord{141, 10, 375}, + dictWord{135, 10, 17}, + dictWord{134, 0, 116}, + dictWord{135, 11, 541}, + dictWord{135, 10, 955}, + dictWord{6, 11, 73}, + dictWord{135, 11, 177}, + dictWord{133, 11, 576}, + dictWord{134, 0, 886}, + dictWord{133, 0, 487}, + dictWord{ + 4, + 0, + 86, + }, + dictWord{5, 0, 667}, + dictWord{5, 0, 753}, + dictWord{6, 0, 316}, + dictWord{6, 0, 455}, + dictWord{135, 0, 946}, + dictWord{142, 
11, 231}, + dictWord{150, 0, 45}, + dictWord{134, 0, 863}, + dictWord{134, 0, 1953}, + dictWord{6, 10, 280}, + dictWord{10, 10, 502}, + dictWord{11, 10, 344}, + dictWord{140, 10, 38}, + dictWord{4, 0, 79}, + dictWord{7, 0, 1773}, + dictWord{10, 0, 450}, + dictWord{11, 0, 589}, + dictWord{13, 0, 332}, + dictWord{13, 0, 493}, + dictWord{14, 0, 183}, + dictWord{14, 0, 334}, + dictWord{14, 0, 362}, + dictWord{14, 0, 368}, + dictWord{14, 0, 376}, + dictWord{14, 0, 379}, + dictWord{19, 0, 90}, + dictWord{19, 0, 103}, + dictWord{19, 0, 127}, + dictWord{ + 148, + 0, + 90, + }, + dictWord{5, 10, 45}, + dictWord{7, 10, 1161}, + dictWord{11, 10, 448}, + dictWord{11, 10, 880}, + dictWord{13, 10, 139}, + dictWord{13, 10, 407}, + dictWord{ + 15, + 10, + 16, + }, + dictWord{17, 10, 95}, + dictWord{18, 10, 66}, + dictWord{18, 10, 88}, + dictWord{18, 10, 123}, + dictWord{149, 10, 7}, + dictWord{136, 10, 777}, + dictWord{ + 4, + 10, + 410, + }, + dictWord{135, 10, 521}, + dictWord{135, 10, 1778}, + dictWord{135, 11, 538}, + dictWord{142, 0, 381}, + dictWord{133, 11, 413}, + dictWord{ + 134, + 0, + 1142, + }, + dictWord{6, 0, 1189}, + dictWord{136, 11, 495}, + dictWord{5, 0, 663}, + dictWord{6, 0, 1962}, + dictWord{134, 0, 2003}, + dictWord{7, 11, 54}, + dictWord{ + 8, + 11, + 312, + }, + dictWord{10, 11, 191}, + dictWord{10, 11, 614}, + dictWord{140, 11, 567}, + dictWord{132, 10, 436}, + dictWord{133, 0, 846}, + dictWord{10, 0, 528}, + dictWord{11, 0, 504}, + dictWord{7, 10, 1587}, + dictWord{135, 10, 1707}, + dictWord{5, 0, 378}, + dictWord{8, 0, 465}, + dictWord{9, 0, 286}, + dictWord{10, 0, 185}, + dictWord{ + 10, + 0, + 562, + }, + dictWord{10, 0, 635}, + dictWord{11, 0, 31}, + dictWord{11, 0, 393}, + dictWord{13, 0, 312}, + dictWord{18, 0, 65}, + dictWord{18, 0, 96}, + dictWord{147, 0, 89}, + dictWord{7, 0, 899}, + dictWord{14, 0, 325}, + dictWord{6, 11, 468}, + dictWord{7, 11, 567}, + dictWord{7, 11, 1478}, + dictWord{8, 11, 530}, + dictWord{142, 11, 290}, + dictWord{7, 0, 1880}, + dictWord{9, 0, 680}, + dictWord{139, 0, 798}, + dictWord{134, 0, 1770}, + dictWord{132, 0, 648}, + dictWord{150, 11, 35}, + dictWord{5, 0, 945}, + dictWord{6, 0, 1656}, + dictWord{6, 0, 1787}, + dictWord{7, 0, 167}, + dictWord{8, 0, 824}, + dictWord{9, 0, 391}, + dictWord{10, 0, 375}, + dictWord{139, 0, 185}, + dictWord{ + 6, + 11, + 484, + }, + dictWord{135, 11, 822}, + dictWord{134, 0, 2046}, + dictWord{7, 0, 1645}, + dictWord{8, 0, 352}, + dictWord{137, 0, 249}, + dictWord{132, 0, 152}, + dictWord{6, 0, 611}, + dictWord{135, 0, 1733}, + dictWord{6, 11, 1724}, + dictWord{135, 11, 2022}, + dictWord{133, 0, 1006}, + dictWord{141, 11, 96}, + dictWord{ + 5, + 0, + 420, + }, + dictWord{135, 0, 1449}, + dictWord{146, 11, 149}, + dictWord{135, 0, 832}, + dictWord{135, 10, 663}, + dictWord{133, 0, 351}, + dictWord{5, 0, 40}, + dictWord{ + 7, + 0, + 598, + }, + dictWord{7, 0, 1638}, + dictWord{8, 0, 78}, + dictWord{9, 0, 166}, + dictWord{9, 0, 640}, + dictWord{9, 0, 685}, + dictWord{9, 0, 773}, + dictWord{11, 0, 215}, + dictWord{13, 0, 65}, + dictWord{14, 0, 172}, + dictWord{14, 0, 317}, + dictWord{145, 0, 6}, + dictWord{8, 0, 60}, + dictWord{9, 0, 343}, + dictWord{139, 0, 769}, + dictWord{ + 134, + 0, + 1354, + }, + dictWord{132, 0, 724}, + dictWord{137, 0, 745}, + dictWord{132, 11, 474}, + dictWord{7, 0, 1951}, + dictWord{8, 0, 765}, + dictWord{8, 0, 772}, + dictWord{ + 140, + 0, + 671, + }, + dictWord{7, 0, 108}, + dictWord{8, 0, 219}, + dictWord{8, 0, 388}, + dictWord{9, 0, 775}, + dictWord{11, 0, 275}, + dictWord{140, 0, 
464}, + dictWord{137, 0, 639}, + dictWord{135, 10, 503}, + dictWord{133, 11, 366}, + dictWord{5, 0, 15}, + dictWord{6, 0, 56}, + dictWord{7, 0, 1758}, + dictWord{8, 0, 500}, + dictWord{9, 0, 730}, + dictWord{ + 11, + 0, + 331, + }, + dictWord{13, 0, 150}, + dictWord{14, 0, 282}, + dictWord{5, 11, 305}, + dictWord{9, 11, 560}, + dictWord{141, 11, 208}, + dictWord{4, 10, 113}, + dictWord{ + 5, + 10, + 163, + }, + dictWord{5, 10, 735}, + dictWord{7, 10, 1009}, + dictWord{9, 10, 9}, + dictWord{9, 10, 771}, + dictWord{12, 10, 90}, + dictWord{13, 10, 138}, + dictWord{ + 13, + 10, + 410, + }, + dictWord{143, 10, 128}, + dictWord{4, 10, 324}, + dictWord{138, 10, 104}, + dictWord{135, 11, 466}, + dictWord{142, 11, 27}, + dictWord{134, 0, 1886}, + dictWord{5, 0, 205}, + dictWord{6, 0, 438}, + dictWord{9, 0, 711}, + dictWord{4, 11, 480}, + dictWord{6, 11, 167}, + dictWord{6, 11, 302}, + dictWord{6, 11, 1642}, + dictWord{ + 7, + 11, + 130, + }, + dictWord{7, 11, 656}, + dictWord{7, 11, 837}, + dictWord{7, 11, 1547}, + dictWord{7, 11, 1657}, + dictWord{8, 11, 429}, + dictWord{9, 11, 228}, + dictWord{ + 10, + 11, + 643, + }, + dictWord{13, 11, 289}, + dictWord{13, 11, 343}, + dictWord{147, 11, 101}, + dictWord{134, 0, 865}, + dictWord{6, 0, 2025}, + dictWord{136, 0, 965}, + dictWord{ + 7, + 11, + 278, + }, + dictWord{10, 11, 739}, + dictWord{11, 11, 708}, + dictWord{141, 11, 348}, + dictWord{133, 0, 534}, + dictWord{135, 11, 1922}, + dictWord{ + 137, + 0, + 691, + }, + dictWord{4, 10, 935}, + dictWord{133, 10, 823}, + dictWord{6, 0, 443}, + dictWord{9, 0, 237}, + dictWord{9, 0, 571}, + dictWord{9, 0, 695}, + dictWord{10, 0, 139}, + dictWord{11, 0, 715}, + dictWord{12, 0, 417}, + dictWord{141, 0, 421}, + dictWord{5, 10, 269}, + dictWord{7, 10, 434}, + dictWord{7, 10, 891}, + dictWord{8, 10, 339}, + dictWord{ + 9, + 10, + 702, + }, + dictWord{11, 10, 594}, + dictWord{11, 10, 718}, + dictWord{145, 10, 100}, + dictWord{6, 0, 1555}, + dictWord{7, 0, 878}, + dictWord{9, 10, 485}, + dictWord{141, 10, 264}, + dictWord{134, 10, 1713}, + dictWord{7, 10, 1810}, + dictWord{11, 10, 866}, + dictWord{12, 10, 103}, + dictWord{141, 10, 495}, + dictWord{ + 135, + 10, + 900, + }, + dictWord{6, 0, 1410}, + dictWord{9, 11, 316}, + dictWord{139, 11, 256}, + dictWord{4, 0, 995}, + dictWord{135, 0, 1033}, + dictWord{132, 0, 578}, + dictWord{10, 0, 881}, + dictWord{12, 0, 740}, + dictWord{12, 0, 743}, + dictWord{140, 0, 759}, + dictWord{132, 0, 822}, + dictWord{133, 0, 923}, + dictWord{142, 10, 143}, + dictWord{135, 11, 1696}, + dictWord{6, 11, 363}, + dictWord{7, 11, 1955}, + dictWord{136, 11, 725}, + dictWord{132, 0, 924}, + dictWord{133, 0, 665}, + dictWord{ + 135, + 10, + 2029, + }, + dictWord{135, 0, 1901}, + dictWord{4, 0, 265}, + dictWord{6, 0, 1092}, + dictWord{6, 0, 1417}, + dictWord{7, 0, 807}, + dictWord{135, 0, 950}, + dictWord{ + 5, + 0, + 93, + }, + dictWord{12, 0, 267}, + dictWord{141, 0, 498}, + dictWord{135, 0, 1451}, + dictWord{5, 11, 813}, + dictWord{135, 11, 2046}, + dictWord{5, 10, 625}, + dictWord{135, 10, 1617}, + dictWord{135, 0, 747}, + dictWord{6, 0, 788}, + dictWord{137, 0, 828}, + dictWord{7, 0, 184}, + dictWord{11, 0, 307}, + dictWord{11, 0, 400}, + dictWord{15, 0, 130}, + dictWord{5, 11, 712}, + dictWord{7, 11, 1855}, + dictWord{8, 10, 425}, + dictWord{8, 10, 693}, + dictWord{9, 10, 720}, + dictWord{10, 10, 380}, + dictWord{10, 10, 638}, + dictWord{11, 11, 17}, + dictWord{11, 10, 473}, + dictWord{12, 10, 61}, + dictWord{13, 11, 321}, + dictWord{144, 11, 67}, + dictWord{135, 0, 198}, + dictWord{6, 
11, 320}, + dictWord{7, 11, 781}, + dictWord{7, 11, 1921}, + dictWord{9, 11, 55}, + dictWord{10, 11, 186}, + dictWord{10, 11, 273}, + dictWord{10, 11, 664}, + dictWord{10, 11, 801}, + dictWord{11, 11, 996}, + dictWord{11, 11, 997}, + dictWord{13, 11, 157}, + dictWord{142, 11, 170}, + dictWord{136, 11, 271}, + dictWord{ + 135, + 0, + 994, + }, + dictWord{7, 11, 103}, + dictWord{7, 11, 863}, + dictWord{11, 11, 184}, + dictWord{14, 11, 299}, + dictWord{145, 11, 62}, + dictWord{11, 10, 551}, + dictWord{142, 10, 159}, + dictWord{5, 0, 233}, + dictWord{5, 0, 320}, + dictWord{6, 0, 140}, + dictWord{8, 0, 295}, + dictWord{8, 0, 615}, + dictWord{136, 11, 615}, + dictWord{ + 133, + 0, + 978, + }, + dictWord{4, 0, 905}, + dictWord{6, 0, 1701}, + dictWord{137, 0, 843}, + dictWord{132, 10, 168}, + dictWord{4, 0, 974}, + dictWord{8, 0, 850}, + dictWord{ + 12, + 0, + 709, + }, + dictWord{12, 0, 768}, + dictWord{140, 0, 786}, + dictWord{135, 10, 91}, + dictWord{152, 0, 6}, + dictWord{138, 10, 532}, + dictWord{135, 10, 1884}, + dictWord{132, 0, 509}, + dictWord{6, 0, 1307}, + dictWord{135, 0, 273}, + dictWord{5, 11, 77}, + dictWord{7, 11, 1455}, + dictWord{10, 11, 843}, + dictWord{19, 11, 73}, + dictWord{150, 11, 5}, + dictWord{132, 11, 458}, + dictWord{135, 11, 1420}, + dictWord{6, 11, 109}, + dictWord{138, 11, 382}, + dictWord{6, 0, 201}, + dictWord{6, 11, 330}, + dictWord{7, 10, 70}, + dictWord{7, 11, 1084}, + dictWord{10, 10, 240}, + dictWord{11, 11, 142}, + dictWord{147, 10, 93}, + dictWord{7, 0, 1041}, + dictWord{ + 140, + 11, + 328, + }, + dictWord{133, 11, 354}, + dictWord{134, 0, 1040}, + dictWord{133, 0, 693}, + dictWord{134, 0, 774}, + dictWord{139, 0, 234}, + dictWord{132, 0, 336}, + dictWord{7, 0, 1399}, + dictWord{139, 10, 392}, + dictWord{20, 0, 22}, + dictWord{148, 11, 22}, + dictWord{5, 0, 802}, + dictWord{7, 0, 2021}, + dictWord{136, 0, 805}, + dictWord{ + 5, + 0, + 167, + }, + dictWord{5, 0, 899}, + dictWord{6, 0, 410}, + dictWord{137, 0, 777}, + dictWord{137, 0, 789}, + dictWord{134, 0, 1705}, + dictWord{7, 10, 655}, + dictWord{ + 135, + 10, + 1844, + }, + dictWord{4, 10, 145}, + dictWord{6, 10, 176}, + dictWord{7, 10, 395}, + dictWord{137, 10, 562}, + dictWord{132, 10, 501}, + dictWord{135, 0, 10}, + dictWord{5, 0, 11}, + dictWord{6, 0, 117}, + dictWord{6, 0, 485}, + dictWord{7, 0, 1133}, + dictWord{9, 0, 582}, + dictWord{9, 0, 594}, + dictWord{10, 0, 82}, + dictWord{11, 0, 21}, + dictWord{11, 0, 818}, + dictWord{12, 0, 535}, + dictWord{13, 0, 86}, + dictWord{20, 0, 91}, + dictWord{23, 0, 13}, + dictWord{134, 10, 509}, + dictWord{4, 0, 264}, + dictWord{ + 7, + 0, + 1067, + }, + dictWord{8, 0, 204}, + dictWord{8, 0, 385}, + dictWord{139, 0, 953}, + dictWord{139, 11, 737}, + dictWord{138, 0, 56}, + dictWord{134, 0, 1917}, + dictWord{ + 133, + 0, + 470, + }, + dictWord{10, 11, 657}, + dictWord{14, 11, 297}, + dictWord{142, 11, 361}, + dictWord{135, 11, 412}, + dictWord{7, 0, 1198}, + dictWord{7, 11, 1198}, + dictWord{8, 11, 556}, + dictWord{14, 11, 123}, + dictWord{14, 11, 192}, + dictWord{143, 11, 27}, + dictWord{7, 11, 1985}, + dictWord{14, 11, 146}, + dictWord{15, 11, 42}, + dictWord{16, 11, 23}, + dictWord{17, 11, 86}, + dictWord{146, 11, 17}, + dictWord{11, 0, 1015}, + dictWord{136, 11, 122}, + dictWord{4, 10, 114}, + dictWord{ + 9, + 10, + 492, + }, + dictWord{13, 10, 462}, + dictWord{142, 10, 215}, + dictWord{4, 10, 77}, + dictWord{5, 10, 361}, + dictWord{6, 10, 139}, + dictWord{6, 10, 401}, + dictWord{ + 6, + 10, + 404, + }, + dictWord{7, 10, 413}, + dictWord{7, 10, 715}, + 
dictWord{7, 10, 1716}, + dictWord{11, 10, 279}, + dictWord{12, 10, 179}, + dictWord{12, 10, 258}, + dictWord{ + 13, + 10, + 244, + }, + dictWord{142, 10, 358}, + dictWord{134, 10, 1717}, + dictWord{7, 10, 1061}, + dictWord{8, 10, 82}, + dictWord{11, 10, 250}, + dictWord{12, 10, 420}, + dictWord{141, 10, 184}, + dictWord{133, 0, 715}, + dictWord{135, 10, 724}, + dictWord{9, 0, 919}, + dictWord{9, 0, 922}, + dictWord{9, 0, 927}, + dictWord{9, 0, 933}, + dictWord{9, 0, 962}, + dictWord{9, 0, 1000}, + dictWord{9, 0, 1002}, + dictWord{9, 0, 1021}, + dictWord{12, 0, 890}, + dictWord{12, 0, 907}, + dictWord{12, 0, 930}, + dictWord{ + 15, + 0, + 207, + }, + dictWord{15, 0, 228}, + dictWord{15, 0, 238}, + dictWord{149, 0, 61}, + dictWord{8, 0, 794}, + dictWord{9, 0, 400}, + dictWord{10, 0, 298}, + dictWord{142, 0, 228}, + dictWord{5, 11, 430}, + dictWord{5, 11, 932}, + dictWord{6, 11, 131}, + dictWord{7, 11, 417}, + dictWord{9, 11, 522}, + dictWord{11, 11, 314}, + dictWord{141, 11, 390}, + dictWord{132, 0, 867}, + dictWord{8, 0, 724}, + dictWord{132, 11, 507}, + dictWord{137, 11, 261}, + dictWord{4, 11, 343}, + dictWord{133, 11, 511}, + dictWord{ + 6, + 0, + 190, + }, + dictWord{7, 0, 768}, + dictWord{135, 0, 1170}, + dictWord{6, 10, 513}, + dictWord{135, 10, 1052}, + dictWord{7, 11, 455}, + dictWord{138, 11, 591}, + dictWord{134, 0, 1066}, + dictWord{137, 10, 899}, + dictWord{14, 0, 67}, + dictWord{147, 0, 60}, + dictWord{4, 0, 948}, + dictWord{18, 0, 174}, + dictWord{146, 0, 176}, + dictWord{135, 0, 1023}, + dictWord{7, 10, 1417}, + dictWord{12, 10, 382}, + dictWord{17, 10, 48}, + dictWord{152, 10, 12}, + dictWord{134, 11, 575}, + dictWord{ + 132, + 0, + 764, + }, + dictWord{6, 10, 545}, + dictWord{7, 10, 565}, + dictWord{7, 10, 1669}, + dictWord{10, 10, 114}, + dictWord{11, 10, 642}, + dictWord{140, 10, 618}, + dictWord{ + 6, + 0, + 137, + }, + dictWord{9, 0, 75}, + dictWord{9, 0, 253}, + dictWord{10, 0, 194}, + dictWord{138, 0, 444}, + dictWord{4, 0, 756}, + dictWord{133, 10, 5}, + dictWord{8, 0, 1008}, + dictWord{135, 10, 192}, + dictWord{132, 0, 842}, + dictWord{11, 0, 643}, + dictWord{12, 0, 115}, + dictWord{136, 10, 763}, + dictWord{139, 0, 67}, + dictWord{ + 133, + 10, + 759, + }, + dictWord{4, 0, 821}, + dictWord{5, 0, 760}, + dictWord{7, 0, 542}, + dictWord{8, 0, 135}, + dictWord{8, 0, 496}, + dictWord{135, 11, 580}, + dictWord{7, 10, 370}, + dictWord{7, 10, 1007}, + dictWord{7, 10, 1177}, + dictWord{135, 10, 1565}, + dictWord{135, 10, 1237}, + dictWord{140, 0, 736}, + dictWord{7, 0, 319}, + dictWord{ + 7, + 0, + 355, + }, + dictWord{7, 0, 763}, + dictWord{10, 0, 389}, + dictWord{145, 0, 43}, + dictWord{8, 11, 333}, + dictWord{138, 11, 182}, + dictWord{4, 10, 87}, + dictWord{5, 10, 250}, + dictWord{141, 10, 298}, + dictWord{138, 0, 786}, + dictWord{134, 0, 2044}, + dictWord{8, 11, 330}, + dictWord{140, 11, 477}, + dictWord{135, 11, 1338}, + dictWord{132, 11, 125}, + dictWord{134, 0, 1030}, + dictWord{134, 0, 1083}, + dictWord{132, 11, 721}, + dictWord{135, 10, 814}, + dictWord{7, 11, 776}, + dictWord{ + 8, + 11, + 145, + }, + dictWord{147, 11, 56}, + dictWord{134, 0, 1226}, + dictWord{4, 10, 57}, + dictWord{7, 10, 1195}, + dictWord{7, 10, 1438}, + dictWord{7, 10, 1548}, + dictWord{ + 7, + 10, + 1835, + }, + dictWord{7, 10, 1904}, + dictWord{9, 10, 757}, + dictWord{10, 10, 604}, + dictWord{139, 10, 519}, + dictWord{7, 11, 792}, + dictWord{8, 11, 147}, + dictWord{10, 11, 821}, + dictWord{139, 11, 1021}, + dictWord{137, 11, 797}, + dictWord{4, 0, 58}, + dictWord{5, 0, 286}, + dictWord{6, 0, 
319}, + dictWord{7, 0, 402}, + dictWord{ + 7, + 0, + 1254, + }, + dictWord{7, 0, 1903}, + dictWord{8, 0, 356}, + dictWord{140, 0, 408}, + dictWord{4, 0, 389}, + dictWord{4, 0, 815}, + dictWord{9, 0, 181}, + dictWord{9, 0, 255}, + dictWord{10, 0, 8}, + dictWord{10, 0, 29}, + dictWord{10, 0, 816}, + dictWord{11, 0, 311}, + dictWord{11, 0, 561}, + dictWord{12, 0, 67}, + dictWord{141, 0, 181}, + dictWord{ + 7, + 11, + 1472, + }, + dictWord{135, 11, 1554}, + dictWord{7, 11, 1071}, + dictWord{7, 11, 1541}, + dictWord{7, 11, 1767}, + dictWord{7, 11, 1806}, + dictWord{7, 11, 1999}, + dictWord{9, 11, 248}, + dictWord{10, 11, 400}, + dictWord{11, 11, 162}, + dictWord{11, 11, 178}, + dictWord{11, 11, 242}, + dictWord{12, 11, 605}, + dictWord{ + 15, + 11, + 26, + }, + dictWord{144, 11, 44}, + dictWord{5, 11, 168}, + dictWord{5, 11, 930}, + dictWord{8, 11, 74}, + dictWord{9, 11, 623}, + dictWord{12, 11, 500}, + dictWord{ + 12, + 11, + 579, + }, + dictWord{13, 11, 41}, + dictWord{143, 11, 93}, + dictWord{6, 11, 220}, + dictWord{7, 11, 1101}, + dictWord{141, 11, 105}, + dictWord{5, 0, 474}, + dictWord{ + 7, + 0, + 507, + }, + dictWord{4, 10, 209}, + dictWord{7, 11, 507}, + dictWord{135, 10, 902}, + dictWord{132, 0, 427}, + dictWord{6, 0, 413}, + dictWord{7, 10, 335}, + dictWord{ + 7, + 10, + 1437, + }, + dictWord{7, 10, 1668}, + dictWord{8, 10, 553}, + dictWord{8, 10, 652}, + dictWord{8, 10, 656}, + dictWord{9, 10, 558}, + dictWord{11, 10, 743}, + dictWord{ + 149, + 10, + 18, + }, + dictWord{132, 0, 730}, + dictWord{6, 11, 19}, + dictWord{7, 11, 1413}, + dictWord{139, 11, 428}, + dictWord{133, 0, 373}, + dictWord{132, 10, 559}, + dictWord{7, 11, 96}, + dictWord{8, 11, 401}, + dictWord{137, 11, 896}, + dictWord{7, 0, 799}, + dictWord{7, 0, 1972}, + dictWord{5, 10, 1017}, + dictWord{138, 10, 511}, + dictWord{135, 0, 1793}, + dictWord{7, 11, 1961}, + dictWord{7, 11, 1965}, + dictWord{8, 11, 702}, + dictWord{136, 11, 750}, + dictWord{8, 11, 150}, + dictWord{8, 11, 737}, + dictWord{140, 11, 366}, + dictWord{132, 0, 322}, + dictWord{133, 10, 709}, + dictWord{8, 11, 800}, + dictWord{9, 11, 148}, + dictWord{9, 11, 872}, + dictWord{ + 9, + 11, + 890, + }, + dictWord{11, 11, 309}, + dictWord{11, 11, 1001}, + dictWord{13, 11, 267}, + dictWord{141, 11, 323}, + dictWord{134, 10, 1745}, + dictWord{7, 0, 290}, + dictWord{136, 10, 206}, + dictWord{7, 0, 1651}, + dictWord{145, 0, 89}, + dictWord{139, 0, 2}, + dictWord{132, 0, 672}, + dictWord{6, 0, 1860}, + dictWord{8, 0, 905}, + dictWord{ + 10, + 0, + 844, + }, + dictWord{10, 0, 846}, + dictWord{10, 0, 858}, + dictWord{12, 0, 699}, + dictWord{12, 0, 746}, + dictWord{140, 0, 772}, + dictWord{135, 11, 424}, + dictWord{133, 11, 547}, + dictWord{133, 0, 737}, + dictWord{5, 11, 490}, + dictWord{6, 11, 615}, + dictWord{6, 11, 620}, + dictWord{135, 11, 683}, + dictWord{6, 0, 746}, + dictWord{134, 0, 1612}, + dictWord{132, 10, 776}, + dictWord{9, 11, 385}, + dictWord{149, 11, 17}, + dictWord{133, 0, 145}, + dictWord{135, 10, 1272}, + dictWord{ + 7, + 0, + 884, + }, + dictWord{140, 0, 124}, + dictWord{4, 0, 387}, + dictWord{135, 0, 1288}, + dictWord{5, 11, 133}, + dictWord{136, 10, 406}, + dictWord{136, 11, 187}, + dictWord{ + 6, + 0, + 679, + }, + dictWord{8, 11, 8}, + dictWord{138, 11, 0}, + dictWord{135, 0, 550}, + dictWord{135, 11, 798}, + dictWord{136, 11, 685}, + dictWord{7, 11, 1086}, + dictWord{145, 11, 46}, + dictWord{8, 10, 175}, + dictWord{10, 10, 168}, + dictWord{138, 10, 573}, + dictWord{135, 0, 1305}, + dictWord{4, 0, 576}, + dictWord{ + 135, + 0, + 1263, + }, + 
dictWord{6, 0, 686}, + dictWord{134, 0, 1563}, + dictWord{134, 0, 607}, + dictWord{5, 0, 919}, + dictWord{134, 0, 1673}, + dictWord{148, 0, 37}, + dictWord{ + 8, + 11, + 774, + }, + dictWord{10, 11, 670}, + dictWord{140, 11, 51}, + dictWord{133, 10, 784}, + dictWord{139, 10, 882}, + dictWord{4, 0, 82}, + dictWord{5, 0, 333}, + dictWord{ + 5, + 0, + 904, + }, + dictWord{6, 0, 207}, + dictWord{7, 0, 325}, + dictWord{7, 0, 1726}, + dictWord{8, 0, 101}, + dictWord{10, 0, 778}, + dictWord{139, 0, 220}, + dictWord{135, 11, 371}, + dictWord{132, 0, 958}, + dictWord{133, 0, 903}, + dictWord{4, 11, 127}, + dictWord{5, 11, 350}, + dictWord{6, 11, 356}, + dictWord{8, 11, 426}, + dictWord{9, 11, 572}, + dictWord{10, 11, 247}, + dictWord{139, 11, 312}, + dictWord{140, 0, 147}, + dictWord{6, 11, 59}, + dictWord{7, 11, 885}, + dictWord{9, 11, 603}, + dictWord{ + 141, + 11, + 397, + }, + dictWord{10, 0, 367}, + dictWord{9, 10, 14}, + dictWord{9, 10, 441}, + dictWord{139, 10, 9}, + dictWord{11, 10, 966}, + dictWord{12, 10, 287}, + dictWord{ + 13, + 10, + 342, + }, + dictWord{13, 10, 402}, + dictWord{15, 10, 110}, + dictWord{143, 10, 163}, + dictWord{134, 0, 690}, + dictWord{132, 0, 705}, + dictWord{9, 0, 651}, + dictWord{ + 11, + 0, + 971, + }, + dictWord{13, 0, 273}, + dictWord{7, 10, 1428}, + dictWord{7, 10, 1640}, + dictWord{7, 10, 1867}, + dictWord{9, 10, 169}, + dictWord{9, 10, 182}, + dictWord{ + 9, + 10, + 367, + }, + dictWord{9, 10, 478}, + dictWord{9, 10, 506}, + dictWord{9, 10, 551}, + dictWord{9, 10, 557}, + dictWord{9, 10, 648}, + dictWord{9, 10, 697}, + dictWord{ + 9, + 10, + 705, + }, + dictWord{9, 10, 725}, + dictWord{9, 10, 787}, + dictWord{9, 10, 794}, + dictWord{10, 10, 198}, + dictWord{10, 10, 214}, + dictWord{10, 10, 267}, + dictWord{ + 10, + 10, + 275, + }, + dictWord{10, 10, 456}, + dictWord{10, 10, 551}, + dictWord{10, 10, 561}, + dictWord{10, 10, 613}, + dictWord{10, 10, 627}, + dictWord{10, 10, 668}, + dictWord{10, 10, 675}, + dictWord{10, 10, 691}, + dictWord{10, 10, 695}, + dictWord{10, 10, 707}, + dictWord{10, 10, 715}, + dictWord{11, 10, 183}, + dictWord{ + 11, + 10, + 201, + }, + dictWord{11, 10, 262}, + dictWord{11, 10, 352}, + dictWord{11, 10, 439}, + dictWord{11, 10, 493}, + dictWord{11, 10, 572}, + dictWord{11, 10, 591}, + dictWord{ + 11, + 10, + 608, + }, + dictWord{11, 10, 611}, + dictWord{11, 10, 646}, + dictWord{11, 10, 674}, + dictWord{11, 10, 711}, + dictWord{11, 10, 751}, + dictWord{11, 10, 761}, + dictWord{11, 10, 776}, + dictWord{11, 10, 785}, + dictWord{11, 10, 850}, + dictWord{11, 10, 853}, + dictWord{11, 10, 862}, + dictWord{11, 10, 865}, + dictWord{ + 11, + 10, + 868, + }, + dictWord{11, 10, 875}, + dictWord{11, 10, 898}, + dictWord{11, 10, 902}, + dictWord{11, 10, 903}, + dictWord{11, 10, 910}, + dictWord{11, 10, 932}, + dictWord{ + 11, + 10, + 942, + }, + dictWord{11, 10, 957}, + dictWord{11, 10, 967}, + dictWord{11, 10, 972}, + dictWord{12, 10, 148}, + dictWord{12, 10, 195}, + dictWord{12, 10, 220}, + dictWord{12, 10, 237}, + dictWord{12, 10, 318}, + dictWord{12, 10, 339}, + dictWord{12, 10, 393}, + dictWord{12, 10, 445}, + dictWord{12, 10, 450}, + dictWord{ + 12, + 10, + 474, + }, + dictWord{12, 10, 505}, + dictWord{12, 10, 509}, + dictWord{12, 10, 533}, + dictWord{12, 10, 591}, + dictWord{12, 10, 594}, + dictWord{12, 10, 597}, + dictWord{ + 12, + 10, + 621, + }, + dictWord{12, 10, 633}, + dictWord{12, 10, 642}, + dictWord{13, 10, 59}, + dictWord{13, 10, 60}, + dictWord{13, 10, 145}, + dictWord{13, 10, 239}, + dictWord{13, 10, 250}, + dictWord{13, 10, 
329}, + dictWord{13, 10, 344}, + dictWord{13, 10, 365}, + dictWord{13, 10, 372}, + dictWord{13, 10, 387}, + dictWord{ + 13, + 10, + 403, + }, + dictWord{13, 10, 414}, + dictWord{13, 10, 456}, + dictWord{13, 10, 470}, + dictWord{13, 10, 478}, + dictWord{13, 10, 483}, + dictWord{13, 10, 489}, + dictWord{ + 14, + 10, + 55, + }, + dictWord{14, 10, 57}, + dictWord{14, 10, 81}, + dictWord{14, 10, 90}, + dictWord{14, 10, 148}, + dictWord{14, 10, 239}, + dictWord{14, 10, 266}, + dictWord{ + 14, + 10, + 321, + }, + dictWord{14, 10, 326}, + dictWord{14, 10, 327}, + dictWord{14, 10, 330}, + dictWord{14, 10, 347}, + dictWord{14, 10, 355}, + dictWord{14, 10, 401}, + dictWord{14, 10, 404}, + dictWord{14, 10, 411}, + dictWord{14, 10, 414}, + dictWord{14, 10, 416}, + dictWord{14, 10, 420}, + dictWord{15, 10, 61}, + dictWord{ + 15, + 10, + 74, + }, + dictWord{15, 10, 87}, + dictWord{15, 10, 88}, + dictWord{15, 10, 94}, + dictWord{15, 10, 96}, + dictWord{15, 10, 116}, + dictWord{15, 10, 149}, + dictWord{ + 15, + 10, + 154, + }, + dictWord{16, 10, 50}, + dictWord{16, 10, 63}, + dictWord{16, 10, 73}, + dictWord{17, 10, 2}, + dictWord{17, 10, 66}, + dictWord{17, 10, 92}, + dictWord{17, 10, 103}, + dictWord{17, 10, 112}, + dictWord{17, 10, 120}, + dictWord{18, 10, 50}, + dictWord{18, 10, 54}, + dictWord{18, 10, 82}, + dictWord{18, 10, 86}, + dictWord{18, 10, 90}, + dictWord{18, 10, 111}, + dictWord{18, 10, 115}, + dictWord{18, 10, 156}, + dictWord{19, 10, 40}, + dictWord{19, 10, 79}, + dictWord{20, 10, 78}, + dictWord{149, 10, 22}, + dictWord{7, 0, 887}, + dictWord{5, 10, 161}, + dictWord{135, 10, 839}, + dictWord{142, 11, 98}, + dictWord{134, 0, 90}, + dictWord{138, 11, 356}, + dictWord{ + 135, + 11, + 441, + }, + dictWord{6, 11, 111}, + dictWord{7, 11, 4}, + dictWord{8, 11, 163}, + dictWord{8, 11, 776}, + dictWord{138, 11, 566}, + dictWord{134, 0, 908}, + dictWord{ + 134, + 0, + 1261, + }, + dictWord{7, 0, 813}, + dictWord{12, 0, 497}, + dictWord{141, 0, 56}, + dictWord{134, 0, 1235}, + dictWord{135, 0, 429}, + dictWord{135, 11, 1994}, + dictWord{138, 0, 904}, + dictWord{6, 0, 125}, + dictWord{7, 0, 1277}, + dictWord{137, 0, 772}, + dictWord{151, 0, 12}, + dictWord{4, 0, 841}, + dictWord{5, 0, 386}, + dictWord{ + 133, + 11, + 386, + }, + dictWord{5, 11, 297}, + dictWord{135, 11, 1038}, + dictWord{6, 0, 860}, + dictWord{6, 0, 1069}, + dictWord{135, 11, 309}, + dictWord{136, 0, 946}, + dictWord{135, 10, 1814}, + dictWord{141, 11, 418}, + dictWord{136, 11, 363}, + dictWord{10, 0, 768}, + dictWord{139, 0, 787}, + dictWord{22, 11, 30}, + dictWord{ + 150, + 11, + 33, + }, + dictWord{6, 0, 160}, + dictWord{7, 0, 1106}, + dictWord{9, 0, 770}, + dictWord{11, 0, 112}, + dictWord{140, 0, 413}, + dictWord{11, 11, 216}, + dictWord{ + 139, + 11, + 340, + }, + dictWord{136, 10, 139}, + dictWord{135, 11, 1390}, + dictWord{135, 11, 808}, + dictWord{132, 11, 280}, + dictWord{12, 0, 271}, + dictWord{17, 0, 109}, + dictWord{7, 10, 643}, + dictWord{136, 10, 236}, + dictWord{140, 11, 54}, + dictWord{4, 11, 421}, + dictWord{133, 11, 548}, + dictWord{11, 0, 719}, + dictWord{12, 0, 36}, + dictWord{141, 0, 337}, + dictWord{7, 0, 581}, + dictWord{9, 0, 644}, + dictWord{137, 0, 699}, + dictWord{11, 11, 511}, + dictWord{13, 11, 394}, + dictWord{14, 11, 298}, + dictWord{14, 11, 318}, + dictWord{146, 11, 103}, + dictWord{7, 0, 304}, + dictWord{9, 0, 646}, + dictWord{9, 0, 862}, + dictWord{11, 0, 696}, + dictWord{12, 0, 208}, + dictWord{15, 0, 79}, + dictWord{147, 0, 108}, + dictWord{4, 0, 631}, + dictWord{7, 0, 1126}, + dictWord{135, 0, 
1536}, + dictWord{135, 11, 1527}, + dictWord{8, 0, 880}, + dictWord{10, 0, 869}, + dictWord{138, 0, 913}, + dictWord{7, 0, 1513}, + dictWord{5, 10, 54}, + dictWord{6, 11, 254}, + dictWord{9, 11, 109}, + dictWord{138, 11, 103}, + dictWord{135, 0, 981}, + dictWord{133, 11, 729}, + dictWord{132, 10, 744}, + dictWord{132, 0, 434}, + dictWord{134, 0, 550}, + dictWord{7, 0, 930}, + dictWord{10, 0, 476}, + dictWord{13, 0, 452}, + dictWord{19, 0, 104}, + dictWord{6, 11, 1630}, + dictWord{10, 10, 402}, + dictWord{146, 10, 55}, + dictWord{5, 0, 553}, + dictWord{138, 0, 824}, + dictWord{136, 0, 452}, + dictWord{8, 0, 151}, + dictWord{137, 10, 624}, + dictWord{132, 10, 572}, + dictWord{132, 0, 772}, + dictWord{133, 11, 671}, + dictWord{ + 133, + 0, + 292, + }, + dictWord{138, 0, 135}, + dictWord{132, 11, 889}, + dictWord{140, 11, 207}, + dictWord{9, 0, 504}, + dictWord{6, 10, 43}, + dictWord{7, 10, 38}, + dictWord{ + 8, + 10, + 248, + }, + dictWord{138, 10, 513}, + dictWord{6, 0, 1089}, + dictWord{135, 11, 1910}, + dictWord{4, 11, 627}, + dictWord{133, 11, 775}, + dictWord{135, 0, 783}, + dictWord{133, 10, 766}, + dictWord{133, 10, 363}, + dictWord{7, 0, 387}, + dictWord{135, 11, 387}, + dictWord{7, 0, 393}, + dictWord{10, 0, 603}, + dictWord{11, 0, 206}, + dictWord{7, 11, 202}, + dictWord{11, 11, 362}, + dictWord{11, 11, 948}, + dictWord{140, 11, 388}, + dictWord{6, 11, 507}, + dictWord{7, 11, 451}, + dictWord{8, 11, 389}, + dictWord{12, 11, 490}, + dictWord{13, 11, 16}, + dictWord{13, 11, 215}, + dictWord{13, 11, 351}, + dictWord{18, 11, 132}, + dictWord{147, 11, 125}, + dictWord{ + 4, + 0, + 912, + }, + dictWord{9, 0, 232}, + dictWord{135, 11, 841}, + dictWord{6, 10, 258}, + dictWord{140, 10, 409}, + dictWord{5, 10, 249}, + dictWord{148, 10, 82}, + dictWord{ + 136, + 11, + 566, + }, + dictWord{6, 0, 977}, + dictWord{135, 11, 1214}, + dictWord{7, 0, 1973}, + dictWord{136, 0, 716}, + dictWord{135, 0, 98}, + dictWord{133, 0, 733}, + dictWord{ + 5, + 11, + 912, + }, + dictWord{134, 11, 1695}, + dictWord{5, 10, 393}, + dictWord{6, 10, 378}, + dictWord{7, 10, 1981}, + dictWord{9, 10, 32}, + dictWord{9, 10, 591}, + dictWord{10, 10, 685}, + dictWord{10, 10, 741}, + dictWord{142, 10, 382}, + dictWord{133, 10, 788}, + dictWord{10, 0, 19}, + dictWord{11, 0, 911}, + dictWord{7, 10, 1968}, + dictWord{141, 10, 509}, + dictWord{5, 0, 668}, + dictWord{5, 11, 236}, + dictWord{6, 11, 572}, + dictWord{8, 11, 492}, + dictWord{11, 11, 618}, + dictWord{144, 11, 56}, + dictWord{135, 11, 1789}, + dictWord{4, 0, 360}, + dictWord{5, 0, 635}, + dictWord{5, 0, 700}, + dictWord{5, 10, 58}, + dictWord{5, 10, 171}, + dictWord{5, 10, 683}, + dictWord{ + 6, + 10, + 291, + }, + dictWord{6, 10, 566}, + dictWord{7, 10, 1650}, + dictWord{11, 10, 523}, + dictWord{12, 10, 273}, + dictWord{12, 10, 303}, + dictWord{15, 10, 39}, + dictWord{143, 10, 111}, + dictWord{133, 0, 901}, + dictWord{134, 10, 589}, + dictWord{5, 11, 190}, + dictWord{136, 11, 318}, + dictWord{140, 0, 656}, + dictWord{ + 7, + 0, + 726, + }, + dictWord{152, 0, 9}, + dictWord{4, 10, 917}, + dictWord{133, 10, 1005}, + dictWord{135, 10, 1598}, + dictWord{134, 11, 491}, + dictWord{4, 10, 919}, + dictWord{133, 11, 434}, + dictWord{137, 0, 72}, + dictWord{6, 0, 1269}, + dictWord{6, 0, 1566}, + dictWord{134, 0, 1621}, + dictWord{9, 0, 463}, + dictWord{10, 0, 595}, + dictWord{4, 10, 255}, + dictWord{5, 10, 302}, + dictWord{6, 10, 132}, + dictWord{7, 10, 128}, + dictWord{7, 10, 283}, + dictWord{7, 10, 1299}, + dictWord{10, 10, 52}, + dictWord{ + 10, + 10, + 514, + }, + 
dictWord{11, 10, 925}, + dictWord{13, 10, 92}, + dictWord{142, 10, 309}, + dictWord{135, 0, 1454}, + dictWord{134, 0, 1287}, + dictWord{11, 0, 600}, + dictWord{13, 0, 245}, + dictWord{137, 10, 173}, + dictWord{136, 0, 989}, + dictWord{7, 0, 164}, + dictWord{7, 0, 1571}, + dictWord{9, 0, 107}, + dictWord{140, 0, 225}, + dictWord{6, 0, 1061}, + dictWord{141, 10, 442}, + dictWord{4, 0, 27}, + dictWord{5, 0, 484}, + dictWord{5, 0, 510}, + dictWord{6, 0, 434}, + dictWord{7, 0, 1000}, + dictWord{ + 7, + 0, + 1098, + }, + dictWord{136, 0, 2}, + dictWord{7, 11, 85}, + dictWord{7, 11, 247}, + dictWord{8, 11, 585}, + dictWord{10, 11, 163}, + dictWord{138, 11, 316}, + dictWord{ + 11, + 11, + 103, + }, + dictWord{142, 11, 0}, + dictWord{134, 0, 1127}, + dictWord{4, 0, 460}, + dictWord{134, 0, 852}, + dictWord{134, 10, 210}, + dictWord{4, 0, 932}, + dictWord{ + 133, + 0, + 891, + }, + dictWord{6, 0, 588}, + dictWord{147, 11, 83}, + dictWord{8, 0, 625}, + dictWord{4, 10, 284}, + dictWord{134, 10, 223}, + dictWord{134, 0, 76}, + dictWord{8, 0, 92}, + dictWord{137, 0, 221}, + dictWord{4, 11, 124}, + dictWord{10, 11, 457}, + dictWord{11, 11, 121}, + dictWord{11, 11, 169}, + dictWord{11, 11, 422}, + dictWord{ + 11, + 11, + 870, + }, + dictWord{12, 11, 214}, + dictWord{13, 11, 389}, + dictWord{14, 11, 187}, + dictWord{143, 11, 77}, + dictWord{9, 11, 618}, + dictWord{138, 11, 482}, + dictWord{ + 4, + 10, + 218, + }, + dictWord{7, 10, 526}, + dictWord{143, 10, 137}, + dictWord{13, 0, 9}, + dictWord{14, 0, 104}, + dictWord{14, 0, 311}, + dictWord{4, 10, 270}, + dictWord{ + 5, + 10, + 192, + }, + dictWord{6, 10, 332}, + dictWord{135, 10, 1322}, + dictWord{140, 10, 661}, + dictWord{135, 11, 1193}, + dictWord{6, 11, 107}, + dictWord{7, 11, 638}, + dictWord{7, 11, 1632}, + dictWord{137, 11, 396}, + dictWord{132, 0, 763}, + dictWord{4, 0, 622}, + dictWord{5, 11, 370}, + dictWord{134, 11, 1756}, + dictWord{ + 133, + 0, + 253, + }, + dictWord{135, 0, 546}, + dictWord{9, 0, 73}, + dictWord{10, 0, 110}, + dictWord{14, 0, 185}, + dictWord{17, 0, 119}, + dictWord{133, 11, 204}, + dictWord{7, 0, 624}, + dictWord{7, 0, 916}, + dictWord{10, 0, 256}, + dictWord{139, 0, 87}, + dictWord{7, 10, 379}, + dictWord{8, 10, 481}, + dictWord{137, 10, 377}, + dictWord{5, 0, 212}, + dictWord{12, 0, 35}, + dictWord{13, 0, 382}, + dictWord{5, 11, 970}, + dictWord{134, 11, 1706}, + dictWord{9, 0, 746}, + dictWord{5, 10, 1003}, + dictWord{134, 10, 149}, + dictWord{10, 0, 150}, + dictWord{11, 0, 849}, + dictWord{13, 0, 330}, + dictWord{8, 10, 262}, + dictWord{9, 10, 627}, + dictWord{11, 10, 214}, + dictWord{11, 10, 404}, + dictWord{11, 10, 457}, + dictWord{11, 10, 780}, + dictWord{11, 10, 913}, + dictWord{13, 10, 401}, + dictWord{142, 10, 200}, + dictWord{134, 0, 1466}, + dictWord{ + 135, + 11, + 3, + }, + dictWord{6, 0, 1299}, + dictWord{4, 11, 35}, + dictWord{5, 11, 121}, + dictWord{5, 11, 483}, + dictWord{5, 11, 685}, + dictWord{6, 11, 489}, + dictWord{7, 11, 1204}, + dictWord{136, 11, 394}, + dictWord{135, 10, 742}, + dictWord{4, 10, 142}, + dictWord{136, 10, 304}, + dictWord{4, 11, 921}, + dictWord{133, 11, 1007}, + dictWord{ + 134, + 0, + 1518, + }, + dictWord{6, 0, 1229}, + dictWord{135, 0, 1175}, + dictWord{133, 0, 816}, + dictWord{12, 0, 159}, + dictWord{4, 10, 471}, + dictWord{4, 11, 712}, + dictWord{ + 5, + 10, + 51, + }, + dictWord{6, 10, 602}, + dictWord{7, 10, 925}, + dictWord{8, 10, 484}, + dictWord{138, 10, 195}, + dictWord{134, 11, 1629}, + dictWord{5, 0, 869}, + dictWord{ + 5, + 0, + 968, + }, + dictWord{6, 0, 1626}, + 
dictWord{8, 0, 734}, + dictWord{136, 0, 784}, + dictWord{4, 0, 542}, + dictWord{6, 0, 1716}, + dictWord{6, 0, 1727}, + dictWord{ + 7, + 0, + 1082, + }, + dictWord{7, 0, 1545}, + dictWord{8, 0, 56}, + dictWord{8, 0, 118}, + dictWord{8, 0, 412}, + dictWord{8, 0, 564}, + dictWord{9, 0, 888}, + dictWord{9, 0, 908}, + dictWord{ + 10, + 0, + 50, + }, + dictWord{10, 0, 423}, + dictWord{11, 0, 685}, + dictWord{11, 0, 697}, + dictWord{11, 0, 933}, + dictWord{12, 0, 299}, + dictWord{13, 0, 126}, + dictWord{ + 13, + 0, + 136, + }, + dictWord{13, 0, 170}, + dictWord{13, 0, 190}, + dictWord{136, 10, 688}, + dictWord{132, 10, 697}, + dictWord{4, 0, 232}, + dictWord{9, 0, 202}, + dictWord{ + 10, + 0, + 474, + }, + dictWord{140, 0, 433}, + dictWord{136, 0, 212}, + dictWord{6, 0, 108}, + dictWord{7, 0, 1003}, + dictWord{7, 0, 1181}, + dictWord{8, 0, 111}, + dictWord{ + 136, + 0, + 343, + }, + dictWord{5, 10, 221}, + dictWord{135, 11, 1255}, + dictWord{133, 11, 485}, + dictWord{134, 0, 1712}, + dictWord{142, 0, 216}, + dictWord{5, 0, 643}, + dictWord{ + 6, + 0, + 516, + }, + dictWord{4, 11, 285}, + dictWord{5, 11, 317}, + dictWord{6, 11, 301}, + dictWord{7, 11, 7}, + dictWord{8, 11, 153}, + dictWord{10, 11, 766}, + dictWord{ + 11, + 11, + 468, + }, + dictWord{12, 11, 467}, + dictWord{141, 11, 143}, + dictWord{4, 0, 133}, + dictWord{7, 0, 711}, + dictWord{7, 0, 1298}, + dictWord{135, 0, 1585}, + dictWord{ + 134, + 0, + 650, + }, + dictWord{135, 11, 512}, + dictWord{6, 0, 99}, + dictWord{7, 0, 1808}, + dictWord{145, 0, 57}, + dictWord{6, 0, 246}, + dictWord{6, 0, 574}, + dictWord{7, 0, 428}, + dictWord{9, 0, 793}, + dictWord{10, 0, 669}, + dictWord{11, 0, 485}, + dictWord{11, 0, 840}, + dictWord{12, 0, 300}, + dictWord{14, 0, 250}, + dictWord{145, 0, 55}, + dictWord{ + 4, + 10, + 132, + }, + dictWord{5, 10, 69}, + dictWord{135, 10, 1242}, + dictWord{136, 0, 1023}, + dictWord{7, 0, 302}, + dictWord{132, 10, 111}, + dictWord{135, 0, 1871}, + dictWord{132, 0, 728}, + dictWord{9, 0, 252}, + dictWord{132, 10, 767}, + dictWord{6, 0, 461}, + dictWord{7, 0, 1590}, + dictWord{7, 10, 1416}, + dictWord{7, 10, 2005}, + dictWord{8, 10, 131}, + dictWord{8, 10, 466}, + dictWord{9, 10, 672}, + dictWord{13, 10, 252}, + dictWord{148, 10, 103}, + dictWord{6, 0, 323}, + dictWord{135, 0, 1564}, + dictWord{7, 0, 461}, + dictWord{136, 0, 775}, + dictWord{6, 10, 44}, + dictWord{136, 10, 368}, + dictWord{139, 0, 172}, + dictWord{132, 0, 464}, + dictWord{4, 10, 570}, + dictWord{133, 10, 120}, + dictWord{137, 11, 269}, + dictWord{6, 10, 227}, + dictWord{135, 10, 1589}, + dictWord{6, 11, 1719}, + dictWord{6, 11, 1735}, + dictWord{ + 7, + 11, + 2016, + }, + dictWord{7, 11, 2020}, + dictWord{8, 11, 837}, + dictWord{137, 11, 852}, + dictWord{7, 0, 727}, + dictWord{146, 0, 73}, + dictWord{132, 0, 1023}, + dictWord{135, 11, 852}, + dictWord{135, 10, 1529}, + dictWord{136, 0, 577}, + dictWord{138, 11, 568}, + dictWord{134, 0, 1037}, + dictWord{8, 11, 67}, + dictWord{ + 138, + 11, + 419, + }, + dictWord{4, 0, 413}, + dictWord{5, 0, 677}, + dictWord{8, 0, 432}, + dictWord{140, 0, 280}, + dictWord{10, 0, 600}, + dictWord{6, 10, 1667}, + dictWord{ + 7, + 11, + 967, + }, + dictWord{7, 10, 2036}, + dictWord{141, 11, 11}, + dictWord{6, 10, 511}, + dictWord{140, 10, 132}, + dictWord{6, 0, 799}, + dictWord{5, 10, 568}, + dictWord{ + 6, + 10, + 138, + }, + dictWord{135, 10, 1293}, + dictWord{8, 0, 159}, + dictWord{4, 10, 565}, + dictWord{136, 10, 827}, + dictWord{7, 0, 646}, + dictWord{7, 0, 1730}, + dictWord{ + 11, + 0, + 446, + }, + dictWord{141, 0, 
178}, + dictWord{4, 10, 922}, + dictWord{133, 10, 1023}, + dictWord{135, 11, 11}, + dictWord{132, 0, 395}, + dictWord{11, 0, 145}, + dictWord{135, 10, 1002}, + dictWord{9, 0, 174}, + dictWord{10, 0, 164}, + dictWord{11, 0, 440}, + dictWord{11, 0, 514}, + dictWord{11, 0, 841}, + dictWord{15, 0, 98}, + dictWord{149, 0, 20}, + dictWord{134, 0, 426}, + dictWord{10, 0, 608}, + dictWord{139, 0, 1002}, + dictWord{7, 11, 320}, + dictWord{8, 11, 51}, + dictWord{12, 11, 481}, + dictWord{12, 11, 570}, + dictWord{148, 11, 106}, + dictWord{9, 0, 977}, + dictWord{9, 0, 983}, + dictWord{132, 11, 445}, + dictWord{138, 0, 250}, + dictWord{139, 0, 100}, + dictWord{6, 0, 1982}, + dictWord{136, 10, 402}, + dictWord{133, 11, 239}, + dictWord{4, 10, 716}, + dictWord{141, 10, 31}, + dictWord{5, 0, 476}, + dictWord{7, 11, 83}, + dictWord{7, 11, 1990}, + dictWord{8, 11, 130}, + dictWord{139, 11, 720}, + dictWord{8, 10, 691}, + dictWord{136, 10, 731}, + dictWord{5, 11, 123}, + dictWord{ + 6, + 11, + 530, + }, + dictWord{7, 11, 348}, + dictWord{135, 11, 1419}, + dictWord{5, 0, 76}, + dictWord{6, 0, 458}, + dictWord{6, 0, 497}, + dictWord{7, 0, 868}, + dictWord{9, 0, 658}, + dictWord{10, 0, 594}, + dictWord{11, 0, 173}, + dictWord{11, 0, 566}, + dictWord{12, 0, 20}, + dictWord{12, 0, 338}, + dictWord{141, 0, 200}, + dictWord{9, 11, 139}, + dictWord{ + 10, + 11, + 399, + }, + dictWord{11, 11, 469}, + dictWord{12, 11, 634}, + dictWord{141, 11, 223}, + dictWord{9, 10, 840}, + dictWord{138, 10, 803}, + dictWord{133, 10, 847}, + dictWord{11, 11, 223}, + dictWord{140, 11, 168}, + dictWord{132, 11, 210}, + dictWord{8, 0, 447}, + dictWord{9, 10, 53}, + dictWord{9, 10, 268}, + dictWord{9, 10, 901}, + dictWord{10, 10, 518}, + dictWord{10, 10, 829}, + dictWord{11, 10, 188}, + dictWord{13, 10, 74}, + dictWord{14, 10, 46}, + dictWord{15, 10, 17}, + dictWord{15, 10, 33}, + dictWord{17, 10, 40}, + dictWord{18, 10, 36}, + dictWord{19, 10, 20}, + dictWord{22, 10, 1}, + dictWord{152, 10, 2}, + dictWord{4, 0, 526}, + dictWord{7, 0, 1029}, + dictWord{135, 0, 1054}, + dictWord{19, 11, 59}, + dictWord{150, 11, 2}, + dictWord{4, 0, 636}, + dictWord{6, 0, 1875}, + dictWord{6, 0, 1920}, + dictWord{9, 0, 999}, + dictWord{ + 12, + 0, + 807, + }, + dictWord{12, 0, 825}, + dictWord{15, 0, 179}, + dictWord{15, 0, 190}, + dictWord{18, 0, 182}, + dictWord{136, 10, 532}, + dictWord{6, 0, 1699}, + dictWord{ + 7, + 0, + 660, + }, + dictWord{7, 0, 1124}, + dictWord{17, 0, 31}, + dictWord{19, 0, 22}, + dictWord{151, 0, 14}, + dictWord{135, 10, 681}, + dictWord{132, 11, 430}, + dictWord{ + 140, + 10, + 677, + }, + dictWord{4, 10, 684}, + dictWord{136, 10, 384}, + dictWord{132, 11, 756}, + dictWord{133, 11, 213}, + dictWord{7, 0, 188}, + dictWord{7, 10, 110}, + dictWord{ + 8, + 10, + 290, + }, + dictWord{8, 10, 591}, + dictWord{9, 10, 382}, + dictWord{9, 10, 649}, + dictWord{11, 10, 71}, + dictWord{11, 10, 155}, + dictWord{11, 10, 313}, + dictWord{ + 12, + 10, + 5, + }, + dictWord{13, 10, 325}, + dictWord{142, 10, 287}, + dictWord{7, 10, 360}, + dictWord{7, 10, 425}, + dictWord{9, 10, 66}, + dictWord{9, 10, 278}, + dictWord{ + 138, + 10, + 644, + }, + dictWord{142, 11, 164}, + dictWord{4, 0, 279}, + dictWord{7, 0, 301}, + dictWord{137, 0, 362}, + dictWord{134, 11, 586}, + dictWord{135, 0, 1743}, + dictWord{4, 0, 178}, + dictWord{133, 0, 399}, + dictWord{4, 10, 900}, + dictWord{133, 10, 861}, + dictWord{5, 10, 254}, + dictWord{7, 10, 985}, + dictWord{136, 10, 73}, + dictWord{133, 11, 108}, + dictWord{7, 10, 1959}, + dictWord{136, 10, 683}, + dictWord{133, 
11, 219}, + dictWord{4, 11, 193}, + dictWord{5, 11, 916}, + dictWord{ + 7, + 11, + 364, + }, + dictWord{10, 11, 398}, + dictWord{10, 11, 726}, + dictWord{11, 11, 317}, + dictWord{11, 11, 626}, + dictWord{12, 11, 142}, + dictWord{12, 11, 288}, + dictWord{ + 12, + 11, + 678, + }, + dictWord{13, 11, 313}, + dictWord{15, 11, 113}, + dictWord{18, 11, 114}, + dictWord{21, 11, 30}, + dictWord{150, 11, 53}, + dictWord{6, 11, 241}, + dictWord{7, 11, 907}, + dictWord{8, 11, 832}, + dictWord{9, 11, 342}, + dictWord{10, 11, 729}, + dictWord{11, 11, 284}, + dictWord{11, 11, 445}, + dictWord{11, 11, 651}, + dictWord{11, 11, 863}, + dictWord{13, 11, 398}, + dictWord{146, 11, 99}, + dictWord{132, 0, 872}, + dictWord{134, 0, 831}, + dictWord{134, 0, 1692}, + dictWord{ + 6, + 0, + 202, + }, + dictWord{6, 0, 1006}, + dictWord{9, 0, 832}, + dictWord{10, 0, 636}, + dictWord{11, 0, 208}, + dictWord{12, 0, 360}, + dictWord{17, 0, 118}, + dictWord{18, 0, 27}, + dictWord{20, 0, 67}, + dictWord{137, 11, 734}, + dictWord{132, 10, 725}, + dictWord{7, 11, 993}, + dictWord{138, 11, 666}, + dictWord{134, 0, 1954}, + dictWord{ + 134, + 10, + 196, + }, + dictWord{7, 0, 872}, + dictWord{10, 0, 516}, + dictWord{139, 0, 167}, + dictWord{133, 10, 831}, + dictWord{4, 11, 562}, + dictWord{9, 11, 254}, + dictWord{ + 139, + 11, + 879, + }, + dictWord{137, 0, 313}, + dictWord{4, 0, 224}, + dictWord{132, 11, 786}, + dictWord{11, 0, 24}, + dictWord{12, 0, 170}, + dictWord{136, 10, 723}, + dictWord{ + 5, + 0, + 546, + }, + dictWord{7, 0, 35}, + dictWord{8, 0, 11}, + dictWord{8, 0, 12}, + dictWord{9, 0, 315}, + dictWord{9, 0, 533}, + dictWord{10, 0, 802}, + dictWord{11, 0, 166}, + dictWord{ + 12, + 0, + 525, + }, + dictWord{142, 0, 243}, + dictWord{7, 0, 1937}, + dictWord{13, 10, 80}, + dictWord{13, 10, 437}, + dictWord{145, 10, 74}, + dictWord{5, 0, 241}, + dictWord{ + 8, + 0, + 242, + }, + dictWord{9, 0, 451}, + dictWord{10, 0, 667}, + dictWord{11, 0, 598}, + dictWord{140, 0, 429}, + dictWord{150, 0, 46}, + dictWord{6, 0, 1273}, + dictWord{ + 137, + 0, + 830, + }, + dictWord{5, 10, 848}, + dictWord{6, 10, 66}, + dictWord{136, 10, 764}, + dictWord{6, 0, 825}, + dictWord{134, 0, 993}, + dictWord{4, 0, 1006}, + dictWord{ + 10, + 0, + 327, + }, + dictWord{13, 0, 271}, + dictWord{4, 10, 36}, + dictWord{7, 10, 1387}, + dictWord{139, 10, 755}, + dictWord{134, 0, 1023}, + dictWord{135, 0, 1580}, + dictWord{ + 4, + 0, + 366, + }, + dictWord{137, 0, 516}, + dictWord{132, 10, 887}, + dictWord{6, 0, 1736}, + dictWord{135, 0, 1891}, + dictWord{6, 11, 216}, + dictWord{7, 11, 901}, + dictWord{ + 7, + 11, + 1343, + }, + dictWord{136, 11, 493}, + dictWord{6, 10, 165}, + dictWord{138, 10, 388}, + dictWord{7, 11, 341}, + dictWord{139, 11, 219}, + dictWord{4, 10, 719}, + dictWord{135, 10, 155}, + dictWord{134, 0, 1935}, + dictWord{132, 0, 826}, + dictWord{6, 0, 331}, + dictWord{6, 0, 1605}, + dictWord{8, 0, 623}, + dictWord{11, 0, 139}, + dictWord{139, 0, 171}, + dictWord{135, 11, 1734}, + dictWord{10, 11, 115}, + dictWord{11, 11, 420}, + dictWord{12, 11, 154}, + dictWord{13, 11, 404}, + dictWord{ + 14, + 11, + 346, + }, + dictWord{15, 11, 54}, + dictWord{143, 11, 112}, + dictWord{7, 0, 288}, + dictWord{4, 10, 353}, + dictWord{6, 10, 146}, + dictWord{6, 10, 1789}, + dictWord{ + 7, + 10, + 990, + }, + dictWord{7, 10, 1348}, + dictWord{9, 10, 665}, + dictWord{9, 10, 898}, + dictWord{11, 10, 893}, + dictWord{142, 10, 212}, + dictWord{6, 0, 916}, + dictWord{134, 0, 1592}, + dictWord{7, 0, 1888}, + dictWord{4, 10, 45}, + dictWord{135, 10, 1257}, + 
dictWord{5, 11, 1011}, + dictWord{136, 11, 701}, + dictWord{ + 139, + 11, + 596, + }, + dictWord{4, 11, 54}, + dictWord{5, 11, 666}, + dictWord{7, 11, 1039}, + dictWord{7, 11, 1130}, + dictWord{9, 11, 195}, + dictWord{138, 11, 302}, + dictWord{ + 134, + 0, + 1471, + }, + dictWord{134, 0, 1570}, + dictWord{132, 0, 394}, + dictWord{140, 10, 65}, + dictWord{136, 10, 816}, + dictWord{135, 0, 1931}, + dictWord{7, 0, 574}, + dictWord{135, 0, 1719}, + dictWord{134, 11, 467}, + dictWord{132, 0, 658}, + dictWord{9, 0, 781}, + dictWord{10, 0, 144}, + dictWord{11, 0, 385}, + dictWord{13, 0, 161}, + dictWord{13, 0, 228}, + dictWord{13, 0, 268}, + dictWord{20, 0, 107}, + dictWord{134, 11, 1669}, + dictWord{136, 0, 374}, + dictWord{135, 0, 735}, + dictWord{4, 0, 344}, + dictWord{6, 0, 498}, + dictWord{139, 0, 323}, + dictWord{7, 0, 586}, + dictWord{7, 0, 1063}, + dictWord{6, 10, 559}, + dictWord{134, 10, 1691}, + dictWord{137, 0, 155}, + dictWord{133, 0, 906}, + dictWord{7, 11, 122}, + dictWord{9, 11, 259}, + dictWord{10, 11, 84}, + dictWord{11, 11, 470}, + dictWord{12, 11, 541}, + dictWord{ + 141, + 11, + 379, + }, + dictWord{134, 0, 1139}, + dictWord{10, 0, 108}, + dictWord{139, 0, 116}, + dictWord{134, 10, 456}, + dictWord{133, 10, 925}, + dictWord{5, 11, 82}, + dictWord{ + 5, + 11, + 131, + }, + dictWord{7, 11, 1755}, + dictWord{8, 11, 31}, + dictWord{9, 11, 168}, + dictWord{9, 11, 764}, + dictWord{139, 11, 869}, + dictWord{134, 11, 605}, + dictWord{ + 5, + 11, + 278, + }, + dictWord{137, 11, 68}, + dictWord{4, 11, 163}, + dictWord{5, 11, 201}, + dictWord{5, 11, 307}, + dictWord{5, 11, 310}, + dictWord{6, 11, 335}, + dictWord{ + 7, + 11, + 284, + }, + dictWord{136, 11, 165}, + dictWord{135, 11, 1660}, + dictWord{6, 11, 33}, + dictWord{135, 11, 1244}, + dictWord{4, 0, 616}, + dictWord{136, 11, 483}, + dictWord{8, 0, 857}, + dictWord{8, 0, 902}, + dictWord{8, 0, 910}, + dictWord{10, 0, 879}, + dictWord{12, 0, 726}, + dictWord{4, 11, 199}, + dictWord{139, 11, 34}, + dictWord{136, 0, 692}, + dictWord{6, 10, 193}, + dictWord{7, 10, 240}, + dictWord{7, 10, 1682}, + dictWord{10, 10, 51}, + dictWord{10, 10, 640}, + dictWord{11, 10, 410}, + dictWord{13, 10, 82}, + dictWord{14, 10, 247}, + dictWord{14, 10, 331}, + dictWord{142, 10, 377}, + dictWord{6, 0, 823}, + dictWord{134, 0, 983}, + dictWord{ + 139, + 10, + 411, + }, + dictWord{132, 0, 305}, + dictWord{136, 10, 633}, + dictWord{138, 11, 203}, + dictWord{134, 0, 681}, + dictWord{6, 11, 326}, + dictWord{7, 11, 677}, + dictWord{137, 11, 425}, + dictWord{5, 0, 214}, + dictWord{7, 0, 603}, + dictWord{8, 0, 611}, + dictWord{9, 0, 686}, + dictWord{10, 0, 88}, + dictWord{11, 0, 459}, + dictWord{ + 11, + 0, + 496, + }, + dictWord{12, 0, 463}, + dictWord{12, 0, 590}, + dictWord{141, 0, 0}, + dictWord{136, 0, 1004}, + dictWord{142, 0, 23}, + dictWord{134, 0, 1703}, + dictWord{ + 147, + 11, + 8, + }, + dictWord{145, 11, 56}, + dictWord{135, 0, 1443}, + dictWord{4, 10, 237}, + dictWord{135, 10, 514}, + dictWord{6, 0, 714}, + dictWord{145, 0, 19}, + dictWord{ + 5, + 11, + 358, + }, + dictWord{7, 11, 473}, + dictWord{7, 11, 1184}, + dictWord{10, 11, 662}, + dictWord{13, 11, 212}, + dictWord{13, 11, 304}, + dictWord{13, 11, 333}, + dictWord{145, 11, 98}, + dictWord{4, 0, 737}, + dictWord{10, 0, 98}, + dictWord{11, 0, 294}, + dictWord{12, 0, 60}, + dictWord{12, 0, 437}, + dictWord{13, 0, 64}, + dictWord{ + 13, + 0, + 380, + }, + dictWord{142, 0, 430}, + dictWord{6, 10, 392}, + dictWord{7, 10, 65}, + dictWord{135, 10, 2019}, + dictWord{6, 0, 1758}, + dictWord{8, 0, 520}, + 
dictWord{ + 9, + 0, + 345, + }, + dictWord{9, 0, 403}, + dictWord{142, 0, 350}, + dictWord{5, 0, 47}, + dictWord{10, 0, 242}, + dictWord{138, 0, 579}, + dictWord{5, 0, 139}, + dictWord{7, 0, 1168}, + dictWord{138, 0, 539}, + dictWord{134, 0, 1459}, + dictWord{13, 0, 388}, + dictWord{141, 11, 388}, + dictWord{134, 0, 253}, + dictWord{7, 10, 1260}, + dictWord{ + 135, + 10, + 1790, + }, + dictWord{10, 0, 252}, + dictWord{9, 10, 222}, + dictWord{139, 10, 900}, + dictWord{140, 0, 745}, + dictWord{133, 11, 946}, + dictWord{4, 0, 107}, + dictWord{ + 7, + 0, + 613, + }, + dictWord{8, 0, 439}, + dictWord{8, 0, 504}, + dictWord{9, 0, 501}, + dictWord{10, 0, 383}, + dictWord{139, 0, 477}, + dictWord{135, 11, 1485}, + dictWord{ + 132, + 0, + 871, + }, + dictWord{7, 11, 411}, + dictWord{7, 11, 590}, + dictWord{8, 11, 631}, + dictWord{9, 11, 323}, + dictWord{10, 11, 355}, + dictWord{11, 11, 491}, + dictWord{ + 12, + 11, + 143, + }, + dictWord{12, 11, 402}, + dictWord{13, 11, 73}, + dictWord{14, 11, 408}, + dictWord{15, 11, 107}, + dictWord{146, 11, 71}, + dictWord{132, 0, 229}, + dictWord{132, 0, 903}, + dictWord{140, 0, 71}, + dictWord{133, 0, 549}, + dictWord{4, 0, 47}, + dictWord{6, 0, 373}, + dictWord{7, 0, 452}, + dictWord{7, 0, 543}, + dictWord{ + 7, + 0, + 1828, + }, + dictWord{7, 0, 1856}, + dictWord{9, 0, 6}, + dictWord{11, 0, 257}, + dictWord{139, 0, 391}, + dictWord{7, 11, 1467}, + dictWord{8, 11, 328}, + dictWord{ + 10, + 11, + 544, + }, + dictWord{11, 11, 955}, + dictWord{13, 11, 320}, + dictWord{145, 11, 83}, + dictWord{5, 0, 980}, + dictWord{134, 0, 1754}, + dictWord{136, 0, 865}, + dictWord{ + 5, + 0, + 705, + }, + dictWord{137, 0, 606}, + dictWord{7, 0, 161}, + dictWord{8, 10, 201}, + dictWord{136, 10, 605}, + dictWord{143, 11, 35}, + dictWord{5, 11, 835}, + dictWord{ + 6, + 11, + 483, + }, + dictWord{140, 10, 224}, + dictWord{7, 0, 536}, + dictWord{7, 0, 1331}, + dictWord{136, 0, 143}, + dictWord{134, 0, 1388}, + dictWord{5, 0, 724}, + dictWord{ + 10, + 0, + 305, + }, + dictWord{11, 0, 151}, + dictWord{12, 0, 33}, + dictWord{12, 0, 121}, + dictWord{12, 0, 381}, + dictWord{17, 0, 3}, + dictWord{17, 0, 27}, + dictWord{17, 0, 78}, + dictWord{18, 0, 18}, + dictWord{19, 0, 54}, + dictWord{149, 0, 5}, + dictWord{4, 10, 523}, + dictWord{133, 10, 638}, + dictWord{5, 0, 19}, + dictWord{134, 0, 533}, + dictWord{ + 5, + 0, + 395, + }, + dictWord{5, 0, 951}, + dictWord{134, 0, 1776}, + dictWord{135, 0, 1908}, + dictWord{132, 0, 846}, + dictWord{10, 0, 74}, + dictWord{11, 0, 663}, + dictWord{ + 12, + 0, + 210, + }, + dictWord{13, 0, 166}, + dictWord{13, 0, 310}, + dictWord{14, 0, 373}, + dictWord{18, 0, 95}, + dictWord{19, 0, 43}, + dictWord{6, 10, 242}, + dictWord{7, 10, 227}, + dictWord{7, 10, 1581}, + dictWord{8, 10, 104}, + dictWord{9, 10, 113}, + dictWord{9, 10, 220}, + dictWord{9, 10, 427}, + dictWord{10, 10, 239}, + dictWord{11, 10, 579}, + dictWord{11, 10, 1023}, + dictWord{13, 10, 4}, + dictWord{13, 10, 204}, + dictWord{13, 10, 316}, + dictWord{148, 10, 86}, + dictWord{9, 11, 716}, + dictWord{11, 11, 108}, + dictWord{13, 11, 123}, + dictWord{14, 11, 252}, + dictWord{19, 11, 38}, + dictWord{21, 11, 3}, + dictWord{151, 11, 11}, + dictWord{8, 0, 372}, + dictWord{9, 0, 122}, + dictWord{138, 0, 175}, + dictWord{132, 11, 677}, + dictWord{7, 11, 1374}, + dictWord{136, 11, 540}, + dictWord{135, 10, 861}, + dictWord{132, 0, 695}, + dictWord{ + 7, + 0, + 497, + }, + dictWord{9, 0, 387}, + dictWord{147, 0, 81}, + dictWord{136, 0, 937}, + dictWord{134, 0, 718}, + dictWord{7, 0, 1328}, + dictWord{136, 
10, 494}, + dictWord{ + 132, + 11, + 331, + }, + dictWord{6, 0, 1581}, + dictWord{133, 11, 747}, + dictWord{5, 0, 284}, + dictWord{6, 0, 49}, + dictWord{6, 0, 350}, + dictWord{7, 0, 1}, + dictWord{7, 0, 377}, + dictWord{7, 0, 1693}, + dictWord{8, 0, 18}, + dictWord{8, 0, 678}, + dictWord{9, 0, 161}, + dictWord{9, 0, 585}, + dictWord{9, 0, 671}, + dictWord{9, 0, 839}, + dictWord{11, 0, 912}, + dictWord{141, 0, 427}, + dictWord{7, 10, 1306}, + dictWord{8, 10, 505}, + dictWord{9, 10, 482}, + dictWord{10, 10, 126}, + dictWord{11, 10, 225}, + dictWord{12, 10, 347}, + dictWord{12, 10, 449}, + dictWord{13, 10, 19}, + dictWord{14, 10, 218}, + dictWord{142, 10, 435}, + dictWord{10, 10, 764}, + dictWord{12, 10, 120}, + dictWord{ + 13, + 10, + 39, + }, + dictWord{145, 10, 127}, + dictWord{4, 0, 597}, + dictWord{133, 10, 268}, + dictWord{134, 0, 1094}, + dictWord{4, 0, 1008}, + dictWord{134, 0, 1973}, + dictWord{132, 0, 811}, + dictWord{139, 0, 908}, + dictWord{135, 0, 1471}, + dictWord{133, 11, 326}, + dictWord{4, 10, 384}, + dictWord{135, 10, 1022}, + dictWord{ + 7, + 0, + 1935, + }, + dictWord{8, 0, 324}, + dictWord{12, 0, 42}, + dictWord{4, 11, 691}, + dictWord{7, 11, 1935}, + dictWord{8, 11, 324}, + dictWord{9, 11, 35}, + dictWord{10, 11, 680}, + dictWord{11, 11, 364}, + dictWord{12, 11, 42}, + dictWord{13, 11, 357}, + dictWord{146, 11, 16}, + dictWord{135, 0, 2014}, + dictWord{7, 0, 2007}, + dictWord{ + 9, + 0, + 101, + }, + dictWord{9, 0, 450}, + dictWord{10, 0, 66}, + dictWord{10, 0, 842}, + dictWord{11, 0, 536}, + dictWord{12, 0, 587}, + dictWord{6, 11, 32}, + dictWord{7, 11, 385}, + dictWord{7, 11, 757}, + dictWord{7, 11, 1916}, + dictWord{8, 11, 37}, + dictWord{8, 11, 94}, + dictWord{8, 11, 711}, + dictWord{9, 11, 541}, + dictWord{10, 11, 162}, + dictWord{ + 10, + 11, + 795, + }, + dictWord{11, 11, 989}, + dictWord{11, 11, 1010}, + dictWord{12, 11, 14}, + dictWord{142, 11, 308}, + dictWord{139, 0, 586}, + dictWord{ + 135, + 10, + 1703, + }, + dictWord{7, 0, 1077}, + dictWord{11, 0, 28}, + dictWord{9, 10, 159}, + dictWord{140, 10, 603}, + dictWord{6, 0, 1221}, + dictWord{136, 10, 583}, + dictWord{ + 6, + 11, + 152, + }, + dictWord{6, 11, 349}, + dictWord{6, 11, 1682}, + dictWord{7, 11, 1252}, + dictWord{8, 11, 112}, + dictWord{9, 11, 435}, + dictWord{9, 11, 668}, + dictWord{ + 10, + 11, + 290, + }, + dictWord{10, 11, 319}, + dictWord{10, 11, 815}, + dictWord{11, 11, 180}, + dictWord{11, 11, 837}, + dictWord{12, 11, 240}, + dictWord{13, 11, 152}, + dictWord{13, 11, 219}, + dictWord{142, 11, 158}, + dictWord{139, 0, 62}, + dictWord{132, 10, 515}, + dictWord{8, 10, 632}, + dictWord{8, 10, 697}, + dictWord{ + 137, + 10, + 854, + }, + dictWord{134, 0, 1766}, + dictWord{132, 11, 581}, + dictWord{6, 11, 126}, + dictWord{7, 11, 573}, + dictWord{8, 11, 397}, + dictWord{142, 11, 44}, + dictWord{ + 150, + 0, + 28, + }, + dictWord{11, 0, 670}, + dictWord{22, 0, 25}, + dictWord{4, 10, 136}, + dictWord{133, 10, 551}, + dictWord{6, 0, 1665}, + dictWord{7, 0, 256}, + dictWord{ + 7, + 0, + 1388, + }, + dictWord{138, 0, 499}, + dictWord{4, 0, 22}, + dictWord{5, 0, 10}, + dictWord{7, 0, 1576}, + dictWord{136, 0, 97}, + dictWord{134, 10, 1782}, + dictWord{5, 0, 481}, + dictWord{7, 10, 1287}, + dictWord{9, 10, 44}, + dictWord{10, 10, 552}, + dictWord{10, 10, 642}, + dictWord{11, 10, 839}, + dictWord{12, 10, 274}, + dictWord{ + 12, + 10, + 275, + }, + dictWord{12, 10, 372}, + dictWord{13, 10, 91}, + dictWord{142, 10, 125}, + dictWord{133, 11, 926}, + dictWord{7, 11, 1232}, + dictWord{137, 11, 531}, + dictWord{6, 
0, 134}, + dictWord{7, 0, 437}, + dictWord{7, 0, 1824}, + dictWord{9, 0, 37}, + dictWord{14, 0, 285}, + dictWord{142, 0, 371}, + dictWord{7, 0, 486}, + dictWord{8, 0, 155}, + dictWord{11, 0, 93}, + dictWord{140, 0, 164}, + dictWord{6, 0, 1391}, + dictWord{134, 0, 1442}, + dictWord{133, 11, 670}, + dictWord{133, 0, 591}, + dictWord{ + 6, + 10, + 147, + }, + dictWord{7, 10, 886}, + dictWord{7, 11, 1957}, + dictWord{9, 10, 753}, + dictWord{138, 10, 268}, + dictWord{5, 0, 380}, + dictWord{5, 0, 650}, + dictWord{ + 7, + 0, + 1173, + }, + dictWord{136, 0, 310}, + dictWord{4, 0, 364}, + dictWord{7, 0, 1156}, + dictWord{7, 0, 1187}, + dictWord{137, 0, 409}, + dictWord{135, 11, 1621}, + dictWord{ + 134, + 0, + 482, + }, + dictWord{133, 11, 506}, + dictWord{4, 0, 781}, + dictWord{6, 0, 487}, + dictWord{7, 0, 926}, + dictWord{8, 0, 263}, + dictWord{139, 0, 500}, + dictWord{ + 138, + 10, + 137, + }, + dictWord{135, 11, 242}, + dictWord{139, 11, 96}, + dictWord{133, 10, 414}, + dictWord{135, 10, 1762}, + dictWord{134, 0, 804}, + dictWord{5, 11, 834}, + dictWord{7, 11, 1202}, + dictWord{8, 11, 14}, + dictWord{9, 11, 481}, + dictWord{137, 11, 880}, + dictWord{134, 10, 599}, + dictWord{4, 0, 94}, + dictWord{135, 0, 1265}, + dictWord{4, 0, 415}, + dictWord{132, 0, 417}, + dictWord{5, 0, 348}, + dictWord{6, 0, 522}, + dictWord{6, 10, 1749}, + dictWord{7, 11, 1526}, + dictWord{138, 11, 465}, + dictWord{134, 10, 1627}, + dictWord{132, 0, 1012}, + dictWord{132, 10, 488}, + dictWord{4, 11, 357}, + dictWord{6, 11, 172}, + dictWord{7, 11, 143}, + dictWord{ + 137, + 11, + 413, + }, + dictWord{4, 10, 83}, + dictWord{4, 11, 590}, + dictWord{146, 11, 76}, + dictWord{140, 10, 676}, + dictWord{7, 11, 287}, + dictWord{8, 11, 355}, + dictWord{ + 9, + 11, + 293, + }, + dictWord{137, 11, 743}, + dictWord{134, 10, 278}, + dictWord{6, 0, 1803}, + dictWord{18, 0, 165}, + dictWord{24, 0, 21}, + dictWord{5, 11, 169}, + dictWord{ + 7, + 11, + 333, + }, + dictWord{136, 11, 45}, + dictWord{12, 10, 97}, + dictWord{140, 11, 97}, + dictWord{4, 0, 408}, + dictWord{4, 0, 741}, + dictWord{135, 0, 500}, + dictWord{ + 132, + 11, + 198, + }, + dictWord{7, 10, 388}, + dictWord{7, 10, 644}, + dictWord{139, 10, 781}, + dictWord{4, 11, 24}, + dictWord{5, 11, 140}, + dictWord{5, 11, 185}, + dictWord{ + 7, + 11, + 1500, + }, + dictWord{11, 11, 565}, + dictWord{139, 11, 838}, + dictWord{6, 0, 1321}, + dictWord{9, 0, 257}, + dictWord{7, 10, 229}, + dictWord{8, 10, 59}, + dictWord{ + 9, + 10, + 190, + }, + dictWord{10, 10, 378}, + dictWord{140, 10, 191}, + dictWord{4, 11, 334}, + dictWord{133, 11, 593}, + dictWord{135, 11, 1885}, + dictWord{134, 0, 1138}, + dictWord{4, 0, 249}, + dictWord{6, 0, 73}, + dictWord{135, 0, 177}, + dictWord{133, 0, 576}, + dictWord{142, 0, 231}, + dictWord{137, 0, 288}, + dictWord{132, 10, 660}, + dictWord{7, 10, 1035}, + dictWord{138, 10, 737}, + dictWord{135, 0, 1487}, + dictWord{6, 0, 989}, + dictWord{9, 0, 433}, + dictWord{7, 10, 690}, + dictWord{9, 10, 587}, + dictWord{140, 10, 521}, + dictWord{7, 0, 1264}, + dictWord{7, 0, 1678}, + dictWord{11, 0, 945}, + dictWord{12, 0, 341}, + dictWord{12, 0, 471}, + dictWord{140, 0, 569}, + dictWord{132, 11, 709}, + dictWord{133, 11, 897}, + dictWord{5, 11, 224}, + dictWord{13, 11, 174}, + dictWord{146, 11, 52}, + dictWord{135, 11, 1840}, + dictWord{ + 134, + 10, + 1744, + }, + dictWord{12, 0, 87}, + dictWord{16, 0, 74}, + dictWord{4, 10, 733}, + dictWord{9, 10, 194}, + dictWord{10, 10, 92}, + dictWord{11, 10, 198}, + dictWord{ + 12, + 10, + 84, + }, + dictWord{141, 10, 128}, + 
dictWord{140, 0, 779}, + dictWord{135, 0, 538}, + dictWord{4, 11, 608}, + dictWord{133, 11, 497}, + dictWord{133, 0, 413}, + dictWord{7, 11, 1375}, + dictWord{7, 11, 1466}, + dictWord{138, 11, 331}, + dictWord{136, 0, 495}, + dictWord{6, 11, 540}, + dictWord{136, 11, 136}, + dictWord{7, 0, 54}, + dictWord{8, 0, 312}, + dictWord{10, 0, 191}, + dictWord{10, 0, 614}, + dictWord{140, 0, 567}, + dictWord{6, 0, 468}, + dictWord{7, 0, 567}, + dictWord{7, 0, 1478}, + dictWord{ + 8, + 0, + 530, + }, + dictWord{14, 0, 290}, + dictWord{133, 11, 999}, + dictWord{4, 11, 299}, + dictWord{7, 10, 306}, + dictWord{135, 11, 1004}, + dictWord{142, 11, 296}, + dictWord{134, 0, 1484}, + dictWord{133, 10, 979}, + dictWord{6, 0, 609}, + dictWord{9, 0, 815}, + dictWord{12, 11, 137}, + dictWord{14, 11, 9}, + dictWord{14, 11, 24}, + dictWord{142, 11, 64}, + dictWord{133, 11, 456}, + dictWord{6, 0, 484}, + dictWord{135, 0, 822}, + dictWord{133, 10, 178}, + dictWord{136, 11, 180}, + dictWord{ + 132, + 11, + 755, + }, + dictWord{137, 0, 900}, + dictWord{135, 0, 1335}, + dictWord{6, 0, 1724}, + dictWord{135, 0, 2022}, + dictWord{135, 11, 1139}, + dictWord{5, 0, 640}, + dictWord{132, 10, 390}, + dictWord{6, 0, 1831}, + dictWord{138, 11, 633}, + dictWord{135, 11, 566}, + dictWord{4, 11, 890}, + dictWord{5, 11, 805}, + dictWord{5, 11, 819}, + dictWord{5, 11, 961}, + dictWord{6, 11, 396}, + dictWord{6, 11, 1631}, + dictWord{6, 11, 1678}, + dictWord{7, 11, 1967}, + dictWord{7, 11, 2041}, + dictWord{ + 9, + 11, + 630, + }, + dictWord{11, 11, 8}, + dictWord{11, 11, 1019}, + dictWord{12, 11, 176}, + dictWord{13, 11, 225}, + dictWord{14, 11, 292}, + dictWord{149, 11, 24}, + dictWord{ + 132, + 0, + 474, + }, + dictWord{134, 0, 1103}, + dictWord{135, 0, 1504}, + dictWord{134, 0, 1576}, + dictWord{6, 0, 961}, + dictWord{6, 0, 1034}, + dictWord{140, 0, 655}, + dictWord{11, 11, 514}, + dictWord{149, 11, 20}, + dictWord{5, 0, 305}, + dictWord{135, 11, 1815}, + dictWord{7, 11, 1505}, + dictWord{10, 11, 190}, + dictWord{ + 10, + 11, + 634, + }, + dictWord{11, 11, 792}, + dictWord{12, 11, 358}, + dictWord{140, 11, 447}, + dictWord{5, 11, 0}, + dictWord{6, 11, 536}, + dictWord{7, 11, 604}, + dictWord{ + 13, + 11, + 445, + }, + dictWord{145, 11, 126}, + dictWord{7, 0, 1236}, + dictWord{133, 10, 105}, + dictWord{4, 0, 480}, + dictWord{6, 0, 217}, + dictWord{6, 0, 302}, + dictWord{ + 6, + 0, + 1642, + }, + dictWord{7, 0, 130}, + dictWord{7, 0, 837}, + dictWord{7, 0, 1321}, + dictWord{7, 0, 1547}, + dictWord{7, 0, 1657}, + dictWord{8, 0, 429}, + dictWord{9, 0, 228}, + dictWord{13, 0, 289}, + dictWord{13, 0, 343}, + dictWord{19, 0, 101}, + dictWord{6, 11, 232}, + dictWord{6, 11, 412}, + dictWord{7, 11, 1074}, + dictWord{8, 11, 9}, + dictWord{ + 8, + 11, + 157, + }, + dictWord{8, 11, 786}, + dictWord{9, 11, 196}, + dictWord{9, 11, 352}, + dictWord{9, 11, 457}, + dictWord{10, 11, 337}, + dictWord{11, 11, 232}, + dictWord{ + 11, + 11, + 877, + }, + dictWord{12, 11, 480}, + dictWord{140, 11, 546}, + dictWord{5, 10, 438}, + dictWord{7, 11, 958}, + dictWord{9, 10, 694}, + dictWord{12, 10, 627}, + dictWord{ + 13, + 11, + 38, + }, + dictWord{141, 10, 210}, + dictWord{4, 11, 382}, + dictWord{136, 11, 579}, + dictWord{7, 0, 278}, + dictWord{10, 0, 739}, + dictWord{11, 0, 708}, + dictWord{ + 141, + 0, + 348, + }, + dictWord{4, 11, 212}, + dictWord{135, 11, 1206}, + dictWord{135, 11, 1898}, + dictWord{6, 0, 708}, + dictWord{6, 0, 1344}, + dictWord{152, 10, 11}, + dictWord{137, 11, 768}, + dictWord{134, 0, 1840}, + dictWord{140, 0, 233}, + dictWord{8, 10, 
25}, + dictWord{138, 10, 826}, + dictWord{6, 0, 2017}, + dictWord{ + 133, + 11, + 655, + }, + dictWord{6, 0, 1488}, + dictWord{139, 11, 290}, + dictWord{132, 10, 308}, + dictWord{134, 0, 1590}, + dictWord{134, 0, 1800}, + dictWord{134, 0, 1259}, + dictWord{16, 0, 28}, + dictWord{6, 11, 231}, + dictWord{7, 11, 95}, + dictWord{136, 11, 423}, + dictWord{133, 11, 300}, + dictWord{135, 10, 150}, + dictWord{ + 136, + 10, + 649, + }, + dictWord{7, 11, 1874}, + dictWord{137, 11, 641}, + dictWord{6, 11, 237}, + dictWord{7, 11, 611}, + dictWord{8, 11, 100}, + dictWord{9, 11, 416}, + dictWord{ + 11, + 11, + 335, + }, + dictWord{12, 11, 173}, + dictWord{146, 11, 101}, + dictWord{137, 0, 45}, + dictWord{134, 10, 521}, + dictWord{17, 0, 36}, + dictWord{14, 11, 26}, + dictWord{ + 146, + 11, + 150, + }, + dictWord{7, 0, 1442}, + dictWord{14, 0, 22}, + dictWord{5, 10, 339}, + dictWord{15, 10, 41}, + dictWord{15, 10, 166}, + dictWord{147, 10, 66}, + dictWord{ + 8, + 0, + 378, + }, + dictWord{6, 11, 581}, + dictWord{135, 11, 1119}, + dictWord{134, 0, 1507}, + dictWord{147, 11, 117}, + dictWord{139, 0, 39}, + dictWord{134, 0, 1054}, + dictWord{6, 0, 363}, + dictWord{7, 0, 1955}, + dictWord{136, 0, 725}, + dictWord{134, 0, 2036}, + dictWord{133, 11, 199}, + dictWord{6, 0, 1871}, + dictWord{9, 0, 935}, + dictWord{9, 0, 961}, + dictWord{9, 0, 1004}, + dictWord{9, 0, 1016}, + dictWord{12, 0, 805}, + dictWord{12, 0, 852}, + dictWord{12, 0, 853}, + dictWord{12, 0, 869}, + dictWord{ + 12, + 0, + 882, + }, + dictWord{12, 0, 896}, + dictWord{12, 0, 906}, + dictWord{12, 0, 917}, + dictWord{12, 0, 940}, + dictWord{15, 0, 170}, + dictWord{15, 0, 176}, + dictWord{ + 15, + 0, + 188, + }, + dictWord{15, 0, 201}, + dictWord{15, 0, 205}, + dictWord{15, 0, 212}, + dictWord{15, 0, 234}, + dictWord{15, 0, 244}, + dictWord{18, 0, 181}, + dictWord{18, 0, 193}, + dictWord{18, 0, 196}, + dictWord{18, 0, 201}, + dictWord{18, 0, 202}, + dictWord{18, 0, 210}, + dictWord{18, 0, 217}, + dictWord{18, 0, 235}, + dictWord{18, 0, 236}, + dictWord{18, 0, 237}, + dictWord{21, 0, 54}, + dictWord{21, 0, 55}, + dictWord{21, 0, 58}, + dictWord{21, 0, 59}, + dictWord{152, 0, 22}, + dictWord{134, 10, 1628}, + dictWord{ + 137, + 0, + 805, + }, + dictWord{5, 0, 813}, + dictWord{135, 0, 2046}, + dictWord{142, 11, 42}, + dictWord{5, 0, 712}, + dictWord{6, 0, 1240}, + dictWord{11, 0, 17}, + dictWord{ + 13, + 0, + 321, + }, + dictWord{144, 0, 67}, + dictWord{132, 0, 617}, + dictWord{135, 10, 829}, + dictWord{6, 0, 320}, + dictWord{7, 0, 781}, + dictWord{7, 0, 1921}, + dictWord{9, 0, 55}, + dictWord{10, 0, 186}, + dictWord{10, 0, 273}, + dictWord{10, 0, 664}, + dictWord{10, 0, 801}, + dictWord{11, 0, 996}, + dictWord{11, 0, 997}, + dictWord{13, 0, 157}, + dictWord{142, 0, 170}, + dictWord{136, 0, 271}, + dictWord{5, 10, 486}, + dictWord{135, 10, 1349}, + dictWord{18, 11, 91}, + dictWord{147, 11, 70}, + dictWord{10, 0, 445}, + dictWord{7, 10, 1635}, + dictWord{8, 10, 17}, + dictWord{138, 10, 295}, + dictWord{136, 11, 404}, + dictWord{7, 0, 103}, + dictWord{7, 0, 863}, + dictWord{11, 0, 184}, + dictWord{145, 0, 62}, + dictWord{138, 10, 558}, + dictWord{137, 0, 659}, + dictWord{6, 11, 312}, + dictWord{6, 11, 1715}, + dictWord{10, 11, 584}, + dictWord{ + 11, + 11, + 546, + }, + dictWord{11, 11, 692}, + dictWord{12, 11, 259}, + dictWord{12, 11, 295}, + dictWord{13, 11, 46}, + dictWord{141, 11, 154}, + dictWord{134, 0, 676}, + dictWord{132, 11, 588}, + dictWord{4, 11, 231}, + dictWord{5, 11, 61}, + dictWord{6, 11, 104}, + dictWord{7, 11, 729}, + dictWord{7, 11, 
964}, + dictWord{7, 11, 1658}, + dictWord{140, 11, 414}, + dictWord{6, 11, 263}, + dictWord{138, 11, 757}, + dictWord{11, 0, 337}, + dictWord{142, 0, 303}, + dictWord{135, 11, 1363}, + dictWord{ + 132, + 11, + 320, + }, + dictWord{140, 0, 506}, + dictWord{134, 10, 447}, + dictWord{5, 0, 77}, + dictWord{7, 0, 1455}, + dictWord{10, 0, 843}, + dictWord{147, 0, 73}, + dictWord{ + 7, + 10, + 577, + }, + dictWord{7, 10, 1432}, + dictWord{9, 10, 475}, + dictWord{9, 10, 505}, + dictWord{9, 10, 526}, + dictWord{9, 10, 609}, + dictWord{9, 10, 689}, + dictWord{ + 9, + 10, + 726, + }, + dictWord{9, 10, 735}, + dictWord{9, 10, 738}, + dictWord{10, 10, 556}, + dictWord{10, 10, 674}, + dictWord{10, 10, 684}, + dictWord{11, 10, 89}, + dictWord{ + 11, + 10, + 202, + }, + dictWord{11, 10, 272}, + dictWord{11, 10, 380}, + dictWord{11, 10, 415}, + dictWord{11, 10, 505}, + dictWord{11, 10, 537}, + dictWord{11, 10, 550}, + dictWord{11, 10, 562}, + dictWord{11, 10, 640}, + dictWord{11, 10, 667}, + dictWord{11, 10, 688}, + dictWord{11, 10, 847}, + dictWord{11, 10, 927}, + dictWord{ + 11, + 10, + 930, + }, + dictWord{11, 10, 940}, + dictWord{12, 10, 144}, + dictWord{12, 10, 325}, + dictWord{12, 10, 329}, + dictWord{12, 10, 389}, + dictWord{12, 10, 403}, + dictWord{ + 12, + 10, + 451, + }, + dictWord{12, 10, 515}, + dictWord{12, 10, 604}, + dictWord{12, 10, 616}, + dictWord{12, 10, 626}, + dictWord{13, 10, 66}, + dictWord{13, 10, 131}, + dictWord{13, 10, 167}, + dictWord{13, 10, 236}, + dictWord{13, 10, 368}, + dictWord{13, 10, 411}, + dictWord{13, 10, 434}, + dictWord{13, 10, 453}, + dictWord{ + 13, + 10, + 461, + }, + dictWord{13, 10, 474}, + dictWord{14, 10, 59}, + dictWord{14, 10, 60}, + dictWord{14, 10, 139}, + dictWord{14, 10, 152}, + dictWord{14, 10, 276}, + dictWord{ + 14, + 10, + 353, + }, + dictWord{14, 10, 402}, + dictWord{15, 10, 28}, + dictWord{15, 10, 81}, + dictWord{15, 10, 123}, + dictWord{15, 10, 152}, + dictWord{18, 10, 136}, + dictWord{148, 10, 88}, + dictWord{132, 0, 458}, + dictWord{135, 0, 1420}, + dictWord{6, 0, 109}, + dictWord{10, 0, 382}, + dictWord{4, 11, 405}, + dictWord{4, 10, 609}, + dictWord{7, 10, 756}, + dictWord{7, 11, 817}, + dictWord{9, 10, 544}, + dictWord{11, 10, 413}, + dictWord{14, 11, 58}, + dictWord{14, 10, 307}, + dictWord{16, 10, 25}, + dictWord{17, 11, 37}, + dictWord{146, 11, 124}, + dictWord{6, 0, 330}, + dictWord{7, 0, 1084}, + dictWord{11, 0, 142}, + dictWord{133, 11, 974}, + dictWord{4, 10, 930}, + dictWord{133, 10, 947}, + dictWord{5, 10, 939}, + dictWord{142, 11, 394}, + dictWord{16, 0, 91}, + dictWord{145, 0, 87}, + dictWord{5, 11, 235}, + dictWord{5, 10, 962}, + dictWord{7, 11, 1239}, + dictWord{11, 11, 131}, + dictWord{140, 11, 370}, + dictWord{11, 0, 492}, + dictWord{5, 10, 651}, + dictWord{8, 10, 170}, + dictWord{9, 10, 61}, + dictWord{9, 10, 63}, + dictWord{10, 10, 23}, + dictWord{10, 10, 37}, + dictWord{10, 10, 834}, + dictWord{11, 10, 4}, + dictWord{11, 10, 281}, + dictWord{11, 10, 503}, + dictWord{ + 11, + 10, + 677, + }, + dictWord{12, 10, 96}, + dictWord{12, 10, 130}, + dictWord{12, 10, 244}, + dictWord{14, 10, 5}, + dictWord{14, 10, 40}, + dictWord{14, 10, 162}, + dictWord{ + 14, + 10, + 202, + }, + dictWord{146, 10, 133}, + dictWord{4, 10, 406}, + dictWord{5, 10, 579}, + dictWord{12, 10, 492}, + dictWord{150, 10, 15}, + dictWord{9, 11, 137}, + dictWord{138, 11, 221}, + dictWord{134, 0, 1239}, + dictWord{11, 0, 211}, + dictWord{140, 0, 145}, + dictWord{7, 11, 390}, + dictWord{138, 11, 140}, + dictWord{ + 135, + 11, + 1418, + }, + dictWord{135, 11, 
1144}, + dictWord{134, 0, 1049}, + dictWord{7, 0, 321}, + dictWord{6, 10, 17}, + dictWord{7, 10, 1001}, + dictWord{7, 10, 1982}, + dictWord{ + 9, + 10, + 886, + }, + dictWord{10, 10, 489}, + dictWord{10, 10, 800}, + dictWord{11, 10, 782}, + dictWord{12, 10, 320}, + dictWord{13, 10, 467}, + dictWord{14, 10, 145}, + dictWord{14, 10, 387}, + dictWord{143, 10, 119}, + dictWord{145, 10, 17}, + dictWord{5, 11, 407}, + dictWord{11, 11, 489}, + dictWord{19, 11, 37}, + dictWord{20, 11, 73}, + dictWord{150, 11, 38}, + dictWord{133, 10, 458}, + dictWord{135, 0, 1985}, + dictWord{7, 10, 1983}, + dictWord{8, 10, 0}, + dictWord{8, 10, 171}, + dictWord{ + 9, + 10, + 120, + }, + dictWord{9, 10, 732}, + dictWord{10, 10, 473}, + dictWord{11, 10, 656}, + dictWord{11, 10, 998}, + dictWord{18, 10, 0}, + dictWord{18, 10, 2}, + dictWord{ + 147, + 10, + 21, + }, + dictWord{5, 11, 325}, + dictWord{7, 11, 1483}, + dictWord{8, 11, 5}, + dictWord{8, 11, 227}, + dictWord{9, 11, 105}, + dictWord{10, 11, 585}, + dictWord{ + 140, + 11, + 614, + }, + dictWord{136, 0, 122}, + dictWord{132, 0, 234}, + dictWord{135, 11, 1196}, + dictWord{6, 0, 976}, + dictWord{6, 0, 1098}, + dictWord{134, 0, 1441}, + dictWord{ + 7, + 0, + 253, + }, + dictWord{136, 0, 549}, + dictWord{6, 11, 621}, + dictWord{13, 11, 504}, + dictWord{144, 11, 19}, + dictWord{132, 10, 519}, + dictWord{5, 0, 430}, + dictWord{ + 5, + 0, + 932, + }, + dictWord{6, 0, 131}, + dictWord{7, 0, 417}, + dictWord{9, 0, 522}, + dictWord{11, 0, 314}, + dictWord{141, 0, 390}, + dictWord{14, 0, 149}, + dictWord{14, 0, 399}, + dictWord{143, 0, 57}, + dictWord{5, 10, 907}, + dictWord{6, 10, 31}, + dictWord{6, 11, 218}, + dictWord{7, 10, 491}, + dictWord{7, 10, 530}, + dictWord{8, 10, 592}, + dictWord{11, 10, 53}, + dictWord{11, 10, 779}, + dictWord{12, 10, 167}, + dictWord{12, 10, 411}, + dictWord{14, 10, 14}, + dictWord{14, 10, 136}, + dictWord{15, 10, 72}, + dictWord{16, 10, 17}, + dictWord{144, 10, 72}, + dictWord{140, 11, 330}, + dictWord{7, 11, 454}, + dictWord{7, 11, 782}, + dictWord{136, 11, 768}, + dictWord{ + 132, + 0, + 507, + }, + dictWord{10, 11, 676}, + dictWord{140, 11, 462}, + dictWord{6, 0, 630}, + dictWord{9, 0, 811}, + dictWord{4, 10, 208}, + dictWord{5, 10, 106}, + dictWord{ + 6, + 10, + 531, + }, + dictWord{8, 10, 408}, + dictWord{9, 10, 188}, + dictWord{138, 10, 572}, + dictWord{4, 0, 343}, + dictWord{5, 0, 511}, + dictWord{134, 10, 1693}, + dictWord{ + 134, + 11, + 164, + }, + dictWord{132, 0, 448}, + dictWord{7, 0, 455}, + dictWord{138, 0, 591}, + dictWord{135, 0, 1381}, + dictWord{12, 10, 441}, + dictWord{150, 11, 50}, + dictWord{9, 10, 449}, + dictWord{10, 10, 192}, + dictWord{138, 10, 740}, + dictWord{6, 0, 575}, + dictWord{132, 10, 241}, + dictWord{134, 0, 1175}, + dictWord{ + 134, + 0, + 653, + }, + dictWord{134, 0, 1761}, + dictWord{134, 0, 1198}, + dictWord{132, 10, 259}, + dictWord{6, 11, 343}, + dictWord{7, 11, 195}, + dictWord{9, 11, 226}, + dictWord{ + 10, + 11, + 197, + }, + dictWord{10, 11, 575}, + dictWord{11, 11, 502}, + dictWord{139, 11, 899}, + dictWord{7, 0, 1127}, + dictWord{7, 0, 1572}, + dictWord{10, 0, 297}, + dictWord{10, 0, 422}, + dictWord{11, 0, 764}, + dictWord{11, 0, 810}, + dictWord{12, 0, 264}, + dictWord{13, 0, 102}, + dictWord{13, 0, 300}, + dictWord{13, 0, 484}, + dictWord{ + 14, + 0, + 147, + }, + dictWord{14, 0, 229}, + dictWord{17, 0, 71}, + dictWord{18, 0, 118}, + dictWord{147, 0, 120}, + dictWord{135, 11, 666}, + dictWord{132, 0, 678}, + dictWord{ + 4, + 10, + 173, + }, + dictWord{5, 10, 312}, + dictWord{5, 10, 512}, 
+ dictWord{135, 10, 1285}, + dictWord{7, 10, 1603}, + dictWord{7, 10, 1691}, + dictWord{9, 10, 464}, + dictWord{11, 10, 195}, + dictWord{12, 10, 279}, + dictWord{12, 10, 448}, + dictWord{14, 10, 11}, + dictWord{147, 10, 102}, + dictWord{16, 0, 99}, + dictWord{146, 0, 164}, + dictWord{7, 11, 1125}, + dictWord{9, 11, 143}, + dictWord{11, 11, 61}, + dictWord{14, 11, 405}, + dictWord{150, 11, 21}, + dictWord{137, 11, 260}, + dictWord{ + 4, + 10, + 452, + }, + dictWord{5, 10, 583}, + dictWord{5, 10, 817}, + dictWord{6, 10, 433}, + dictWord{7, 10, 593}, + dictWord{7, 10, 720}, + dictWord{7, 10, 1378}, + dictWord{ + 8, + 10, + 161, + }, + dictWord{9, 10, 284}, + dictWord{10, 10, 313}, + dictWord{139, 10, 886}, + dictWord{132, 10, 547}, + dictWord{136, 10, 722}, + dictWord{14, 0, 35}, + dictWord{142, 0, 191}, + dictWord{141, 0, 45}, + dictWord{138, 0, 121}, + dictWord{132, 0, 125}, + dictWord{134, 0, 1622}, + dictWord{133, 11, 959}, + dictWord{ + 8, + 10, + 420, + }, + dictWord{139, 10, 193}, + dictWord{132, 0, 721}, + dictWord{135, 10, 409}, + dictWord{136, 0, 145}, + dictWord{7, 0, 792}, + dictWord{8, 0, 147}, + dictWord{ + 10, + 0, + 821, + }, + dictWord{11, 0, 970}, + dictWord{11, 0, 1021}, + dictWord{136, 11, 173}, + dictWord{134, 11, 266}, + dictWord{132, 0, 715}, + dictWord{7, 0, 1999}, + dictWord{138, 10, 308}, + dictWord{133, 0, 531}, + dictWord{5, 0, 168}, + dictWord{5, 0, 930}, + dictWord{8, 0, 74}, + dictWord{9, 0, 623}, + dictWord{12, 0, 500}, + dictWord{ + 140, + 0, + 579, + }, + dictWord{144, 0, 65}, + dictWord{138, 11, 246}, + dictWord{6, 0, 220}, + dictWord{7, 0, 1101}, + dictWord{13, 0, 105}, + dictWord{142, 11, 314}, + dictWord{ + 5, + 10, + 1002, + }, + dictWord{136, 10, 745}, + dictWord{134, 0, 960}, + dictWord{20, 0, 0}, + dictWord{148, 11, 0}, + dictWord{4, 0, 1005}, + dictWord{4, 10, 239}, + dictWord{ + 6, + 10, + 477, + }, + dictWord{7, 10, 1607}, + dictWord{11, 10, 68}, + dictWord{139, 10, 617}, + dictWord{6, 0, 19}, + dictWord{7, 0, 1413}, + dictWord{139, 0, 428}, + dictWord{ + 149, + 10, + 13, + }, + dictWord{7, 0, 96}, + dictWord{8, 0, 401}, + dictWord{8, 0, 703}, + dictWord{9, 0, 896}, + dictWord{136, 11, 300}, + dictWord{134, 0, 1595}, + dictWord{145, 0, 116}, + dictWord{136, 0, 1021}, + dictWord{7, 0, 1961}, + dictWord{7, 0, 1965}, + dictWord{7, 0, 2030}, + dictWord{8, 0, 150}, + dictWord{8, 0, 702}, + dictWord{8, 0, 737}, + dictWord{ + 8, + 0, + 750, + }, + dictWord{140, 0, 366}, + dictWord{11, 11, 75}, + dictWord{142, 11, 267}, + dictWord{132, 10, 367}, + dictWord{8, 0, 800}, + dictWord{9, 0, 148}, + dictWord{ + 9, + 0, + 872, + }, + dictWord{9, 0, 890}, + dictWord{11, 0, 309}, + dictWord{11, 0, 1001}, + dictWord{13, 0, 267}, + dictWord{13, 0, 323}, + dictWord{5, 11, 427}, + dictWord{ + 5, + 11, + 734, + }, + dictWord{7, 11, 478}, + dictWord{136, 11, 52}, + dictWord{7, 11, 239}, + dictWord{11, 11, 217}, + dictWord{142, 11, 165}, + dictWord{132, 11, 323}, + dictWord{140, 11, 419}, + dictWord{13, 0, 299}, + dictWord{142, 0, 75}, + dictWord{6, 11, 87}, + dictWord{6, 11, 1734}, + dictWord{7, 11, 20}, + dictWord{7, 11, 1056}, + dictWord{ + 8, + 11, + 732, + }, + dictWord{9, 11, 406}, + dictWord{9, 11, 911}, + dictWord{138, 11, 694}, + dictWord{134, 0, 1383}, + dictWord{132, 10, 694}, + dictWord{ + 133, + 11, + 613, + }, + dictWord{137, 0, 779}, + dictWord{4, 0, 598}, + dictWord{140, 10, 687}, + dictWord{6, 0, 970}, + dictWord{135, 0, 424}, + dictWord{133, 0, 547}, + dictWord{ + 7, + 11, + 32, + }, + dictWord{7, 11, 984}, + dictWord{8, 11, 85}, + dictWord{8, 11, 709}, + 
dictWord{9, 11, 579}, + dictWord{9, 11, 847}, + dictWord{9, 11, 856}, + dictWord{10, 11, 799}, + dictWord{11, 11, 258}, + dictWord{11, 11, 1007}, + dictWord{12, 11, 331}, + dictWord{12, 11, 615}, + dictWord{13, 11, 188}, + dictWord{13, 11, 435}, + dictWord{ + 14, + 11, + 8, + }, + dictWord{15, 11, 165}, + dictWord{16, 11, 27}, + dictWord{148, 11, 40}, + dictWord{6, 0, 1222}, + dictWord{134, 0, 1385}, + dictWord{132, 0, 876}, + dictWord{ + 138, + 11, + 151, + }, + dictWord{135, 10, 213}, + dictWord{4, 11, 167}, + dictWord{135, 11, 82}, + dictWord{133, 0, 133}, + dictWord{6, 11, 24}, + dictWord{7, 11, 74}, + dictWord{ + 7, + 11, + 678, + }, + dictWord{137, 11, 258}, + dictWord{5, 11, 62}, + dictWord{6, 11, 534}, + dictWord{7, 11, 684}, + dictWord{7, 11, 1043}, + dictWord{7, 11, 1072}, + dictWord{ + 8, + 11, + 280, + }, + dictWord{8, 11, 541}, + dictWord{8, 11, 686}, + dictWord{10, 11, 519}, + dictWord{11, 11, 252}, + dictWord{140, 11, 282}, + dictWord{136, 0, 187}, + dictWord{8, 0, 8}, + dictWord{10, 0, 0}, + dictWord{10, 0, 818}, + dictWord{139, 0, 988}, + dictWord{132, 11, 359}, + dictWord{11, 0, 429}, + dictWord{15, 0, 51}, + dictWord{ + 135, + 10, + 1672, + }, + dictWord{136, 0, 685}, + dictWord{5, 11, 211}, + dictWord{7, 11, 88}, + dictWord{136, 11, 627}, + dictWord{134, 0, 472}, + dictWord{136, 0, 132}, + dictWord{ + 6, + 11, + 145, + }, + dictWord{141, 11, 336}, + dictWord{4, 10, 751}, + dictWord{11, 10, 390}, + dictWord{140, 10, 32}, + dictWord{6, 0, 938}, + dictWord{6, 0, 1060}, + dictWord{ + 4, + 11, + 263, + }, + dictWord{4, 10, 409}, + dictWord{133, 10, 78}, + dictWord{137, 0, 874}, + dictWord{8, 0, 774}, + dictWord{10, 0, 670}, + dictWord{12, 0, 51}, + dictWord{ + 4, + 11, + 916, + }, + dictWord{6, 10, 473}, + dictWord{7, 10, 1602}, + dictWord{10, 10, 698}, + dictWord{12, 10, 212}, + dictWord{13, 10, 307}, + dictWord{145, 10, 105}, + dictWord{146, 0, 92}, + dictWord{143, 10, 156}, + dictWord{132, 0, 830}, + dictWord{137, 0, 701}, + dictWord{4, 11, 599}, + dictWord{6, 11, 1634}, + dictWord{7, 11, 5}, + dictWord{7, 11, 55}, + dictWord{7, 11, 67}, + dictWord{7, 11, 97}, + dictWord{7, 11, 691}, + dictWord{7, 11, 979}, + dictWord{7, 11, 1697}, + dictWord{8, 11, 207}, + dictWord{ + 8, + 11, + 214, + }, + dictWord{8, 11, 231}, + dictWord{8, 11, 294}, + dictWord{8, 11, 336}, + dictWord{8, 11, 428}, + dictWord{8, 11, 451}, + dictWord{8, 11, 460}, + dictWord{8, 11, 471}, + dictWord{8, 11, 622}, + dictWord{8, 11, 626}, + dictWord{8, 11, 679}, + dictWord{8, 11, 759}, + dictWord{8, 11, 829}, + dictWord{9, 11, 11}, + dictWord{9, 11, 246}, + dictWord{ + 9, + 11, + 484, + }, + dictWord{9, 11, 573}, + dictWord{9, 11, 706}, + dictWord{9, 11, 762}, + dictWord{9, 11, 798}, + dictWord{9, 11, 855}, + dictWord{9, 11, 870}, + dictWord{ + 9, + 11, + 912, + }, + dictWord{10, 11, 303}, + dictWord{10, 11, 335}, + dictWord{10, 11, 424}, + dictWord{10, 11, 461}, + dictWord{10, 11, 543}, + dictWord{10, 11, 759}, + dictWord{10, 11, 814}, + dictWord{11, 11, 59}, + dictWord{11, 11, 199}, + dictWord{11, 11, 235}, + dictWord{11, 11, 475}, + dictWord{11, 11, 590}, + dictWord{11, 11, 929}, + dictWord{11, 11, 963}, + dictWord{12, 11, 114}, + dictWord{12, 11, 182}, + dictWord{12, 11, 226}, + dictWord{12, 11, 332}, + dictWord{12, 11, 439}, + dictWord{ + 12, + 11, + 575, + }, + dictWord{12, 11, 598}, + dictWord{13, 11, 8}, + dictWord{13, 11, 125}, + dictWord{13, 11, 194}, + dictWord{13, 11, 287}, + dictWord{14, 11, 197}, + dictWord{ + 14, + 11, + 383, + }, + dictWord{15, 11, 53}, + dictWord{17, 11, 63}, + dictWord{19, 
11, 46}, + dictWord{19, 11, 98}, + dictWord{19, 11, 106}, + dictWord{148, 11, 85}, + dictWord{ + 4, + 0, + 127, + }, + dictWord{5, 0, 350}, + dictWord{6, 0, 356}, + dictWord{8, 0, 426}, + dictWord{9, 0, 572}, + dictWord{10, 0, 247}, + dictWord{139, 0, 312}, + dictWord{134, 0, 1215}, + dictWord{6, 0, 59}, + dictWord{9, 0, 603}, + dictWord{13, 0, 397}, + dictWord{7, 11, 1853}, + dictWord{138, 11, 437}, + dictWord{134, 0, 1762}, + dictWord{ + 147, + 11, + 126, + }, + dictWord{135, 10, 883}, + dictWord{13, 0, 293}, + dictWord{142, 0, 56}, + dictWord{133, 10, 617}, + dictWord{139, 10, 50}, + dictWord{5, 11, 187}, + dictWord{ + 7, + 10, + 1518, + }, + dictWord{139, 10, 694}, + dictWord{135, 0, 441}, + dictWord{6, 0, 111}, + dictWord{7, 0, 4}, + dictWord{8, 0, 163}, + dictWord{8, 0, 776}, + dictWord{ + 138, + 0, + 566, + }, + dictWord{132, 0, 806}, + dictWord{4, 11, 215}, + dictWord{9, 11, 38}, + dictWord{10, 11, 3}, + dictWord{11, 11, 23}, + dictWord{11, 11, 127}, + dictWord{ + 139, + 11, + 796, + }, + dictWord{14, 0, 233}, + dictWord{4, 10, 546}, + dictWord{135, 10, 2042}, + dictWord{135, 0, 1994}, + dictWord{134, 0, 1739}, + dictWord{135, 11, 1530}, + dictWord{136, 0, 393}, + dictWord{5, 0, 297}, + dictWord{7, 0, 1038}, + dictWord{14, 0, 359}, + dictWord{19, 0, 52}, + dictWord{148, 0, 47}, + dictWord{135, 0, 309}, + dictWord{ + 4, + 10, + 313, + }, + dictWord{133, 10, 577}, + dictWord{8, 10, 184}, + dictWord{141, 10, 433}, + dictWord{135, 10, 935}, + dictWord{12, 10, 186}, + dictWord{ + 12, + 10, + 292, + }, + dictWord{14, 10, 100}, + dictWord{146, 10, 70}, + dictWord{136, 0, 363}, + dictWord{14, 0, 175}, + dictWord{11, 10, 402}, + dictWord{12, 10, 109}, + dictWord{ + 12, + 10, + 431, + }, + dictWord{13, 10, 179}, + dictWord{13, 10, 206}, + dictWord{14, 10, 217}, + dictWord{16, 10, 3}, + dictWord{148, 10, 53}, + dictWord{5, 10, 886}, + dictWord{ + 6, + 10, + 46, + }, + dictWord{6, 10, 1790}, + dictWord{7, 10, 14}, + dictWord{7, 10, 732}, + dictWord{7, 10, 1654}, + dictWord{8, 10, 95}, + dictWord{8, 10, 327}, + dictWord{ + 8, + 10, + 616, + }, + dictWord{9, 10, 892}, + dictWord{10, 10, 598}, + dictWord{10, 10, 769}, + dictWord{11, 10, 134}, + dictWord{11, 10, 747}, + dictWord{12, 10, 378}, + dictWord{ + 142, + 10, + 97, + }, + dictWord{136, 0, 666}, + dictWord{135, 0, 1675}, + dictWord{6, 0, 655}, + dictWord{134, 0, 1600}, + dictWord{135, 0, 808}, + dictWord{133, 10, 1021}, + dictWord{4, 11, 28}, + dictWord{5, 11, 440}, + dictWord{7, 11, 248}, + dictWord{11, 11, 833}, + dictWord{140, 11, 344}, + dictWord{134, 11, 1654}, + dictWord{ + 132, + 0, + 280, + }, + dictWord{140, 0, 54}, + dictWord{4, 0, 421}, + dictWord{133, 0, 548}, + dictWord{132, 10, 153}, + dictWord{6, 11, 339}, + dictWord{135, 11, 923}, + dictWord{ + 133, + 11, + 853, + }, + dictWord{133, 10, 798}, + dictWord{132, 10, 587}, + dictWord{6, 11, 249}, + dictWord{7, 11, 1234}, + dictWord{139, 11, 573}, + dictWord{6, 10, 598}, + dictWord{7, 10, 42}, + dictWord{8, 10, 695}, + dictWord{10, 10, 212}, + dictWord{11, 10, 158}, + dictWord{14, 10, 196}, + dictWord{145, 10, 85}, + dictWord{7, 0, 249}, + dictWord{5, 10, 957}, + dictWord{133, 10, 1008}, + dictWord{4, 10, 129}, + dictWord{135, 10, 465}, + dictWord{6, 0, 254}, + dictWord{7, 0, 842}, + dictWord{7, 0, 1659}, + dictWord{9, 0, 109}, + dictWord{10, 0, 103}, + dictWord{7, 10, 908}, + dictWord{7, 10, 1201}, + dictWord{9, 10, 755}, + dictWord{11, 10, 906}, + dictWord{12, 10, 527}, + dictWord{146, 10, 7}, + dictWord{5, 0, 262}, + dictWord{136, 10, 450}, + dictWord{144, 0, 1}, + dictWord{10, 
11, 201}, + dictWord{142, 11, 319}, + dictWord{7, 11, 49}, + dictWord{ + 7, + 11, + 392, + }, + dictWord{8, 11, 20}, + dictWord{8, 11, 172}, + dictWord{8, 11, 690}, + dictWord{9, 11, 383}, + dictWord{9, 11, 845}, + dictWord{10, 11, 48}, + dictWord{ + 11, + 11, + 293, + }, + dictWord{11, 11, 832}, + dictWord{11, 11, 920}, + dictWord{141, 11, 221}, + dictWord{5, 11, 858}, + dictWord{133, 11, 992}, + dictWord{134, 0, 805}, + dictWord{139, 10, 1003}, + dictWord{6, 0, 1630}, + dictWord{134, 11, 307}, + dictWord{7, 11, 1512}, + dictWord{135, 11, 1794}, + dictWord{6, 11, 268}, + dictWord{ + 137, + 11, + 62, + }, + dictWord{135, 10, 1868}, + dictWord{133, 0, 671}, + dictWord{4, 0, 989}, + dictWord{8, 0, 972}, + dictWord{136, 0, 998}, + dictWord{132, 11, 423}, + dictWord{132, 0, 889}, + dictWord{135, 0, 1382}, + dictWord{135, 0, 1910}, + dictWord{7, 10, 965}, + dictWord{7, 10, 1460}, + dictWord{135, 10, 1604}, + dictWord{ + 4, + 0, + 627, + }, + dictWord{5, 0, 775}, + dictWord{138, 11, 106}, + dictWord{134, 11, 348}, + dictWord{7, 0, 202}, + dictWord{11, 0, 362}, + dictWord{11, 0, 948}, + dictWord{ + 140, + 0, + 388, + }, + dictWord{138, 11, 771}, + dictWord{6, 11, 613}, + dictWord{136, 11, 223}, + dictWord{6, 0, 560}, + dictWord{7, 0, 451}, + dictWord{8, 0, 389}, + dictWord{ + 12, + 0, + 490, + }, + dictWord{13, 0, 16}, + dictWord{13, 0, 215}, + dictWord{13, 0, 351}, + dictWord{18, 0, 132}, + dictWord{147, 0, 125}, + dictWord{135, 0, 841}, + dictWord{ + 136, + 0, + 566, + }, + dictWord{136, 0, 938}, + dictWord{132, 11, 670}, + dictWord{5, 0, 912}, + dictWord{6, 0, 1695}, + dictWord{140, 11, 55}, + dictWord{9, 11, 40}, + dictWord{ + 139, + 11, + 136, + }, + dictWord{7, 0, 1361}, + dictWord{7, 10, 982}, + dictWord{10, 10, 32}, + dictWord{143, 10, 56}, + dictWord{11, 11, 259}, + dictWord{140, 11, 270}, + dictWord{ + 5, + 0, + 236, + }, + dictWord{6, 0, 572}, + dictWord{8, 0, 492}, + dictWord{11, 0, 618}, + dictWord{144, 0, 56}, + dictWord{8, 11, 572}, + dictWord{9, 11, 310}, + dictWord{9, 11, 682}, + dictWord{137, 11, 698}, + dictWord{134, 0, 1854}, + dictWord{5, 0, 190}, + dictWord{136, 0, 318}, + dictWord{133, 10, 435}, + dictWord{135, 0, 1376}, + dictWord{ + 4, + 11, + 296, + }, + dictWord{6, 11, 352}, + dictWord{7, 11, 401}, + dictWord{7, 11, 1410}, + dictWord{7, 11, 1594}, + dictWord{7, 11, 1674}, + dictWord{8, 11, 63}, + dictWord{ + 8, + 11, + 660, + }, + dictWord{137, 11, 74}, + dictWord{7, 0, 349}, + dictWord{5, 10, 85}, + dictWord{6, 10, 419}, + dictWord{7, 10, 305}, + dictWord{7, 10, 361}, + dictWord{7, 10, 1337}, + dictWord{8, 10, 71}, + dictWord{140, 10, 519}, + dictWord{4, 11, 139}, + dictWord{4, 11, 388}, + dictWord{140, 11, 188}, + dictWord{6, 0, 1972}, + dictWord{6, 0, 2013}, + dictWord{8, 0, 951}, + dictWord{10, 0, 947}, + dictWord{10, 0, 974}, + dictWord{10, 0, 1018}, + dictWord{142, 0, 476}, + dictWord{140, 10, 688}, + dictWord{ + 135, + 10, + 740, + }, + dictWord{5, 10, 691}, + dictWord{7, 10, 345}, + dictWord{9, 10, 94}, + dictWord{140, 10, 169}, + dictWord{9, 0, 344}, + dictWord{5, 10, 183}, + dictWord{6, 10, 582}, + dictWord{10, 10, 679}, + dictWord{140, 10, 435}, + dictWord{135, 10, 511}, + dictWord{132, 0, 850}, + dictWord{8, 11, 441}, + dictWord{10, 11, 314}, + dictWord{ + 143, + 11, + 3, + }, + dictWord{7, 10, 1993}, + dictWord{136, 10, 684}, + dictWord{4, 11, 747}, + dictWord{6, 11, 290}, + dictWord{6, 10, 583}, + dictWord{7, 11, 649}, + dictWord{ + 7, + 11, + 1479, + }, + dictWord{135, 11, 1583}, + dictWord{133, 11, 232}, + dictWord{133, 10, 704}, + dictWord{134, 0, 910}, 
+ dictWord{4, 10, 179}, + dictWord{5, 10, 198}, + dictWord{133, 10, 697}, + dictWord{7, 10, 347}, + dictWord{7, 10, 971}, + dictWord{8, 10, 181}, + dictWord{138, 10, 711}, + dictWord{136, 11, 525}, + dictWord{ + 14, + 0, + 19, + }, + dictWord{14, 0, 28}, + dictWord{144, 0, 29}, + dictWord{7, 0, 85}, + dictWord{7, 0, 247}, + dictWord{8, 0, 585}, + dictWord{138, 0, 163}, + dictWord{4, 0, 487}, + dictWord{ + 7, + 11, + 472, + }, + dictWord{7, 11, 1801}, + dictWord{10, 11, 748}, + dictWord{141, 11, 458}, + dictWord{4, 10, 243}, + dictWord{5, 10, 203}, + dictWord{7, 10, 19}, + dictWord{ + 7, + 10, + 71, + }, + dictWord{7, 10, 113}, + dictWord{10, 10, 405}, + dictWord{11, 10, 357}, + dictWord{142, 10, 240}, + dictWord{7, 10, 1450}, + dictWord{139, 10, 99}, + dictWord{132, 11, 425}, + dictWord{138, 0, 145}, + dictWord{147, 0, 83}, + dictWord{6, 10, 492}, + dictWord{137, 11, 247}, + dictWord{4, 0, 1013}, + dictWord{ + 134, + 0, + 2033, + }, + dictWord{5, 10, 134}, + dictWord{6, 10, 408}, + dictWord{6, 10, 495}, + dictWord{135, 10, 1593}, + dictWord{135, 0, 1922}, + dictWord{134, 11, 1768}, + dictWord{4, 0, 124}, + dictWord{10, 0, 457}, + dictWord{11, 0, 121}, + dictWord{11, 0, 169}, + dictWord{11, 0, 870}, + dictWord{11, 0, 874}, + dictWord{12, 0, 214}, + dictWord{ + 14, + 0, + 187, + }, + dictWord{143, 0, 77}, + dictWord{5, 0, 557}, + dictWord{135, 0, 1457}, + dictWord{139, 0, 66}, + dictWord{5, 11, 943}, + dictWord{6, 11, 1779}, + dictWord{ + 142, + 10, + 4, + }, + dictWord{4, 10, 248}, + dictWord{4, 10, 665}, + dictWord{7, 10, 137}, + dictWord{137, 10, 349}, + dictWord{7, 0, 1193}, + dictWord{5, 11, 245}, + dictWord{ + 6, + 11, + 576, + }, + dictWord{7, 11, 582}, + dictWord{136, 11, 225}, + dictWord{144, 0, 82}, + dictWord{7, 10, 1270}, + dictWord{139, 10, 612}, + dictWord{5, 0, 454}, + dictWord{ + 10, + 0, + 352, + }, + dictWord{138, 11, 352}, + dictWord{18, 0, 57}, + dictWord{5, 10, 371}, + dictWord{135, 10, 563}, + dictWord{135, 0, 1333}, + dictWord{6, 0, 107}, + dictWord{ + 7, + 0, + 638, + }, + dictWord{7, 0, 1632}, + dictWord{9, 0, 396}, + dictWord{134, 11, 610}, + dictWord{5, 0, 370}, + dictWord{134, 0, 1756}, + dictWord{4, 10, 374}, + dictWord{ + 7, + 10, + 547, + }, + dictWord{7, 10, 1700}, + dictWord{7, 10, 1833}, + dictWord{139, 10, 858}, + dictWord{133, 0, 204}, + dictWord{6, 0, 1305}, + dictWord{9, 10, 311}, + dictWord{ + 141, + 10, + 42, + }, + dictWord{5, 0, 970}, + dictWord{134, 0, 1706}, + dictWord{6, 10, 1647}, + dictWord{7, 10, 1552}, + dictWord{7, 10, 2010}, + dictWord{9, 10, 494}, + dictWord{137, 10, 509}, + dictWord{13, 11, 455}, + dictWord{15, 11, 99}, + dictWord{15, 11, 129}, + dictWord{144, 11, 68}, + dictWord{135, 0, 3}, + dictWord{4, 0, 35}, + dictWord{ + 5, + 0, + 121, + }, + dictWord{5, 0, 483}, + dictWord{5, 0, 685}, + dictWord{6, 0, 489}, + dictWord{6, 0, 782}, + dictWord{6, 0, 1032}, + dictWord{7, 0, 1204}, + dictWord{136, 0, 394}, + dictWord{4, 0, 921}, + dictWord{133, 0, 1007}, + dictWord{8, 11, 360}, + dictWord{138, 11, 63}, + dictWord{135, 0, 1696}, + dictWord{134, 0, 1519}, + dictWord{ + 132, + 11, + 443, + }, + dictWord{135, 11, 944}, + dictWord{6, 10, 123}, + dictWord{7, 10, 214}, + dictWord{9, 10, 728}, + dictWord{10, 10, 157}, + dictWord{11, 10, 346}, + dictWord{11, 10, 662}, + dictWord{143, 10, 106}, + dictWord{137, 0, 981}, + dictWord{135, 10, 1435}, + dictWord{134, 0, 1072}, + dictWord{132, 0, 712}, + dictWord{ + 134, + 0, + 1629, + }, + dictWord{134, 0, 728}, + dictWord{4, 11, 298}, + dictWord{137, 11, 483}, + dictWord{6, 0, 1177}, + dictWord{6, 0, 
1271}, + dictWord{5, 11, 164}, + dictWord{ + 7, + 11, + 121, + }, + dictWord{142, 11, 189}, + dictWord{7, 0, 1608}, + dictWord{4, 10, 707}, + dictWord{5, 10, 588}, + dictWord{6, 10, 393}, + dictWord{13, 10, 106}, + dictWord{ + 18, + 10, + 49, + }, + dictWord{147, 10, 41}, + dictWord{23, 0, 16}, + dictWord{151, 11, 16}, + dictWord{6, 10, 211}, + dictWord{7, 10, 1690}, + dictWord{11, 10, 486}, + dictWord{140, 10, 369}, + dictWord{133, 0, 485}, + dictWord{19, 11, 15}, + dictWord{149, 11, 27}, + dictWord{4, 11, 172}, + dictWord{9, 11, 611}, + dictWord{10, 11, 436}, + dictWord{12, 11, 673}, + dictWord{141, 11, 255}, + dictWord{5, 11, 844}, + dictWord{10, 11, 484}, + dictWord{11, 11, 754}, + dictWord{12, 11, 457}, + dictWord{ + 14, + 11, + 171, + }, + dictWord{14, 11, 389}, + dictWord{146, 11, 153}, + dictWord{4, 0, 285}, + dictWord{5, 0, 27}, + dictWord{5, 0, 317}, + dictWord{6, 0, 301}, + dictWord{7, 0, 7}, + dictWord{ + 8, + 0, + 153, + }, + dictWord{10, 0, 766}, + dictWord{11, 0, 468}, + dictWord{12, 0, 467}, + dictWord{141, 0, 143}, + dictWord{134, 0, 1462}, + dictWord{9, 11, 263}, + dictWord{ + 10, + 11, + 147, + }, + dictWord{138, 11, 492}, + dictWord{133, 11, 537}, + dictWord{6, 0, 1945}, + dictWord{6, 0, 1986}, + dictWord{6, 0, 1991}, + dictWord{134, 0, 2038}, + dictWord{134, 10, 219}, + dictWord{137, 11, 842}, + dictWord{14, 0, 52}, + dictWord{17, 0, 50}, + dictWord{5, 10, 582}, + dictWord{6, 10, 1646}, + dictWord{7, 10, 99}, + dictWord{7, 10, 1962}, + dictWord{7, 10, 1986}, + dictWord{8, 10, 515}, + dictWord{8, 10, 773}, + dictWord{9, 10, 23}, + dictWord{9, 10, 491}, + dictWord{12, 10, 620}, + dictWord{142, 10, 93}, + dictWord{138, 11, 97}, + dictWord{20, 0, 21}, + dictWord{20, 0, 44}, + dictWord{133, 10, 851}, + dictWord{136, 0, 819}, + dictWord{139, 0, 917}, + dictWord{5, 11, 230}, + dictWord{5, 11, 392}, + dictWord{6, 11, 420}, + dictWord{8, 10, 762}, + dictWord{8, 10, 812}, + dictWord{9, 11, 568}, + dictWord{9, 10, 910}, + dictWord{140, 11, 612}, + dictWord{135, 0, 784}, + dictWord{15, 0, 135}, + dictWord{143, 11, 135}, + dictWord{10, 0, 454}, + dictWord{140, 0, 324}, + dictWord{4, 11, 0}, + dictWord{5, 11, 41}, + dictWord{7, 11, 1459}, + dictWord{7, 11, 1469}, + dictWord{7, 11, 1618}, + dictWord{7, 11, 1859}, + dictWord{9, 11, 549}, + dictWord{139, 11, 905}, + dictWord{4, 10, 98}, + dictWord{7, 10, 1365}, + dictWord{9, 10, 422}, + dictWord{9, 10, 670}, + dictWord{10, 10, 775}, + dictWord{11, 10, 210}, + dictWord{13, 10, 26}, + dictWord{13, 10, 457}, + dictWord{141, 10, 476}, + dictWord{6, 0, 1719}, + dictWord{6, 0, 1735}, + dictWord{7, 0, 2016}, + dictWord{7, 0, 2020}, + dictWord{8, 0, 837}, + dictWord{137, 0, 852}, + dictWord{133, 11, 696}, + dictWord{135, 0, 852}, + dictWord{132, 0, 952}, + dictWord{134, 10, 1730}, + dictWord{132, 11, 771}, + dictWord{ + 138, + 0, + 568, + }, + dictWord{137, 0, 448}, + dictWord{139, 0, 146}, + dictWord{8, 0, 67}, + dictWord{138, 0, 419}, + dictWord{133, 11, 921}, + dictWord{137, 10, 147}, + dictWord{134, 0, 1826}, + dictWord{10, 0, 657}, + dictWord{14, 0, 297}, + dictWord{142, 0, 361}, + dictWord{6, 0, 666}, + dictWord{6, 0, 767}, + dictWord{134, 0, 1542}, + dictWord{139, 0, 729}, + dictWord{6, 11, 180}, + dictWord{7, 11, 1137}, + dictWord{8, 11, 751}, + dictWord{139, 11, 805}, + dictWord{4, 11, 183}, + dictWord{7, 11, 271}, + dictWord{11, 11, 824}, + dictWord{11, 11, 952}, + dictWord{13, 11, 278}, + dictWord{13, 11, 339}, + dictWord{13, 11, 482}, + dictWord{14, 11, 424}, + dictWord{ + 148, + 11, + 99, + }, + dictWord{4, 0, 669}, + dictWord{5, 
11, 477}, + dictWord{5, 11, 596}, + dictWord{6, 11, 505}, + dictWord{7, 11, 1221}, + dictWord{11, 11, 907}, + dictWord{ + 12, + 11, + 209, + }, + dictWord{141, 11, 214}, + dictWord{135, 11, 1215}, + dictWord{5, 0, 402}, + dictWord{6, 10, 30}, + dictWord{11, 10, 56}, + dictWord{139, 10, 305}, + dictWord{ + 7, + 11, + 564, + }, + dictWord{142, 11, 168}, + dictWord{139, 0, 152}, + dictWord{7, 0, 912}, + dictWord{135, 10, 1614}, + dictWord{4, 10, 150}, + dictWord{5, 10, 303}, + dictWord{134, 10, 327}, + dictWord{7, 0, 320}, + dictWord{8, 0, 51}, + dictWord{9, 0, 868}, + dictWord{10, 0, 833}, + dictWord{12, 0, 481}, + dictWord{12, 0, 570}, + dictWord{ + 148, + 0, + 106, + }, + dictWord{132, 0, 445}, + dictWord{7, 11, 274}, + dictWord{11, 11, 263}, + dictWord{11, 11, 479}, + dictWord{11, 11, 507}, + dictWord{140, 11, 277}, + dictWord{10, 0, 555}, + dictWord{11, 0, 308}, + dictWord{19, 0, 95}, + dictWord{6, 11, 1645}, + dictWord{8, 10, 192}, + dictWord{10, 10, 78}, + dictWord{141, 10, 359}, + dictWord{135, 10, 786}, + dictWord{6, 11, 92}, + dictWord{6, 11, 188}, + dictWord{7, 11, 1269}, + dictWord{7, 11, 1524}, + dictWord{7, 11, 1876}, + dictWord{10, 11, 228}, + dictWord{139, 11, 1020}, + dictWord{4, 11, 459}, + dictWord{133, 11, 966}, + dictWord{11, 0, 386}, + dictWord{6, 10, 1638}, + dictWord{7, 10, 79}, + dictWord{ + 7, + 10, + 496, + }, + dictWord{9, 10, 138}, + dictWord{10, 10, 336}, + dictWord{12, 10, 412}, + dictWord{12, 10, 440}, + dictWord{142, 10, 305}, + dictWord{133, 0, 239}, + dictWord{ + 7, + 0, + 83, + }, + dictWord{7, 0, 1990}, + dictWord{8, 0, 130}, + dictWord{139, 0, 720}, + dictWord{138, 11, 709}, + dictWord{4, 0, 143}, + dictWord{5, 0, 550}, + dictWord{ + 133, + 0, + 752, + }, + dictWord{5, 0, 123}, + dictWord{6, 0, 530}, + dictWord{7, 0, 348}, + dictWord{135, 0, 1419}, + dictWord{135, 0, 2024}, + dictWord{6, 11, 18}, + dictWord{7, 11, 179}, + dictWord{7, 11, 721}, + dictWord{7, 11, 932}, + dictWord{8, 11, 548}, + dictWord{8, 11, 757}, + dictWord{9, 11, 54}, + dictWord{9, 11, 65}, + dictWord{9, 11, 532}, + dictWord{ + 9, + 11, + 844, + }, + dictWord{10, 11, 113}, + dictWord{10, 11, 117}, + dictWord{10, 11, 236}, + dictWord{10, 11, 315}, + dictWord{10, 11, 430}, + dictWord{10, 11, 798}, + dictWord{11, 11, 153}, + dictWord{11, 11, 351}, + dictWord{11, 11, 375}, + dictWord{12, 11, 78}, + dictWord{12, 11, 151}, + dictWord{12, 11, 392}, + dictWord{ + 14, + 11, + 248, + }, + dictWord{143, 11, 23}, + dictWord{7, 10, 204}, + dictWord{7, 10, 415}, + dictWord{8, 10, 42}, + dictWord{10, 10, 85}, + dictWord{139, 10, 564}, + dictWord{ + 134, + 0, + 958, + }, + dictWord{133, 11, 965}, + dictWord{132, 0, 210}, + dictWord{135, 11, 1429}, + dictWord{138, 11, 480}, + dictWord{134, 11, 182}, + dictWord{ + 139, + 11, + 345, + }, + dictWord{10, 11, 65}, + dictWord{10, 11, 488}, + dictWord{138, 11, 497}, + dictWord{4, 10, 3}, + dictWord{5, 10, 247}, + dictWord{5, 10, 644}, + dictWord{ + 7, + 10, + 744, + }, + dictWord{7, 10, 1207}, + dictWord{7, 10, 1225}, + dictWord{7, 10, 1909}, + dictWord{146, 10, 147}, + dictWord{132, 0, 430}, + dictWord{5, 10, 285}, + dictWord{ + 9, + 10, + 67, + }, + dictWord{13, 10, 473}, + dictWord{143, 10, 82}, + dictWord{144, 11, 16}, + dictWord{7, 11, 1162}, + dictWord{9, 11, 588}, + dictWord{10, 11, 260}, + dictWord{151, 10, 8}, + dictWord{133, 0, 213}, + dictWord{138, 0, 7}, + dictWord{135, 0, 801}, + dictWord{134, 11, 1786}, + dictWord{135, 11, 308}, + dictWord{6, 0, 936}, + dictWord{134, 0, 1289}, + dictWord{133, 0, 108}, + dictWord{132, 0, 885}, + dictWord{133, 0, 
219}, + dictWord{139, 0, 587}, + dictWord{4, 0, 193}, + dictWord{5, 0, 916}, + dictWord{6, 0, 1041}, + dictWord{7, 0, 364}, + dictWord{10, 0, 398}, + dictWord{10, 0, 726}, + dictWord{11, 0, 317}, + dictWord{11, 0, 626}, + dictWord{12, 0, 142}, + dictWord{12, 0, 288}, + dictWord{12, 0, 678}, + dictWord{13, 0, 313}, + dictWord{15, 0, 113}, + dictWord{146, 0, 114}, + dictWord{135, 0, 1165}, + dictWord{6, 0, 241}, + dictWord{ + 9, + 0, + 342, + }, + dictWord{10, 0, 729}, + dictWord{11, 0, 284}, + dictWord{11, 0, 445}, + dictWord{11, 0, 651}, + dictWord{11, 0, 863}, + dictWord{13, 0, 398}, + dictWord{ + 146, + 0, + 99, + }, + dictWord{7, 0, 907}, + dictWord{136, 0, 832}, + dictWord{9, 0, 303}, + dictWord{4, 10, 29}, + dictWord{6, 10, 532}, + dictWord{7, 10, 1628}, + dictWord{7, 10, 1648}, + dictWord{9, 10, 350}, + dictWord{10, 10, 433}, + dictWord{11, 10, 97}, + dictWord{11, 10, 557}, + dictWord{11, 10, 745}, + dictWord{12, 10, 289}, + dictWord{ + 12, + 10, + 335, + }, + dictWord{12, 10, 348}, + dictWord{12, 10, 606}, + dictWord{13, 10, 116}, + dictWord{13, 10, 233}, + dictWord{13, 10, 466}, + dictWord{14, 10, 181}, + dictWord{ + 14, + 10, + 209, + }, + dictWord{14, 10, 232}, + dictWord{14, 10, 236}, + dictWord{14, 10, 300}, + dictWord{16, 10, 41}, + dictWord{148, 10, 97}, + dictWord{7, 11, 423}, + dictWord{7, 10, 1692}, + dictWord{136, 11, 588}, + dictWord{6, 0, 931}, + dictWord{134, 0, 1454}, + dictWord{5, 10, 501}, + dictWord{7, 10, 1704}, + dictWord{9, 10, 553}, + dictWord{11, 10, 520}, + dictWord{12, 10, 557}, + dictWord{141, 10, 249}, + dictWord{136, 11, 287}, + dictWord{4, 0, 562}, + dictWord{9, 0, 254}, + dictWord{ + 139, + 0, + 879, + }, + dictWord{132, 0, 786}, + dictWord{14, 11, 32}, + dictWord{18, 11, 85}, + dictWord{20, 11, 2}, + dictWord{152, 11, 16}, + dictWord{135, 0, 1294}, + dictWord{ + 7, + 11, + 723, + }, + dictWord{135, 11, 1135}, + dictWord{6, 0, 216}, + dictWord{7, 0, 901}, + dictWord{7, 0, 1343}, + dictWord{8, 0, 493}, + dictWord{134, 11, 403}, + dictWord{ + 7, + 11, + 719, + }, + dictWord{8, 11, 809}, + dictWord{136, 11, 834}, + dictWord{5, 11, 210}, + dictWord{6, 11, 213}, + dictWord{7, 11, 60}, + dictWord{10, 11, 364}, + dictWord{ + 139, + 11, + 135, + }, + dictWord{7, 0, 341}, + dictWord{11, 0, 219}, + dictWord{5, 11, 607}, + dictWord{8, 11, 326}, + dictWord{136, 11, 490}, + dictWord{4, 11, 701}, + dictWord{ + 5, + 11, + 472, + }, + dictWord{5, 11, 639}, + dictWord{7, 11, 1249}, + dictWord{9, 11, 758}, + dictWord{139, 11, 896}, + dictWord{135, 11, 380}, + dictWord{135, 11, 1947}, + dictWord{139, 0, 130}, + dictWord{135, 0, 1734}, + dictWord{10, 0, 115}, + dictWord{11, 0, 420}, + dictWord{12, 0, 154}, + dictWord{13, 0, 404}, + dictWord{14, 0, 346}, + dictWord{143, 0, 54}, + dictWord{134, 10, 129}, + dictWord{4, 11, 386}, + dictWord{7, 11, 41}, + dictWord{8, 11, 405}, + dictWord{9, 11, 497}, + dictWord{11, 11, 110}, + dictWord{11, 11, 360}, + dictWord{15, 11, 37}, + dictWord{144, 11, 84}, + dictWord{141, 11, 282}, + dictWord{5, 11, 46}, + dictWord{7, 11, 1452}, + dictWord{7, 11, 1480}, + dictWord{8, 11, 634}, + dictWord{140, 11, 472}, + dictWord{4, 11, 524}, + dictWord{136, 11, 810}, + dictWord{10, 11, 238}, + dictWord{141, 11, 33}, + dictWord{ + 133, + 0, + 604, + }, + dictWord{5, 0, 1011}, + dictWord{136, 0, 701}, + dictWord{8, 0, 856}, + dictWord{8, 0, 858}, + dictWord{8, 0, 879}, + dictWord{12, 0, 702}, + dictWord{142, 0, 447}, + dictWord{4, 0, 54}, + dictWord{5, 0, 666}, + dictWord{7, 0, 1039}, + dictWord{7, 0, 1130}, + dictWord{9, 0, 195}, + dictWord{138, 0, 
302}, + dictWord{4, 10, 25}, + dictWord{ + 5, + 10, + 60, + }, + dictWord{6, 10, 504}, + dictWord{7, 10, 614}, + dictWord{7, 10, 1155}, + dictWord{140, 10, 0}, + dictWord{7, 10, 1248}, + dictWord{11, 10, 621}, + dictWord{ + 139, + 10, + 702, + }, + dictWord{133, 11, 997}, + dictWord{137, 10, 321}, + dictWord{134, 0, 1669}, + dictWord{134, 0, 1791}, + dictWord{4, 10, 379}, + dictWord{ + 135, + 10, + 1397, + }, + dictWord{138, 11, 372}, + dictWord{5, 11, 782}, + dictWord{5, 11, 829}, + dictWord{134, 11, 1738}, + dictWord{135, 0, 1228}, + dictWord{4, 10, 118}, + dictWord{6, 10, 274}, + dictWord{6, 10, 361}, + dictWord{7, 10, 75}, + dictWord{141, 10, 441}, + dictWord{132, 0, 623}, + dictWord{9, 11, 279}, + dictWord{10, 11, 407}, + dictWord{14, 11, 84}, + dictWord{150, 11, 18}, + dictWord{137, 10, 841}, + dictWord{135, 0, 798}, + dictWord{140, 10, 693}, + dictWord{5, 10, 314}, + dictWord{6, 10, 221}, + dictWord{7, 10, 419}, + dictWord{10, 10, 650}, + dictWord{11, 10, 396}, + dictWord{12, 10, 156}, + dictWord{13, 10, 369}, + dictWord{14, 10, 333}, + dictWord{ + 145, + 10, + 47, + }, + dictWord{135, 11, 1372}, + dictWord{7, 0, 122}, + dictWord{9, 0, 259}, + dictWord{10, 0, 84}, + dictWord{11, 0, 470}, + dictWord{12, 0, 541}, + dictWord{ + 141, + 0, + 379, + }, + dictWord{134, 0, 837}, + dictWord{8, 0, 1013}, + dictWord{4, 11, 78}, + dictWord{5, 11, 96}, + dictWord{5, 11, 182}, + dictWord{7, 11, 1724}, + dictWord{ + 7, + 11, + 1825, + }, + dictWord{10, 11, 394}, + dictWord{10, 11, 471}, + dictWord{11, 11, 532}, + dictWord{14, 11, 340}, + dictWord{145, 11, 88}, + dictWord{134, 0, 577}, + dictWord{135, 11, 1964}, + dictWord{132, 10, 913}, + dictWord{134, 0, 460}, + dictWord{8, 0, 891}, + dictWord{10, 0, 901}, + dictWord{10, 0, 919}, + dictWord{10, 0, 932}, + dictWord{12, 0, 715}, + dictWord{12, 0, 728}, + dictWord{12, 0, 777}, + dictWord{14, 0, 457}, + dictWord{144, 0, 103}, + dictWord{5, 0, 82}, + dictWord{5, 0, 131}, + dictWord{ + 7, + 0, + 1755, + }, + dictWord{8, 0, 31}, + dictWord{9, 0, 168}, + dictWord{9, 0, 764}, + dictWord{139, 0, 869}, + dictWord{136, 10, 475}, + dictWord{6, 0, 605}, + dictWord{ + 5, + 10, + 1016, + }, + dictWord{9, 11, 601}, + dictWord{9, 11, 619}, + dictWord{10, 11, 505}, + dictWord{10, 11, 732}, + dictWord{11, 11, 355}, + dictWord{140, 11, 139}, + dictWord{ + 7, + 10, + 602, + }, + dictWord{8, 10, 179}, + dictWord{10, 10, 781}, + dictWord{140, 10, 126}, + dictWord{134, 0, 1246}, + dictWord{6, 10, 329}, + dictWord{138, 10, 111}, + dictWord{6, 11, 215}, + dictWord{7, 11, 1028}, + dictWord{7, 11, 1473}, + dictWord{7, 11, 1721}, + dictWord{9, 11, 424}, + dictWord{138, 11, 779}, + dictWord{5, 0, 278}, + dictWord{137, 0, 68}, + dictWord{6, 0, 932}, + dictWord{6, 0, 1084}, + dictWord{144, 0, 86}, + dictWord{4, 0, 163}, + dictWord{5, 0, 201}, + dictWord{5, 0, 307}, + dictWord{ + 5, + 0, + 310, + }, + dictWord{6, 0, 335}, + dictWord{7, 0, 284}, + dictWord{7, 0, 1660}, + dictWord{136, 0, 165}, + dictWord{136, 0, 781}, + dictWord{134, 0, 707}, + dictWord{6, 0, 33}, + dictWord{135, 0, 1244}, + dictWord{5, 10, 821}, + dictWord{6, 11, 67}, + dictWord{6, 10, 1687}, + dictWord{7, 11, 258}, + dictWord{7, 11, 1630}, + dictWord{9, 11, 354}, + dictWord{9, 11, 675}, + dictWord{10, 11, 830}, + dictWord{14, 11, 80}, + dictWord{145, 11, 80}, + dictWord{6, 11, 141}, + dictWord{7, 11, 225}, + dictWord{9, 11, 59}, + dictWord{9, 11, 607}, + dictWord{10, 11, 312}, + dictWord{11, 11, 687}, + dictWord{12, 11, 555}, + dictWord{13, 11, 373}, + dictWord{13, 11, 494}, + dictWord{148, 11, 58}, + 
dictWord{134, 0, 1113}, + dictWord{9, 0, 388}, + dictWord{5, 10, 71}, + dictWord{7, 10, 1407}, + dictWord{9, 10, 704}, + dictWord{10, 10, 261}, + dictWord{10, 10, 619}, + dictWord{11, 10, 547}, + dictWord{11, 10, 619}, + dictWord{143, 10, 157}, + dictWord{7, 0, 1953}, + dictWord{136, 0, 720}, + dictWord{138, 0, 203}, + dictWord{ + 7, + 10, + 2008, + }, + dictWord{9, 10, 337}, + dictWord{138, 10, 517}, + dictWord{6, 0, 326}, + dictWord{7, 0, 677}, + dictWord{137, 0, 425}, + dictWord{139, 11, 81}, + dictWord{ + 7, + 0, + 1316, + }, + dictWord{7, 0, 1412}, + dictWord{7, 0, 1839}, + dictWord{9, 0, 589}, + dictWord{11, 0, 241}, + dictWord{11, 0, 676}, + dictWord{11, 0, 811}, + dictWord{11, 0, 891}, + dictWord{12, 0, 140}, + dictWord{12, 0, 346}, + dictWord{12, 0, 479}, + dictWord{13, 0, 140}, + dictWord{13, 0, 381}, + dictWord{14, 0, 188}, + dictWord{18, 0, 30}, + dictWord{148, 0, 108}, + dictWord{5, 0, 416}, + dictWord{6, 10, 86}, + dictWord{6, 10, 603}, + dictWord{7, 10, 292}, + dictWord{7, 10, 561}, + dictWord{8, 10, 257}, + dictWord{ + 8, + 10, + 382, + }, + dictWord{9, 10, 721}, + dictWord{9, 10, 778}, + dictWord{11, 10, 581}, + dictWord{140, 10, 466}, + dictWord{4, 10, 486}, + dictWord{133, 10, 491}, + dictWord{134, 0, 1300}, + dictWord{132, 10, 72}, + dictWord{7, 0, 847}, + dictWord{6, 10, 265}, + dictWord{7, 11, 430}, + dictWord{139, 11, 46}, + dictWord{5, 11, 602}, + dictWord{6, 11, 106}, + dictWord{7, 11, 1786}, + dictWord{7, 11, 1821}, + dictWord{7, 11, 2018}, + dictWord{9, 11, 418}, + dictWord{137, 11, 763}, + dictWord{5, 0, 358}, + dictWord{7, 0, 535}, + dictWord{7, 0, 1184}, + dictWord{10, 0, 662}, + dictWord{13, 0, 212}, + dictWord{13, 0, 304}, + dictWord{13, 0, 333}, + dictWord{145, 0, 98}, + dictWord{ + 5, + 11, + 65, + }, + dictWord{6, 11, 416}, + dictWord{7, 11, 1720}, + dictWord{7, 11, 1924}, + dictWord{8, 11, 677}, + dictWord{10, 11, 109}, + dictWord{11, 11, 14}, + dictWord{ + 11, + 11, + 70, + }, + dictWord{11, 11, 569}, + dictWord{11, 11, 735}, + dictWord{15, 11, 153}, + dictWord{148, 11, 80}, + dictWord{6, 0, 1823}, + dictWord{8, 0, 839}, + dictWord{ + 8, + 0, + 852, + }, + dictWord{8, 0, 903}, + dictWord{10, 0, 940}, + dictWord{12, 0, 707}, + dictWord{140, 0, 775}, + dictWord{135, 11, 1229}, + dictWord{6, 0, 1522}, + dictWord{ + 140, + 0, + 654, + }, + dictWord{136, 11, 595}, + dictWord{139, 0, 163}, + dictWord{141, 0, 314}, + dictWord{132, 0, 978}, + dictWord{4, 0, 601}, + dictWord{6, 0, 2035}, + dictWord{137, 10, 234}, + dictWord{5, 10, 815}, + dictWord{6, 10, 1688}, + dictWord{134, 10, 1755}, + dictWord{133, 0, 946}, + dictWord{136, 0, 434}, + dictWord{ + 6, + 10, + 197, + }, + dictWord{136, 10, 205}, + dictWord{7, 0, 411}, + dictWord{7, 0, 590}, + dictWord{8, 0, 631}, + dictWord{9, 0, 323}, + dictWord{10, 0, 355}, + dictWord{11, 0, 491}, + dictWord{12, 0, 143}, + dictWord{12, 0, 402}, + dictWord{13, 0, 73}, + dictWord{14, 0, 408}, + dictWord{15, 0, 107}, + dictWord{146, 0, 71}, + dictWord{7, 0, 1467}, + dictWord{ + 8, + 0, + 328, + }, + dictWord{10, 0, 544}, + dictWord{11, 0, 955}, + dictWord{12, 0, 13}, + dictWord{13, 0, 320}, + dictWord{145, 0, 83}, + dictWord{142, 0, 410}, + dictWord{ + 11, + 0, + 511, + }, + dictWord{13, 0, 394}, + dictWord{14, 0, 298}, + dictWord{14, 0, 318}, + dictWord{146, 0, 103}, + dictWord{6, 10, 452}, + dictWord{7, 10, 312}, + dictWord{ + 138, + 10, + 219, + }, + dictWord{138, 10, 589}, + dictWord{4, 10, 333}, + dictWord{9, 10, 176}, + dictWord{12, 10, 353}, + dictWord{141, 10, 187}, + dictWord{135, 11, 329}, + dictWord{132, 11, 469}, + 
dictWord{5, 0, 835}, + dictWord{134, 0, 483}, + dictWord{134, 11, 1743}, + dictWord{5, 11, 929}, + dictWord{6, 11, 340}, + dictWord{8, 11, 376}, + dictWord{136, 11, 807}, + dictWord{134, 10, 1685}, + dictWord{132, 0, 677}, + dictWord{5, 11, 218}, + dictWord{7, 11, 1610}, + dictWord{138, 11, 83}, + dictWord{ + 5, + 11, + 571, + }, + dictWord{135, 11, 1842}, + dictWord{132, 11, 455}, + dictWord{137, 0, 70}, + dictWord{135, 0, 1405}, + dictWord{7, 10, 135}, + dictWord{8, 10, 7}, + dictWord{ + 8, + 10, + 62, + }, + dictWord{9, 10, 243}, + dictWord{10, 10, 658}, + dictWord{10, 10, 697}, + dictWord{11, 10, 456}, + dictWord{139, 10, 756}, + dictWord{9, 10, 395}, + dictWord{138, 10, 79}, + dictWord{137, 0, 108}, + dictWord{6, 11, 161}, + dictWord{7, 11, 372}, + dictWord{137, 11, 597}, + dictWord{132, 11, 349}, + dictWord{ + 132, + 0, + 777, + }, + dictWord{132, 0, 331}, + dictWord{135, 10, 631}, + dictWord{133, 0, 747}, + dictWord{6, 11, 432}, + dictWord{6, 11, 608}, + dictWord{139, 11, 322}, + dictWord{138, 10, 835}, + dictWord{5, 11, 468}, + dictWord{7, 11, 1809}, + dictWord{10, 11, 325}, + dictWord{11, 11, 856}, + dictWord{12, 11, 345}, + dictWord{ + 143, + 11, + 104, + }, + dictWord{133, 11, 223}, + dictWord{7, 10, 406}, + dictWord{7, 10, 459}, + dictWord{8, 10, 606}, + dictWord{139, 10, 726}, + dictWord{132, 11, 566}, + dictWord{142, 0, 68}, + dictWord{4, 11, 59}, + dictWord{135, 11, 1394}, + dictWord{6, 11, 436}, + dictWord{139, 11, 481}, + dictWord{4, 11, 48}, + dictWord{5, 11, 271}, + dictWord{135, 11, 953}, + dictWord{139, 11, 170}, + dictWord{5, 11, 610}, + dictWord{136, 11, 457}, + dictWord{133, 11, 755}, + dictWord{135, 11, 1217}, + dictWord{ + 133, + 10, + 612, + }, + dictWord{132, 11, 197}, + dictWord{132, 0, 505}, + dictWord{4, 10, 372}, + dictWord{7, 10, 482}, + dictWord{8, 10, 158}, + dictWord{9, 10, 602}, + dictWord{ + 9, + 10, + 615, + }, + dictWord{10, 10, 245}, + dictWord{10, 10, 678}, + dictWord{10, 10, 744}, + dictWord{11, 10, 248}, + dictWord{139, 10, 806}, + dictWord{133, 0, 326}, + dictWord{5, 10, 854}, + dictWord{135, 10, 1991}, + dictWord{4, 0, 691}, + dictWord{146, 0, 16}, + dictWord{6, 0, 628}, + dictWord{9, 0, 35}, + dictWord{10, 0, 680}, + dictWord{10, 0, 793}, + dictWord{11, 0, 364}, + dictWord{13, 0, 357}, + dictWord{143, 0, 164}, + dictWord{138, 0, 654}, + dictWord{6, 0, 32}, + dictWord{7, 0, 385}, + dictWord{ + 7, + 0, + 757, + }, + dictWord{7, 0, 1916}, + dictWord{8, 0, 37}, + dictWord{8, 0, 94}, + dictWord{8, 0, 711}, + dictWord{9, 0, 541}, + dictWord{10, 0, 162}, + dictWord{10, 0, 795}, + dictWord{ + 11, + 0, + 989, + }, + dictWord{11, 0, 1010}, + dictWord{12, 0, 14}, + dictWord{142, 0, 308}, + dictWord{133, 11, 217}, + dictWord{6, 0, 152}, + dictWord{6, 0, 349}, + dictWord{ + 6, + 0, + 1682, + }, + dictWord{7, 0, 1252}, + dictWord{8, 0, 112}, + dictWord{9, 0, 435}, + dictWord{9, 0, 668}, + dictWord{10, 0, 290}, + dictWord{10, 0, 319}, + dictWord{10, 0, 815}, + dictWord{11, 0, 180}, + dictWord{11, 0, 837}, + dictWord{12, 0, 240}, + dictWord{13, 0, 152}, + dictWord{13, 0, 219}, + dictWord{142, 0, 158}, + dictWord{4, 0, 581}, + dictWord{134, 0, 726}, + dictWord{5, 10, 195}, + dictWord{135, 10, 1685}, + dictWord{6, 0, 126}, + dictWord{7, 0, 573}, + dictWord{8, 0, 397}, + dictWord{142, 0, 44}, + dictWord{138, 0, 89}, + dictWord{7, 10, 1997}, + dictWord{8, 10, 730}, + dictWord{139, 10, 1006}, + dictWord{134, 0, 1531}, + dictWord{134, 0, 1167}, + dictWord{ + 5, + 0, + 926, + }, + dictWord{12, 0, 203}, + dictWord{133, 10, 751}, + dictWord{4, 11, 165}, + dictWord{7, 
11, 1398}, + dictWord{135, 11, 1829}, + dictWord{7, 0, 1232}, + dictWord{137, 0, 531}, + dictWord{135, 10, 821}, + dictWord{134, 0, 943}, + dictWord{133, 0, 670}, + dictWord{4, 0, 880}, + dictWord{139, 0, 231}, + dictWord{ + 134, + 0, + 1617, + }, + dictWord{135, 0, 1957}, + dictWord{5, 11, 9}, + dictWord{7, 11, 297}, + dictWord{7, 11, 966}, + dictWord{140, 11, 306}, + dictWord{6, 0, 975}, + dictWord{ + 134, + 0, + 985, + }, + dictWord{5, 10, 950}, + dictWord{5, 10, 994}, + dictWord{134, 10, 351}, + dictWord{12, 11, 21}, + dictWord{151, 11, 7}, + dictWord{5, 11, 146}, + dictWord{ + 6, + 11, + 411, + }, + dictWord{138, 11, 721}, + dictWord{7, 0, 242}, + dictWord{135, 0, 1942}, + dictWord{6, 11, 177}, + dictWord{135, 11, 467}, + dictWord{5, 0, 421}, + dictWord{ + 7, + 10, + 47, + }, + dictWord{137, 10, 684}, + dictWord{5, 0, 834}, + dictWord{7, 0, 1202}, + dictWord{8, 0, 14}, + dictWord{9, 0, 481}, + dictWord{137, 0, 880}, + dictWord{138, 0, 465}, + dictWord{6, 0, 688}, + dictWord{9, 0, 834}, + dictWord{132, 10, 350}, + dictWord{132, 0, 855}, + dictWord{4, 0, 357}, + dictWord{6, 0, 172}, + dictWord{7, 0, 143}, + dictWord{137, 0, 413}, + dictWord{133, 11, 200}, + dictWord{132, 0, 590}, + dictWord{7, 10, 1812}, + dictWord{13, 10, 259}, + dictWord{13, 10, 356}, + dictWord{ + 14, + 10, + 242, + }, + dictWord{147, 10, 114}, + dictWord{133, 10, 967}, + dictWord{11, 0, 114}, + dictWord{4, 10, 473}, + dictWord{7, 10, 623}, + dictWord{8, 10, 808}, + dictWord{ + 9, + 10, + 871, + }, + dictWord{9, 10, 893}, + dictWord{11, 10, 431}, + dictWord{12, 10, 112}, + dictWord{12, 10, 217}, + dictWord{12, 10, 243}, + dictWord{12, 10, 562}, + dictWord{ + 12, + 10, + 663, + }, + dictWord{12, 10, 683}, + dictWord{13, 10, 141}, + dictWord{13, 10, 197}, + dictWord{13, 10, 227}, + dictWord{13, 10, 406}, + dictWord{13, 10, 487}, + dictWord{14, 10, 156}, + dictWord{14, 10, 203}, + dictWord{14, 10, 224}, + dictWord{14, 10, 256}, + dictWord{18, 10, 58}, + dictWord{150, 10, 0}, + dictWord{ + 138, + 10, + 286, + }, + dictWord{4, 10, 222}, + dictWord{7, 10, 286}, + dictWord{136, 10, 629}, + dictWord{5, 0, 169}, + dictWord{7, 0, 333}, + dictWord{136, 0, 45}, + dictWord{ + 134, + 11, + 481, + }, + dictWord{132, 0, 198}, + dictWord{4, 0, 24}, + dictWord{5, 0, 140}, + dictWord{5, 0, 185}, + dictWord{7, 0, 1500}, + dictWord{11, 0, 565}, + dictWord{11, 0, 838}, + dictWord{4, 11, 84}, + dictWord{7, 11, 1482}, + dictWord{10, 11, 76}, + dictWord{138, 11, 142}, + dictWord{133, 0, 585}, + dictWord{141, 10, 306}, + dictWord{ + 133, + 11, + 1015, + }, + dictWord{4, 11, 315}, + dictWord{5, 11, 507}, + dictWord{135, 11, 1370}, + dictWord{136, 10, 146}, + dictWord{6, 0, 691}, + dictWord{134, 0, 1503}, + dictWord{ + 4, + 0, + 334, + }, + dictWord{133, 0, 593}, + dictWord{4, 10, 465}, + dictWord{135, 10, 1663}, + dictWord{142, 11, 173}, + dictWord{135, 0, 913}, + dictWord{12, 0, 116}, + dictWord{134, 11, 1722}, + dictWord{134, 0, 1360}, + dictWord{132, 0, 802}, + dictWord{8, 11, 222}, + dictWord{8, 11, 476}, + dictWord{9, 11, 238}, + dictWord{ + 11, + 11, + 516, + }, + dictWord{11, 11, 575}, + dictWord{15, 11, 109}, + dictWord{146, 11, 100}, + dictWord{6, 0, 308}, + dictWord{9, 0, 673}, + dictWord{7, 10, 138}, + dictWord{ + 7, + 10, + 517, + }, + dictWord{139, 10, 238}, + dictWord{132, 0, 709}, + dictWord{6, 0, 1876}, + dictWord{6, 0, 1895}, + dictWord{9, 0, 994}, + dictWord{9, 0, 1006}, + dictWord{ + 12, + 0, + 829, + }, + dictWord{12, 0, 888}, + dictWord{12, 0, 891}, + dictWord{146, 0, 185}, + dictWord{148, 10, 94}, + dictWord{4, 0, 228}, + 
dictWord{133, 0, 897}, + dictWord{ + 7, + 0, + 1840, + }, + dictWord{5, 10, 495}, + dictWord{7, 10, 834}, + dictWord{9, 10, 733}, + dictWord{139, 10, 378}, + dictWord{133, 10, 559}, + dictWord{6, 10, 21}, + dictWord{ + 6, + 10, + 1737, + }, + dictWord{7, 10, 1444}, + dictWord{136, 10, 224}, + dictWord{4, 0, 608}, + dictWord{133, 0, 497}, + dictWord{6, 11, 40}, + dictWord{135, 11, 1781}, + dictWord{134, 0, 1573}, + dictWord{135, 0, 2039}, + dictWord{6, 0, 540}, + dictWord{136, 0, 136}, + dictWord{4, 0, 897}, + dictWord{5, 0, 786}, + dictWord{133, 10, 519}, + dictWord{6, 0, 1878}, + dictWord{6, 0, 1884}, + dictWord{9, 0, 938}, + dictWord{9, 0, 948}, + dictWord{9, 0, 955}, + dictWord{9, 0, 973}, + dictWord{9, 0, 1012}, + dictWord{ + 12, + 0, + 895, + }, + dictWord{12, 0, 927}, + dictWord{143, 0, 254}, + dictWord{134, 0, 1469}, + dictWord{133, 0, 999}, + dictWord{4, 0, 299}, + dictWord{135, 0, 1004}, + dictWord{ + 4, + 0, + 745, + }, + dictWord{133, 0, 578}, + dictWord{136, 11, 574}, + dictWord{133, 0, 456}, + dictWord{134, 0, 1457}, + dictWord{7, 0, 1679}, + dictWord{132, 10, 402}, + dictWord{7, 0, 693}, + dictWord{8, 0, 180}, + dictWord{12, 0, 163}, + dictWord{8, 10, 323}, + dictWord{136, 10, 479}, + dictWord{11, 10, 580}, + dictWord{142, 10, 201}, + dictWord{5, 10, 59}, + dictWord{135, 10, 672}, + dictWord{132, 11, 354}, + dictWord{146, 10, 34}, + dictWord{4, 0, 755}, + dictWord{135, 11, 1558}, + dictWord{ + 7, + 0, + 1740, + }, + dictWord{146, 0, 48}, + dictWord{4, 10, 85}, + dictWord{135, 10, 549}, + dictWord{139, 0, 338}, + dictWord{133, 10, 94}, + dictWord{134, 0, 1091}, + dictWord{135, 11, 469}, + dictWord{12, 0, 695}, + dictWord{12, 0, 704}, + dictWord{20, 0, 113}, + dictWord{5, 11, 830}, + dictWord{14, 11, 338}, + dictWord{148, 11, 81}, + dictWord{135, 0, 1464}, + dictWord{6, 10, 11}, + dictWord{135, 10, 187}, + dictWord{135, 0, 975}, + dictWord{13, 0, 335}, + dictWord{132, 10, 522}, + dictWord{ + 134, + 0, + 1979, + }, + dictWord{5, 11, 496}, + dictWord{135, 11, 203}, + dictWord{4, 10, 52}, + dictWord{135, 10, 661}, + dictWord{7, 0, 1566}, + dictWord{8, 0, 269}, + dictWord{ + 9, + 0, + 212, + }, + dictWord{9, 0, 718}, + dictWord{14, 0, 15}, + dictWord{14, 0, 132}, + dictWord{142, 0, 227}, + dictWord{4, 0, 890}, + dictWord{5, 0, 805}, + dictWord{5, 0, 819}, + dictWord{ + 5, + 0, + 961, + }, + dictWord{6, 0, 396}, + dictWord{6, 0, 1631}, + dictWord{6, 0, 1678}, + dictWord{7, 0, 1967}, + dictWord{7, 0, 2041}, + dictWord{9, 0, 630}, + dictWord{11, 0, 8}, + dictWord{11, 0, 1019}, + dictWord{12, 0, 176}, + dictWord{13, 0, 225}, + dictWord{14, 0, 292}, + dictWord{21, 0, 24}, + dictWord{4, 10, 383}, + dictWord{133, 10, 520}, + dictWord{134, 11, 547}, + dictWord{135, 11, 1748}, + dictWord{5, 11, 88}, + dictWord{137, 11, 239}, + dictWord{146, 11, 128}, + dictWord{7, 11, 650}, + dictWord{ + 135, + 11, + 1310, + }, + dictWord{4, 10, 281}, + dictWord{5, 10, 38}, + dictWord{7, 10, 194}, + dictWord{7, 10, 668}, + dictWord{7, 10, 1893}, + dictWord{137, 10, 397}, + dictWord{135, 0, 1815}, + dictWord{9, 10, 635}, + dictWord{139, 10, 559}, + dictWord{7, 0, 1505}, + dictWord{10, 0, 190}, + dictWord{10, 0, 634}, + dictWord{11, 0, 792}, + dictWord{12, 0, 358}, + dictWord{140, 0, 447}, + dictWord{5, 0, 0}, + dictWord{6, 0, 536}, + dictWord{7, 0, 604}, + dictWord{13, 0, 445}, + dictWord{145, 0, 126}, + dictWord{ + 7, + 11, + 1076, + }, + dictWord{9, 11, 80}, + dictWord{11, 11, 78}, + dictWord{11, 11, 421}, + dictWord{11, 11, 534}, + dictWord{140, 11, 545}, + dictWord{8, 0, 966}, + dictWord{ + 10, + 0, + 
1023, + }, + dictWord{14, 11, 369}, + dictWord{146, 11, 72}, + dictWord{135, 11, 1641}, + dictWord{6, 0, 232}, + dictWord{6, 0, 412}, + dictWord{7, 0, 1074}, + dictWord{ + 8, + 0, + 9, + }, + dictWord{8, 0, 157}, + dictWord{8, 0, 786}, + dictWord{9, 0, 196}, + dictWord{9, 0, 352}, + dictWord{9, 0, 457}, + dictWord{10, 0, 337}, + dictWord{11, 0, 232}, + dictWord{ + 11, + 0, + 877, + }, + dictWord{12, 0, 480}, + dictWord{140, 0, 546}, + dictWord{135, 0, 958}, + dictWord{4, 0, 382}, + dictWord{136, 0, 579}, + dictWord{4, 0, 212}, + dictWord{ + 135, + 0, + 1206, + }, + dictWord{4, 11, 497}, + dictWord{5, 11, 657}, + dictWord{135, 11, 1584}, + dictWord{132, 0, 681}, + dictWord{8, 0, 971}, + dictWord{138, 0, 965}, + dictWord{ + 5, + 10, + 448, + }, + dictWord{136, 10, 535}, + dictWord{14, 0, 16}, + dictWord{146, 0, 44}, + dictWord{11, 0, 584}, + dictWord{11, 0, 616}, + dictWord{14, 0, 275}, + dictWord{ + 11, + 11, + 584, + }, + dictWord{11, 11, 616}, + dictWord{142, 11, 275}, + dictWord{136, 11, 13}, + dictWord{7, 10, 610}, + dictWord{135, 10, 1501}, + dictWord{7, 11, 642}, + dictWord{8, 11, 250}, + dictWord{11, 11, 123}, + dictWord{11, 11, 137}, + dictWord{13, 11, 48}, + dictWord{142, 11, 95}, + dictWord{133, 0, 655}, + dictWord{17, 0, 67}, + dictWord{147, 0, 74}, + dictWord{134, 0, 751}, + dictWord{134, 0, 1967}, + dictWord{6, 0, 231}, + dictWord{136, 0, 423}, + dictWord{5, 0, 300}, + dictWord{138, 0, 1016}, + dictWord{4, 10, 319}, + dictWord{5, 10, 699}, + dictWord{138, 10, 673}, + dictWord{6, 0, 237}, + dictWord{7, 0, 611}, + dictWord{8, 0, 100}, + dictWord{9, 0, 416}, + dictWord{ + 11, + 0, + 335, + }, + dictWord{12, 0, 173}, + dictWord{18, 0, 101}, + dictWord{6, 10, 336}, + dictWord{8, 10, 552}, + dictWord{9, 10, 285}, + dictWord{10, 10, 99}, + dictWord{ + 139, + 10, + 568, + }, + dictWord{134, 0, 1370}, + dictWord{7, 10, 1406}, + dictWord{9, 10, 218}, + dictWord{141, 10, 222}, + dictWord{133, 10, 256}, + dictWord{ + 135, + 0, + 1208, + }, + dictWord{14, 11, 213}, + dictWord{148, 11, 38}, + dictWord{6, 0, 1219}, + dictWord{135, 11, 1642}, + dictWord{13, 0, 417}, + dictWord{14, 0, 129}, + dictWord{143, 0, 15}, + dictWord{10, 11, 545}, + dictWord{140, 11, 301}, + dictWord{17, 10, 39}, + dictWord{148, 10, 36}, + dictWord{133, 0, 199}, + dictWord{4, 11, 904}, + dictWord{133, 11, 794}, + dictWord{12, 0, 427}, + dictWord{146, 0, 38}, + dictWord{134, 0, 949}, + dictWord{8, 0, 665}, + dictWord{135, 10, 634}, + dictWord{ + 132, + 10, + 618, + }, + dictWord{135, 10, 259}, + dictWord{132, 10, 339}, + dictWord{133, 11, 761}, + dictWord{141, 10, 169}, + dictWord{132, 10, 759}, + dictWord{5, 0, 688}, + dictWord{7, 0, 539}, + dictWord{135, 0, 712}, + dictWord{7, 11, 386}, + dictWord{138, 11, 713}, + dictWord{134, 0, 1186}, + dictWord{6, 11, 7}, + dictWord{6, 11, 35}, + dictWord{ + 7, + 11, + 147, + }, + dictWord{7, 11, 1069}, + dictWord{7, 11, 1568}, + dictWord{7, 11, 1575}, + dictWord{7, 11, 1917}, + dictWord{8, 11, 43}, + dictWord{8, 11, 208}, + dictWord{ + 9, + 11, + 128, + }, + dictWord{9, 11, 866}, + dictWord{10, 11, 20}, + dictWord{11, 11, 981}, + dictWord{147, 11, 33}, + dictWord{7, 11, 893}, + dictWord{8, 10, 482}, + dictWord{141, 11, 424}, + dictWord{6, 0, 312}, + dictWord{6, 0, 1715}, + dictWord{10, 0, 584}, + dictWord{11, 0, 546}, + dictWord{11, 0, 692}, + dictWord{12, 0, 259}, + dictWord{ + 12, + 0, + 295, + }, + dictWord{13, 0, 46}, + dictWord{141, 0, 154}, + dictWord{5, 10, 336}, + dictWord{6, 10, 341}, + dictWord{6, 10, 478}, + dictWord{6, 10, 1763}, + dictWord{ + 136, + 10, + 386, + }, + 
dictWord{137, 0, 151}, + dictWord{132, 0, 588}, + dictWord{152, 0, 4}, + dictWord{6, 11, 322}, + dictWord{9, 11, 552}, + dictWord{11, 11, 274}, + dictWord{ + 13, + 11, + 209, + }, + dictWord{13, 11, 499}, + dictWord{14, 11, 85}, + dictWord{15, 11, 126}, + dictWord{145, 11, 70}, + dictWord{135, 10, 73}, + dictWord{4, 0, 231}, + dictWord{ + 5, + 0, + 61, + }, + dictWord{6, 0, 104}, + dictWord{7, 0, 729}, + dictWord{7, 0, 964}, + dictWord{7, 0, 1658}, + dictWord{140, 0, 414}, + dictWord{6, 0, 263}, + dictWord{138, 0, 757}, + dictWord{135, 10, 1971}, + dictWord{4, 0, 612}, + dictWord{133, 0, 561}, + dictWord{132, 0, 320}, + dictWord{135, 10, 1344}, + dictWord{8, 11, 83}, + dictWord{ + 8, + 11, + 817, + }, + dictWord{9, 11, 28}, + dictWord{9, 11, 29}, + dictWord{9, 11, 885}, + dictWord{10, 11, 387}, + dictWord{11, 11, 633}, + dictWord{11, 11, 740}, + dictWord{ + 13, + 11, + 235, + }, + dictWord{13, 11, 254}, + dictWord{15, 11, 143}, + dictWord{143, 11, 146}, + dictWord{5, 10, 396}, + dictWord{134, 10, 501}, + dictWord{140, 11, 49}, + dictWord{132, 0, 225}, + dictWord{4, 10, 929}, + dictWord{5, 10, 799}, + dictWord{8, 10, 46}, + dictWord{136, 10, 740}, + dictWord{4, 0, 405}, + dictWord{7, 0, 817}, + dictWord{ + 14, + 0, + 58, + }, + dictWord{17, 0, 37}, + dictWord{146, 0, 124}, + dictWord{133, 0, 974}, + dictWord{4, 11, 412}, + dictWord{133, 11, 581}, + dictWord{4, 10, 892}, + dictWord{ + 133, + 10, + 770, + }, + dictWord{4, 0, 996}, + dictWord{134, 0, 2026}, + dictWord{4, 0, 527}, + dictWord{5, 0, 235}, + dictWord{7, 0, 1239}, + dictWord{11, 0, 131}, + dictWord{ + 140, + 0, + 370, + }, + dictWord{9, 0, 16}, + dictWord{13, 0, 386}, + dictWord{135, 11, 421}, + dictWord{7, 0, 956}, + dictWord{7, 0, 1157}, + dictWord{7, 0, 1506}, + dictWord{7, 0, 1606}, + dictWord{7, 0, 1615}, + dictWord{7, 0, 1619}, + dictWord{7, 0, 1736}, + dictWord{7, 0, 1775}, + dictWord{8, 0, 590}, + dictWord{9, 0, 324}, + dictWord{9, 0, 736}, + dictWord{ + 9, + 0, + 774, + }, + dictWord{9, 0, 776}, + dictWord{9, 0, 784}, + dictWord{10, 0, 567}, + dictWord{10, 0, 708}, + dictWord{11, 0, 518}, + dictWord{11, 0, 613}, + dictWord{11, 0, 695}, + dictWord{11, 0, 716}, + dictWord{11, 0, 739}, + dictWord{11, 0, 770}, + dictWord{11, 0, 771}, + dictWord{11, 0, 848}, + dictWord{11, 0, 857}, + dictWord{11, 0, 931}, + dictWord{ + 11, + 0, + 947, + }, + dictWord{12, 0, 326}, + dictWord{12, 0, 387}, + dictWord{12, 0, 484}, + dictWord{12, 0, 528}, + dictWord{12, 0, 552}, + dictWord{12, 0, 613}, + dictWord{ + 13, + 0, + 189, + }, + dictWord{13, 0, 256}, + dictWord{13, 0, 340}, + dictWord{13, 0, 432}, + dictWord{13, 0, 436}, + dictWord{13, 0, 440}, + dictWord{13, 0, 454}, + dictWord{14, 0, 174}, + dictWord{14, 0, 220}, + dictWord{14, 0, 284}, + dictWord{14, 0, 390}, + dictWord{145, 0, 121}, + dictWord{135, 10, 158}, + dictWord{9, 0, 137}, + dictWord{138, 0, 221}, + dictWord{4, 11, 110}, + dictWord{10, 11, 415}, + dictWord{10, 11, 597}, + dictWord{142, 11, 206}, + dictWord{141, 11, 496}, + dictWord{135, 11, 205}, + dictWord{ + 151, + 10, + 25, + }, + dictWord{135, 11, 778}, + dictWord{7, 11, 1656}, + dictWord{7, 10, 2001}, + dictWord{9, 11, 369}, + dictWord{10, 11, 338}, + dictWord{10, 11, 490}, + dictWord{11, 11, 154}, + dictWord{11, 11, 545}, + dictWord{11, 11, 775}, + dictWord{13, 11, 77}, + dictWord{141, 11, 274}, + dictWord{4, 11, 444}, + dictWord{ + 10, + 11, + 146, + }, + dictWord{140, 11, 9}, + dictWord{7, 0, 390}, + dictWord{138, 0, 140}, + dictWord{135, 0, 1144}, + dictWord{134, 0, 464}, + dictWord{7, 10, 1461}, + dictWord{ + 140, + 
10, + 91, + }, + dictWord{132, 10, 602}, + dictWord{4, 11, 283}, + dictWord{135, 11, 1194}, + dictWord{5, 0, 407}, + dictWord{11, 0, 204}, + dictWord{11, 0, 243}, + dictWord{ + 11, + 0, + 489, + }, + dictWord{12, 0, 293}, + dictWord{19, 0, 37}, + dictWord{20, 0, 73}, + dictWord{150, 0, 38}, + dictWord{7, 0, 1218}, + dictWord{136, 0, 303}, + dictWord{ + 5, + 0, + 325, + }, + dictWord{8, 0, 5}, + dictWord{8, 0, 227}, + dictWord{9, 0, 105}, + dictWord{10, 0, 585}, + dictWord{12, 0, 614}, + dictWord{4, 10, 13}, + dictWord{5, 10, 567}, + dictWord{ + 7, + 10, + 1498, + }, + dictWord{9, 10, 124}, + dictWord{11, 10, 521}, + dictWord{140, 10, 405}, + dictWord{135, 10, 1006}, + dictWord{7, 0, 800}, + dictWord{10, 0, 12}, + dictWord{134, 11, 1720}, + dictWord{135, 0, 1783}, + dictWord{132, 10, 735}, + dictWord{138, 10, 812}, + dictWord{4, 10, 170}, + dictWord{135, 10, 323}, + dictWord{ + 6, + 0, + 621, + }, + dictWord{13, 0, 504}, + dictWord{144, 0, 89}, + dictWord{5, 10, 304}, + dictWord{135, 10, 1403}, + dictWord{137, 11, 216}, + dictWord{6, 0, 920}, + dictWord{ + 6, + 0, + 1104, + }, + dictWord{9, 11, 183}, + dictWord{139, 11, 286}, + dictWord{4, 0, 376}, + dictWord{133, 10, 742}, + dictWord{134, 0, 218}, + dictWord{8, 0, 641}, + dictWord{ + 11, + 0, + 388, + }, + dictWord{140, 0, 580}, + dictWord{7, 0, 454}, + dictWord{7, 0, 782}, + dictWord{8, 0, 768}, + dictWord{140, 0, 686}, + dictWord{137, 11, 33}, + dictWord{ + 133, + 10, + 111, + }, + dictWord{144, 0, 0}, + dictWord{10, 0, 676}, + dictWord{140, 0, 462}, + dictWord{6, 0, 164}, + dictWord{136, 11, 735}, + dictWord{133, 10, 444}, + dictWord{ + 150, + 0, + 50, + }, + dictWord{7, 11, 1862}, + dictWord{12, 11, 491}, + dictWord{12, 11, 520}, + dictWord{13, 11, 383}, + dictWord{14, 11, 244}, + dictWord{146, 11, 12}, + dictWord{ + 5, + 11, + 132, + }, + dictWord{9, 11, 486}, + dictWord{9, 11, 715}, + dictWord{10, 11, 458}, + dictWord{11, 11, 373}, + dictWord{11, 11, 668}, + dictWord{11, 11, 795}, + dictWord{11, 11, 897}, + dictWord{12, 11, 272}, + dictWord{12, 11, 424}, + dictWord{12, 11, 539}, + dictWord{12, 11, 558}, + dictWord{14, 11, 245}, + dictWord{ + 14, + 11, + 263, + }, + dictWord{14, 11, 264}, + dictWord{14, 11, 393}, + dictWord{142, 11, 403}, + dictWord{8, 10, 123}, + dictWord{15, 10, 6}, + dictWord{144, 10, 7}, + dictWord{ + 6, + 0, + 285, + }, + dictWord{8, 0, 654}, + dictWord{11, 0, 749}, + dictWord{12, 0, 190}, + dictWord{12, 0, 327}, + dictWord{13, 0, 120}, + dictWord{13, 0, 121}, + dictWord{13, 0, 327}, + dictWord{15, 0, 47}, + dictWord{146, 0, 40}, + dictWord{5, 11, 8}, + dictWord{6, 11, 89}, + dictWord{6, 11, 400}, + dictWord{7, 11, 1569}, + dictWord{7, 11, 1623}, + dictWord{ + 7, + 11, + 1850, + }, + dictWord{8, 11, 218}, + dictWord{8, 11, 422}, + dictWord{9, 11, 570}, + dictWord{138, 11, 626}, + dictWord{6, 11, 387}, + dictWord{7, 11, 882}, + dictWord{141, 11, 111}, + dictWord{6, 0, 343}, + dictWord{7, 0, 195}, + dictWord{9, 0, 226}, + dictWord{10, 0, 197}, + dictWord{10, 0, 575}, + dictWord{11, 0, 502}, + dictWord{ + 11, + 0, + 899, + }, + dictWord{6, 11, 224}, + dictWord{7, 11, 877}, + dictWord{137, 11, 647}, + dictWord{5, 10, 937}, + dictWord{135, 10, 100}, + dictWord{135, 11, 790}, + dictWord{150, 0, 29}, + dictWord{147, 0, 8}, + dictWord{134, 0, 1812}, + dictWord{149, 0, 8}, + dictWord{135, 11, 394}, + dictWord{7, 0, 1125}, + dictWord{9, 0, 143}, + dictWord{ + 11, + 0, + 61, + }, + dictWord{14, 0, 405}, + dictWord{150, 0, 21}, + dictWord{10, 11, 755}, + dictWord{147, 11, 29}, + dictWord{9, 11, 378}, + dictWord{141, 11, 
162}, + dictWord{135, 10, 922}, + dictWord{5, 10, 619}, + dictWord{133, 10, 698}, + dictWord{134, 0, 1327}, + dictWord{6, 0, 1598}, + dictWord{137, 0, 575}, + dictWord{ + 9, + 11, + 569, + }, + dictWord{12, 11, 12}, + dictWord{12, 11, 81}, + dictWord{12, 11, 319}, + dictWord{13, 11, 69}, + dictWord{14, 11, 259}, + dictWord{16, 11, 87}, + dictWord{ + 17, + 11, + 1, + }, + dictWord{17, 11, 21}, + dictWord{17, 11, 24}, + dictWord{18, 11, 15}, + dictWord{18, 11, 56}, + dictWord{18, 11, 59}, + dictWord{18, 11, 127}, + dictWord{18, 11, 154}, + dictWord{19, 11, 19}, + dictWord{148, 11, 31}, + dictWord{6, 0, 895}, + dictWord{135, 11, 1231}, + dictWord{5, 0, 959}, + dictWord{7, 11, 124}, + dictWord{136, 11, 38}, + dictWord{5, 11, 261}, + dictWord{7, 11, 78}, + dictWord{7, 11, 199}, + dictWord{8, 11, 815}, + dictWord{9, 11, 126}, + dictWord{138, 11, 342}, + dictWord{5, 10, 917}, + dictWord{134, 10, 1659}, + dictWord{7, 0, 1759}, + dictWord{5, 11, 595}, + dictWord{135, 11, 1863}, + dictWord{136, 0, 173}, + dictWord{134, 0, 266}, + dictWord{ + 142, + 0, + 261, + }, + dictWord{132, 11, 628}, + dictWord{5, 10, 251}, + dictWord{5, 10, 956}, + dictWord{8, 10, 268}, + dictWord{9, 10, 214}, + dictWord{146, 10, 142}, + dictWord{ + 7, + 11, + 266, + }, + dictWord{136, 11, 804}, + dictWord{135, 11, 208}, + dictWord{6, 11, 79}, + dictWord{7, 11, 1021}, + dictWord{135, 11, 1519}, + dictWord{11, 11, 704}, + dictWord{141, 11, 396}, + dictWord{5, 10, 346}, + dictWord{5, 10, 711}, + dictWord{136, 10, 390}, + dictWord{136, 11, 741}, + dictWord{134, 11, 376}, + dictWord{ + 134, + 0, + 1427, + }, + dictWord{6, 0, 1033}, + dictWord{6, 0, 1217}, + dictWord{136, 0, 300}, + dictWord{133, 10, 624}, + dictWord{6, 11, 100}, + dictWord{7, 11, 244}, + dictWord{ + 7, + 11, + 632, + }, + dictWord{7, 11, 1609}, + dictWord{8, 11, 178}, + dictWord{8, 11, 638}, + dictWord{141, 11, 58}, + dictWord{6, 0, 584}, + dictWord{5, 10, 783}, + dictWord{ + 7, + 10, + 1998, + }, + dictWord{135, 10, 2047}, + dictWord{5, 0, 427}, + dictWord{5, 0, 734}, + dictWord{7, 0, 478}, + dictWord{136, 0, 52}, + dictWord{7, 0, 239}, + dictWord{ + 11, + 0, + 217, + }, + dictWord{142, 0, 165}, + dictWord{134, 0, 1129}, + dictWord{6, 0, 168}, + dictWord{6, 0, 1734}, + dictWord{7, 0, 20}, + dictWord{7, 0, 1056}, + dictWord{8, 0, 732}, + dictWord{9, 0, 406}, + dictWord{9, 0, 911}, + dictWord{138, 0, 694}, + dictWord{132, 10, 594}, + dictWord{133, 11, 791}, + dictWord{7, 11, 686}, + dictWord{8, 11, 33}, + dictWord{8, 11, 238}, + dictWord{10, 11, 616}, + dictWord{11, 11, 467}, + dictWord{11, 11, 881}, + dictWord{13, 11, 217}, + dictWord{13, 11, 253}, + dictWord{ + 142, + 11, + 268, + }, + dictWord{137, 11, 476}, + dictWord{134, 0, 418}, + dictWord{133, 0, 613}, + dictWord{132, 0, 632}, + dictWord{132, 11, 447}, + dictWord{7, 0, 32}, + dictWord{ + 7, + 0, + 984, + }, + dictWord{8, 0, 85}, + dictWord{8, 0, 709}, + dictWord{9, 0, 579}, + dictWord{9, 0, 847}, + dictWord{9, 0, 856}, + dictWord{10, 0, 799}, + dictWord{11, 0, 258}, + dictWord{ + 11, + 0, + 1007, + }, + dictWord{12, 0, 331}, + dictWord{12, 0, 615}, + dictWord{13, 0, 188}, + dictWord{13, 0, 435}, + dictWord{14, 0, 8}, + dictWord{15, 0, 165}, + dictWord{ + 16, + 0, + 27, + }, + dictWord{20, 0, 40}, + dictWord{144, 11, 35}, + dictWord{4, 11, 128}, + dictWord{5, 11, 415}, + dictWord{6, 11, 462}, + dictWord{7, 11, 294}, + dictWord{7, 11, 578}, + dictWord{10, 11, 710}, + dictWord{139, 11, 86}, + dictWord{5, 0, 694}, + dictWord{136, 0, 909}, + dictWord{7, 0, 1109}, + dictWord{11, 0, 7}, + dictWord{5, 10, 37}, + 
dictWord{ + 6, + 10, + 39, + }, + dictWord{6, 10, 451}, + dictWord{7, 10, 218}, + dictWord{7, 10, 1166}, + dictWord{7, 10, 1687}, + dictWord{8, 10, 662}, + dictWord{144, 10, 2}, + dictWord{ + 136, + 11, + 587, + }, + dictWord{6, 11, 427}, + dictWord{7, 11, 1018}, + dictWord{138, 11, 692}, + dictWord{4, 11, 195}, + dictWord{6, 10, 508}, + dictWord{135, 11, 802}, + dictWord{4, 0, 167}, + dictWord{135, 0, 82}, + dictWord{5, 0, 62}, + dictWord{6, 0, 24}, + dictWord{6, 0, 534}, + dictWord{7, 0, 74}, + dictWord{7, 0, 678}, + dictWord{7, 0, 684}, + dictWord{ + 7, + 0, + 1043, + }, + dictWord{7, 0, 1072}, + dictWord{8, 0, 280}, + dictWord{8, 0, 541}, + dictWord{8, 0, 686}, + dictWord{9, 0, 258}, + dictWord{10, 0, 519}, + dictWord{11, 0, 252}, + dictWord{140, 0, 282}, + dictWord{138, 0, 33}, + dictWord{4, 0, 359}, + dictWord{133, 11, 738}, + dictWord{7, 0, 980}, + dictWord{9, 0, 328}, + dictWord{13, 0, 186}, + dictWord{13, 0, 364}, + dictWord{7, 10, 635}, + dictWord{7, 10, 796}, + dictWord{8, 10, 331}, + dictWord{9, 10, 330}, + dictWord{9, 10, 865}, + dictWord{10, 10, 119}, + dictWord{ + 10, + 10, + 235, + }, + dictWord{11, 10, 111}, + dictWord{11, 10, 129}, + dictWord{11, 10, 240}, + dictWord{12, 10, 31}, + dictWord{12, 10, 66}, + dictWord{12, 10, 222}, + dictWord{12, 10, 269}, + dictWord{12, 10, 599}, + dictWord{12, 10, 684}, + dictWord{12, 10, 689}, + dictWord{12, 10, 691}, + dictWord{142, 10, 345}, + dictWord{ + 137, + 10, + 527, + }, + dictWord{6, 0, 596}, + dictWord{7, 0, 585}, + dictWord{135, 10, 702}, + dictWord{134, 11, 1683}, + dictWord{133, 0, 211}, + dictWord{6, 0, 145}, + dictWord{ + 141, + 0, + 336, + }, + dictWord{134, 0, 1130}, + dictWord{7, 0, 873}, + dictWord{6, 10, 37}, + dictWord{7, 10, 1666}, + dictWord{8, 10, 195}, + dictWord{8, 10, 316}, + dictWord{ + 9, + 10, + 178, + }, + dictWord{9, 10, 276}, + dictWord{9, 10, 339}, + dictWord{9, 10, 536}, + dictWord{10, 10, 102}, + dictWord{10, 10, 362}, + dictWord{10, 10, 785}, + dictWord{ + 11, + 10, + 55, + }, + dictWord{11, 10, 149}, + dictWord{11, 10, 773}, + dictWord{13, 10, 416}, + dictWord{13, 10, 419}, + dictWord{14, 10, 38}, + dictWord{14, 10, 41}, + dictWord{ + 142, + 10, + 210, + }, + dictWord{8, 0, 840}, + dictWord{136, 0, 841}, + dictWord{132, 0, 263}, + dictWord{5, 11, 3}, + dictWord{8, 11, 578}, + dictWord{9, 11, 118}, + dictWord{ + 10, + 11, + 705, + }, + dictWord{12, 11, 383}, + dictWord{141, 11, 279}, + dictWord{132, 0, 916}, + dictWord{133, 11, 229}, + dictWord{133, 10, 645}, + dictWord{15, 0, 155}, + dictWord{16, 0, 79}, + dictWord{8, 11, 102}, + dictWord{10, 11, 578}, + dictWord{10, 11, 672}, + dictWord{12, 11, 496}, + dictWord{13, 11, 408}, + dictWord{14, 11, 121}, + dictWord{145, 11, 106}, + dictWord{4, 0, 599}, + dictWord{5, 0, 592}, + dictWord{6, 0, 1634}, + dictWord{7, 0, 5}, + dictWord{7, 0, 55}, + dictWord{7, 0, 67}, + dictWord{7, 0, 97}, + dictWord{7, 0, 691}, + dictWord{7, 0, 979}, + dictWord{7, 0, 1600}, + dictWord{7, 0, 1697}, + dictWord{8, 0, 207}, + dictWord{8, 0, 214}, + dictWord{8, 0, 231}, + dictWord{8, 0, 294}, + dictWord{8, 0, 336}, + dictWord{8, 0, 428}, + dictWord{8, 0, 471}, + dictWord{8, 0, 622}, + dictWord{8, 0, 626}, + dictWord{8, 0, 679}, + dictWord{8, 0, 759}, + dictWord{8, 0, 829}, + dictWord{9, 0, 11}, + dictWord{9, 0, 246}, + dictWord{9, 0, 484}, + dictWord{9, 0, 573}, + dictWord{9, 0, 706}, + dictWord{9, 0, 762}, + dictWord{9, 0, 798}, + dictWord{9, 0, 855}, + dictWord{9, 0, 870}, + dictWord{9, 0, 912}, + dictWord{10, 0, 303}, + dictWord{10, 0, 335}, + dictWord{10, 0, 424}, + 
dictWord{10, 0, 461}, + dictWord{10, 0, 543}, + dictWord{ + 10, + 0, + 759, + }, + dictWord{10, 0, 814}, + dictWord{11, 0, 59}, + dictWord{11, 0, 199}, + dictWord{11, 0, 235}, + dictWord{11, 0, 590}, + dictWord{11, 0, 631}, + dictWord{11, 0, 929}, + dictWord{11, 0, 963}, + dictWord{11, 0, 987}, + dictWord{12, 0, 114}, + dictWord{12, 0, 182}, + dictWord{12, 0, 226}, + dictWord{12, 0, 332}, + dictWord{12, 0, 439}, + dictWord{12, 0, 575}, + dictWord{12, 0, 598}, + dictWord{12, 0, 675}, + dictWord{13, 0, 8}, + dictWord{13, 0, 125}, + dictWord{13, 0, 194}, + dictWord{13, 0, 287}, + dictWord{ + 14, + 0, + 197, + }, + dictWord{14, 0, 383}, + dictWord{15, 0, 53}, + dictWord{17, 0, 63}, + dictWord{19, 0, 46}, + dictWord{19, 0, 98}, + dictWord{19, 0, 106}, + dictWord{148, 0, 85}, + dictWord{ + 7, + 0, + 1356, + }, + dictWord{132, 10, 290}, + dictWord{6, 10, 70}, + dictWord{7, 10, 1292}, + dictWord{10, 10, 762}, + dictWord{139, 10, 288}, + dictWord{150, 11, 55}, + dictWord{4, 0, 593}, + dictWord{8, 11, 115}, + dictWord{8, 11, 350}, + dictWord{9, 11, 489}, + dictWord{10, 11, 128}, + dictWord{11, 11, 306}, + dictWord{12, 11, 373}, + dictWord{14, 11, 30}, + dictWord{17, 11, 79}, + dictWord{147, 11, 80}, + dictWord{135, 11, 1235}, + dictWord{134, 0, 1392}, + dictWord{4, 11, 230}, + dictWord{ + 133, + 11, + 702, + }, + dictWord{147, 0, 126}, + dictWord{7, 10, 131}, + dictWord{7, 10, 422}, + dictWord{8, 10, 210}, + dictWord{140, 10, 573}, + dictWord{134, 0, 1179}, + dictWord{ + 139, + 11, + 435, + }, + dictWord{139, 10, 797}, + dictWord{134, 11, 1728}, + dictWord{4, 0, 162}, + dictWord{18, 11, 26}, + dictWord{19, 11, 42}, + dictWord{20, 11, 43}, + dictWord{21, 11, 0}, + dictWord{23, 11, 27}, + dictWord{152, 11, 14}, + dictWord{132, 10, 936}, + dictWord{6, 0, 765}, + dictWord{5, 10, 453}, + dictWord{134, 10, 441}, + dictWord{133, 0, 187}, + dictWord{135, 0, 1286}, + dictWord{6, 0, 635}, + dictWord{6, 0, 904}, + dictWord{6, 0, 1210}, + dictWord{134, 0, 1489}, + dictWord{4, 0, 215}, + dictWord{ + 8, + 0, + 890, + }, + dictWord{9, 0, 38}, + dictWord{10, 0, 923}, + dictWord{11, 0, 23}, + dictWord{11, 0, 127}, + dictWord{139, 0, 796}, + dictWord{6, 0, 1165}, + dictWord{ + 134, + 0, + 1306, + }, + dictWord{7, 0, 716}, + dictWord{13, 0, 97}, + dictWord{141, 0, 251}, + dictWord{132, 10, 653}, + dictWord{136, 0, 657}, + dictWord{146, 10, 80}, + dictWord{ + 5, + 11, + 622, + }, + dictWord{7, 11, 1032}, + dictWord{11, 11, 26}, + dictWord{11, 11, 213}, + dictWord{11, 11, 707}, + dictWord{12, 11, 380}, + dictWord{13, 11, 226}, + dictWord{141, 11, 355}, + dictWord{6, 0, 299}, + dictWord{5, 11, 70}, + dictWord{6, 11, 334}, + dictWord{9, 11, 171}, + dictWord{11, 11, 637}, + dictWord{12, 11, 202}, + dictWord{14, 11, 222}, + dictWord{145, 11, 42}, + dictWord{142, 0, 134}, + dictWord{4, 11, 23}, + dictWord{5, 11, 313}, + dictWord{5, 11, 1014}, + dictWord{6, 11, 50}, + dictWord{ + 6, + 11, + 51, + }, + dictWord{7, 11, 142}, + dictWord{7, 11, 384}, + dictWord{9, 11, 783}, + dictWord{139, 11, 741}, + dictWord{4, 11, 141}, + dictWord{7, 11, 559}, + dictWord{ + 8, + 11, + 640, + }, + dictWord{9, 11, 460}, + dictWord{12, 11, 183}, + dictWord{141, 11, 488}, + dictWord{136, 11, 614}, + dictWord{7, 10, 1368}, + dictWord{8, 10, 232}, + dictWord{8, 10, 361}, + dictWord{10, 10, 682}, + dictWord{138, 10, 742}, + dictWord{137, 10, 534}, + dictWord{6, 0, 1082}, + dictWord{140, 0, 658}, + dictWord{ + 137, + 10, + 27, + }, + dictWord{135, 0, 2002}, + dictWord{142, 10, 12}, + dictWord{4, 0, 28}, + dictWord{5, 0, 440}, + dictWord{7, 0, 248}, + 
dictWord{11, 0, 833}, + dictWord{140, 0, 344}, + dictWord{7, 10, 736}, + dictWord{139, 10, 264}, + dictWord{134, 10, 1657}, + dictWord{134, 0, 1654}, + dictWord{138, 0, 531}, + dictWord{5, 11, 222}, + dictWord{ + 9, + 11, + 140, + }, + dictWord{138, 11, 534}, + dictWord{6, 0, 634}, + dictWord{6, 0, 798}, + dictWord{134, 0, 840}, + dictWord{138, 11, 503}, + dictWord{135, 10, 127}, + dictWord{133, 0, 853}, + dictWord{5, 11, 154}, + dictWord{7, 11, 1491}, + dictWord{10, 11, 379}, + dictWord{138, 11, 485}, + dictWord{6, 0, 249}, + dictWord{7, 0, 1234}, + dictWord{139, 0, 573}, + dictWord{133, 11, 716}, + dictWord{7, 11, 1570}, + dictWord{140, 11, 542}, + dictWord{136, 10, 364}, + dictWord{138, 0, 527}, + dictWord{ + 4, + 11, + 91, + }, + dictWord{5, 11, 388}, + dictWord{5, 11, 845}, + dictWord{6, 11, 206}, + dictWord{6, 11, 252}, + dictWord{6, 11, 365}, + dictWord{7, 11, 136}, + dictWord{7, 11, 531}, + dictWord{8, 11, 264}, + dictWord{136, 11, 621}, + dictWord{134, 0, 1419}, + dictWord{135, 11, 1441}, + dictWord{7, 0, 49}, + dictWord{7, 0, 392}, + dictWord{8, 0, 20}, + dictWord{8, 0, 172}, + dictWord{8, 0, 690}, + dictWord{9, 0, 383}, + dictWord{9, 0, 845}, + dictWord{10, 0, 48}, + dictWord{11, 0, 293}, + dictWord{11, 0, 832}, + dictWord{ + 11, + 0, + 920, + }, + dictWord{11, 0, 984}, + dictWord{141, 0, 221}, + dictWord{5, 0, 858}, + dictWord{133, 0, 992}, + dictWord{5, 0, 728}, + dictWord{137, 10, 792}, + dictWord{ + 5, + 10, + 909, + }, + dictWord{9, 10, 849}, + dictWord{138, 10, 805}, + dictWord{7, 0, 525}, + dictWord{7, 0, 1579}, + dictWord{8, 0, 497}, + dictWord{136, 0, 573}, + dictWord{6, 0, 268}, + dictWord{137, 0, 62}, + dictWord{135, 11, 576}, + dictWord{134, 0, 1201}, + dictWord{5, 11, 771}, + dictWord{5, 11, 863}, + dictWord{5, 11, 898}, + dictWord{ + 6, + 11, + 1632, + }, + dictWord{6, 11, 1644}, + dictWord{134, 11, 1780}, + dictWord{133, 11, 331}, + dictWord{7, 0, 193}, + dictWord{7, 0, 1105}, + dictWord{10, 0, 495}, + dictWord{ + 7, + 10, + 397, + }, + dictWord{8, 10, 124}, + dictWord{8, 10, 619}, + dictWord{9, 10, 305}, + dictWord{11, 10, 40}, + dictWord{12, 10, 349}, + dictWord{13, 10, 134}, + dictWord{ + 13, + 10, + 295, + }, + dictWord{14, 10, 155}, + dictWord{15, 10, 120}, + dictWord{146, 10, 105}, + dictWord{138, 0, 106}, + dictWord{6, 0, 859}, + dictWord{5, 11, 107}, + dictWord{ + 7, + 11, + 201, + }, + dictWord{136, 11, 518}, + dictWord{6, 11, 446}, + dictWord{135, 11, 1817}, + dictWord{13, 0, 23}, + dictWord{4, 10, 262}, + dictWord{135, 10, 342}, + dictWord{133, 10, 641}, + dictWord{137, 11, 851}, + dictWord{6, 0, 925}, + dictWord{137, 0, 813}, + dictWord{132, 11, 504}, + dictWord{6, 0, 613}, + dictWord{ + 136, + 0, + 223, + }, + dictWord{4, 10, 99}, + dictWord{6, 10, 250}, + dictWord{6, 10, 346}, + dictWord{8, 10, 127}, + dictWord{138, 10, 81}, + dictWord{136, 0, 953}, + dictWord{ + 132, + 10, + 915, + }, + dictWord{139, 11, 892}, + dictWord{5, 10, 75}, + dictWord{9, 10, 517}, + dictWord{10, 10, 470}, + dictWord{12, 10, 155}, + dictWord{141, 10, 224}, + dictWord{ + 4, + 0, + 666, + }, + dictWord{7, 0, 1017}, + dictWord{7, 11, 996}, + dictWord{138, 11, 390}, + dictWord{5, 11, 883}, + dictWord{133, 11, 975}, + dictWord{14, 10, 83}, + dictWord{ + 142, + 11, + 83, + }, + dictWord{4, 0, 670}, + dictWord{5, 11, 922}, + dictWord{134, 11, 1707}, + dictWord{135, 0, 216}, + dictWord{9, 0, 40}, + dictWord{11, 0, 136}, + dictWord{ + 135, + 11, + 787, + }, + dictWord{5, 10, 954}, + dictWord{5, 11, 993}, + dictWord{7, 11, 515}, + dictWord{137, 11, 91}, + dictWord{139, 0, 259}, + 
dictWord{7, 0, 1114}, + dictWord{ + 9, + 0, + 310, + }, + dictWord{9, 0, 682}, + dictWord{10, 0, 440}, + dictWord{13, 0, 40}, + dictWord{6, 10, 304}, + dictWord{8, 10, 418}, + dictWord{11, 10, 341}, + dictWord{ + 139, + 10, + 675, + }, + dictWord{14, 0, 296}, + dictWord{9, 10, 410}, + dictWord{139, 10, 425}, + dictWord{10, 11, 377}, + dictWord{12, 11, 363}, + dictWord{13, 11, 68}, + dictWord{ + 13, + 11, + 94, + }, + dictWord{14, 11, 108}, + dictWord{142, 11, 306}, + dictWord{7, 0, 1401}, + dictWord{135, 0, 1476}, + dictWord{4, 0, 296}, + dictWord{6, 0, 475}, + dictWord{ + 7, + 0, + 401, + }, + dictWord{7, 0, 1410}, + dictWord{7, 0, 1594}, + dictWord{7, 0, 1674}, + dictWord{8, 0, 63}, + dictWord{8, 0, 660}, + dictWord{137, 0, 74}, + dictWord{4, 0, 139}, + dictWord{4, 0, 388}, + dictWord{140, 0, 188}, + dictWord{132, 0, 797}, + dictWord{132, 11, 766}, + dictWord{5, 11, 103}, + dictWord{7, 11, 921}, + dictWord{8, 11, 580}, + dictWord{8, 11, 593}, + dictWord{8, 11, 630}, + dictWord{138, 11, 28}, + dictWord{4, 11, 911}, + dictWord{5, 11, 867}, + dictWord{133, 11, 1013}, + dictWord{134, 10, 14}, + dictWord{134, 0, 1572}, + dictWord{134, 10, 1708}, + dictWord{21, 0, 39}, + dictWord{5, 10, 113}, + dictWord{6, 10, 243}, + dictWord{7, 10, 1865}, + dictWord{ + 11, + 10, + 161, + }, + dictWord{16, 10, 37}, + dictWord{145, 10, 99}, + dictWord{7, 11, 1563}, + dictWord{141, 11, 182}, + dictWord{5, 11, 135}, + dictWord{6, 11, 519}, + dictWord{ + 7, + 11, + 1722, + }, + dictWord{10, 11, 271}, + dictWord{11, 11, 261}, + dictWord{145, 11, 54}, + dictWord{132, 10, 274}, + dictWord{134, 0, 1594}, + dictWord{4, 11, 300}, + dictWord{5, 11, 436}, + dictWord{135, 11, 484}, + dictWord{4, 0, 747}, + dictWord{6, 0, 290}, + dictWord{7, 0, 649}, + dictWord{7, 0, 1479}, + dictWord{135, 0, 1583}, + dictWord{133, 11, 535}, + dictWord{147, 11, 82}, + dictWord{133, 0, 232}, + dictWord{137, 0, 887}, + dictWord{135, 10, 166}, + dictWord{136, 0, 521}, + dictWord{4, 0, 14}, + dictWord{7, 0, 472}, + dictWord{7, 0, 1801}, + dictWord{10, 0, 748}, + dictWord{141, 0, 458}, + dictWord{134, 0, 741}, + dictWord{134, 0, 992}, + dictWord{16, 0, 111}, + dictWord{137, 10, 304}, + dictWord{4, 0, 425}, + dictWord{5, 11, 387}, + dictWord{7, 11, 557}, + dictWord{12, 11, 547}, + dictWord{142, 11, 86}, + dictWord{ + 135, + 11, + 1747, + }, + dictWord{5, 10, 654}, + dictWord{135, 11, 1489}, + dictWord{7, 0, 789}, + dictWord{4, 11, 6}, + dictWord{5, 11, 708}, + dictWord{136, 11, 75}, + dictWord{ + 6, + 10, + 273, + }, + dictWord{10, 10, 188}, + dictWord{13, 10, 377}, + dictWord{146, 10, 77}, + dictWord{6, 0, 1593}, + dictWord{4, 11, 303}, + dictWord{7, 11, 619}, + dictWord{ + 10, + 11, + 547, + }, + dictWord{10, 11, 687}, + dictWord{11, 11, 122}, + dictWord{140, 11, 601}, + dictWord{134, 0, 1768}, + dictWord{135, 10, 410}, + dictWord{138, 11, 772}, + dictWord{11, 0, 233}, + dictWord{139, 10, 524}, + dictWord{5, 0, 943}, + dictWord{134, 0, 1779}, + dictWord{134, 10, 1785}, + dictWord{136, 11, 529}, + dictWord{ + 132, + 0, + 955, + }, + dictWord{5, 0, 245}, + dictWord{6, 0, 576}, + dictWord{7, 0, 582}, + dictWord{136, 0, 225}, + dictWord{132, 10, 780}, + dictWord{142, 0, 241}, + dictWord{ + 134, + 0, + 1943, + }, + dictWord{4, 11, 106}, + dictWord{7, 11, 310}, + dictWord{7, 11, 1785}, + dictWord{10, 11, 690}, + dictWord{139, 11, 717}, + dictWord{134, 0, 1284}, + dictWord{5, 11, 890}, + dictWord{133, 11, 988}, + dictWord{6, 11, 626}, + dictWord{142, 11, 431}, + dictWord{10, 11, 706}, + dictWord{145, 11, 32}, + dictWord{ + 137, + 11, + 332, + }, + 
dictWord{132, 11, 698}, + dictWord{135, 0, 709}, + dictWord{5, 10, 948}, + dictWord{138, 11, 17}, + dictWord{136, 0, 554}, + dictWord{134, 0, 1564}, + dictWord{139, 10, 941}, + dictWord{132, 0, 443}, + dictWord{134, 0, 909}, + dictWord{134, 11, 84}, + dictWord{142, 0, 280}, + dictWord{4, 10, 532}, + dictWord{5, 10, 706}, + dictWord{135, 10, 662}, + dictWord{132, 0, 729}, + dictWord{5, 10, 837}, + dictWord{6, 10, 1651}, + dictWord{139, 10, 985}, + dictWord{135, 10, 1861}, + dictWord{ + 4, + 0, + 348, + }, + dictWord{152, 11, 3}, + dictWord{5, 11, 986}, + dictWord{6, 11, 130}, + dictWord{7, 11, 1582}, + dictWord{8, 11, 458}, + dictWord{10, 11, 101}, + dictWord{ + 10, + 11, + 318, + }, + dictWord{138, 11, 823}, + dictWord{134, 0, 758}, + dictWord{4, 0, 298}, + dictWord{137, 0, 848}, + dictWord{4, 10, 330}, + dictWord{7, 10, 933}, + dictWord{ + 7, + 10, + 2012, + }, + dictWord{136, 10, 292}, + dictWord{7, 11, 1644}, + dictWord{137, 11, 129}, + dictWord{6, 0, 1422}, + dictWord{9, 0, 829}, + dictWord{135, 10, 767}, + dictWord{5, 0, 164}, + dictWord{7, 0, 121}, + dictWord{142, 0, 189}, + dictWord{7, 0, 812}, + dictWord{7, 0, 1261}, + dictWord{7, 0, 1360}, + dictWord{9, 0, 632}, + dictWord{ + 140, + 0, + 352, + }, + dictWord{135, 11, 1788}, + dictWord{139, 0, 556}, + dictWord{135, 11, 997}, + dictWord{145, 10, 114}, + dictWord{4, 0, 172}, + dictWord{9, 0, 611}, + dictWord{10, 0, 436}, + dictWord{12, 0, 673}, + dictWord{13, 0, 255}, + dictWord{137, 10, 883}, + dictWord{11, 0, 530}, + dictWord{138, 10, 274}, + dictWord{133, 0, 844}, + dictWord{134, 0, 984}, + dictWord{13, 0, 232}, + dictWord{18, 0, 35}, + dictWord{4, 10, 703}, + dictWord{135, 10, 207}, + dictWord{132, 10, 571}, + dictWord{9, 0, 263}, + dictWord{10, 0, 147}, + dictWord{138, 0, 492}, + dictWord{7, 11, 1756}, + dictWord{137, 11, 98}, + dictWord{5, 10, 873}, + dictWord{5, 10, 960}, + dictWord{8, 10, 823}, + dictWord{137, 10, 881}, + dictWord{133, 0, 537}, + dictWord{132, 0, 859}, + dictWord{7, 11, 1046}, + dictWord{139, 11, 160}, + dictWord{137, 0, 842}, + dictWord{ + 139, + 10, + 283, + }, + dictWord{5, 10, 33}, + dictWord{6, 10, 470}, + dictWord{139, 10, 424}, + dictWord{6, 11, 45}, + dictWord{7, 11, 433}, + dictWord{8, 11, 129}, + dictWord{ + 9, + 11, + 21, + }, + dictWord{10, 11, 392}, + dictWord{11, 11, 79}, + dictWord{12, 11, 499}, + dictWord{13, 11, 199}, + dictWord{141, 11, 451}, + dictWord{135, 0, 1291}, + dictWord{135, 10, 1882}, + dictWord{7, 11, 558}, + dictWord{136, 11, 353}, + dictWord{134, 0, 1482}, + dictWord{5, 0, 230}, + dictWord{5, 0, 392}, + dictWord{6, 0, 420}, + dictWord{9, 0, 568}, + dictWord{140, 0, 612}, + dictWord{6, 0, 262}, + dictWord{7, 10, 90}, + dictWord{7, 10, 664}, + dictWord{7, 10, 830}, + dictWord{7, 10, 1380}, + dictWord{ + 7, + 10, + 2025, + }, + dictWord{8, 11, 81}, + dictWord{8, 10, 448}, + dictWord{8, 10, 828}, + dictWord{9, 11, 189}, + dictWord{9, 11, 201}, + dictWord{11, 11, 478}, + dictWord{ + 11, + 11, + 712, + }, + dictWord{141, 11, 338}, + dictWord{142, 0, 31}, + dictWord{5, 11, 353}, + dictWord{151, 11, 26}, + dictWord{132, 0, 753}, + dictWord{4, 0, 0}, + dictWord{ + 5, + 0, + 41, + }, + dictWord{7, 0, 1459}, + dictWord{7, 0, 1469}, + dictWord{7, 0, 1859}, + dictWord{9, 0, 549}, + dictWord{139, 0, 905}, + dictWord{9, 10, 417}, + dictWord{ + 137, + 10, + 493, + }, + dictWord{135, 11, 1113}, + dictWord{133, 0, 696}, + dictWord{141, 11, 448}, + dictWord{134, 10, 295}, + dictWord{132, 0, 834}, + dictWord{4, 0, 771}, + dictWord{5, 10, 1019}, + dictWord{6, 11, 25}, + dictWord{7, 11, 855}, + 
dictWord{7, 11, 1258}, + dictWord{144, 11, 32}, + dictWord{134, 0, 1076}, + dictWord{133, 0, 921}, + dictWord{133, 0, 674}, + dictWord{4, 11, 4}, + dictWord{7, 11, 1118}, + dictWord{7, 11, 1320}, + dictWord{7, 11, 1706}, + dictWord{8, 11, 277}, + dictWord{9, 11, 622}, + dictWord{10, 11, 9}, + dictWord{11, 11, 724}, + dictWord{12, 11, 350}, + dictWord{12, 11, 397}, + dictWord{13, 11, 28}, + dictWord{13, 11, 159}, + dictWord{15, 11, 89}, + dictWord{18, 11, 5}, + dictWord{19, 11, 9}, + dictWord{20, 11, 34}, + dictWord{150, 11, 47}, + dictWord{134, 10, 208}, + dictWord{6, 0, 444}, + dictWord{136, 0, 308}, + dictWord{ + 6, + 0, + 180, + }, + dictWord{7, 0, 1137}, + dictWord{8, 0, 751}, + dictWord{139, 0, 805}, + dictWord{4, 0, 183}, + dictWord{7, 0, 271}, + dictWord{11, 0, 824}, + dictWord{ + 11, + 0, + 952, + }, + dictWord{13, 0, 278}, + dictWord{13, 0, 339}, + dictWord{13, 0, 482}, + dictWord{14, 0, 424}, + dictWord{148, 0, 99}, + dictWord{7, 11, 317}, + dictWord{ + 135, + 11, + 569, + }, + dictWord{4, 0, 19}, + dictWord{5, 0, 477}, + dictWord{5, 0, 596}, + dictWord{6, 0, 505}, + dictWord{7, 0, 1221}, + dictWord{11, 0, 907}, + dictWord{12, 0, 209}, + dictWord{141, 0, 214}, + dictWord{135, 0, 1215}, + dictWord{6, 0, 271}, + dictWord{7, 0, 398}, + dictWord{8, 0, 387}, + dictWord{10, 0, 344}, + dictWord{7, 10, 448}, + dictWord{ + 7, + 10, + 1629, + }, + dictWord{7, 10, 1813}, + dictWord{8, 10, 442}, + dictWord{9, 10, 710}, + dictWord{10, 10, 282}, + dictWord{138, 10, 722}, + dictWord{11, 10, 844}, + dictWord{12, 10, 104}, + dictWord{140, 10, 625}, + dictWord{134, 11, 255}, + dictWord{133, 10, 787}, + dictWord{134, 0, 1645}, + dictWord{11, 11, 956}, + dictWord{ + 151, + 11, + 3, + }, + dictWord{6, 0, 92}, + dictWord{6, 0, 188}, + dictWord{7, 0, 209}, + dictWord{7, 0, 1269}, + dictWord{7, 0, 1524}, + dictWord{7, 0, 1876}, + dictWord{8, 0, 661}, + dictWord{10, 0, 42}, + dictWord{10, 0, 228}, + dictWord{11, 0, 58}, + dictWord{11, 0, 1020}, + dictWord{12, 0, 58}, + dictWord{12, 0, 118}, + dictWord{141, 0, 32}, + dictWord{ + 4, + 0, + 459, + }, + dictWord{133, 0, 966}, + dictWord{4, 11, 536}, + dictWord{7, 11, 1141}, + dictWord{10, 11, 723}, + dictWord{139, 11, 371}, + dictWord{140, 0, 330}, + dictWord{134, 0, 1557}, + dictWord{7, 11, 285}, + dictWord{135, 11, 876}, + dictWord{136, 10, 491}, + dictWord{135, 11, 560}, + dictWord{6, 0, 18}, + dictWord{7, 0, 179}, + dictWord{7, 0, 932}, + dictWord{8, 0, 548}, + dictWord{8, 0, 757}, + dictWord{9, 0, 54}, + dictWord{9, 0, 65}, + dictWord{9, 0, 532}, + dictWord{9, 0, 844}, + dictWord{10, 0, 113}, + dictWord{10, 0, 117}, + dictWord{10, 0, 315}, + dictWord{10, 0, 560}, + dictWord{10, 0, 622}, + dictWord{10, 0, 798}, + dictWord{11, 0, 153}, + dictWord{11, 0, 351}, + dictWord{ + 11, + 0, + 375, + }, + dictWord{12, 0, 78}, + dictWord{12, 0, 151}, + dictWord{12, 0, 392}, + dictWord{12, 0, 666}, + dictWord{14, 0, 248}, + dictWord{143, 0, 23}, + dictWord{ + 6, + 0, + 1742, + }, + dictWord{132, 11, 690}, + dictWord{4, 10, 403}, + dictWord{5, 10, 441}, + dictWord{7, 10, 450}, + dictWord{10, 10, 840}, + dictWord{11, 10, 101}, + dictWord{ + 12, + 10, + 193, + }, + dictWord{141, 10, 430}, + dictWord{133, 0, 965}, + dictWord{134, 0, 182}, + dictWord{10, 0, 65}, + dictWord{10, 0, 488}, + dictWord{138, 0, 497}, + dictWord{135, 11, 1346}, + dictWord{6, 0, 973}, + dictWord{6, 0, 1158}, + dictWord{10, 11, 200}, + dictWord{19, 11, 2}, + dictWord{151, 11, 22}, + dictWord{4, 11, 190}, + dictWord{133, 11, 554}, + dictWord{133, 10, 679}, + dictWord{7, 0, 328}, + dictWord{137, 10, 
326}, + dictWord{133, 11, 1001}, + dictWord{9, 0, 588}, + dictWord{ + 138, + 0, + 260, + }, + dictWord{133, 11, 446}, + dictWord{135, 10, 1128}, + dictWord{135, 10, 1796}, + dictWord{147, 11, 119}, + dictWord{134, 0, 1786}, + dictWord{ + 6, + 0, + 1328, + }, + dictWord{6, 0, 1985}, + dictWord{8, 0, 962}, + dictWord{138, 0, 1017}, + dictWord{135, 0, 308}, + dictWord{11, 0, 508}, + dictWord{4, 10, 574}, + dictWord{ + 7, + 10, + 350, + }, + dictWord{7, 10, 1024}, + dictWord{8, 10, 338}, + dictWord{9, 10, 677}, + dictWord{138, 10, 808}, + dictWord{138, 11, 752}, + dictWord{135, 10, 1081}, + dictWord{137, 11, 96}, + dictWord{7, 10, 1676}, + dictWord{135, 10, 2037}, + dictWord{136, 0, 588}, + dictWord{132, 11, 304}, + dictWord{133, 0, 614}, + dictWord{ + 140, + 0, + 793, + }, + dictWord{136, 0, 287}, + dictWord{137, 10, 297}, + dictWord{141, 10, 37}, + dictWord{6, 11, 53}, + dictWord{6, 11, 199}, + dictWord{7, 11, 1408}, + dictWord{ + 8, + 11, + 32, + }, + dictWord{8, 11, 93}, + dictWord{9, 11, 437}, + dictWord{10, 11, 397}, + dictWord{10, 11, 629}, + dictWord{11, 11, 593}, + dictWord{11, 11, 763}, + dictWord{ + 13, + 11, + 326, + }, + dictWord{145, 11, 35}, + dictWord{134, 11, 105}, + dictWord{9, 11, 320}, + dictWord{10, 11, 506}, + dictWord{138, 11, 794}, + dictWord{5, 11, 114}, + dictWord{5, 11, 255}, + dictWord{141, 11, 285}, + dictWord{140, 0, 290}, + dictWord{7, 11, 2035}, + dictWord{8, 11, 19}, + dictWord{9, 11, 89}, + dictWord{138, 11, 831}, + dictWord{134, 0, 1136}, + dictWord{7, 0, 719}, + dictWord{8, 0, 796}, + dictWord{8, 0, 809}, + dictWord{8, 0, 834}, + dictWord{6, 10, 306}, + dictWord{7, 10, 1140}, + dictWord{ + 7, + 10, + 1340, + }, + dictWord{8, 10, 133}, + dictWord{138, 10, 449}, + dictWord{139, 10, 1011}, + dictWord{5, 0, 210}, + dictWord{6, 0, 213}, + dictWord{7, 0, 60}, + dictWord{ + 10, + 0, + 364, + }, + dictWord{139, 0, 135}, + dictWord{5, 0, 607}, + dictWord{8, 0, 326}, + dictWord{136, 0, 490}, + dictWord{138, 11, 176}, + dictWord{132, 0, 701}, + dictWord{ + 5, + 0, + 472, + }, + dictWord{7, 0, 380}, + dictWord{137, 0, 758}, + dictWord{135, 0, 1947}, + dictWord{6, 0, 1079}, + dictWord{138, 0, 278}, + dictWord{138, 11, 391}, + dictWord{ + 5, + 10, + 329, + }, + dictWord{8, 10, 260}, + dictWord{139, 11, 156}, + dictWord{4, 0, 386}, + dictWord{7, 0, 41}, + dictWord{8, 0, 405}, + dictWord{8, 0, 728}, + dictWord{9, 0, 497}, + dictWord{11, 0, 110}, + dictWord{11, 0, 360}, + dictWord{15, 0, 37}, + dictWord{144, 0, 84}, + dictWord{5, 0, 46}, + dictWord{7, 0, 1452}, + dictWord{7, 0, 1480}, + dictWord{ + 8, + 0, + 634, + }, + dictWord{140, 0, 472}, + dictWord{136, 0, 961}, + dictWord{4, 0, 524}, + dictWord{136, 0, 810}, + dictWord{10, 0, 238}, + dictWord{141, 0, 33}, + dictWord{ + 132, + 10, + 657, + }, + dictWord{152, 10, 7}, + dictWord{133, 0, 532}, + dictWord{5, 0, 997}, + dictWord{135, 10, 1665}, + dictWord{7, 11, 594}, + dictWord{7, 11, 851}, + dictWord{ + 7, + 11, + 1858, + }, + dictWord{9, 11, 411}, + dictWord{9, 11, 574}, + dictWord{9, 11, 666}, + dictWord{9, 11, 737}, + dictWord{10, 11, 346}, + dictWord{10, 11, 712}, + dictWord{ + 11, + 11, + 246, + }, + dictWord{11, 11, 432}, + dictWord{11, 11, 517}, + dictWord{11, 11, 647}, + dictWord{11, 11, 679}, + dictWord{11, 11, 727}, + dictWord{12, 11, 304}, + dictWord{12, 11, 305}, + dictWord{12, 11, 323}, + dictWord{12, 11, 483}, + dictWord{12, 11, 572}, + dictWord{12, 11, 593}, + dictWord{12, 11, 602}, + dictWord{ + 13, + 11, + 95, + }, + dictWord{13, 11, 101}, + dictWord{13, 11, 171}, + dictWord{13, 11, 315}, + dictWord{13, 
11, 378}, + dictWord{13, 11, 425}, + dictWord{13, 11, 475}, + dictWord{ + 14, + 11, + 63, + }, + dictWord{14, 11, 380}, + dictWord{14, 11, 384}, + dictWord{15, 11, 133}, + dictWord{18, 11, 112}, + dictWord{148, 11, 72}, + dictWord{5, 11, 955}, + dictWord{136, 11, 814}, + dictWord{134, 0, 1301}, + dictWord{5, 10, 66}, + dictWord{7, 10, 1896}, + dictWord{136, 10, 288}, + dictWord{133, 11, 56}, + dictWord{ + 134, + 10, + 1643, + }, + dictWord{6, 0, 1298}, + dictWord{148, 11, 100}, + dictWord{5, 0, 782}, + dictWord{5, 0, 829}, + dictWord{6, 0, 671}, + dictWord{6, 0, 1156}, + dictWord{6, 0, 1738}, + dictWord{137, 11, 621}, + dictWord{4, 0, 306}, + dictWord{5, 0, 570}, + dictWord{7, 0, 1347}, + dictWord{5, 10, 91}, + dictWord{5, 10, 648}, + dictWord{5, 10, 750}, + dictWord{ + 5, + 10, + 781, + }, + dictWord{6, 10, 54}, + dictWord{6, 10, 112}, + dictWord{6, 10, 402}, + dictWord{6, 10, 1732}, + dictWord{7, 10, 315}, + dictWord{7, 10, 749}, + dictWord{ + 7, + 10, + 1900, + }, + dictWord{9, 10, 78}, + dictWord{9, 10, 508}, + dictWord{10, 10, 611}, + dictWord{10, 10, 811}, + dictWord{11, 10, 510}, + dictWord{11, 10, 728}, + dictWord{ + 13, + 10, + 36, + }, + dictWord{14, 10, 39}, + dictWord{16, 10, 83}, + dictWord{17, 10, 124}, + dictWord{148, 10, 30}, + dictWord{8, 10, 570}, + dictWord{9, 11, 477}, + dictWord{ + 141, + 11, + 78, + }, + dictWord{4, 11, 639}, + dictWord{10, 11, 4}, + dictWord{10, 10, 322}, + dictWord{10, 10, 719}, + dictWord{11, 10, 407}, + dictWord{11, 11, 638}, + dictWord{ + 12, + 11, + 177, + }, + dictWord{148, 11, 57}, + dictWord{7, 0, 1823}, + dictWord{139, 0, 693}, + dictWord{7, 0, 759}, + dictWord{5, 11, 758}, + dictWord{8, 10, 125}, + dictWord{ + 8, + 10, + 369, + }, + dictWord{8, 10, 524}, + dictWord{10, 10, 486}, + dictWord{11, 10, 13}, + dictWord{11, 10, 381}, + dictWord{11, 10, 736}, + dictWord{11, 10, 766}, + dictWord{ + 11, + 10, + 845, + }, + dictWord{13, 10, 114}, + dictWord{13, 10, 292}, + dictWord{142, 10, 47}, + dictWord{7, 0, 1932}, + dictWord{6, 10, 1684}, + dictWord{6, 10, 1731}, + dictWord{7, 10, 356}, + dictWord{8, 10, 54}, + dictWord{8, 10, 221}, + dictWord{9, 10, 225}, + dictWord{9, 10, 356}, + dictWord{10, 10, 77}, + dictWord{10, 10, 446}, + dictWord{ + 10, + 10, + 731, + }, + dictWord{12, 10, 404}, + dictWord{141, 10, 491}, + dictWord{135, 11, 552}, + dictWord{135, 11, 1112}, + dictWord{4, 0, 78}, + dictWord{5, 0, 96}, + dictWord{ + 5, + 0, + 182, + }, + dictWord{6, 0, 1257}, + dictWord{7, 0, 1724}, + dictWord{7, 0, 1825}, + dictWord{10, 0, 394}, + dictWord{10, 0, 471}, + dictWord{11, 0, 532}, + dictWord{ + 14, + 0, + 340, + }, + dictWord{145, 0, 88}, + dictWord{139, 11, 328}, + dictWord{135, 0, 1964}, + dictWord{132, 10, 411}, + dictWord{4, 10, 80}, + dictWord{5, 10, 44}, + dictWord{ + 137, + 11, + 133, + }, + dictWord{5, 11, 110}, + dictWord{6, 11, 169}, + dictWord{6, 11, 1702}, + dictWord{7, 11, 400}, + dictWord{8, 11, 538}, + dictWord{9, 11, 184}, + dictWord{ + 9, + 11, + 524, + }, + dictWord{140, 11, 218}, + dictWord{4, 0, 521}, + dictWord{5, 10, 299}, + dictWord{7, 10, 1083}, + dictWord{140, 11, 554}, + dictWord{6, 11, 133}, + dictWord{ + 9, + 11, + 353, + }, + dictWord{12, 11, 628}, + dictWord{146, 11, 79}, + dictWord{6, 0, 215}, + dictWord{7, 0, 584}, + dictWord{7, 0, 1028}, + dictWord{7, 0, 1473}, + dictWord{ + 7, + 0, + 1721, + }, + dictWord{9, 0, 424}, + dictWord{138, 0, 779}, + dictWord{7, 0, 857}, + dictWord{7, 0, 1209}, + dictWord{7, 10, 1713}, + dictWord{9, 10, 537}, + dictWord{ + 10, + 10, + 165, + }, + dictWord{12, 10, 219}, + dictWord{140, 
10, 561}, + dictWord{4, 10, 219}, + dictWord{6, 11, 93}, + dictWord{7, 11, 1422}, + dictWord{7, 10, 1761}, + dictWord{ + 7, + 11, + 1851, + }, + dictWord{8, 11, 673}, + dictWord{9, 10, 86}, + dictWord{9, 11, 529}, + dictWord{140, 11, 43}, + dictWord{137, 11, 371}, + dictWord{136, 0, 671}, + dictWord{ + 5, + 0, + 328, + }, + dictWord{135, 0, 918}, + dictWord{132, 0, 529}, + dictWord{9, 11, 25}, + dictWord{10, 11, 467}, + dictWord{138, 11, 559}, + dictWord{4, 11, 335}, + dictWord{ + 135, + 11, + 942, + }, + dictWord{134, 0, 716}, + dictWord{134, 0, 1509}, + dictWord{6, 0, 67}, + dictWord{7, 0, 258}, + dictWord{7, 0, 1630}, + dictWord{9, 0, 354}, + dictWord{ + 9, + 0, + 675, + }, + dictWord{10, 0, 830}, + dictWord{14, 0, 80}, + dictWord{17, 0, 80}, + dictWord{140, 10, 428}, + dictWord{134, 0, 1112}, + dictWord{6, 0, 141}, + dictWord{7, 0, 225}, + dictWord{9, 0, 59}, + dictWord{9, 0, 607}, + dictWord{10, 0, 312}, + dictWord{11, 0, 687}, + dictWord{12, 0, 555}, + dictWord{13, 0, 373}, + dictWord{13, 0, 494}, + dictWord{ + 148, + 0, + 58, + }, + dictWord{133, 10, 514}, + dictWord{8, 11, 39}, + dictWord{10, 11, 773}, + dictWord{11, 11, 84}, + dictWord{12, 11, 205}, + dictWord{142, 11, 1}, + dictWord{ + 8, + 0, + 783, + }, + dictWord{5, 11, 601}, + dictWord{133, 11, 870}, + dictWord{136, 11, 594}, + dictWord{4, 10, 55}, + dictWord{5, 10, 301}, + dictWord{6, 10, 571}, + dictWord{ + 14, + 10, + 49, + }, + dictWord{146, 10, 102}, + dictWord{132, 11, 181}, + dictWord{134, 11, 1652}, + dictWord{133, 10, 364}, + dictWord{4, 11, 97}, + dictWord{5, 11, 147}, + dictWord{6, 11, 286}, + dictWord{7, 11, 1362}, + dictWord{141, 11, 176}, + dictWord{4, 10, 76}, + dictWord{7, 10, 1550}, + dictWord{9, 10, 306}, + dictWord{9, 10, 430}, + dictWord{9, 10, 663}, + dictWord{10, 10, 683}, + dictWord{11, 10, 427}, + dictWord{11, 10, 753}, + dictWord{12, 10, 334}, + dictWord{12, 10, 442}, + dictWord{ + 14, + 10, + 258, + }, + dictWord{14, 10, 366}, + dictWord{143, 10, 131}, + dictWord{137, 10, 52}, + dictWord{6, 0, 955}, + dictWord{134, 0, 1498}, + dictWord{6, 11, 375}, + dictWord{ + 7, + 11, + 169, + }, + dictWord{7, 11, 254}, + dictWord{136, 11, 780}, + dictWord{7, 0, 430}, + dictWord{11, 0, 46}, + dictWord{14, 0, 343}, + dictWord{142, 11, 343}, + dictWord{ + 135, + 0, + 1183, + }, + dictWord{5, 0, 602}, + dictWord{7, 0, 2018}, + dictWord{9, 0, 418}, + dictWord{9, 0, 803}, + dictWord{135, 11, 1447}, + dictWord{8, 0, 677}, + dictWord{ + 135, + 11, + 1044, + }, + dictWord{139, 11, 285}, + dictWord{4, 10, 656}, + dictWord{135, 10, 779}, + dictWord{135, 10, 144}, + dictWord{5, 11, 629}, + dictWord{ + 135, + 11, + 1549, + }, + dictWord{135, 10, 1373}, + dictWord{138, 11, 209}, + dictWord{7, 10, 554}, + dictWord{7, 10, 605}, + dictWord{141, 10, 10}, + dictWord{5, 10, 838}, + dictWord{ + 5, + 10, + 841, + }, + dictWord{134, 10, 1649}, + dictWord{133, 10, 1012}, + dictWord{6, 0, 1357}, + dictWord{134, 0, 1380}, + dictWord{144, 0, 53}, + dictWord{6, 0, 590}, + dictWord{7, 10, 365}, + dictWord{7, 10, 1357}, + dictWord{7, 10, 1497}, + dictWord{8, 10, 154}, + dictWord{141, 10, 281}, + dictWord{133, 10, 340}, + dictWord{ + 132, + 11, + 420, + }, + dictWord{135, 0, 329}, + dictWord{147, 11, 32}, + dictWord{4, 0, 469}, + dictWord{10, 11, 429}, + dictWord{139, 10, 495}, + dictWord{8, 10, 261}, + dictWord{ + 9, + 10, + 144, + }, + dictWord{9, 10, 466}, + dictWord{10, 10, 370}, + dictWord{12, 10, 470}, + dictWord{13, 10, 144}, + dictWord{142, 10, 348}, + dictWord{142, 0, 460}, + dictWord{4, 11, 325}, + dictWord{9, 10, 897}, + 
dictWord{138, 11, 125}, + dictWord{6, 0, 1743}, + dictWord{6, 10, 248}, + dictWord{9, 10, 546}, + dictWord{10, 10, 535}, + dictWord{11, 10, 681}, + dictWord{141, 10, 135}, + dictWord{4, 0, 990}, + dictWord{5, 0, 929}, + dictWord{6, 0, 340}, + dictWord{8, 0, 376}, + dictWord{8, 0, 807}, + dictWord{ + 8, + 0, + 963, + }, + dictWord{8, 0, 980}, + dictWord{138, 0, 1007}, + dictWord{134, 0, 1603}, + dictWord{140, 0, 250}, + dictWord{4, 11, 714}, + dictWord{133, 11, 469}, + dictWord{134, 10, 567}, + dictWord{136, 10, 445}, + dictWord{5, 0, 218}, + dictWord{7, 0, 1610}, + dictWord{8, 0, 646}, + dictWord{10, 0, 83}, + dictWord{11, 11, 138}, + dictWord{140, 11, 40}, + dictWord{7, 0, 1512}, + dictWord{135, 0, 1794}, + dictWord{135, 11, 1216}, + dictWord{11, 0, 0}, + dictWord{16, 0, 78}, + dictWord{132, 11, 718}, + dictWord{133, 0, 571}, + dictWord{132, 0, 455}, + dictWord{134, 0, 1012}, + dictWord{5, 11, 124}, + dictWord{5, 11, 144}, + dictWord{6, 11, 548}, + dictWord{7, 11, 15}, + dictWord{7, 11, 153}, + dictWord{137, 11, 629}, + dictWord{142, 11, 10}, + dictWord{6, 11, 75}, + dictWord{7, 11, 1531}, + dictWord{8, 11, 416}, + dictWord{9, 11, 240}, + dictWord{9, 11, 275}, + dictWord{10, 11, 100}, + dictWord{11, 11, 658}, + dictWord{11, 11, 979}, + dictWord{12, 11, 86}, + dictWord{13, 11, 468}, + dictWord{14, 11, 66}, + dictWord{14, 11, 207}, + dictWord{15, 11, 20}, + dictWord{15, 11, 25}, + dictWord{144, 11, 58}, + dictWord{132, 10, 577}, + dictWord{5, 11, 141}, + dictWord{ + 5, + 11, + 915, + }, + dictWord{6, 11, 1783}, + dictWord{7, 11, 211}, + dictWord{7, 11, 698}, + dictWord{7, 11, 1353}, + dictWord{9, 11, 83}, + dictWord{9, 11, 281}, + dictWord{ + 10, + 11, + 376, + }, + dictWord{10, 11, 431}, + dictWord{11, 11, 543}, + dictWord{12, 11, 664}, + dictWord{13, 11, 280}, + dictWord{13, 11, 428}, + dictWord{14, 11, 61}, + dictWord{ + 14, + 11, + 128, + }, + dictWord{17, 11, 52}, + dictWord{145, 11, 81}, + dictWord{6, 0, 161}, + dictWord{7, 0, 372}, + dictWord{137, 0, 597}, + dictWord{132, 0, 349}, + dictWord{ + 10, + 11, + 702, + }, + dictWord{139, 11, 245}, + dictWord{134, 0, 524}, + dictWord{134, 10, 174}, + dictWord{6, 0, 432}, + dictWord{9, 0, 751}, + dictWord{139, 0, 322}, + dictWord{147, 11, 94}, + dictWord{4, 11, 338}, + dictWord{133, 11, 400}, + dictWord{5, 0, 468}, + dictWord{10, 0, 325}, + dictWord{11, 0, 856}, + dictWord{12, 0, 345}, + dictWord{143, 0, 104}, + dictWord{133, 0, 223}, + dictWord{132, 0, 566}, + dictWord{4, 11, 221}, + dictWord{5, 11, 659}, + dictWord{5, 11, 989}, + dictWord{7, 11, 697}, + dictWord{7, 11, 1211}, + dictWord{138, 11, 284}, + dictWord{135, 11, 1070}, + dictWord{4, 0, 59}, + dictWord{135, 0, 1394}, + dictWord{6, 0, 436}, + dictWord{11, 0, 481}, + dictWord{5, 10, 878}, + dictWord{133, 10, 972}, + dictWord{4, 0, 48}, + dictWord{5, 0, 271}, + dictWord{135, 0, 953}, + dictWord{5, 0, 610}, + dictWord{136, 0, 457}, + dictWord{ + 4, + 0, + 773, + }, + dictWord{5, 0, 618}, + dictWord{137, 0, 756}, + dictWord{133, 0, 755}, + dictWord{135, 0, 1217}, + dictWord{138, 11, 507}, + dictWord{132, 10, 351}, + dictWord{132, 0, 197}, + dictWord{143, 11, 78}, + dictWord{4, 11, 188}, + dictWord{7, 11, 805}, + dictWord{11, 11, 276}, + dictWord{142, 11, 293}, + dictWord{ + 5, + 11, + 884, + }, + dictWord{139, 11, 991}, + dictWord{132, 10, 286}, + dictWord{10, 0, 259}, + dictWord{10, 0, 428}, + dictWord{7, 10, 438}, + dictWord{7, 10, 627}, + dictWord{ + 7, + 10, + 1516, + }, + dictWord{8, 10, 40}, + dictWord{9, 10, 56}, + dictWord{9, 10, 294}, + dictWord{11, 10, 969}, + dictWord{11, 10, 
995}, + dictWord{146, 10, 148}, + dictWord{ + 4, + 0, + 356, + }, + dictWord{5, 0, 217}, + dictWord{5, 0, 492}, + dictWord{5, 0, 656}, + dictWord{8, 0, 544}, + dictWord{136, 11, 544}, + dictWord{5, 0, 259}, + dictWord{6, 0, 1230}, + dictWord{7, 0, 414}, + dictWord{7, 0, 854}, + dictWord{142, 0, 107}, + dictWord{132, 0, 1007}, + dictWord{15, 0, 14}, + dictWord{144, 0, 5}, + dictWord{6, 0, 1580}, + dictWord{ + 132, + 10, + 738, + }, + dictWord{132, 11, 596}, + dictWord{132, 0, 673}, + dictWord{133, 10, 866}, + dictWord{6, 0, 1843}, + dictWord{135, 11, 1847}, + dictWord{4, 0, 165}, + dictWord{7, 0, 1398}, + dictWord{135, 0, 1829}, + dictWord{135, 11, 1634}, + dictWord{147, 11, 65}, + dictWord{6, 0, 885}, + dictWord{6, 0, 1009}, + dictWord{ + 137, + 0, + 809, + }, + dictWord{133, 10, 116}, + dictWord{132, 10, 457}, + dictWord{136, 11, 770}, + dictWord{9, 0, 498}, + dictWord{12, 0, 181}, + dictWord{10, 11, 361}, + dictWord{142, 11, 316}, + dictWord{134, 11, 595}, + dictWord{5, 0, 9}, + dictWord{7, 0, 297}, + dictWord{7, 0, 966}, + dictWord{140, 0, 306}, + dictWord{4, 11, 89}, + dictWord{ + 5, + 11, + 489, + }, + dictWord{6, 11, 315}, + dictWord{7, 11, 553}, + dictWord{7, 11, 1745}, + dictWord{138, 11, 243}, + dictWord{134, 0, 1487}, + dictWord{132, 0, 437}, + dictWord{ + 5, + 0, + 146, + }, + dictWord{6, 0, 411}, + dictWord{138, 0, 721}, + dictWord{5, 10, 527}, + dictWord{6, 10, 189}, + dictWord{135, 10, 859}, + dictWord{11, 10, 104}, + dictWord{ + 11, + 10, + 554, + }, + dictWord{15, 10, 60}, + dictWord{143, 10, 125}, + dictWord{6, 11, 1658}, + dictWord{9, 11, 3}, + dictWord{10, 11, 154}, + dictWord{11, 11, 641}, + dictWord{13, 11, 85}, + dictWord{13, 11, 201}, + dictWord{141, 11, 346}, + dictWord{6, 0, 177}, + dictWord{135, 0, 467}, + dictWord{134, 0, 1377}, + dictWord{ + 134, + 10, + 116, + }, + dictWord{136, 11, 645}, + dictWord{4, 11, 166}, + dictWord{5, 11, 505}, + dictWord{6, 11, 1670}, + dictWord{137, 11, 110}, + dictWord{133, 10, 487}, + dictWord{ + 4, + 10, + 86, + }, + dictWord{5, 10, 667}, + dictWord{5, 10, 753}, + dictWord{6, 10, 316}, + dictWord{6, 10, 455}, + dictWord{135, 10, 946}, + dictWord{133, 0, 200}, + dictWord{132, 0, 959}, + dictWord{6, 0, 1928}, + dictWord{134, 0, 1957}, + dictWord{139, 11, 203}, + dictWord{150, 10, 45}, + dictWord{4, 10, 79}, + dictWord{7, 10, 1773}, + dictWord{10, 10, 450}, + dictWord{11, 10, 589}, + dictWord{13, 10, 332}, + dictWord{13, 10, 493}, + dictWord{14, 10, 183}, + dictWord{14, 10, 334}, + dictWord{ + 14, + 10, + 362, + }, + dictWord{14, 10, 368}, + dictWord{14, 10, 376}, + dictWord{14, 10, 379}, + dictWord{19, 10, 90}, + dictWord{19, 10, 103}, + dictWord{19, 10, 127}, + dictWord{148, 10, 90}, + dictWord{6, 0, 1435}, + dictWord{135, 11, 1275}, + dictWord{134, 0, 481}, + dictWord{7, 11, 445}, + dictWord{8, 11, 307}, + dictWord{8, 11, 704}, + dictWord{10, 11, 41}, + dictWord{10, 11, 439}, + dictWord{11, 11, 237}, + dictWord{11, 11, 622}, + dictWord{140, 11, 201}, + dictWord{135, 11, 869}, + dictWord{ + 4, + 0, + 84, + }, + dictWord{7, 0, 1482}, + dictWord{10, 0, 76}, + dictWord{138, 0, 142}, + dictWord{11, 11, 277}, + dictWord{144, 11, 14}, + dictWord{135, 11, 1977}, + dictWord{ + 4, + 11, + 189, + }, + dictWord{5, 11, 713}, + dictWord{136, 11, 57}, + dictWord{133, 0, 1015}, + dictWord{138, 11, 371}, + dictWord{4, 0, 315}, + dictWord{5, 0, 507}, + dictWord{ + 135, + 0, + 1370, + }, + dictWord{4, 11, 552}, + dictWord{142, 10, 381}, + dictWord{9, 0, 759}, + dictWord{16, 0, 31}, + dictWord{16, 0, 39}, + dictWord{16, 0, 75}, + dictWord{18, 0, 24}, 
+ dictWord{20, 0, 42}, + dictWord{152, 0, 1}, + dictWord{134, 0, 712}, + dictWord{134, 0, 1722}, + dictWord{133, 10, 663}, + dictWord{133, 10, 846}, + dictWord{ + 8, + 0, + 222, + }, + dictWord{8, 0, 476}, + dictWord{9, 0, 238}, + dictWord{11, 0, 516}, + dictWord{11, 0, 575}, + dictWord{15, 0, 109}, + dictWord{146, 0, 100}, + dictWord{7, 0, 1402}, + dictWord{7, 0, 1414}, + dictWord{12, 0, 456}, + dictWord{5, 10, 378}, + dictWord{8, 10, 465}, + dictWord{9, 10, 286}, + dictWord{10, 10, 185}, + dictWord{10, 10, 562}, + dictWord{10, 10, 635}, + dictWord{11, 10, 31}, + dictWord{11, 10, 393}, + dictWord{13, 10, 312}, + dictWord{18, 10, 65}, + dictWord{18, 10, 96}, + dictWord{147, 10, 89}, + dictWord{4, 0, 986}, + dictWord{6, 0, 1958}, + dictWord{6, 0, 2032}, + dictWord{8, 0, 934}, + dictWord{138, 0, 985}, + dictWord{7, 10, 1880}, + dictWord{9, 10, 680}, + dictWord{139, 10, 798}, + dictWord{134, 10, 1770}, + dictWord{145, 11, 49}, + dictWord{132, 11, 614}, + dictWord{132, 10, 648}, + dictWord{5, 10, 945}, + dictWord{ + 6, + 10, + 1656, + }, + dictWord{6, 10, 1787}, + dictWord{7, 10, 167}, + dictWord{8, 10, 824}, + dictWord{9, 10, 391}, + dictWord{10, 10, 375}, + dictWord{139, 10, 185}, + dictWord{138, 11, 661}, + dictWord{7, 0, 1273}, + dictWord{135, 11, 1945}, + dictWord{7, 0, 706}, + dictWord{7, 0, 1058}, + dictWord{138, 0, 538}, + dictWord{7, 10, 1645}, + dictWord{8, 10, 352}, + dictWord{137, 10, 249}, + dictWord{132, 10, 152}, + dictWord{11, 0, 92}, + dictWord{11, 0, 196}, + dictWord{11, 0, 409}, + dictWord{11, 0, 450}, + dictWord{11, 0, 666}, + dictWord{11, 0, 777}, + dictWord{12, 0, 262}, + dictWord{13, 0, 385}, + dictWord{13, 0, 393}, + dictWord{15, 0, 115}, + dictWord{16, 0, 45}, + dictWord{145, 0, 82}, + dictWord{133, 10, 1006}, + dictWord{6, 0, 40}, + dictWord{135, 0, 1781}, + dictWord{9, 11, 614}, + dictWord{139, 11, 327}, + dictWord{5, 10, 420}, + dictWord{135, 10, 1449}, + dictWord{135, 0, 431}, + dictWord{10, 0, 97}, + dictWord{135, 10, 832}, + dictWord{6, 0, 423}, + dictWord{7, 0, 665}, + dictWord{ + 135, + 0, + 1210, + }, + dictWord{7, 0, 237}, + dictWord{8, 0, 664}, + dictWord{9, 0, 42}, + dictWord{9, 0, 266}, + dictWord{9, 0, 380}, + dictWord{9, 0, 645}, + dictWord{10, 0, 177}, + dictWord{ + 138, + 0, + 276, + }, + dictWord{7, 0, 264}, + dictWord{133, 10, 351}, + dictWord{8, 0, 213}, + dictWord{5, 10, 40}, + dictWord{7, 10, 598}, + dictWord{7, 10, 1638}, + dictWord{ + 9, + 10, + 166, + }, + dictWord{9, 10, 640}, + dictWord{9, 10, 685}, + dictWord{9, 10, 773}, + dictWord{11, 10, 215}, + dictWord{13, 10, 65}, + dictWord{14, 10, 172}, + dictWord{ + 14, + 10, + 317, + }, + dictWord{145, 10, 6}, + dictWord{5, 11, 84}, + dictWord{134, 11, 163}, + dictWord{8, 10, 60}, + dictWord{9, 10, 343}, + dictWord{139, 10, 769}, + dictWord{ + 137, + 0, + 455, + }, + dictWord{133, 11, 410}, + dictWord{8, 0, 906}, + dictWord{12, 0, 700}, + dictWord{12, 0, 706}, + dictWord{140, 0, 729}, + dictWord{21, 11, 33}, + dictWord{ + 150, + 11, + 40, + }, + dictWord{7, 10, 1951}, + dictWord{8, 10, 765}, + dictWord{8, 10, 772}, + dictWord{140, 10, 671}, + dictWord{7, 10, 108}, + dictWord{8, 10, 219}, + dictWord{ + 8, + 10, + 388, + }, + dictWord{9, 10, 639}, + dictWord{9, 10, 775}, + dictWord{11, 10, 275}, + dictWord{140, 10, 464}, + dictWord{5, 11, 322}, + dictWord{7, 11, 1941}, + dictWord{ + 8, + 11, + 186, + }, + dictWord{9, 11, 262}, + dictWord{10, 11, 187}, + dictWord{14, 11, 208}, + dictWord{146, 11, 130}, + dictWord{139, 0, 624}, + dictWord{8, 0, 574}, + dictWord{ + 5, + 11, + 227, + }, + dictWord{140, 
11, 29}, + dictWord{7, 11, 1546}, + dictWord{11, 11, 299}, + dictWord{142, 11, 407}, + dictWord{5, 10, 15}, + dictWord{6, 10, 56}, + dictWord{ + 7, + 10, + 1758, + }, + dictWord{8, 10, 500}, + dictWord{9, 10, 730}, + dictWord{11, 10, 331}, + dictWord{13, 10, 150}, + dictWord{142, 10, 282}, + dictWord{7, 11, 1395}, + dictWord{8, 11, 486}, + dictWord{9, 11, 236}, + dictWord{9, 11, 878}, + dictWord{10, 11, 218}, + dictWord{11, 11, 95}, + dictWord{19, 11, 17}, + dictWord{147, 11, 31}, + dictWord{135, 11, 2043}, + dictWord{4, 0, 354}, + dictWord{146, 11, 4}, + dictWord{140, 11, 80}, + dictWord{135, 0, 1558}, + dictWord{134, 10, 1886}, + dictWord{ + 5, + 10, + 205, + }, + dictWord{6, 10, 438}, + dictWord{137, 10, 711}, + dictWord{133, 11, 522}, + dictWord{133, 10, 534}, + dictWord{7, 0, 235}, + dictWord{7, 0, 1475}, + dictWord{ + 15, + 0, + 68, + }, + dictWord{146, 0, 120}, + dictWord{137, 10, 691}, + dictWord{4, 0, 942}, + dictWord{6, 0, 1813}, + dictWord{8, 0, 917}, + dictWord{10, 0, 884}, + dictWord{ + 12, + 0, + 696, + }, + dictWord{12, 0, 717}, + dictWord{12, 0, 723}, + dictWord{12, 0, 738}, + dictWord{12, 0, 749}, + dictWord{12, 0, 780}, + dictWord{16, 0, 97}, + dictWord{146, 0, 169}, + dictWord{6, 10, 443}, + dictWord{8, 11, 562}, + dictWord{9, 10, 237}, + dictWord{9, 10, 571}, + dictWord{9, 10, 695}, + dictWord{10, 10, 139}, + dictWord{11, 10, 715}, + dictWord{12, 10, 417}, + dictWord{141, 10, 421}, + dictWord{135, 0, 957}, + dictWord{133, 0, 830}, + dictWord{134, 11, 1771}, + dictWord{146, 0, 23}, + dictWord{ + 5, + 0, + 496, + }, + dictWord{6, 0, 694}, + dictWord{7, 0, 203}, + dictWord{7, 11, 1190}, + dictWord{137, 11, 620}, + dictWord{137, 11, 132}, + dictWord{6, 0, 547}, + dictWord{ + 134, + 0, + 1549, + }, + dictWord{8, 11, 258}, + dictWord{9, 11, 208}, + dictWord{137, 11, 359}, + dictWord{4, 0, 864}, + dictWord{5, 0, 88}, + dictWord{137, 0, 239}, + dictWord{ + 135, + 11, + 493, + }, + dictWord{4, 11, 317}, + dictWord{135, 11, 1279}, + dictWord{132, 11, 477}, + dictWord{4, 10, 578}, + dictWord{5, 11, 63}, + dictWord{133, 11, 509}, + dictWord{ + 7, + 0, + 650, + }, + dictWord{135, 0, 1310}, + dictWord{7, 0, 1076}, + dictWord{9, 0, 80}, + dictWord{11, 0, 78}, + dictWord{11, 0, 421}, + dictWord{11, 0, 534}, + dictWord{ + 140, + 0, + 545, + }, + dictWord{132, 11, 288}, + dictWord{12, 0, 553}, + dictWord{14, 0, 118}, + dictWord{133, 10, 923}, + dictWord{7, 0, 274}, + dictWord{11, 0, 479}, + dictWord{ + 139, + 0, + 507, + }, + dictWord{8, 11, 89}, + dictWord{8, 11, 620}, + dictWord{9, 11, 49}, + dictWord{10, 11, 774}, + dictWord{11, 11, 628}, + dictWord{12, 11, 322}, + dictWord{ + 143, + 11, + 124, + }, + dictWord{4, 0, 497}, + dictWord{135, 0, 1584}, + dictWord{7, 0, 261}, + dictWord{7, 0, 1115}, + dictWord{7, 0, 1354}, + dictWord{7, 0, 1404}, + dictWord{ + 7, + 0, + 1588, + }, + dictWord{7, 0, 1705}, + dictWord{7, 0, 1902}, + dictWord{9, 0, 465}, + dictWord{10, 0, 248}, + dictWord{10, 0, 349}, + dictWord{10, 0, 647}, + dictWord{11, 0, 527}, + dictWord{11, 0, 660}, + dictWord{11, 0, 669}, + dictWord{12, 0, 529}, + dictWord{13, 0, 305}, + dictWord{132, 10, 924}, + dictWord{133, 10, 665}, + dictWord{ + 136, + 0, + 13, + }, + dictWord{6, 0, 791}, + dictWord{138, 11, 120}, + dictWord{7, 0, 642}, + dictWord{8, 0, 250}, + dictWord{11, 0, 123}, + dictWord{11, 0, 137}, + dictWord{13, 0, 48}, + dictWord{142, 0, 95}, + dictWord{4, 10, 265}, + dictWord{7, 10, 807}, + dictWord{135, 10, 950}, + dictWord{5, 10, 93}, + dictWord{140, 10, 267}, + dictWord{135, 0, 1429}, + dictWord{4, 0, 949}, + 
dictWord{10, 0, 885}, + dictWord{10, 0, 891}, + dictWord{10, 0, 900}, + dictWord{10, 0, 939}, + dictWord{12, 0, 760}, + dictWord{142, 0, 449}, + dictWord{139, 11, 366}, + dictWord{132, 0, 818}, + dictWord{134, 11, 85}, + dictWord{135, 10, 994}, + dictWord{7, 0, 330}, + dictWord{5, 10, 233}, + dictWord{5, 10, 320}, + dictWord{6, 10, 140}, + dictWord{136, 10, 295}, + dictWord{4, 0, 1004}, + dictWord{8, 0, 982}, + dictWord{136, 0, 993}, + dictWord{133, 10, 978}, + dictWord{4, 10, 905}, + dictWord{6, 10, 1701}, + dictWord{137, 10, 843}, + dictWord{10, 0, 545}, + dictWord{140, 0, 301}, + dictWord{6, 0, 947}, + dictWord{134, 0, 1062}, + dictWord{ + 134, + 0, + 1188, + }, + dictWord{4, 0, 904}, + dictWord{5, 0, 794}, + dictWord{152, 10, 6}, + dictWord{134, 0, 1372}, + dictWord{135, 11, 608}, + dictWord{5, 11, 279}, + dictWord{ + 6, + 11, + 235, + }, + dictWord{7, 11, 468}, + dictWord{8, 11, 446}, + dictWord{9, 11, 637}, + dictWord{10, 11, 717}, + dictWord{11, 11, 738}, + dictWord{140, 11, 514}, + dictWord{ + 132, + 10, + 509, + }, + dictWord{5, 11, 17}, + dictWord{6, 11, 371}, + dictWord{137, 11, 528}, + dictWord{132, 0, 693}, + dictWord{4, 11, 115}, + dictWord{5, 11, 669}, + dictWord{ + 6, + 11, + 407, + }, + dictWord{8, 11, 311}, + dictWord{11, 11, 10}, + dictWord{141, 11, 5}, + dictWord{11, 0, 377}, + dictWord{7, 10, 273}, + dictWord{137, 11, 381}, + dictWord{ + 135, + 0, + 695, + }, + dictWord{7, 0, 386}, + dictWord{138, 0, 713}, + dictWord{135, 10, 1041}, + dictWord{134, 0, 1291}, + dictWord{6, 0, 7}, + dictWord{6, 0, 35}, + dictWord{ + 7, + 0, + 147, + }, + dictWord{7, 0, 1069}, + dictWord{7, 0, 1568}, + dictWord{7, 0, 1575}, + dictWord{7, 0, 1917}, + dictWord{8, 0, 43}, + dictWord{8, 0, 208}, + dictWord{9, 0, 128}, + dictWord{ + 9, + 0, + 866, + }, + dictWord{10, 0, 20}, + dictWord{11, 0, 981}, + dictWord{147, 0, 33}, + dictWord{7, 0, 893}, + dictWord{141, 0, 424}, + dictWord{139, 10, 234}, + dictWord{ + 150, + 11, + 56, + }, + dictWord{5, 11, 779}, + dictWord{5, 11, 807}, + dictWord{6, 11, 1655}, + dictWord{134, 11, 1676}, + dictWord{5, 10, 802}, + dictWord{7, 10, 2021}, + dictWord{136, 10, 805}, + dictWord{4, 11, 196}, + dictWord{5, 10, 167}, + dictWord{5, 11, 558}, + dictWord{5, 10, 899}, + dictWord{5, 11, 949}, + dictWord{6, 10, 410}, + dictWord{137, 10, 777}, + dictWord{137, 10, 789}, + dictWord{134, 10, 1705}, + dictWord{8, 0, 904}, + dictWord{140, 0, 787}, + dictWord{6, 0, 322}, + dictWord{9, 0, 552}, + dictWord{11, 0, 274}, + dictWord{13, 0, 209}, + dictWord{13, 0, 499}, + dictWord{14, 0, 85}, + dictWord{15, 0, 126}, + dictWord{145, 0, 70}, + dictWord{135, 10, 10}, + dictWord{ + 5, + 10, + 11, + }, + dictWord{6, 10, 117}, + dictWord{6, 10, 485}, + dictWord{7, 10, 1133}, + dictWord{9, 10, 582}, + dictWord{9, 10, 594}, + dictWord{11, 10, 21}, + dictWord{ + 11, + 10, + 818, + }, + dictWord{12, 10, 535}, + dictWord{141, 10, 86}, + dictWord{4, 10, 264}, + dictWord{7, 10, 1067}, + dictWord{8, 10, 204}, + dictWord{8, 10, 385}, + dictWord{139, 10, 953}, + dictWord{132, 11, 752}, + dictWord{138, 10, 56}, + dictWord{133, 10, 470}, + dictWord{6, 0, 1808}, + dictWord{8, 0, 83}, + dictWord{8, 0, 742}, + dictWord{8, 0, 817}, + dictWord{9, 0, 28}, + dictWord{9, 0, 29}, + dictWord{9, 0, 885}, + dictWord{10, 0, 387}, + dictWord{11, 0, 633}, + dictWord{11, 0, 740}, + dictWord{13, 0, 235}, + dictWord{13, 0, 254}, + dictWord{15, 0, 143}, + dictWord{143, 0, 146}, + dictWord{140, 0, 49}, + dictWord{134, 0, 1832}, + dictWord{4, 11, 227}, + dictWord{5, 11, 159}, + dictWord{5, 11, 409}, + dictWord{7, 11, 
80}, + dictWord{10, 11, 294}, + dictWord{10, 11, 479}, + dictWord{12, 11, 418}, + dictWord{14, 11, 50}, + dictWord{14, 11, 249}, + dictWord{142, 11, 295}, + dictWord{7, 11, 1470}, + dictWord{8, 11, 66}, + dictWord{8, 11, 137}, + dictWord{8, 11, 761}, + dictWord{9, 11, 638}, + dictWord{11, 11, 80}, + dictWord{11, 11, 212}, + dictWord{11, 11, 368}, + dictWord{11, 11, 418}, + dictWord{12, 11, 8}, + dictWord{13, 11, 15}, + dictWord{16, 11, 61}, + dictWord{17, 11, 59}, + dictWord{19, 11, 28}, + dictWord{148, 11, 84}, + dictWord{139, 10, 1015}, + dictWord{138, 11, 468}, + dictWord{135, 0, 421}, + dictWord{6, 0, 415}, + dictWord{ + 7, + 0, + 1049, + }, + dictWord{137, 0, 442}, + dictWord{6, 11, 38}, + dictWord{7, 11, 1220}, + dictWord{8, 11, 185}, + dictWord{8, 11, 256}, + dictWord{9, 11, 22}, + dictWord{ + 9, + 11, + 331, + }, + dictWord{10, 11, 738}, + dictWord{11, 11, 205}, + dictWord{11, 11, 540}, + dictWord{11, 11, 746}, + dictWord{13, 11, 399}, + dictWord{13, 11, 465}, + dictWord{ + 14, + 11, + 88, + }, + dictWord{142, 11, 194}, + dictWord{139, 0, 289}, + dictWord{133, 10, 715}, + dictWord{4, 0, 110}, + dictWord{10, 0, 415}, + dictWord{10, 0, 597}, + dictWord{142, 0, 206}, + dictWord{4, 11, 159}, + dictWord{6, 11, 115}, + dictWord{7, 11, 252}, + dictWord{7, 11, 257}, + dictWord{7, 11, 1928}, + dictWord{8, 11, 69}, + dictWord{ + 9, + 11, + 384, + }, + dictWord{10, 11, 91}, + dictWord{10, 11, 615}, + dictWord{12, 11, 375}, + dictWord{14, 11, 235}, + dictWord{18, 11, 117}, + dictWord{147, 11, 123}, + dictWord{5, 11, 911}, + dictWord{136, 11, 278}, + dictWord{7, 0, 205}, + dictWord{7, 0, 2000}, + dictWord{8, 10, 794}, + dictWord{9, 10, 400}, + dictWord{10, 10, 298}, + dictWord{142, 10, 228}, + dictWord{135, 11, 1774}, + dictWord{4, 11, 151}, + dictWord{7, 11, 1567}, + dictWord{8, 11, 351}, + dictWord{137, 11, 322}, + dictWord{ + 136, + 10, + 724, + }, + dictWord{133, 11, 990}, + dictWord{7, 0, 1539}, + dictWord{11, 0, 512}, + dictWord{13, 0, 205}, + dictWord{19, 0, 30}, + dictWord{22, 0, 36}, + dictWord{23, 0, 19}, + dictWord{135, 11, 1539}, + dictWord{5, 11, 194}, + dictWord{7, 11, 1662}, + dictWord{9, 11, 90}, + dictWord{140, 11, 180}, + dictWord{6, 10, 190}, + dictWord{ + 7, + 10, + 768, + }, + dictWord{135, 10, 1170}, + dictWord{134, 0, 1340}, + dictWord{4, 0, 283}, + dictWord{135, 0, 1194}, + dictWord{133, 11, 425}, + dictWord{133, 11, 971}, + dictWord{12, 0, 549}, + dictWord{14, 10, 67}, + dictWord{147, 10, 60}, + dictWord{135, 10, 1023}, + dictWord{134, 0, 1720}, + dictWord{138, 11, 587}, + dictWord{ + 5, + 11, + 72, + }, + dictWord{6, 11, 264}, + dictWord{7, 11, 21}, + dictWord{7, 11, 46}, + dictWord{7, 11, 2013}, + dictWord{8, 11, 215}, + dictWord{8, 11, 513}, + dictWord{10, 11, 266}, + dictWord{139, 11, 22}, + dictWord{5, 0, 319}, + dictWord{135, 0, 534}, + dictWord{6, 10, 137}, + dictWord{9, 10, 75}, + dictWord{9, 10, 253}, + dictWord{10, 10, 194}, + dictWord{138, 10, 444}, + dictWord{7, 0, 1180}, + dictWord{20, 0, 112}, + dictWord{6, 11, 239}, + dictWord{7, 11, 118}, + dictWord{10, 11, 95}, + dictWord{11, 11, 603}, + dictWord{13, 11, 443}, + dictWord{14, 11, 160}, + dictWord{143, 11, 4}, + dictWord{134, 11, 431}, + dictWord{5, 11, 874}, + dictWord{6, 11, 1677}, + dictWord{ + 11, + 10, + 643, + }, + dictWord{12, 10, 115}, + dictWord{143, 11, 0}, + dictWord{134, 0, 967}, + dictWord{6, 11, 65}, + dictWord{7, 11, 939}, + dictWord{7, 11, 1172}, + dictWord{ + 7, + 11, + 1671, + }, + dictWord{9, 11, 540}, + dictWord{10, 11, 696}, + dictWord{11, 11, 265}, + dictWord{11, 11, 732}, + 
dictWord{11, 11, 928}, + dictWord{11, 11, 937}, + dictWord{ + 12, + 11, + 399, + }, + dictWord{13, 11, 438}, + dictWord{149, 11, 19}, + dictWord{137, 11, 200}, + dictWord{135, 0, 1940}, + dictWord{5, 10, 760}, + dictWord{7, 10, 542}, + dictWord{8, 10, 135}, + dictWord{136, 10, 496}, + dictWord{140, 11, 44}, + dictWord{7, 11, 1655}, + dictWord{136, 11, 305}, + dictWord{7, 10, 319}, + dictWord{ + 7, + 10, + 355, + }, + dictWord{7, 10, 763}, + dictWord{10, 10, 389}, + dictWord{145, 10, 43}, + dictWord{136, 0, 735}, + dictWord{138, 10, 786}, + dictWord{137, 11, 19}, + dictWord{132, 11, 696}, + dictWord{5, 0, 132}, + dictWord{9, 0, 486}, + dictWord{9, 0, 715}, + dictWord{10, 0, 458}, + dictWord{11, 0, 373}, + dictWord{11, 0, 668}, + dictWord{ + 11, + 0, + 795, + }, + dictWord{11, 0, 897}, + dictWord{12, 0, 272}, + dictWord{12, 0, 424}, + dictWord{12, 0, 539}, + dictWord{12, 0, 558}, + dictWord{14, 0, 245}, + dictWord{ + 14, + 0, + 263, + }, + dictWord{14, 0, 264}, + dictWord{14, 0, 393}, + dictWord{142, 0, 403}, + dictWord{10, 0, 38}, + dictWord{139, 0, 784}, + dictWord{132, 0, 838}, + dictWord{ + 4, + 11, + 302, + }, + dictWord{135, 11, 1766}, + dictWord{133, 0, 379}, + dictWord{5, 0, 8}, + dictWord{6, 0, 89}, + dictWord{6, 0, 400}, + dictWord{7, 0, 1569}, + dictWord{7, 0, 1623}, + dictWord{7, 0, 1850}, + dictWord{8, 0, 218}, + dictWord{8, 0, 422}, + dictWord{9, 0, 570}, + dictWord{10, 0, 626}, + dictWord{4, 11, 726}, + dictWord{133, 11, 630}, + dictWord{ + 4, + 0, + 1017, + }, + dictWord{138, 0, 660}, + dictWord{6, 0, 387}, + dictWord{7, 0, 882}, + dictWord{141, 0, 111}, + dictWord{6, 0, 224}, + dictWord{7, 0, 877}, + dictWord{ + 137, + 0, + 647, + }, + dictWord{4, 10, 58}, + dictWord{5, 10, 286}, + dictWord{6, 10, 319}, + dictWord{7, 10, 402}, + dictWord{7, 10, 1254}, + dictWord{7, 10, 1903}, + dictWord{ + 8, + 10, + 356, + }, + dictWord{140, 10, 408}, + dictWord{135, 0, 790}, + dictWord{9, 0, 510}, + dictWord{10, 0, 53}, + dictWord{4, 10, 389}, + dictWord{9, 10, 181}, + dictWord{ + 10, + 10, + 29, + }, + dictWord{10, 10, 816}, + dictWord{11, 10, 311}, + dictWord{11, 10, 561}, + dictWord{12, 10, 67}, + dictWord{141, 10, 181}, + dictWord{142, 0, 458}, + dictWord{ + 6, + 11, + 118, + }, + dictWord{7, 11, 215}, + dictWord{7, 11, 1521}, + dictWord{140, 11, 11}, + dictWord{134, 0, 954}, + dictWord{135, 0, 394}, + dictWord{134, 0, 1367}, + dictWord{5, 11, 225}, + dictWord{133, 10, 373}, + dictWord{132, 0, 882}, + dictWord{7, 0, 1409}, + dictWord{135, 10, 1972}, + dictWord{135, 10, 1793}, + dictWord{ + 4, + 11, + 370, + }, + dictWord{5, 11, 756}, + dictWord{135, 11, 1326}, + dictWord{150, 11, 13}, + dictWord{7, 11, 354}, + dictWord{10, 11, 410}, + dictWord{139, 11, 815}, + dictWord{6, 11, 1662}, + dictWord{7, 11, 48}, + dictWord{8, 11, 771}, + dictWord{10, 11, 116}, + dictWord{13, 11, 104}, + dictWord{14, 11, 105}, + dictWord{14, 11, 184}, + dictWord{15, 11, 168}, + dictWord{19, 11, 92}, + dictWord{148, 11, 68}, + dictWord{7, 0, 124}, + dictWord{136, 0, 38}, + dictWord{5, 0, 261}, + dictWord{7, 0, 78}, + dictWord{ + 7, + 0, + 199, + }, + dictWord{8, 0, 815}, + dictWord{9, 0, 126}, + dictWord{10, 0, 342}, + dictWord{140, 0, 647}, + dictWord{4, 0, 628}, + dictWord{140, 0, 724}, + dictWord{7, 0, 266}, + dictWord{8, 0, 804}, + dictWord{7, 10, 1651}, + dictWord{145, 10, 89}, + dictWord{135, 0, 208}, + dictWord{134, 0, 1178}, + dictWord{6, 0, 79}, + dictWord{135, 0, 1519}, + dictWord{132, 10, 672}, + dictWord{133, 10, 737}, + dictWord{136, 0, 741}, + dictWord{132, 11, 120}, + dictWord{4, 0, 710}, + 
dictWord{6, 0, 376}, + dictWord{ + 134, + 0, + 606, + }, + dictWord{134, 0, 1347}, + dictWord{134, 0, 1494}, + dictWord{6, 0, 850}, + dictWord{6, 0, 1553}, + dictWord{137, 0, 821}, + dictWord{5, 10, 145}, + dictWord{ + 134, + 11, + 593, + }, + dictWord{7, 0, 1311}, + dictWord{140, 0, 135}, + dictWord{4, 0, 467}, + dictWord{5, 0, 405}, + dictWord{134, 0, 544}, + dictWord{5, 11, 820}, + dictWord{ + 135, + 11, + 931, + }, + dictWord{6, 0, 100}, + dictWord{7, 0, 244}, + dictWord{7, 0, 632}, + dictWord{7, 0, 1609}, + dictWord{8, 0, 178}, + dictWord{8, 0, 638}, + dictWord{141, 0, 58}, + dictWord{4, 10, 387}, + dictWord{135, 10, 1288}, + dictWord{6, 11, 151}, + dictWord{6, 11, 1675}, + dictWord{7, 11, 383}, + dictWord{151, 11, 10}, + dictWord{ + 132, + 0, + 481, + }, + dictWord{135, 10, 550}, + dictWord{134, 0, 1378}, + dictWord{6, 11, 1624}, + dictWord{11, 11, 11}, + dictWord{12, 11, 422}, + dictWord{13, 11, 262}, + dictWord{142, 11, 360}, + dictWord{133, 0, 791}, + dictWord{4, 11, 43}, + dictWord{5, 11, 344}, + dictWord{133, 11, 357}, + dictWord{7, 0, 1227}, + dictWord{140, 0, 978}, + dictWord{7, 0, 686}, + dictWord{8, 0, 33}, + dictWord{8, 0, 238}, + dictWord{10, 0, 616}, + dictWord{11, 0, 467}, + dictWord{11, 0, 881}, + dictWord{13, 0, 217}, + dictWord{ + 13, + 0, + 253, + }, + dictWord{142, 0, 268}, + dictWord{137, 0, 857}, + dictWord{8, 0, 467}, + dictWord{8, 0, 1006}, + dictWord{7, 11, 148}, + dictWord{8, 11, 284}, + dictWord{ + 141, + 11, + 63, + }, + dictWord{4, 10, 576}, + dictWord{135, 10, 1263}, + dictWord{133, 11, 888}, + dictWord{5, 10, 919}, + dictWord{134, 10, 1673}, + dictWord{20, 10, 37}, + dictWord{148, 11, 37}, + dictWord{132, 0, 447}, + dictWord{132, 11, 711}, + dictWord{4, 0, 128}, + dictWord{5, 0, 415}, + dictWord{6, 0, 462}, + dictWord{7, 0, 294}, + dictWord{ + 7, + 0, + 578, + }, + dictWord{10, 0, 710}, + dictWord{139, 0, 86}, + dictWord{4, 10, 82}, + dictWord{5, 10, 333}, + dictWord{5, 10, 904}, + dictWord{6, 10, 207}, + dictWord{7, 10, 325}, + dictWord{7, 10, 1726}, + dictWord{8, 10, 101}, + dictWord{10, 10, 778}, + dictWord{139, 10, 220}, + dictWord{136, 0, 587}, + dictWord{137, 11, 440}, + dictWord{ + 133, + 10, + 903, + }, + dictWord{6, 0, 427}, + dictWord{7, 0, 1018}, + dictWord{138, 0, 692}, + dictWord{4, 0, 195}, + dictWord{135, 0, 802}, + dictWord{140, 10, 147}, + dictWord{ + 134, + 0, + 1546, + }, + dictWord{134, 0, 684}, + dictWord{132, 10, 705}, + dictWord{136, 0, 345}, + dictWord{11, 11, 678}, + dictWord{140, 11, 307}, + dictWord{ + 133, + 0, + 365, + }, + dictWord{134, 0, 1683}, + dictWord{4, 11, 65}, + dictWord{5, 11, 479}, + dictWord{5, 11, 1004}, + dictWord{7, 11, 1913}, + dictWord{8, 11, 317}, + dictWord{ + 9, + 11, + 302, + }, + dictWord{10, 11, 612}, + dictWord{141, 11, 22}, + dictWord{138, 0, 472}, + dictWord{4, 11, 261}, + dictWord{135, 11, 510}, + dictWord{134, 10, 90}, + dictWord{142, 0, 433}, + dictWord{151, 0, 28}, + dictWord{4, 11, 291}, + dictWord{7, 11, 101}, + dictWord{9, 11, 515}, + dictWord{12, 11, 152}, + dictWord{12, 11, 443}, + dictWord{13, 11, 392}, + dictWord{142, 11, 357}, + dictWord{140, 0, 997}, + dictWord{5, 0, 3}, + dictWord{8, 0, 578}, + dictWord{9, 0, 118}, + dictWord{10, 0, 705}, + dictWord{ + 141, + 0, + 279, + }, + dictWord{135, 11, 1266}, + dictWord{7, 10, 813}, + dictWord{12, 10, 497}, + dictWord{141, 10, 56}, + dictWord{133, 0, 229}, + dictWord{6, 10, 125}, + dictWord{135, 10, 1277}, + dictWord{8, 0, 102}, + dictWord{10, 0, 578}, + dictWord{10, 0, 672}, + dictWord{12, 0, 496}, + dictWord{13, 0, 408}, + dictWord{14, 0, 
121}, + dictWord{17, 0, 106}, + dictWord{151, 10, 12}, + dictWord{6, 0, 866}, + dictWord{134, 0, 1080}, + dictWord{136, 0, 1022}, + dictWord{4, 11, 130}, + dictWord{135, 11, 843}, + dictWord{5, 11, 42}, + dictWord{5, 11, 879}, + dictWord{7, 11, 245}, + dictWord{7, 11, 324}, + dictWord{7, 11, 1532}, + dictWord{11, 11, 463}, + dictWord{11, 11, 472}, + dictWord{13, 11, 363}, + dictWord{144, 11, 52}, + dictWord{150, 0, 55}, + dictWord{8, 0, 115}, + dictWord{8, 0, 350}, + dictWord{9, 0, 489}, + dictWord{10, 0, 128}, + dictWord{ + 11, + 0, + 306, + }, + dictWord{12, 0, 373}, + dictWord{14, 0, 30}, + dictWord{17, 0, 79}, + dictWord{19, 0, 80}, + dictWord{4, 11, 134}, + dictWord{133, 11, 372}, + dictWord{ + 134, + 0, + 657, + }, + dictWord{134, 0, 933}, + dictWord{135, 11, 1147}, + dictWord{4, 0, 230}, + dictWord{133, 0, 702}, + dictWord{134, 0, 1728}, + dictWord{4, 0, 484}, + dictWord{ + 18, + 0, + 26, + }, + dictWord{19, 0, 42}, + dictWord{20, 0, 43}, + dictWord{21, 0, 0}, + dictWord{23, 0, 27}, + dictWord{152, 0, 14}, + dictWord{7, 0, 185}, + dictWord{135, 0, 703}, + dictWord{ + 6, + 0, + 417, + }, + dictWord{10, 0, 618}, + dictWord{7, 10, 1106}, + dictWord{9, 10, 770}, + dictWord{11, 10, 112}, + dictWord{140, 10, 413}, + dictWord{134, 0, 803}, + dictWord{132, 11, 644}, + dictWord{134, 0, 1262}, + dictWord{7, 11, 540}, + dictWord{12, 10, 271}, + dictWord{145, 10, 109}, + dictWord{135, 11, 123}, + dictWord{ + 132, + 0, + 633, + }, + dictWord{134, 11, 623}, + dictWord{4, 11, 908}, + dictWord{5, 11, 359}, + dictWord{5, 11, 508}, + dictWord{6, 11, 1723}, + dictWord{7, 11, 343}, + dictWord{ + 7, + 11, + 1996, + }, + dictWord{135, 11, 2026}, + dictWord{135, 0, 479}, + dictWord{10, 0, 262}, + dictWord{7, 10, 304}, + dictWord{9, 10, 646}, + dictWord{9, 10, 862}, + dictWord{ + 11, + 10, + 696, + }, + dictWord{12, 10, 208}, + dictWord{15, 10, 79}, + dictWord{147, 10, 108}, + dictWord{4, 11, 341}, + dictWord{135, 11, 480}, + dictWord{134, 0, 830}, + dictWord{5, 0, 70}, + dictWord{5, 0, 622}, + dictWord{6, 0, 334}, + dictWord{7, 0, 1032}, + dictWord{9, 0, 171}, + dictWord{11, 0, 26}, + dictWord{11, 0, 213}, + dictWord{ + 11, + 0, + 637, + }, + dictWord{11, 0, 707}, + dictWord{12, 0, 202}, + dictWord{12, 0, 380}, + dictWord{13, 0, 226}, + dictWord{13, 0, 355}, + dictWord{14, 0, 222}, + dictWord{145, 0, 42}, + dictWord{135, 10, 981}, + dictWord{143, 0, 217}, + dictWord{137, 11, 114}, + dictWord{4, 0, 23}, + dictWord{4, 0, 141}, + dictWord{5, 0, 313}, + dictWord{5, 0, 1014}, + dictWord{6, 0, 50}, + dictWord{6, 0, 51}, + dictWord{7, 0, 142}, + dictWord{7, 0, 384}, + dictWord{7, 0, 559}, + dictWord{8, 0, 640}, + dictWord{9, 0, 460}, + dictWord{9, 0, 783}, + dictWord{11, 0, 741}, + dictWord{12, 0, 183}, + dictWord{141, 0, 488}, + dictWord{141, 0, 360}, + dictWord{7, 0, 1586}, + dictWord{7, 11, 1995}, + dictWord{8, 11, 299}, + dictWord{11, 11, 890}, + dictWord{140, 11, 674}, + dictWord{132, 10, 434}, + dictWord{7, 0, 652}, + dictWord{134, 10, 550}, + dictWord{7, 0, 766}, + dictWord{5, 10, 553}, + dictWord{138, 10, 824}, + dictWord{7, 0, 737}, + dictWord{8, 0, 298}, + dictWord{136, 10, 452}, + dictWord{4, 11, 238}, + dictWord{5, 11, 503}, + dictWord{6, 11, 179}, + dictWord{7, 11, 2003}, + dictWord{8, 11, 381}, + dictWord{8, 11, 473}, + dictWord{9, 11, 149}, + dictWord{10, 11, 183}, + dictWord{15, 11, 45}, + dictWord{143, 11, 86}, + dictWord{133, 10, 292}, + dictWord{5, 0, 222}, + dictWord{9, 0, 655}, + dictWord{138, 0, 534}, + dictWord{138, 10, 135}, + dictWord{4, 11, 121}, + dictWord{5, 11, 156}, + dictWord{5, 
11, 349}, + dictWord{9, 11, 136}, + dictWord{10, 11, 605}, + dictWord{14, 11, 342}, + dictWord{147, 11, 107}, + dictWord{137, 0, 906}, + dictWord{6, 0, 1013}, + dictWord{134, 0, 1250}, + dictWord{6, 0, 1956}, + dictWord{6, 0, 2009}, + dictWord{8, 0, 991}, + dictWord{144, 0, 120}, + dictWord{135, 11, 1192}, + dictWord{ + 138, + 0, + 503, + }, + dictWord{5, 0, 154}, + dictWord{7, 0, 1491}, + dictWord{10, 0, 379}, + dictWord{138, 0, 485}, + dictWord{6, 0, 1867}, + dictWord{6, 0, 1914}, + dictWord{6, 0, 1925}, + dictWord{9, 0, 917}, + dictWord{9, 0, 925}, + dictWord{9, 0, 932}, + dictWord{9, 0, 951}, + dictWord{9, 0, 1007}, + dictWord{9, 0, 1013}, + dictWord{12, 0, 806}, + dictWord{ + 12, + 0, + 810, + }, + dictWord{12, 0, 814}, + dictWord{12, 0, 816}, + dictWord{12, 0, 824}, + dictWord{12, 0, 832}, + dictWord{12, 0, 837}, + dictWord{12, 0, 863}, + dictWord{ + 12, + 0, + 868, + }, + dictWord{12, 0, 870}, + dictWord{12, 0, 889}, + dictWord{12, 0, 892}, + dictWord{12, 0, 900}, + dictWord{12, 0, 902}, + dictWord{12, 0, 908}, + dictWord{12, 0, 933}, + dictWord{12, 0, 942}, + dictWord{12, 0, 949}, + dictWord{12, 0, 954}, + dictWord{15, 0, 175}, + dictWord{15, 0, 203}, + dictWord{15, 0, 213}, + dictWord{15, 0, 218}, + dictWord{15, 0, 225}, + dictWord{15, 0, 231}, + dictWord{15, 0, 239}, + dictWord{15, 0, 248}, + dictWord{15, 0, 252}, + dictWord{18, 0, 190}, + dictWord{18, 0, 204}, + dictWord{ + 18, + 0, + 215, + }, + dictWord{18, 0, 216}, + dictWord{18, 0, 222}, + dictWord{18, 0, 225}, + dictWord{18, 0, 230}, + dictWord{18, 0, 239}, + dictWord{18, 0, 241}, + dictWord{ + 21, + 0, + 42, + }, + dictWord{21, 0, 43}, + dictWord{21, 0, 44}, + dictWord{21, 0, 45}, + dictWord{21, 0, 46}, + dictWord{21, 0, 53}, + dictWord{24, 0, 27}, + dictWord{152, 0, 31}, + dictWord{ + 133, + 0, + 716, + }, + dictWord{135, 0, 844}, + dictWord{4, 0, 91}, + dictWord{5, 0, 388}, + dictWord{5, 0, 845}, + dictWord{6, 0, 206}, + dictWord{6, 0, 252}, + dictWord{6, 0, 365}, + dictWord{ + 7, + 0, + 136, + }, + dictWord{7, 0, 531}, + dictWord{136, 0, 621}, + dictWord{7, 10, 393}, + dictWord{10, 10, 603}, + dictWord{139, 10, 206}, + dictWord{6, 11, 80}, + dictWord{ + 6, + 11, + 1694, + }, + dictWord{7, 11, 173}, + dictWord{7, 11, 1974}, + dictWord{9, 11, 547}, + dictWord{10, 11, 730}, + dictWord{14, 11, 18}, + dictWord{150, 11, 39}, + dictWord{137, 0, 748}, + dictWord{4, 11, 923}, + dictWord{134, 11, 1711}, + dictWord{4, 10, 912}, + dictWord{137, 10, 232}, + dictWord{7, 10, 98}, + dictWord{7, 10, 1973}, + dictWord{136, 10, 716}, + dictWord{14, 0, 103}, + dictWord{133, 10, 733}, + dictWord{132, 11, 595}, + dictWord{12, 0, 158}, + dictWord{18, 0, 8}, + dictWord{19, 0, 62}, + dictWord{20, 0, 6}, + dictWord{22, 0, 4}, + dictWord{23, 0, 2}, + dictWord{23, 0, 9}, + dictWord{5, 11, 240}, + dictWord{6, 11, 459}, + dictWord{7, 11, 12}, + dictWord{7, 11, 114}, + dictWord{7, 11, 502}, + dictWord{7, 11, 1751}, + dictWord{7, 11, 1753}, + dictWord{7, 11, 1805}, + dictWord{8, 11, 658}, + dictWord{9, 11, 1}, + dictWord{11, 11, 959}, + dictWord{13, 11, 446}, + dictWord{142, 11, 211}, + dictWord{135, 0, 576}, + dictWord{5, 0, 771}, + dictWord{5, 0, 863}, + dictWord{5, 0, 898}, + dictWord{6, 0, 648}, + dictWord{ + 6, + 0, + 1632, + }, + dictWord{6, 0, 1644}, + dictWord{134, 0, 1780}, + dictWord{133, 0, 331}, + dictWord{7, 11, 633}, + dictWord{7, 11, 905}, + dictWord{7, 11, 909}, + dictWord{ + 7, + 11, + 1538, + }, + dictWord{9, 11, 767}, + dictWord{140, 11, 636}, + dictWord{140, 0, 632}, + dictWord{5, 0, 107}, + dictWord{7, 0, 201}, + dictWord{136, 0, 
518}, + dictWord{ + 6, + 0, + 446, + }, + dictWord{7, 0, 1817}, + dictWord{134, 11, 490}, + dictWord{9, 0, 851}, + dictWord{141, 0, 510}, + dictWord{7, 11, 250}, + dictWord{8, 11, 506}, + dictWord{ + 136, + 11, + 507, + }, + dictWord{4, 0, 504}, + dictWord{137, 10, 72}, + dictWord{132, 11, 158}, + dictWord{4, 11, 140}, + dictWord{7, 11, 362}, + dictWord{8, 11, 209}, + dictWord{ + 9, + 11, + 10, + }, + dictWord{9, 11, 160}, + dictWord{9, 11, 503}, + dictWord{10, 11, 689}, + dictWord{11, 11, 350}, + dictWord{11, 11, 553}, + dictWord{11, 11, 725}, + dictWord{ + 12, + 11, + 252, + }, + dictWord{12, 11, 583}, + dictWord{13, 11, 192}, + dictWord{13, 11, 352}, + dictWord{14, 11, 269}, + dictWord{14, 11, 356}, + dictWord{148, 11, 50}, + dictWord{6, 11, 597}, + dictWord{135, 11, 1318}, + dictWord{135, 10, 1454}, + dictWord{5, 0, 883}, + dictWord{5, 0, 975}, + dictWord{8, 0, 392}, + dictWord{148, 0, 7}, + dictWord{6, 11, 228}, + dictWord{7, 11, 1341}, + dictWord{9, 11, 408}, + dictWord{138, 11, 343}, + dictWord{11, 11, 348}, + dictWord{11, 10, 600}, + dictWord{12, 11, 99}, + dictWord{13, 10, 245}, + dictWord{18, 11, 1}, + dictWord{18, 11, 11}, + dictWord{147, 11, 4}, + dictWord{134, 11, 296}, + dictWord{5, 0, 922}, + dictWord{134, 0, 1707}, + dictWord{132, 11, 557}, + dictWord{4, 11, 548}, + dictWord{7, 10, 164}, + dictWord{7, 10, 1571}, + dictWord{9, 10, 107}, + dictWord{140, 10, 225}, + dictWord{ + 7, + 11, + 197, + }, + dictWord{8, 11, 142}, + dictWord{8, 11, 325}, + dictWord{9, 11, 150}, + dictWord{9, 11, 596}, + dictWord{10, 11, 350}, + dictWord{10, 11, 353}, + dictWord{ + 11, + 11, + 74, + }, + dictWord{11, 11, 315}, + dictWord{14, 11, 423}, + dictWord{143, 11, 141}, + dictWord{5, 0, 993}, + dictWord{7, 0, 515}, + dictWord{137, 0, 91}, + dictWord{4, 0, 131}, + dictWord{8, 0, 200}, + dictWord{5, 10, 484}, + dictWord{5, 10, 510}, + dictWord{6, 10, 434}, + dictWord{7, 10, 1000}, + dictWord{7, 10, 1098}, + dictWord{136, 10, 2}, + dictWord{152, 0, 10}, + dictWord{4, 11, 62}, + dictWord{5, 11, 83}, + dictWord{6, 11, 399}, + dictWord{6, 11, 579}, + dictWord{7, 11, 692}, + dictWord{7, 11, 846}, + dictWord{ + 7, + 11, + 1015, + }, + dictWord{7, 11, 1799}, + dictWord{8, 11, 403}, + dictWord{9, 11, 394}, + dictWord{10, 11, 133}, + dictWord{12, 11, 4}, + dictWord{12, 11, 297}, + dictWord{ + 12, + 11, + 452, + }, + dictWord{16, 11, 81}, + dictWord{18, 11, 19}, + dictWord{18, 11, 25}, + dictWord{21, 11, 14}, + dictWord{22, 11, 12}, + dictWord{151, 11, 18}, + dictWord{ + 140, + 11, + 459, + }, + dictWord{132, 11, 177}, + dictWord{7, 0, 1433}, + dictWord{9, 0, 365}, + dictWord{137, 11, 365}, + dictWord{132, 10, 460}, + dictWord{5, 0, 103}, + dictWord{ + 6, + 0, + 2004, + }, + dictWord{7, 0, 921}, + dictWord{8, 0, 580}, + dictWord{8, 0, 593}, + dictWord{8, 0, 630}, + dictWord{10, 0, 28}, + dictWord{5, 11, 411}, + dictWord{ + 135, + 11, + 653, + }, + dictWord{4, 10, 932}, + dictWord{133, 10, 891}, + dictWord{4, 0, 911}, + dictWord{5, 0, 867}, + dictWord{5, 0, 1013}, + dictWord{7, 0, 2034}, + dictWord{8, 0, 798}, + dictWord{136, 0, 813}, + dictWord{7, 11, 439}, + dictWord{10, 11, 727}, + dictWord{11, 11, 260}, + dictWord{139, 11, 684}, + dictWord{136, 10, 625}, + dictWord{ + 5, + 11, + 208, + }, + dictWord{7, 11, 753}, + dictWord{135, 11, 1528}, + dictWord{5, 0, 461}, + dictWord{7, 0, 1925}, + dictWord{12, 0, 39}, + dictWord{13, 0, 265}, + dictWord{ + 13, + 0, + 439, + }, + dictWord{134, 10, 76}, + dictWord{6, 0, 853}, + dictWord{8, 10, 92}, + dictWord{137, 10, 221}, + dictWord{5, 0, 135}, + dictWord{6, 0, 519}, 
+ dictWord{7, 0, 1722}, + dictWord{10, 0, 271}, + dictWord{11, 0, 261}, + dictWord{145, 0, 54}, + dictWord{139, 11, 814}, + dictWord{14, 0, 338}, + dictWord{148, 0, 81}, + dictWord{4, 0, 300}, + dictWord{133, 0, 436}, + dictWord{5, 0, 419}, + dictWord{5, 0, 687}, + dictWord{7, 0, 864}, + dictWord{9, 0, 470}, + dictWord{135, 11, 864}, + dictWord{9, 0, 836}, + dictWord{ + 133, + 11, + 242, + }, + dictWord{134, 0, 1937}, + dictWord{4, 10, 763}, + dictWord{133, 11, 953}, + dictWord{132, 10, 622}, + dictWord{132, 0, 393}, + dictWord{ + 133, + 10, + 253, + }, + dictWord{8, 0, 357}, + dictWord{10, 0, 745}, + dictWord{14, 0, 426}, + dictWord{17, 0, 94}, + dictWord{19, 0, 57}, + dictWord{135, 10, 546}, + dictWord{5, 11, 615}, + dictWord{146, 11, 37}, + dictWord{9, 10, 73}, + dictWord{10, 10, 110}, + dictWord{14, 10, 185}, + dictWord{145, 10, 119}, + dictWord{11, 0, 703}, + dictWord{7, 10, 624}, + dictWord{7, 10, 916}, + dictWord{10, 10, 256}, + dictWord{139, 10, 87}, + dictWord{133, 11, 290}, + dictWord{5, 10, 212}, + dictWord{12, 10, 35}, + dictWord{ + 141, + 10, + 382, + }, + dictWord{132, 11, 380}, + dictWord{5, 11, 52}, + dictWord{7, 11, 277}, + dictWord{9, 11, 368}, + dictWord{139, 11, 791}, + dictWord{133, 0, 387}, + dictWord{ + 10, + 11, + 138, + }, + dictWord{139, 11, 476}, + dictWord{4, 0, 6}, + dictWord{5, 0, 708}, + dictWord{136, 0, 75}, + dictWord{7, 0, 1351}, + dictWord{9, 0, 581}, + dictWord{10, 0, 639}, + dictWord{11, 0, 453}, + dictWord{140, 0, 584}, + dictWord{132, 0, 303}, + dictWord{138, 0, 772}, + dictWord{135, 10, 1175}, + dictWord{4, 0, 749}, + dictWord{ + 5, + 10, + 816, + }, + dictWord{6, 11, 256}, + dictWord{7, 11, 307}, + dictWord{7, 11, 999}, + dictWord{7, 11, 1481}, + dictWord{7, 11, 1732}, + dictWord{7, 11, 1738}, + dictWord{ + 8, + 11, + 265, + }, + dictWord{9, 11, 414}, + dictWord{11, 11, 316}, + dictWord{12, 11, 52}, + dictWord{13, 11, 420}, + dictWord{147, 11, 100}, + dictWord{135, 11, 1296}, + dictWord{ + 6, + 0, + 1065, + }, + dictWord{5, 10, 869}, + dictWord{5, 10, 968}, + dictWord{6, 10, 1626}, + dictWord{8, 10, 734}, + dictWord{136, 10, 784}, + dictWord{4, 10, 542}, + dictWord{ + 6, + 10, + 1716, + }, + dictWord{6, 10, 1727}, + dictWord{7, 10, 1082}, + dictWord{7, 10, 1545}, + dictWord{8, 10, 56}, + dictWord{8, 10, 118}, + dictWord{8, 10, 412}, + dictWord{ + 8, + 10, + 564, + }, + dictWord{9, 10, 888}, + dictWord{9, 10, 908}, + dictWord{10, 10, 50}, + dictWord{10, 10, 423}, + dictWord{11, 10, 685}, + dictWord{11, 10, 697}, + dictWord{11, 10, 933}, + dictWord{12, 10, 299}, + dictWord{13, 10, 126}, + dictWord{13, 10, 136}, + dictWord{13, 10, 170}, + dictWord{141, 10, 190}, + dictWord{ + 134, + 0, + 226, + }, + dictWord{4, 0, 106}, + dictWord{7, 0, 310}, + dictWord{11, 0, 717}, + dictWord{133, 11, 723}, + dictWord{5, 0, 890}, + dictWord{5, 0, 988}, + dictWord{4, 10, 232}, + dictWord{9, 10, 202}, + dictWord{10, 10, 474}, + dictWord{140, 10, 433}, + dictWord{6, 0, 626}, + dictWord{142, 0, 431}, + dictWord{10, 0, 706}, + dictWord{150, 0, 44}, + dictWord{13, 0, 51}, + dictWord{6, 10, 108}, + dictWord{7, 10, 1003}, + dictWord{7, 10, 1181}, + dictWord{8, 10, 111}, + dictWord{136, 10, 343}, + dictWord{132, 0, 698}, + dictWord{5, 11, 109}, + dictWord{6, 11, 1784}, + dictWord{7, 11, 1895}, + dictWord{12, 11, 296}, + dictWord{140, 11, 302}, + dictWord{134, 0, 828}, + dictWord{ + 134, + 10, + 1712, + }, + dictWord{138, 0, 17}, + dictWord{7, 0, 1929}, + dictWord{4, 10, 133}, + dictWord{5, 11, 216}, + dictWord{7, 10, 711}, + dictWord{7, 10, 1298}, + dictWord{ + 7, + 10, + 
1585, + }, + dictWord{7, 11, 1879}, + dictWord{9, 11, 141}, + dictWord{9, 11, 270}, + dictWord{9, 11, 679}, + dictWord{10, 11, 159}, + dictWord{10, 11, 553}, + dictWord{ + 11, + 11, + 197, + }, + dictWord{11, 11, 438}, + dictWord{12, 11, 538}, + dictWord{12, 11, 559}, + dictWord{13, 11, 193}, + dictWord{13, 11, 423}, + dictWord{14, 11, 144}, + dictWord{14, 11, 166}, + dictWord{14, 11, 167}, + dictWord{15, 11, 67}, + dictWord{147, 11, 84}, + dictWord{141, 11, 127}, + dictWord{7, 11, 1872}, + dictWord{ + 137, + 11, + 81, + }, + dictWord{6, 10, 99}, + dictWord{7, 10, 1808}, + dictWord{145, 10, 57}, + dictWord{134, 11, 391}, + dictWord{5, 0, 689}, + dictWord{6, 0, 84}, + dictWord{7, 0, 1250}, + dictWord{6, 10, 574}, + dictWord{7, 10, 428}, + dictWord{10, 10, 669}, + dictWord{11, 10, 485}, + dictWord{11, 10, 840}, + dictWord{12, 10, 300}, + dictWord{ + 142, + 10, + 250, + }, + dictWord{7, 11, 322}, + dictWord{136, 11, 249}, + dictWord{7, 11, 432}, + dictWord{135, 11, 1649}, + dictWord{135, 10, 1871}, + dictWord{137, 10, 252}, + dictWord{6, 11, 155}, + dictWord{140, 11, 234}, + dictWord{7, 0, 871}, + dictWord{19, 0, 27}, + dictWord{147, 11, 27}, + dictWord{140, 0, 498}, + dictWord{5, 0, 986}, + dictWord{6, 0, 130}, + dictWord{138, 0, 823}, + dictWord{6, 0, 1793}, + dictWord{7, 0, 1582}, + dictWord{8, 0, 458}, + dictWord{10, 0, 101}, + dictWord{10, 0, 318}, + dictWord{ + 10, + 0, + 945, + }, + dictWord{12, 0, 734}, + dictWord{16, 0, 104}, + dictWord{18, 0, 177}, + dictWord{6, 10, 323}, + dictWord{135, 10, 1564}, + dictWord{5, 11, 632}, + dictWord{ + 138, + 11, + 526, + }, + dictWord{10, 0, 435}, + dictWord{7, 10, 461}, + dictWord{136, 10, 775}, + dictWord{6, 11, 144}, + dictWord{7, 11, 948}, + dictWord{7, 11, 1042}, + dictWord{ + 7, + 11, + 1857, + }, + dictWord{8, 11, 235}, + dictWord{8, 11, 461}, + dictWord{9, 11, 453}, + dictWord{9, 11, 530}, + dictWord{10, 11, 354}, + dictWord{17, 11, 77}, + dictWord{ + 19, + 11, + 99, + }, + dictWord{148, 11, 79}, + dictWord{138, 0, 966}, + dictWord{7, 0, 1644}, + dictWord{137, 0, 129}, + dictWord{135, 0, 997}, + dictWord{136, 0, 502}, + dictWord{ + 5, + 11, + 196, + }, + dictWord{6, 11, 486}, + dictWord{7, 11, 212}, + dictWord{8, 11, 309}, + dictWord{136, 11, 346}, + dictWord{7, 10, 727}, + dictWord{146, 10, 73}, + dictWord{132, 0, 823}, + dictWord{132, 11, 686}, + dictWord{135, 0, 1927}, + dictWord{4, 0, 762}, + dictWord{7, 0, 1756}, + dictWord{137, 0, 98}, + dictWord{136, 10, 577}, + dictWord{24, 0, 8}, + dictWord{4, 11, 30}, + dictWord{5, 11, 43}, + dictWord{152, 11, 8}, + dictWord{7, 0, 1046}, + dictWord{139, 0, 160}, + dictWord{7, 0, 492}, + dictWord{ + 4, + 10, + 413, + }, + dictWord{5, 10, 677}, + dictWord{7, 11, 492}, + dictWord{8, 10, 432}, + dictWord{140, 10, 280}, + dictWord{6, 0, 45}, + dictWord{7, 0, 433}, + dictWord{8, 0, 129}, + dictWord{9, 0, 21}, + dictWord{10, 0, 392}, + dictWord{11, 0, 79}, + dictWord{12, 0, 499}, + dictWord{13, 0, 199}, + dictWord{141, 0, 451}, + dictWord{7, 0, 558}, + dictWord{ + 136, + 0, + 353, + }, + dictWord{4, 11, 220}, + dictWord{7, 11, 1535}, + dictWord{9, 11, 93}, + dictWord{139, 11, 474}, + dictWord{7, 10, 646}, + dictWord{7, 10, 1730}, + dictWord{ + 11, + 10, + 446, + }, + dictWord{141, 10, 178}, + dictWord{133, 0, 785}, + dictWord{134, 0, 1145}, + dictWord{8, 0, 81}, + dictWord{9, 0, 189}, + dictWord{9, 0, 201}, + dictWord{ + 11, + 0, + 478, + }, + dictWord{11, 0, 712}, + dictWord{141, 0, 338}, + dictWord{5, 0, 353}, + dictWord{151, 0, 26}, + dictWord{11, 0, 762}, + dictWord{132, 10, 395}, + dictWord{ + 134, 
+ 0, + 2024, + }, + dictWord{4, 0, 611}, + dictWord{133, 0, 606}, + dictWord{9, 10, 174}, + dictWord{10, 10, 164}, + dictWord{11, 10, 440}, + dictWord{11, 10, 841}, + dictWord{ + 143, + 10, + 98, + }, + dictWord{134, 10, 426}, + dictWord{10, 10, 608}, + dictWord{139, 10, 1002}, + dictWord{138, 10, 250}, + dictWord{6, 0, 25}, + dictWord{7, 0, 855}, + dictWord{7, 0, 1258}, + dictWord{144, 0, 32}, + dictWord{7, 11, 1725}, + dictWord{138, 11, 393}, + dictWord{5, 11, 263}, + dictWord{134, 11, 414}, + dictWord{6, 0, 2011}, + dictWord{133, 10, 476}, + dictWord{4, 0, 4}, + dictWord{7, 0, 1118}, + dictWord{7, 0, 1320}, + dictWord{7, 0, 1706}, + dictWord{8, 0, 277}, + dictWord{9, 0, 622}, + dictWord{ + 10, + 0, + 9, + }, + dictWord{11, 0, 724}, + dictWord{12, 0, 350}, + dictWord{12, 0, 397}, + dictWord{13, 0, 28}, + dictWord{13, 0, 159}, + dictWord{15, 0, 89}, + dictWord{18, 0, 5}, + dictWord{ + 19, + 0, + 9, + }, + dictWord{20, 0, 34}, + dictWord{22, 0, 47}, + dictWord{6, 11, 178}, + dictWord{6, 11, 1750}, + dictWord{8, 11, 251}, + dictWord{9, 11, 690}, + dictWord{ + 10, + 11, + 155, + }, + dictWord{10, 11, 196}, + dictWord{10, 11, 373}, + dictWord{11, 11, 698}, + dictWord{13, 11, 155}, + dictWord{148, 11, 93}, + dictWord{5, 11, 97}, + dictWord{ + 137, + 11, + 393, + }, + dictWord{7, 0, 764}, + dictWord{11, 0, 461}, + dictWord{12, 0, 172}, + dictWord{5, 10, 76}, + dictWord{6, 10, 458}, + dictWord{6, 10, 497}, + dictWord{ + 7, + 10, + 868, + }, + dictWord{9, 10, 658}, + dictWord{10, 10, 594}, + dictWord{11, 10, 566}, + dictWord{12, 10, 338}, + dictWord{141, 10, 200}, + dictWord{134, 0, 1449}, + dictWord{138, 11, 40}, + dictWord{134, 11, 1639}, + dictWord{134, 0, 1445}, + dictWord{6, 0, 1168}, + dictWord{4, 10, 526}, + dictWord{7, 10, 1029}, + dictWord{ + 135, + 10, + 1054, + }, + dictWord{4, 11, 191}, + dictWord{7, 11, 934}, + dictWord{8, 11, 647}, + dictWord{145, 11, 97}, + dictWord{132, 10, 636}, + dictWord{6, 0, 233}, + dictWord{ + 7, + 10, + 660, + }, + dictWord{7, 10, 1124}, + dictWord{17, 10, 31}, + dictWord{19, 10, 22}, + dictWord{151, 10, 14}, + dictWord{6, 10, 1699}, + dictWord{136, 11, 110}, + dictWord{ + 12, + 11, + 246, + }, + dictWord{15, 11, 162}, + dictWord{19, 11, 64}, + dictWord{20, 11, 8}, + dictWord{20, 11, 95}, + dictWord{22, 11, 24}, + dictWord{152, 11, 17}, + dictWord{ + 5, + 11, + 165, + }, + dictWord{9, 11, 346}, + dictWord{138, 11, 655}, + dictWord{5, 11, 319}, + dictWord{135, 11, 534}, + dictWord{134, 0, 255}, + dictWord{9, 0, 216}, + dictWord{ + 8, + 11, + 128, + }, + dictWord{139, 11, 179}, + dictWord{9, 0, 183}, + dictWord{139, 0, 286}, + dictWord{11, 0, 956}, + dictWord{151, 0, 3}, + dictWord{4, 0, 536}, + dictWord{ + 7, + 0, + 1141, + }, + dictWord{10, 0, 723}, + dictWord{139, 0, 371}, + dictWord{4, 10, 279}, + dictWord{7, 10, 301}, + dictWord{137, 10, 362}, + dictWord{7, 0, 285}, + dictWord{ + 5, + 11, + 57, + }, + dictWord{6, 11, 101}, + dictWord{6, 11, 1663}, + dictWord{7, 11, 132}, + dictWord{7, 11, 1048}, + dictWord{7, 11, 1154}, + dictWord{7, 11, 1415}, + dictWord{ + 7, + 11, + 1507, + }, + dictWord{12, 11, 493}, + dictWord{15, 11, 105}, + dictWord{151, 11, 15}, + dictWord{5, 11, 459}, + dictWord{7, 11, 1073}, + dictWord{7, 10, 1743}, + dictWord{ + 8, + 11, + 241, + }, + dictWord{136, 11, 334}, + dictWord{4, 10, 178}, + dictWord{133, 10, 399}, + dictWord{135, 0, 560}, + dictWord{132, 0, 690}, + dictWord{135, 0, 1246}, + dictWord{18, 0, 157}, + dictWord{147, 0, 63}, + dictWord{10, 0, 599}, + dictWord{11, 0, 33}, + dictWord{12, 0, 571}, + dictWord{149, 0, 1}, + 
dictWord{6, 11, 324}, + dictWord{ + 6, + 11, + 520, + }, + dictWord{7, 11, 338}, + dictWord{7, 11, 1616}, + dictWord{7, 11, 1729}, + dictWord{8, 11, 228}, + dictWord{9, 11, 69}, + dictWord{139, 11, 750}, + dictWord{ + 7, + 0, + 1862, + }, + dictWord{12, 0, 491}, + dictWord{12, 0, 520}, + dictWord{13, 0, 383}, + dictWord{142, 0, 244}, + dictWord{135, 11, 734}, + dictWord{134, 10, 1692}, + dictWord{10, 0, 448}, + dictWord{11, 0, 630}, + dictWord{17, 0, 117}, + dictWord{6, 10, 202}, + dictWord{7, 11, 705}, + dictWord{12, 10, 360}, + dictWord{17, 10, 118}, + dictWord{18, 10, 27}, + dictWord{148, 10, 67}, + dictWord{4, 11, 73}, + dictWord{6, 11, 612}, + dictWord{7, 11, 927}, + dictWord{7, 11, 1822}, + dictWord{8, 11, 217}, + dictWord{ + 9, + 11, + 472, + }, + dictWord{9, 11, 765}, + dictWord{9, 11, 766}, + dictWord{10, 11, 408}, + dictWord{11, 11, 51}, + dictWord{11, 11, 793}, + dictWord{12, 11, 266}, + dictWord{ + 15, + 11, + 158, + }, + dictWord{20, 11, 89}, + dictWord{150, 11, 32}, + dictWord{4, 0, 190}, + dictWord{133, 0, 554}, + dictWord{133, 0, 1001}, + dictWord{5, 11, 389}, + dictWord{ + 8, + 11, + 636, + }, + dictWord{137, 11, 229}, + dictWord{5, 0, 446}, + dictWord{7, 10, 872}, + dictWord{10, 10, 516}, + dictWord{139, 10, 167}, + dictWord{137, 10, 313}, + dictWord{132, 10, 224}, + dictWord{134, 0, 1313}, + dictWord{5, 10, 546}, + dictWord{7, 10, 35}, + dictWord{8, 10, 11}, + dictWord{8, 10, 12}, + dictWord{9, 10, 315}, + dictWord{9, 10, 533}, + dictWord{10, 10, 802}, + dictWord{11, 10, 166}, + dictWord{12, 10, 525}, + dictWord{142, 10, 243}, + dictWord{6, 0, 636}, + dictWord{137, 0, 837}, + dictWord{5, 10, 241}, + dictWord{8, 10, 242}, + dictWord{9, 10, 451}, + dictWord{10, 10, 667}, + dictWord{11, 10, 598}, + dictWord{140, 10, 429}, + dictWord{22, 10, 46}, + dictWord{150, 11, 46}, + dictWord{136, 11, 472}, + dictWord{11, 0, 278}, + dictWord{142, 0, 73}, + dictWord{141, 11, 185}, + dictWord{132, 0, 868}, + dictWord{ + 134, + 0, + 972, + }, + dictWord{4, 10, 366}, + dictWord{137, 10, 516}, + dictWord{138, 0, 1010}, + dictWord{5, 11, 189}, + dictWord{6, 10, 1736}, + dictWord{7, 11, 442}, + dictWord{ + 7, + 11, + 443, + }, + dictWord{8, 11, 281}, + dictWord{12, 11, 174}, + dictWord{13, 11, 83}, + dictWord{141, 11, 261}, + dictWord{139, 11, 384}, + dictWord{6, 11, 2}, + dictWord{ + 7, + 11, + 191, + }, + dictWord{7, 11, 446}, + dictWord{7, 11, 758}, + dictWord{7, 11, 1262}, + dictWord{7, 11, 1737}, + dictWord{8, 11, 22}, + dictWord{8, 11, 270}, + dictWord{ + 8, + 11, + 612, + }, + dictWord{9, 11, 4}, + dictWord{9, 11, 167}, + dictWord{9, 11, 312}, + dictWord{9, 11, 436}, + dictWord{10, 11, 156}, + dictWord{10, 11, 216}, + dictWord{ + 10, + 11, + 311, + }, + dictWord{10, 11, 623}, + dictWord{11, 11, 72}, + dictWord{11, 11, 330}, + dictWord{11, 11, 455}, + dictWord{12, 11, 101}, + dictWord{12, 11, 321}, + dictWord{ + 12, + 11, + 504, + }, + dictWord{12, 11, 530}, + dictWord{12, 11, 543}, + dictWord{13, 11, 17}, + dictWord{13, 11, 156}, + dictWord{13, 11, 334}, + dictWord{14, 11, 48}, + dictWord{15, 11, 70}, + dictWord{17, 11, 60}, + dictWord{148, 11, 64}, + dictWord{6, 10, 331}, + dictWord{136, 10, 623}, + dictWord{135, 0, 1231}, + dictWord{132, 0, 304}, + dictWord{6, 11, 60}, + dictWord{7, 11, 670}, + dictWord{7, 11, 1327}, + dictWord{8, 11, 411}, + dictWord{8, 11, 435}, + dictWord{9, 11, 653}, + dictWord{9, 11, 740}, + dictWord{10, 11, 385}, + dictWord{11, 11, 222}, + dictWord{11, 11, 324}, + dictWord{11, 11, 829}, + dictWord{140, 11, 611}, + dictWord{7, 0, 506}, + dictWord{6, 11, 166}, + 
dictWord{7, 11, 374}, + dictWord{135, 11, 1174}, + dictWord{14, 11, 43}, + dictWord{146, 11, 21}, + dictWord{135, 11, 1694}, + dictWord{135, 10, 1888}, + dictWord{ + 5, + 11, + 206, + }, + dictWord{134, 11, 398}, + dictWord{135, 11, 50}, + dictWord{150, 0, 26}, + dictWord{6, 0, 53}, + dictWord{6, 0, 199}, + dictWord{7, 0, 1408}, + dictWord{ + 8, + 0, + 32, + }, + dictWord{8, 0, 93}, + dictWord{10, 0, 397}, + dictWord{10, 0, 629}, + dictWord{11, 0, 593}, + dictWord{11, 0, 763}, + dictWord{13, 0, 326}, + dictWord{145, 0, 35}, + dictWord{134, 0, 105}, + dictWord{132, 10, 394}, + dictWord{4, 0, 843}, + dictWord{138, 0, 794}, + dictWord{11, 0, 704}, + dictWord{141, 0, 396}, + dictWord{5, 0, 114}, + dictWord{5, 0, 255}, + dictWord{141, 0, 285}, + dictWord{6, 0, 619}, + dictWord{7, 0, 898}, + dictWord{7, 0, 1092}, + dictWord{8, 0, 485}, + dictWord{18, 0, 28}, + dictWord{ + 19, + 0, + 116, + }, + dictWord{135, 10, 1931}, + dictWord{9, 0, 145}, + dictWord{7, 10, 574}, + dictWord{135, 10, 1719}, + dictWord{7, 0, 2035}, + dictWord{8, 0, 19}, + dictWord{ + 9, + 0, + 89, + }, + dictWord{138, 0, 831}, + dictWord{132, 10, 658}, + dictWord{6, 11, 517}, + dictWord{7, 11, 1159}, + dictWord{10, 11, 621}, + dictWord{139, 11, 192}, + dictWord{ + 7, + 0, + 1933, + }, + dictWord{7, 11, 1933}, + dictWord{9, 10, 781}, + dictWord{10, 10, 144}, + dictWord{11, 10, 385}, + dictWord{13, 10, 161}, + dictWord{13, 10, 228}, + dictWord{13, 10, 268}, + dictWord{148, 10, 107}, + dictWord{136, 10, 374}, + dictWord{10, 11, 223}, + dictWord{139, 11, 645}, + dictWord{135, 0, 1728}, + dictWord{ + 7, + 11, + 64, + }, + dictWord{7, 11, 289}, + dictWord{136, 11, 245}, + dictWord{4, 10, 344}, + dictWord{6, 10, 498}, + dictWord{139, 10, 323}, + dictWord{136, 0, 746}, + dictWord{ + 135, + 10, + 1063, + }, + dictWord{137, 10, 155}, + dictWord{4, 0, 987}, + dictWord{6, 0, 1964}, + dictWord{6, 0, 1974}, + dictWord{6, 0, 1990}, + dictWord{136, 0, 995}, + dictWord{133, 11, 609}, + dictWord{133, 10, 906}, + dictWord{134, 0, 1550}, + dictWord{134, 0, 874}, + dictWord{5, 11, 129}, + dictWord{6, 11, 61}, + dictWord{ + 135, + 11, + 947, + }, + dictWord{4, 0, 1018}, + dictWord{6, 0, 1938}, + dictWord{6, 0, 2021}, + dictWord{134, 0, 2039}, + dictWord{132, 0, 814}, + dictWord{11, 0, 126}, + dictWord{ + 139, + 0, + 287, + }, + dictWord{134, 0, 1264}, + dictWord{5, 0, 955}, + dictWord{136, 0, 814}, + dictWord{141, 11, 506}, + dictWord{132, 11, 314}, + dictWord{6, 0, 981}, + dictWord{139, 11, 1000}, + dictWord{5, 0, 56}, + dictWord{8, 0, 892}, + dictWord{8, 0, 915}, + dictWord{140, 0, 776}, + dictWord{148, 0, 100}, + dictWord{10, 0, 4}, + dictWord{ + 10, + 0, + 13, + }, + dictWord{11, 0, 638}, + dictWord{148, 0, 57}, + dictWord{148, 11, 74}, + dictWord{5, 0, 738}, + dictWord{132, 10, 616}, + dictWord{133, 11, 637}, + dictWord{ + 136, + 10, + 692, + }, + dictWord{133, 0, 758}, + dictWord{132, 10, 305}, + dictWord{137, 11, 590}, + dictWord{5, 11, 280}, + dictWord{135, 11, 1226}, + dictWord{ + 134, + 11, + 494, + }, + dictWord{135, 0, 1112}, + dictWord{133, 11, 281}, + dictWord{13, 0, 44}, + dictWord{14, 0, 214}, + dictWord{5, 10, 214}, + dictWord{7, 10, 603}, + dictWord{ + 8, + 10, + 611, + }, + dictWord{9, 10, 686}, + dictWord{10, 10, 88}, + dictWord{11, 10, 459}, + dictWord{11, 10, 496}, + dictWord{12, 10, 463}, + dictWord{140, 10, 590}, + dictWord{ + 139, + 0, + 328, + }, + dictWord{135, 11, 1064}, + dictWord{137, 0, 133}, + dictWord{7, 0, 168}, + dictWord{13, 0, 196}, + dictWord{141, 0, 237}, + dictWord{134, 10, 1703}, + dictWord{134, 0, 1152}, + 
+	[vendored generated data: several thousand additional dictWord{…} entries from a machine-generated static-dictionary lookup table, added verbatim by this diff; the per-entry diff lines were flattened in extraction and carry no hand-written logic]
757}, + dictWord{7, 10, 1916}, + dictWord{8, 10, 94}, + dictWord{8, 10, 711}, + dictWord{9, 10, 541}, + dictWord{10, 10, 162}, + dictWord{10, 10, 795}, + dictWord{11, 10, 989}, + dictWord{11, 10, 1010}, + dictWord{12, 10, 14}, + dictWord{142, 10, 308}, + dictWord{7, 11, 474}, + dictWord{137, 11, 578}, + dictWord{ + 132, + 0, + 674, + }, + dictWord{132, 0, 770}, + dictWord{5, 0, 79}, + dictWord{7, 0, 1027}, + dictWord{7, 0, 1477}, + dictWord{139, 0, 52}, + dictWord{133, 11, 424}, + dictWord{ + 134, + 0, + 1666, + }, + dictWord{6, 0, 409}, + dictWord{6, 10, 349}, + dictWord{6, 10, 1682}, + dictWord{7, 10, 1252}, + dictWord{8, 10, 112}, + dictWord{8, 11, 714}, + dictWord{ + 9, + 10, + 435, + }, + dictWord{9, 10, 668}, + dictWord{10, 10, 290}, + dictWord{10, 10, 319}, + dictWord{10, 10, 815}, + dictWord{11, 10, 180}, + dictWord{11, 10, 837}, + dictWord{ + 12, + 10, + 240, + }, + dictWord{13, 10, 152}, + dictWord{13, 10, 219}, + dictWord{142, 10, 158}, + dictWord{5, 0, 789}, + dictWord{134, 0, 195}, + dictWord{4, 0, 251}, + dictWord{ + 4, + 0, + 688, + }, + dictWord{7, 0, 513}, + dictWord{135, 0, 1284}, + dictWord{132, 10, 581}, + dictWord{9, 11, 420}, + dictWord{10, 11, 269}, + dictWord{10, 11, 285}, + dictWord{10, 11, 576}, + dictWord{11, 11, 397}, + dictWord{13, 11, 175}, + dictWord{145, 11, 90}, + dictWord{6, 10, 126}, + dictWord{7, 10, 573}, + dictWord{8, 10, 397}, + dictWord{142, 10, 44}, + dictWord{132, 11, 429}, + dictWord{133, 0, 889}, + dictWord{4, 0, 160}, + dictWord{5, 0, 330}, + dictWord{7, 0, 1434}, + dictWord{136, 0, 174}, + dictWord{7, 11, 18}, + dictWord{7, 11, 699}, + dictWord{7, 11, 1966}, + dictWord{8, 11, 752}, + dictWord{9, 11, 273}, + dictWord{9, 11, 412}, + dictWord{9, 11, 703}, + dictWord{ + 10, + 11, + 71, + }, + dictWord{10, 11, 427}, + dictWord{10, 11, 508}, + dictWord{146, 11, 97}, + dictWord{6, 0, 872}, + dictWord{134, 0, 899}, + dictWord{133, 10, 926}, + dictWord{134, 0, 1126}, + dictWord{134, 0, 918}, + dictWord{4, 11, 53}, + dictWord{5, 11, 186}, + dictWord{135, 11, 752}, + dictWord{7, 0, 268}, + dictWord{136, 0, 569}, + dictWord{134, 0, 1224}, + dictWord{6, 0, 1361}, + dictWord{7, 10, 1232}, + dictWord{137, 10, 531}, + dictWord{8, 11, 575}, + dictWord{10, 11, 289}, + dictWord{ + 139, + 11, + 319, + }, + dictWord{133, 10, 670}, + dictWord{132, 11, 675}, + dictWord{133, 0, 374}, + dictWord{135, 10, 1957}, + dictWord{133, 0, 731}, + dictWord{11, 0, 190}, + dictWord{15, 0, 49}, + dictWord{11, 11, 190}, + dictWord{143, 11, 49}, + dictWord{4, 0, 626}, + dictWord{5, 0, 506}, + dictWord{5, 0, 642}, + dictWord{6, 0, 425}, + dictWord{ + 10, + 0, + 202, + }, + dictWord{139, 0, 141}, + dictWord{137, 0, 444}, + dictWord{7, 10, 242}, + dictWord{135, 10, 1942}, + dictWord{6, 11, 209}, + dictWord{8, 11, 468}, + dictWord{ + 9, + 11, + 210, + }, + dictWord{11, 11, 36}, + dictWord{12, 11, 28}, + dictWord{12, 11, 630}, + dictWord{13, 11, 21}, + dictWord{13, 11, 349}, + dictWord{14, 11, 7}, + dictWord{ + 145, + 11, + 13, + }, + dictWord{4, 11, 342}, + dictWord{135, 11, 1179}, + dictWord{5, 10, 834}, + dictWord{7, 10, 1202}, + dictWord{8, 10, 14}, + dictWord{9, 10, 481}, + dictWord{ + 137, + 10, + 880, + }, + dictWord{4, 11, 928}, + dictWord{133, 11, 910}, + dictWord{4, 11, 318}, + dictWord{4, 11, 496}, + dictWord{7, 11, 856}, + dictWord{139, 11, 654}, + dictWord{136, 0, 835}, + dictWord{7, 0, 1526}, + dictWord{138, 10, 465}, + dictWord{151, 0, 17}, + dictWord{135, 0, 477}, + dictWord{4, 10, 357}, + dictWord{6, 10, 172}, + dictWord{7, 10, 143}, + dictWord{137, 10, 413}, + 
dictWord{6, 0, 1374}, + dictWord{138, 0, 994}, + dictWord{18, 0, 76}, + dictWord{132, 10, 590}, + dictWord{7, 0, 287}, + dictWord{8, 0, 355}, + dictWord{9, 0, 293}, + dictWord{137, 0, 743}, + dictWord{134, 0, 1389}, + dictWord{7, 11, 915}, + dictWord{8, 11, 247}, + dictWord{147, 11, 0}, + dictWord{ + 4, + 11, + 202, + }, + dictWord{5, 11, 382}, + dictWord{6, 11, 454}, + dictWord{7, 11, 936}, + dictWord{7, 11, 1803}, + dictWord{8, 11, 758}, + dictWord{9, 11, 375}, + dictWord{ + 9, + 11, + 895, + }, + dictWord{10, 11, 743}, + dictWord{10, 11, 792}, + dictWord{11, 11, 978}, + dictWord{11, 11, 1012}, + dictWord{142, 11, 109}, + dictWord{5, 0, 384}, + dictWord{8, 0, 455}, + dictWord{140, 0, 48}, + dictWord{132, 11, 390}, + dictWord{5, 10, 169}, + dictWord{7, 10, 333}, + dictWord{136, 10, 45}, + dictWord{5, 0, 264}, + dictWord{134, 0, 184}, + dictWord{138, 11, 791}, + dictWord{133, 11, 717}, + dictWord{132, 10, 198}, + dictWord{6, 11, 445}, + dictWord{7, 11, 332}, + dictWord{ + 137, + 11, + 909, + }, + dictWord{136, 0, 1001}, + dictWord{4, 10, 24}, + dictWord{5, 10, 140}, + dictWord{5, 10, 185}, + dictWord{7, 10, 1500}, + dictWord{11, 10, 565}, + dictWord{ + 139, + 10, + 838, + }, + dictWord{134, 11, 578}, + dictWord{5, 0, 633}, + dictWord{6, 0, 28}, + dictWord{135, 0, 1323}, + dictWord{132, 0, 851}, + dictWord{136, 11, 267}, + dictWord{ + 7, + 0, + 359, + }, + dictWord{8, 0, 243}, + dictWord{140, 0, 175}, + dictWord{4, 10, 334}, + dictWord{133, 10, 593}, + dictWord{141, 11, 87}, + dictWord{136, 11, 766}, + dictWord{10, 0, 287}, + dictWord{12, 0, 138}, + dictWord{10, 11, 287}, + dictWord{140, 11, 138}, + dictWord{4, 0, 105}, + dictWord{132, 0, 740}, + dictWord{140, 10, 116}, + dictWord{134, 0, 857}, + dictWord{135, 11, 1841}, + dictWord{6, 0, 1402}, + dictWord{137, 0, 819}, + dictWord{132, 11, 584}, + dictWord{132, 10, 709}, + dictWord{ + 133, + 10, + 897, + }, + dictWord{5, 0, 224}, + dictWord{13, 0, 174}, + dictWord{146, 0, 52}, + dictWord{135, 10, 1840}, + dictWord{4, 10, 608}, + dictWord{133, 10, 497}, + dictWord{139, 11, 60}, + dictWord{4, 0, 758}, + dictWord{135, 0, 1649}, + dictWord{4, 11, 226}, + dictWord{4, 11, 326}, + dictWord{135, 11, 1770}, + dictWord{5, 11, 426}, + dictWord{8, 11, 30}, + dictWord{9, 11, 2}, + dictWord{11, 11, 549}, + dictWord{147, 11, 122}, + dictWord{135, 10, 2039}, + dictWord{6, 10, 540}, + dictWord{ + 136, + 10, + 136, + }, + dictWord{4, 0, 573}, + dictWord{8, 0, 655}, + dictWord{4, 10, 897}, + dictWord{133, 10, 786}, + dictWord{7, 0, 351}, + dictWord{139, 0, 128}, + dictWord{ + 133, + 10, + 999, + }, + dictWord{4, 10, 299}, + dictWord{135, 10, 1004}, + dictWord{133, 0, 918}, + dictWord{132, 11, 345}, + dictWord{4, 11, 385}, + dictWord{7, 11, 265}, + dictWord{135, 11, 587}, + dictWord{133, 10, 456}, + dictWord{136, 10, 180}, + dictWord{6, 0, 687}, + dictWord{134, 0, 1537}, + dictWord{4, 11, 347}, + dictWord{ + 5, + 11, + 423, + }, + dictWord{5, 11, 996}, + dictWord{135, 11, 1329}, + dictWord{132, 10, 755}, + dictWord{7, 11, 1259}, + dictWord{9, 11, 125}, + dictWord{11, 11, 65}, + dictWord{140, 11, 285}, + dictWord{5, 11, 136}, + dictWord{6, 11, 136}, + dictWord{136, 11, 644}, + dictWord{134, 0, 1525}, + dictWord{4, 0, 1009}, + dictWord{ + 135, + 0, + 1139, + }, + dictWord{139, 10, 338}, + dictWord{132, 0, 340}, + dictWord{135, 10, 1464}, + dictWord{8, 0, 847}, + dictWord{10, 0, 861}, + dictWord{10, 0, 876}, + dictWord{ + 10, + 0, + 889, + }, + dictWord{10, 0, 922}, + dictWord{10, 0, 929}, + dictWord{10, 0, 933}, + dictWord{12, 0, 784}, + dictWord{140, 0, 791}, + 
dictWord{139, 0, 176}, + dictWord{ + 9, + 11, + 134, + }, + dictWord{10, 11, 2}, + dictWord{10, 11, 27}, + dictWord{10, 11, 333}, + dictWord{11, 11, 722}, + dictWord{143, 11, 1}, + dictWord{4, 11, 433}, + dictWord{ + 133, + 11, + 719, + }, + dictWord{5, 0, 985}, + dictWord{7, 0, 509}, + dictWord{7, 0, 529}, + dictWord{145, 0, 96}, + dictWord{132, 0, 615}, + dictWord{4, 10, 890}, + dictWord{ + 5, + 10, + 805, + }, + dictWord{5, 10, 819}, + dictWord{5, 10, 961}, + dictWord{6, 10, 396}, + dictWord{6, 10, 1631}, + dictWord{6, 10, 1678}, + dictWord{7, 10, 1967}, + dictWord{ + 7, + 10, + 2041, + }, + dictWord{9, 10, 630}, + dictWord{11, 10, 8}, + dictWord{11, 10, 1019}, + dictWord{12, 10, 176}, + dictWord{13, 10, 225}, + dictWord{14, 10, 292}, + dictWord{ + 149, + 10, + 24, + }, + dictWord{135, 0, 1919}, + dictWord{134, 0, 1131}, + dictWord{144, 11, 21}, + dictWord{144, 11, 51}, + dictWord{135, 10, 1815}, + dictWord{4, 0, 247}, + dictWord{7, 10, 1505}, + dictWord{10, 10, 190}, + dictWord{10, 10, 634}, + dictWord{11, 10, 792}, + dictWord{12, 10, 358}, + dictWord{140, 10, 447}, + dictWord{ + 5, + 10, + 0, + }, + dictWord{6, 10, 536}, + dictWord{7, 10, 604}, + dictWord{13, 10, 445}, + dictWord{145, 10, 126}, + dictWord{4, 0, 184}, + dictWord{5, 0, 390}, + dictWord{6, 0, 337}, + dictWord{7, 0, 23}, + dictWord{7, 0, 494}, + dictWord{7, 0, 618}, + dictWord{7, 0, 1456}, + dictWord{8, 0, 27}, + dictWord{8, 0, 599}, + dictWord{10, 0, 153}, + dictWord{ + 139, + 0, + 710, + }, + dictWord{6, 10, 232}, + dictWord{6, 10, 412}, + dictWord{7, 10, 1074}, + dictWord{8, 10, 9}, + dictWord{8, 10, 157}, + dictWord{8, 10, 786}, + dictWord{9, 10, 196}, + dictWord{9, 10, 352}, + dictWord{9, 10, 457}, + dictWord{10, 10, 337}, + dictWord{11, 10, 232}, + dictWord{11, 10, 877}, + dictWord{12, 10, 480}, + dictWord{ + 140, + 10, + 546, + }, + dictWord{13, 0, 38}, + dictWord{135, 10, 958}, + dictWord{4, 10, 382}, + dictWord{136, 10, 579}, + dictWord{4, 10, 212}, + dictWord{135, 10, 1206}, + dictWord{ + 4, + 11, + 555, + }, + dictWord{8, 11, 536}, + dictWord{138, 11, 288}, + dictWord{11, 11, 139}, + dictWord{139, 11, 171}, + dictWord{9, 11, 370}, + dictWord{138, 11, 90}, + dictWord{132, 0, 1015}, + dictWord{134, 0, 1088}, + dictWord{5, 10, 655}, + dictWord{135, 11, 977}, + dictWord{134, 0, 1585}, + dictWord{17, 10, 67}, + dictWord{ + 147, + 10, + 74, + }, + dictWord{10, 0, 227}, + dictWord{11, 0, 497}, + dictWord{11, 0, 709}, + dictWord{140, 0, 415}, + dictWord{6, 0, 360}, + dictWord{7, 0, 1664}, + dictWord{ + 136, + 0, + 478, + }, + dictWord{7, 0, 95}, + dictWord{6, 10, 231}, + dictWord{136, 10, 423}, + dictWord{140, 11, 65}, + dictWord{4, 11, 257}, + dictWord{135, 11, 2031}, + dictWord{ + 135, + 11, + 1768, + }, + dictWord{133, 10, 300}, + dictWord{139, 11, 211}, + dictWord{136, 0, 699}, + dictWord{6, 10, 237}, + dictWord{7, 10, 611}, + dictWord{8, 10, 100}, + dictWord{9, 10, 416}, + dictWord{11, 10, 335}, + dictWord{12, 10, 173}, + dictWord{146, 10, 101}, + dictWord{14, 0, 26}, + dictWord{146, 0, 150}, + dictWord{6, 0, 581}, + dictWord{135, 0, 1119}, + dictWord{135, 10, 1208}, + dictWord{132, 0, 739}, + dictWord{6, 11, 83}, + dictWord{6, 11, 1733}, + dictWord{135, 11, 1389}, + dictWord{ + 137, + 0, + 869, + }, + dictWord{4, 0, 67}, + dictWord{5, 0, 422}, + dictWord{7, 0, 1037}, + dictWord{7, 0, 1289}, + dictWord{7, 0, 1555}, + dictWord{9, 0, 741}, + dictWord{145, 0, 108}, + dictWord{133, 10, 199}, + dictWord{12, 10, 427}, + dictWord{146, 10, 38}, + dictWord{136, 0, 464}, + dictWord{142, 0, 42}, + dictWord{10, 0, 96}, + 
dictWord{8, 11, 501}, + dictWord{137, 11, 696}, + dictWord{134, 11, 592}, + dictWord{4, 0, 512}, + dictWord{4, 0, 966}, + dictWord{5, 0, 342}, + dictWord{6, 0, 1855}, + dictWord{8, 0, 869}, + dictWord{8, 0, 875}, + dictWord{8, 0, 901}, + dictWord{144, 0, 26}, + dictWord{8, 0, 203}, + dictWord{11, 0, 823}, + dictWord{11, 0, 846}, + dictWord{12, 0, 482}, + dictWord{ + 13, + 0, + 277, + }, + dictWord{13, 0, 302}, + dictWord{13, 0, 464}, + dictWord{14, 0, 205}, + dictWord{142, 0, 221}, + dictWord{4, 0, 449}, + dictWord{133, 0, 718}, + dictWord{ + 7, + 11, + 1718, + }, + dictWord{9, 11, 95}, + dictWord{9, 11, 274}, + dictWord{10, 11, 279}, + dictWord{10, 11, 317}, + dictWord{10, 11, 420}, + dictWord{11, 11, 303}, + dictWord{ + 11, + 11, + 808, + }, + dictWord{12, 11, 134}, + dictWord{12, 11, 367}, + dictWord{13, 11, 149}, + dictWord{13, 11, 347}, + dictWord{14, 11, 349}, + dictWord{14, 11, 406}, + dictWord{18, 11, 22}, + dictWord{18, 11, 89}, + dictWord{18, 11, 122}, + dictWord{147, 11, 47}, + dictWord{133, 11, 26}, + dictWord{4, 0, 355}, + dictWord{6, 0, 311}, + dictWord{ + 9, + 0, + 256, + }, + dictWord{138, 0, 404}, + dictWord{132, 11, 550}, + dictWord{10, 0, 758}, + dictWord{6, 10, 312}, + dictWord{6, 10, 1715}, + dictWord{10, 10, 584}, + dictWord{11, 10, 546}, + dictWord{11, 10, 692}, + dictWord{12, 10, 259}, + dictWord{12, 10, 295}, + dictWord{13, 10, 46}, + dictWord{141, 10, 154}, + dictWord{ + 136, + 11, + 822, + }, + dictWord{5, 0, 827}, + dictWord{4, 11, 902}, + dictWord{5, 11, 809}, + dictWord{6, 11, 122}, + dictWord{135, 11, 896}, + dictWord{5, 0, 64}, + dictWord{140, 0, 581}, + dictWord{4, 0, 442}, + dictWord{6, 0, 739}, + dictWord{7, 0, 1047}, + dictWord{7, 0, 1352}, + dictWord{7, 0, 1643}, + dictWord{7, 11, 1911}, + dictWord{9, 11, 449}, + dictWord{10, 11, 192}, + dictWord{138, 11, 740}, + dictWord{135, 11, 262}, + dictWord{132, 10, 588}, + dictWord{133, 11, 620}, + dictWord{5, 0, 977}, + dictWord{ + 6, + 0, + 288, + }, + dictWord{7, 0, 528}, + dictWord{4, 11, 34}, + dictWord{5, 11, 574}, + dictWord{7, 11, 279}, + dictWord{7, 11, 1624}, + dictWord{136, 11, 601}, + dictWord{ + 6, + 0, + 1375, + }, + dictWord{4, 10, 231}, + dictWord{5, 10, 61}, + dictWord{6, 10, 104}, + dictWord{7, 10, 729}, + dictWord{7, 10, 964}, + dictWord{7, 10, 1658}, + dictWord{ + 140, + 10, + 414, + }, + dictWord{6, 10, 263}, + dictWord{138, 10, 757}, + dictWord{132, 10, 320}, + dictWord{4, 0, 254}, + dictWord{7, 0, 1309}, + dictWord{5, 11, 332}, + dictWord{ + 135, + 11, + 1309, + }, + dictWord{6, 11, 261}, + dictWord{8, 11, 182}, + dictWord{139, 11, 943}, + dictWord{132, 10, 225}, + dictWord{6, 0, 12}, + dictWord{135, 0, 1219}, + dictWord{4, 0, 275}, + dictWord{12, 0, 376}, + dictWord{6, 11, 1721}, + dictWord{141, 11, 490}, + dictWord{4, 11, 933}, + dictWord{133, 11, 880}, + dictWord{6, 0, 951}, + dictWord{6, 0, 1109}, + dictWord{6, 0, 1181}, + dictWord{7, 0, 154}, + dictWord{4, 10, 405}, + dictWord{7, 10, 817}, + dictWord{14, 10, 58}, + dictWord{17, 10, 37}, + dictWord{ + 146, + 10, + 124, + }, + dictWord{6, 0, 1520}, + dictWord{133, 10, 974}, + dictWord{134, 0, 1753}, + dictWord{6, 0, 369}, + dictWord{6, 0, 502}, + dictWord{7, 0, 1036}, + dictWord{ + 8, + 0, + 348, + }, + dictWord{9, 0, 452}, + dictWord{10, 0, 26}, + dictWord{11, 0, 224}, + dictWord{11, 0, 387}, + dictWord{11, 0, 772}, + dictWord{12, 0, 95}, + dictWord{12, 0, 629}, + dictWord{13, 0, 195}, + dictWord{13, 0, 207}, + dictWord{13, 0, 241}, + dictWord{14, 0, 260}, + dictWord{14, 0, 270}, + dictWord{143, 0, 140}, + dictWord{132, 0, 269}, + 
dictWord{5, 0, 480}, + dictWord{7, 0, 532}, + dictWord{7, 0, 1197}, + dictWord{7, 0, 1358}, + dictWord{8, 0, 291}, + dictWord{11, 0, 349}, + dictWord{142, 0, 396}, + dictWord{ + 5, + 10, + 235, + }, + dictWord{7, 10, 1239}, + dictWord{11, 10, 131}, + dictWord{140, 10, 370}, + dictWord{7, 10, 956}, + dictWord{7, 10, 1157}, + dictWord{7, 10, 1506}, + dictWord{ + 7, + 10, + 1606, + }, + dictWord{7, 10, 1615}, + dictWord{7, 10, 1619}, + dictWord{7, 10, 1736}, + dictWord{7, 10, 1775}, + dictWord{8, 10, 590}, + dictWord{9, 10, 324}, + dictWord{9, 10, 736}, + dictWord{9, 10, 774}, + dictWord{9, 10, 776}, + dictWord{9, 10, 784}, + dictWord{10, 10, 567}, + dictWord{10, 10, 708}, + dictWord{11, 10, 518}, + dictWord{11, 10, 613}, + dictWord{11, 10, 695}, + dictWord{11, 10, 716}, + dictWord{11, 10, 739}, + dictWord{11, 10, 770}, + dictWord{11, 10, 771}, + dictWord{ + 11, + 10, + 848, + }, + dictWord{11, 10, 857}, + dictWord{11, 10, 931}, + dictWord{11, 10, 947}, + dictWord{12, 10, 326}, + dictWord{12, 10, 387}, + dictWord{12, 10, 484}, + dictWord{ + 12, + 10, + 528, + }, + dictWord{12, 10, 552}, + dictWord{12, 10, 613}, + dictWord{13, 10, 189}, + dictWord{13, 10, 256}, + dictWord{13, 10, 340}, + dictWord{13, 10, 432}, + dictWord{13, 10, 436}, + dictWord{13, 10, 440}, + dictWord{13, 10, 454}, + dictWord{14, 10, 174}, + dictWord{14, 10, 220}, + dictWord{14, 10, 284}, + dictWord{ + 14, + 10, + 390, + }, + dictWord{145, 10, 121}, + dictWord{8, 11, 598}, + dictWord{9, 11, 664}, + dictWord{138, 11, 441}, + dictWord{9, 10, 137}, + dictWord{138, 10, 221}, + dictWord{133, 11, 812}, + dictWord{148, 0, 15}, + dictWord{134, 0, 1341}, + dictWord{6, 0, 1017}, + dictWord{4, 11, 137}, + dictWord{7, 11, 1178}, + dictWord{ + 135, + 11, + 1520, + }, + dictWord{7, 10, 390}, + dictWord{138, 10, 140}, + dictWord{7, 11, 1260}, + dictWord{135, 11, 1790}, + dictWord{137, 11, 191}, + dictWord{ + 135, + 10, + 1144, + }, + dictWord{6, 0, 1810}, + dictWord{7, 0, 657}, + dictWord{8, 0, 886}, + dictWord{10, 0, 857}, + dictWord{14, 0, 440}, + dictWord{144, 0, 96}, + dictWord{8, 0, 533}, + dictWord{6, 11, 1661}, + dictWord{7, 11, 1975}, + dictWord{7, 11, 2009}, + dictWord{135, 11, 2011}, + dictWord{6, 0, 1453}, + dictWord{134, 10, 464}, + dictWord{ + 132, + 11, + 715, + }, + dictWord{5, 10, 407}, + dictWord{11, 10, 204}, + dictWord{11, 10, 243}, + dictWord{11, 10, 489}, + dictWord{12, 10, 293}, + dictWord{19, 10, 37}, + dictWord{20, 10, 73}, + dictWord{150, 10, 38}, + dictWord{133, 11, 703}, + dictWord{4, 0, 211}, + dictWord{7, 0, 1483}, + dictWord{5, 10, 325}, + dictWord{8, 10, 5}, + dictWord{ + 8, + 10, + 227, + }, + dictWord{9, 10, 105}, + dictWord{10, 10, 585}, + dictWord{140, 10, 614}, + dictWord{4, 0, 332}, + dictWord{5, 0, 335}, + dictWord{6, 0, 238}, + dictWord{ + 7, + 0, + 269, + }, + dictWord{7, 0, 811}, + dictWord{7, 0, 1797}, + dictWord{8, 0, 836}, + dictWord{9, 0, 507}, + dictWord{141, 0, 242}, + dictWord{5, 11, 89}, + dictWord{7, 11, 1915}, + dictWord{9, 11, 185}, + dictWord{9, 11, 235}, + dictWord{9, 11, 496}, + dictWord{10, 11, 64}, + dictWord{10, 11, 270}, + dictWord{10, 11, 403}, + dictWord{10, 11, 469}, + dictWord{10, 11, 529}, + dictWord{10, 11, 590}, + dictWord{11, 11, 140}, + dictWord{11, 11, 860}, + dictWord{13, 11, 1}, + dictWord{13, 11, 422}, + dictWord{14, 11, 341}, + dictWord{14, 11, 364}, + dictWord{17, 11, 93}, + dictWord{18, 11, 113}, + dictWord{19, 11, 97}, + dictWord{147, 11, 113}, + dictWord{133, 11, 695}, + dictWord{ + 16, + 0, + 19, + }, + dictWord{5, 11, 6}, + dictWord{6, 11, 183}, + dictWord{6, 
10, 621}, + dictWord{7, 11, 680}, + dictWord{7, 11, 978}, + dictWord{7, 11, 1013}, + dictWord{7, 11, 1055}, + dictWord{12, 11, 230}, + dictWord{13, 11, 172}, + dictWord{13, 10, 504}, + dictWord{146, 11, 29}, + dictWord{136, 0, 156}, + dictWord{133, 0, 1009}, + dictWord{ + 6, + 11, + 29, + }, + dictWord{139, 11, 63}, + dictWord{134, 0, 820}, + dictWord{134, 10, 218}, + dictWord{7, 10, 454}, + dictWord{7, 10, 782}, + dictWord{8, 10, 768}, + dictWord{ + 140, + 10, + 686, + }, + dictWord{5, 0, 228}, + dictWord{6, 0, 203}, + dictWord{7, 0, 156}, + dictWord{8, 0, 347}, + dictWord{9, 0, 265}, + dictWord{18, 0, 39}, + dictWord{20, 0, 54}, + dictWord{21, 0, 31}, + dictWord{22, 0, 3}, + dictWord{23, 0, 0}, + dictWord{15, 11, 8}, + dictWord{18, 11, 39}, + dictWord{20, 11, 54}, + dictWord{21, 11, 31}, + dictWord{22, 11, 3}, + dictWord{151, 11, 0}, + dictWord{7, 0, 1131}, + dictWord{135, 0, 1468}, + dictWord{144, 10, 0}, + dictWord{134, 0, 1276}, + dictWord{10, 10, 676}, + dictWord{ + 140, + 10, + 462, + }, + dictWord{132, 11, 311}, + dictWord{134, 11, 1740}, + dictWord{7, 11, 170}, + dictWord{8, 11, 90}, + dictWord{8, 11, 177}, + dictWord{8, 11, 415}, + dictWord{ + 11, + 11, + 714, + }, + dictWord{142, 11, 281}, + dictWord{134, 10, 164}, + dictWord{6, 0, 1792}, + dictWord{138, 0, 849}, + dictWord{150, 10, 50}, + dictWord{5, 0, 291}, + dictWord{5, 0, 318}, + dictWord{7, 0, 765}, + dictWord{9, 0, 389}, + dictWord{12, 0, 548}, + dictWord{8, 11, 522}, + dictWord{142, 11, 328}, + dictWord{11, 11, 91}, + dictWord{ + 13, + 11, + 129, + }, + dictWord{15, 11, 101}, + dictWord{145, 11, 125}, + dictWord{4, 11, 494}, + dictWord{6, 11, 74}, + dictWord{7, 11, 44}, + dictWord{7, 11, 407}, + dictWord{ + 8, + 11, + 551, + }, + dictWord{12, 11, 17}, + dictWord{15, 11, 5}, + dictWord{148, 11, 11}, + dictWord{4, 11, 276}, + dictWord{133, 11, 296}, + dictWord{6, 10, 343}, + dictWord{ + 7, + 10, + 195, + }, + dictWord{7, 11, 1777}, + dictWord{9, 10, 226}, + dictWord{10, 10, 197}, + dictWord{10, 10, 575}, + dictWord{11, 10, 502}, + dictWord{139, 10, 899}, + dictWord{ + 10, + 0, + 525, + }, + dictWord{139, 0, 82}, + dictWord{14, 0, 453}, + dictWord{4, 11, 7}, + dictWord{5, 11, 90}, + dictWord{5, 11, 158}, + dictWord{6, 11, 542}, + dictWord{7, 11, 221}, + dictWord{7, 11, 1574}, + dictWord{9, 11, 490}, + dictWord{10, 11, 540}, + dictWord{11, 11, 443}, + dictWord{139, 11, 757}, + dictWord{135, 0, 666}, + dictWord{ + 22, + 10, + 29, + }, + dictWord{150, 11, 29}, + dictWord{4, 0, 422}, + dictWord{147, 10, 8}, + dictWord{5, 0, 355}, + dictWord{145, 0, 0}, + dictWord{6, 0, 1873}, + dictWord{9, 0, 918}, + dictWord{7, 11, 588}, + dictWord{9, 11, 175}, + dictWord{138, 11, 530}, + dictWord{143, 11, 31}, + dictWord{11, 0, 165}, + dictWord{7, 10, 1125}, + dictWord{9, 10, 143}, + dictWord{14, 10, 405}, + dictWord{150, 10, 21}, + dictWord{9, 0, 260}, + dictWord{137, 0, 905}, + dictWord{5, 11, 872}, + dictWord{6, 11, 57}, + dictWord{6, 11, 479}, + dictWord{ + 6, + 11, + 562, + }, + dictWord{7, 11, 471}, + dictWord{7, 11, 1060}, + dictWord{9, 11, 447}, + dictWord{9, 11, 454}, + dictWord{141, 11, 6}, + dictWord{138, 11, 704}, + dictWord{133, 0, 865}, + dictWord{5, 0, 914}, + dictWord{134, 0, 1625}, + dictWord{133, 0, 234}, + dictWord{7, 0, 1383}, + dictWord{5, 11, 31}, + dictWord{6, 11, 614}, + dictWord{145, 11, 61}, + dictWord{7, 11, 1200}, + dictWord{138, 11, 460}, + dictWord{6, 11, 424}, + dictWord{135, 11, 1866}, + dictWord{136, 0, 306}, + dictWord{ + 5, + 10, + 959, + }, + dictWord{12, 11, 30}, + dictWord{13, 11, 148}, + dictWord{14, 11, 
87}, + dictWord{14, 11, 182}, + dictWord{16, 11, 42}, + dictWord{18, 11, 92}, + dictWord{ + 148, + 11, + 70, + }, + dictWord{6, 0, 1919}, + dictWord{6, 0, 1921}, + dictWord{9, 0, 923}, + dictWord{9, 0, 930}, + dictWord{9, 0, 941}, + dictWord{9, 0, 949}, + dictWord{9, 0, 987}, + dictWord{ + 9, + 0, + 988, + }, + dictWord{9, 0, 992}, + dictWord{12, 0, 802}, + dictWord{12, 0, 815}, + dictWord{12, 0, 856}, + dictWord{12, 0, 885}, + dictWord{12, 0, 893}, + dictWord{ + 12, + 0, + 898, + }, + dictWord{12, 0, 919}, + dictWord{12, 0, 920}, + dictWord{12, 0, 941}, + dictWord{12, 0, 947}, + dictWord{15, 0, 183}, + dictWord{15, 0, 185}, + dictWord{15, 0, 189}, + dictWord{15, 0, 197}, + dictWord{15, 0, 202}, + dictWord{15, 0, 233}, + dictWord{18, 0, 218}, + dictWord{18, 0, 219}, + dictWord{18, 0, 233}, + dictWord{143, 11, 156}, + dictWord{135, 10, 1759}, + dictWord{136, 10, 173}, + dictWord{13, 0, 163}, + dictWord{13, 0, 180}, + dictWord{18, 0, 78}, + dictWord{20, 0, 35}, + dictWord{5, 11, 13}, + dictWord{134, 11, 142}, + dictWord{134, 10, 266}, + dictWord{6, 11, 97}, + dictWord{7, 11, 116}, + dictWord{8, 11, 322}, + dictWord{8, 11, 755}, + dictWord{9, 11, 548}, + dictWord{10, 11, 714}, + dictWord{11, 11, 884}, + dictWord{141, 11, 324}, + dictWord{135, 0, 1312}, + dictWord{9, 0, 814}, + dictWord{137, 11, 676}, + dictWord{ + 133, + 0, + 707, + }, + dictWord{135, 0, 1493}, + dictWord{6, 0, 421}, + dictWord{7, 0, 61}, + dictWord{7, 0, 1540}, + dictWord{10, 0, 11}, + dictWord{138, 0, 501}, + dictWord{12, 0, 733}, + dictWord{12, 0, 766}, + dictWord{7, 11, 866}, + dictWord{135, 11, 1163}, + dictWord{137, 0, 341}, + dictWord{142, 0, 98}, + dictWord{145, 11, 115}, + dictWord{ + 135, + 11, + 1111, + }, + dictWord{136, 10, 300}, + dictWord{136, 0, 1014}, + dictWord{8, 11, 1}, + dictWord{9, 11, 112}, + dictWord{138, 11, 326}, + dictWord{132, 11, 730}, + dictWord{5, 11, 488}, + dictWord{6, 11, 527}, + dictWord{7, 11, 489}, + dictWord{7, 11, 1636}, + dictWord{8, 11, 121}, + dictWord{8, 11, 144}, + dictWord{8, 11, 359}, + dictWord{ + 9, + 11, + 193, + }, + dictWord{9, 11, 241}, + dictWord{9, 11, 336}, + dictWord{9, 11, 882}, + dictWord{11, 11, 266}, + dictWord{11, 11, 372}, + dictWord{11, 11, 944}, + dictWord{ + 12, + 11, + 401, + }, + dictWord{140, 11, 641}, + dictWord{6, 0, 971}, + dictWord{134, 0, 1121}, + dictWord{6, 0, 102}, + dictWord{7, 0, 72}, + dictWord{15, 0, 142}, + dictWord{ + 147, + 0, + 67, + }, + dictWord{151, 0, 30}, + dictWord{135, 0, 823}, + dictWord{134, 0, 1045}, + dictWord{5, 10, 427}, + dictWord{5, 10, 734}, + dictWord{7, 10, 478}, + dictWord{ + 136, + 10, + 52, + }, + dictWord{7, 0, 1930}, + dictWord{11, 10, 217}, + dictWord{142, 10, 165}, + dictWord{6, 0, 1512}, + dictWord{135, 0, 1870}, + dictWord{9, 11, 31}, + dictWord{ + 10, + 11, + 244, + }, + dictWord{10, 11, 699}, + dictWord{12, 11, 149}, + dictWord{141, 11, 497}, + dictWord{133, 11, 377}, + dictWord{145, 11, 101}, + dictWord{ + 10, + 11, + 158, + }, + dictWord{13, 11, 13}, + dictWord{13, 11, 137}, + dictWord{13, 11, 258}, + dictWord{14, 11, 111}, + dictWord{14, 11, 225}, + dictWord{14, 11, 253}, + dictWord{ + 14, + 11, + 304, + }, + dictWord{14, 11, 339}, + dictWord{14, 11, 417}, + dictWord{146, 11, 33}, + dictWord{6, 0, 87}, + dictWord{6, 10, 1734}, + dictWord{7, 10, 20}, + dictWord{ + 7, + 10, + 1056, + }, + dictWord{8, 10, 732}, + dictWord{9, 10, 406}, + dictWord{9, 10, 911}, + dictWord{138, 10, 694}, + dictWord{134, 0, 1243}, + dictWord{137, 0, 245}, + dictWord{ + 7, + 0, + 68, + }, + dictWord{8, 0, 48}, + dictWord{8, 0, 88}, + 
dictWord{8, 0, 582}, + dictWord{8, 0, 681}, + dictWord{9, 0, 373}, + dictWord{9, 0, 864}, + dictWord{11, 0, 157}, + dictWord{ + 11, + 0, + 336, + }, + dictWord{11, 0, 843}, + dictWord{148, 0, 27}, + dictWord{8, 11, 663}, + dictWord{144, 11, 8}, + dictWord{133, 10, 613}, + dictWord{4, 0, 88}, + dictWord{ + 5, + 0, + 137, + }, + dictWord{5, 0, 174}, + dictWord{5, 0, 777}, + dictWord{6, 0, 1664}, + dictWord{6, 0, 1725}, + dictWord{7, 0, 77}, + dictWord{7, 0, 426}, + dictWord{7, 0, 1317}, + dictWord{ + 7, + 0, + 1355, + }, + dictWord{8, 0, 126}, + dictWord{8, 0, 563}, + dictWord{9, 0, 523}, + dictWord{9, 0, 750}, + dictWord{10, 0, 310}, + dictWord{10, 0, 836}, + dictWord{11, 0, 42}, + dictWord{11, 0, 318}, + dictWord{11, 0, 731}, + dictWord{12, 0, 68}, + dictWord{12, 0, 92}, + dictWord{12, 0, 507}, + dictWord{12, 0, 692}, + dictWord{13, 0, 81}, + dictWord{ + 13, + 0, + 238, + }, + dictWord{13, 0, 374}, + dictWord{14, 0, 436}, + dictWord{18, 0, 138}, + dictWord{19, 0, 78}, + dictWord{19, 0, 111}, + dictWord{20, 0, 55}, + dictWord{20, 0, 77}, + dictWord{148, 0, 92}, + dictWord{141, 0, 418}, + dictWord{4, 0, 938}, + dictWord{137, 0, 625}, + dictWord{138, 0, 351}, + dictWord{5, 11, 843}, + dictWord{7, 10, 32}, + dictWord{ + 7, + 10, + 984, + }, + dictWord{8, 10, 85}, + dictWord{8, 10, 709}, + dictWord{9, 10, 579}, + dictWord{9, 10, 847}, + dictWord{9, 10, 856}, + dictWord{10, 10, 799}, + dictWord{ + 11, + 10, + 258, + }, + dictWord{11, 10, 1007}, + dictWord{12, 10, 331}, + dictWord{12, 10, 615}, + dictWord{13, 10, 188}, + dictWord{13, 10, 435}, + dictWord{14, 10, 8}, + dictWord{ + 15, + 10, + 165, + }, + dictWord{16, 10, 27}, + dictWord{148, 10, 40}, + dictWord{6, 0, 1668}, + dictWord{7, 0, 1499}, + dictWord{8, 0, 117}, + dictWord{9, 0, 314}, + dictWord{ + 138, + 0, + 174, + }, + dictWord{135, 0, 707}, + dictWord{132, 11, 554}, + dictWord{133, 11, 536}, + dictWord{5, 0, 403}, + dictWord{5, 11, 207}, + dictWord{9, 11, 79}, + dictWord{ + 11, + 11, + 625, + }, + dictWord{145, 11, 7}, + dictWord{132, 11, 424}, + dictWord{136, 11, 785}, + dictWord{4, 10, 167}, + dictWord{135, 10, 82}, + dictWord{9, 0, 7}, + dictWord{ + 23, + 0, + 6, + }, + dictWord{9, 11, 7}, + dictWord{151, 11, 6}, + dictWord{6, 0, 282}, + dictWord{5, 10, 62}, + dictWord{6, 10, 534}, + dictWord{7, 10, 74}, + dictWord{7, 10, 678}, + dictWord{ + 7, + 10, + 684, + }, + dictWord{7, 10, 1043}, + dictWord{7, 10, 1072}, + dictWord{8, 10, 280}, + dictWord{8, 10, 541}, + dictWord{8, 10, 686}, + dictWord{9, 10, 258}, + dictWord{ + 10, + 10, + 519, + }, + dictWord{11, 10, 252}, + dictWord{140, 10, 282}, + dictWord{138, 10, 33}, + dictWord{132, 10, 359}, + dictWord{4, 0, 44}, + dictWord{5, 0, 311}, + dictWord{ + 6, + 0, + 156, + }, + dictWord{7, 0, 639}, + dictWord{7, 0, 762}, + dictWord{7, 0, 1827}, + dictWord{9, 0, 8}, + dictWord{9, 0, 462}, + dictWord{148, 0, 83}, + dictWord{7, 11, 769}, + dictWord{ + 9, + 11, + 18, + }, + dictWord{138, 11, 358}, + dictWord{4, 0, 346}, + dictWord{7, 0, 115}, + dictWord{9, 0, 180}, + dictWord{9, 0, 456}, + dictWord{10, 0, 363}, + dictWord{ + 4, + 11, + 896, + }, + dictWord{134, 11, 1777}, + dictWord{133, 10, 211}, + dictWord{7, 0, 761}, + dictWord{7, 0, 1051}, + dictWord{137, 0, 545}, + dictWord{6, 10, 145}, + dictWord{ + 141, + 10, + 336, + }, + dictWord{7, 11, 750}, + dictWord{9, 11, 223}, + dictWord{11, 11, 27}, + dictWord{11, 11, 466}, + dictWord{12, 11, 624}, + dictWord{14, 11, 265}, + dictWord{146, 11, 61}, + dictWord{6, 0, 752}, + dictWord{6, 0, 768}, + dictWord{6, 0, 1195}, + dictWord{6, 0, 1254}, + 
dictWord{6, 0, 1619}, + dictWord{137, 0, 835}, + dictWord{ + 6, + 0, + 1936, + }, + dictWord{8, 0, 930}, + dictWord{136, 0, 960}, + dictWord{132, 10, 263}, + dictWord{132, 11, 249}, + dictWord{12, 0, 653}, + dictWord{132, 10, 916}, + dictWord{4, 11, 603}, + dictWord{133, 11, 661}, + dictWord{8, 0, 344}, + dictWord{4, 11, 11}, + dictWord{6, 11, 128}, + dictWord{7, 11, 231}, + dictWord{7, 11, 1533}, + dictWord{138, 11, 725}, + dictWord{134, 0, 1483}, + dictWord{134, 0, 875}, + dictWord{6, 0, 185}, + dictWord{7, 0, 1899}, + dictWord{9, 0, 875}, + dictWord{139, 0, 673}, + dictWord{15, 10, 155}, + dictWord{144, 10, 79}, + dictWord{7, 0, 93}, + dictWord{7, 0, 210}, + dictWord{7, 0, 1223}, + dictWord{8, 0, 451}, + dictWord{8, 0, 460}, + dictWord{ + 11, + 0, + 353, + }, + dictWord{11, 0, 475}, + dictWord{4, 10, 599}, + dictWord{6, 10, 1634}, + dictWord{7, 10, 67}, + dictWord{7, 10, 691}, + dictWord{7, 10, 979}, + dictWord{ + 7, + 10, + 1697, + }, + dictWord{8, 10, 207}, + dictWord{8, 10, 214}, + dictWord{8, 10, 231}, + dictWord{8, 10, 294}, + dictWord{8, 10, 336}, + dictWord{8, 10, 428}, + dictWord{ + 8, + 10, + 471, + }, + dictWord{8, 10, 622}, + dictWord{8, 10, 626}, + dictWord{8, 10, 679}, + dictWord{8, 10, 759}, + dictWord{8, 10, 829}, + dictWord{9, 10, 11}, + dictWord{9, 10, 246}, + dictWord{9, 10, 484}, + dictWord{9, 10, 573}, + dictWord{9, 10, 706}, + dictWord{9, 10, 762}, + dictWord{9, 10, 798}, + dictWord{9, 10, 855}, + dictWord{9, 10, 870}, + dictWord{ + 9, + 10, + 912, + }, + dictWord{10, 10, 303}, + dictWord{10, 10, 335}, + dictWord{10, 10, 424}, + dictWord{10, 10, 461}, + dictWord{10, 10, 543}, + dictWord{10, 10, 759}, + dictWord{10, 10, 814}, + dictWord{11, 10, 59}, + dictWord{11, 10, 235}, + dictWord{11, 10, 590}, + dictWord{11, 10, 929}, + dictWord{11, 10, 963}, + dictWord{ + 11, + 10, + 987, + }, + dictWord{12, 10, 114}, + dictWord{12, 10, 182}, + dictWord{12, 10, 226}, + dictWord{12, 10, 332}, + dictWord{12, 10, 439}, + dictWord{12, 10, 575}, + dictWord{ + 12, + 10, + 598, + }, + dictWord{12, 10, 675}, + dictWord{13, 10, 8}, + dictWord{13, 10, 125}, + dictWord{13, 10, 194}, + dictWord{13, 10, 287}, + dictWord{14, 10, 197}, + dictWord{14, 10, 383}, + dictWord{15, 10, 53}, + dictWord{17, 10, 63}, + dictWord{19, 10, 46}, + dictWord{19, 10, 98}, + dictWord{19, 10, 106}, + dictWord{148, 10, 85}, + dictWord{132, 11, 476}, + dictWord{4, 0, 327}, + dictWord{5, 0, 478}, + dictWord{7, 0, 1332}, + dictWord{136, 0, 753}, + dictWord{5, 0, 1020}, + dictWord{133, 0, 1022}, + dictWord{135, 11, 1807}, + dictWord{4, 0, 103}, + dictWord{133, 0, 401}, + dictWord{4, 0, 499}, + dictWord{135, 0, 1421}, + dictWord{10, 0, 207}, + dictWord{13, 0, 164}, + dictWord{147, 10, 126}, + dictWord{9, 11, 20}, + dictWord{10, 11, 324}, + dictWord{139, 11, 488}, + dictWord{132, 0, 96}, + dictWord{9, 11, 280}, + dictWord{ + 138, + 11, + 134, + }, + dictWord{135, 0, 968}, + dictWord{133, 10, 187}, + dictWord{135, 10, 1286}, + dictWord{5, 11, 112}, + dictWord{6, 11, 103}, + dictWord{134, 11, 150}, + dictWord{8, 0, 914}, + dictWord{10, 0, 3}, + dictWord{4, 10, 215}, + dictWord{9, 10, 38}, + dictWord{11, 10, 23}, + dictWord{11, 10, 127}, + dictWord{139, 10, 796}, + dictWord{ + 135, + 0, + 399, + }, + dictWord{6, 0, 563}, + dictWord{137, 0, 224}, + dictWord{6, 0, 704}, + dictWord{134, 0, 1214}, + dictWord{4, 11, 708}, + dictWord{8, 11, 15}, + dictWord{ + 9, + 11, + 50, + }, + dictWord{9, 11, 386}, + dictWord{11, 11, 18}, + dictWord{11, 11, 529}, + dictWord{140, 11, 228}, + dictWord{4, 11, 563}, + dictWord{7, 11, 109}, + 
dictWord{ + 7, + 11, + 592, + }, + dictWord{7, 11, 637}, + dictWord{7, 11, 770}, + dictWord{7, 11, 1701}, + dictWord{8, 11, 436}, + dictWord{8, 11, 463}, + dictWord{9, 11, 60}, + dictWord{9, 11, 335}, + dictWord{9, 11, 904}, + dictWord{10, 11, 73}, + dictWord{11, 11, 434}, + dictWord{12, 11, 585}, + dictWord{13, 11, 331}, + dictWord{18, 11, 110}, + dictWord{ + 148, + 11, + 60, + }, + dictWord{134, 0, 1559}, + dictWord{132, 11, 502}, + dictWord{6, 11, 347}, + dictWord{138, 11, 161}, + dictWord{4, 11, 33}, + dictWord{5, 11, 102}, + dictWord{ + 5, + 11, + 500, + }, + dictWord{6, 11, 284}, + dictWord{7, 11, 1079}, + dictWord{7, 11, 1423}, + dictWord{7, 11, 1702}, + dictWord{8, 11, 470}, + dictWord{9, 11, 554}, + dictWord{ + 9, + 11, + 723, + }, + dictWord{139, 11, 333}, + dictWord{7, 11, 246}, + dictWord{135, 11, 840}, + dictWord{6, 11, 10}, + dictWord{8, 11, 571}, + dictWord{9, 11, 739}, + dictWord{ + 143, + 11, + 91, + }, + dictWord{8, 0, 861}, + dictWord{10, 0, 905}, + dictWord{12, 0, 730}, + dictWord{12, 0, 789}, + dictWord{133, 11, 626}, + dictWord{134, 0, 946}, + dictWord{ + 5, + 0, + 746, + }, + dictWord{12, 0, 333}, + dictWord{14, 0, 332}, + dictWord{12, 11, 333}, + dictWord{142, 11, 332}, + dictWord{5, 11, 18}, + dictWord{6, 11, 526}, + dictWord{ + 13, + 11, + 24, + }, + dictWord{13, 11, 110}, + dictWord{19, 11, 5}, + dictWord{147, 11, 44}, + dictWord{4, 0, 910}, + dictWord{5, 0, 832}, + dictWord{135, 10, 2002}, + dictWord{ + 10, + 11, + 768, + }, + dictWord{139, 11, 787}, + dictWord{4, 11, 309}, + dictWord{5, 11, 462}, + dictWord{7, 11, 970}, + dictWord{135, 11, 1097}, + dictWord{4, 10, 28}, + dictWord{ + 5, + 10, + 440, + }, + dictWord{7, 10, 248}, + dictWord{11, 10, 833}, + dictWord{140, 10, 344}, + dictWord{134, 10, 1654}, + dictWord{6, 0, 632}, + dictWord{6, 0, 652}, + dictWord{ + 6, + 0, + 1272, + }, + dictWord{6, 0, 1384}, + dictWord{134, 0, 1560}, + dictWord{134, 11, 1704}, + dictWord{6, 0, 1393}, + dictWord{133, 10, 853}, + dictWord{6, 10, 249}, + dictWord{7, 10, 1234}, + dictWord{139, 10, 573}, + dictWord{5, 11, 86}, + dictWord{7, 11, 743}, + dictWord{9, 11, 85}, + dictWord{10, 11, 281}, + dictWord{10, 11, 432}, + dictWord{11, 11, 490}, + dictWord{12, 11, 251}, + dictWord{13, 11, 118}, + dictWord{14, 11, 378}, + dictWord{146, 11, 143}, + dictWord{5, 11, 524}, + dictWord{ + 133, + 11, + 744, + }, + dictWord{134, 0, 1514}, + dictWord{10, 0, 201}, + dictWord{142, 0, 319}, + dictWord{7, 0, 717}, + dictWord{10, 0, 510}, + dictWord{7, 10, 392}, + dictWord{ + 8, + 10, + 20, + }, + dictWord{8, 10, 172}, + dictWord{8, 10, 690}, + dictWord{9, 10, 383}, + dictWord{9, 10, 845}, + dictWord{11, 10, 293}, + dictWord{11, 10, 832}, + dictWord{ + 11, + 10, + 920, + }, + dictWord{11, 10, 984}, + dictWord{141, 10, 221}, + dictWord{134, 0, 1381}, + dictWord{5, 10, 858}, + dictWord{133, 10, 992}, + dictWord{8, 0, 528}, + dictWord{137, 0, 348}, + dictWord{10, 11, 107}, + dictWord{140, 11, 436}, + dictWord{4, 0, 20}, + dictWord{133, 0, 616}, + dictWord{134, 0, 1251}, + dictWord{ + 132, + 11, + 927, + }, + dictWord{10, 11, 123}, + dictWord{12, 11, 670}, + dictWord{13, 11, 371}, + dictWord{14, 11, 142}, + dictWord{146, 11, 94}, + dictWord{134, 0, 1163}, + dictWord{ + 7, + 11, + 1149, + }, + dictWord{137, 11, 156}, + dictWord{134, 0, 307}, + dictWord{133, 11, 778}, + dictWord{7, 0, 1091}, + dictWord{135, 0, 1765}, + dictWord{ + 5, + 11, + 502, + }, + dictWord{6, 10, 268}, + dictWord{137, 10, 62}, + dictWord{8, 11, 196}, + dictWord{10, 11, 283}, + dictWord{139, 11, 406}, + dictWord{4, 0, 26}, + 
dictWord{ + 5, + 0, + 429, + }, + dictWord{6, 0, 245}, + dictWord{7, 0, 704}, + dictWord{7, 0, 1379}, + dictWord{135, 0, 1474}, + dictWord{133, 11, 855}, + dictWord{132, 0, 881}, + dictWord{ + 4, + 0, + 621, + }, + dictWord{135, 11, 1596}, + dictWord{7, 11, 1400}, + dictWord{9, 11, 446}, + dictWord{138, 11, 45}, + dictWord{6, 0, 736}, + dictWord{138, 10, 106}, + dictWord{133, 0, 542}, + dictWord{134, 0, 348}, + dictWord{133, 0, 868}, + dictWord{136, 0, 433}, + dictWord{135, 0, 1495}, + dictWord{138, 0, 771}, + dictWord{ + 6, + 10, + 613, + }, + dictWord{136, 10, 223}, + dictWord{138, 0, 215}, + dictWord{141, 0, 124}, + dictWord{136, 11, 391}, + dictWord{135, 11, 172}, + dictWord{132, 10, 670}, + dictWord{140, 0, 55}, + dictWord{9, 10, 40}, + dictWord{139, 10, 136}, + dictWord{7, 0, 62}, + dictWord{147, 0, 112}, + dictWord{132, 0, 856}, + dictWord{132, 11, 568}, + dictWord{12, 0, 270}, + dictWord{139, 10, 259}, + dictWord{8, 0, 572}, + dictWord{137, 0, 698}, + dictWord{4, 11, 732}, + dictWord{9, 10, 310}, + dictWord{137, 10, 682}, + dictWord{142, 10, 296}, + dictWord{134, 0, 939}, + dictWord{136, 11, 733}, + dictWord{135, 11, 1435}, + dictWord{7, 10, 1401}, + dictWord{135, 10, 1476}, + dictWord{6, 0, 352}, + dictWord{4, 10, 296}, + dictWord{7, 10, 401}, + dictWord{7, 10, 1410}, + dictWord{7, 10, 1594}, + dictWord{7, 10, 1674}, + dictWord{8, 10, 63}, + dictWord{ + 8, + 10, + 660, + }, + dictWord{137, 10, 74}, + dictWord{4, 11, 428}, + dictWord{133, 11, 668}, + dictWord{4, 10, 139}, + dictWord{4, 10, 388}, + dictWord{140, 10, 188}, + dictWord{7, 11, 2015}, + dictWord{140, 11, 665}, + dictWord{132, 0, 647}, + dictWord{146, 0, 10}, + dictWord{138, 0, 220}, + dictWord{142, 0, 464}, + dictWord{ + 132, + 0, + 109, + }, + dictWord{134, 0, 1746}, + dictWord{6, 0, 515}, + dictWord{4, 10, 747}, + dictWord{6, 11, 1623}, + dictWord{6, 11, 1681}, + dictWord{7, 10, 649}, + dictWord{ + 7, + 10, + 1479, + }, + dictWord{135, 10, 1583}, + dictWord{133, 10, 232}, + dictWord{135, 0, 566}, + dictWord{137, 10, 887}, + dictWord{4, 0, 40}, + dictWord{10, 0, 67}, + dictWord{ + 11, + 0, + 117, + }, + dictWord{11, 0, 768}, + dictWord{139, 0, 935}, + dictWord{132, 0, 801}, + dictWord{7, 0, 992}, + dictWord{8, 0, 301}, + dictWord{9, 0, 722}, + dictWord{ + 12, + 0, + 63, + }, + dictWord{13, 0, 29}, + dictWord{14, 0, 161}, + dictWord{143, 0, 18}, + dictWord{139, 0, 923}, + dictWord{6, 11, 1748}, + dictWord{8, 11, 715}, + dictWord{9, 11, 802}, + dictWord{10, 11, 46}, + dictWord{10, 11, 819}, + dictWord{13, 11, 308}, + dictWord{14, 11, 351}, + dictWord{14, 11, 363}, + dictWord{146, 11, 67}, + dictWord{ + 137, + 11, + 745, + }, + dictWord{7, 0, 1145}, + dictWord{4, 10, 14}, + dictWord{7, 10, 1801}, + dictWord{10, 10, 748}, + dictWord{141, 10, 458}, + dictWord{4, 11, 63}, + dictWord{ + 5, + 11, + 347, + }, + dictWord{134, 11, 474}, + dictWord{135, 0, 568}, + dictWord{4, 10, 425}, + dictWord{7, 11, 577}, + dictWord{7, 11, 1432}, + dictWord{9, 11, 475}, + dictWord{ + 9, + 11, + 505, + }, + dictWord{9, 11, 526}, + dictWord{9, 11, 609}, + dictWord{9, 11, 689}, + dictWord{9, 11, 726}, + dictWord{9, 11, 735}, + dictWord{9, 11, 738}, + dictWord{ + 10, + 11, + 556, + }, + dictWord{10, 11, 674}, + dictWord{10, 11, 684}, + dictWord{11, 11, 89}, + dictWord{11, 11, 202}, + dictWord{11, 11, 272}, + dictWord{11, 11, 380}, + dictWord{ + 11, + 11, + 415, + }, + dictWord{11, 11, 505}, + dictWord{11, 11, 537}, + dictWord{11, 11, 550}, + dictWord{11, 11, 562}, + dictWord{11, 11, 640}, + dictWord{11, 11, 667}, + dictWord{11, 11, 688}, + 
dictWord{11, 11, 847}, + dictWord{11, 11, 927}, + dictWord{11, 11, 930}, + dictWord{11, 11, 940}, + dictWord{12, 11, 144}, + dictWord{ + 12, + 11, + 325, + }, + dictWord{12, 11, 329}, + dictWord{12, 11, 389}, + dictWord{12, 11, 403}, + dictWord{12, 11, 451}, + dictWord{12, 11, 515}, + dictWord{12, 11, 604}, + dictWord{ + 12, + 11, + 616, + }, + dictWord{12, 11, 626}, + dictWord{13, 11, 66}, + dictWord{13, 11, 131}, + dictWord{13, 11, 167}, + dictWord{13, 11, 236}, + dictWord{13, 11, 368}, + dictWord{13, 11, 411}, + dictWord{13, 11, 434}, + dictWord{13, 11, 453}, + dictWord{13, 11, 461}, + dictWord{13, 11, 474}, + dictWord{14, 11, 59}, + dictWord{14, 11, 60}, + dictWord{14, 11, 139}, + dictWord{14, 11, 152}, + dictWord{14, 11, 276}, + dictWord{14, 11, 353}, + dictWord{14, 11, 402}, + dictWord{15, 11, 28}, + dictWord{ + 15, + 11, + 81, + }, + dictWord{15, 11, 123}, + dictWord{15, 11, 152}, + dictWord{18, 11, 136}, + dictWord{148, 11, 88}, + dictWord{137, 0, 247}, + dictWord{135, 11, 1622}, + dictWord{ + 9, + 11, + 544, + }, + dictWord{11, 11, 413}, + dictWord{144, 11, 25}, + dictWord{4, 0, 645}, + dictWord{7, 0, 825}, + dictWord{6, 10, 1768}, + dictWord{135, 11, 89}, + dictWord{140, 0, 328}, + dictWord{5, 10, 943}, + dictWord{134, 10, 1779}, + dictWord{134, 0, 1363}, + dictWord{5, 10, 245}, + dictWord{6, 10, 576}, + dictWord{7, 10, 582}, + dictWord{136, 10, 225}, + dictWord{134, 0, 1280}, + dictWord{5, 11, 824}, + dictWord{133, 11, 941}, + dictWord{7, 11, 440}, + dictWord{8, 11, 230}, + dictWord{ + 139, + 11, + 106, + }, + dictWord{5, 0, 28}, + dictWord{6, 0, 204}, + dictWord{10, 0, 320}, + dictWord{10, 0, 583}, + dictWord{13, 0, 502}, + dictWord{14, 0, 72}, + dictWord{14, 0, 274}, + dictWord{14, 0, 312}, + dictWord{14, 0, 344}, + dictWord{15, 0, 159}, + dictWord{16, 0, 62}, + dictWord{16, 0, 69}, + dictWord{17, 0, 30}, + dictWord{18, 0, 42}, + dictWord{ + 18, + 0, + 53, + }, + dictWord{18, 0, 84}, + dictWord{18, 0, 140}, + dictWord{19, 0, 68}, + dictWord{19, 0, 85}, + dictWord{20, 0, 5}, + dictWord{20, 0, 45}, + dictWord{20, 0, 101}, + dictWord{ + 22, + 0, + 7, + }, + dictWord{150, 0, 20}, + dictWord{4, 0, 558}, + dictWord{6, 0, 390}, + dictWord{7, 0, 162}, + dictWord{7, 0, 689}, + dictWord{9, 0, 360}, + dictWord{138, 0, 653}, + dictWord{134, 0, 764}, + dictWord{6, 0, 862}, + dictWord{137, 0, 833}, + dictWord{5, 0, 856}, + dictWord{6, 0, 1672}, + dictWord{6, 0, 1757}, + dictWord{134, 0, 1781}, + dictWord{ + 5, + 0, + 92, + }, + dictWord{10, 0, 736}, + dictWord{140, 0, 102}, + dictWord{6, 0, 1927}, + dictWord{6, 0, 1944}, + dictWord{8, 0, 924}, + dictWord{8, 0, 948}, + dictWord{ + 10, + 0, + 967, + }, + dictWord{138, 0, 978}, + dictWord{134, 0, 1479}, + dictWord{5, 0, 590}, + dictWord{8, 0, 360}, + dictWord{9, 0, 213}, + dictWord{138, 0, 63}, + dictWord{ + 134, + 0, + 1521, + }, + dictWord{6, 0, 709}, + dictWord{134, 0, 891}, + dictWord{132, 10, 443}, + dictWord{13, 0, 477}, + dictWord{14, 0, 120}, + dictWord{148, 0, 61}, + dictWord{ + 4, + 11, + 914, + }, + dictWord{5, 11, 800}, + dictWord{133, 11, 852}, + dictWord{10, 11, 54}, + dictWord{141, 11, 115}, + dictWord{4, 11, 918}, + dictWord{133, 11, 876}, + dictWord{139, 11, 152}, + dictWord{4, 11, 92}, + dictWord{133, 11, 274}, + dictWord{135, 11, 1901}, + dictWord{9, 11, 800}, + dictWord{10, 11, 693}, + dictWord{ + 11, + 11, + 482, + }, + dictWord{11, 11, 734}, + dictWord{139, 11, 789}, + dictWord{9, 0, 483}, + dictWord{132, 10, 298}, + dictWord{6, 0, 1213}, + dictWord{141, 11, 498}, + dictWord{135, 11, 1451}, + dictWord{133, 11, 743}, + 
dictWord{4, 0, 1022}, + dictWord{10, 0, 1000}, + dictWord{12, 0, 957}, + dictWord{12, 0, 980}, + dictWord{ + 12, + 0, + 1013, + }, + dictWord{14, 0, 481}, + dictWord{144, 0, 116}, + dictWord{8, 0, 503}, + dictWord{17, 0, 29}, + dictWord{4, 11, 49}, + dictWord{7, 11, 280}, + dictWord{ + 135, + 11, + 1633, + }, + dictWord{135, 0, 1712}, + dictWord{134, 0, 466}, + dictWord{136, 11, 47}, + dictWord{5, 10, 164}, + dictWord{7, 10, 121}, + dictWord{142, 10, 189}, + dictWord{ + 7, + 10, + 812, + }, + dictWord{7, 10, 1261}, + dictWord{7, 10, 1360}, + dictWord{9, 10, 632}, + dictWord{140, 10, 352}, + dictWord{139, 10, 556}, + dictWord{132, 0, 731}, + dictWord{5, 11, 272}, + dictWord{5, 11, 908}, + dictWord{5, 11, 942}, + dictWord{7, 11, 1008}, + dictWord{7, 11, 1560}, + dictWord{8, 11, 197}, + dictWord{9, 11, 47}, + dictWord{11, 11, 538}, + dictWord{139, 11, 742}, + dictWord{4, 10, 172}, + dictWord{9, 10, 611}, + dictWord{10, 10, 436}, + dictWord{12, 10, 673}, + dictWord{ + 141, + 10, + 255, + }, + dictWord{133, 10, 844}, + dictWord{10, 0, 484}, + dictWord{11, 0, 754}, + dictWord{12, 0, 457}, + dictWord{14, 0, 171}, + dictWord{14, 0, 389}, + dictWord{ + 146, + 0, + 153, + }, + dictWord{9, 10, 263}, + dictWord{10, 10, 147}, + dictWord{138, 10, 492}, + dictWord{137, 11, 891}, + dictWord{138, 0, 241}, + dictWord{133, 10, 537}, + dictWord{6, 0, 2005}, + dictWord{136, 0, 964}, + dictWord{137, 10, 842}, + dictWord{151, 11, 8}, + dictWord{4, 11, 407}, + dictWord{132, 11, 560}, + dictWord{ + 135, + 11, + 1884, + }, + dictWord{6, 0, 1100}, + dictWord{134, 0, 1242}, + dictWord{135, 0, 954}, + dictWord{5, 10, 230}, + dictWord{5, 10, 392}, + dictWord{6, 10, 420}, + dictWord{ + 9, + 10, + 568, + }, + dictWord{140, 10, 612}, + dictWord{4, 11, 475}, + dictWord{11, 11, 35}, + dictWord{11, 11, 90}, + dictWord{13, 11, 7}, + dictWord{13, 11, 71}, + dictWord{ + 13, + 11, + 177, + }, + dictWord{142, 11, 422}, + dictWord{136, 11, 332}, + dictWord{135, 0, 1958}, + dictWord{6, 0, 549}, + dictWord{8, 0, 34}, + dictWord{8, 0, 283}, + dictWord{ + 9, + 0, + 165, + }, + dictWord{138, 0, 475}, + dictWord{10, 0, 952}, + dictWord{12, 0, 966}, + dictWord{140, 0, 994}, + dictWord{5, 0, 652}, + dictWord{5, 0, 701}, + dictWord{ + 135, + 0, + 449, + }, + dictWord{4, 0, 655}, + dictWord{7, 0, 850}, + dictWord{17, 0, 75}, + dictWord{146, 0, 137}, + dictWord{4, 0, 146}, + dictWord{7, 0, 1618}, + dictWord{8, 0, 670}, + dictWord{ + 5, + 10, + 41, + }, + dictWord{7, 10, 1459}, + dictWord{7, 10, 1469}, + dictWord{7, 10, 1859}, + dictWord{9, 10, 549}, + dictWord{139, 10, 905}, + dictWord{133, 10, 696}, + dictWord{6, 0, 159}, + dictWord{6, 0, 364}, + dictWord{7, 0, 516}, + dictWord{137, 0, 518}, + dictWord{135, 0, 1439}, + dictWord{6, 11, 222}, + dictWord{7, 11, 636}, + dictWord{ + 7, + 11, + 1620, + }, + dictWord{8, 11, 409}, + dictWord{9, 11, 693}, + dictWord{139, 11, 77}, + dictWord{13, 0, 151}, + dictWord{141, 11, 45}, + dictWord{6, 0, 1027}, + dictWord{ + 4, + 11, + 336, + }, + dictWord{132, 10, 771}, + dictWord{139, 11, 392}, + dictWord{10, 11, 121}, + dictWord{11, 11, 175}, + dictWord{149, 11, 16}, + dictWord{8, 0, 950}, + dictWord{138, 0, 983}, + dictWord{133, 10, 921}, + dictWord{135, 0, 993}, + dictWord{6, 10, 180}, + dictWord{7, 10, 1137}, + dictWord{8, 10, 751}, + dictWord{ + 139, + 10, + 805, + }, + dictWord{7, 0, 501}, + dictWord{9, 0, 111}, + dictWord{10, 0, 141}, + dictWord{11, 0, 332}, + dictWord{13, 0, 43}, + dictWord{13, 0, 429}, + dictWord{14, 0, 130}, + dictWord{14, 0, 415}, + dictWord{145, 0, 102}, + dictWord{4, 10, 183}, 
+ dictWord{5, 11, 882}, + dictWord{7, 10, 271}, + dictWord{11, 10, 824}, + dictWord{11, 10, 952}, + dictWord{13, 10, 278}, + dictWord{13, 10, 339}, + dictWord{13, 10, 482}, + dictWord{14, 10, 424}, + dictWord{148, 10, 99}, + dictWord{4, 10, 19}, + dictWord{5, 10, 477}, + dictWord{5, 10, 596}, + dictWord{6, 10, 505}, + dictWord{7, 10, 1221}, + dictWord{11, 10, 907}, + dictWord{12, 10, 209}, + dictWord{141, 10, 214}, + dictWord{ + 135, + 10, + 1215, + }, + dictWord{133, 0, 452}, + dictWord{132, 11, 426}, + dictWord{5, 0, 149}, + dictWord{136, 0, 233}, + dictWord{133, 0, 935}, + dictWord{6, 11, 58}, + dictWord{ + 7, + 11, + 654, + }, + dictWord{7, 11, 745}, + dictWord{7, 11, 1969}, + dictWord{8, 11, 240}, + dictWord{8, 11, 675}, + dictWord{9, 11, 479}, + dictWord{9, 11, 731}, + dictWord{ + 10, + 11, + 330, + }, + dictWord{10, 11, 593}, + dictWord{10, 11, 817}, + dictWord{11, 11, 32}, + dictWord{11, 11, 133}, + dictWord{11, 11, 221}, + dictWord{145, 11, 68}, + dictWord{ + 12, + 0, + 582, + }, + dictWord{18, 0, 131}, + dictWord{7, 11, 102}, + dictWord{137, 11, 538}, + dictWord{136, 0, 801}, + dictWord{134, 10, 1645}, + dictWord{132, 0, 70}, + dictWord{6, 10, 92}, + dictWord{6, 10, 188}, + dictWord{7, 10, 1269}, + dictWord{7, 10, 1524}, + dictWord{7, 10, 1876}, + dictWord{10, 10, 228}, + dictWord{139, 10, 1020}, + dictWord{4, 10, 459}, + dictWord{133, 10, 966}, + dictWord{138, 0, 369}, + dictWord{16, 0, 36}, + dictWord{140, 10, 330}, + dictWord{141, 11, 366}, + dictWord{ + 7, + 0, + 721, + }, + dictWord{10, 0, 236}, + dictWord{12, 0, 204}, + dictWord{6, 10, 18}, + dictWord{7, 10, 932}, + dictWord{8, 10, 757}, + dictWord{9, 10, 54}, + dictWord{9, 10, 65}, + dictWord{9, 10, 844}, + dictWord{10, 10, 113}, + dictWord{10, 10, 315}, + dictWord{10, 10, 798}, + dictWord{11, 10, 153}, + dictWord{12, 10, 151}, + dictWord{12, 10, 392}, + dictWord{12, 10, 666}, + dictWord{142, 10, 248}, + dictWord{7, 0, 241}, + dictWord{10, 0, 430}, + dictWord{8, 10, 548}, + dictWord{9, 10, 532}, + dictWord{10, 10, 117}, + dictWord{11, 10, 351}, + dictWord{11, 10, 375}, + dictWord{143, 10, 23}, + dictWord{134, 10, 1742}, + dictWord{133, 10, 965}, + dictWord{133, 11, 566}, + dictWord{ + 6, + 11, + 48, + }, + dictWord{135, 11, 63}, + dictWord{134, 10, 182}, + dictWord{10, 10, 65}, + dictWord{10, 10, 488}, + dictWord{138, 10, 497}, + dictWord{6, 11, 114}, + dictWord{7, 11, 1224}, + dictWord{7, 11, 1556}, + dictWord{136, 11, 3}, + dictWord{134, 0, 1817}, + dictWord{8, 11, 576}, + dictWord{137, 11, 267}, + dictWord{ + 6, + 0, + 1078, + }, + dictWord{144, 0, 16}, + dictWord{9, 10, 588}, + dictWord{138, 10, 260}, + dictWord{138, 0, 1021}, + dictWord{5, 0, 406}, + dictWord{134, 0, 2022}, + dictWord{133, 11, 933}, + dictWord{6, 0, 69}, + dictWord{135, 0, 117}, + dictWord{7, 0, 1830}, + dictWord{136, 11, 427}, + dictWord{4, 0, 432}, + dictWord{135, 0, 824}, + dictWord{134, 10, 1786}, + dictWord{133, 0, 826}, + dictWord{139, 11, 67}, + dictWord{133, 11, 759}, + dictWord{135, 10, 308}, + dictWord{137, 0, 816}, + dictWord{ + 133, + 0, + 1000, + }, + dictWord{4, 0, 297}, + dictWord{6, 0, 529}, + dictWord{7, 0, 152}, + dictWord{7, 0, 713}, + dictWord{7, 0, 1845}, + dictWord{8, 0, 710}, + dictWord{8, 0, 717}, + dictWord{12, 0, 639}, + dictWord{140, 0, 685}, + dictWord{7, 0, 423}, + dictWord{136, 10, 588}, + dictWord{136, 10, 287}, + dictWord{136, 0, 510}, + dictWord{ + 134, + 0, + 1048, + }, + dictWord{6, 0, 618}, + dictWord{7, 11, 56}, + dictWord{7, 11, 1989}, + dictWord{8, 11, 337}, + dictWord{8, 11, 738}, + dictWord{9, 11, 600}, + 
dictWord{ + 10, + 11, + 483, + }, + dictWord{12, 11, 37}, + dictWord{13, 11, 447}, + dictWord{142, 11, 92}, + dictWord{4, 0, 520}, + dictWord{135, 0, 575}, + dictWord{8, 0, 990}, + dictWord{ + 138, + 0, + 977, + }, + dictWord{135, 11, 774}, + dictWord{9, 11, 347}, + dictWord{11, 11, 24}, + dictWord{140, 11, 170}, + dictWord{136, 11, 379}, + dictWord{140, 10, 290}, + dictWord{132, 11, 328}, + dictWord{4, 0, 321}, + dictWord{134, 0, 569}, + dictWord{4, 11, 101}, + dictWord{135, 11, 1171}, + dictWord{7, 0, 723}, + dictWord{7, 0, 1135}, + dictWord{5, 11, 833}, + dictWord{136, 11, 744}, + dictWord{7, 10, 719}, + dictWord{8, 10, 809}, + dictWord{136, 10, 834}, + dictWord{8, 0, 921}, + dictWord{136, 10, 796}, + dictWord{5, 10, 210}, + dictWord{6, 10, 213}, + dictWord{7, 10, 60}, + dictWord{10, 10, 364}, + dictWord{139, 10, 135}, + dictWord{5, 0, 397}, + dictWord{6, 0, 154}, + dictWord{7, 0, 676}, + dictWord{8, 0, 443}, + dictWord{8, 0, 609}, + dictWord{9, 0, 24}, + dictWord{9, 0, 325}, + dictWord{10, 0, 35}, + dictWord{11, 0, 535}, + dictWord{11, 0, 672}, + dictWord{11, 0, 1018}, + dictWord{12, 0, 637}, + dictWord{16, 0, 30}, + dictWord{5, 10, 607}, + dictWord{8, 10, 326}, + dictWord{136, 10, 490}, + dictWord{4, 10, 701}, + dictWord{5, 10, 472}, + dictWord{6, 11, 9}, + dictWord{6, 11, 397}, + dictWord{7, 11, 53}, + dictWord{7, 11, 1742}, + dictWord{9, 10, 758}, + dictWord{10, 11, 632}, + dictWord{ + 11, + 11, + 828, + }, + dictWord{140, 11, 146}, + dictWord{135, 10, 380}, + dictWord{135, 10, 1947}, + dictWord{148, 11, 109}, + dictWord{10, 10, 278}, + dictWord{ + 138, + 11, + 278, + }, + dictWord{134, 0, 856}, + dictWord{7, 0, 139}, + dictWord{4, 10, 386}, + dictWord{8, 10, 405}, + dictWord{8, 10, 728}, + dictWord{9, 10, 497}, + dictWord{ + 11, + 10, + 110, + }, + dictWord{11, 10, 360}, + dictWord{15, 10, 37}, + dictWord{144, 10, 84}, + dictWord{141, 0, 282}, + dictWord{133, 0, 981}, + dictWord{5, 0, 288}, + dictWord{ + 7, + 10, + 1452, + }, + dictWord{7, 10, 1480}, + dictWord{8, 10, 634}, + dictWord{140, 10, 472}, + dictWord{7, 0, 1890}, + dictWord{8, 11, 367}, + dictWord{10, 11, 760}, + dictWord{ + 14, + 11, + 79, + }, + dictWord{20, 11, 17}, + dictWord{152, 11, 0}, + dictWord{4, 10, 524}, + dictWord{136, 10, 810}, + dictWord{4, 0, 56}, + dictWord{7, 0, 1791}, + dictWord{ + 8, + 0, + 607, + }, + dictWord{8, 0, 651}, + dictWord{11, 0, 465}, + dictWord{11, 0, 835}, + dictWord{12, 0, 337}, + dictWord{141, 0, 480}, + dictWord{10, 10, 238}, + dictWord{ + 141, + 10, + 33, + }, + dictWord{11, 11, 417}, + dictWord{12, 11, 223}, + dictWord{140, 11, 265}, + dictWord{9, 0, 158}, + dictWord{10, 0, 411}, + dictWord{140, 0, 261}, + dictWord{ + 133, + 10, + 532, + }, + dictWord{133, 10, 997}, + dictWord{12, 11, 186}, + dictWord{12, 11, 292}, + dictWord{14, 11, 100}, + dictWord{146, 11, 70}, + dictWord{6, 0, 1403}, + dictWord{136, 0, 617}, + dictWord{134, 0, 1205}, + dictWord{139, 0, 563}, + dictWord{4, 0, 242}, + dictWord{134, 0, 333}, + dictWord{4, 11, 186}, + dictWord{5, 11, 157}, + dictWord{8, 11, 168}, + dictWord{138, 11, 6}, + dictWord{132, 0, 369}, + dictWord{133, 11, 875}, + dictWord{5, 10, 782}, + dictWord{5, 10, 829}, + dictWord{ + 134, + 10, + 1738, + }, + dictWord{134, 0, 622}, + dictWord{135, 11, 1272}, + dictWord{6, 0, 1407}, + dictWord{7, 11, 111}, + dictWord{136, 11, 581}, + dictWord{7, 10, 1823}, + dictWord{139, 10, 693}, + dictWord{7, 0, 160}, + dictWord{10, 0, 624}, + dictWord{142, 0, 279}, + dictWord{132, 0, 363}, + dictWord{10, 11, 589}, + dictWord{12, 11, 111}, + dictWord{13, 11, 260}, + 
dictWord{14, 11, 82}, + dictWord{18, 11, 63}, + dictWord{147, 11, 45}, + dictWord{7, 11, 1364}, + dictWord{7, 11, 1907}, + dictWord{ + 141, + 11, + 158, + }, + dictWord{4, 11, 404}, + dictWord{4, 11, 659}, + dictWord{135, 11, 675}, + dictWord{13, 11, 211}, + dictWord{14, 11, 133}, + dictWord{14, 11, 204}, + dictWord{ + 15, + 11, + 64, + }, + dictWord{15, 11, 69}, + dictWord{15, 11, 114}, + dictWord{16, 11, 10}, + dictWord{19, 11, 23}, + dictWord{19, 11, 35}, + dictWord{19, 11, 39}, + dictWord{ + 19, + 11, + 51, + }, + dictWord{19, 11, 71}, + dictWord{19, 11, 75}, + dictWord{152, 11, 15}, + dictWord{4, 10, 78}, + dictWord{5, 10, 96}, + dictWord{5, 10, 182}, + dictWord{7, 10, 1724}, + dictWord{7, 10, 1825}, + dictWord{10, 10, 394}, + dictWord{10, 10, 471}, + dictWord{11, 10, 532}, + dictWord{14, 10, 340}, + dictWord{145, 10, 88}, + dictWord{ + 135, + 10, + 1964, + }, + dictWord{133, 11, 391}, + dictWord{11, 11, 887}, + dictWord{14, 11, 365}, + dictWord{142, 11, 375}, + dictWord{5, 11, 540}, + dictWord{6, 11, 1697}, + dictWord{7, 11, 222}, + dictWord{136, 11, 341}, + dictWord{134, 11, 78}, + dictWord{9, 0, 601}, + dictWord{9, 0, 619}, + dictWord{10, 0, 505}, + dictWord{10, 0, 732}, + dictWord{11, 0, 355}, + dictWord{140, 0, 139}, + dictWord{134, 0, 292}, + dictWord{139, 0, 174}, + dictWord{5, 0, 177}, + dictWord{6, 0, 616}, + dictWord{7, 0, 827}, + dictWord{ + 9, + 0, + 525, + }, + dictWord{138, 0, 656}, + dictWord{10, 0, 31}, + dictWord{6, 10, 215}, + dictWord{7, 10, 1028}, + dictWord{7, 10, 1473}, + dictWord{7, 10, 1721}, + dictWord{ + 9, + 10, + 424, + }, + dictWord{138, 10, 779}, + dictWord{135, 10, 584}, + dictWord{136, 11, 293}, + dictWord{134, 0, 685}, + dictWord{135, 11, 1868}, + dictWord{ + 133, + 11, + 460, + }, + dictWord{7, 0, 647}, + dictWord{6, 10, 67}, + dictWord{7, 10, 1630}, + dictWord{9, 10, 354}, + dictWord{9, 10, 675}, + dictWord{10, 10, 830}, + dictWord{ + 14, + 10, + 80, + }, + dictWord{145, 10, 80}, + dictWord{4, 0, 161}, + dictWord{133, 0, 631}, + dictWord{6, 10, 141}, + dictWord{7, 10, 225}, + dictWord{9, 10, 59}, + dictWord{9, 10, 607}, + dictWord{10, 10, 312}, + dictWord{11, 10, 687}, + dictWord{12, 10, 555}, + dictWord{13, 10, 373}, + dictWord{13, 10, 494}, + dictWord{148, 10, 58}, + dictWord{ + 7, + 11, + 965, + }, + dictWord{7, 11, 1460}, + dictWord{135, 11, 1604}, + dictWord{136, 10, 783}, + dictWord{134, 11, 388}, + dictWord{6, 0, 722}, + dictWord{6, 0, 1267}, + dictWord{ + 4, + 11, + 511, + }, + dictWord{9, 11, 333}, + dictWord{9, 11, 379}, + dictWord{10, 11, 602}, + dictWord{11, 11, 441}, + dictWord{11, 11, 723}, + dictWord{11, 11, 976}, + dictWord{140, 11, 357}, + dictWord{134, 0, 1797}, + dictWord{135, 0, 1684}, + dictWord{9, 0, 469}, + dictWord{9, 0, 709}, + dictWord{12, 0, 512}, + dictWord{14, 0, 65}, + dictWord{17, 0, 12}, + dictWord{5, 11, 938}, + dictWord{136, 11, 707}, + dictWord{7, 0, 1230}, + dictWord{136, 0, 531}, + dictWord{10, 0, 229}, + dictWord{11, 0, 73}, + dictWord{ + 11, + 0, + 376, + }, + dictWord{139, 0, 433}, + dictWord{12, 0, 268}, + dictWord{12, 0, 640}, + dictWord{142, 0, 119}, + dictWord{7, 10, 430}, + dictWord{139, 10, 46}, + dictWord{ + 6, + 0, + 558, + }, + dictWord{7, 0, 651}, + dictWord{8, 0, 421}, + dictWord{9, 0, 0}, + dictWord{10, 0, 34}, + dictWord{139, 0, 1008}, + dictWord{6, 0, 106}, + dictWord{7, 0, 1786}, + dictWord{7, 0, 1821}, + dictWord{9, 0, 102}, + dictWord{9, 0, 763}, + dictWord{5, 10, 602}, + dictWord{7, 10, 2018}, + dictWord{137, 10, 418}, + dictWord{5, 0, 65}, + dictWord{ + 6, + 0, + 416, + }, + dictWord{7, 0, 
1720}, + dictWord{7, 0, 1924}, + dictWord{10, 0, 109}, + dictWord{11, 0, 14}, + dictWord{11, 0, 70}, + dictWord{11, 0, 569}, + dictWord{11, 0, 735}, + dictWord{15, 0, 153}, + dictWord{20, 0, 80}, + dictWord{136, 10, 677}, + dictWord{135, 11, 1625}, + dictWord{137, 11, 772}, + dictWord{136, 0, 595}, + dictWord{ + 6, + 11, + 469, + }, + dictWord{7, 11, 1709}, + dictWord{138, 11, 515}, + dictWord{7, 0, 1832}, + dictWord{138, 0, 374}, + dictWord{9, 0, 106}, + dictWord{9, 0, 163}, + dictWord{ + 9, + 0, + 296, + }, + dictWord{10, 0, 167}, + dictWord{10, 0, 172}, + dictWord{10, 0, 777}, + dictWord{139, 0, 16}, + dictWord{6, 0, 6}, + dictWord{7, 0, 81}, + dictWord{7, 0, 771}, + dictWord{ + 7, + 0, + 1731, + }, + dictWord{9, 0, 405}, + dictWord{138, 0, 421}, + dictWord{4, 11, 500}, + dictWord{135, 11, 938}, + dictWord{5, 11, 68}, + dictWord{134, 11, 383}, + dictWord{ + 5, + 0, + 881, + }, + dictWord{133, 0, 885}, + dictWord{6, 0, 854}, + dictWord{6, 0, 1132}, + dictWord{6, 0, 1495}, + dictWord{6, 0, 1526}, + dictWord{6, 0, 1533}, + dictWord{ + 134, + 0, + 1577, + }, + dictWord{4, 11, 337}, + dictWord{6, 11, 353}, + dictWord{7, 11, 1934}, + dictWord{8, 11, 488}, + dictWord{137, 11, 429}, + dictWord{7, 11, 236}, + dictWord{ + 7, + 11, + 1795, + }, + dictWord{8, 11, 259}, + dictWord{9, 11, 135}, + dictWord{9, 11, 177}, + dictWord{10, 11, 825}, + dictWord{11, 11, 115}, + dictWord{11, 11, 370}, + dictWord{ + 11, + 11, + 405, + }, + dictWord{11, 11, 604}, + dictWord{12, 11, 10}, + dictWord{12, 11, 667}, + dictWord{12, 11, 669}, + dictWord{13, 11, 76}, + dictWord{14, 11, 310}, + dictWord{15, 11, 76}, + dictWord{15, 11, 147}, + dictWord{148, 11, 23}, + dictWord{5, 0, 142}, + dictWord{134, 0, 546}, + dictWord{4, 11, 15}, + dictWord{5, 11, 22}, + dictWord{ + 6, + 11, + 244, + }, + dictWord{7, 11, 40}, + dictWord{7, 11, 200}, + dictWord{7, 11, 906}, + dictWord{7, 11, 1199}, + dictWord{9, 11, 616}, + dictWord{10, 11, 716}, + dictWord{ + 11, + 11, + 635, + }, + dictWord{11, 11, 801}, + dictWord{140, 11, 458}, + dictWord{5, 0, 466}, + dictWord{11, 0, 571}, + dictWord{12, 0, 198}, + dictWord{13, 0, 283}, + dictWord{ + 14, + 0, + 186, + }, + dictWord{15, 0, 21}, + dictWord{15, 0, 103}, + dictWord{135, 10, 329}, + dictWord{4, 0, 185}, + dictWord{5, 0, 257}, + dictWord{5, 0, 839}, + dictWord{5, 0, 936}, + dictWord{9, 0, 399}, + dictWord{10, 0, 258}, + dictWord{10, 0, 395}, + dictWord{10, 0, 734}, + dictWord{11, 0, 1014}, + dictWord{12, 0, 23}, + dictWord{13, 0, 350}, + dictWord{ + 14, + 0, + 150, + }, + dictWord{19, 0, 6}, + dictWord{135, 11, 1735}, + dictWord{12, 11, 36}, + dictWord{141, 11, 337}, + dictWord{5, 11, 598}, + dictWord{7, 11, 791}, + dictWord{ + 8, + 11, + 108, + }, + dictWord{137, 11, 123}, + dictWord{132, 10, 469}, + dictWord{7, 0, 404}, + dictWord{7, 0, 1377}, + dictWord{7, 0, 1430}, + dictWord{7, 0, 2017}, + dictWord{ + 8, + 0, + 149, + }, + dictWord{8, 0, 239}, + dictWord{8, 0, 512}, + dictWord{8, 0, 793}, + dictWord{8, 0, 818}, + dictWord{9, 0, 474}, + dictWord{9, 0, 595}, + dictWord{10, 0, 122}, + dictWord{10, 0, 565}, + dictWord{10, 0, 649}, + dictWord{10, 0, 783}, + dictWord{11, 0, 239}, + dictWord{11, 0, 295}, + dictWord{11, 0, 447}, + dictWord{11, 0, 528}, + dictWord{ + 11, + 0, + 639, + }, + dictWord{11, 0, 800}, + dictWord{12, 0, 25}, + dictWord{12, 0, 77}, + dictWord{12, 0, 157}, + dictWord{12, 0, 256}, + dictWord{12, 0, 316}, + dictWord{12, 0, 390}, + dictWord{12, 0, 391}, + dictWord{12, 0, 395}, + dictWord{12, 0, 478}, + dictWord{12, 0, 503}, + dictWord{12, 0, 592}, + dictWord{12, 0, 
680}, + dictWord{13, 0, 50}, + dictWord{13, 0, 53}, + dictWord{13, 0, 132}, + dictWord{13, 0, 198}, + dictWord{13, 0, 322}, + dictWord{13, 0, 415}, + dictWord{13, 0, 511}, + dictWord{14, 0, 71}, + dictWord{ + 14, + 0, + 395, + }, + dictWord{15, 0, 71}, + dictWord{15, 0, 136}, + dictWord{17, 0, 123}, + dictWord{18, 0, 93}, + dictWord{147, 0, 58}, + dictWord{136, 0, 712}, + dictWord{ + 134, + 10, + 1743, + }, + dictWord{5, 10, 929}, + dictWord{6, 10, 340}, + dictWord{8, 10, 376}, + dictWord{136, 10, 807}, + dictWord{6, 0, 1848}, + dictWord{8, 0, 860}, + dictWord{ + 10, + 0, + 856, + }, + dictWord{10, 0, 859}, + dictWord{10, 0, 925}, + dictWord{10, 0, 941}, + dictWord{140, 0, 762}, + dictWord{6, 0, 629}, + dictWord{6, 0, 906}, + dictWord{9, 0, 810}, + dictWord{140, 0, 652}, + dictWord{5, 10, 218}, + dictWord{7, 10, 1610}, + dictWord{138, 10, 83}, + dictWord{7, 10, 1512}, + dictWord{135, 10, 1794}, + dictWord{ + 4, + 0, + 377, + }, + dictWord{24, 0, 13}, + dictWord{4, 11, 155}, + dictWord{7, 11, 1689}, + dictWord{11, 10, 0}, + dictWord{144, 10, 78}, + dictWord{4, 11, 164}, + dictWord{5, 11, 151}, + dictWord{5, 11, 730}, + dictWord{5, 11, 741}, + dictWord{7, 11, 498}, + dictWord{7, 11, 870}, + dictWord{7, 11, 1542}, + dictWord{12, 11, 213}, + dictWord{14, 11, 36}, + dictWord{14, 11, 391}, + dictWord{17, 11, 111}, + dictWord{18, 11, 6}, + dictWord{18, 11, 46}, + dictWord{18, 11, 151}, + dictWord{19, 11, 36}, + dictWord{20, 11, 32}, + dictWord{20, 11, 56}, + dictWord{20, 11, 69}, + dictWord{20, 11, 102}, + dictWord{21, 11, 4}, + dictWord{22, 11, 8}, + dictWord{22, 11, 10}, + dictWord{22, 11, 14}, + dictWord{ + 150, + 11, + 31, + }, + dictWord{7, 0, 1842}, + dictWord{133, 10, 571}, + dictWord{4, 10, 455}, + dictWord{4, 11, 624}, + dictWord{135, 11, 1752}, + dictWord{134, 0, 1501}, + dictWord{4, 11, 492}, + dictWord{5, 11, 451}, + dictWord{6, 10, 161}, + dictWord{7, 10, 372}, + dictWord{137, 10, 597}, + dictWord{132, 10, 349}, + dictWord{4, 0, 180}, + dictWord{135, 0, 1906}, + dictWord{135, 11, 835}, + dictWord{141, 11, 70}, + dictWord{132, 0, 491}, + dictWord{137, 10, 751}, + dictWord{6, 10, 432}, + dictWord{ + 139, + 10, + 322, + }, + dictWord{4, 0, 171}, + dictWord{138, 0, 234}, + dictWord{6, 11, 113}, + dictWord{135, 11, 436}, + dictWord{4, 0, 586}, + dictWord{7, 0, 1186}, + dictWord{ + 138, + 0, + 631, + }, + dictWord{5, 10, 468}, + dictWord{10, 10, 325}, + dictWord{11, 10, 856}, + dictWord{12, 10, 345}, + dictWord{143, 10, 104}, + dictWord{5, 10, 223}, + dictWord{10, 11, 592}, + dictWord{10, 11, 753}, + dictWord{12, 11, 317}, + dictWord{12, 11, 355}, + dictWord{12, 11, 465}, + dictWord{12, 11, 469}, + dictWord{ + 12, + 11, + 560, + }, + dictWord{12, 11, 578}, + dictWord{141, 11, 243}, + dictWord{132, 10, 566}, + dictWord{135, 11, 520}, + dictWord{4, 10, 59}, + dictWord{135, 10, 1394}, + dictWord{6, 10, 436}, + dictWord{139, 10, 481}, + dictWord{9, 0, 931}, + dictWord{10, 0, 334}, + dictWord{20, 0, 71}, + dictWord{4, 10, 48}, + dictWord{5, 10, 271}, + dictWord{ + 7, + 10, + 953, + }, + dictWord{135, 11, 1878}, + dictWord{11, 0, 170}, + dictWord{5, 10, 610}, + dictWord{136, 10, 457}, + dictWord{133, 10, 755}, + dictWord{6, 0, 1587}, + dictWord{135, 10, 1217}, + dictWord{4, 10, 197}, + dictWord{149, 11, 26}, + dictWord{133, 11, 585}, + dictWord{137, 11, 521}, + dictWord{133, 0, 765}, + dictWord{ + 133, + 10, + 217, + }, + dictWord{139, 11, 586}, + dictWord{133, 0, 424}, + dictWord{9, 11, 752}, + dictWord{12, 11, 610}, + dictWord{13, 11, 431}, + dictWord{16, 11, 59}, + dictWord{146, 11, 109}, + 
dictWord{136, 0, 714}, + dictWord{7, 0, 685}, + dictWord{132, 11, 307}, + dictWord{9, 0, 420}, + dictWord{10, 0, 269}, + dictWord{10, 0, 285}, + dictWord{10, 0, 576}, + dictWord{11, 0, 397}, + dictWord{13, 0, 175}, + dictWord{145, 0, 90}, + dictWord{132, 0, 429}, + dictWord{133, 11, 964}, + dictWord{9, 11, 463}, + dictWord{138, 11, 595}, + dictWord{7, 0, 18}, + dictWord{7, 0, 699}, + dictWord{7, 0, 1966}, + dictWord{8, 0, 752}, + dictWord{9, 0, 273}, + dictWord{9, 0, 412}, + dictWord{ + 9, + 0, + 703, + }, + dictWord{10, 0, 71}, + dictWord{10, 0, 427}, + dictWord{138, 0, 508}, + dictWord{4, 10, 165}, + dictWord{7, 10, 1398}, + dictWord{135, 10, 1829}, + dictWord{ + 4, + 0, + 53, + }, + dictWord{5, 0, 186}, + dictWord{7, 0, 752}, + dictWord{7, 0, 828}, + dictWord{142, 0, 116}, + dictWord{8, 0, 575}, + dictWord{10, 0, 289}, + dictWord{139, 0, 319}, + dictWord{132, 0, 675}, + dictWord{134, 0, 1424}, + dictWord{4, 11, 75}, + dictWord{5, 11, 180}, + dictWord{6, 11, 500}, + dictWord{7, 11, 58}, + dictWord{7, 11, 710}, + dictWord{138, 11, 645}, + dictWord{133, 11, 649}, + dictWord{6, 11, 276}, + dictWord{7, 11, 282}, + dictWord{7, 11, 879}, + dictWord{7, 11, 924}, + dictWord{8, 11, 459}, + dictWord{9, 11, 599}, + dictWord{9, 11, 754}, + dictWord{11, 11, 574}, + dictWord{12, 11, 128}, + dictWord{12, 11, 494}, + dictWord{13, 11, 52}, + dictWord{13, 11, 301}, + dictWord{15, 11, 30}, + dictWord{143, 11, 132}, + dictWord{6, 0, 647}, + dictWord{134, 0, 1095}, + dictWord{5, 10, 9}, + dictWord{7, 10, 297}, + dictWord{7, 10, 966}, + dictWord{140, 10, 306}, + dictWord{132, 11, 200}, + dictWord{134, 0, 1334}, + dictWord{5, 10, 146}, + dictWord{6, 10, 411}, + dictWord{138, 10, 721}, + dictWord{ + 6, + 0, + 209, + }, + dictWord{6, 0, 1141}, + dictWord{6, 0, 1288}, + dictWord{8, 0, 468}, + dictWord{9, 0, 210}, + dictWord{11, 0, 36}, + dictWord{12, 0, 28}, + dictWord{12, 0, 630}, + dictWord{13, 0, 21}, + dictWord{13, 0, 349}, + dictWord{14, 0, 7}, + dictWord{145, 0, 13}, + dictWord{6, 10, 177}, + dictWord{135, 10, 467}, + dictWord{4, 0, 342}, + dictWord{ + 135, + 0, + 1179, + }, + dictWord{10, 11, 454}, + dictWord{140, 11, 324}, + dictWord{4, 0, 928}, + dictWord{133, 0, 910}, + dictWord{7, 0, 1838}, + dictWord{6, 11, 225}, + dictWord{ + 137, + 11, + 211, + }, + dictWord{16, 0, 101}, + dictWord{20, 0, 115}, + dictWord{20, 0, 118}, + dictWord{148, 0, 122}, + dictWord{4, 0, 496}, + dictWord{135, 0, 856}, + dictWord{ + 4, + 0, + 318, + }, + dictWord{11, 0, 654}, + dictWord{7, 11, 718}, + dictWord{139, 11, 102}, + dictWord{8, 11, 58}, + dictWord{9, 11, 724}, + dictWord{11, 11, 809}, + dictWord{ + 13, + 11, + 113, + }, + dictWord{145, 11, 72}, + dictWord{5, 10, 200}, + dictWord{6, 11, 345}, + dictWord{135, 11, 1247}, + dictWord{8, 11, 767}, + dictWord{8, 11, 803}, + dictWord{ + 9, + 11, + 301, + }, + dictWord{137, 11, 903}, + dictWord{7, 0, 915}, + dictWord{8, 0, 247}, + dictWord{19, 0, 0}, + dictWord{7, 11, 1949}, + dictWord{136, 11, 674}, + dictWord{ + 4, + 0, + 202, + }, + dictWord{5, 0, 382}, + dictWord{6, 0, 454}, + dictWord{7, 0, 936}, + dictWord{7, 0, 1803}, + dictWord{8, 0, 758}, + dictWord{9, 0, 375}, + dictWord{9, 0, 895}, + dictWord{ + 10, + 0, + 743, + }, + dictWord{10, 0, 792}, + dictWord{11, 0, 978}, + dictWord{11, 0, 1012}, + dictWord{142, 0, 109}, + dictWord{7, 0, 1150}, + dictWord{7, 0, 1425}, + dictWord{ + 7, + 0, + 1453, + }, + dictWord{140, 0, 513}, + dictWord{134, 11, 259}, + dictWord{138, 0, 791}, + dictWord{11, 0, 821}, + dictWord{12, 0, 110}, + dictWord{12, 0, 153}, + dictWord{ + 18, + 0, + 
41, + }, + dictWord{150, 0, 19}, + dictWord{134, 10, 481}, + dictWord{132, 0, 796}, + dictWord{6, 0, 445}, + dictWord{9, 0, 909}, + dictWord{136, 11, 254}, + dictWord{ + 10, + 0, + 776, + }, + dictWord{13, 0, 345}, + dictWord{142, 0, 425}, + dictWord{4, 10, 84}, + dictWord{7, 10, 1482}, + dictWord{10, 10, 76}, + dictWord{138, 10, 142}, + dictWord{ + 135, + 11, + 742, + }, + dictWord{6, 0, 578}, + dictWord{133, 10, 1015}, + dictWord{6, 0, 1387}, + dictWord{4, 10, 315}, + dictWord{5, 10, 507}, + dictWord{135, 10, 1370}, + dictWord{4, 0, 438}, + dictWord{133, 0, 555}, + dictWord{136, 0, 766}, + dictWord{133, 11, 248}, + dictWord{134, 10, 1722}, + dictWord{4, 11, 116}, + dictWord{5, 11, 95}, + dictWord{5, 11, 445}, + dictWord{7, 11, 1688}, + dictWord{8, 11, 29}, + dictWord{9, 11, 272}, + dictWord{11, 11, 509}, + dictWord{139, 11, 915}, + dictWord{135, 0, 541}, + dictWord{133, 11, 543}, + dictWord{8, 10, 222}, + dictWord{8, 10, 476}, + dictWord{9, 10, 238}, + dictWord{11, 10, 516}, + dictWord{11, 10, 575}, + dictWord{ + 15, + 10, + 109, + }, + dictWord{146, 10, 100}, + dictWord{6, 0, 880}, + dictWord{134, 0, 1191}, + dictWord{5, 11, 181}, + dictWord{136, 11, 41}, + dictWord{134, 0, 1506}, + dictWord{132, 11, 681}, + dictWord{7, 11, 25}, + dictWord{8, 11, 202}, + dictWord{138, 11, 536}, + dictWord{139, 0, 983}, + dictWord{137, 0, 768}, + dictWord{132, 0, 584}, + dictWord{9, 11, 423}, + dictWord{140, 11, 89}, + dictWord{8, 11, 113}, + dictWord{9, 11, 877}, + dictWord{10, 11, 554}, + dictWord{11, 11, 83}, + dictWord{12, 11, 136}, + dictWord{147, 11, 109}, + dictWord{7, 10, 706}, + dictWord{7, 10, 1058}, + dictWord{138, 10, 538}, + dictWord{133, 11, 976}, + dictWord{4, 11, 206}, + dictWord{ + 135, + 11, + 746, + }, + dictWord{136, 11, 526}, + dictWord{140, 0, 737}, + dictWord{11, 10, 92}, + dictWord{11, 10, 196}, + dictWord{11, 10, 409}, + dictWord{11, 10, 450}, + dictWord{11, 10, 666}, + dictWord{11, 10, 777}, + dictWord{12, 10, 262}, + dictWord{13, 10, 385}, + dictWord{13, 10, 393}, + dictWord{15, 10, 115}, + dictWord{ + 16, + 10, + 45, + }, + dictWord{145, 10, 82}, + dictWord{4, 0, 226}, + dictWord{4, 0, 326}, + dictWord{7, 0, 1770}, + dictWord{4, 11, 319}, + dictWord{5, 11, 699}, + dictWord{138, 11, 673}, + dictWord{6, 10, 40}, + dictWord{135, 10, 1781}, + dictWord{5, 0, 426}, + dictWord{8, 0, 30}, + dictWord{9, 0, 2}, + dictWord{11, 0, 549}, + dictWord{147, 0, 122}, + dictWord{ + 6, + 0, + 1161, + }, + dictWord{134, 0, 1329}, + dictWord{138, 10, 97}, + dictWord{6, 10, 423}, + dictWord{7, 10, 665}, + dictWord{135, 10, 1210}, + dictWord{7, 11, 13}, + dictWord{ + 8, + 11, + 226, + }, + dictWord{10, 11, 537}, + dictWord{11, 11, 570}, + dictWord{11, 11, 605}, + dictWord{11, 11, 799}, + dictWord{11, 11, 804}, + dictWord{12, 11, 85}, + dictWord{12, 11, 516}, + dictWord{12, 11, 623}, + dictWord{13, 11, 112}, + dictWord{13, 11, 361}, + dictWord{14, 11, 77}, + dictWord{14, 11, 78}, + dictWord{17, 11, 28}, + dictWord{147, 11, 110}, + dictWord{132, 11, 769}, + dictWord{132, 11, 551}, + dictWord{132, 11, 728}, + dictWord{147, 0, 117}, + dictWord{9, 11, 57}, + dictWord{ + 9, + 11, + 459, + }, + dictWord{10, 11, 425}, + dictWord{11, 11, 119}, + dictWord{12, 11, 184}, + dictWord{12, 11, 371}, + dictWord{13, 11, 358}, + dictWord{145, 11, 51}, + dictWord{ + 5, + 11, + 188, + }, + dictWord{5, 11, 814}, + dictWord{8, 11, 10}, + dictWord{9, 11, 421}, + dictWord{9, 11, 729}, + dictWord{10, 11, 609}, + dictWord{139, 11, 689}, + dictWord{134, 11, 624}, + dictWord{135, 11, 298}, + dictWord{135, 0, 462}, + dictWord{4, 
0, 345}, + dictWord{139, 10, 624}, + dictWord{136, 10, 574}, + dictWord{ + 4, + 0, + 385, + }, + dictWord{7, 0, 265}, + dictWord{135, 0, 587}, + dictWord{6, 0, 808}, + dictWord{132, 11, 528}, + dictWord{133, 0, 398}, + dictWord{132, 10, 354}, + dictWord{ + 4, + 0, + 347, + }, + dictWord{5, 0, 423}, + dictWord{5, 0, 996}, + dictWord{135, 0, 1329}, + dictWord{135, 10, 1558}, + dictWord{7, 0, 1259}, + dictWord{9, 0, 125}, + dictWord{ + 139, + 0, + 65, + }, + dictWord{5, 0, 136}, + dictWord{6, 0, 136}, + dictWord{136, 0, 644}, + dictWord{5, 11, 104}, + dictWord{6, 11, 173}, + dictWord{135, 11, 1631}, + dictWord{ + 135, + 0, + 469, + }, + dictWord{133, 10, 830}, + dictWord{4, 0, 278}, + dictWord{5, 0, 465}, + dictWord{135, 0, 1367}, + dictWord{7, 11, 810}, + dictWord{8, 11, 138}, + dictWord{ + 8, + 11, + 342, + }, + dictWord{9, 11, 84}, + dictWord{10, 11, 193}, + dictWord{11, 11, 883}, + dictWord{140, 11, 359}, + dictWord{5, 10, 496}, + dictWord{135, 10, 203}, + dictWord{ + 4, + 0, + 433, + }, + dictWord{133, 0, 719}, + dictWord{6, 11, 95}, + dictWord{134, 10, 547}, + dictWord{5, 10, 88}, + dictWord{137, 10, 239}, + dictWord{6, 11, 406}, + dictWord{ + 10, + 11, + 409, + }, + dictWord{10, 11, 447}, + dictWord{11, 11, 44}, + dictWord{140, 11, 100}, + dictWord{134, 0, 1423}, + dictWord{7, 10, 650}, + dictWord{135, 10, 1310}, + dictWord{134, 0, 749}, + dictWord{135, 11, 1243}, + dictWord{135, 0, 1363}, + dictWord{6, 0, 381}, + dictWord{7, 0, 645}, + dictWord{7, 0, 694}, + dictWord{8, 0, 546}, + dictWord{7, 10, 1076}, + dictWord{9, 10, 80}, + dictWord{11, 10, 78}, + dictWord{11, 10, 421}, + dictWord{11, 10, 534}, + dictWord{140, 10, 545}, + dictWord{ + 134, + 11, + 1636, + }, + dictWord{135, 11, 1344}, + dictWord{12, 0, 277}, + dictWord{7, 10, 274}, + dictWord{11, 10, 479}, + dictWord{139, 10, 507}, + dictWord{6, 0, 705}, + dictWord{ + 6, + 0, + 783, + }, + dictWord{6, 0, 1275}, + dictWord{6, 0, 1481}, + dictWord{4, 11, 282}, + dictWord{7, 11, 1034}, + dictWord{11, 11, 398}, + dictWord{11, 11, 634}, + dictWord{ + 12, + 11, + 1, + }, + dictWord{12, 11, 79}, + dictWord{12, 11, 544}, + dictWord{14, 11, 237}, + dictWord{17, 11, 10}, + dictWord{146, 11, 20}, + dictWord{134, 0, 453}, + dictWord{ + 4, + 0, + 555, + }, + dictWord{8, 0, 536}, + dictWord{10, 0, 288}, + dictWord{11, 0, 1005}, + dictWord{4, 10, 497}, + dictWord{135, 10, 1584}, + dictWord{5, 11, 118}, + dictWord{ + 5, + 11, + 499, + }, + dictWord{6, 11, 476}, + dictWord{7, 11, 600}, + dictWord{7, 11, 888}, + dictWord{135, 11, 1096}, + dictWord{138, 0, 987}, + dictWord{7, 0, 1107}, + dictWord{ + 7, + 10, + 261, + }, + dictWord{7, 10, 1115}, + dictWord{7, 10, 1354}, + dictWord{7, 10, 1588}, + dictWord{7, 10, 1705}, + dictWord{7, 10, 1902}, + dictWord{9, 10, 465}, + dictWord{10, 10, 248}, + dictWord{10, 10, 349}, + dictWord{10, 10, 647}, + dictWord{11, 10, 527}, + dictWord{11, 10, 660}, + dictWord{11, 10, 669}, + dictWord{ + 12, + 10, + 529, + }, + dictWord{141, 10, 305}, + dictWord{7, 11, 296}, + dictWord{7, 11, 596}, + dictWord{8, 11, 560}, + dictWord{8, 11, 586}, + dictWord{9, 11, 612}, + dictWord{ + 11, + 11, + 100, + }, + dictWord{11, 11, 304}, + dictWord{12, 11, 46}, + dictWord{13, 11, 89}, + dictWord{14, 11, 112}, + dictWord{145, 11, 122}, + dictWord{9, 0, 370}, + dictWord{ + 138, + 0, + 90, + }, + dictWord{136, 10, 13}, + dictWord{132, 0, 860}, + dictWord{7, 10, 642}, + dictWord{8, 10, 250}, + dictWord{11, 10, 123}, + dictWord{11, 10, 137}, + dictWord{ + 13, + 10, + 48, + }, + dictWord{142, 10, 95}, + dictWord{135, 10, 1429}, + 
dictWord{137, 11, 321}, + dictWord{132, 0, 257}, + dictWord{135, 0, 2031}, + dictWord{7, 0, 1768}, + dictWord{7, 11, 1599}, + dictWord{7, 11, 1723}, + dictWord{8, 11, 79}, + dictWord{8, 11, 106}, + dictWord{8, 11, 190}, + dictWord{8, 11, 302}, + dictWord{8, 11, 383}, + dictWord{9, 11, 119}, + dictWord{9, 11, 233}, + dictWord{9, 11, 298}, + dictWord{9, 11, 419}, + dictWord{9, 11, 471}, + dictWord{10, 11, 181}, + dictWord{10, 11, 406}, + dictWord{11, 11, 57}, + dictWord{11, 11, 85}, + dictWord{11, 11, 120}, + dictWord{11, 11, 177}, + dictWord{11, 11, 296}, + dictWord{11, 11, 382}, + dictWord{11, 11, 454}, + dictWord{11, 11, 758}, + dictWord{11, 11, 999}, + dictWord{12, 11, 27}, + dictWord{12, 11, 98}, + dictWord{12, 11, 131}, + dictWord{12, 11, 245}, + dictWord{ + 12, + 11, + 312, + }, + dictWord{12, 11, 446}, + dictWord{12, 11, 454}, + dictWord{13, 11, 25}, + dictWord{13, 11, 98}, + dictWord{13, 11, 426}, + dictWord{13, 11, 508}, + dictWord{ + 14, + 11, + 6, + }, + dictWord{14, 11, 163}, + dictWord{14, 11, 272}, + dictWord{14, 11, 277}, + dictWord{14, 11, 370}, + dictWord{15, 11, 95}, + dictWord{15, 11, 138}, + dictWord{ + 15, + 11, + 167, + }, + dictWord{17, 11, 18}, + dictWord{17, 11, 38}, + dictWord{20, 11, 96}, + dictWord{149, 11, 32}, + dictWord{5, 11, 722}, + dictWord{134, 11, 1759}, + dictWord{145, 11, 16}, + dictWord{6, 0, 1071}, + dictWord{134, 0, 1561}, + dictWord{10, 10, 545}, + dictWord{140, 10, 301}, + dictWord{6, 0, 83}, + dictWord{6, 0, 1733}, + dictWord{135, 0, 1389}, + dictWord{4, 0, 835}, + dictWord{135, 0, 1818}, + dictWord{133, 11, 258}, + dictWord{4, 10, 904}, + dictWord{133, 10, 794}, + dictWord{ + 134, + 0, + 2006, + }, + dictWord{5, 11, 30}, + dictWord{7, 11, 495}, + dictWord{8, 11, 134}, + dictWord{9, 11, 788}, + dictWord{140, 11, 438}, + dictWord{135, 11, 2004}, + dictWord{ + 137, + 0, + 696, + }, + dictWord{5, 11, 50}, + dictWord{6, 11, 439}, + dictWord{7, 11, 780}, + dictWord{135, 11, 1040}, + dictWord{7, 11, 772}, + dictWord{7, 11, 1104}, + dictWord{ + 7, + 11, + 1647, + }, + dictWord{11, 11, 269}, + dictWord{11, 11, 539}, + dictWord{11, 11, 607}, + dictWord{11, 11, 627}, + dictWord{11, 11, 706}, + dictWord{11, 11, 975}, + dictWord{12, 11, 248}, + dictWord{12, 11, 311}, + dictWord{12, 11, 434}, + dictWord{12, 11, 600}, + dictWord{12, 11, 622}, + dictWord{13, 11, 297}, + dictWord{ + 13, + 11, + 367, + }, + dictWord{13, 11, 485}, + dictWord{14, 11, 69}, + dictWord{14, 11, 409}, + dictWord{143, 11, 108}, + dictWord{5, 11, 1}, + dictWord{6, 11, 81}, + dictWord{ + 138, + 11, + 520, + }, + dictWord{7, 0, 1718}, + dictWord{9, 0, 95}, + dictWord{9, 0, 274}, + dictWord{10, 0, 279}, + dictWord{10, 0, 317}, + dictWord{10, 0, 420}, + dictWord{11, 0, 303}, + dictWord{11, 0, 808}, + dictWord{12, 0, 134}, + dictWord{12, 0, 367}, + dictWord{13, 0, 149}, + dictWord{13, 0, 347}, + dictWord{14, 0, 349}, + dictWord{14, 0, 406}, + dictWord{ + 18, + 0, + 22, + }, + dictWord{18, 0, 89}, + dictWord{18, 0, 122}, + dictWord{147, 0, 47}, + dictWord{5, 11, 482}, + dictWord{8, 11, 98}, + dictWord{9, 11, 172}, + dictWord{10, 11, 222}, + dictWord{10, 11, 700}, + dictWord{10, 11, 822}, + dictWord{11, 11, 302}, + dictWord{11, 11, 778}, + dictWord{12, 11, 50}, + dictWord{12, 11, 127}, + dictWord{ + 12, + 11, + 396, + }, + dictWord{13, 11, 62}, + dictWord{13, 11, 328}, + dictWord{14, 11, 122}, + dictWord{147, 11, 72}, + dictWord{7, 10, 386}, + dictWord{138, 10, 713}, + dictWord{ + 6, + 10, + 7, + }, + dictWord{6, 10, 35}, + dictWord{7, 10, 147}, + dictWord{7, 10, 1069}, + dictWord{7, 10, 1568}, + 
dictWord{7, 10, 1575}, + dictWord{7, 10, 1917}, + dictWord{ + 8, + 10, + 43, + }, + dictWord{8, 10, 208}, + dictWord{9, 10, 128}, + dictWord{9, 10, 866}, + dictWord{10, 10, 20}, + dictWord{11, 10, 981}, + dictWord{147, 10, 33}, + dictWord{ + 133, + 0, + 26, + }, + dictWord{132, 0, 550}, + dictWord{5, 11, 2}, + dictWord{7, 11, 1494}, + dictWord{136, 11, 589}, + dictWord{6, 11, 512}, + dictWord{7, 11, 797}, + dictWord{ + 8, + 11, + 253, + }, + dictWord{9, 11, 77}, + dictWord{10, 11, 1}, + dictWord{10, 11, 129}, + dictWord{10, 11, 225}, + dictWord{11, 11, 118}, + dictWord{11, 11, 226}, + dictWord{ + 11, + 11, + 251, + }, + dictWord{11, 11, 430}, + dictWord{11, 11, 701}, + dictWord{11, 11, 974}, + dictWord{11, 11, 982}, + dictWord{12, 11, 64}, + dictWord{12, 11, 260}, + dictWord{ + 12, + 11, + 488, + }, + dictWord{140, 11, 690}, + dictWord{7, 10, 893}, + dictWord{141, 10, 424}, + dictWord{134, 0, 901}, + dictWord{136, 0, 822}, + dictWord{4, 0, 902}, + dictWord{5, 0, 809}, + dictWord{134, 0, 122}, + dictWord{6, 0, 807}, + dictWord{134, 0, 1366}, + dictWord{7, 0, 262}, + dictWord{5, 11, 748}, + dictWord{134, 11, 553}, + dictWord{133, 0, 620}, + dictWord{4, 0, 34}, + dictWord{5, 0, 574}, + dictWord{7, 0, 279}, + dictWord{7, 0, 1624}, + dictWord{136, 0, 601}, + dictWord{9, 0, 170}, + dictWord{ + 6, + 10, + 322, + }, + dictWord{9, 10, 552}, + dictWord{11, 10, 274}, + dictWord{13, 10, 209}, + dictWord{13, 10, 499}, + dictWord{14, 10, 85}, + dictWord{15, 10, 126}, + dictWord{ + 145, + 10, + 70, + }, + dictWord{132, 0, 537}, + dictWord{4, 11, 12}, + dictWord{7, 11, 420}, + dictWord{7, 11, 522}, + dictWord{7, 11, 809}, + dictWord{8, 11, 797}, + dictWord{ + 141, + 11, + 88, + }, + dictWord{133, 0, 332}, + dictWord{8, 10, 83}, + dictWord{8, 10, 742}, + dictWord{8, 10, 817}, + dictWord{9, 10, 28}, + dictWord{9, 10, 29}, + dictWord{9, 10, 885}, + dictWord{10, 10, 387}, + dictWord{11, 10, 633}, + dictWord{11, 10, 740}, + dictWord{13, 10, 235}, + dictWord{13, 10, 254}, + dictWord{15, 10, 143}, + dictWord{ + 143, + 10, + 146, + }, + dictWord{6, 0, 1909}, + dictWord{9, 0, 964}, + dictWord{12, 0, 822}, + dictWord{12, 0, 854}, + dictWord{12, 0, 865}, + dictWord{12, 0, 910}, + dictWord{12, 0, 938}, + dictWord{15, 0, 169}, + dictWord{15, 0, 208}, + dictWord{15, 0, 211}, + dictWord{18, 0, 205}, + dictWord{18, 0, 206}, + dictWord{18, 0, 220}, + dictWord{18, 0, 223}, + dictWord{152, 0, 24}, + dictWord{140, 10, 49}, + dictWord{5, 11, 528}, + dictWord{135, 11, 1580}, + dictWord{6, 0, 261}, + dictWord{8, 0, 182}, + dictWord{139, 0, 943}, + dictWord{134, 0, 1721}, + dictWord{4, 0, 933}, + dictWord{133, 0, 880}, + dictWord{136, 11, 321}, + dictWord{5, 11, 266}, + dictWord{9, 11, 290}, + dictWord{9, 11, 364}, + dictWord{10, 11, 293}, + dictWord{11, 11, 606}, + dictWord{142, 11, 45}, + dictWord{6, 0, 1609}, + dictWord{4, 11, 50}, + dictWord{6, 11, 510}, + dictWord{6, 11, 594}, + dictWord{9, 11, 121}, + dictWord{10, 11, 49}, + dictWord{10, 11, 412}, + dictWord{139, 11, 834}, + dictWord{7, 0, 895}, + dictWord{136, 11, 748}, + dictWord{132, 11, 466}, + dictWord{4, 10, 110}, + dictWord{10, 10, 415}, + dictWord{10, 10, 597}, + dictWord{142, 10, 206}, + dictWord{133, 0, 812}, + dictWord{135, 11, 281}, + dictWord{ + 6, + 0, + 1890, + }, + dictWord{6, 0, 1902}, + dictWord{6, 0, 1916}, + dictWord{9, 0, 929}, + dictWord{9, 0, 942}, + dictWord{9, 0, 975}, + dictWord{9, 0, 984}, + dictWord{9, 0, 986}, + dictWord{ + 9, + 0, + 1011, + }, + dictWord{9, 0, 1019}, + dictWord{12, 0, 804}, + dictWord{12, 0, 851}, + dictWord{12, 0, 867}, + 
dictWord{12, 0, 916}, + dictWord{12, 0, 923}, + dictWord{ + 15, + 0, + 194, + }, + dictWord{15, 0, 204}, + dictWord{15, 0, 210}, + dictWord{15, 0, 222}, + dictWord{15, 0, 223}, + dictWord{15, 0, 229}, + dictWord{15, 0, 250}, + dictWord{ + 18, + 0, + 179, + }, + dictWord{18, 0, 186}, + dictWord{18, 0, 192}, + dictWord{7, 10, 205}, + dictWord{135, 10, 2000}, + dictWord{132, 11, 667}, + dictWord{135, 0, 778}, + dictWord{ + 4, + 0, + 137, + }, + dictWord{7, 0, 1178}, + dictWord{135, 0, 1520}, + dictWord{134, 0, 1314}, + dictWord{4, 11, 242}, + dictWord{134, 11, 333}, + dictWord{6, 0, 1661}, + dictWord{7, 0, 1975}, + dictWord{7, 0, 2009}, + dictWord{135, 0, 2011}, + dictWord{134, 0, 1591}, + dictWord{4, 10, 283}, + dictWord{135, 10, 1194}, + dictWord{ + 11, + 0, + 820, + }, + dictWord{150, 0, 51}, + dictWord{4, 11, 39}, + dictWord{5, 11, 36}, + dictWord{7, 11, 1843}, + dictWord{8, 11, 407}, + dictWord{11, 11, 144}, + dictWord{ + 140, + 11, + 523, + }, + dictWord{134, 10, 1720}, + dictWord{4, 11, 510}, + dictWord{7, 11, 29}, + dictWord{7, 11, 66}, + dictWord{7, 11, 1980}, + dictWord{10, 11, 487}, + dictWord{ + 10, + 11, + 809, + }, + dictWord{146, 11, 9}, + dictWord{5, 0, 89}, + dictWord{7, 0, 1915}, + dictWord{9, 0, 185}, + dictWord{9, 0, 235}, + dictWord{10, 0, 64}, + dictWord{10, 0, 270}, + dictWord{10, 0, 403}, + dictWord{10, 0, 469}, + dictWord{10, 0, 529}, + dictWord{10, 0, 590}, + dictWord{11, 0, 140}, + dictWord{11, 0, 860}, + dictWord{13, 0, 1}, + dictWord{ + 13, + 0, + 422, + }, + dictWord{14, 0, 341}, + dictWord{14, 0, 364}, + dictWord{17, 0, 93}, + dictWord{18, 0, 113}, + dictWord{19, 0, 97}, + dictWord{147, 0, 113}, + dictWord{133, 0, 695}, + dictWord{6, 0, 987}, + dictWord{134, 0, 1160}, + dictWord{5, 0, 6}, + dictWord{6, 0, 183}, + dictWord{7, 0, 680}, + dictWord{7, 0, 978}, + dictWord{7, 0, 1013}, + dictWord{ + 7, + 0, + 1055, + }, + dictWord{12, 0, 230}, + dictWord{13, 0, 172}, + dictWord{146, 0, 29}, + dictWord{134, 11, 570}, + dictWord{132, 11, 787}, + dictWord{134, 11, 518}, + dictWord{ + 6, + 0, + 29, + }, + dictWord{139, 0, 63}, + dictWord{132, 11, 516}, + dictWord{136, 11, 821}, + dictWord{132, 0, 311}, + dictWord{134, 0, 1740}, + dictWord{7, 0, 170}, + dictWord{8, 0, 90}, + dictWord{8, 0, 177}, + dictWord{8, 0, 415}, + dictWord{11, 0, 714}, + dictWord{14, 0, 281}, + dictWord{136, 10, 735}, + dictWord{134, 0, 1961}, + dictWord{ + 135, + 11, + 1405, + }, + dictWord{4, 11, 10}, + dictWord{7, 11, 917}, + dictWord{139, 11, 786}, + dictWord{5, 10, 132}, + dictWord{9, 10, 486}, + dictWord{9, 10, 715}, + dictWord{ + 10, + 10, + 458, + }, + dictWord{11, 10, 373}, + dictWord{11, 10, 668}, + dictWord{11, 10, 795}, + dictWord{11, 10, 897}, + dictWord{12, 10, 272}, + dictWord{12, 10, 424}, + dictWord{12, 10, 539}, + dictWord{12, 10, 558}, + dictWord{14, 10, 245}, + dictWord{14, 10, 263}, + dictWord{14, 10, 264}, + dictWord{14, 10, 393}, + dictWord{ + 142, + 10, + 403, + }, + dictWord{11, 0, 91}, + dictWord{13, 0, 129}, + dictWord{15, 0, 101}, + dictWord{145, 0, 125}, + dictWord{135, 0, 1132}, + dictWord{4, 0, 494}, + dictWord{6, 0, 74}, + dictWord{7, 0, 44}, + dictWord{7, 0, 407}, + dictWord{12, 0, 17}, + dictWord{15, 0, 5}, + dictWord{148, 0, 11}, + dictWord{133, 10, 379}, + dictWord{5, 0, 270}, + dictWord{ + 5, + 11, + 684, + }, + dictWord{6, 10, 89}, + dictWord{6, 10, 400}, + dictWord{7, 10, 1569}, + dictWord{7, 10, 1623}, + dictWord{7, 10, 1850}, + dictWord{8, 10, 218}, + dictWord{ + 8, + 10, + 422, + }, + dictWord{9, 10, 570}, + dictWord{138, 10, 626}, + dictWord{4, 0, 276}, + 
dictWord{133, 0, 296}, + dictWord{6, 0, 1523}, + dictWord{134, 11, 27}, + dictWord{ + 6, + 10, + 387, + }, + dictWord{7, 10, 882}, + dictWord{141, 10, 111}, + dictWord{6, 10, 224}, + dictWord{7, 10, 877}, + dictWord{137, 10, 647}, + dictWord{135, 10, 790}, + dictWord{ + 4, + 0, + 7, + }, + dictWord{5, 0, 90}, + dictWord{5, 0, 158}, + dictWord{6, 0, 542}, + dictWord{7, 0, 221}, + dictWord{7, 0, 1574}, + dictWord{9, 0, 490}, + dictWord{10, 0, 540}, + dictWord{ + 11, + 0, + 443, + }, + dictWord{139, 0, 757}, + dictWord{7, 0, 588}, + dictWord{9, 0, 175}, + dictWord{138, 0, 530}, + dictWord{135, 10, 394}, + dictWord{142, 11, 23}, + dictWord{ + 134, + 0, + 786, + }, + dictWord{135, 0, 580}, + dictWord{7, 0, 88}, + dictWord{136, 0, 627}, + dictWord{5, 0, 872}, + dictWord{6, 0, 57}, + dictWord{7, 0, 471}, + dictWord{9, 0, 447}, + dictWord{137, 0, 454}, + dictWord{6, 11, 342}, + dictWord{6, 11, 496}, + dictWord{8, 11, 275}, + dictWord{137, 11, 206}, + dictWord{4, 11, 909}, + dictWord{133, 11, 940}, + dictWord{6, 0, 735}, + dictWord{132, 11, 891}, + dictWord{8, 0, 845}, + dictWord{8, 0, 916}, + dictWord{135, 10, 1409}, + dictWord{5, 0, 31}, + dictWord{134, 0, 614}, + dictWord{11, 0, 458}, + dictWord{12, 0, 15}, + dictWord{140, 0, 432}, + dictWord{8, 0, 330}, + dictWord{140, 0, 477}, + dictWord{4, 0, 530}, + dictWord{5, 0, 521}, + dictWord{ + 7, + 0, + 1200, + }, + dictWord{10, 0, 460}, + dictWord{132, 11, 687}, + dictWord{6, 0, 424}, + dictWord{135, 0, 1866}, + dictWord{9, 0, 569}, + dictWord{12, 0, 12}, + dictWord{ + 12, + 0, + 81, + }, + dictWord{12, 0, 319}, + dictWord{13, 0, 69}, + dictWord{14, 0, 259}, + dictWord{16, 0, 87}, + dictWord{17, 0, 1}, + dictWord{17, 0, 21}, + dictWord{17, 0, 24}, + dictWord{ + 18, + 0, + 15, + }, + dictWord{18, 0, 56}, + dictWord{18, 0, 59}, + dictWord{18, 0, 127}, + dictWord{18, 0, 154}, + dictWord{19, 0, 19}, + dictWord{148, 0, 31}, + dictWord{7, 0, 1302}, + dictWord{136, 10, 38}, + dictWord{134, 11, 253}, + dictWord{5, 10, 261}, + dictWord{7, 10, 78}, + dictWord{7, 10, 199}, + dictWord{8, 10, 815}, + dictWord{9, 10, 126}, + dictWord{138, 10, 342}, + dictWord{5, 0, 595}, + dictWord{135, 0, 1863}, + dictWord{6, 11, 41}, + dictWord{141, 11, 160}, + dictWord{5, 0, 13}, + dictWord{134, 0, 142}, + dictWord{6, 0, 97}, + dictWord{7, 0, 116}, + dictWord{8, 0, 322}, + dictWord{8, 0, 755}, + dictWord{9, 0, 548}, + dictWord{10, 0, 714}, + dictWord{11, 0, 884}, + dictWord{13, 0, 324}, + dictWord{7, 11, 1304}, + dictWord{138, 11, 477}, + dictWord{132, 10, 628}, + dictWord{134, 11, 1718}, + dictWord{7, 10, 266}, + dictWord{136, 10, 804}, + dictWord{135, 10, 208}, + dictWord{7, 0, 1021}, + dictWord{6, 10, 79}, + dictWord{135, 10, 1519}, + dictWord{7, 0, 1472}, + dictWord{135, 0, 1554}, + dictWord{6, 11, 362}, + dictWord{146, 11, 51}, + dictWord{7, 0, 1071}, + dictWord{7, 0, 1541}, + dictWord{7, 0, 1767}, + dictWord{7, 0, 1806}, + dictWord{11, 0, 162}, + dictWord{11, 0, 242}, + dictWord{11, 0, 452}, + dictWord{12, 0, 605}, + dictWord{15, 0, 26}, + dictWord{144, 0, 44}, + dictWord{136, 10, 741}, + dictWord{133, 11, 115}, + dictWord{145, 0, 115}, + dictWord{134, 10, 376}, + dictWord{6, 0, 1406}, + dictWord{134, 0, 1543}, + dictWord{5, 11, 193}, + dictWord{12, 11, 178}, + dictWord{13, 11, 130}, + dictWord{ + 145, + 11, + 84, + }, + dictWord{135, 0, 1111}, + dictWord{8, 0, 1}, + dictWord{9, 0, 650}, + dictWord{10, 0, 326}, + dictWord{5, 11, 705}, + dictWord{137, 11, 606}, + dictWord{5, 0, 488}, + dictWord{6, 0, 527}, + dictWord{7, 0, 489}, + dictWord{7, 0, 1636}, + dictWord{8, 0, 
121}, + dictWord{8, 0, 144}, + dictWord{8, 0, 359}, + dictWord{9, 0, 193}, + dictWord{9, 0, 241}, + dictWord{9, 0, 336}, + dictWord{9, 0, 882}, + dictWord{11, 0, 266}, + dictWord{11, 0, 372}, + dictWord{11, 0, 944}, + dictWord{12, 0, 401}, + dictWord{140, 0, 641}, + dictWord{135, 11, 174}, + dictWord{6, 0, 267}, + dictWord{7, 10, 244}, + dictWord{7, 10, 632}, + dictWord{7, 10, 1609}, + dictWord{8, 10, 178}, + dictWord{8, 10, 638}, + dictWord{141, 10, 58}, + dictWord{134, 0, 1983}, + dictWord{134, 0, 1155}, + dictWord{134, 0, 1575}, + dictWord{134, 0, 1438}, + dictWord{9, 0, 31}, + dictWord{ + 10, + 0, + 244, + }, + dictWord{10, 0, 699}, + dictWord{12, 0, 149}, + dictWord{141, 0, 497}, + dictWord{133, 0, 377}, + dictWord{4, 11, 122}, + dictWord{5, 11, 796}, + dictWord{ + 5, + 11, + 952, + }, + dictWord{6, 11, 1660}, + dictWord{6, 11, 1671}, + dictWord{8, 11, 567}, + dictWord{9, 11, 687}, + dictWord{9, 11, 742}, + dictWord{10, 11, 686}, + dictWord{ + 11, + 11, + 356, + }, + dictWord{11, 11, 682}, + dictWord{140, 11, 281}, + dictWord{145, 0, 101}, + dictWord{11, 11, 0}, + dictWord{144, 11, 78}, + dictWord{5, 11, 179}, + dictWord{ + 5, + 10, + 791, + }, + dictWord{7, 11, 1095}, + dictWord{135, 11, 1213}, + dictWord{8, 11, 372}, + dictWord{9, 11, 122}, + dictWord{138, 11, 175}, + dictWord{7, 10, 686}, + dictWord{8, 10, 33}, + dictWord{8, 10, 238}, + dictWord{10, 10, 616}, + dictWord{11, 10, 467}, + dictWord{11, 10, 881}, + dictWord{13, 10, 217}, + dictWord{13, 10, 253}, + dictWord{142, 10, 268}, + dictWord{9, 0, 476}, + dictWord{4, 11, 66}, + dictWord{7, 11, 722}, + dictWord{135, 11, 904}, + dictWord{7, 11, 352}, + dictWord{137, 11, 684}, + dictWord{135, 0, 2023}, + dictWord{135, 0, 1836}, + dictWord{132, 10, 447}, + dictWord{5, 0, 843}, + dictWord{144, 0, 35}, + dictWord{137, 11, 779}, + dictWord{ + 141, + 11, + 35, + }, + dictWord{4, 10, 128}, + dictWord{5, 10, 415}, + dictWord{6, 10, 462}, + dictWord{7, 10, 294}, + dictWord{7, 10, 578}, + dictWord{10, 10, 710}, + dictWord{ + 139, + 10, + 86, + }, + dictWord{132, 0, 554}, + dictWord{133, 0, 536}, + dictWord{136, 10, 587}, + dictWord{5, 0, 207}, + dictWord{9, 0, 79}, + dictWord{11, 0, 625}, + dictWord{ + 145, + 0, + 7, + }, + dictWord{7, 0, 1371}, + dictWord{6, 10, 427}, + dictWord{138, 10, 692}, + dictWord{4, 0, 424}, + dictWord{4, 10, 195}, + dictWord{135, 10, 802}, + dictWord{ + 8, + 0, + 785, + }, + dictWord{133, 11, 564}, + dictWord{135, 0, 336}, + dictWord{4, 0, 896}, + dictWord{6, 0, 1777}, + dictWord{134, 11, 556}, + dictWord{137, 11, 103}, + dictWord{134, 10, 1683}, + dictWord{7, 11, 544}, + dictWord{8, 11, 719}, + dictWord{138, 11, 61}, + dictWord{138, 10, 472}, + dictWord{4, 11, 5}, + dictWord{5, 11, 498}, + dictWord{136, 11, 637}, + dictWord{7, 0, 750}, + dictWord{9, 0, 223}, + dictWord{11, 0, 27}, + dictWord{11, 0, 466}, + dictWord{12, 0, 624}, + dictWord{14, 0, 265}, + dictWord{ + 146, + 0, + 61, + }, + dictWord{12, 0, 238}, + dictWord{18, 0, 155}, + dictWord{12, 11, 238}, + dictWord{146, 11, 155}, + dictWord{151, 10, 28}, + dictWord{133, 11, 927}, + dictWord{12, 0, 383}, + dictWord{5, 10, 3}, + dictWord{8, 10, 578}, + dictWord{9, 10, 118}, + dictWord{10, 10, 705}, + dictWord{141, 10, 279}, + dictWord{4, 11, 893}, + dictWord{ + 5, + 11, + 780, + }, + dictWord{133, 11, 893}, + dictWord{4, 0, 603}, + dictWord{133, 0, 661}, + dictWord{4, 0, 11}, + dictWord{6, 0, 128}, + dictWord{7, 0, 231}, + dictWord{ + 7, + 0, + 1533, + }, + dictWord{10, 0, 725}, + dictWord{5, 10, 229}, + dictWord{5, 11, 238}, + dictWord{135, 11, 1350}, + 
dictWord{8, 10, 102}, + dictWord{10, 10, 578}, + dictWord{ + 10, + 10, + 672, + }, + dictWord{12, 10, 496}, + dictWord{13, 10, 408}, + dictWord{14, 10, 121}, + dictWord{145, 10, 106}, + dictWord{132, 0, 476}, + dictWord{134, 0, 1552}, + dictWord{134, 11, 1729}, + dictWord{8, 10, 115}, + dictWord{8, 10, 350}, + dictWord{9, 10, 489}, + dictWord{10, 10, 128}, + dictWord{11, 10, 306}, + dictWord{ + 12, + 10, + 373, + }, + dictWord{14, 10, 30}, + dictWord{17, 10, 79}, + dictWord{19, 10, 80}, + dictWord{150, 10, 55}, + dictWord{135, 0, 1807}, + dictWord{4, 0, 680}, + dictWord{ + 4, + 11, + 60, + }, + dictWord{7, 11, 760}, + dictWord{7, 11, 1800}, + dictWord{8, 11, 314}, + dictWord{9, 11, 700}, + dictWord{139, 11, 487}, + dictWord{4, 10, 230}, + dictWord{ + 5, + 10, + 702, + }, + dictWord{148, 11, 94}, + dictWord{132, 11, 228}, + dictWord{139, 0, 435}, + dictWord{9, 0, 20}, + dictWord{10, 0, 324}, + dictWord{10, 0, 807}, + dictWord{ + 139, + 0, + 488, + }, + dictWord{6, 10, 1728}, + dictWord{136, 11, 419}, + dictWord{4, 10, 484}, + dictWord{18, 10, 26}, + dictWord{19, 10, 42}, + dictWord{20, 10, 43}, + dictWord{ + 21, + 10, + 0, + }, + dictWord{23, 10, 27}, + dictWord{152, 10, 14}, + dictWord{135, 0, 1431}, + dictWord{133, 11, 828}, + dictWord{5, 0, 112}, + dictWord{6, 0, 103}, + dictWord{ + 6, + 0, + 150, + }, + dictWord{7, 0, 1303}, + dictWord{9, 0, 292}, + dictWord{10, 0, 481}, + dictWord{20, 0, 13}, + dictWord{7, 11, 176}, + dictWord{7, 11, 178}, + dictWord{7, 11, 1110}, + dictWord{10, 11, 481}, + dictWord{148, 11, 13}, + dictWord{138, 0, 356}, + dictWord{4, 11, 51}, + dictWord{5, 11, 39}, + dictWord{6, 11, 4}, + dictWord{7, 11, 591}, + dictWord{ + 7, + 11, + 849, + }, + dictWord{7, 11, 951}, + dictWord{7, 11, 1129}, + dictWord{7, 11, 1613}, + dictWord{7, 11, 1760}, + dictWord{7, 11, 1988}, + dictWord{9, 11, 434}, + dictWord{10, 11, 754}, + dictWord{11, 11, 25}, + dictWord{11, 11, 37}, + dictWord{139, 11, 414}, + dictWord{6, 0, 1963}, + dictWord{134, 0, 2000}, + dictWord{ + 132, + 10, + 633, + }, + dictWord{6, 0, 1244}, + dictWord{133, 11, 902}, + dictWord{135, 11, 928}, + dictWord{140, 0, 18}, + dictWord{138, 0, 204}, + dictWord{135, 11, 1173}, + dictWord{134, 0, 867}, + dictWord{4, 0, 708}, + dictWord{8, 0, 15}, + dictWord{9, 0, 50}, + dictWord{9, 0, 386}, + dictWord{11, 0, 18}, + dictWord{11, 0, 529}, + dictWord{140, 0, 228}, + dictWord{134, 11, 270}, + dictWord{4, 0, 563}, + dictWord{7, 0, 109}, + dictWord{7, 0, 592}, + dictWord{7, 0, 637}, + dictWord{7, 0, 770}, + dictWord{8, 0, 463}, + dictWord{ + 9, + 0, + 60, + }, + dictWord{9, 0, 335}, + dictWord{9, 0, 904}, + dictWord{10, 0, 73}, + dictWord{11, 0, 434}, + dictWord{12, 0, 585}, + dictWord{13, 0, 331}, + dictWord{18, 0, 110}, + dictWord{148, 0, 60}, + dictWord{132, 0, 502}, + dictWord{14, 11, 359}, + dictWord{19, 11, 52}, + dictWord{148, 11, 47}, + dictWord{6, 11, 377}, + dictWord{7, 11, 1025}, + dictWord{9, 11, 613}, + dictWord{145, 11, 104}, + dictWord{6, 0, 347}, + dictWord{10, 0, 161}, + dictWord{5, 10, 70}, + dictWord{5, 10, 622}, + dictWord{6, 10, 334}, + dictWord{ + 7, + 10, + 1032, + }, + dictWord{9, 10, 171}, + dictWord{11, 10, 26}, + dictWord{11, 10, 213}, + dictWord{11, 10, 637}, + dictWord{11, 10, 707}, + dictWord{12, 10, 202}, + dictWord{12, 10, 380}, + dictWord{13, 10, 226}, + dictWord{13, 10, 355}, + dictWord{14, 10, 222}, + dictWord{145, 10, 42}, + dictWord{132, 11, 416}, + dictWord{4, 0, 33}, + dictWord{5, 0, 102}, + dictWord{6, 0, 284}, + dictWord{7, 0, 1079}, + dictWord{7, 0, 1423}, + dictWord{7, 0, 1702}, + 
dictWord{8, 0, 470}, + dictWord{9, 0, 554}, + dictWord{ + 9, + 0, + 723, + }, + dictWord{11, 0, 333}, + dictWord{142, 11, 372}, + dictWord{5, 11, 152}, + dictWord{5, 11, 197}, + dictWord{7, 11, 340}, + dictWord{7, 11, 867}, + dictWord{ + 10, + 11, + 548, + }, + dictWord{10, 11, 581}, + dictWord{11, 11, 6}, + dictWord{12, 11, 3}, + dictWord{12, 11, 19}, + dictWord{14, 11, 110}, + dictWord{142, 11, 289}, + dictWord{ + 7, + 0, + 246, + }, + dictWord{135, 0, 840}, + dictWord{6, 0, 10}, + dictWord{8, 0, 571}, + dictWord{9, 0, 739}, + dictWord{143, 0, 91}, + dictWord{6, 0, 465}, + dictWord{7, 0, 1465}, + dictWord{ + 4, + 10, + 23, + }, + dictWord{4, 10, 141}, + dictWord{5, 10, 313}, + dictWord{5, 10, 1014}, + dictWord{6, 10, 50}, + dictWord{7, 10, 142}, + dictWord{7, 10, 559}, + dictWord{ + 8, + 10, + 640, + }, + dictWord{9, 10, 460}, + dictWord{9, 10, 783}, + dictWord{11, 10, 741}, + dictWord{12, 10, 183}, + dictWord{141, 10, 488}, + dictWord{133, 0, 626}, + dictWord{ + 136, + 0, + 614, + }, + dictWord{138, 0, 237}, + dictWord{7, 11, 34}, + dictWord{7, 11, 190}, + dictWord{8, 11, 28}, + dictWord{8, 11, 141}, + dictWord{8, 11, 444}, + dictWord{ + 8, + 11, + 811, + }, + dictWord{9, 11, 468}, + dictWord{11, 11, 334}, + dictWord{12, 11, 24}, + dictWord{12, 11, 386}, + dictWord{140, 11, 576}, + dictWord{133, 11, 757}, + dictWord{ + 5, + 0, + 18, + }, + dictWord{6, 0, 526}, + dictWord{13, 0, 24}, + dictWord{13, 0, 110}, + dictWord{19, 0, 5}, + dictWord{147, 0, 44}, + dictWord{6, 0, 506}, + dictWord{134, 11, 506}, + dictWord{135, 11, 1553}, + dictWord{4, 0, 309}, + dictWord{5, 0, 462}, + dictWord{7, 0, 970}, + dictWord{7, 0, 1097}, + dictWord{22, 0, 30}, + dictWord{22, 0, 33}, + dictWord{ + 7, + 11, + 1385, + }, + dictWord{11, 11, 582}, + dictWord{11, 11, 650}, + dictWord{11, 11, 901}, + dictWord{11, 11, 949}, + dictWord{12, 11, 232}, + dictWord{12, 11, 236}, + dictWord{13, 11, 413}, + dictWord{13, 11, 501}, + dictWord{146, 11, 116}, + dictWord{9, 0, 140}, + dictWord{5, 10, 222}, + dictWord{138, 10, 534}, + dictWord{6, 0, 1056}, + dictWord{137, 10, 906}, + dictWord{134, 0, 1704}, + dictWord{138, 10, 503}, + dictWord{134, 0, 1036}, + dictWord{5, 10, 154}, + dictWord{7, 10, 1491}, + dictWord{ + 10, + 10, + 379, + }, + dictWord{138, 10, 485}, + dictWord{4, 11, 383}, + dictWord{133, 10, 716}, + dictWord{134, 0, 1315}, + dictWord{5, 0, 86}, + dictWord{7, 0, 743}, + dictWord{ + 9, + 0, + 85, + }, + dictWord{10, 0, 281}, + dictWord{10, 0, 432}, + dictWord{11, 0, 825}, + dictWord{12, 0, 251}, + dictWord{13, 0, 118}, + dictWord{142, 0, 378}, + dictWord{ + 8, + 0, + 264, + }, + dictWord{4, 10, 91}, + dictWord{5, 10, 388}, + dictWord{5, 10, 845}, + dictWord{6, 10, 206}, + dictWord{6, 10, 252}, + dictWord{6, 10, 365}, + dictWord{7, 10, 136}, + dictWord{7, 10, 531}, + dictWord{136, 10, 621}, + dictWord{5, 0, 524}, + dictWord{133, 0, 744}, + dictWord{5, 11, 277}, + dictWord{141, 11, 247}, + dictWord{ + 132, + 11, + 435, + }, + dictWord{10, 0, 107}, + dictWord{140, 0, 436}, + dictWord{132, 0, 927}, + dictWord{10, 0, 123}, + dictWord{12, 0, 670}, + dictWord{146, 0, 94}, + dictWord{ + 7, + 0, + 1149, + }, + dictWord{9, 0, 156}, + dictWord{138, 0, 957}, + dictWord{5, 11, 265}, + dictWord{6, 11, 212}, + dictWord{135, 11, 28}, + dictWord{133, 0, 778}, + dictWord{ + 133, + 0, + 502, + }, + dictWord{8, 0, 196}, + dictWord{10, 0, 283}, + dictWord{139, 0, 406}, + dictWord{135, 10, 576}, + dictWord{136, 11, 535}, + dictWord{134, 0, 1312}, + dictWord{ + 5, + 10, + 771, + }, + dictWord{5, 10, 863}, + dictWord{5, 10, 898}, + 
dictWord{6, 10, 1632}, + dictWord{6, 10, 1644}, + dictWord{134, 10, 1780}, + dictWord{5, 0, 855}, + dictWord{5, 10, 331}, + dictWord{135, 11, 1487}, + dictWord{132, 11, 702}, + dictWord{5, 11, 808}, + dictWord{135, 11, 2045}, + dictWord{7, 0, 1400}, + dictWord{ + 9, + 0, + 446, + }, + dictWord{138, 0, 45}, + dictWord{140, 10, 632}, + dictWord{132, 0, 1003}, + dictWord{5, 11, 166}, + dictWord{8, 11, 739}, + dictWord{140, 11, 511}, + dictWord{ + 5, + 10, + 107, + }, + dictWord{7, 10, 201}, + dictWord{136, 10, 518}, + dictWord{6, 10, 446}, + dictWord{135, 10, 1817}, + dictWord{134, 0, 1532}, + dictWord{ + 134, + 0, + 1097, + }, + dictWord{4, 11, 119}, + dictWord{5, 11, 170}, + dictWord{5, 11, 447}, + dictWord{7, 11, 1708}, + dictWord{7, 11, 1889}, + dictWord{9, 11, 357}, + dictWord{ + 9, + 11, + 719, + }, + dictWord{12, 11, 486}, + dictWord{140, 11, 596}, + dictWord{9, 10, 851}, + dictWord{141, 10, 510}, + dictWord{7, 0, 612}, + dictWord{8, 0, 545}, + dictWord{ + 8, + 0, + 568, + }, + dictWord{8, 0, 642}, + dictWord{9, 0, 717}, + dictWord{10, 0, 541}, + dictWord{10, 0, 763}, + dictWord{11, 0, 449}, + dictWord{12, 0, 489}, + dictWord{13, 0, 153}, + dictWord{13, 0, 296}, + dictWord{14, 0, 138}, + dictWord{14, 0, 392}, + dictWord{15, 0, 50}, + dictWord{16, 0, 6}, + dictWord{16, 0, 12}, + dictWord{20, 0, 9}, + dictWord{ + 132, + 10, + 504, + }, + dictWord{4, 11, 450}, + dictWord{135, 11, 1158}, + dictWord{11, 0, 54}, + dictWord{13, 0, 173}, + dictWord{13, 0, 294}, + dictWord{5, 10, 883}, + dictWord{ + 5, + 10, + 975, + }, + dictWord{8, 10, 392}, + dictWord{148, 10, 7}, + dictWord{13, 0, 455}, + dictWord{15, 0, 99}, + dictWord{15, 0, 129}, + dictWord{144, 0, 68}, + dictWord{135, 0, 172}, + dictWord{132, 11, 754}, + dictWord{5, 10, 922}, + dictWord{134, 10, 1707}, + dictWord{134, 0, 1029}, + dictWord{17, 11, 39}, + dictWord{148, 11, 36}, + dictWord{ + 4, + 0, + 568, + }, + dictWord{5, 10, 993}, + dictWord{7, 10, 515}, + dictWord{137, 10, 91}, + dictWord{132, 0, 732}, + dictWord{10, 0, 617}, + dictWord{138, 11, 617}, + dictWord{ + 134, + 0, + 974, + }, + dictWord{7, 0, 989}, + dictWord{10, 0, 377}, + dictWord{12, 0, 363}, + dictWord{13, 0, 68}, + dictWord{13, 0, 94}, + dictWord{14, 0, 108}, + dictWord{ + 142, + 0, + 306, + }, + dictWord{136, 0, 733}, + dictWord{132, 0, 428}, + dictWord{7, 0, 1789}, + dictWord{135, 11, 1062}, + dictWord{7, 0, 2015}, + dictWord{140, 0, 665}, + dictWord{135, 10, 1433}, + dictWord{5, 0, 287}, + dictWord{7, 10, 921}, + dictWord{8, 10, 580}, + dictWord{8, 10, 593}, + dictWord{8, 10, 630}, + dictWord{138, 10, 28}, + dictWord{138, 0, 806}, + dictWord{4, 10, 911}, + dictWord{5, 10, 867}, + dictWord{5, 10, 1013}, + dictWord{7, 10, 2034}, + dictWord{8, 10, 798}, + dictWord{136, 10, 813}, + dictWord{134, 0, 1539}, + dictWord{8, 11, 523}, + dictWord{150, 11, 34}, + dictWord{135, 11, 740}, + dictWord{7, 11, 238}, + dictWord{7, 11, 2033}, + dictWord{ + 8, + 11, + 120, + }, + dictWord{8, 11, 188}, + dictWord{8, 11, 659}, + dictWord{9, 11, 598}, + dictWord{10, 11, 466}, + dictWord{12, 11, 342}, + dictWord{12, 11, 588}, + dictWord{ + 13, + 11, + 503, + }, + dictWord{14, 11, 246}, + dictWord{143, 11, 92}, + dictWord{7, 0, 1563}, + dictWord{141, 0, 182}, + dictWord{5, 10, 135}, + dictWord{6, 10, 519}, + dictWord{ + 7, + 10, + 1722, + }, + dictWord{10, 10, 271}, + dictWord{11, 10, 261}, + dictWord{145, 10, 54}, + dictWord{14, 10, 338}, + dictWord{148, 10, 81}, + dictWord{7, 0, 484}, + dictWord{ + 4, + 10, + 300, + }, + dictWord{133, 10, 436}, + dictWord{145, 11, 114}, + dictWord{6, 0, 
1623}, + dictWord{134, 0, 1681}, + dictWord{133, 11, 640}, + dictWord{4, 11, 201}, + dictWord{7, 11, 1744}, + dictWord{8, 11, 602}, + dictWord{11, 11, 247}, + dictWord{11, 11, 826}, + dictWord{145, 11, 65}, + dictWord{8, 11, 164}, + dictWord{ + 146, + 11, + 62, + }, + dictWord{6, 0, 1833}, + dictWord{6, 0, 1861}, + dictWord{136, 0, 878}, + dictWord{134, 0, 1569}, + dictWord{8, 10, 357}, + dictWord{10, 10, 745}, + dictWord{ + 14, + 10, + 426, + }, + dictWord{17, 10, 94}, + dictWord{147, 10, 57}, + dictWord{12, 0, 93}, + dictWord{12, 0, 501}, + dictWord{13, 0, 362}, + dictWord{14, 0, 151}, + dictWord{15, 0, 40}, + dictWord{15, 0, 59}, + dictWord{16, 0, 46}, + dictWord{17, 0, 25}, + dictWord{18, 0, 14}, + dictWord{18, 0, 134}, + dictWord{19, 0, 25}, + dictWord{19, 0, 69}, + dictWord{ + 20, + 0, + 16, + }, + dictWord{20, 0, 19}, + dictWord{20, 0, 66}, + dictWord{21, 0, 23}, + dictWord{21, 0, 25}, + dictWord{150, 0, 42}, + dictWord{6, 0, 1748}, + dictWord{8, 0, 715}, + dictWord{ + 9, + 0, + 802, + }, + dictWord{10, 0, 46}, + dictWord{10, 0, 819}, + dictWord{13, 0, 308}, + dictWord{14, 0, 351}, + dictWord{14, 0, 363}, + dictWord{146, 0, 67}, + dictWord{ + 132, + 0, + 994, + }, + dictWord{4, 0, 63}, + dictWord{133, 0, 347}, + dictWord{132, 0, 591}, + dictWord{133, 0, 749}, + dictWord{7, 11, 1577}, + dictWord{10, 11, 304}, + dictWord{ + 10, + 11, + 549, + }, + dictWord{11, 11, 424}, + dictWord{12, 11, 365}, + dictWord{13, 11, 220}, + dictWord{13, 11, 240}, + dictWord{142, 11, 33}, + dictWord{133, 0, 366}, + dictWord{ + 7, + 0, + 557, + }, + dictWord{12, 0, 547}, + dictWord{14, 0, 86}, + dictWord{133, 10, 387}, + dictWord{135, 0, 1747}, + dictWord{132, 11, 907}, + dictWord{5, 11, 100}, + dictWord{10, 11, 329}, + dictWord{12, 11, 416}, + dictWord{149, 11, 29}, + dictWord{4, 10, 6}, + dictWord{5, 10, 708}, + dictWord{136, 10, 75}, + dictWord{7, 10, 1351}, + dictWord{9, 10, 581}, + dictWord{10, 10, 639}, + dictWord{11, 10, 453}, + dictWord{140, 10, 584}, + dictWord{7, 0, 89}, + dictWord{132, 10, 303}, + dictWord{138, 10, 772}, + dictWord{132, 11, 176}, + dictWord{5, 11, 636}, + dictWord{5, 11, 998}, + dictWord{8, 11, 26}, + dictWord{137, 11, 358}, + dictWord{7, 11, 9}, + dictWord{7, 11, 1508}, + dictWord{9, 11, 317}, + dictWord{10, 11, 210}, + dictWord{10, 11, 292}, + dictWord{10, 11, 533}, + dictWord{11, 11, 555}, + dictWord{12, 11, 526}, + dictWord{ + 12, + 11, + 607, + }, + dictWord{13, 11, 263}, + dictWord{13, 11, 459}, + dictWord{142, 11, 271}, + dictWord{134, 0, 1463}, + dictWord{6, 0, 772}, + dictWord{6, 0, 1137}, + dictWord{ + 139, + 11, + 595, + }, + dictWord{7, 0, 977}, + dictWord{139, 11, 66}, + dictWord{138, 0, 893}, + dictWord{20, 0, 48}, + dictWord{148, 11, 48}, + dictWord{5, 0, 824}, + dictWord{ + 133, + 0, + 941, + }, + dictWord{134, 11, 295}, + dictWord{7, 0, 1543}, + dictWord{7, 0, 1785}, + dictWord{10, 0, 690}, + dictWord{4, 10, 106}, + dictWord{139, 10, 717}, + dictWord{ + 7, + 0, + 440, + }, + dictWord{8, 0, 230}, + dictWord{139, 0, 106}, + dictWord{5, 10, 890}, + dictWord{133, 10, 988}, + dictWord{6, 10, 626}, + dictWord{142, 10, 431}, + dictWord{ + 10, + 11, + 127, + }, + dictWord{141, 11, 27}, + dictWord{17, 0, 32}, + dictWord{10, 10, 706}, + dictWord{150, 10, 44}, + dictWord{132, 0, 216}, + dictWord{137, 0, 332}, + dictWord{4, 10, 698}, + dictWord{136, 11, 119}, + dictWord{139, 11, 267}, + dictWord{138, 10, 17}, + dictWord{11, 11, 526}, + dictWord{11, 11, 939}, + dictWord{ + 141, + 11, + 290, + }, + dictWord{7, 11, 1167}, + dictWord{11, 11, 934}, + dictWord{13, 11, 391}, + 
dictWord{145, 11, 76}, + dictWord{139, 11, 39}, + dictWord{134, 10, 84}, + dictWord{ + 4, + 0, + 914, + }, + dictWord{5, 0, 800}, + dictWord{133, 0, 852}, + dictWord{10, 0, 416}, + dictWord{141, 0, 115}, + dictWord{7, 0, 564}, + dictWord{142, 0, 168}, + dictWord{ + 4, + 0, + 918, + }, + dictWord{133, 0, 876}, + dictWord{134, 0, 1764}, + dictWord{152, 0, 3}, + dictWord{4, 0, 92}, + dictWord{5, 0, 274}, + dictWord{7, 11, 126}, + dictWord{136, 11, 84}, + dictWord{140, 10, 498}, + dictWord{136, 11, 790}, + dictWord{8, 0, 501}, + dictWord{5, 10, 986}, + dictWord{6, 10, 130}, + dictWord{7, 10, 1582}, + dictWord{ + 8, + 10, + 458, + }, + dictWord{10, 10, 101}, + dictWord{10, 10, 318}, + dictWord{138, 10, 823}, + dictWord{6, 11, 64}, + dictWord{12, 11, 377}, + dictWord{141, 11, 309}, + dictWord{ + 5, + 0, + 743, + }, + dictWord{138, 0, 851}, + dictWord{4, 0, 49}, + dictWord{7, 0, 280}, + dictWord{135, 0, 1633}, + dictWord{134, 0, 879}, + dictWord{136, 0, 47}, + dictWord{ + 7, + 10, + 1644, + }, + dictWord{137, 10, 129}, + dictWord{132, 0, 865}, + dictWord{134, 0, 1202}, + dictWord{9, 11, 34}, + dictWord{139, 11, 484}, + dictWord{135, 10, 997}, + dictWord{5, 0, 272}, + dictWord{5, 0, 908}, + dictWord{5, 0, 942}, + dictWord{8, 0, 197}, + dictWord{9, 0, 47}, + dictWord{11, 0, 538}, + dictWord{139, 0, 742}, + dictWord{ + 6, + 11, + 1700, + }, + dictWord{7, 11, 26}, + dictWord{7, 11, 293}, + dictWord{7, 11, 382}, + dictWord{7, 11, 1026}, + dictWord{7, 11, 1087}, + dictWord{7, 11, 2027}, + dictWord{ + 8, + 11, + 24, + }, + dictWord{8, 11, 114}, + dictWord{8, 11, 252}, + dictWord{8, 11, 727}, + dictWord{8, 11, 729}, + dictWord{9, 11, 30}, + dictWord{9, 11, 199}, + dictWord{9, 11, 231}, + dictWord{9, 11, 251}, + dictWord{9, 11, 334}, + dictWord{9, 11, 361}, + dictWord{9, 11, 488}, + dictWord{9, 11, 712}, + dictWord{10, 11, 55}, + dictWord{10, 11, 60}, + dictWord{ + 10, + 11, + 232, + }, + dictWord{10, 11, 332}, + dictWord{10, 11, 384}, + dictWord{10, 11, 396}, + dictWord{10, 11, 504}, + dictWord{10, 11, 542}, + dictWord{10, 11, 652}, + dictWord{11, 11, 20}, + dictWord{11, 11, 48}, + dictWord{11, 11, 207}, + dictWord{11, 11, 291}, + dictWord{11, 11, 298}, + dictWord{11, 11, 342}, + dictWord{ + 11, + 11, + 365, + }, + dictWord{11, 11, 394}, + dictWord{11, 11, 620}, + dictWord{11, 11, 705}, + dictWord{11, 11, 1017}, + dictWord{12, 11, 123}, + dictWord{12, 11, 340}, + dictWord{12, 11, 406}, + dictWord{12, 11, 643}, + dictWord{13, 11, 61}, + dictWord{13, 11, 269}, + dictWord{13, 11, 311}, + dictWord{13, 11, 319}, + dictWord{13, 11, 486}, + dictWord{14, 11, 234}, + dictWord{15, 11, 62}, + dictWord{15, 11, 85}, + dictWord{16, 11, 71}, + dictWord{18, 11, 119}, + dictWord{148, 11, 105}, + dictWord{ + 6, + 0, + 1455, + }, + dictWord{150, 11, 37}, + dictWord{135, 10, 1927}, + dictWord{135, 0, 1911}, + dictWord{137, 0, 891}, + dictWord{7, 10, 1756}, + dictWord{137, 10, 98}, + dictWord{7, 10, 1046}, + dictWord{139, 10, 160}, + dictWord{132, 0, 761}, + dictWord{6, 11, 379}, + dictWord{7, 11, 270}, + dictWord{7, 11, 1116}, + dictWord{ + 8, + 11, + 176, + }, + dictWord{8, 11, 183}, + dictWord{9, 11, 432}, + dictWord{9, 11, 661}, + dictWord{12, 11, 247}, + dictWord{12, 11, 617}, + dictWord{146, 11, 125}, + dictWord{ + 6, + 10, + 45, + }, + dictWord{7, 10, 433}, + dictWord{8, 10, 129}, + dictWord{9, 10, 21}, + dictWord{10, 10, 392}, + dictWord{11, 10, 79}, + dictWord{12, 10, 499}, + dictWord{ + 13, + 10, + 199, + }, + dictWord{141, 10, 451}, + dictWord{4, 0, 407}, + dictWord{5, 11, 792}, + dictWord{133, 11, 900}, + 
dictWord{132, 0, 560}, + dictWord{135, 0, 183}, + dictWord{ + 13, + 0, + 490, + }, + dictWord{7, 10, 558}, + dictWord{136, 10, 353}, + dictWord{4, 0, 475}, + dictWord{6, 0, 731}, + dictWord{11, 0, 35}, + dictWord{13, 0, 71}, + dictWord{13, 0, 177}, + dictWord{14, 0, 422}, + dictWord{133, 10, 785}, + dictWord{8, 10, 81}, + dictWord{9, 10, 189}, + dictWord{9, 10, 201}, + dictWord{11, 10, 478}, + dictWord{11, 10, 712}, + dictWord{141, 10, 338}, + dictWord{4, 0, 418}, + dictWord{4, 0, 819}, + dictWord{133, 10, 353}, + dictWord{151, 10, 26}, + dictWord{4, 11, 901}, + dictWord{ + 133, + 11, + 776, + }, + dictWord{132, 0, 575}, + dictWord{7, 0, 818}, + dictWord{16, 0, 92}, + dictWord{17, 0, 14}, + dictWord{17, 0, 45}, + dictWord{18, 0, 75}, + dictWord{148, 0, 18}, + dictWord{ + 6, + 0, + 222, + }, + dictWord{7, 0, 636}, + dictWord{7, 0, 1620}, + dictWord{8, 0, 409}, + dictWord{9, 0, 693}, + dictWord{139, 0, 77}, + dictWord{6, 10, 25}, + dictWord{7, 10, 855}, + dictWord{7, 10, 1258}, + dictWord{144, 10, 32}, + dictWord{6, 0, 1880}, + dictWord{6, 0, 1887}, + dictWord{6, 0, 1918}, + dictWord{6, 0, 1924}, + dictWord{9, 0, 967}, + dictWord{9, 0, 995}, + dictWord{9, 0, 1015}, + dictWord{12, 0, 826}, + dictWord{12, 0, 849}, + dictWord{12, 0, 857}, + dictWord{12, 0, 860}, + dictWord{12, 0, 886}, + dictWord{ + 12, + 0, + 932, + }, + dictWord{18, 0, 228}, + dictWord{18, 0, 231}, + dictWord{146, 0, 240}, + dictWord{134, 0, 633}, + dictWord{134, 0, 1308}, + dictWord{4, 11, 37}, + dictWord{ + 5, + 11, + 334, + }, + dictWord{135, 11, 1253}, + dictWord{10, 0, 86}, + dictWord{4, 10, 4}, + dictWord{7, 10, 1118}, + dictWord{7, 10, 1320}, + dictWord{7, 10, 1706}, + dictWord{ + 8, + 10, + 277, + }, + dictWord{9, 10, 622}, + dictWord{11, 10, 724}, + dictWord{12, 10, 350}, + dictWord{12, 10, 397}, + dictWord{13, 10, 28}, + dictWord{13, 10, 159}, + dictWord{ + 15, + 10, + 89, + }, + dictWord{18, 10, 5}, + dictWord{19, 10, 9}, + dictWord{20, 10, 34}, + dictWord{150, 10, 47}, + dictWord{132, 11, 508}, + dictWord{137, 11, 448}, + dictWord{ + 12, + 11, + 107, + }, + dictWord{146, 11, 31}, + dictWord{132, 0, 817}, + dictWord{134, 0, 663}, + dictWord{133, 0, 882}, + dictWord{134, 0, 914}, + dictWord{132, 11, 540}, + dictWord{132, 11, 533}, + dictWord{136, 11, 608}, + dictWord{8, 0, 885}, + dictWord{138, 0, 865}, + dictWord{132, 0, 426}, + dictWord{6, 0, 58}, + dictWord{7, 0, 745}, + dictWord{7, 0, 1969}, + dictWord{8, 0, 399}, + dictWord{8, 0, 675}, + dictWord{9, 0, 479}, + dictWord{9, 0, 731}, + dictWord{10, 0, 330}, + dictWord{10, 0, 593}, + dictWord{ + 10, + 0, + 817, + }, + dictWord{11, 0, 32}, + dictWord{11, 0, 133}, + dictWord{11, 0, 221}, + dictWord{145, 0, 68}, + dictWord{134, 10, 255}, + dictWord{7, 0, 102}, + dictWord{ + 137, + 0, + 538, + }, + dictWord{137, 10, 216}, + dictWord{7, 11, 253}, + dictWord{136, 11, 549}, + dictWord{135, 11, 912}, + dictWord{9, 10, 183}, + dictWord{139, 10, 286}, + dictWord{11, 10, 956}, + dictWord{151, 10, 3}, + dictWord{8, 11, 527}, + dictWord{18, 11, 60}, + dictWord{147, 11, 24}, + dictWord{4, 10, 536}, + dictWord{7, 10, 1141}, + dictWord{10, 10, 723}, + dictWord{139, 10, 371}, + dictWord{133, 11, 920}, + dictWord{7, 0, 876}, + dictWord{135, 10, 285}, + dictWord{135, 10, 560}, + dictWord{ + 132, + 10, + 690, + }, + dictWord{142, 11, 126}, + dictWord{11, 10, 33}, + dictWord{12, 10, 571}, + dictWord{149, 10, 1}, + dictWord{133, 0, 566}, + dictWord{9, 0, 139}, + dictWord{ + 10, + 0, + 399, + }, + dictWord{11, 0, 469}, + dictWord{12, 0, 634}, + dictWord{13, 0, 223}, + dictWord{132, 11, 
483}, + dictWord{6, 0, 48}, + dictWord{135, 0, 63}, + dictWord{18, 0, 12}, + dictWord{7, 10, 1862}, + dictWord{12, 10, 491}, + dictWord{12, 10, 520}, + dictWord{13, 10, 383}, + dictWord{142, 10, 244}, + dictWord{135, 11, 1665}, + dictWord{132, 11, 448}, + dictWord{9, 11, 495}, + dictWord{146, 11, 104}, + dictWord{6, 0, 114}, + dictWord{7, 0, 1224}, + dictWord{7, 0, 1556}, + dictWord{136, 0, 3}, + dictWord{ + 4, + 10, + 190, + }, + dictWord{133, 10, 554}, + dictWord{8, 0, 576}, + dictWord{9, 0, 267}, + dictWord{133, 10, 1001}, + dictWord{133, 10, 446}, + dictWord{133, 0, 933}, + dictWord{139, 11, 1009}, + dictWord{8, 11, 653}, + dictWord{13, 11, 93}, + dictWord{147, 11, 14}, + dictWord{6, 0, 692}, + dictWord{6, 0, 821}, + dictWord{134, 0, 1077}, + dictWord{5, 11, 172}, + dictWord{135, 11, 801}, + dictWord{138, 0, 752}, + dictWord{4, 0, 375}, + dictWord{134, 0, 638}, + dictWord{134, 0, 1011}, + dictWord{ + 140, + 11, + 540, + }, + dictWord{9, 0, 96}, + dictWord{133, 11, 260}, + dictWord{139, 11, 587}, + dictWord{135, 10, 1231}, + dictWord{12, 0, 30}, + dictWord{13, 0, 148}, + dictWord{ + 14, + 0, + 87, + }, + dictWord{14, 0, 182}, + dictWord{16, 0, 42}, + dictWord{20, 0, 70}, + dictWord{132, 10, 304}, + dictWord{6, 0, 1398}, + dictWord{7, 0, 56}, + dictWord{7, 0, 1989}, + dictWord{8, 0, 337}, + dictWord{8, 0, 738}, + dictWord{9, 0, 600}, + dictWord{12, 0, 37}, + dictWord{13, 0, 447}, + dictWord{142, 0, 92}, + dictWord{138, 0, 666}, + dictWord{ + 5, + 0, + 394, + }, + dictWord{7, 0, 487}, + dictWord{136, 0, 246}, + dictWord{9, 0, 437}, + dictWord{6, 10, 53}, + dictWord{6, 10, 199}, + dictWord{7, 10, 1408}, + dictWord{8, 10, 32}, + dictWord{8, 10, 93}, + dictWord{10, 10, 397}, + dictWord{10, 10, 629}, + dictWord{11, 10, 593}, + dictWord{11, 10, 763}, + dictWord{13, 10, 326}, + dictWord{145, 10, 35}, + dictWord{134, 10, 105}, + dictWord{9, 0, 320}, + dictWord{10, 0, 506}, + dictWord{138, 10, 794}, + dictWord{7, 11, 57}, + dictWord{8, 11, 167}, + dictWord{8, 11, 375}, + dictWord{9, 11, 82}, + dictWord{9, 11, 561}, + dictWord{10, 11, 620}, + dictWord{10, 11, 770}, + dictWord{11, 10, 704}, + dictWord{141, 10, 396}, + dictWord{6, 0, 1003}, + dictWord{5, 10, 114}, + dictWord{5, 10, 255}, + dictWord{141, 10, 285}, + dictWord{7, 0, 866}, + dictWord{135, 0, 1163}, + dictWord{133, 11, 531}, + dictWord{ + 132, + 0, + 328, + }, + dictWord{7, 10, 2035}, + dictWord{8, 10, 19}, + dictWord{9, 10, 89}, + dictWord{138, 10, 831}, + dictWord{8, 11, 194}, + dictWord{136, 11, 756}, + dictWord{ + 136, + 0, + 1000, + }, + dictWord{5, 11, 453}, + dictWord{134, 11, 441}, + dictWord{4, 0, 101}, + dictWord{5, 0, 833}, + dictWord{7, 0, 1171}, + dictWord{136, 0, 744}, + dictWord{ + 133, + 0, + 726, + }, + dictWord{136, 10, 746}, + dictWord{138, 0, 176}, + dictWord{6, 0, 9}, + dictWord{6, 0, 397}, + dictWord{7, 0, 53}, + dictWord{7, 0, 1742}, + dictWord{10, 0, 632}, + dictWord{11, 0, 828}, + dictWord{140, 0, 146}, + dictWord{135, 11, 22}, + dictWord{145, 11, 64}, + dictWord{132, 0, 839}, + dictWord{11, 0, 417}, + dictWord{12, 0, 223}, + dictWord{140, 0, 265}, + dictWord{4, 11, 102}, + dictWord{7, 11, 815}, + dictWord{7, 11, 1699}, + dictWord{139, 11, 964}, + dictWord{5, 10, 955}, + dictWord{ + 136, + 10, + 814, + }, + dictWord{6, 0, 1931}, + dictWord{6, 0, 2007}, + dictWord{18, 0, 246}, + dictWord{146, 0, 247}, + dictWord{8, 0, 198}, + dictWord{11, 0, 29}, + dictWord{140, 0, 534}, + dictWord{135, 0, 1771}, + dictWord{6, 0, 846}, + dictWord{7, 11, 1010}, + dictWord{11, 11, 733}, + dictWord{11, 11, 759}, + dictWord{12, 11, 
563}, + dictWord{ + 13, + 11, + 34, + }, + dictWord{14, 11, 101}, + dictWord{18, 11, 45}, + dictWord{146, 11, 129}, + dictWord{4, 0, 186}, + dictWord{5, 0, 157}, + dictWord{8, 0, 168}, + dictWord{138, 0, 6}, + dictWord{132, 11, 899}, + dictWord{133, 10, 56}, + dictWord{148, 10, 100}, + dictWord{133, 0, 875}, + dictWord{5, 0, 773}, + dictWord{5, 0, 991}, + dictWord{6, 0, 1635}, + dictWord{134, 0, 1788}, + dictWord{6, 0, 1274}, + dictWord{9, 0, 477}, + dictWord{141, 0, 78}, + dictWord{4, 0, 639}, + dictWord{7, 0, 111}, + dictWord{8, 0, 581}, + dictWord{ + 12, + 0, + 177, + }, + dictWord{6, 11, 52}, + dictWord{9, 11, 104}, + dictWord{9, 11, 559}, + dictWord{10, 10, 4}, + dictWord{10, 10, 13}, + dictWord{11, 10, 638}, + dictWord{ + 12, + 11, + 308, + }, + dictWord{19, 11, 87}, + dictWord{148, 10, 57}, + dictWord{132, 11, 604}, + dictWord{4, 11, 301}, + dictWord{133, 10, 738}, + dictWord{133, 10, 758}, + dictWord{134, 0, 1747}, + dictWord{7, 11, 1440}, + dictWord{11, 11, 854}, + dictWord{11, 11, 872}, + dictWord{11, 11, 921}, + dictWord{12, 11, 551}, + dictWord{ + 13, + 11, + 472, + }, + dictWord{142, 11, 367}, + dictWord{7, 0, 1364}, + dictWord{7, 0, 1907}, + dictWord{141, 0, 158}, + dictWord{134, 0, 873}, + dictWord{4, 0, 404}, + dictWord{ + 4, + 0, + 659, + }, + dictWord{7, 0, 552}, + dictWord{135, 0, 675}, + dictWord{135, 10, 1112}, + dictWord{139, 10, 328}, + dictWord{7, 11, 508}, + dictWord{137, 10, 133}, + dictWord{133, 0, 391}, + dictWord{5, 10, 110}, + dictWord{6, 10, 169}, + dictWord{6, 10, 1702}, + dictWord{7, 10, 400}, + dictWord{8, 10, 538}, + dictWord{9, 10, 184}, + dictWord{ + 9, + 10, + 524, + }, + dictWord{140, 10, 218}, + dictWord{6, 11, 310}, + dictWord{7, 11, 1849}, + dictWord{8, 11, 72}, + dictWord{8, 11, 272}, + dictWord{8, 11, 431}, + dictWord{ + 9, + 11, + 12, + }, + dictWord{9, 11, 351}, + dictWord{10, 11, 563}, + dictWord{10, 11, 630}, + dictWord{10, 11, 810}, + dictWord{11, 11, 367}, + dictWord{11, 11, 599}, + dictWord{11, 11, 686}, + dictWord{140, 11, 672}, + dictWord{5, 0, 540}, + dictWord{6, 0, 1697}, + dictWord{136, 0, 668}, + dictWord{132, 0, 883}, + dictWord{134, 0, 78}, + dictWord{12, 0, 628}, + dictWord{18, 0, 79}, + dictWord{6, 10, 133}, + dictWord{9, 10, 353}, + dictWord{139, 10, 993}, + dictWord{6, 11, 181}, + dictWord{7, 11, 537}, + dictWord{ + 8, + 11, + 64, + }, + dictWord{9, 11, 127}, + dictWord{10, 11, 496}, + dictWord{12, 11, 510}, + dictWord{141, 11, 384}, + dictWord{6, 10, 93}, + dictWord{7, 10, 1422}, + dictWord{ + 7, + 10, + 1851, + }, + dictWord{8, 10, 673}, + dictWord{9, 10, 529}, + dictWord{140, 10, 43}, + dictWord{137, 10, 371}, + dictWord{134, 0, 1460}, + dictWord{134, 0, 962}, + dictWord{4, 11, 244}, + dictWord{135, 11, 233}, + dictWord{9, 10, 25}, + dictWord{10, 10, 467}, + dictWord{138, 10, 559}, + dictWord{4, 10, 335}, + dictWord{ + 135, + 10, + 942, + }, + dictWord{133, 0, 460}, + dictWord{135, 11, 334}, + dictWord{134, 11, 1650}, + dictWord{4, 0, 199}, + dictWord{139, 0, 34}, + dictWord{5, 10, 601}, + dictWord{ + 8, + 10, + 39, + }, + dictWord{10, 10, 773}, + dictWord{11, 10, 84}, + dictWord{12, 10, 205}, + dictWord{142, 10, 1}, + dictWord{133, 10, 870}, + dictWord{134, 0, 388}, + dictWord{14, 0, 474}, + dictWord{148, 0, 120}, + dictWord{133, 11, 369}, + dictWord{139, 0, 271}, + dictWord{4, 0, 511}, + dictWord{9, 0, 333}, + dictWord{9, 0, 379}, + dictWord{ + 10, + 0, + 602, + }, + dictWord{11, 0, 441}, + dictWord{11, 0, 723}, + dictWord{11, 0, 976}, + dictWord{12, 0, 357}, + dictWord{132, 10, 181}, + dictWord{134, 0, 608}, + 
dictWord{134, 10, 1652}, + dictWord{22, 0, 49}, + dictWord{137, 11, 338}, + dictWord{140, 0, 988}, + dictWord{134, 0, 617}, + dictWord{5, 0, 938}, + dictWord{136, 0, 707}, + dictWord{132, 10, 97}, + dictWord{5, 10, 147}, + dictWord{6, 10, 286}, + dictWord{7, 10, 1362}, + dictWord{141, 10, 176}, + dictWord{6, 0, 756}, + dictWord{ + 134, + 0, + 1149, + }, + dictWord{133, 11, 896}, + dictWord{6, 10, 375}, + dictWord{7, 10, 169}, + dictWord{7, 10, 254}, + dictWord{136, 10, 780}, + dictWord{134, 0, 1583}, + dictWord{135, 10, 1447}, + dictWord{139, 0, 285}, + dictWord{7, 11, 1117}, + dictWord{8, 11, 393}, + dictWord{136, 11, 539}, + dictWord{135, 0, 344}, + dictWord{ + 6, + 0, + 469, + }, + dictWord{7, 0, 1709}, + dictWord{138, 0, 515}, + dictWord{5, 10, 629}, + dictWord{135, 10, 1549}, + dictWord{5, 11, 4}, + dictWord{5, 11, 810}, + dictWord{ + 6, + 11, + 13, + }, + dictWord{6, 11, 538}, + dictWord{6, 11, 1690}, + dictWord{6, 11, 1726}, + dictWord{7, 11, 499}, + dictWord{7, 11, 1819}, + dictWord{8, 11, 148}, + dictWord{ + 8, + 11, + 696, + }, + dictWord{8, 11, 791}, + dictWord{12, 11, 125}, + dictWord{13, 11, 54}, + dictWord{143, 11, 9}, + dictWord{135, 11, 1268}, + dictWord{137, 0, 404}, + dictWord{ + 132, + 0, + 500, + }, + dictWord{5, 0, 68}, + dictWord{134, 0, 383}, + dictWord{11, 0, 216}, + dictWord{139, 0, 340}, + dictWord{4, 11, 925}, + dictWord{5, 11, 803}, + dictWord{ + 8, + 11, + 698, + }, + dictWord{138, 11, 828}, + dictWord{4, 0, 337}, + dictWord{6, 0, 353}, + dictWord{7, 0, 1934}, + dictWord{8, 0, 488}, + dictWord{137, 0, 429}, + dictWord{7, 0, 236}, + dictWord{7, 0, 1795}, + dictWord{8, 0, 259}, + dictWord{9, 0, 135}, + dictWord{9, 0, 177}, + dictWord{9, 0, 860}, + dictWord{10, 0, 825}, + dictWord{11, 0, 115}, + dictWord{ + 11, + 0, + 370, + }, + dictWord{11, 0, 405}, + dictWord{11, 0, 604}, + dictWord{12, 0, 10}, + dictWord{12, 0, 667}, + dictWord{12, 0, 669}, + dictWord{13, 0, 76}, + dictWord{14, 0, 310}, + dictWord{15, 0, 76}, + dictWord{15, 0, 147}, + dictWord{148, 0, 23}, + dictWord{4, 0, 15}, + dictWord{4, 0, 490}, + dictWord{5, 0, 22}, + dictWord{6, 0, 244}, + dictWord{7, 0, 40}, + dictWord{7, 0, 200}, + dictWord{7, 0, 906}, + dictWord{7, 0, 1199}, + dictWord{9, 0, 616}, + dictWord{10, 0, 716}, + dictWord{11, 0, 635}, + dictWord{11, 0, 801}, + dictWord{ + 140, + 0, + 458, + }, + dictWord{12, 0, 756}, + dictWord{132, 10, 420}, + dictWord{134, 0, 1504}, + dictWord{6, 0, 757}, + dictWord{133, 11, 383}, + dictWord{6, 0, 1266}, + dictWord{ + 135, + 0, + 1735, + }, + dictWord{5, 0, 598}, + dictWord{7, 0, 791}, + dictWord{8, 0, 108}, + dictWord{9, 0, 123}, + dictWord{7, 10, 1570}, + dictWord{140, 10, 542}, + dictWord{ + 142, + 11, + 410, + }, + dictWord{9, 11, 660}, + dictWord{138, 11, 347}, +} diff --git a/vendor/github.com/andybalholm/brotli/symbol_list.go b/vendor/github.com/andybalholm/brotli/symbol_list.go new file mode 100644 index 00000000000..c5cb49e5a9d --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/symbol_list.go @@ -0,0 +1,22 @@ +package brotli + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Utilities for building Huffman decoding tables. 
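+   A symbolList is a view into a shared []uint16 starting at a fixed offset;
+   symbolListGet and symbolListPut below index relative to that offset.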
*/ + +type symbolList struct { + storage []uint16 + offset int +} + +func symbolListGet(sl symbolList, i int) uint16 { + return sl.storage[i+sl.offset] +} + +func symbolListPut(sl symbolList, i int, val uint16) { + sl.storage[i+sl.offset] = val +} diff --git a/vendor/github.com/andybalholm/brotli/transform.go b/vendor/github.com/andybalholm/brotli/transform.go new file mode 100644 index 00000000000..d2c043a6227 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/transform.go @@ -0,0 +1,641 @@ +package brotli + +const ( + transformIdentity = 0 + transformOmitLast1 = 1 + transformOmitLast2 = 2 + transformOmitLast3 = 3 + transformOmitLast4 = 4 + transformOmitLast5 = 5 + transformOmitLast6 = 6 + transformOmitLast7 = 7 + transformOmitLast8 = 8 + transformOmitLast9 = 9 + transformUppercaseFirst = 10 + transformUppercaseAll = 11 + transformOmitFirst1 = 12 + transformOmitFirst2 = 13 + transformOmitFirst3 = 14 + transformOmitFirst4 = 15 + transformOmitFirst5 = 16 + transformOmitFirst6 = 17 + transformOmitFirst7 = 18 + transformOmitFirst8 = 19 + transformOmitFirst9 = 20 + transformShiftFirst = 21 + transformShiftAll = 22 + iota - 22 + numTransformTypes +) + +const transformsMaxCutOff = transformOmitLast9 + +type transforms struct { + prefix_suffix_size uint16 + prefix_suffix []byte + prefix_suffix_map []uint16 + num_transforms uint32 + transforms []byte + params []byte + cutOffTransforms [transformsMaxCutOff + 1]int16 +} + +func transformPrefixId(t *transforms, I int) byte { + return t.transforms[(I*3)+0] +} + +func transformType(t *transforms, I int) byte { + return t.transforms[(I*3)+1] +} + +func transformSuffixId(t *transforms, I int) byte { + return t.transforms[(I*3)+2] +} + +func transformPrefix(t *transforms, I int) []byte { + return t.prefix_suffix[t.prefix_suffix_map[transformPrefixId(t, I)]:] +} + +func transformSuffix(t *transforms, I int) []byte { + return t.prefix_suffix[t.prefix_suffix_map[transformSuffixId(t, I)]:] +} + +/* RFC 7932 transforms string data */ +const kPrefixSuffix string = "\001 \002, \010 of the \004 of \002s \001.\005 and \004 " + "in \001\"\004 to \002\">\001\n\002. \001]\005 for \003 a \006 " + "that \001'\006 with \006 from \004 by \001(\006. T" + "he \004 on \004 as \004 is \004ing \002\n\t\001:\003ed " + "\002=\"\004 at \003ly \001,\002='\005.com/\007. 
This \005" + " not \003er \003al \004ful \004ive \005less \004es" + "t \004ize \002\xc2\xa0\004ous \005 the \002e \000" + +var kPrefixSuffixMap = [50]uint16{ + 0x00, + 0x02, + 0x05, + 0x0E, + 0x13, + 0x16, + 0x18, + 0x1E, + 0x23, + 0x25, + 0x2A, + 0x2D, + 0x2F, + 0x32, + 0x34, + 0x3A, + 0x3E, + 0x45, + 0x47, + 0x4E, + 0x55, + 0x5A, + 0x5C, + 0x63, + 0x68, + 0x6D, + 0x72, + 0x77, + 0x7A, + 0x7C, + 0x80, + 0x83, + 0x88, + 0x8C, + 0x8E, + 0x91, + 0x97, + 0x9F, + 0xA5, + 0xA9, + 0xAD, + 0xB2, + 0xB7, + 0xBD, + 0xC2, + 0xC7, + 0xCA, + 0xCF, + 0xD5, + 0xD8, +} + +/* RFC 7932 transforms */ +var kTransformsData = []byte{ + 49, + transformIdentity, + 49, + 49, + transformIdentity, + 0, + 0, + transformIdentity, + 0, + 49, + transformOmitFirst1, + 49, + 49, + transformUppercaseFirst, + 0, + 49, + transformIdentity, + 47, + 0, + transformIdentity, + 49, + 4, + transformIdentity, + 0, + 49, + transformIdentity, + 3, + 49, + transformUppercaseFirst, + 49, + 49, + transformIdentity, + 6, + 49, + transformOmitFirst2, + 49, + 49, + transformOmitLast1, + 49, + 1, + transformIdentity, + 0, + 49, + transformIdentity, + 1, + 0, + transformUppercaseFirst, + 0, + 49, + transformIdentity, + 7, + 49, + transformIdentity, + 9, + 48, + transformIdentity, + 0, + 49, + transformIdentity, + 8, + 49, + transformIdentity, + 5, + 49, + transformIdentity, + 10, + 49, + transformIdentity, + 11, + 49, + transformOmitLast3, + 49, + 49, + transformIdentity, + 13, + 49, + transformIdentity, + 14, + 49, + transformOmitFirst3, + 49, + 49, + transformOmitLast2, + 49, + 49, + transformIdentity, + 15, + 49, + transformIdentity, + 16, + 0, + transformUppercaseFirst, + 49, + 49, + transformIdentity, + 12, + 5, + transformIdentity, + 49, + 0, + transformIdentity, + 1, + 49, + transformOmitFirst4, + 49, + 49, + transformIdentity, + 18, + 49, + transformIdentity, + 17, + 49, + transformIdentity, + 19, + 49, + transformIdentity, + 20, + 49, + transformOmitFirst5, + 49, + 49, + transformOmitFirst6, + 49, + 47, + transformIdentity, + 49, + 49, + transformOmitLast4, + 49, + 49, + transformIdentity, + 22, + 49, + transformUppercaseAll, + 49, + 49, + transformIdentity, + 23, + 49, + transformIdentity, + 24, + 49, + transformIdentity, + 25, + 49, + transformOmitLast7, + 49, + 49, + transformOmitLast1, + 26, + 49, + transformIdentity, + 27, + 49, + transformIdentity, + 28, + 0, + transformIdentity, + 12, + 49, + transformIdentity, + 29, + 49, + transformOmitFirst9, + 49, + 49, + transformOmitFirst7, + 49, + 49, + transformOmitLast6, + 49, + 49, + transformIdentity, + 21, + 49, + transformUppercaseFirst, + 1, + 49, + transformOmitLast8, + 49, + 49, + transformIdentity, + 31, + 49, + transformIdentity, + 32, + 47, + transformIdentity, + 3, + 49, + transformOmitLast5, + 49, + 49, + transformOmitLast9, + 49, + 0, + transformUppercaseFirst, + 1, + 49, + transformUppercaseFirst, + 8, + 5, + transformIdentity, + 21, + 49, + transformUppercaseAll, + 0, + 49, + transformUppercaseFirst, + 10, + 49, + transformIdentity, + 30, + 0, + transformIdentity, + 5, + 35, + transformIdentity, + 49, + 47, + transformIdentity, + 2, + 49, + transformUppercaseFirst, + 17, + 49, + transformIdentity, + 36, + 49, + transformIdentity, + 33, + 5, + transformIdentity, + 0, + 49, + transformUppercaseFirst, + 21, + 49, + transformUppercaseFirst, + 5, + 49, + transformIdentity, + 37, + 0, + transformIdentity, + 30, + 49, + transformIdentity, + 38, + 0, + transformUppercaseAll, + 0, + 49, + transformIdentity, + 39, + 0, + transformUppercaseAll, + 49, + 49, + transformIdentity, + 
34, + 49, + transformUppercaseAll, + 8, + 49, + transformUppercaseFirst, + 12, + 0, + transformIdentity, + 21, + 49, + transformIdentity, + 40, + 0, + transformUppercaseFirst, + 12, + 49, + transformIdentity, + 41, + 49, + transformIdentity, + 42, + 49, + transformUppercaseAll, + 17, + 49, + transformIdentity, + 43, + 0, + transformUppercaseFirst, + 5, + 49, + transformUppercaseAll, + 10, + 0, + transformIdentity, + 34, + 49, + transformUppercaseFirst, + 33, + 49, + transformIdentity, + 44, + 49, + transformUppercaseAll, + 5, + 45, + transformIdentity, + 49, + 0, + transformIdentity, + 33, + 49, + transformUppercaseFirst, + 30, + 49, + transformUppercaseAll, + 30, + 49, + transformIdentity, + 46, + 49, + transformUppercaseAll, + 1, + 49, + transformUppercaseFirst, + 34, + 0, + transformUppercaseFirst, + 33, + 0, + transformUppercaseAll, + 30, + 0, + transformUppercaseAll, + 1, + 49, + transformUppercaseAll, + 33, + 49, + transformUppercaseAll, + 21, + 49, + transformUppercaseAll, + 12, + 0, + transformUppercaseAll, + 5, + 49, + transformUppercaseAll, + 34, + 0, + transformUppercaseAll, + 12, + 0, + transformUppercaseFirst, + 30, + 0, + transformUppercaseAll, + 34, + 0, + transformUppercaseFirst, + 34, +} + +var kBrotliTransforms = transforms{ + 217, + []byte(kPrefixSuffix), + kPrefixSuffixMap[:], + 121, + kTransformsData, + nil, /* no extra parameters */ + [transformsMaxCutOff + 1]int16{0, 12, 27, 23, 42, 63, 56, 48, 59, 64}, +} + +func getTransforms() *transforms { + return &kBrotliTransforms +} + +func toUpperCase(p []byte) int { + if p[0] < 0xC0 { + if p[0] >= 'a' && p[0] <= 'z' { + p[0] ^= 32 + } + + return 1 + } + + /* An overly simplified uppercasing model for UTF-8. */ + if p[0] < 0xE0 { + p[1] ^= 32 + return 2 + } + + /* An arbitrary transform for three byte characters. */ + p[2] ^= 5 + + return 3 +} + +func shiftTransform(word []byte, word_len int, parameter uint16) int { + /* Limited sign extension: scalar < (1 << 24). */ + var scalar uint32 = (uint32(parameter) & 0x7FFF) + (0x1000000 - (uint32(parameter) & 0x8000)) + if word[0] < 0x80 { + /* 1-byte rune / 0sssssss / 7 bit scalar (ASCII). */ + scalar += uint32(word[0]) + + word[0] = byte(scalar & 0x7F) + return 1 + } else if word[0] < 0xC0 { + /* Continuation / 10AAAAAA. */ + return 1 + } else if word[0] < 0xE0 { + /* 2-byte rune / 110sssss AAssssss / 11 bit scalar. */ + if word_len < 2 { + return 1 + } + scalar += uint32(word[1]&0x3F | (word[0]&0x1F)<<6) + word[0] = byte(0xC0 | (scalar>>6)&0x1F) + word[1] = byte(uint32(word[1]&0xC0) | scalar&0x3F) + return 2 + } else if word[0] < 0xF0 { + /* 3-byte rune / 1110ssss AAssssss BBssssss / 16 bit scalar. */ + if word_len < 3 { + return word_len + } + scalar += uint32(word[2])&0x3F | uint32(word[1]&0x3F)<<6 | uint32(word[0]&0x0F)<<12 + word[0] = byte(0xE0 | (scalar>>12)&0x0F) + word[1] = byte(uint32(word[1]&0xC0) | (scalar>>6)&0x3F) + word[2] = byte(uint32(word[2]&0xC0) | scalar&0x3F) + return 3 + } else if word[0] < 0xF8 { + /* 4-byte rune / 11110sss AAssssss BBssssss CCssssss / 21 bit scalar. 
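+	   Note: only the scalar bits change below; the continuation bytes keep
+	   their marker bits (the &0xC0 masks) and the lead byte keeps its 0xF0 prefix.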
*/ + if word_len < 4 { + return word_len + } + scalar += uint32(word[3])&0x3F | uint32(word[2]&0x3F)<<6 | uint32(word[1]&0x3F)<<12 | uint32(word[0]&0x07)<<18 + word[0] = byte(0xF0 | (scalar>>18)&0x07) + word[1] = byte(uint32(word[1]&0xC0) | (scalar>>12)&0x3F) + word[2] = byte(uint32(word[2]&0xC0) | (scalar>>6)&0x3F) + word[3] = byte(uint32(word[3]&0xC0) | scalar&0x3F) + return 4 + } + + return 1 +} + +func transformDictionaryWord(dst []byte, word []byte, len int, trans *transforms, transform_idx int) int { + var idx int = 0 + var prefix []byte = transformPrefix(trans, transform_idx) + var type_ byte = transformType(trans, transform_idx) + var suffix []byte = transformSuffix(trans, transform_idx) + { + var prefix_len int = int(prefix[0]) + prefix = prefix[1:] + for { + tmp1 := prefix_len + prefix_len-- + if tmp1 == 0 { + break + } + dst[idx] = prefix[0] + idx++ + prefix = prefix[1:] + } + } + { + var t int = int(type_) + var i int = 0 + if t <= transformOmitLast9 { + len -= t + } else if t >= transformOmitFirst1 && t <= transformOmitFirst9 { + var skip int = t - (transformOmitFirst1 - 1) + word = word[skip:] + len -= skip + } + + for i < len { + dst[idx] = word[i] + idx++ + i++ + } + if t == transformUppercaseFirst { + toUpperCase(dst[idx-len:]) + } else if t == transformUppercaseAll { + var uppercase []byte = dst + uppercase = uppercase[idx-len:] + for len > 0 { + var step int = toUpperCase(uppercase) + uppercase = uppercase[step:] + len -= step + } + } else if t == transformShiftFirst { + var param uint16 = uint16(trans.params[transform_idx*2]) + uint16(trans.params[transform_idx*2+1])<<8 + shiftTransform(dst[idx-len:], int(len), param) + } else if t == transformShiftAll { + var param uint16 = uint16(trans.params[transform_idx*2]) + uint16(trans.params[transform_idx*2+1])<<8 + var shift []byte = dst + shift = shift[idx-len:] + for len > 0 { + var step int = shiftTransform(shift, int(len), param) + shift = shift[step:] + len -= step + } + } + } + { + var suffix_len int = int(suffix[0]) + suffix = suffix[1:] + for { + tmp2 := suffix_len + suffix_len-- + if tmp2 == 0 { + break + } + dst[idx] = suffix[0] + idx++ + suffix = suffix[1:] + } + return idx + } +} diff --git a/vendor/github.com/andybalholm/brotli/utf8_util.go b/vendor/github.com/andybalholm/brotli/utf8_util.go new file mode 100644 index 00000000000..3244247eecc --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/utf8_util.go @@ -0,0 +1,70 @@ +package brotli + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Heuristics for deciding about the UTF8-ness of strings. */ + +const kMinUTF8Ratio float64 = 0.75 + +/* Returns 1 if at least min_fraction of the bytes between pos and + pos + length in the (data, mask) ring-buffer is UTF8-encoded, otherwise + returns 0. 
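+   (The description above actually matches isMostlyUTF8 below; parseAsUTF8
+   itself decodes a single rune from input, stores it in *symbol, or
+   0x110000|input[0] for a non-UTF8 byte, and returns the number of bytes
+   consumed.)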
*/
+func parseAsUTF8(symbol *int, input []byte, size uint) uint {
+	/* ASCII */
+	if input[0]&0x80 == 0 {
+		*symbol = int(input[0])
+		if *symbol > 0 {
+			return 1
+		}
+	}
+
+	/* 2-byte UTF8 */
+	if size > 1 && input[0]&0xE0 == 0xC0 && input[1]&0xC0 == 0x80 {
+		*symbol = (int(input[0])&0x1F)<<6 | int(input[1])&0x3F
+		if *symbol > 0x7F {
+			return 2
+		}
+	}
+
+	/* 3-byte UTF8 */
+	if size > 2 && input[0]&0xF0 == 0xE0 && input[1]&0xC0 == 0x80 && input[2]&0xC0 == 0x80 {
+		*symbol = (int(input[0])&0x0F)<<12 | (int(input[1])&0x3F)<<6 | int(input[2])&0x3F
+		if *symbol > 0x7FF {
+			return 3
+		}
+	}
+
+	/* 4-byte UTF8 */
+	if size > 3 && input[0]&0xF8 == 0xF0 && input[1]&0xC0 == 0x80 && input[2]&0xC0 == 0x80 && input[3]&0xC0 == 0x80 {
+		*symbol = (int(input[0])&0x07)<<18 | (int(input[1])&0x3F)<<12 | (int(input[2])&0x3F)<<6 | int(input[3])&0x3F
+		if *symbol > 0xFFFF && *symbol <= 0x10FFFF {
+			return 4
+		}
+	}
+
+	/* Not UTF8, emit a special symbol above the UTF8-code space */
+	*symbol = 0x110000 | int(input[0])
+
+	return 1
+}
+
+/* Returns true if at least min_fraction of the data is UTF8-encoded. */
+func isMostlyUTF8(data []byte, pos uint, mask uint, length uint, min_fraction float64) bool {
+	var size_utf8 uint = 0
+	var i uint = 0
+	for i < length {
+		var symbol int
+		current_data := data[(pos+i)&mask:]
+		var bytes_read uint = parseAsUTF8(&symbol, current_data, length-i)
+		i += bytes_read
+		if symbol < 0x110000 {
+			size_utf8 += bytes_read
+		}
+	}
+
+	return float64(size_utf8) > min_fraction*float64(length)
+}
diff --git a/vendor/github.com/andybalholm/brotli/util.go b/vendor/github.com/andybalholm/brotli/util.go
new file mode 100644
index 00000000000..a84553a6396
--- /dev/null
+++ b/vendor/github.com/andybalholm/brotli/util.go
@@ -0,0 +1,7 @@
+package brotli
+
+func assert(cond bool) {
+	if !cond {
+		panic("assertion failure")
+	}
+}
diff --git a/vendor/github.com/andybalholm/brotli/write_bits.go b/vendor/github.com/andybalholm/brotli/write_bits.go
new file mode 100644
index 00000000000..87299011985
--- /dev/null
+++ b/vendor/github.com/andybalholm/brotli/write_bits.go
@@ -0,0 +1,52 @@
+package brotli
+
+import "encoding/binary"
+
+/* Copyright 2010 Google Inc. All Rights Reserved.
+
+   Distributed under MIT license.
+   See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
+*/
+
+/* Write bits into a byte array. */
+
+/* This function writes bits into bytes in increasing addresses, and within
+   a byte least-significant-bit first.
+
+   The function can write up to 56 bits in one go with writeBits.
+   Example: let's assume that 3 bits (Rs below) have been written already:
+
+   BYTE-0     BYTE+1       BYTE+2
+
+   0000 0RRR    0000 0000    0000 0000
+
+   Now, we could write 5 or less bits in MSB by just shifting by 3
+   and OR'ing to BYTE-0.
+
+   For n bits, we take the last 5 bits, OR that with high bits in BYTE-0,
+   and locate the rest in BYTE+1, BYTE+2, etc. */
+func writeBits(n_bits uint, bits uint64, pos *uint, array []byte) {
+	/* This branch of the code can write up to 56 bits at a time,
+	   7 bits are lost by being perhaps already in *p and at least
+	   1 bit is needed to initialize the bit-stream ahead (i.e. if 7
+	   bits are in *p and we write 57 bits, then the next write will
+	   access a byte that was never initialized).
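+	   Note that the value is stored with a full 64-bit little-endian write
+	   below, so the output buffer must extend at least 8 bytes past the
+	   current byte position.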
*/ + p := array[*pos>>3:] + v := uint64(p[0]) + v |= bits << (*pos & 7) + binary.LittleEndian.PutUint64(p, v) + *pos += n_bits +} + +func writeSingleBit(bit bool, pos *uint, array []byte) { + if bit { + writeBits(1, 1, pos, array) + } else { + writeBits(1, 0, pos, array) + } +} + +func writeBitsPrepareStorage(pos uint, array []byte) { + assert(pos&7 == 0) + array[pos>>3] = 0 +} diff --git a/vendor/github.com/andybalholm/brotli/writer.go b/vendor/github.com/andybalholm/brotli/writer.go new file mode 100644 index 00000000000..39feaef5217 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/writer.go @@ -0,0 +1,119 @@ +package brotli + +import ( + "errors" + "io" +) + +const ( + BestSpeed = 0 + BestCompression = 11 + DefaultCompression = 6 +) + +// WriterOptions configures Writer. +type WriterOptions struct { + // Quality controls the compression-speed vs compression-density trade-offs. + // The higher the quality, the slower the compression. Range is 0 to 11. + Quality int + // LGWin is the base 2 logarithm of the sliding window size. + // Range is 10 to 24. 0 indicates automatic configuration based on Quality. + LGWin int +} + +var ( + errEncode = errors.New("brotli: encode error") + errWriterClosed = errors.New("brotli: Writer is closed") +) + +// Writes to the returned writer are compressed and written to dst. +// It is the caller's responsibility to call Close on the Writer when done. +// Writes may be buffered and not flushed until Close. +func NewWriter(dst io.Writer) *Writer { + return NewWriterLevel(dst, DefaultCompression) +} + +// NewWriterLevel is like NewWriter but specifies the compression level instead +// of assuming DefaultCompression. +// The compression level can be DefaultCompression or any integer value between +// BestSpeed and BestCompression inclusive. +func NewWriterLevel(dst io.Writer, level int) *Writer { + return NewWriterOptions(dst, WriterOptions{ + Quality: level, + }) +} + +// NewWriterOptions is like NewWriter but specifies WriterOptions +func NewWriterOptions(dst io.Writer, options WriterOptions) *Writer { + w := new(Writer) + w.options = options + w.Reset(dst) + return w +} + +// Reset discards the Writer's state and makes it equivalent to the result of +// its original state from NewWriter or NewWriterLevel, but writing to dst +// instead. This permits reusing a Writer rather than allocating a new one. +func (w *Writer) Reset(dst io.Writer) { + encoderInitState(w) + w.params.quality = w.options.Quality + if w.options.LGWin > 0 { + w.params.lgwin = uint(w.options.LGWin) + } + w.dst = dst + w.err = nil +} + +func (w *Writer) writeChunk(p []byte, op int) (n int, err error) { + if w.dst == nil { + return 0, errWriterClosed + } + if w.err != nil { + return 0, w.err + } + + for { + availableIn := uint(len(p)) + nextIn := p + success := encoderCompressStream(w, op, &availableIn, &nextIn) + bytesConsumed := len(p) - int(availableIn) + p = p[bytesConsumed:] + n += bytesConsumed + if !success { + return n, errEncode + } + + if len(p) == 0 || w.err != nil { + return n, w.err + } + } +} + +// Flush outputs encoded data for all input provided to Write. The resulting +// output can be decoded to match all input before Flush, but the stream is +// not yet complete until after Close. +// Flush has a negative impact on compression. +func (w *Writer) Flush() error { + _, err := w.writeChunk(nil, operationFlush) + return err +} + +// Close flushes remaining data to the decorated writer. 
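+// Calling Close again on an already-closed Writer returns an error from writeChunk.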
+func (w *Writer) Close() error { + // If stream is already closed, it is reported by `writeChunk`. + _, err := w.writeChunk(nil, operationFinish) + w.dst = nil + return err +} + +// Write implements io.Writer. Flush or Close must be called to ensure that the +// encoded bytes are actually flushed to the underlying Writer. +func (w *Writer) Write(p []byte) (n int, err error) { + return w.writeChunk(p, operationProcess) +} + +type nopCloser struct { + io.Writer +} + +func (nopCloser) Close() error { return nil } diff --git a/vendor/github.com/armon/go-metrics/.gitignore b/vendor/github.com/armon/go-metrics/.gitignore new file mode 100644 index 00000000000..8c03ec112a4 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +/metrics.out diff --git a/vendor/github.com/armon/go-metrics/LICENSE b/vendor/github.com/armon/go-metrics/LICENSE new file mode 100644 index 00000000000..106569e542b --- /dev/null +++ b/vendor/github.com/armon/go-metrics/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 Armon Dadgar + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/armon/go-metrics/README.md b/vendor/github.com/armon/go-metrics/README.md new file mode 100644 index 00000000000..aa73348c08d --- /dev/null +++ b/vendor/github.com/armon/go-metrics/README.md @@ -0,0 +1,91 @@ +go-metrics +========== + +This library provides a `metrics` package which can be used to instrument code, +expose application metrics, and profile runtime performance in a flexible manner. + +Current API: [![GoDoc](https://godoc.org/github.com/armon/go-metrics?status.svg)](https://godoc.org/github.com/armon/go-metrics) + +Sinks +----- + +The `metrics` package makes use of a `MetricSink` interface to support delivery +to any type of backend. Currently the following sinks are provided: + +* StatsiteSink : Sinks to a [statsite](https://github.com/armon/statsite/) instance (TCP) +* StatsdSink: Sinks to a [StatsD](https://github.com/etsy/statsd/) / statsite instance (UDP) +* PrometheusSink: Sinks to a [Prometheus](http://prometheus.io/) metrics endpoint (exposed via HTTP for scrapes) +* InmemSink : Provides in-memory aggregation, can be used to export stats +* FanoutSink : Sinks to multiple sinks. 
Enables writing to multiple statsite instances, for example.
+* BlackholeSink : Sinks to nowhere
+
+In addition to the sinks, the `InmemSignal` can be used to catch a signal,
+and dump a formatted output of recent metrics. For example, when a process gets
+a SIGUSR1, it can dump recent performance metrics to stderr for debugging.
+
+Labels
+------
+
+Most metrics have an equivalent method ending with `WithLabels`; these methods
+allow pushing metrics with labels and using features of the underlying sinks
+(e.g. translation into Prometheus labels).
+
+Since some of these labels may greatly increase the cardinality of metrics, the
+library allows filtering labels using a blacklist/whitelist filtering system
+which is global to all metrics.
+
+* If `Config.AllowedLabels` is not nil, then only labels specified in this value will be sent to the underlying sink; otherwise, all labels are sent by default.
+* If `Config.BlockedLabels` is not nil, any label specified in this value will not be sent to the underlying sinks.
+
+By default, both `Config.AllowedLabels` and `Config.BlockedLabels` are nil, meaning that
+no labels are filtered at all, but this still allows a user to globally block some labels
+with high cardinality at the application level.
+
+Examples
+--------
+
+Here is an example of using the package:
+
+```go
+func SlowMethod() {
+	// Profiling the runtime of a method
+	defer metrics.MeasureSince([]string{"SlowMethod"}, time.Now())
+}
+
+// Configure a statsite sink as the global metrics sink
+sink, _ := metrics.NewStatsiteSink("statsite:8125")
+metrics.NewGlobal(metrics.DefaultConfig("service-name"), sink)
+
+// Emit a Key/Value pair
+metrics.EmitKey([]string{"questions", "meaning of life"}, 42)
+```
+
+Here is an example of setting up a signal handler:
+
+```go
+// Setup the inmem sink and signal handler
+inm := metrics.NewInmemSink(10*time.Second, time.Minute)
+sig := metrics.DefaultInmemSignal(inm)
+metrics.NewGlobal(metrics.DefaultConfig("service-name"), inm)
+
+// Run some code
+inm.SetGauge([]string{"foo"}, 42)
+inm.EmitKey([]string{"bar"}, 30)
+
+inm.IncrCounter([]string{"baz"}, 42)
+inm.IncrCounter([]string{"baz"}, 1)
+inm.IncrCounter([]string{"baz"}, 80)
+
+inm.AddSample([]string{"method", "wow"}, 42)
+inm.AddSample([]string{"method", "wow"}, 100)
+inm.AddSample([]string{"method", "wow"}, 22)
+
+....
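+// With the handler above installed, sending SIGUSR1 (SIGBREAK on Windows)
+// to the process dumps the metrics collected so far to stderr.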
+``` + +When a signal comes in, output like the following will be dumped to stderr: + + [2014-01-28 14:57:33.04 -0800 PST][G] 'foo': 42.000 + [2014-01-28 14:57:33.04 -0800 PST][P] 'bar': 30.000 + [2014-01-28 14:57:33.04 -0800 PST][C] 'baz': Count: 3 Min: 1.000 Mean: 41.000 Max: 80.000 Stddev: 39.509 + [2014-01-28 14:57:33.04 -0800 PST][S] 'method.wow': Count: 3 Min: 22.000 Mean: 54.667 Max: 100.000 Stddev: 40.513 \ No newline at end of file diff --git a/vendor/github.com/armon/go-metrics/const_unix.go b/vendor/github.com/armon/go-metrics/const_unix.go new file mode 100644 index 00000000000..31098dd57e5 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/const_unix.go @@ -0,0 +1,12 @@ +// +build !windows + +package metrics + +import ( + "syscall" +) + +const ( + // DefaultSignal is used with DefaultInmemSignal + DefaultSignal = syscall.SIGUSR1 +) diff --git a/vendor/github.com/armon/go-metrics/const_windows.go b/vendor/github.com/armon/go-metrics/const_windows.go new file mode 100644 index 00000000000..38136af3e42 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/const_windows.go @@ -0,0 +1,13 @@ +// +build windows + +package metrics + +import ( + "syscall" +) + +const ( + // DefaultSignal is used with DefaultInmemSignal + // Windows has no SIGUSR1, use SIGBREAK + DefaultSignal = syscall.Signal(21) +) diff --git a/vendor/github.com/armon/go-metrics/inmem.go b/vendor/github.com/armon/go-metrics/inmem.go new file mode 100644 index 00000000000..4e2d6a709e2 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/inmem.go @@ -0,0 +1,348 @@ +package metrics + +import ( + "bytes" + "fmt" + "math" + "net/url" + "strings" + "sync" + "time" +) + +// InmemSink provides a MetricSink that does in-memory aggregation +// without sending metrics over a network. It can be embedded within +// an application to provide profiling information. +type InmemSink struct { + // How long is each aggregation interval + interval time.Duration + + // Retain controls how many metrics interval we keep + retain time.Duration + + // maxIntervals is the maximum length of intervals. + // It is retain / interval. 
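+	// For example, a retain of one minute with a 10 second interval keeps 6 intervals.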
+ maxIntervals int + + // intervals is a slice of the retained intervals + intervals []*IntervalMetrics + intervalLock sync.RWMutex + + rateDenom float64 +} + +// IntervalMetrics stores the aggregated metrics +// for a specific interval +type IntervalMetrics struct { + sync.RWMutex + + // The start time of the interval + Interval time.Time + + // Gauges maps the key to the last set value + Gauges map[string]GaugeValue + + // Points maps the string to the list of emitted values + // from EmitKey + Points map[string][]float32 + + // Counters maps the string key to a sum of the counter + // values + Counters map[string]SampledValue + + // Samples maps the key to an AggregateSample, + // which has the rolled up view of a sample + Samples map[string]SampledValue +} + +// NewIntervalMetrics creates a new IntervalMetrics for a given interval +func NewIntervalMetrics(intv time.Time) *IntervalMetrics { + return &IntervalMetrics{ + Interval: intv, + Gauges: make(map[string]GaugeValue), + Points: make(map[string][]float32), + Counters: make(map[string]SampledValue), + Samples: make(map[string]SampledValue), + } +} + +// AggregateSample is used to hold aggregate metrics +// about a sample +type AggregateSample struct { + Count int // The count of emitted pairs + Rate float64 // The values rate per time unit (usually 1 second) + Sum float64 // The sum of values + SumSq float64 `json:"-"` // The sum of squared values + Min float64 // Minimum value + Max float64 // Maximum value + LastUpdated time.Time `json:"-"` // When value was last updated +} + +// Computes a Stddev of the values +func (a *AggregateSample) Stddev() float64 { + num := (float64(a.Count) * a.SumSq) - math.Pow(a.Sum, 2) + div := float64(a.Count * (a.Count - 1)) + if div == 0 { + return 0 + } + return math.Sqrt(num / div) +} + +// Computes a mean of the values +func (a *AggregateSample) Mean() float64 { + if a.Count == 0 { + return 0 + } + return a.Sum / float64(a.Count) +} + +// Ingest is used to update a sample +func (a *AggregateSample) Ingest(v float64, rateDenom float64) { + a.Count++ + a.Sum += v + a.SumSq += (v * v) + if v < a.Min || a.Count == 1 { + a.Min = v + } + if v > a.Max || a.Count == 1 { + a.Max = v + } + a.Rate = float64(a.Sum) / rateDenom + a.LastUpdated = time.Now() +} + +func (a *AggregateSample) String() string { + if a.Count == 0 { + return "Count: 0" + } else if a.Stddev() == 0 { + return fmt.Sprintf("Count: %d Sum: %0.3f LastUpdated: %s", a.Count, a.Sum, a.LastUpdated) + } else { + return fmt.Sprintf("Count: %d Min: %0.3f Mean: %0.3f Max: %0.3f Stddev: %0.3f Sum: %0.3f LastUpdated: %s", + a.Count, a.Min, a.Mean(), a.Max, a.Stddev(), a.Sum, a.LastUpdated) + } +} + +// NewInmemSinkFromURL creates an InmemSink from a URL. It is used +// (and tested) from NewMetricSinkFromURL. +func NewInmemSinkFromURL(u *url.URL) (MetricSink, error) { + params := u.Query() + + interval, err := time.ParseDuration(params.Get("interval")) + if err != nil { + return nil, fmt.Errorf("Bad 'interval' param: %s", err) + } + + retain, err := time.ParseDuration(params.Get("retain")) + if err != nil { + return nil, fmt.Errorf("Bad 'retain' param: %s", err) + } + + return NewInmemSink(interval, retain), nil +} + +// NewInmemSink is used to construct a new in-memory sink. +// Uses an aggregation interval and maximum retention period. 
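+// Counter and sample rates are normalized per second using the interval length.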
+func NewInmemSink(interval, retain time.Duration) *InmemSink {
+	rateTimeUnit := time.Second
+	i := &InmemSink{
+		interval:     interval,
+		retain:       retain,
+		maxIntervals: int(retain / interval),
+		rateDenom:    float64(interval.Nanoseconds()) / float64(rateTimeUnit.Nanoseconds()),
+	}
+	i.intervals = make([]*IntervalMetrics, 0, i.maxIntervals)
+	return i
+}
+
+func (i *InmemSink) SetGauge(key []string, val float32) {
+	i.SetGaugeWithLabels(key, val, nil)
+}
+
+func (i *InmemSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {
+	k, name := i.flattenKeyLabels(key, labels)
+	intv := i.getInterval()
+
+	intv.Lock()
+	defer intv.Unlock()
+	intv.Gauges[k] = GaugeValue{Name: name, Value: val, Labels: labels}
+}
+
+func (i *InmemSink) EmitKey(key []string, val float32) {
+	k := i.flattenKey(key)
+	intv := i.getInterval()
+
+	intv.Lock()
+	defer intv.Unlock()
+	vals := intv.Points[k]
+	intv.Points[k] = append(vals, val)
+}
+
+func (i *InmemSink) IncrCounter(key []string, val float32) {
+	i.IncrCounterWithLabels(key, val, nil)
+}
+
+func (i *InmemSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {
+	k, name := i.flattenKeyLabels(key, labels)
+	intv := i.getInterval()
+
+	intv.Lock()
+	defer intv.Unlock()
+
+	agg, ok := intv.Counters[k]
+	if !ok {
+		agg = SampledValue{
+			Name:            name,
+			AggregateSample: &AggregateSample{},
+			Labels:          labels,
+		}
+		intv.Counters[k] = agg
+	}
+	agg.Ingest(float64(val), i.rateDenom)
+}
+
+func (i *InmemSink) AddSample(key []string, val float32) {
+	i.AddSampleWithLabels(key, val, nil)
+}
+
+func (i *InmemSink) AddSampleWithLabels(key []string, val float32, labels []Label) {
+	k, name := i.flattenKeyLabels(key, labels)
+	intv := i.getInterval()
+
+	intv.Lock()
+	defer intv.Unlock()
+
+	agg, ok := intv.Samples[k]
+	if !ok {
+		agg = SampledValue{
+			Name:            name,
+			AggregateSample: &AggregateSample{},
+			Labels:          labels,
+		}
+		intv.Samples[k] = agg
+	}
+	agg.Ingest(float64(val), i.rateDenom)
+}
+
+// Data is used to retrieve all the aggregated metrics
+// Intervals may be in use, and a read lock should be acquired
+func (i *InmemSink) Data() []*IntervalMetrics {
+	// Get the current interval, forces creation
+	i.getInterval()
+
+	i.intervalLock.RLock()
+	defer i.intervalLock.RUnlock()
+
+	n := len(i.intervals)
+	intervals := make([]*IntervalMetrics, n)
+
+	copy(intervals[:n-1], i.intervals[:n-1])
+	current := i.intervals[n-1]
+
+	// make a private copy of the current interval
+	intervals[n-1] = &IntervalMetrics{}
+	copyCurrent := intervals[n-1]
+	current.RLock()
+	*copyCurrent = *current
+
+	copyCurrent.Gauges = make(map[string]GaugeValue, len(current.Gauges))
+	for k, v := range current.Gauges {
+		copyCurrent.Gauges[k] = v
+	}
+	// the stored point slices will not change, so copying the slice references is enough
+	copyCurrent.Points = make(map[string][]float32, len(current.Points))
+	for k, v := range current.Points {
+		copyCurrent.Points[k] = v
+	}
+	copyCurrent.Counters = make(map[string]SampledValue, len(current.Counters))
+	for k, v := range current.Counters {
+		copyCurrent.Counters[k] = v
+	}
+	copyCurrent.Samples = make(map[string]SampledValue, len(current.Samples))
+	for k, v := range current.Samples {
+		copyCurrent.Samples[k] = v
+	}
+	current.RUnlock()
+
+	return intervals
+}
+
+func (i *InmemSink) getExistingInterval(intv time.Time) *IntervalMetrics {
+	i.intervalLock.RLock()
+	defer i.intervalLock.RUnlock()
+
+	n := len(i.intervals)
+	if n > 0 && i.intervals[n-1].Interval == intv {
+		return i.intervals[n-1]
+	}
+	return nil
+}
+
+func (i *InmemSink) createInterval(intv
time.Time) *IntervalMetrics { + i.intervalLock.Lock() + defer i.intervalLock.Unlock() + + // Check for an existing interval + n := len(i.intervals) + if n > 0 && i.intervals[n-1].Interval == intv { + return i.intervals[n-1] + } + + // Add the current interval + current := NewIntervalMetrics(intv) + i.intervals = append(i.intervals, current) + n++ + + // Truncate the intervals if they are too long + if n >= i.maxIntervals { + copy(i.intervals[0:], i.intervals[n-i.maxIntervals:]) + i.intervals = i.intervals[:i.maxIntervals] + } + return current +} + +// getInterval returns the current interval to write to +func (i *InmemSink) getInterval() *IntervalMetrics { + intv := time.Now().Truncate(i.interval) + if m := i.getExistingInterval(intv); m != nil { + return m + } + return i.createInterval(intv) +} + +// Flattens the key for formatting, removes spaces +func (i *InmemSink) flattenKey(parts []string) string { + buf := &bytes.Buffer{} + replacer := strings.NewReplacer(" ", "_") + + if len(parts) > 0 { + replacer.WriteString(buf, parts[0]) + } + for _, part := range parts[1:] { + replacer.WriteString(buf, ".") + replacer.WriteString(buf, part) + } + + return buf.String() +} + +// Flattens the key for formatting along with its labels, removes spaces +func (i *InmemSink) flattenKeyLabels(parts []string, labels []Label) (string, string) { + buf := &bytes.Buffer{} + replacer := strings.NewReplacer(" ", "_") + + if len(parts) > 0 { + replacer.WriteString(buf, parts[0]) + } + for _, part := range parts[1:] { + replacer.WriteString(buf, ".") + replacer.WriteString(buf, part) + } + + key := buf.String() + + for _, label := range labels { + replacer.WriteString(buf, fmt.Sprintf(";%s=%s", label.Name, label.Value)) + } + + return buf.String(), key +} diff --git a/vendor/github.com/armon/go-metrics/inmem_endpoint.go b/vendor/github.com/armon/go-metrics/inmem_endpoint.go new file mode 100644 index 00000000000..504f1b37485 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/inmem_endpoint.go @@ -0,0 +1,118 @@ +package metrics + +import ( + "fmt" + "net/http" + "sort" + "time" +) + +// MetricsSummary holds a roll-up of metrics info for a given interval +type MetricsSummary struct { + Timestamp string + Gauges []GaugeValue + Points []PointValue + Counters []SampledValue + Samples []SampledValue +} + +type GaugeValue struct { + Name string + Hash string `json:"-"` + Value float32 + + Labels []Label `json:"-"` + DisplayLabels map[string]string `json:"Labels"` +} + +type PointValue struct { + Name string + Points []float32 +} + +type SampledValue struct { + Name string + Hash string `json:"-"` + *AggregateSample + Mean float64 + Stddev float64 + + Labels []Label `json:"-"` + DisplayLabels map[string]string `json:"Labels"` +} + +// DisplayMetrics returns a summary of the metrics from the most recent finished interval. 
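+// If only one interval exists, the still-aggregating current interval is shown.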
+func (i *InmemSink) DisplayMetrics(resp http.ResponseWriter, req *http.Request) (interface{}, error) { + data := i.Data() + + var interval *IntervalMetrics + n := len(data) + switch { + case n == 0: + return nil, fmt.Errorf("no metric intervals have been initialized yet") + case n == 1: + // Show the current interval if it's all we have + interval = i.intervals[0] + default: + // Show the most recent finished interval if we have one + interval = i.intervals[n-2] + } + + summary := MetricsSummary{ + Timestamp: interval.Interval.Round(time.Second).UTC().String(), + Gauges: make([]GaugeValue, 0, len(interval.Gauges)), + Points: make([]PointValue, 0, len(interval.Points)), + } + + // Format and sort the output of each metric type, so it gets displayed in a + // deterministic order. + for name, points := range interval.Points { + summary.Points = append(summary.Points, PointValue{name, points}) + } + sort.Slice(summary.Points, func(i, j int) bool { + return summary.Points[i].Name < summary.Points[j].Name + }) + + for hash, value := range interval.Gauges { + value.Hash = hash + value.DisplayLabels = make(map[string]string) + for _, label := range value.Labels { + value.DisplayLabels[label.Name] = label.Value + } + value.Labels = nil + + summary.Gauges = append(summary.Gauges, value) + } + sort.Slice(summary.Gauges, func(i, j int) bool { + return summary.Gauges[i].Hash < summary.Gauges[j].Hash + }) + + summary.Counters = formatSamples(interval.Counters) + summary.Samples = formatSamples(interval.Samples) + + return summary, nil +} + +func formatSamples(source map[string]SampledValue) []SampledValue { + output := make([]SampledValue, 0, len(source)) + for hash, sample := range source { + displayLabels := make(map[string]string) + for _, label := range sample.Labels { + displayLabels[label.Name] = label.Value + } + + output = append(output, SampledValue{ + Name: sample.Name, + Hash: hash, + AggregateSample: sample.AggregateSample, + Mean: sample.AggregateSample.Mean(), + Stddev: sample.AggregateSample.Stddev(), + DisplayLabels: displayLabels, + }) + } + sort.Slice(output, func(i, j int) bool { + return output[i].Hash < output[j].Hash + }) + + return output +} diff --git a/vendor/github.com/armon/go-metrics/inmem_signal.go b/vendor/github.com/armon/go-metrics/inmem_signal.go new file mode 100644 index 00000000000..0937f4aedf7 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/inmem_signal.go @@ -0,0 +1,117 @@ +package metrics + +import ( + "bytes" + "fmt" + "io" + "os" + "os/signal" + "strings" + "sync" + "syscall" +) + +// InmemSignal is used to listen for a given signal, and when received, +// to dump the current metrics from the InmemSink to an io.Writer +type InmemSignal struct { + signal syscall.Signal + inm *InmemSink + w io.Writer + sigCh chan os.Signal + + stop bool + stopCh chan struct{} + stopLock sync.Mutex +} + +// NewInmemSignal creates a new InmemSignal which listens for a given signal, +// and dumps the current metrics out to a writer +func NewInmemSignal(inmem *InmemSink, sig syscall.Signal, w io.Writer) *InmemSignal { + i := &InmemSignal{ + signal: sig, + inm: inmem, + w: w, + sigCh: make(chan os.Signal, 1), + stopCh: make(chan struct{}), + } + signal.Notify(i.sigCh, sig) + go i.run() + return i +} + +// DefaultInmemSignal returns a new InmemSignal that responds to SIGUSR1 +// and writes output to stderr. 
Windows uses SIGBREAK +func DefaultInmemSignal(inmem *InmemSink) *InmemSignal { + return NewInmemSignal(inmem, DefaultSignal, os.Stderr) +} + +// Stop is used to stop the InmemSignal from listening +func (i *InmemSignal) Stop() { + i.stopLock.Lock() + defer i.stopLock.Unlock() + + if i.stop { + return + } + i.stop = true + close(i.stopCh) + signal.Stop(i.sigCh) +} + +// run is a long running routine that handles signals +func (i *InmemSignal) run() { + for { + select { + case <-i.sigCh: + i.dumpStats() + case <-i.stopCh: + return + } + } +} + +// dumpStats is used to dump the data to output writer +func (i *InmemSignal) dumpStats() { + buf := bytes.NewBuffer(nil) + + data := i.inm.Data() + // Skip the last period which is still being aggregated + for j := 0; j < len(data)-1; j++ { + intv := data[j] + intv.RLock() + for _, val := range intv.Gauges { + name := i.flattenLabels(val.Name, val.Labels) + fmt.Fprintf(buf, "[%v][G] '%s': %0.3f\n", intv.Interval, name, val.Value) + } + for name, vals := range intv.Points { + for _, val := range vals { + fmt.Fprintf(buf, "[%v][P] '%s': %0.3f\n", intv.Interval, name, val) + } + } + for _, agg := range intv.Counters { + name := i.flattenLabels(agg.Name, agg.Labels) + fmt.Fprintf(buf, "[%v][C] '%s': %s\n", intv.Interval, name, agg.AggregateSample) + } + for _, agg := range intv.Samples { + name := i.flattenLabels(agg.Name, agg.Labels) + fmt.Fprintf(buf, "[%v][S] '%s': %s\n", intv.Interval, name, agg.AggregateSample) + } + intv.RUnlock() + } + + // Write out the bytes + i.w.Write(buf.Bytes()) +} + +// Flattens the key for formatting along with its labels, removes spaces +func (i *InmemSignal) flattenLabels(name string, labels []Label) string { + buf := bytes.NewBufferString(name) + replacer := strings.NewReplacer(" ", "_", ":", "_") + + for _, label := range labels { + replacer.WriteString(buf, ".") + replacer.WriteString(buf, label.Value) + } + + return buf.String() +} diff --git a/vendor/github.com/armon/go-metrics/metrics.go b/vendor/github.com/armon/go-metrics/metrics.go new file mode 100644 index 00000000000..cf9def748e2 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/metrics.go @@ -0,0 +1,278 @@ +package metrics + +import ( + "runtime" + "strings" + "time" + + "github.com/hashicorp/go-immutable-radix" +) + +type Label struct { + Name string + Value string +} + +func (m *Metrics) SetGauge(key []string, val float32) { + m.SetGaugeWithLabels(key, val, nil) +} + +func (m *Metrics) SetGaugeWithLabels(key []string, val float32, labels []Label) { + if m.HostName != "" { + if m.EnableHostnameLabel { + labels = append(labels, Label{"host", m.HostName}) + } else if m.EnableHostname { + key = insert(0, m.HostName, key) + } + } + if m.EnableTypePrefix { + key = insert(0, "gauge", key) + } + if m.ServiceName != "" { + if m.EnableServiceLabel { + labels = append(labels, Label{"service", m.ServiceName}) + } else { + key = insert(0, m.ServiceName, key) + } + } + allowed, labelsFiltered := m.allowMetric(key, labels) + if !allowed { + return + } + m.sink.SetGaugeWithLabels(key, val, labelsFiltered) +} + +func (m *Metrics) EmitKey(key []string, val float32) { + if m.EnableTypePrefix { + key = insert(0, "kv", key) + } + if m.ServiceName != "" { + key = insert(0, m.ServiceName, key) + } + allowed, _ := m.allowMetric(key, nil) + if !allowed { + return + } + m.sink.EmitKey(key, val) +} + +func (m *Metrics) IncrCounter(key []string, val float32) { + m.IncrCounterWithLabels(key, val, nil) +} + +func (m *Metrics) IncrCounterWithLabels(key []string, val float32, 
labels []Label) { + if m.HostName != "" && m.EnableHostnameLabel { + labels = append(labels, Label{"host", m.HostName}) + } + if m.EnableTypePrefix { + key = insert(0, "counter", key) + } + if m.ServiceName != "" { + if m.EnableServiceLabel { + labels = append(labels, Label{"service", m.ServiceName}) + } else { + key = insert(0, m.ServiceName, key) + } + } + allowed, labelsFiltered := m.allowMetric(key, labels) + if !allowed { + return + } + m.sink.IncrCounterWithLabels(key, val, labelsFiltered) +} + +func (m *Metrics) AddSample(key []string, val float32) { + m.AddSampleWithLabels(key, val, nil) +} + +func (m *Metrics) AddSampleWithLabels(key []string, val float32, labels []Label) { + if m.HostName != "" && m.EnableHostnameLabel { + labels = append(labels, Label{"host", m.HostName}) + } + if m.EnableTypePrefix { + key = insert(0, "sample", key) + } + if m.ServiceName != "" { + if m.EnableServiceLabel { + labels = append(labels, Label{"service", m.ServiceName}) + } else { + key = insert(0, m.ServiceName, key) + } + } + allowed, labelsFiltered := m.allowMetric(key, labels) + if !allowed { + return + } + m.sink.AddSampleWithLabels(key, val, labelsFiltered) +} + +func (m *Metrics) MeasureSince(key []string, start time.Time) { + m.MeasureSinceWithLabels(key, start, nil) +} + +func (m *Metrics) MeasureSinceWithLabels(key []string, start time.Time, labels []Label) { + if m.HostName != "" && m.EnableHostnameLabel { + labels = append(labels, Label{"host", m.HostName}) + } + if m.EnableTypePrefix { + key = insert(0, "timer", key) + } + if m.ServiceName != "" { + if m.EnableServiceLabel { + labels = append(labels, Label{"service", m.ServiceName}) + } else { + key = insert(0, m.ServiceName, key) + } + } + allowed, labelsFiltered := m.allowMetric(key, labels) + if !allowed { + return + } + now := time.Now() + elapsed := now.Sub(start) + msec := float32(elapsed.Nanoseconds()) / float32(m.TimerGranularity) + m.sink.AddSampleWithLabels(key, msec, labelsFiltered) +} + +// UpdateFilter overwrites the existing filter with the given rules. +func (m *Metrics) UpdateFilter(allow, block []string) { + m.UpdateFilterAndLabels(allow, block, m.AllowedLabels, m.BlockedLabels) +} + +// UpdateFilterAndLabels overwrites the existing filter with the given rules. 
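+// A nil allowedLabels permits every label that is not explicitly blocked.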
+func (m *Metrics) UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabels []string) { + m.filterLock.Lock() + defer m.filterLock.Unlock() + + m.AllowedPrefixes = allow + m.BlockedPrefixes = block + + if allowedLabels == nil { + // Having a white list means we take only elements from it + m.allowedLabels = nil + } else { + m.allowedLabels = make(map[string]bool) + for _, v := range allowedLabels { + m.allowedLabels[v] = true + } + } + m.blockedLabels = make(map[string]bool) + for _, v := range blockedLabels { + m.blockedLabels[v] = true + } + m.AllowedLabels = allowedLabels + m.BlockedLabels = blockedLabels + + m.filter = iradix.New() + for _, prefix := range m.AllowedPrefixes { + m.filter, _, _ = m.filter.Insert([]byte(prefix), true) + } + for _, prefix := range m.BlockedPrefixes { + m.filter, _, _ = m.filter.Insert([]byte(prefix), false) + } +} + +// labelIsAllowed return true if a should be included in metric +// the caller should lock m.filterLock while calling this method +func (m *Metrics) labelIsAllowed(label *Label) bool { + labelName := (*label).Name + if m.blockedLabels != nil { + _, ok := m.blockedLabels[labelName] + if ok { + // If present, let's remove this label + return false + } + } + if m.allowedLabels != nil { + _, ok := m.allowedLabels[labelName] + return ok + } + // Allow by default + return true +} + +// filterLabels return only allowed labels +// the caller should lock m.filterLock while calling this method +func (m *Metrics) filterLabels(labels []Label) []Label { + if labels == nil { + return nil + } + toReturn := labels[:0] + for _, label := range labels { + if m.labelIsAllowed(&label) { + toReturn = append(toReturn, label) + } + } + return toReturn +} + +// Returns whether the metric should be allowed based on configured prefix filters +// Also return the applicable labels +func (m *Metrics) allowMetric(key []string, labels []Label) (bool, []Label) { + m.filterLock.RLock() + defer m.filterLock.RUnlock() + + if m.filter == nil || m.filter.Len() == 0 { + return m.Config.FilterDefault, m.filterLabels(labels) + } + + _, allowed, ok := m.filter.Root().LongestPrefix([]byte(strings.Join(key, "."))) + if !ok { + return m.Config.FilterDefault, m.filterLabels(labels) + } + + return allowed.(bool), m.filterLabels(labels) +} + +// Periodically collects runtime stats to publish +func (m *Metrics) collectStats() { + for { + time.Sleep(m.ProfileInterval) + m.emitRuntimeStats() + } +} + +// Emits various runtime statsitics +func (m *Metrics) emitRuntimeStats() { + // Export number of Goroutines + numRoutines := runtime.NumGoroutine() + m.SetGauge([]string{"runtime", "num_goroutines"}, float32(numRoutines)) + + // Export memory stats + var stats runtime.MemStats + runtime.ReadMemStats(&stats) + m.SetGauge([]string{"runtime", "alloc_bytes"}, float32(stats.Alloc)) + m.SetGauge([]string{"runtime", "sys_bytes"}, float32(stats.Sys)) + m.SetGauge([]string{"runtime", "malloc_count"}, float32(stats.Mallocs)) + m.SetGauge([]string{"runtime", "free_count"}, float32(stats.Frees)) + m.SetGauge([]string{"runtime", "heap_objects"}, float32(stats.HeapObjects)) + m.SetGauge([]string{"runtime", "total_gc_pause_ns"}, float32(stats.PauseTotalNs)) + m.SetGauge([]string{"runtime", "total_gc_runs"}, float32(stats.NumGC)) + + // Export info about the last few GC runs + num := stats.NumGC + + // Handle wrap around + if num < m.lastNumGC { + m.lastNumGC = 0 + } + + // Ensure we don't scan more than 256 + if num-m.lastNumGC >= 256 { + m.lastNumGC = num - 255 + } + + for i := m.lastNumGC; i < num; 
i++ { + pause := stats.PauseNs[i%256] + m.AddSample([]string{"runtime", "gc_pause_ns"}, float32(pause)) + } + m.lastNumGC = num +} + +// Inserts a string value at an index into the slice +func insert(i int, v string, s []string) []string { + s = append(s, "") + copy(s[i+1:], s[i:]) + s[i] = v + return s +} diff --git a/vendor/github.com/armon/go-metrics/sink.go b/vendor/github.com/armon/go-metrics/sink.go new file mode 100644 index 00000000000..0b7d6e4be43 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/sink.go @@ -0,0 +1,115 @@ +package metrics + +import ( + "fmt" + "net/url" +) + +// The MetricSink interface is used to transmit metrics information +// to an external system +type MetricSink interface { + // A Gauge should retain the last value it is set to + SetGauge(key []string, val float32) + SetGaugeWithLabels(key []string, val float32, labels []Label) + + // Should emit a Key/Value pair for each call + EmitKey(key []string, val float32) + + // Counters should accumulate values + IncrCounter(key []string, val float32) + IncrCounterWithLabels(key []string, val float32, labels []Label) + + // Samples are for timing information, where quantiles are used + AddSample(key []string, val float32) + AddSampleWithLabels(key []string, val float32, labels []Label) +} + +// BlackholeSink is used to just blackhole messages +type BlackholeSink struct{} + +func (*BlackholeSink) SetGauge(key []string, val float32) {} +func (*BlackholeSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {} +func (*BlackholeSink) EmitKey(key []string, val float32) {} +func (*BlackholeSink) IncrCounter(key []string, val float32) {} +func (*BlackholeSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {} +func (*BlackholeSink) AddSample(key []string, val float32) {} +func (*BlackholeSink) AddSampleWithLabels(key []string, val float32, labels []Label) {} + +// FanoutSink is used to sink to fanout values to multiple sinks +type FanoutSink []MetricSink + +func (fh FanoutSink) SetGauge(key []string, val float32) { + fh.SetGaugeWithLabels(key, val, nil) +} + +func (fh FanoutSink) SetGaugeWithLabels(key []string, val float32, labels []Label) { + for _, s := range fh { + s.SetGaugeWithLabels(key, val, labels) + } +} + +func (fh FanoutSink) EmitKey(key []string, val float32) { + for _, s := range fh { + s.EmitKey(key, val) + } +} + +func (fh FanoutSink) IncrCounter(key []string, val float32) { + fh.IncrCounterWithLabels(key, val, nil) +} + +func (fh FanoutSink) IncrCounterWithLabels(key []string, val float32, labels []Label) { + for _, s := range fh { + s.IncrCounterWithLabels(key, val, labels) + } +} + +func (fh FanoutSink) AddSample(key []string, val float32) { + fh.AddSampleWithLabels(key, val, nil) +} + +func (fh FanoutSink) AddSampleWithLabels(key []string, val float32, labels []Label) { + for _, s := range fh { + s.AddSampleWithLabels(key, val, labels) + } +} + +// sinkURLFactoryFunc is an generic interface around the *SinkFromURL() function provided +// by each sink type +type sinkURLFactoryFunc func(*url.URL) (MetricSink, error) + +// sinkRegistry supports the generic NewMetricSink function by mapping URL +// schemes to metric sink factory functions +var sinkRegistry = map[string]sinkURLFactoryFunc{ + "statsd": NewStatsdSinkFromURL, + "statsite": NewStatsiteSinkFromURL, + "inmem": NewInmemSinkFromURL, +} + +// NewMetricSinkFromURL allows a generic URL input to configure any of the +// supported sinks. 
diff --git a/vendor/github.com/armon/go-metrics/start.go b/vendor/github.com/armon/go-metrics/start.go
new file mode 100644
index 00000000000..32a28c48378
--- /dev/null
+++ b/vendor/github.com/armon/go-metrics/start.go
@@ -0,0 +1,141 @@
+package metrics
+
+import (
+	"os"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/hashicorp/go-immutable-radix"
+)
+
+// Config is used to configure metrics settings
+type Config struct {
+	ServiceName          string        // Prefixed with keys to separate services
+	HostName             string        // Hostname to use. If not provided and EnableHostname, it will be os.Hostname
+	EnableHostname       bool          // Enable prefixing gauge values with hostname
+	EnableHostnameLabel  bool          // Enable adding hostname to labels
+	EnableServiceLabel   bool          // Enable adding service to labels
+	EnableRuntimeMetrics bool          // Enables profiling of runtime metrics (GC, Goroutines, Memory)
+	EnableTypePrefix     bool          // Prefixes key with a type ("counter", "gauge", "timer")
+	TimerGranularity     time.Duration // Granularity of timers.
+	ProfileInterval      time.Duration // Interval to profile runtime metrics
+
+	AllowedPrefixes []string // A list of metric prefixes to allow, with '.' as the separator
+	BlockedPrefixes []string // A list of metric prefixes to block, with '.' as the separator
+	AllowedLabels   []string // A list of metric labels to allow, with '.' as the separator
+	BlockedLabels   []string // A list of metric labels to block, with '.' as the separator
+	FilterDefault   bool     // Whether to allow metrics by default
+}
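One reading note on the two filter knobs above, with a small hypothetical sketch: FilterDefault only decides the outcome when no prefix rule matches (see allowMetric in the metrics.go hunk earlier in this diff). Service and prefix names below are illustrative only.

package example

import metrics "github.com/armon/go-metrics"

// allowOnlyAPI drops every metric by default and re-enables a single subtree.
func allowOnlyAPI() *metrics.Config {
	cfg := metrics.DefaultConfig("svc")
	cfg.FilterDefault = false                  // unmatched keys are dropped
	cfg.AllowedPrefixes = []string{"svc.api."} // matched keys are kept
	return cfg
}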
+
+// Metrics represents an instance of a metrics sink that can
+// be used to emit metrics
+type Metrics struct {
+	Config
+	lastNumGC     uint32
+	sink          MetricSink
+	filter        *iradix.Tree
+	allowedLabels map[string]bool
+	blockedLabels map[string]bool
+	filterLock    sync.RWMutex // Lock filters and allowedLabels/blockedLabels access
+}
+
+// Shared global metrics instance
+var globalMetrics atomic.Value // *Metrics
+
+func init() {
+	// Initialize to a blackhole sink to avoid errors
+	globalMetrics.Store(&Metrics{sink: &BlackholeSink{}})
+}
+
+// DefaultConfig provides a sane default configuration
+func DefaultConfig(serviceName string) *Config {
+	c := &Config{
+		ServiceName:          serviceName, // Use client provided service
+		HostName:             "",
+		EnableHostname:       true,             // Enable hostname prefix
+		EnableRuntimeMetrics: true,             // Enable runtime profiling
+		EnableTypePrefix:     false,            // Disable type prefix
+		TimerGranularity:     time.Millisecond, // Timers are in milliseconds
+		ProfileInterval:      time.Second,      // Poll runtime every second
+		FilterDefault:        true,             // Don't filter metrics by default
+	}
+
+	// Try to get the hostname
+	name, _ := os.Hostname()
+	c.HostName = name
+	return c
+}
+
+// New is used to create a new instance of Metrics
+func New(conf *Config, sink MetricSink) (*Metrics, error) {
+	met := &Metrics{}
+	met.Config = *conf
+	met.sink = sink
+	met.UpdateFilterAndLabels(conf.AllowedPrefixes, conf.BlockedPrefixes, conf.AllowedLabels, conf.BlockedLabels)
+
+	// Start the runtime collector
+	if conf.EnableRuntimeMetrics {
+		go met.collectStats()
+	}
+	return met, nil
+}
+
+// NewGlobal is the same as New, but it assigns the metrics object to be
+// used globally as well as returning it.
+func NewGlobal(conf *Config, sink MetricSink) (*Metrics, error) {
+	metrics, err := New(conf, sink)
+	if err == nil {
+		globalMetrics.Store(metrics)
+	}
+	return metrics, err
+}
+
+// Proxy all the methods to the globalMetrics instance
+func SetGauge(key []string, val float32) {
+	globalMetrics.Load().(*Metrics).SetGauge(key, val)
+}
+
+func SetGaugeWithLabels(key []string, val float32, labels []Label) {
+	globalMetrics.Load().(*Metrics).SetGaugeWithLabels(key, val, labels)
+}
+
+func EmitKey(key []string, val float32) {
+	globalMetrics.Load().(*Metrics).EmitKey(key, val)
+}
+
+func IncrCounter(key []string, val float32) {
+	globalMetrics.Load().(*Metrics).IncrCounter(key, val)
+}
+
+func IncrCounterWithLabels(key []string, val float32, labels []Label) {
+	globalMetrics.Load().(*Metrics).IncrCounterWithLabels(key, val, labels)
+}
+
+func AddSample(key []string, val float32) {
+	globalMetrics.Load().(*Metrics).AddSample(key, val)
+}
+
+func AddSampleWithLabels(key []string, val float32, labels []Label) {
+	globalMetrics.Load().(*Metrics).AddSampleWithLabels(key, val, labels)
+}
+
+func MeasureSince(key []string, start time.Time) {
+	globalMetrics.Load().(*Metrics).MeasureSince(key, start)
+}
+
+func MeasureSinceWithLabels(key []string, start time.Time, labels []Label) {
+	globalMetrics.Load().(*Metrics).MeasureSinceWithLabels(key, start, labels)
+}
+
+func UpdateFilter(allow, block []string) {
+	globalMetrics.Load().(*Metrics).UpdateFilter(allow, block)
+}
+
+// UpdateFilterAndLabels sets the allow/block prefixes for metrics, while allowedLabels
+// and blockedLabels - when not nil - allow filtering labels in order to
+// block/allow labels globally (especially useful when a label has a large
+// number of values). See README.md for more information about usage.
+func UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabels []string) {
+	globalMetrics.Load().(*Metrics).UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabels)
+}
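The package-level helpers above all proxy to the instance stored by NewGlobal; a minimal sketch of wiring that up (not part of the vendored diff; names and sink are hypothetical):

package example

import (
	"time"

	metrics "github.com/armon/go-metrics"
)

func initGlobal() error {
	// Until NewGlobal succeeds, the init() blackhole sink swallows all emits.
	_, err := metrics.NewGlobal(metrics.DefaultConfig("svc"), &metrics.BlackholeSink{})
	return err
}

func handle() {
	defer metrics.MeasureSince([]string{"handler", "latency"}, time.Now())
	metrics.IncrCounter([]string{"handler", "calls"}, 1)
}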
diff --git a/vendor/github.com/armon/go-metrics/statsd.go b/vendor/github.com/armon/go-metrics/statsd.go
new file mode 100644
index 00000000000..1bfffce46e2
--- /dev/null
+++ b/vendor/github.com/armon/go-metrics/statsd.go
@@ -0,0 +1,184 @@
+package metrics
+
+import (
+	"bytes"
+	"fmt"
+	"log"
+	"net"
+	"net/url"
+	"strings"
+	"time"
+)
+
+const (
+	// statsdMaxLen is the maximum size of a packet
+	// to send to statsd
+	statsdMaxLen = 1400
+)
+
+// StatsdSink provides a MetricSink that can be used
+// with a statsite or statsd metrics server. It uses
+// only UDP packets, while StatsiteSink uses TCP.
+type StatsdSink struct {
+	addr        string
+	metricQueue chan string
+}
+
+// NewStatsdSinkFromURL creates a StatsdSink from a URL. It is used
+// (and tested) from NewMetricSinkFromURL.
+func NewStatsdSinkFromURL(u *url.URL) (MetricSink, error) {
+	return NewStatsdSink(u.Host)
+}
+
+// NewStatsdSink is used to create a new StatsdSink
+func NewStatsdSink(addr string) (*StatsdSink, error) {
+	s := &StatsdSink{
+		addr:        addr,
+		metricQueue: make(chan string, 4096),
+	}
+	go s.flushMetrics()
+	return s, nil
+}
+
+// Shutdown is used to stop flushing to statsd
+func (s *StatsdSink) Shutdown() {
+	close(s.metricQueue)
+}
+
+func (s *StatsdSink) SetGauge(key []string, val float32) {
+	flatKey := s.flattenKey(key)
+	s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val))
+}
+
+func (s *StatsdSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {
+	flatKey := s.flattenKeyLabels(key, labels)
+	s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val))
+}
+
+func (s *StatsdSink) EmitKey(key []string, val float32) {
+	flatKey := s.flattenKey(key)
+	s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val))
+}
+
+func (s *StatsdSink) IncrCounter(key []string, val float32) {
+	flatKey := s.flattenKey(key)
+	s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val))
+}
+
+func (s *StatsdSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {
+	flatKey := s.flattenKeyLabels(key, labels)
+	s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val))
+}
+
+func (s *StatsdSink) AddSample(key []string, val float32) {
+	flatKey := s.flattenKey(key)
+	s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val))
+}
+
+func (s *StatsdSink) AddSampleWithLabels(key []string, val float32, labels []Label) {
+	flatKey := s.flattenKeyLabels(key, labels)
+	s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val))
+}
+
+// Flattens the key for formatting, removes spaces
+func (s *StatsdSink) flattenKey(parts []string) string {
+	joined := strings.Join(parts, ".")
+	return strings.Map(func(r rune) rune {
+		switch r {
+		case ':':
+			fallthrough
+		case ' ':
+			return '_'
+		default:
+			return r
+		}
+	}, joined)
+}
+
+// Flattens the key along with labels for formatting, removes spaces
+func (s *StatsdSink) flattenKeyLabels(parts []string, labels []Label) string {
+	for _, label := range labels {
+		parts = append(parts, label.Value)
+	}
+	return s.flattenKey(parts)
+}
+
+// Does a non-blocking push to the metrics queue
+func (s *StatsdSink) pushMetric(m string) {
+	select {
+	case s.metricQueue <- m:
+	default:
+	}
+}
+
+// Flushes metrics
+func (s *StatsdSink) flushMetrics() {
+	var sock net.Conn
+	var err error
+	var wait <-chan time.Time
+	ticker := time.NewTicker(flushInterval)
+	defer ticker.Stop()
+
+CONNECT:
+	// Create a buffer
+	buf := bytes.NewBuffer(nil)
+
+	// Attempt to connect
+	sock, err = net.Dial("udp", s.addr)
+	if err != nil {
+		log.Printf("[ERR] Error connecting to statsd! Err: %s", err)
+		goto WAIT
+	}
+
+	for {
+		select {
+		case metric, ok := <-s.metricQueue:
+			// Get a metric from the queue
+			if !ok {
+				goto QUIT
+			}
+
+			// Check if this would overflow the packet size
+			if len(metric)+buf.Len() > statsdMaxLen {
+				_, err := sock.Write(buf.Bytes())
+				buf.Reset()
+				if err != nil {
+					log.Printf("[ERR] Error writing to statsd! Err: %s", err)
+					goto WAIT
+				}
+			}
+
+			// Append to the buffer
+			buf.WriteString(metric)
+
+		case <-ticker.C:
+			if buf.Len() == 0 {
+				continue
+			}
+
+			_, err := sock.Write(buf.Bytes())
+			buf.Reset()
+			if err != nil {
+				log.Printf("[ERR] Error flushing to statsd! Err: %s", err)
+				goto WAIT
+			}
+		}
+	}
+
+WAIT:
+	// Wait for a while
+	wait = time.After(time.Duration(5) * time.Second)
+	for {
+		select {
+		// Dequeue the messages to avoid backlog
+		case _, ok := <-s.metricQueue:
+			if !ok {
+				goto QUIT
+			}
+		case <-wait:
+			goto CONNECT
+		}
+	}
+QUIT:
+	s.metricQueue = nil
+}
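For reference, a small sketch of what the sink above actually queues (not part of the vendored diff; the address is hypothetical). Each emit formats one plain-text statsd line, and flushMetrics batches queued lines into UDP packets of at most statsdMaxLen (1400) bytes:

package example

import metrics "github.com/armon/go-metrics"

func queueExamples() error {
	sink, err := metrics.NewStatsdSink("127.0.0.1:8125")
	if err != nil {
		return err
	}
	defer sink.Shutdown()

	sink.SetGauge([]string{"foo", "bar"}, 42) // queues "foo.bar:42.000000|g\n"
	sink.IncrCounter([]string{"hits"}, 1)     // queues "hits:1.000000|c\n"
	sink.AddSample([]string{"lat"}, 3.5)      // queues "lat:3.500000|ms\n"
	return nil
}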
diff --git a/vendor/github.com/armon/go-metrics/statsite.go b/vendor/github.com/armon/go-metrics/statsite.go
new file mode 100644
index 00000000000..6c0d284d2dd
--- /dev/null
+++ b/vendor/github.com/armon/go-metrics/statsite.go
@@ -0,0 +1,172 @@
+package metrics
+
+import (
+	"bufio"
+	"fmt"
+	"log"
+	"net"
+	"net/url"
+	"strings"
+	"time"
+)
+
+const (
+	// We force flush the statsite metrics after this period of
+	// inactivity. Prevents stats from getting stuck in a buffer
+	// forever.
+	flushInterval = 100 * time.Millisecond
+)
+
+// NewStatsiteSinkFromURL creates a StatsiteSink from a URL. It is used
+// (and tested) from NewMetricSinkFromURL.
+func NewStatsiteSinkFromURL(u *url.URL) (MetricSink, error) {
+	return NewStatsiteSink(u.Host)
+}
+
+// StatsiteSink provides a MetricSink that can be used with a
+// statsite metrics server
+type StatsiteSink struct {
+	addr        string
+	metricQueue chan string
+}
+
+// NewStatsiteSink is used to create a new StatsiteSink
+func NewStatsiteSink(addr string) (*StatsiteSink, error) {
+	s := &StatsiteSink{
+		addr:        addr,
+		metricQueue: make(chan string, 4096),
+	}
+	go s.flushMetrics()
+	return s, nil
+}
+
+// Shutdown is used to stop flushing to statsite
+func (s *StatsiteSink) Shutdown() {
+	close(s.metricQueue)
+}
+
+func (s *StatsiteSink) SetGauge(key []string, val float32) {
+	flatKey := s.flattenKey(key)
+	s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val))
+}
+
+func (s *StatsiteSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {
+	flatKey := s.flattenKeyLabels(key, labels)
+	s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val))
+}
+
+func (s *StatsiteSink) EmitKey(key []string, val float32) {
+	flatKey := s.flattenKey(key)
+	s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val))
+}
+
+func (s *StatsiteSink) IncrCounter(key []string, val float32) {
+	flatKey := s.flattenKey(key)
+	s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val))
+}
+
+func (s *StatsiteSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {
+	flatKey := s.flattenKeyLabels(key, labels)
+	s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val))
+}
+
+func (s *StatsiteSink) AddSample(key []string, val float32) {
+	flatKey := s.flattenKey(key)
+	s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val))
+}
+
+func (s *StatsiteSink) AddSampleWithLabels(key []string, val float32, labels []Label) {
+	flatKey := s.flattenKeyLabels(key, labels)
+	s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val))
+}
+
+// Flattens the key for formatting, removes spaces
+func (s *StatsiteSink) flattenKey(parts []string) string {
+	joined := strings.Join(parts, ".")
+	return strings.Map(func(r rune) rune {
+		switch r {
+		case ':':
+			fallthrough
+		case ' ':
+			return '_'
+		default:
+			return r
+		}
+	}, joined)
+}
+
+// Flattens the key along with labels for formatting, removes spaces
+func (s *StatsiteSink) flattenKeyLabels(parts []string, labels []Label) string {
+	for _, label := range labels {
+		parts = append(parts, label.Value)
+	}
+	return s.flattenKey(parts)
+}
+
+// Does a non-blocking push to the metrics queue
+func (s *StatsiteSink) pushMetric(m string) {
+	select {
+	case s.metricQueue <- m:
+	default:
+	}
+}
+
+// Flushes metrics
+func (s *StatsiteSink) flushMetrics() {
+	var sock net.Conn
+	var err error
+	var wait <-chan time.Time
+	var buffered *bufio.Writer
+	ticker := time.NewTicker(flushInterval)
+	defer ticker.Stop()
+
+CONNECT:
+	// Attempt to connect
+	sock, err = net.Dial("tcp", s.addr)
+	if err != nil {
+		log.Printf("[ERR] Error connecting to statsite! Err: %s", err)
+		goto WAIT
+	}
+
+	// Create a buffered writer
+	buffered = bufio.NewWriter(sock)
+
+	for {
+		select {
+		case metric, ok := <-s.metricQueue:
+			// Get a metric from the queue
+			if !ok {
+				goto QUIT
+			}
+
+			// Try to send to statsite
+			_, err := buffered.Write([]byte(metric))
+			if err != nil {
+				log.Printf("[ERR] Error writing to statsite! Err: %s", err)
+				goto WAIT
+			}
+		case <-ticker.C:
+			if err := buffered.Flush(); err != nil {
+				log.Printf("[ERR] Error flushing to statsite! 
Err: %s", err) + goto WAIT + } + } + } + +WAIT: + // Wait for a while + wait = time.After(time.Duration(5) * time.Second) + for { + select { + // Dequeue the messages to avoid backlog + case _, ok := <-s.metricQueue: + if !ok { + goto QUIT + } + case <-wait: + goto CONNECT + } + } +QUIT: + s.metricQueue = nil +} diff --git a/vendor/github.com/asyncapi/converter-go/LICENSE b/vendor/github.com/asyncapi/converter-go/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/github.com/asyncapi/converter-go/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/asyncapi/converter-go/pkg/decode/decode.go b/vendor/github.com/asyncapi/converter-go/pkg/decode/decode.go new file mode 100644 index 00000000000..93f49d46f8c --- /dev/null +++ b/vendor/github.com/asyncapi/converter-go/pkg/decode/decode.go @@ -0,0 +1,92 @@ +package decode + +import ( + "gopkg.in/yaml.v3" + + "encoding/json" + "fmt" + "io" + "io/ioutil" +) + +type unmarshalFunc func([]byte, interface{}) error + +// FromJSON reads an AsyncAPI document from input in a JSON format +// and stores it in the value. If the operation fails, the function returns an error. +// +// See InvalidProperty, InvalidDocument, UnsupportedAsyncapiVersion in pkg/error. +func FromJSON(v interface{}, reader io.Reader) error { + return json.NewDecoder(reader).Decode(&v) +} + +// FromYaml reads an AsyncAPI document from input in the YAML format +// and stores it in the value. If the operation fails, the function returns an error. +// +// See InvalidProperty, InvalidDocument, UnsupportedAsyncapiVersion in pkg/error. +func FromYaml(v interface{}, reader io.Reader) error { + data, err := ioutil.ReadAll(reader) + if err != nil { + return err + } + err = unmarshalYaml(data, v) + if err != nil { + return err + } + return nil +} + +// FromJSONWithYamlFallback reads an AsyncAPI document from input in the JSON format. +// If the operation fails, the function tries to read the AsyncAPI document in the YAML format. +// If any of the decoding attempts succeeds, the result is stored in the value. +// If both decoding attempts fail, the function returns an error. +// +// See InvalidProperty, InvalidDocument, UnsupportedAsyncapiVersion in pkg/error. 
+func FromJSONWithYamlFallback(out interface{}, reader io.Reader) error { + data, err := ioutil.ReadAll(reader) + if err != nil { + return err + } + for _, unmarshal := range []unmarshalFunc{json.Unmarshal, unmarshalYaml} { + err = unmarshal(data, out) + if err == nil { + return nil + } + } + return err +} + +func unmarshalYaml(in []byte, out interface{}) error { + var result interface{} + if err := yaml.Unmarshal(in, &result); err != nil { + return err + } + *out.(*interface{}) = convertMap(result) + return nil +} + +func convertInterfaceArray(in []interface{}) []interface{} { + result := make([]interface{}, len(in)) + for i, v := range in { + result[i] = convertMap(v) + } + return result +} + +func convertInterfaceMap(in map[interface{}]interface{}) map[string]interface{} { + result := make(map[string]interface{}) + for k, v := range in { + result[fmt.Sprintf("%v", k)] = convertMap(v) + } + return result +} + +func convertMap(v interface{}) interface{} { + switch v := v.(type) { + case []interface{}: + return convertInterfaceArray(v) + case map[interface{}]interface{}: + return convertInterfaceMap(v) + default: + return v + } +} diff --git a/vendor/github.com/asyncapi/parser-go/LICENSE b/vendor/github.com/asyncapi/parser-go/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/github.com/asyncapi/parser-go/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
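Before the parser-go hunks that follow: a brief sketch (not part of the vendored diff) of the JSON-with-YAML-fallback decoding that the converter-go decode package vendored above provides, and on which parser-go's ToMap builds. The inline document is hypothetical.

package main

import (
	"fmt"
	"strings"

	"github.com/asyncapi/converter-go/pkg/decode"
)

func main() {
	// JSON is tried first; on failure the YAML path runs and any
	// map[interface{}]interface{} values are normalized to
	// map[string]interface{} via convertMap.
	var doc interface{}
	if err := decode.FromJSONWithYamlFallback(&doc, strings.NewReader("asyncapi: 2.0.0")); err != nil {
		panic(err)
	}
	m := doc.(map[string]interface{})
	fmt.Println(m["asyncapi"]) // 2.0.0
}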
diff --git a/vendor/github.com/asyncapi/parser-go/pkg/decode/decode.go b/vendor/github.com/asyncapi/parser-go/pkg/decode/decode.go new file mode 100644 index 00000000000..1e6985a2a75 --- /dev/null +++ b/vendor/github.com/asyncapi/parser-go/pkg/decode/decode.go @@ -0,0 +1,25 @@ +package decode + +import ( + "github.com/asyncapi/converter-go/pkg/decode" + "github.com/pkg/errors" + + "io" +) + +var ErrUnableToDecodeDocument = errors.New("unable to decode document") + +func ToMap(reader io.Reader) (map[string]interface{}, error) { + var jsonData interface{} + if err := decode.FromJSONWithYamlFallback(&jsonData, reader); err != nil { + return nil, err + } + var ( + result map[string]interface{} + ok bool + ) + if result, ok = jsonData.(map[string]interface{}); !ok { + return nil, ErrUnableToDecodeDocument + } + return result, nil +} diff --git a/vendor/github.com/asyncapi/parser-go/pkg/error/error.go b/vendor/github.com/asyncapi/parser-go/pkg/error/error.go new file mode 100644 index 00000000000..3f3aae8095e --- /dev/null +++ b/vendor/github.com/asyncapi/parser-go/pkg/error/error.go @@ -0,0 +1,44 @@ +package error + +import "strings" + +type Error struct { + errors []error +} + +var sep = "\n" + +func New(errs ...error) error { + if len(errs) < 1 { + return nil + } + return &Error{ + errors: errs, + } +} + +func Join(a []error, sep string) string { + switch len(a) { + case 0: + return "" + case 1: + return a[0].Error() + } + n := len(sep) * (len(a) - 1) + for i := 0; i < len(a); i++ { + n += len(a[i].Error()) + } + + var b strings.Builder + b.Grow(n) + b.WriteString(a[0].Error()) + for _, e := range a[1:] { + b.WriteString(sep) + b.WriteString(e.Error()) + } + return b.String() +} + +func (e Error) Error() string { + return Join(e.errors, sep) +} diff --git a/vendor/github.com/asyncapi/parser-go/pkg/jsonpath/loader.go b/vendor/github.com/asyncapi/parser-go/pkg/jsonpath/loader.go new file mode 100644 index 00000000000..55c406a2e2d --- /dev/null +++ b/vendor/github.com/asyncapi/parser-go/pkg/jsonpath/loader.go @@ -0,0 +1,47 @@ +package jsonpath + +import ( + "github.com/asyncapi/parser-go/pkg/decode" + + "net/http" + "os" + "strings" +) + +type HttpClient interface { + Get(url string) (resp *http.Response, err error) +} + +type RefLoader func(string) (map[string]interface{}, error) + +func buildHttpLoader(client HttpClient) func(string) (map[string]interface{}, error) { + return func(url string) (map[string]interface{}, error) { + resp, err := client.Get(url) + if err != nil { + return nil, err + } + return decode.ToMap(resp.Body) + } +} + +func (l RefLoader) fileLoader(path string) (map[string]interface{}, error) { + file, err := os.Open(path) + if err != nil { + return nil, err + } + defer file.Close() + return decode.ToMap(file) +} + +func NewRefLoader(client HttpClient) RefLoader { + return buildHttpLoader(client) +} + +func (l RefLoader) Load(documentRef string) (map[string]interface{}, error) { + switch { + case strings.HasPrefix(documentRef, "http://") || strings.HasPrefix(documentRef, "https://"): + return l(documentRef) + default: + return l.fileLoader(documentRef) + } +} diff --git a/vendor/github.com/asyncapi/parser-go/pkg/jsonpath/reference.go b/vendor/github.com/asyncapi/parser-go/pkg/jsonpath/reference.go new file mode 100644 index 00000000000..73ab62207a4 --- /dev/null +++ b/vendor/github.com/asyncapi/parser-go/pkg/jsonpath/reference.go @@ -0,0 +1,143 @@ +package jsonpath + +import ( + "github.com/pkg/errors" + + "fmt" + "strings" +) + +var ( + ErrInvalidPath = errors.New("invalid 
path") + ErrInvalidKey = errors.New("key not found") + ErrInvalidEncoding = errors.New("invalid encoding") + ErrInvalidReference = errors.New("invalid reference") + decodeMap = map[uint8]rune{ + '0': '~', + '1': '/', + } + root = "" +) + +type Ref struct { + pointer string + uri string + path []string +} + +func NewRef(ref interface{}) (Ref, error) { + strRef := fmt.Sprintf("%v", ref) + uri, pointer, err := ParseRefStr(root, strRef) + if err != nil { + return Ref{}, err + } + return Ref{ + pointer: pointer, + uri: uri, + path: strings.Split(pointer, "/")[1:], + }, nil +} + +func (r Ref) String() string { + return fmt.Sprintf(`%s#%s`, r.uri, r.pointer) +} + +func (r Ref) URI() string { + return r.uri +} + +func (r Ref) Path() []string { + return r.path +} + +func (r Ref) NewChild(name interface{}) (Ref, error) { + childEncodedKey := EncodeEntryKey(fmt.Sprintf("%v", name)) + return NewRef(fmt.Sprintf("%s/%s", r.String(), childEncodedKey)) +} + +func DecodeEntryKey(name string) (string, error) { + if name == "~" || name == "/" { + return "", errors.Wrap(ErrInvalidEncoding, name) + } + nameLen := len(name) + if nameLen < 2 { + return name, nil + } + builder := strings.Builder{} + for i := 0; i < nameLen; i++ { + if name[i] != '~' { + builder.WriteRune(rune(name[i])) + continue + } + switch name[i+1] { + case '0', '1': + builder.WriteRune(decodeMap[name[i+1]]) + i++ + continue + default: + return "", errors.Wrap(ErrInvalidEncoding, name) + } + } + return builder.String(), nil +} + +func EncodeEntryKey(name string) string { + builder := strings.Builder{} + for _, r := range name { + switch r { + case '~': + builder.WriteString("~0") + case '/': + builder.WriteString("~1") + default: + builder.WriteRune(r) + } + } + return builder.String() +} + +func getValue(key string, v interface{}) (interface{}, error) { + m, ok := v.(map[string]interface{}) + if !ok { + return nil, ErrInvalidKey + } + result, exist := m[key] + if !exist { + return nil, ErrInvalidKey + } + return result, nil +} + +func GetRefObject(path []string, v interface{}) (map[string]interface{}, error) { + if len(path) < 1 { + return nil, ErrInvalidPath + } + var ( + current = v + err error + ) + for _, key := range path { + current, err = getValue(key, current) + if err != nil { + return nil, err + } + } + result, ok := current.(map[string]interface{}) + if !ok { + return nil, ErrInvalidReference + } + return result, nil +} + +func ParseRefStr(docName string, strRef string) (string, string, error) { + index := strings.IndexByte(strRef, '#') + if index < 0 { + return "", "", errors.Wrapf(ErrInvalidReference, "%s#%s", docName, strRef) + } + document := strRef[:index] + if document == "" { + document = docName + } + pointer := strRef[index+1:] + return document, pointer, nil +} diff --git a/vendor/github.com/asyncapi/parser-go/pkg/parser/parser.go b/vendor/github.com/asyncapi/parser-go/pkg/parser/parser.go new file mode 100644 index 00000000000..b61fb62bc81 --- /dev/null +++ b/vendor/github.com/asyncapi/parser-go/pkg/parser/parser.go @@ -0,0 +1,131 @@ +package parser + +import ( + "bytes" + "encoding/json" + "io" + "net/http" + "net/url" + "os" + + "github.com/asyncapi/parser-go/pkg/decode" + "github.com/asyncapi/parser-go/pkg/jsonpath" + hlsp "github.com/asyncapi/parser-go/pkg/parser/v2" + "github.com/asyncapi/parser-go/pkg/schema" + asyncapi "github.com/asyncapi/parser-go/pkg/schema/asyncapi/v2" + openapi "github.com/asyncapi/parser-go/pkg/schema/openapi/v2" +) + +// Parser parses an AsyncAPI document. 
+type Parser = func(io.Reader, io.Writer) error + +type MessageProcessor func(map[string]interface{}) error + +type EncoderOpts func(*json.Encoder) error + +func (mp MessageProcessor) BuildParser(encoderOpts ...EncoderOpts) Parser { + return func(reader io.Reader, writer io.Writer) error { + // fetch document from reader + var err error + var jsonData map[string]interface{} + jsonData, err = decode.ToMap(reader) + if err != nil { + return err + } + // parse asyncapi schema + refLoader := jsonpath.NewRefLoader(http.DefaultClient) + hlsParser := hlsp.NewParser(refLoader, "#/components/schemas") + err = hlsParser.Parse(jsonData) + if err != nil { + return err + } + // parse supported schemas + err = mp(jsonData) + if err != nil { + return err + } + if writer == nil { + return nil + } + encoder := json.NewEncoder(writer) + for _, opt := range encoderOpts { + if err := opt(encoder); err != nil { + return err + } + } + return encoder.Encode(jsonData) + } +} + +func isURL(str string) bool { + u, err := url.Parse(str) + return err == nil && u.Scheme != "" && u.Host != "" +} + +func isLocalFile(filepath string) bool { + _, err := os.Stat(filepath) + return err == nil +} + +func NewReader(doc string) (io.Reader, error) { + r := bytes.NewBuffer(nil) + if isURL(doc) { + // TODO create a HTTP client with a timeout + resp, err := http.Get(doc) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + _, err = io.Copy(r, resp.Body) + return r, err + } + + if isLocalFile(doc) { + f, err := os.Open(doc) + if err != nil { + return nil, err + } + + defer f.Close() + + _, err = io.Copy(r, f) + return r, err + } + + // Otherwise, doc is considered as the file content in plain text. + r.WriteString(doc) + + return r, nil +} + +func New() (Parser, error) { + schemaParsers := []struct { + parse schema.ParseMessage + labels []string + }{ + { + openapi.Parse, + openapi.Labels, + }, + { + asyncapi.Parse, + asyncapi.Labels, + }, + } + + d := asyncapi.Dispatcher{} + for _, schemaParser := range schemaParsers { + if err := d.Add(schemaParser.parse, schemaParser.labels...); err != nil { + return nil, err + } + } + + messageProcessor := MessageProcessor(asyncapi.BuildMessageProcessor(d)) + parser := messageProcessor.BuildParser(func(encoder *json.Encoder) error { + encoder.SetIndent("", " ") + return nil + }) + + return parser, nil +} diff --git a/vendor/github.com/asyncapi/parser-go/pkg/parser/v2/hlsp.go b/vendor/github.com/asyncapi/parser-go/pkg/parser/v2/hlsp.go new file mode 100644 index 00000000000..1d327715f2c --- /dev/null +++ b/vendor/github.com/asyncapi/parser-go/pkg/parser/v2/hlsp.go @@ -0,0 +1,136 @@ +package v2 + +import ( + "strings" + + parserErrors "github.com/asyncapi/parser-go/pkg/error" + "github.com/asyncapi/parser-go/pkg/jsonpath" + v2 "github.com/asyncapi/parser-go/pkg/schema/asyncapi/v2" + + "github.com/pkg/errors" +) + +type Parser struct { + jsonpath.RefLoader + root string + documents map[string]map[string]interface{} + referenceTrack map[string]bool + blackListedPathMap map[string]bool +} + +var ErrCircularDependency = errors.New("circular dependency") + +func (p Parser) Parse(doc map[string]interface{}) error { + // validate document against schema + err := v2.Parse(doc) + if err != nil { + return err + } + ref, err := jsonpath.NewRef("#") + if err != nil { + return err + } + p.root = ref.URI() + p.documents[p.root] = doc + documentErrors := p.dereference(ref, p.documents[p.root]) + return parserErrors.New(documentErrors...) 
+} + +func (p *Parser) dereferenceMap(rootRef jsonpath.Ref, v *map[string]interface{}) []error { + var errs []error + for key, value := range *v { + //Here be Dragons! + if "$ref" == key { + // need to track visited nodes in order to avoid circular dependencies + if reported, found := p.referenceTrack[rootRef.String()]; found { + if !reported { + errs = append(errs, errors.Wrap(ErrCircularDependency, rootRef.String())) + p.referenceTrack[rootRef.String()] = true + } + continue + } + p.referenceTrack[rootRef.String()] = false + + // to allow recursive de-referencing, prepend the current root uri on local references + if s, ok := value.(string); ok && strings.HasPrefix(s, "#") { + value = rootRef.URI() + s + } + + refKey, err := jsonpath.NewRef(value) + if err != nil { + errs = append(errs, err) + continue + } + _, ok := p.documents[refKey.URI()] + if !ok { + refDoc, err := p.RefLoader.Load(refKey.URI()) + if err != nil { + errs = append(errs, err) + continue + } + p.documents[refKey.URI()] = refDoc + } + refMap, err := jsonpath.GetRefObject(refKey.Path(), p.documents[refKey.URI()]) + if err != nil { + errs = append(errs, errors.Wrap(err, refKey.String())) + continue + } + refErrs := p.dereference(refKey, refMap) + if len(refErrs) > 0 { + errs = append(errs, refErrs...) + continue + } + delete(*v, key) + // inject resolved reference + for refKey, refValue := range refMap { + (*v)[refKey] = refValue + } + continue + } + itemRef, err := rootRef.NewChild(key) + if err != nil { + return append(errs, err) + } + if _, found := p.blackListedPathMap[itemRef.String()]; found { + continue + } + errs = append(errs, p.dereference(itemRef, value)...) + } + return errs +} + +func (p Parser) dereference(ref jsonpath.Ref, v interface{}) []error { + switch v := v.(type) { + case []interface{}: + return p.dereferenceArray(ref, v) + case map[string]interface{}: + return p.dereferenceMap(ref, &v) + default: + return nil + } +} + +func (p Parser) dereferenceArray(ref jsonpath.Ref, v []interface{}) []error { + var errs []error + for i, v := range v { + childRef, err := ref.NewChild(i) + if err != nil { + errs = append(errs, err) + } + errs = append(errs, p.dereference(childRef, v)...) 
+ } + return errs +} + +func NewParser(refLoader jsonpath.RefLoader, blackListedPaths ...string) Parser { + blackListedPathMap := make(map[string]bool) + for _, key := range blackListedPaths { + blackListedPathMap[key] = true + } + return Parser{ + RefLoader: refLoader, + documents: make(map[string]map[string]interface{}), + referenceTrack: make(map[string]bool), + blackListedPathMap: blackListedPathMap, + } +} diff --git a/vendor/github.com/asyncapi/parser-go/pkg/schema/asyncapi/v2/message_processor.go b/vendor/github.com/asyncapi/parser-go/pkg/schema/asyncapi/v2/message_processor.go new file mode 100644 index 00000000000..275874813ae --- /dev/null +++ b/vendor/github.com/asyncapi/parser-go/pkg/schema/asyncapi/v2/message_processor.go @@ -0,0 +1,117 @@ +package v2 + +import ( + parserErrors "github.com/asyncapi/parser-go/pkg/error" + + "github.com/pkg/errors" + + "fmt" +) + +var ErrInvalidValue = errors.New("invalid value") + +func schemaFormat(m map[string]interface{}) string { + schemaFormat, found := m["schemaFormat"] + if !found { + return "" + } + return fmt.Sprintf("%v", schemaFormat) +} + +type Dispatcher map[string]func(interface{}) error + +func (d Dispatcher) do(messages []map[string]interface{}) error { + var errs []error + for _, msg := range messages { + schemaFormat := schemaFormat(msg) + pm, found := d[schemaFormat] + if !found { + continue + } + err := pm(msg) + if err != nil { + errs = append(errs, err) + } + } + return parserErrors.New(errs...) +} + +func (d *Dispatcher) Add(pm func(interface{}) error, labels ...string) error { + for _, key := range labels { + (*d)[key] = pm + } + return nil +} + +func BuildMessageProcessor(dispatcher Dispatcher) func(map[string]interface{}) error { + return func(doc map[string]interface{}) error { + var errs []error + channels, found := (doc)["channels"].(map[string]interface{}) + if !found { + return errors.Wrap(ErrInvalidValue, "channels") + } + for _, channel := range channels { + chanMessages, err := extractMessages(channel) + if err != nil { + errs = append(errs, err) + continue + } + if err := dispatcher.do(chanMessages); err != nil { + errs = append(errs, err) + } + } + return parserErrors.New(errs...) + } +} + +func extractMessages(channel interface{}) ([]map[string]interface{}, error) { + var ( + messages []map[string]interface{} + errs []error + ) + channelMap, ok := channel.(map[string]interface{}) + if !ok { + return nil, errors.Wrap(ErrInvalidValue, "channel") + } + for _, key := range []string{"publish", "subscribe"} { + channel, ok := channelMap[key].(map[string]interface{}) + if ok { + pubMsg, err := extractMessage(channel["message"]) + switch err != nil { + case true: + errs = append(errs, err) + default: + messages = append(messages, pubMsg...) + } + } + } + return messages, parserErrors.New(errs...) +} + +func extractMessage(message interface{}) ([]map[string]interface{}, error) { + msg, ok := message.(map[string]interface{}) + if !ok { + return nil, errors.Wrap(ErrInvalidValue, "message") + } + oneOf, ok := msg["oneOf"] + if !ok { + return []map[string]interface{}{ + msg, + }, nil + } + oneOfList, ok := oneOf.([]interface{}) + if !ok { + return nil, errors.Wrap(ErrInvalidValue, "oneOf") + } + var result []map[string]interface{} + var errs []error + for _, msg := range oneOfList { + msgMap, ok := msg.(map[string]interface{}) + if !ok { + errs = append(errs, errors.Wrap(ErrInvalidValue, "message")) + continue + } + result = append(result, msgMap) + } + return result, parserErrors.New(errs...) 
+}
diff --git a/vendor/github.com/asyncapi/parser-go/pkg/schema/asyncapi/v2/schema_parser.go b/vendor/github.com/asyncapi/parser-go/pkg/schema/asyncapi/v2/schema_parser.go
new file mode 100644
index 00000000000..b6f50767aaf
--- /dev/null
+++ b/vendor/github.com/asyncapi/parser-go/pkg/schema/asyncapi/v2/schema_parser.go
@@ -0,0 +1,51 @@
+package v2
+
+import (
+	"fmt"
+
+	"github.com/pkg/errors"
+
+	parseSchema "github.com/asyncapi/parser-go/pkg/schema"
+	schemas "github.com/asyncapi/spec-json-schemas/v2"
+)
+
+var (
+	Labels  = []string{"asyncapi"}
+	parsers = make(map[string]*parseSchema.Parser)
+)
+
+// Parse parses a document.
+func Parse(v interface{}) error {
+	version, err := extractVersion(v)
+	if err != nil {
+		return errors.Wrap(err, "error extracting AsyncAPI Spec version from provided document")
+	}
+
+	if parsers[version] == nil {
+		s, err := schemas.Get(version)
+		if err != nil {
+			return err
+		}
+
+		if s == nil {
+			return fmt.Errorf("version %q is not supported", version)
+		}
+
+		parsers[version] = parseSchema.NewParser(s)
+	}
+
+	return parsers[version].Parse(v)
+}
+
+func extractVersion(v interface{}) (string, error) {
+	switch doc := v.(type) {
+	case map[string]interface{}:
+		if doc["asyncapi"] == nil {
+			return "", errors.New("the `asyncapi` field is missing")
+		}
+
+		return doc["asyncapi"].(string), nil
+	default:
+		return "", errors.New("only map[string]interface{} type is supported")
+	}
+}
diff --git a/vendor/github.com/asyncapi/parser-go/pkg/schema/jsonschema/draft07/jsonschma.go b/vendor/github.com/asyncapi/parser-go/pkg/schema/jsonschema/draft07/jsonschma.go
new file mode 100644
index 00000000000..867c90b15ce
--- /dev/null
+++ b/vendor/github.com/asyncapi/parser-go/pkg/schema/jsonschema/draft07/jsonschma.go
@@ -0,0 +1,9 @@
+package draft07
+
+import parseSchema "github.com/asyncapi/parser-go/pkg/schema"
+
+var parser = parseSchema.NewParser(schema)
+
+func Parse(v interface{}) error {
+	return parser.Parse(v)
+}
diff --git a/vendor/github.com/asyncapi/parser-go/pkg/schema/jsonschema/draft07/schema.go b/vendor/github.com/asyncapi/parser-go/pkg/schema/jsonschema/draft07/schema.go
new file mode 100644
index 00000000000..ea6287c765e
--- /dev/null
+++ b/vendor/github.com/asyncapi/parser-go/pkg/schema/jsonschema/draft07/schema.go
@@ -0,0 +1,3 @@
+package draft07
+
+var schema = []byte(`{"$schema":"http://json-schema.org/draft-07/schema#","$id":"http://json-schema.org/draft-07/schema#","title":"Core schema 
meta-schema","definitions":{"schemaArray":{"type":"array","minItems":1,"items":{"$ref":"#"}},"nonNegativeInteger":{"type":"integer","minimum":0},"nonNegativeIntegerDefault0":{"allOf":[{"$ref":"#/definitions/nonNegativeInteger"},{"default":0}]},"simpleTypes":{"enum":["array","boolean","integer","null","number","object","string"]},"stringArray":{"type":"array","items":{"type":"string"},"uniqueItems":true,"default":[]}},"type":["object","boolean"],"properties":{"$id":{"type":"string","format":"uri-reference"},"$schema":{"type":"string","format":"uri"},"$ref":{"type":"string","format":"uri-reference"},"$comment":{"type":"string"},"title":{"type":"string"},"description":{"type":"string"},"default":true,"readOnly":{"type":"boolean","default":false},"examples":{"type":"array","items":true},"multipleOf":{"type":"number","exclusiveMinimum":0},"maximum":{"type":"number"},"exclusiveMaximum":{"type":"number"},"minimum":{"type":"number"},"exclusiveMinimum":{"type":"number"},"maxLength":{"$ref":"#/definitions/nonNegativeInteger"},"minLength":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"pattern":{"type":"string","format":"regex"},"additionalItems":{"$ref":"#"},"items":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/schemaArray"}],"default":true},"maxItems":{"$ref":"#/definitions/nonNegativeInteger"},"minItems":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"uniqueItems":{"type":"boolean","default":false},"contains":{"$ref":"#"},"maxProperties":{"$ref":"#/definitions/nonNegativeInteger"},"minProperties":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"required":{"$ref":"#/definitions/stringArray"},"additionalProperties":{"$ref":"#"},"definitions":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"properties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"patternProperties":{"type":"object","additionalProperties":{"$ref":"#"},"propertyNames":{"format":"regex"},"default":{}},"dependencies":{"type":"object","additionalProperties":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/stringArray"}]}},"propertyNames":{"$ref":"#"},"const":true,"enum":{"type":"array","items":true,"minItems":1,"uniqueItems":true},"type":{"anyOf":[{"$ref":"#/definitions/simpleTypes"},{"type":"array","items":{"$ref":"#/definitions/simpleTypes"},"minItems":1,"uniqueItems":true}]},"format":{"type":"string"},"contentMediaType":{"type":"string"},"contentEncoding":{"type":"string"},"if":{"$ref":"#"},"then":{"$ref":"#"},"else":{"$ref":"#"},"allOf":{"$ref":"#/definitions/schemaArray"},"anyOf":{"$ref":"#/definitions/schemaArray"},"oneOf":{"$ref":"#/definitions/schemaArray"},"not":{"$ref":"#"}},"default":true}`) diff --git a/vendor/github.com/asyncapi/parser-go/pkg/schema/openapi/v2/openapi.go b/vendor/github.com/asyncapi/parser-go/pkg/schema/openapi/v2/openapi.go new file mode 100644 index 00000000000..ca7b8993ebf --- /dev/null +++ b/vendor/github.com/asyncapi/parser-go/pkg/schema/openapi/v2/openapi.go @@ -0,0 +1,189 @@ +package v2 + +import ( + parserErrors "github.com/asyncapi/parser-go/pkg/error" + parserSchema "github.com/asyncapi/parser-go/pkg/schema" + "github.com/asyncapi/parser-go/pkg/schema/jsonschema/draft07" + + "github.com/pkg/errors" + + "fmt" +) + +var ( + ErrInvalidSchema = errors.New("invalid OpenAPI/AsyncAPI schema") + _ parserSchema.ParseMessage = Parse + openapiSchemaParser = parserSchema.NewParser(schema) + null = "null" + Labels = []string{ + "", + "openapi", + "application/vnd.oai.openapi", + "application/vnd.asyncapi", + } +) + +func Parse(data interface{}) error { + 
message, ok := data.(*map[string]interface{}) + if !ok { + return nil + } + payload, found := (*message)["payload"] + if !found { + return nil + } + if err := openapiSchemaParser.Parse(payload); err != nil { + // not a boolean value or object + return err + } + schema, ok := payload.(map[string]interface{}) + if !ok { + // a boolean value + return nil + } + return reduceAndCorrectSchema(&schema) +} + +func reduceAndCorrectSchema(schema *map[string]interface{}) error { + var errs []error + for _, fn := range []func(*map[string]interface{}) error{ + reduceExample, + reduceAndCorrectProperties, + reduceAndCorrectAdditionalProperties, + correctType, + } { + err := fn(schema) + if err != nil { + errs = append(errs, err) + } + } + return parserErrors.New(errs...) +} + +func reduceExample(schema *map[string]interface{}) error { + example := mapValue("example", schema) + if example == nil { + return nil + } + examples := mapSliceValue("examples", schema) + examples = append(examples, example) + (*schema)["examples"] = examples + delete(*schema, "example") + return nil +} + +func reduceAndCorrectProperties(schema *map[string]interface{}) error { + properties := mapObjectValue("properties", schema) + if properties == nil { + return nil + } + var errs []error + for k, v := range *properties { + property, ok := v.(map[string]interface{}) + if !ok { + errs = append(errs, errors.Wrap(ErrInvalidSchema, k)) + continue + } + err := draft07.Parse(property) + if err != nil { + // property is not a schema + continue + } + err = reduceAndCorrectSchema(&property) + if err != nil { + errs = append(errs, err) + } + } + if len(errs) > 1 { + return parserErrors.New(errs...) + } + (*schema)["properties"] = properties + return nil +} + +func reduceAndCorrectAdditionalProperties(schema *map[string]interface{}) error { + additionalProperties := mapObjectValue("additionalProperties", schema) + if additionalProperties == nil { + return nil + } + err := draft07.Parse(additionalProperties) + if err != nil { + // additional property is not a schema + return nil + } + err = reduceAndCorrectSchema(additionalProperties) + if err != nil { + return err + } + (*schema)["additionalProperties"] = additionalProperties + return nil +} + +func mapValue(key string, v interface{}) interface{} { + value, ok := v.(*map[string]interface{}) + if !ok { + return nil + } + return (*value)[key] +} + +func mapSliceValue(key string, v interface{}) []interface{} { + value := mapValue(key, v) + if value == nil { + return nil + } + sliceValue, ok := value.([]interface{}) + if !ok { + return nil + } + return sliceValue +} + +func mapObjectValue(key string, v interface{}) *map[string]interface{} { + value := mapValue(key, v) + if value == nil { + return nil + } + objectValue, ok := value.(map[string]interface{}) + if !ok { + return nil + } + return &objectValue +} + +func correctType(schema *map[string]interface{}) error { + nullable, ok := (*schema)["nullable"].(bool) + if !ok || !nullable { + return nil + } + schemaType := mapValue("type", schema) + schemaTypeSlice, ok := schemaType.([]interface{}) + if !ok { + (*schema)["type"] = toStringSlice(schemaType) + delete(*schema, "nullable") + return nil + } + if containsNull(schemaTypeSlice) { + return nil + } + (*schema)["type"] = append(schemaTypeSlice, null) + delete(*schema, "nullable") + return nil +} + +func toStringSlice(v interface{}) []interface{} { + if v == nil || fmt.Sprintf("%v", v) == null { + return []interface{}{null} + } + return []interface{}{v, null} +} + +func containsNull(typeSlice 
[]interface{}) bool { + for _, itemType := range typeSlice { + typeStr := fmt.Sprintf("%v", itemType) + if typeStr == null { + return true + } + } + return false +} diff --git a/vendor/github.com/asyncapi/parser-go/pkg/schema/openapi/v2/schema.go b/vendor/github.com/asyncapi/parser-go/pkg/schema/openapi/v2/schema.go new file mode 100644 index 00000000000..1f261d52d55 --- /dev/null +++ b/vendor/github.com/asyncapi/parser-go/pkg/schema/openapi/v2/schema.go @@ -0,0 +1,3 @@ +package v2 + +var schema = []byte(`{"title":"OpenAPI schema","$schema":"http://json-schema.org/draft-04/schema#","description":"A deterministic version of a JSON Schema object.","type":"object","additionalProperties":false,"patternProperties":{"^x-[\\w\\d\\.\\-\\_]+$":{"$ref":"#/definitions/specificationExtension"}},"properties":{"$ref":{"$ref":"#/definitions/ReferenceObject"},"format":{"type":"string"},"nullable":{"type":"boolean","default":false},"title":{"$ref":"http://json-schema.org/draft-04/schema#/properties/title"},"description":{"$ref":"http://json-schema.org/draft-04/schema#/properties/description"},"default":{"$ref":"http://json-schema.org/draft-04/schema#/properties/default"},"multipleOf":{"$ref":"http://json-schema.org/draft-04/schema#/properties/multipleOf"},"maximum":{"$ref":"http://json-schema.org/draft-04/schema#/properties/maximum"},"exclusiveMaximum":{"$ref":"http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum"},"minimum":{"$ref":"http://json-schema.org/draft-04/schema#/properties/minimum"},"exclusiveMinimum":{"$ref":"http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum"},"maxLength":{"$ref":"http://json-schema.org/draft-04/schema#/definitions/positiveInteger"},"minLength":{"$ref":"http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0"},"pattern":{"$ref":"http://json-schema.org/draft-04/schema#/properties/pattern"},"maxItems":{"$ref":"http://json-schema.org/draft-04/schema#/definitions/positiveInteger"},"minItems":{"$ref":"http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0"},"uniqueItems":{"$ref":"http://json-schema.org/draft-04/schema#/properties/uniqueItems"},"maxProperties":{"$ref":"http://json-schema.org/draft-04/schema#/definitions/positiveInteger"},"minProperties":{"$ref":"http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0"},"required":{"$ref":"http://json-schema.org/draft-04/schema#/definitions/stringArray"},"enum":{"$ref":"http://json-schema.org/draft-04/schema#/properties/enum"},"deprecated":{"type":"boolean","default":false},"additionalProperties":{"anyOf":[{"$ref":"#"},{"type":"boolean"}],"default":{}},"type":{"$ref":"http://json-schema.org/draft-04/schema#/properties/type"},"items":{"anyOf":[{"$ref":"#"},{"type":"array","minItems":1,"items":{"$ref":"#"}}],"default":{}},"allOf":{"type":"array","minItems":1,"items":{"$ref":"#"}},"oneOf":{"type":"array","minItems":2,"items":{"$ref":"#"}},"anyOf":{"type":"array","minItems":2,"items":{"$ref":"#"}},"not":{"$ref":"#"},"properties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"discriminator":{"type":"string"},"readOnly":{"type":"boolean","default":false},"xml":{"$ref":"#/definitions/xml"},"externalDocs":{"$ref":"#/definitions/externalDocs"},"example":{},"examples":{"type":"array","items":{}}},"definitions":{"ReferenceObject":{"type":"string","format":"uri"},"specificationExtension":{"description":"Any property starting with x- is 
valid.","additionalProperties":true,"additionalItems":true},"xml":{"type":"object","additionalProperties":false,"properties":{"name":{"type":"string"},"namespace":{"type":"string"},"prefix":{"type":"string"},"attribute":{"type":"boolean","default":false},"wrapped":{"type":"boolean","default":false}}},"externalDocs":{"type":"object","additionalProperties":false,"description":"information about external documentation","required":["url"],"properties":{"description":{"type":"string"},"url":{"type":"string","format":"uri"}},"patternProperties":{"^x-[\\w\\d\\.\\-\\_]+$":{"$ref":"#/definitions/specificationExtension"}}}}}`) diff --git a/vendor/github.com/asyncapi/parser-go/pkg/schema/parser.go b/vendor/github.com/asyncapi/parser-go/pkg/schema/parser.go new file mode 100644 index 00000000000..d767b247068 --- /dev/null +++ b/vendor/github.com/asyncapi/parser-go/pkg/schema/parser.go @@ -0,0 +1,42 @@ +package schema + +import ( + parserErrors "github.com/asyncapi/parser-go/pkg/error" + + "github.com/pkg/errors" + "github.com/xeipuuv/gojsonschema" +) + +type ParseMessage func(interface{}) error + +var _ ParseMessage = (&Parser{}).Parse + +// Parser parses a given structure and validates it against a JSON Schema. +type Parser struct { + schemaLoader gojsonschema.JSONLoader +} + +// NewParser creates a new Parser. +func NewParser(schema []byte) *Parser { + return &Parser{ + schemaLoader: gojsonschema.NewBytesLoader(schema), + } +} + +// Parse is the main function to parse a given structure and validate it against a JSON Schema. +// Implements ParseMessage. +func (p *Parser) Parse(data interface{}) error { + documentLoader := gojsonschema.NewGoLoader(data) + result, err := gojsonschema.Validate(p.schemaLoader, documentLoader) + if err != nil { + return err + } + if !result.Valid() { + var errs []error + for _, err := range result.Errors() { + errs = append(errs, errors.New(err.String())) + } + return parserErrors.New(errs...) + } + return nil +} diff --git a/vendor/github.com/asyncapi/spec-json-schemas/v2/.gitignore b/vendor/github.com/asyncapi/spec-json-schemas/v2/.gitignore new file mode 100644 index 00000000000..d4e62d0df5e --- /dev/null +++ b/vendor/github.com/asyncapi/spec-json-schemas/v2/.gitignore @@ -0,0 +1,4 @@ +node_modules +.nyc_output +.vscode +coverage diff --git a/vendor/github.com/asyncapi/spec-json-schemas/v2/.npmignore b/vendor/github.com/asyncapi/spec-json-schemas/v2/.npmignore new file mode 100644 index 00000000000..2c6f450e224 --- /dev/null +++ b/vendor/github.com/asyncapi/spec-json-schemas/v2/.npmignore @@ -0,0 +1,5 @@ +.github +test +go.mod +go.sum +*.go diff --git a/vendor/github.com/asyncapi/spec-json-schemas/v2/CODEOWNERS b/vendor/github.com/asyncapi/spec-json-schemas/v2/CODEOWNERS new file mode 100644 index 00000000000..864dbb4be21 --- /dev/null +++ b/vendor/github.com/asyncapi/spec-json-schemas/v2/CODEOWNERS @@ -0,0 +1,9 @@ +# This file provides an overview of code owners in this repository. + +# Each line is a file pattern followed by one or more owners. +# The last matching pattern has the most precedence. +# For more details, read the following article on GitHub: https://help.github.com/articles/about-codeowners/. + +# The default owners are automatically added as reviewers when you open a pull request unless different owners are specified in the file. 
+ +* @fmvilas @derberg @dalelane @asyncapi-bot-eve \ No newline at end of file diff --git a/vendor/github.com/asyncapi/spec-json-schemas/v2/LICENSE b/vendor/github.com/asyncapi/spec-json-schemas/v2/LICENSE new file mode 100644 index 00000000000..1fe9f90113e --- /dev/null +++ b/vendor/github.com/asyncapi/spec-json-schemas/v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright The Linux Foundation
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/asyncapi/spec-json-schemas/v2/README.md b/vendor/github.com/asyncapi/spec-json-schemas/v2/README.md
new file mode 100644
index 00000000000..93fd25aac53
--- /dev/null
+++ b/vendor/github.com/asyncapi/spec-json-schemas/v2/README.md
@@ -0,0 +1,65 @@
+![npm](https://img.shields.io/npm/v/@asyncapi/specs?style=for-the-badge) ![npm](https://img.shields.io/npm/dt/@asyncapi/specs?style=for-the-badge)
+
+# AsyncAPI
+
+This package provides all the versions of the AsyncAPI schema.
+
+## Installation
+
+### NodeJS
+```bash
+npm install @asyncapi/specs
+```
+
+### Go
+```bash
+go get github.com/asyncapi/spec-json-schemas/v2
+```
+
+## Usage
+
+### NodeJS
+
+Grab a specific AsyncAPI version:
+
+```js
+const asyncapi = require('@asyncapi/specs/schemas/2.0.0');
+
+// Do something with the schema.
+```
+
+Get a list of versions:
+
+```js
+const versions = require('@asyncapi/specs');
+
+console.log(versions);
+// Outputs:
+//
+// {
+//   '1.0.0': [Object],
+//   '1.1.0': [Object]
+// }
+
+const asyncapi = versions['1.1.0'];
+
+// Do something with the schema.
+```
+
+### Go
+
+Grab a specific AsyncAPI version:
+
+```go
+import "github.com/asyncapi/spec-json-schemas/v2"
+
+func Do() {
+	schema, err := spec_json_schemas.Get("1.1.0")
+	if err != nil {
+		panic(err)
+	}
+
+	// Do something with the schema
+}
+
+```
\ No newline at end of file
diff --git a/vendor/github.com/asyncapi/spec-json-schemas/v2/go.mod b/vendor/github.com/asyncapi/spec-json-schemas/v2/go.mod
new file mode 100644
index 00000000000..9d5c91a8e83
--- /dev/null
+++ b/vendor/github.com/asyncapi/spec-json-schemas/v2/go.mod
@@ -0,0 +1,11 @@
+module github.com/asyncapi/spec-json-schemas/v2
+
+go 1.17
+
+require github.com/stretchr/testify v1.7.0
+
+require (
+	github.com/davecgh/go-spew v1.1.0 // indirect
+	github.com/pmezard/go-difflib v1.0.0 // indirect
+	gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c // indirect
+)
diff --git a/vendor/github.com/asyncapi/spec-json-schemas/v2/go.sum b/vendor/github.com/asyncapi/spec-json-schemas/v2/go.sum
new file mode 100644
index 00000000000..acb88a48f68
--- /dev/null
+++ b/vendor/github.com/asyncapi/spec-json-schemas/v2/go.sum
@@ -0,0 +1,11 @@
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/vendor/github.com/asyncapi/spec-json-schemas/v2/index.js b/vendor/github.com/asyncapi/spec-json-schemas/v2/index.js
new file mode 100644
index 00000000000..6a4f5e665d6
--- /dev/null
+++ b/vendor/github.com/asyncapi/spec-json-schemas/v2/index.js
@@ -0,0 +1,12 @@
+module.exports = {
+  '1.0.0': require('./schemas/1.0.0.json'),
+  '1.1.0': require('./schemas/1.1.0.json'),
+  '1.2.0': require('./schemas/1.2.0.json'),
+  '2.0.0-rc1': require('./schemas/2.0.0-rc1.json'),
+  '2.0.0-rc2': require('./schemas/2.0.0-rc2.json'),
+  '2.0.0': require('./schemas/2.0.0.json'),
+  '2.1.0': require('./schemas/2.1.0.json'),
+  '2.2.0': require('./schemas/2.2.0.json'),
+  '2.3.0': require('./schemas/2.3.0.json'),
+  '2.4.0': require('./schemas/2.4.0.json'),
+};
diff --git a/vendor/github.com/asyncapi/spec-json-schemas/v2/package-lock.json b/vendor/github.com/asyncapi/spec-json-schemas/v2/package-lock.json
new file mode 100644
index 00000000000..b5dcba92362
--- /dev/null
+++ b/vendor/github.com/asyncapi/spec-json-schemas/v2/package-lock.json
@@ -0,0 +1,18851 @@
+{
+  "name": "@asyncapi/specs",
+  "version": "2.14.0-2022-04-release.3",
+  "lockfileVersion": 2,
+  "requires": true,
+  "packages": {
+    "": {
+      "name": "@asyncapi/specs",
+      "version": "2.14.0-2022-04-release.2",
+      "license": "Apache-2.0",
+      "devDependencies": {
+        "@semantic-release/commit-analyzer": "^8.0.1",
+        "@semantic-release/github": "7.2.3",
+
"@semantic-release/npm": "^7.0.3", + "@semantic-release/release-notes-generator": "^9.0.1", + "conventional-changelog-conventionalcommits": "^4.2.3", + "mocha": "^8.2.1", + "nyc": "^15.1.0", + "semantic-release": "17.4.3" + } + }, + "node_modules/@babel/code-frame": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.8.3.tgz", + "integrity": "sha512-a9gxpmdXtZEInkCSHUJDLHZVBgb1QS0jhss4cPP93EW7s+uC5bikET2twEF3KV+7rDblJcmNvTR7VJejqd2C2g==", + "dev": true, + "dependencies": { + "@babel/highlight": "^7.8.3" + } + }, + "node_modules/@babel/core": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.12.13.tgz", + "integrity": "sha512-BQKE9kXkPlXHPeqissfxo0lySWJcYdEP0hdtJOH/iJfDdhOCcgtNCjftCJg3qqauB4h+lz2N6ixM++b9DN1Tcw==", + "dev": true, + "dependencies": { + "@babel/code-frame": "^7.12.13", + "@babel/generator": "^7.12.13", + "@babel/helper-module-transforms": "^7.12.13", + "@babel/helpers": "^7.12.13", + "@babel/parser": "^7.12.13", + "@babel/template": "^7.12.13", + "@babel/traverse": "^7.12.13", + "@babel/types": "^7.12.13", + "convert-source-map": "^1.7.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.1", + "json5": "^2.1.2", + "lodash": "^4.17.19", + "semver": "^5.4.1", + "source-map": "^0.5.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/babel" + } + }, + "node_modules/@babel/core/node_modules/@babel/code-frame": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.12.13.tgz", + "integrity": "sha512-HV1Cm0Q3ZrpCR93tkWOYiuYIgLxZXZFVG2VgK+MBWjUqZTundupbfx2aXarXuw5Ko5aMcjtJgbSs4vUGBS5v6g==", + "dev": true, + "dependencies": { + "@babel/highlight": "^7.12.13" + } + }, + "node_modules/@babel/core/node_modules/@babel/highlight": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.12.13.tgz", + "integrity": "sha512-kocDQvIbgMKlWxXe9fof3TQ+gkIPOUSEYhJjqUjvKMez3krV7vbzYCDq39Oj11UAVK7JqPVGQPlgE85dPNlQww==", + "dev": true, + "dependencies": { + "@babel/helper-validator-identifier": "^7.12.11", + "chalk": "^2.0.0", + "js-tokens": "^4.0.0" + } + }, + "node_modules/@babel/core/node_modules/source-map": { + "version": "0.5.7", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", + "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/@babel/generator": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.12.13.tgz", + "integrity": "sha512-9qQ8Fgo8HaSvHEt6A5+BATP7XktD/AdAnObUeTRz5/e2y3kbrxZgz32qUJJsdmwUvBJzF4AeV21nGTNwv05Mpw==", + "dev": true, + "dependencies": { + "@babel/types": "^7.12.13", + "jsesc": "^2.5.1", + "source-map": "^0.5.0" + } + }, + "node_modules/@babel/generator/node_modules/source-map": { + "version": "0.5.7", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", + "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/@babel/helper-function-name": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.12.13.tgz", + "integrity": "sha512-TZvmPn0UOqmvi5G4vvw0qZTpVptGkB1GL61R6lKvrSdIxGm5Pky7Q3fpKiIkQCAtRCBUwB0PaThlx9vebCDSwA==", + "dev": true, + "dependencies": { + "@babel/helper-get-function-arity": "^7.12.13", + "@babel/template": 
"^7.12.13", + "@babel/types": "^7.12.13" + } + }, + "node_modules/@babel/helper-get-function-arity": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/helper-get-function-arity/-/helper-get-function-arity-7.12.13.tgz", + "integrity": "sha512-DjEVzQNz5LICkzN0REdpD5prGoidvbdYk1BVgRUOINaWJP2t6avB27X1guXK1kXNrX0WMfsrm1A/ZBthYuIMQg==", + "dev": true, + "dependencies": { + "@babel/types": "^7.12.13" + } + }, + "node_modules/@babel/helper-member-expression-to-functions": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.12.13.tgz", + "integrity": "sha512-B+7nN0gIL8FZ8SvMcF+EPyB21KnCcZHQZFczCxbiNGV/O0rsrSBlWGLzmtBJ3GMjSVMIm4lpFhR+VdVBuIsUcQ==", + "dev": true, + "dependencies": { + "@babel/types": "^7.12.13" + } + }, + "node_modules/@babel/helper-module-imports": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.12.13.tgz", + "integrity": "sha512-NGmfvRp9Rqxy0uHSSVP+SRIW1q31a7Ji10cLBcqSDUngGentY4FRiHOFZFE1CLU5eiL0oE8reH7Tg1y99TDM/g==", + "dev": true, + "dependencies": { + "@babel/types": "^7.12.13" + } + }, + "node_modules/@babel/helper-module-transforms": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.12.13.tgz", + "integrity": "sha512-acKF7EjqOR67ASIlDTupwkKM1eUisNAjaSduo5Cz+793ikfnpe7p4Q7B7EWU2PCoSTPWsQkR7hRUWEIZPiVLGA==", + "dev": true, + "dependencies": { + "@babel/helper-module-imports": "^7.12.13", + "@babel/helper-replace-supers": "^7.12.13", + "@babel/helper-simple-access": "^7.12.13", + "@babel/helper-split-export-declaration": "^7.12.13", + "@babel/helper-validator-identifier": "^7.12.11", + "@babel/template": "^7.12.13", + "@babel/traverse": "^7.12.13", + "@babel/types": "^7.12.13", + "lodash": "^4.17.19" + } + }, + "node_modules/@babel/helper-optimise-call-expression": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.12.13.tgz", + "integrity": "sha512-BdWQhoVJkp6nVjB7nkFWcn43dkprYauqtk++Py2eaf/GRDFm5BxRqEIZCiHlZUGAVmtwKcsVL1dC68WmzeFmiA==", + "dev": true, + "dependencies": { + "@babel/types": "^7.12.13" + } + }, + "node_modules/@babel/helper-replace-supers": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.12.13.tgz", + "integrity": "sha512-pctAOIAMVStI2TMLhozPKbf5yTEXc0OJa0eENheb4w09SrgOWEs+P4nTOZYJQCqs8JlErGLDPDJTiGIp3ygbLg==", + "dev": true, + "dependencies": { + "@babel/helper-member-expression-to-functions": "^7.12.13", + "@babel/helper-optimise-call-expression": "^7.12.13", + "@babel/traverse": "^7.12.13", + "@babel/types": "^7.12.13" + } + }, + "node_modules/@babel/helper-simple-access": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.12.13.tgz", + "integrity": "sha512-0ski5dyYIHEfwpWGx5GPWhH35j342JaflmCeQmsPWcrOQDtCN6C1zKAVRFVbK53lPW2c9TsuLLSUDf0tIGJ5hA==", + "dev": true, + "dependencies": { + "@babel/types": "^7.12.13" + } + }, + "node_modules/@babel/helper-split-export-declaration": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.12.13.tgz", + "integrity": "sha512-tCJDltF83htUtXx5NLcaDqRmknv652ZWCHyoTETf1CXYJdPC7nohZohjUgieXhv0hTJdRf2FjDueFehdNucpzg==", + "dev": 
true, + "dependencies": { + "@babel/types": "^7.12.13" + } + }, + "node_modules/@babel/helper-validator-identifier": { + "version": "7.12.11", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.12.11.tgz", + "integrity": "sha512-np/lG3uARFybkoHokJUmf1QfEvRVCPbmQeUQpKow5cQ3xWrV9i3rUHodKDJPQfTVX61qKi+UdYk8kik84n7XOw==", + "dev": true + }, + "node_modules/@babel/helpers": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.12.13.tgz", + "integrity": "sha512-oohVzLRZ3GQEk4Cjhfs9YkJA4TdIDTObdBEZGrd6F/T0GPSnuV6l22eMcxlvcvzVIPH3VTtxbseudM1zIE+rPQ==", + "dev": true, + "dependencies": { + "@babel/template": "^7.12.13", + "@babel/traverse": "^7.12.13", + "@babel/types": "^7.12.13" + } + }, + "node_modules/@babel/highlight": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.8.3.tgz", + "integrity": "sha512-PX4y5xQUvy0fnEVHrYOarRPXVWafSjTW9T0Hab8gVIawpl2Sj0ORyrygANq+KjcNlSSTw0YCLSNA8OyZ1I4yEg==", + "dev": true, + "dependencies": { + "chalk": "^2.0.0", + "esutils": "^2.0.2", + "js-tokens": "^4.0.0" + } + }, + "node_modules/@babel/parser": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.12.13.tgz", + "integrity": "sha512-z7n7ybOUzaRc3wwqLpAX8UFIXsrVXUJhtNGBwAnLz6d1KUapqyq7ad2La8gZ6CXhHmGAIL32cop8Tst4/PNWLw==", + "dev": true, + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/template": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.12.13.tgz", + "integrity": "sha512-/7xxiGA57xMo/P2GVvdEumr8ONhFOhfgq2ihK3h1e6THqzTAkHbkXgB0xI9yeTfIUoH3+oAeHhqm/I43OTbbjA==", + "dev": true, + "dependencies": { + "@babel/code-frame": "^7.12.13", + "@babel/parser": "^7.12.13", + "@babel/types": "^7.12.13" + } + }, + "node_modules/@babel/template/node_modules/@babel/code-frame": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.12.13.tgz", + "integrity": "sha512-HV1Cm0Q3ZrpCR93tkWOYiuYIgLxZXZFVG2VgK+MBWjUqZTundupbfx2aXarXuw5Ko5aMcjtJgbSs4vUGBS5v6g==", + "dev": true, + "dependencies": { + "@babel/highlight": "^7.12.13" + } + }, + "node_modules/@babel/template/node_modules/@babel/highlight": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.12.13.tgz", + "integrity": "sha512-kocDQvIbgMKlWxXe9fof3TQ+gkIPOUSEYhJjqUjvKMez3krV7vbzYCDq39Oj11UAVK7JqPVGQPlgE85dPNlQww==", + "dev": true, + "dependencies": { + "@babel/helper-validator-identifier": "^7.12.11", + "chalk": "^2.0.0", + "js-tokens": "^4.0.0" + } + }, + "node_modules/@babel/traverse": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.12.13.tgz", + "integrity": "sha512-3Zb4w7eE/OslI0fTp8c7b286/cQps3+vdLW3UcwC8VSJC6GbKn55aeVVu2QJNuCDoeKyptLOFrPq8WqZZBodyA==", + "dev": true, + "dependencies": { + "@babel/code-frame": "^7.12.13", + "@babel/generator": "^7.12.13", + "@babel/helper-function-name": "^7.12.13", + "@babel/helper-split-export-declaration": "^7.12.13", + "@babel/parser": "^7.12.13", + "@babel/types": "^7.12.13", + "debug": "^4.1.0", + "globals": "^11.1.0", + "lodash": "^4.17.19" + } + }, + "node_modules/@babel/traverse/node_modules/@babel/code-frame": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.12.13.tgz", + "integrity": 
"sha512-HV1Cm0Q3ZrpCR93tkWOYiuYIgLxZXZFVG2VgK+MBWjUqZTundupbfx2aXarXuw5Ko5aMcjtJgbSs4vUGBS5v6g==", + "dev": true, + "dependencies": { + "@babel/highlight": "^7.12.13" + } + }, + "node_modules/@babel/traverse/node_modules/@babel/highlight": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.12.13.tgz", + "integrity": "sha512-kocDQvIbgMKlWxXe9fof3TQ+gkIPOUSEYhJjqUjvKMez3krV7vbzYCDq39Oj11UAVK7JqPVGQPlgE85dPNlQww==", + "dev": true, + "dependencies": { + "@babel/helper-validator-identifier": "^7.12.11", + "chalk": "^2.0.0", + "js-tokens": "^4.0.0" + } + }, + "node_modules/@babel/types": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.12.13.tgz", + "integrity": "sha512-oKrdZTld2im1z8bDwTOQvUbxKwE+854zc16qWZQlcTqMN00pWxHQ4ZeOq0yDMnisOpRykH2/5Qqcrk/OlbAjiQ==", + "dev": true, + "dependencies": { + "@babel/helper-validator-identifier": "^7.12.11", + "lodash": "^4.17.19", + "to-fast-properties": "^2.0.0" + } + }, + "node_modules/@istanbuljs/load-nyc-config": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@istanbuljs/load-nyc-config/-/load-nyc-config-1.1.0.tgz", + "integrity": "sha512-VjeHSlIzpv/NyD3N0YuHfXOPDIixcA1q2ZV98wsMqcYlPmv2n3Yb2lYP9XMElnaFVXg5A7YLTeLu6V84uQDjmQ==", + "dev": true, + "dependencies": { + "camelcase": "^5.3.1", + "find-up": "^4.1.0", + "get-package-type": "^0.1.0", + "js-yaml": "^3.13.1", + "resolve-from": "^5.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/camelcase": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", + "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/find-up": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", + "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", + "dev": true, + "dependencies": { + "locate-path": "^5.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/locate-path": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", + "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", + "dev": true, + "dependencies": { + "p-locate": "^4.1.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "dev": true, + "dependencies": { + "p-try": "^2.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/p-locate": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", + "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", + "dev": true, + "dependencies": { + "p-limit": "^2.2.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/p-try": { + "version": 
"2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/@istanbuljs/schema": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/@istanbuljs/schema/-/schema-0.1.2.tgz", + "integrity": "sha512-tsAQNx32a8CoFhjhijUIhI4kccIAgmGhy8LZMZgGfmXcpMbPRUqn5LWmgRttILi6yeGmBJd2xsPkFMs0PzgPCw==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/@nodelib/fs.scandir": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.4.tgz", + "integrity": "sha512-33g3pMJk3bg5nXbL/+CY6I2eJDzZAni49PfJnL5fghPTggPvBd/pFNSgJsdAgWptuFu7qq/ERvOYFlhvsLTCKA==", + "dev": true, + "dependencies": { + "@nodelib/fs.stat": "2.0.4", + "run-parallel": "^1.1.9" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.stat": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.4.tgz", + "integrity": "sha512-IYlHJA0clt2+Vg7bccq+TzRdJvv19c2INqBSsoOLp1je7xjtr7J26+WXR72MCdvU9q1qTzIWDfhMf+DRvQJK4Q==", + "dev": true, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.walk": { + "version": "1.2.6", + "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.6.tgz", + "integrity": "sha512-8Broas6vTtW4GIXTAHDoE32hnN2M5ykgCpWGbuXHQ15vEMqr23pB76e/GZcYsZCHALv50ktd24qhEyKr6wBtow==", + "dev": true, + "dependencies": { + "@nodelib/fs.scandir": "2.1.4", + "fastq": "^1.6.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@octokit/auth-token": { + "version": "2.4.5", + "resolved": "https://registry.npmjs.org/@octokit/auth-token/-/auth-token-2.4.5.tgz", + "integrity": "sha512-BpGYsPgJt05M7/L/5FoE1PiAbdxXFZkX/3kDYcsvd1v6UhlnE5e96dTDr0ezX/EFwciQxf3cNV0loipsURU+WA==", + "dev": true, + "dependencies": { + "@octokit/types": "^6.0.3" + } + }, + "node_modules/@octokit/core": { + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/@octokit/core/-/core-3.4.0.tgz", + "integrity": "sha512-6/vlKPP8NF17cgYXqucdshWqmMZGXkuvtcrWCgU5NOI0Pl2GjlmZyWgBMrU8zJ3v2MJlM6++CiB45VKYmhiWWg==", + "dev": true, + "dependencies": { + "@octokit/auth-token": "^2.4.4", + "@octokit/graphql": "^4.5.8", + "@octokit/request": "^5.4.12", + "@octokit/request-error": "^2.0.5", + "@octokit/types": "^6.0.3", + "before-after-hook": "^2.2.0", + "universal-user-agent": "^6.0.0" + } + }, + "node_modules/@octokit/endpoint": { + "version": "6.0.11", + "resolved": "https://registry.npmjs.org/@octokit/endpoint/-/endpoint-6.0.11.tgz", + "integrity": "sha512-fUIPpx+pZyoLW4GCs3yMnlj2LfoXTWDUVPTC4V3MUEKZm48W+XYpeWSZCv+vYF1ZABUm2CqnDVf1sFtIYrj7KQ==", + "dev": true, + "dependencies": { + "@octokit/types": "^6.0.3", + "is-plain-object": "^5.0.0", + "universal-user-agent": "^6.0.0" + } + }, + "node_modules/@octokit/graphql": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/@octokit/graphql/-/graphql-4.6.2.tgz", + "integrity": "sha512-WmsIR1OzOr/3IqfG9JIczI8gMJUMzzyx5j0XXQ4YihHtKlQc+u35VpVoOXhlKAlaBntvry1WpAzPl/a+s3n89Q==", + "dev": true, + "dependencies": { + 
"@octokit/request": "^5.3.0", + "@octokit/types": "^6.0.3", + "universal-user-agent": "^6.0.0" + } + }, + "node_modules/@octokit/openapi-types": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/@octokit/openapi-types/-/openapi-types-7.0.0.tgz", + "integrity": "sha512-gV/8DJhAL/04zjTI95a7FhQwS6jlEE0W/7xeYAzuArD0KVAVWDLP2f3vi98hs3HLTczxXdRK/mF0tRoQPpolEw==", + "dev": true + }, + "node_modules/@octokit/plugin-paginate-rest": { + "version": "2.13.3", + "resolved": "https://registry.npmjs.org/@octokit/plugin-paginate-rest/-/plugin-paginate-rest-2.13.3.tgz", + "integrity": "sha512-46lptzM9lTeSmIBt/sVP/FLSTPGx6DCzAdSX3PfeJ3mTf4h9sGC26WpaQzMEq/Z44cOcmx8VsOhO+uEgE3cjYg==", + "dev": true, + "dependencies": { + "@octokit/types": "^6.11.0" + }, + "peerDependencies": { + "@octokit/core": ">=2" + } + }, + "node_modules/@octokit/plugin-request-log": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@octokit/plugin-request-log/-/plugin-request-log-1.0.3.tgz", + "integrity": "sha512-4RFU4li238jMJAzLgAwkBAw+4Loile5haQMQr+uhFq27BmyJXcXSKvoQKqh0agsZEiUlW6iSv3FAgvmGkur7OQ==", + "dev": true, + "peerDependencies": { + "@octokit/core": ">=3" + } + }, + "node_modules/@octokit/plugin-rest-endpoint-methods": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/@octokit/plugin-rest-endpoint-methods/-/plugin-rest-endpoint-methods-5.0.1.tgz", + "integrity": "sha512-vvWbPtPqLyIzJ7A4IPdTl+8IeuKAwMJ4LjvmqWOOdfSuqWQYZXq2CEd0hsnkidff2YfKlguzujHs/reBdAx8Sg==", + "dev": true, + "dependencies": { + "@octokit/types": "^6.13.1", + "deprecation": "^2.3.1" + }, + "peerDependencies": { + "@octokit/core": ">=3" + } + }, + "node_modules/@octokit/request": { + "version": "5.4.15", + "resolved": "https://registry.npmjs.org/@octokit/request/-/request-5.4.15.tgz", + "integrity": "sha512-6UnZfZzLwNhdLRreOtTkT9n57ZwulCve8q3IT/Z477vThu6snfdkBuhxnChpOKNGxcQ71ow561Qoa6uqLdPtag==", + "dev": true, + "dependencies": { + "@octokit/endpoint": "^6.0.1", + "@octokit/request-error": "^2.0.0", + "@octokit/types": "^6.7.1", + "is-plain-object": "^5.0.0", + "node-fetch": "^2.6.1", + "universal-user-agent": "^6.0.0" + } + }, + "node_modules/@octokit/request-error": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@octokit/request-error/-/request-error-2.0.5.tgz", + "integrity": "sha512-T/2wcCFyM7SkXzNoyVNWjyVlUwBvW3igM3Btr/eKYiPmucXTtkxt2RBsf6gn3LTzaLSLTQtNmvg+dGsOxQrjZg==", + "dev": true, + "dependencies": { + "@octokit/types": "^6.0.3", + "deprecation": "^2.0.0", + "once": "^1.4.0" + } + }, + "node_modules/@octokit/rest": { + "version": "18.5.3", + "resolved": "https://registry.npmjs.org/@octokit/rest/-/rest-18.5.3.tgz", + "integrity": "sha512-KPAsUCr1DOdLVbZJgGNuE/QVLWEaVBpFQwDAz/2Cnya6uW2wJ/P5RVGk0itx7yyN1aGa8uXm2pri4umEqG1JBA==", + "dev": true, + "dependencies": { + "@octokit/core": "^3.2.3", + "@octokit/plugin-paginate-rest": "^2.6.2", + "@octokit/plugin-request-log": "^1.0.2", + "@octokit/plugin-rest-endpoint-methods": "5.0.1" + } + }, + "node_modules/@octokit/types": { + "version": "6.14.2", + "resolved": "https://registry.npmjs.org/@octokit/types/-/types-6.14.2.tgz", + "integrity": "sha512-wiQtW9ZSy4OvgQ09iQOdyXYNN60GqjCL/UdMsepDr1Gr0QzpW6irIKbH3REuAHXAhxkEk9/F2a3Gcs1P6kW5jA==", + "dev": true, + "dependencies": { + "@octokit/openapi-types": "^7.0.0" + } + }, + "node_modules/@semantic-release/commit-analyzer": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/@semantic-release/commit-analyzer/-/commit-analyzer-8.0.1.tgz", + "integrity": 
"sha512-5bJma/oB7B4MtwUkZC2Bf7O1MHfi4gWe4mA+MIQ3lsEV0b422Bvl1z5HRpplDnMLHH3EXMoRdEng6Ds5wUqA3A==", + "dev": true, + "dependencies": { + "conventional-changelog-angular": "^5.0.0", + "conventional-commits-filter": "^2.0.0", + "conventional-commits-parser": "^3.0.7", + "debug": "^4.0.0", + "import-from": "^3.0.0", + "lodash": "^4.17.4", + "micromatch": "^4.0.2" + }, + "engines": { + "node": ">=10.18" + }, + "peerDependencies": { + "semantic-release": ">=16.0.0 <18.0.0" + } + }, + "node_modules/@semantic-release/commit-analyzer/node_modules/debug": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.1.1.tgz", + "integrity": "sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==", + "deprecated": "Debug versions >=3.2.0 <3.2.7 || >=4 <4.3.1 have a low-severity ReDos regression when used in a Node.js environment. It is recommended you upgrade to 3.2.7 or 4.3.1. (https://github.com/visionmedia/debug/issues/797)", + "dev": true, + "dependencies": { + "ms": "^2.1.1" + } + }, + "node_modules/@semantic-release/commit-analyzer/node_modules/ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "dev": true + }, + "node_modules/@semantic-release/error": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/@semantic-release/error/-/error-2.2.0.tgz", + "integrity": "sha512-9Tj/qn+y2j+sjCI3Jd+qseGtHjOAeg7dU2/lVcqIQ9TV3QDaDXDYXcoOHU+7o2Hwh8L8ymL4gfuO7KxDs3q2zg==", + "dev": true + }, + "node_modules/@semantic-release/github": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/@semantic-release/github/-/github-7.2.3.tgz", + "integrity": "sha512-lWjIVDLal+EQBzy697ayUNN8MoBpp+jYIyW2luOdqn5XBH4d9bQGfTnjuLyzARZBHejqh932HVjiH/j4+R7VHw==", + "dev": true, + "dependencies": { + "@octokit/rest": "^18.0.0", + "@semantic-release/error": "^2.2.0", + "aggregate-error": "^3.0.0", + "bottleneck": "^2.18.1", + "debug": "^4.0.0", + "dir-glob": "^3.0.0", + "fs-extra": "^10.0.0", + "globby": "^11.0.0", + "http-proxy-agent": "^4.0.0", + "https-proxy-agent": "^5.0.0", + "issue-parser": "^6.0.0", + "lodash": "^4.17.4", + "mime": "^2.4.3", + "p-filter": "^2.0.0", + "p-retry": "^4.0.0", + "url-join": "^4.0.0" + }, + "engines": { + "node": ">=10.18" + }, + "peerDependencies": { + "semantic-release": ">=16.0.0 <18.0.0" + } + }, + "node_modules/@semantic-release/github/node_modules/fs-extra": { + "version": "10.0.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.0.0.tgz", + "integrity": "sha512-C5owb14u9eJwizKGdchcDUQeFtlSHHthBk8pbX9Vc1PFZrLombudjDnNns88aYslCyF6IY5SUw3Roz6xShcEIQ==", + "dev": true, + "dependencies": { + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@semantic-release/github/node_modules/universalify": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", + "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==", + "dev": true, + "engines": { + "node": ">= 10.0.0" + } + }, + "node_modules/@semantic-release/npm": { + "version": "7.0.5", + "resolved": "https://registry.npmjs.org/@semantic-release/npm/-/npm-7.0.5.tgz", + "integrity": "sha512-D+oEmsx9aHE1q806NFQwSC9KdBO8ri/VO99eEz0wWbX2jyLqVyWr7t0IjKC8aSnkkQswg/4KN/ZjfF6iz1XOpw==", + "dev": true, + 
"dependencies": { + "@semantic-release/error": "^2.2.0", + "aggregate-error": "^3.0.0", + "execa": "^4.0.0", + "fs-extra": "^9.0.0", + "lodash": "^4.17.15", + "nerf-dart": "^1.0.0", + "normalize-url": "^5.0.0", + "npm": "^6.10.3", + "rc": "^1.2.8", + "read-pkg": "^5.0.0", + "registry-auth-token": "^4.0.0", + "semver": "^7.1.2", + "tempy": "^0.5.0" + }, + "engines": { + "node": ">=10.18" + }, + "peerDependencies": { + "semantic-release": ">=16.0.0 <18.0.0" + } + }, + "node_modules/@semantic-release/npm/node_modules/cross-spawn": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.1.tgz", + "integrity": "sha512-u7v4o84SwFpD32Z8IIcPZ6z1/ie24O6RU3RbtL5Y316l3KuHVPx9ItBgWQ6VlfAFnRnTtMUrsQ9MUUTuEZjogg==", + "dev": true, + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@semantic-release/npm/node_modules/execa": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/execa/-/execa-4.0.0.tgz", + "integrity": "sha512-JbDUxwV3BoT5ZVXQrSVbAiaXhXUkIwvbhPIwZ0N13kX+5yCzOhUNdocxB/UQRuYOHRYYwAxKYwJYc0T4D12pDA==", + "dev": true, + "dependencies": { + "cross-spawn": "^7.0.0", + "get-stream": "^5.0.0", + "human-signals": "^1.1.1", + "is-stream": "^2.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^4.0.0", + "onetime": "^5.1.0", + "signal-exit": "^3.0.2", + "strip-final-newline": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sindresorhus/execa?sponsor=1" + } + }, + "node_modules/@semantic-release/npm/node_modules/get-stream": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-5.1.0.tgz", + "integrity": "sha512-EXr1FOzrzTfGeL0gQdeFEvOMm2mzMOglyiOXSTpPC+iAjAKftbr3jpCMWynogwYnM+eSj9sHGc6wjIcDvYiygw==", + "dev": true, + "dependencies": { + "pump": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@semantic-release/npm/node_modules/is-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.0.tgz", + "integrity": "sha512-XCoy+WlUr7d1+Z8GgSuXmpuUFC9fOhRXglJMx+dwLKTkL44Cjd4W1Z5P+BQZpr+cR93aGP4S/s7Ftw6Nd/kiEw==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/@semantic-release/npm/node_modules/npm-run-path": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", + "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", + "dev": true, + "dependencies": { + "path-key": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@semantic-release/npm/node_modules/parse-json": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.0.0.tgz", + "integrity": "sha512-OOY5b7PAEFV0E2Fir1KOkxchnZNCdowAJgQ5NuxjpBKTRP3pQhwkrkxqQjeoKJ+fO7bCpmIZaogI4eZGDMEGOw==", + "dev": true, + "dependencies": { + "@babel/code-frame": "^7.0.0", + "error-ex": "^1.3.1", + "json-parse-better-errors": "^1.0.1", + "lines-and-columns": "^1.1.6" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@semantic-release/npm/node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + 
"node_modules/@semantic-release/npm/node_modules/read-pkg": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-5.2.0.tgz", + "integrity": "sha512-Ug69mNOpfvKDAc2Q8DRpMjjzdtrnv9HcSMX+4VsZxD1aZ6ZzrIE7rlzXBtWTyhULSMKg076AW6WR5iZpD0JiOg==", + "dev": true, + "dependencies": { + "@types/normalize-package-data": "^2.4.0", + "normalize-package-data": "^2.5.0", + "parse-json": "^5.0.0", + "type-fest": "^0.6.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@semantic-release/npm/node_modules/semver": { + "version": "7.1.3", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.1.3.tgz", + "integrity": "sha512-ekM0zfiA9SCBlsKa2X1hxyxiI4L3B6EbVJkkdgQXnSEEaHlGdvyodMruTiulSRWMMB4NeIuYNMC9rTKTz97GxA==", + "dev": true, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/@semantic-release/npm/node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@semantic-release/npm/node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/@semantic-release/npm/node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@semantic-release/release-notes-generator": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/@semantic-release/release-notes-generator/-/release-notes-generator-9.0.1.tgz", + "integrity": "sha512-bOoTiH6SiiR0x2uywSNR7uZcRDl22IpZhj+Q5Bn0v+98MFtOMhCxFhbrKQjhbYoZw7vps1mvMRmFkp/g6R9cvQ==", + "dev": true, + "dependencies": { + "conventional-changelog-angular": "^5.0.0", + "conventional-changelog-writer": "^4.0.0", + "conventional-commits-filter": "^2.0.0", + "conventional-commits-parser": "^3.0.0", + "debug": "^4.0.0", + "get-stream": "^5.0.0", + "import-from": "^3.0.0", + "into-stream": "^5.0.0", + "lodash": "^4.17.4", + "read-pkg-up": "^7.0.0" + }, + "engines": { + "node": ">=10.18" + }, + "peerDependencies": { + "semantic-release": ">=15.8.0 <18.0.0" + } + }, + "node_modules/@semantic-release/release-notes-generator/node_modules/debug": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.1.1.tgz", + "integrity": "sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==", + "deprecated": "Debug versions >=3.2.0 <3.2.7 || >=4 <4.3.1 have a low-severity ReDos regression when used in a Node.js environment. It is recommended you upgrade to 3.2.7 or 4.3.1. 
(https://github.com/visionmedia/debug/issues/797)", + "dev": true, + "dependencies": { + "ms": "^2.1.1" + } + }, + "node_modules/@semantic-release/release-notes-generator/node_modules/find-up": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", + "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", + "dev": true, + "dependencies": { + "locate-path": "^5.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@semantic-release/release-notes-generator/node_modules/get-stream": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-5.1.0.tgz", + "integrity": "sha512-EXr1FOzrzTfGeL0gQdeFEvOMm2mzMOglyiOXSTpPC+iAjAKftbr3jpCMWynogwYnM+eSj9sHGc6wjIcDvYiygw==", + "dev": true, + "dependencies": { + "pump": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@semantic-release/release-notes-generator/node_modules/locate-path": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", + "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", + "dev": true, + "dependencies": { + "p-locate": "^4.1.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@semantic-release/release-notes-generator/node_modules/ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "dev": true + }, + "node_modules/@semantic-release/release-notes-generator/node_modules/p-limit": { + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.2.2.tgz", + "integrity": "sha512-WGR+xHecKTr7EbUEhyLSh5Dube9JtdiG78ufaeLxTgpudf/20KqyMioIUZJAezlTIi6evxuoUs9YXc11cU+yzQ==", + "dev": true, + "dependencies": { + "p-try": "^2.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@semantic-release/release-notes-generator/node_modules/p-locate": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", + "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", + "dev": true, + "dependencies": { + "p-limit": "^2.2.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@semantic-release/release-notes-generator/node_modules/p-try": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/@semantic-release/release-notes-generator/node_modules/parse-json": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.0.0.tgz", + "integrity": "sha512-OOY5b7PAEFV0E2Fir1KOkxchnZNCdowAJgQ5NuxjpBKTRP3pQhwkrkxqQjeoKJ+fO7bCpmIZaogI4eZGDMEGOw==", + "dev": true, + "dependencies": { + "@babel/code-frame": "^7.0.0", + "error-ex": "^1.3.1", + "json-parse-better-errors": "^1.0.1", + "lines-and-columns": "^1.1.6" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@semantic-release/release-notes-generator/node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": 
"sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/@semantic-release/release-notes-generator/node_modules/read-pkg": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-5.2.0.tgz", + "integrity": "sha512-Ug69mNOpfvKDAc2Q8DRpMjjzdtrnv9HcSMX+4VsZxD1aZ6ZzrIE7rlzXBtWTyhULSMKg076AW6WR5iZpD0JiOg==", + "dev": true, + "dependencies": { + "@types/normalize-package-data": "^2.4.0", + "normalize-package-data": "^2.5.0", + "parse-json": "^5.0.0", + "type-fest": "^0.6.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@semantic-release/release-notes-generator/node_modules/read-pkg-up": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-7.0.1.tgz", + "integrity": "sha512-zK0TB7Xd6JpCLmlLmufqykGE+/TlOePD6qKClNW7hHDKFh/J7/7gCWGR7joEQEW1bKq3a3yUZSObOoWLFQ4ohg==", + "dev": true, + "dependencies": { + "find-up": "^4.1.0", + "read-pkg": "^5.2.0", + "type-fest": "^0.8.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@semantic-release/release-notes-generator/node_modules/read-pkg/node_modules/type-fest": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.6.0.tgz", + "integrity": "sha512-q+MB8nYR1KDLrgr4G5yemftpMC7/QLqVndBmEEdqzmNj5dcFOO4Oo8qlwZE3ULT3+Zim1F8Kq4cBnikNhlCMlg==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/@semantic-release/release-notes-generator/node_modules/type-fest": { + "version": "0.8.1", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.8.1.tgz", + "integrity": "sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/@tootallnate/once": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@tootallnate/once/-/once-1.1.2.tgz", + "integrity": "sha512-RbzJvlNzmRq5c3O09UipeuXno4tA1FE6ikOjxZK0tuxVv3412l64l5t1W5pj4+rJq9vpkm/kwiR07aZXnsKPxw==", + "dev": true, + "engines": { + "node": ">= 6" + } + }, + "node_modules/@types/normalize-package-data": { + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/@types/normalize-package-data/-/normalize-package-data-2.4.0.tgz", + "integrity": "sha512-f5j5b/Gf71L+dbqxIpQ4Z2WlmI/mPJ0fOkGGmFgtb6sAu97EPczzbS3/tJKxmcYDj55OX6ssqwDAWOHIYDRDGA==", + "dev": true + }, + "node_modules/@types/parse-json": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/@types/parse-json/-/parse-json-4.0.0.tgz", + "integrity": "sha512-//oorEZjL6sbPcKUaCdIGlIUeH26mgzimjBB77G6XRgnDl/L5wOnpyBGRe/Mmf5CVW3PwEBE1NjiMZ/ssFh4wA==", + "dev": true + }, + "node_modules/@types/retry": { + "version": "0.12.0", + "resolved": "https://registry.npmjs.org/@types/retry/-/retry-0.12.0.tgz", + "integrity": "sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA==", + "dev": true + }, + "node_modules/@ungap/promise-all-settled": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@ungap/promise-all-settled/-/promise-all-settled-1.1.2.tgz", + "integrity": "sha512-sL/cEvJWAnClXw0wHk85/2L0G6Sj8UB0Ctc1TEMbKSsmpRosqhwj9gWgFRZSrBr2f9tiXISwNhCPmlfqUqyb9Q==", + "dev": true + }, + "node_modules/agent-base": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz", + "integrity": 
"sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==", + "dev": true, + "dependencies": { + "debug": "4" + }, + "engines": { + "node": ">= 6.0.0" + } + }, + "node_modules/aggregate-error": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/aggregate-error/-/aggregate-error-3.0.1.tgz", + "integrity": "sha512-quoaXsZ9/BLNae5yiNoUz+Nhkwz83GhWwtYFglcjEQB2NDHCIpApbqXxIFnm4Pq/Nvhrsq5sYJFyohrrxnTGAA==", + "dev": true, + "dependencies": { + "clean-stack": "^2.0.0", + "indent-string": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/aggregate-error/node_modules/indent-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", + "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-colors": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/ansi-colors/-/ansi-colors-4.1.1.tgz", + "integrity": "sha512-JoX0apGbHaUJBNl6yF+p6JAFYZ666/hhCGKN5t9QFjbJQKUU/g8MNbFDbvfrgKXvI1QpZplPOnwIo99lX/AAmA==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/ansi-escapes": { + "version": "4.3.2", + "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.2.tgz", + "integrity": "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==", + "dev": true, + "dependencies": { + "type-fest": "^0.21.3" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ansi-escapes/node_modules/type-fest": { + "version": "0.21.3", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.21.3.tgz", + "integrity": "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ansi-regex": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz", + "integrity": "sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "dev": true, + "dependencies": { + "color-convert": "^1.9.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/ansicolors": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/ansicolors/-/ansicolors-0.3.2.tgz", + "integrity": "sha1-ZlWX3oap/+Oqm/vmyuXG6kJrSXk=", + "dev": true + }, + "node_modules/anymatch": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.1.tgz", + "integrity": "sha512-mM8522psRCqzV+6LhomX5wgp25YVibjh8Wj23I5RPkPppSVSjyKD2A2mBJmWGa+KN7f2D6LNh9jkBCeyLktzjg==", + "dev": true, + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/append-transform": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/append-transform/-/append-transform-2.0.0.tgz", + "integrity": 
"sha512-7yeyCEurROLQJFv5Xj4lEGTy0borxepjFv1g22oAdqFu//SrAlDl1O1Nxx15SH1RoliUml6p8dwJW9jvZughhg==", + "dev": true, + "dependencies": { + "default-require-extensions": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/archy": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/archy/-/archy-1.0.0.tgz", + "integrity": "sha1-+cjBN1fMHde8N5rHeyxipcKGjEA=", + "dev": true + }, + "node_modules/argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "dev": true, + "dependencies": { + "sprintf-js": "~1.0.2" + } + }, + "node_modules/argv-formatter": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/argv-formatter/-/argv-formatter-1.0.0.tgz", + "integrity": "sha1-oMoMvCmltz6Dbuvhy/bF4OTrgvk=", + "dev": true + }, + "node_modules/array-find-index": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/array-find-index/-/array-find-index-1.0.2.tgz", + "integrity": "sha1-3wEKoSh+Fku9pvlyOwqWoexBh6E=", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/array-ify": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/array-ify/-/array-ify-1.0.0.tgz", + "integrity": "sha1-nlKHYrSpBmrRY6aWKjZEGOlibs4=", + "dev": true + }, + "node_modules/array-union": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", + "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/arrify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/arrify/-/arrify-1.0.1.tgz", + "integrity": "sha1-iYUI2iIm84DfkEcoRWhJwVAaSw0=", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/at-least-node": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/at-least-node/-/at-least-node-1.0.0.tgz", + "integrity": "sha512-+q/t7Ekv1EDY2l6Gda6LLiX14rU9TV20Wa3ofeQmwPFZbOMo9DXrLbOjFaaclkXKWidIaopwAObQDqwWtGUjqg==", + "dev": true, + "engines": { + "node": ">= 4.0.0" + } + }, + "node_modules/balanced-match": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.0.tgz", + "integrity": "sha1-ibTRmasr7kneFk6gK4nORi1xt2c=", + "dev": true + }, + "node_modules/before-after-hook": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/before-after-hook/-/before-after-hook-2.2.1.tgz", + "integrity": "sha512-/6FKxSTWoJdbsLDF8tdIjaRiFXiE6UHsEHE3OPI/cwPURCVi1ukP0gmLn7XWEiFk5TcwQjjY5PWsU+j+tgXgmw==", + "dev": true + }, + "node_modules/binary-extensions": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz", + "integrity": "sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/bottleneck": { + "version": "2.19.5", + "resolved": "https://registry.npmjs.org/bottleneck/-/bottleneck-2.19.5.tgz", + "integrity": "sha512-VHiNCbI1lKdl44tGrhNfU3lup0Tj/ZBMJB5/2ZbNXRCPuRCO7ed2mgcK4r17y+KB2EfuYuRaVlwNbAeaWGSpbw==", + "dev": true + }, + "node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": 
"sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/braces": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", + "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "dev": true, + "dependencies": { + "fill-range": "^7.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/browser-stdout": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/browser-stdout/-/browser-stdout-1.3.1.tgz", + "integrity": "sha512-qhAVI1+Av2X7qelOfAIYwXONood6XlZE/fXaBSmW/T5SzLAmCgzi+eiWE7fUvbHaeNBQH13UftjpXxsfLkMpgw==", + "dev": true + }, + "node_modules/caching-transform": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/caching-transform/-/caching-transform-4.0.0.tgz", + "integrity": "sha512-kpqOvwXnjjN44D89K5ccQC+RUrsy7jB/XLlRrx0D7/2HNcTPqzsb6XgYoErwko6QsV184CA2YgS1fxDiiDZMWA==", + "dev": true, + "dependencies": { + "hasha": "^5.0.0", + "make-dir": "^3.0.0", + "package-hash": "^4.0.0", + "write-file-atomic": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/callsites": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/camelcase": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-4.1.0.tgz", + "integrity": "sha1-1UVjW+HjPFQmScaRc+Xeas+uNN0=", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/camelcase-keys": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/camelcase-keys/-/camelcase-keys-4.2.0.tgz", + "integrity": "sha1-oqpfsa9oh1glnDLBQUJteJI7m3c=", + "dev": true, + "dependencies": { + "camelcase": "^4.1.0", + "map-obj": "^2.0.0", + "quick-lru": "^1.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/cardinal": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/cardinal/-/cardinal-2.1.1.tgz", + "integrity": "sha1-fMEFXYItISlU0HsIXeolHMe8VQU=", + "dev": true, + "dependencies": { + "ansicolors": "~0.3.2", + "redeyed": "~2.1.0" + }, + "bin": { + "cdl": "bin/cdl.js" + } + }, + "node_modules/chalk": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", + "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + "dev": true, + "dependencies": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/chokidar": { + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.4.3.tgz", + "integrity": "sha512-DtM3g7juCXQxFVSNPNByEC2+NImtBuxQQvWlHunpJIS5Ocr0lG306cC7FCi7cEA0fzmybPUIl4txBIobk1gGOQ==", + "dev": true, + "dependencies": { + "anymatch": "~3.1.1", + "braces": "~3.0.2", + "glob-parent": "~5.1.0", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.5.0" + }, + "engines": { + "node": ">= 8.10.0" + }, + "optionalDependencies": { + "fsevents": "~2.1.2" + } + }, + "node_modules/clean-stack": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-2.2.0.tgz", + 
"integrity": "sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/cli-table": { + "version": "0.3.6", + "resolved": "https://registry.npmjs.org/cli-table/-/cli-table-0.3.6.tgz", + "integrity": "sha512-ZkNZbnZjKERTY5NwC2SeMeLeifSPq/pubeRoTpdr3WchLlnZg6hEgvHkK5zL7KNFdd9PmHN8lxrENUwI3cE8vQ==", + "dev": true, + "dependencies": { + "colors": "1.0.3" + }, + "engines": { + "node": ">= 0.2.0" + } + }, + "node_modules/cliui": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-6.0.0.tgz", + "integrity": "sha512-t6wbgtoCXvAzst7QgXxJYqPt0usEfbgQdftEPbLL/cvv6HPE5VgvqCuAIDR0NgU52ds6rFwqrgakNLrHEjCbrQ==", + "dev": true, + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.0", + "wrap-ansi": "^6.2.0" + } + }, + "node_modules/color-convert": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "dev": true, + "dependencies": { + "color-name": "1.1.3" + } + }, + "node_modules/color-name": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=", + "dev": true + }, + "node_modules/colors": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/colors/-/colors-1.0.3.tgz", + "integrity": "sha1-BDP0TYCWgP3rYO0mDxsMJi6CpAs=", + "dev": true, + "engines": { + "node": ">=0.1.90" + } + }, + "node_modules/commondir": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/commondir/-/commondir-1.0.1.tgz", + "integrity": "sha1-3dgA2gxmEnOTzKWVDqloo6rxJTs=", + "dev": true + }, + "node_modules/compare-func": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/compare-func/-/compare-func-1.3.2.tgz", + "integrity": "sha1-md0LpFfh+bxyKxLAjsM+6rMfpkg=", + "dev": true, + "dependencies": { + "array-ify": "^1.0.0", + "dot-prop": "^3.0.0" + } + }, + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=", + "dev": true + }, + "node_modules/conventional-changelog-angular": { + "version": "5.0.6", + "resolved": "https://registry.npmjs.org/conventional-changelog-angular/-/conventional-changelog-angular-5.0.6.tgz", + "integrity": "sha512-QDEmLa+7qdhVIv8sFZfVxU1VSyVvnXPsxq8Vam49mKUcO1Z8VTLEJk9uI21uiJUsnmm0I4Hrsdc9TgkOQo9WSA==", + "dev": true, + "dependencies": { + "compare-func": "^1.3.1", + "q": "^1.5.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/conventional-changelog-conventionalcommits": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/conventional-changelog-conventionalcommits/-/conventional-changelog-conventionalcommits-4.2.3.tgz", + "integrity": "sha512-atGa+R4vvEhb8N/8v3IoW59gCBJeeFiX6uIbPu876ENAmkMwsenyn0R21kdDHJFLQdy6zW4J6b4xN8KI3b9oww==", + "dev": true, + "dependencies": { + "compare-func": "^1.3.1", + "lodash": "^4.17.15", + "q": "^1.5.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/conventional-changelog-writer": { + "version": "4.0.11", + "resolved": "https://registry.npmjs.org/conventional-changelog-writer/-/conventional-changelog-writer-4.0.11.tgz", + "integrity": "sha512-g81GQOR392I+57Cw3IyP1f+f42ME6aEkbR+L7v1FBBWolB0xkjKTeCWVguzRrp6UiT1O6gBpJbEy2eq7AnV1rw==", + "dev": true, + 
"dependencies": { + "compare-func": "^1.3.1", + "conventional-commits-filter": "^2.0.2", + "dateformat": "^3.0.0", + "handlebars": "^4.4.0", + "json-stringify-safe": "^5.0.1", + "lodash": "^4.17.15", + "meow": "^5.0.0", + "semver": "^6.0.0", + "split": "^1.0.0", + "through2": "^3.0.0" + }, + "bin": { + "conventional-changelog-writer": "cli.js" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/conventional-changelog-writer/node_modules/semver": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", + "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "dev": true, + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/conventional-commits-filter": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/conventional-commits-filter/-/conventional-commits-filter-2.0.2.tgz", + "integrity": "sha512-WpGKsMeXfs21m1zIw4s9H5sys2+9JccTzpN6toXtxhpw2VNF2JUXwIakthKBy+LN4DvJm+TzWhxOMWOs1OFCFQ==", + "dev": true, + "dependencies": { + "lodash.ismatch": "^4.4.0", + "modify-values": "^1.0.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/conventional-commits-parser": { + "version": "3.0.8", + "resolved": "https://registry.npmjs.org/conventional-commits-parser/-/conventional-commits-parser-3.0.8.tgz", + "integrity": "sha512-YcBSGkZbYp7d+Cr3NWUeXbPDFUN6g3SaSIzOybi8bjHL5IJ5225OSCxJJ4LgziyEJ7AaJtE9L2/EU6H7Nt/DDQ==", + "dev": true, + "dependencies": { + "is-text-path": "^1.0.1", + "JSONStream": "^1.0.4", + "lodash": "^4.17.15", + "meow": "^5.0.0", + "split2": "^2.0.0", + "through2": "^3.0.0", + "trim-off-newlines": "^1.0.0" + }, + "bin": { + "conventional-commits-parser": "cli.js" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/convert-source-map": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.7.0.tgz", + "integrity": "sha512-4FJkXzKXEDB1snCFZlLP4gpC3JILicCpGbzG9f9G7tGqGCzETQ2hWPrcinA9oU4wtf2biUaEH5065UnMeR33oA==", + "dev": true, + "dependencies": { + "safe-buffer": "~5.1.1" + } + }, + "node_modules/core-util-is": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", + "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=", + "dev": true + }, + "node_modules/cosmiconfig": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-7.0.0.tgz", + "integrity": "sha512-pondGvTuVYDk++upghXJabWzL6Kxu6f26ljFw64Swq9v6sQPUL3EUlVDV56diOjpCayKihL6hVe8exIACU4XcA==", + "dev": true, + "dependencies": { + "@types/parse-json": "^4.0.0", + "import-fresh": "^3.2.1", + "parse-json": "^5.0.0", + "path-type": "^4.0.0", + "yaml": "^1.10.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/cosmiconfig/node_modules/parse-json": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz", + "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==", + "dev": true, + "dependencies": { + "@babel/code-frame": "^7.0.0", + "error-ex": "^1.3.1", + "json-parse-even-better-errors": "^2.3.0", + "lines-and-columns": "^1.1.6" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/cosmiconfig/node_modules/path-type": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", + "integrity": 
"sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/cross-spawn": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", + "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "dev": true, + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/crypto-random-string": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/crypto-random-string/-/crypto-random-string-2.0.0.tgz", + "integrity": "sha512-v1plID3y9r/lPhviJ1wrXpLeyUIGAZ2SHNYTEapm7/8A9nLPoyvVp3RK/EPFqn5kEznyWgYZNsRtYYIWbuG8KA==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/currently-unhandled": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/currently-unhandled/-/currently-unhandled-0.4.1.tgz", + "integrity": "sha1-mI3zP+qxke95mmE2nddsF635V+o=", + "dev": true, + "dependencies": { + "array-find-index": "^1.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/dateformat": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/dateformat/-/dateformat-3.0.3.tgz", + "integrity": "sha512-jyCETtSl3VMZMWeRo7iY1FL19ges1t55hMo5yaam4Jrsm5EPL89UQkoQRyiI+Yf4k8r2ZpdngkV8hr1lIdjb3Q==", + "dev": true, + "engines": { + "node": "*" + } + }, + "node_modules/debug": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.2.0.tgz", + "integrity": "sha512-IX2ncY78vDTjZMFUdmsvIRFY2Cf4FnD0wRs+nQwJU8Lu99/tPFdb0VybiiMTPe3I6rQmwsqQqRBvxU+bZ/I8sg==", + "deprecated": "Debug versions >=3.2.0 <3.2.7 || >=4 <4.3.1 have a low-severity ReDos regression when used in a Node.js environment. It is recommended you upgrade to 3.2.7 or 4.3.1. 
(https://github.com/visionmedia/debug/issues/797)", + "dev": true, + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/decamelize": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz", + "integrity": "sha1-9lNNFRSCabIDUue+4m9QH5oZEpA=", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/decamelize-keys": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/decamelize-keys/-/decamelize-keys-1.1.0.tgz", + "integrity": "sha1-0XGoeTMlKAfrPLYdwcFEXQeN8tk=", + "dev": true, + "dependencies": { + "decamelize": "^1.1.0", + "map-obj": "^1.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/decamelize-keys/node_modules/map-obj": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/map-obj/-/map-obj-1.0.1.tgz", + "integrity": "sha1-2TPOuSBdgr3PSIb2dCvcK03qFG0=", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/deep-extend": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz", + "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==", + "dev": true, + "engines": { + "node": ">=4.0.0" + } + }, + "node_modules/default-require-extensions": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/default-require-extensions/-/default-require-extensions-3.0.0.tgz", + "integrity": "sha512-ek6DpXq/SCpvjhpFsLFRVtIxJCRw6fUR42lYMVZuUMK7n8eMz4Uh5clckdBjEpLhn/gEBZo7hDJnJcwdKLKQjg==", + "dev": true, + "dependencies": { + "strip-bom": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/default-require-extensions/node_modules/strip-bom": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-4.0.0.tgz", + "integrity": "sha512-3xurFv5tEgii33Zi8Jtp55wEIILR9eh34FAW00PZf+JnSsTmV/ioewSgQl97JHvgjoRGwPShsWm+IdrxB35d0w==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/deprecation": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/deprecation/-/deprecation-2.3.1.tgz", + "integrity": "sha512-xmHIy4F3scKVwMsQ4WnVaS8bHOx0DmVwRywosKhaILI0ywMDWPtBSku2HNxRvF7jtwDRsoEwYQSfbxj8b7RlJQ==", + "dev": true + }, + "node_modules/diff": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/diff/-/diff-4.0.2.tgz", + "integrity": "sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A==", + "dev": true, + "engines": { + "node": ">=0.3.1" + } + }, + "node_modules/dir-glob": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", + "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", + "dev": true, + "dependencies": { + "path-type": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/dir-glob/node_modules/path-type": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", + "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/dot-prop": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/dot-prop/-/dot-prop-3.0.0.tgz", + "integrity": "sha1-G3CK8JSknJoOfbyteQq6U52sEXc=", + "dev": true, + "dependencies": { + "is-obj": "^1.0.0" 
+ }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/duplexer2": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/duplexer2/-/duplexer2-0.1.4.tgz", + "integrity": "sha1-ixLauHjA1p4+eJEFFmKjL8a93ME=", + "dev": true, + "dependencies": { + "readable-stream": "^2.0.2" + } + }, + "node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true + }, + "node_modules/end-of-stream": { + "version": "1.4.4", + "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz", + "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==", + "dev": true, + "dependencies": { + "once": "^1.4.0" + } + }, + "node_modules/env-ci": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/env-ci/-/env-ci-5.0.2.tgz", + "integrity": "sha512-Xc41mKvjouTXD3Oy9AqySz1IeyvJvHZ20Twf5ZLYbNpPPIuCnL/qHCmNlD01LoNy0JTunw9HPYVptD19Ac7Mbw==", + "dev": true, + "dependencies": { + "execa": "^4.0.0", + "java-properties": "^1.0.0" + }, + "engines": { + "node": ">=10.13" + } + }, + "node_modules/env-ci/node_modules/execa": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/execa/-/execa-4.1.0.tgz", + "integrity": "sha512-j5W0//W7f8UxAn8hXVnwG8tLwdiUy4FJLcSupCg6maBYZDpyBvTApK7KyuI4bKj8KOh1r2YH+6ucuYtJv1bTZA==", + "dev": true, + "dependencies": { + "cross-spawn": "^7.0.0", + "get-stream": "^5.0.0", + "human-signals": "^1.1.1", + "is-stream": "^2.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^4.0.0", + "onetime": "^5.1.0", + "signal-exit": "^3.0.2", + "strip-final-newline": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sindresorhus/execa?sponsor=1" + } + }, + "node_modules/env-ci/node_modules/get-stream": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-5.2.0.tgz", + "integrity": "sha512-nBF+F1rAZVCu/p7rjzgA+Yb4lfYXrpl7a6VmJrU8wF9I1CKvP/QwPNZHnOlwbTkY6dvtFIzFMSyQXbLoTQPRpA==", + "dev": true, + "dependencies": { + "pump": "^3.0.0" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/error-ex": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", + "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==", + "dev": true, + "dependencies": { + "is-arrayish": "^0.2.1" + } + }, + "node_modules/es6-error": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/es6-error/-/es6-error-4.1.1.tgz", + "integrity": "sha512-Um/+FxMr9CISWh0bi5Zv0iOD+4cFh5qLeks1qhAopKVAJw3drgKbKySikp7wGhDL0HPeaja0P5ULZrxLkniUVg==", + "dev": true + }, + "node_modules/escalade": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", + "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=", + "dev": true, + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/esprima": { + "version": "4.0.1", + "resolved": 
"https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", + "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", + "dev": true, + "bin": { + "esparse": "bin/esparse.js", + "esvalidate": "bin/esvalidate.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/esutils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/execa": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/execa/-/execa-5.0.0.tgz", + "integrity": "sha512-ov6w/2LCiuyO4RLYGdpFGjkcs0wMTgGE8PrkTHikeUy5iJekXyPIKUjifk5CsE0pt7sMCrMZ3YNqoCj6idQOnQ==", + "dev": true, + "dependencies": { + "cross-spawn": "^7.0.3", + "get-stream": "^6.0.0", + "human-signals": "^2.1.0", + "is-stream": "^2.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^4.0.1", + "onetime": "^5.1.2", + "signal-exit": "^3.0.3", + "strip-final-newline": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sindresorhus/execa?sponsor=1" + } + }, + "node_modules/execa/node_modules/human-signals": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz", + "integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==", + "dev": true, + "engines": { + "node": ">=10.17.0" + } + }, + "node_modules/execa/node_modules/onetime": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", + "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", + "dev": true, + "dependencies": { + "mimic-fn": "^2.1.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/execa/node_modules/signal-exit": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.3.tgz", + "integrity": "sha512-VUJ49FC8U1OxwZLxIbTTrDvLnf/6TDgxZcK8wxR8zs13xpx7xbG60ndBlhNrFi2EMuFRoeDoJO7wthSLq42EjA==", + "dev": true + }, + "node_modules/fast-glob": { + "version": "3.2.5", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.2.5.tgz", + "integrity": "sha512-2DtFcgT68wiTTiwZ2hNdJfcHNke9XOfnwmBRWXhmeKM8rF0TGwmC/Qto3S7RoZKp5cilZbxzO5iTNTQsJ+EeDg==", + "dev": true, + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.0", + "merge2": "^1.3.0", + "micromatch": "^4.0.2", + "picomatch": "^2.2.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/fastq": { + "version": "1.11.0", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.11.0.tgz", + "integrity": "sha512-7Eczs8gIPDrVzT+EksYBcupqMyxSHXXrHOLRRxU2/DicV8789MRBRR8+Hc2uWzUupOs4YS4JzBmBxjjCVBxD/g==", + "dev": true, + "dependencies": { + "reusify": "^1.0.4" + } + }, + "node_modules/figures": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/figures/-/figures-3.2.0.tgz", + "integrity": "sha512-yaduQFRKLXYOGgEn6AZau90j3ggSOyiqXU0F9JZfeXYhNa+Jk4X+s45A2zg5jns87GAFa34BBm2kXw4XpNcbdg==", + "dev": true, + "dependencies": { + "escape-string-regexp": "^1.0.5" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/fill-range": { + "version": "7.0.1", + 
"resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", + "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "dev": true, + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/find-cache-dir": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-3.3.1.tgz", + "integrity": "sha512-t2GDMt3oGC/v+BMwzmllWDuJF/xcDtE5j/fCGbqDD7OLuJkj0cfh1YSA5VKPvwMeLFLNDBkwOKZ2X85jGLVftQ==", + "dev": true, + "dependencies": { + "commondir": "^1.0.1", + "make-dir": "^3.0.2", + "pkg-dir": "^4.1.0" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/avajs/find-cache-dir?sponsor=1" + } + }, + "node_modules/find-up": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-2.1.0.tgz", + "integrity": "sha1-RdG35QbHF93UgndaK3eSCjwMV6c=", + "dev": true, + "dependencies": { + "locate-path": "^2.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/find-versions": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/find-versions/-/find-versions-4.0.0.tgz", + "integrity": "sha512-wgpWy002tA+wgmO27buH/9KzyEOQnKsG/R0yrcjPT9BOFm0zRBVQbZ95nRGXWMywS8YR5knRbpohio0bcJABxQ==", + "dev": true, + "dependencies": { + "semver-regex": "^3.1.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/flat": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/flat/-/flat-5.0.2.tgz", + "integrity": "sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ==", + "dev": true, + "bin": { + "flat": "cli.js" + } + }, + "node_modules/foreground-child": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-2.0.0.tgz", + "integrity": "sha512-dCIq9FpEcyQyXKCkyzmlPTFNgrCzPudOe+mhvJU5zAtlBnGVy2yKxtfsxK2tQBThwq225jcvBjpw1Gr40uzZCA==", + "dev": true, + "dependencies": { + "cross-spawn": "^7.0.0", + "signal-exit": "^3.0.2" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/foreground-child/node_modules/cross-spawn": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", + "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "dev": true, + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/foreground-child/node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/foreground-child/node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/foreground-child/node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": 
"sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/foreground-child/node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/from2": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/from2/-/from2-2.3.0.tgz", + "integrity": "sha1-i/tVAr3kpNNs/e6gB/zKIdfjgq8=", + "dev": true, + "dependencies": { + "inherits": "^2.0.1", + "readable-stream": "^2.0.0" + } + }, + "node_modules/fromentries": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/fromentries/-/fromentries-1.3.2.tgz", + "integrity": "sha512-cHEpEQHUg0f8XdtZCc2ZAhrHzKzT0MrFUTcvx+hfxYu7rGMDc5SKoXFh+n4YigxsHXRzc6OrCshdR1bWH6HHyg==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/fs-extra": { + "version": "9.0.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-9.0.0.tgz", + "integrity": "sha512-pmEYSk3vYsG/bF651KPUXZ+hvjpgWYw/Gc7W9NFUe3ZVLczKKWIij3IKpOrQcdw4TILtibFslZ0UmR8Vvzig4g==", + "dev": true, + "dependencies": { + "at-least-node": "^1.0.0", + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^1.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=", + "dev": true + }, + "node_modules/fsevents": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.1.3.tgz", + "integrity": "sha512-Auw9a4AxqWpa9GUfj370BMPzzyncfBABW8Mab7BGWBYDj4Isgq+cDKtx0i6u9jcX9pQDnswsaaOTgTmA5pEjuQ==", + "deprecated": "\"Please update to latest v2.3 or v2.2\"", + "dev": true, + "hasInstallScript": true, + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/gensync": { + "version": "1.0.0-beta.2", + "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", + "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/get-caller-file": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + "dev": true, + "engines": { + "node": "6.* || 8.* || >= 10.*" + } + }, + "node_modules/get-package-type": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/get-package-type/-/get-package-type-0.1.0.tgz", + "integrity": "sha512-pjzuKtY64GYfWizNAJ0fr9VqttZkNiK2iS430LtIHzjBEr6bX8Am2zm4sW4Ro5wjWW5cAlRL1qAMTcXbjNAO2Q==", + "dev": true, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/get-stream": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz", + "integrity": 
"sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/git-log-parser": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/git-log-parser/-/git-log-parser-1.2.0.tgz", + "integrity": "sha1-LmpMGxP8AAKCB7p5WnrDFme5/Uo=", + "dev": true, + "dependencies": { + "argv-formatter": "~1.0.0", + "spawn-error-forwarder": "~1.0.0", + "split2": "~1.0.0", + "stream-combiner2": "~1.1.1", + "through2": "~2.0.0", + "traverse": "~0.6.6" + } + }, + "node_modules/git-log-parser/node_modules/split2": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/split2/-/split2-1.0.0.tgz", + "integrity": "sha1-UuLiIdiMdfmnP5BVbiY/+WdysxQ=", + "dev": true, + "dependencies": { + "through2": "~2.0.0" + } + }, + "node_modules/git-log-parser/node_modules/through2": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/through2/-/through2-2.0.5.tgz", + "integrity": "sha512-/mrRod8xqpA+IHSLyGCQ2s8SPHiCDEeQJSep1jqLYeEUClOFG2Qsh+4FU6G9VeqpZnGW/Su8LQGc4YKni5rYSQ==", + "dev": true, + "dependencies": { + "readable-stream": "~2.3.6", + "xtend": "~4.0.1" + } + }, + "node_modules/glob": { + "version": "7.1.6", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.6.tgz", + "integrity": "sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==", + "dev": true, + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.0.4", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/globals": { + "version": "11.12.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", + "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/globby": { + "version": "11.0.3", + "resolved": "https://registry.npmjs.org/globby/-/globby-11.0.3.tgz", + "integrity": "sha512-ffdmosjA807y7+lA1NM0jELARVmYul/715xiILEjo3hBLPTcirgQNnXECn5g3mtR8TOLCVbkfua1Hpen25/Xcg==", + "dev": true, + "dependencies": { + "array-union": "^2.1.0", + "dir-glob": "^3.0.1", + "fast-glob": "^3.1.1", + "ignore": "^5.1.4", + "merge2": "^1.3.0", + "slash": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/graceful-fs": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.3.tgz", + "integrity": "sha512-a30VEBm4PEdx1dRB7MFK7BejejvCvBronbLjht+sHuGYj8PHs7M/5Z+rt5lw551vZ7yfTCj4Vuyy3mSJytDWRQ==", + "dev": true + }, + "node_modules/growl": { + "version": "1.10.5", + "resolved": "https://registry.npmjs.org/growl/-/growl-1.10.5.tgz", + "integrity": "sha512-qBr4OuELkhPenW6goKVXiv47US3clb3/IbuWF9KNKEijAy9oeHxU9IgzjvJhHkUzhaj7rOUD7+YGWqUjLp5oSA==", + "dev": true, + "engines": { + "node": ">=4.x" + } + }, + "node_modules/handlebars": { + "version": 
"4.7.7", + "resolved": "https://registry.npmjs.org/handlebars/-/handlebars-4.7.7.tgz", + "integrity": "sha512-aAcXm5OAfE/8IXkcZvCepKU3VzW1/39Fb5ZuqMtgI/hT8X2YgoMvBY5dLhq/cpOvw7Lk1nK/UF71aLG/ZnVYRA==", + "dev": true, + "dependencies": { + "minimist": "^1.2.5", + "neo-async": "^2.6.0", + "source-map": "^0.6.1", + "wordwrap": "^1.0.0" + }, + "bin": { + "handlebars": "bin/handlebars" + }, + "engines": { + "node": ">=0.4.7" + }, + "optionalDependencies": { + "uglify-js": "^3.1.4" + } + }, + "node_modules/handlebars/node_modules/minimist": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz", + "integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw==", + "dev": true + }, + "node_modules/handlebars/node_modules/wordwrap": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/wordwrap/-/wordwrap-1.0.0.tgz", + "integrity": "sha1-J1hIEIkUVqQXHI0CJkQa3pDLyus=", + "dev": true + }, + "node_modules/has-flag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", + "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/hasha": { + "version": "5.2.2", + "resolved": "https://registry.npmjs.org/hasha/-/hasha-5.2.2.tgz", + "integrity": "sha512-Hrp5vIK/xr5SkeN2onO32H0MgNZ0f17HRNH39WfL0SYUNOTZ5Lz1TJ8Pajo/87dYGEFlLMm7mIc/k/s6Bvz9HQ==", + "dev": true, + "dependencies": { + "is-stream": "^2.0.0", + "type-fest": "^0.8.0" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/hasha/node_modules/is-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.0.tgz", + "integrity": "sha512-XCoy+WlUr7d1+Z8GgSuXmpuUFC9fOhRXglJMx+dwLKTkL44Cjd4W1Z5P+BQZpr+cR93aGP4S/s7Ftw6Nd/kiEw==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/hasha/node_modules/type-fest": { + "version": "0.8.1", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.8.1.tgz", + "integrity": "sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/he": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz", + "integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==", + "dev": true, + "bin": { + "he": "bin/he" + } + }, + "node_modules/hook-std": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/hook-std/-/hook-std-2.0.0.tgz", + "integrity": "sha512-zZ6T5WcuBMIUVh49iPQS9t977t7C0l7OtHrpeMb5uk48JdflRX0NSFvCekfYNmGQETnLq9W/isMyHl69kxGi8g==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/hosted-git-info": { + "version": "2.8.9", + "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-2.8.9.tgz", + "integrity": "sha512-mxIDAb9Lsm6DoOJ7xH+5+X4y1LU/4Hi50L9C5sIswK3JzULS4bwk1FvjdBgvYR4bzT4tuUQiC15FE2f5HbLvYw==", + "dev": true + }, + "node_modules/html-escaper": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz", + "integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==", + "dev": true + }, + "node_modules/http-proxy-agent": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-4.0.1.tgz", + 
"integrity": "sha512-k0zdNgqWTGA6aeIRVpvfVob4fL52dTfaehylg0Y4UvSySvOq/Y+BOyPrgpUrA7HylqvU8vIZGsRuXmspskV0Tg==", + "dev": true, + "dependencies": { + "@tootallnate/once": "1", + "agent-base": "6", + "debug": "4" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/https-proxy-agent": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-5.0.0.tgz", + "integrity": "sha512-EkYm5BcKUGiduxzSt3Eppko+PiNWNEpa4ySk9vTC6wDsQJW9rHSa+UhGNJoRYp7bz6Ht1eaRIa6QaJqO5rCFbA==", + "dev": true, + "dependencies": { + "agent-base": "6", + "debug": "4" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/human-signals": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-1.1.1.tgz", + "integrity": "sha512-SEQu7vl8KjNL2eoGBLF3+wAjpsNfA9XMlXAYj/3EdaNfAlxKthD1xjEQfGOUhllCGGJVNY34bRr6lPINhNjyZw==", + "dev": true, + "engines": { + "node": ">=8.12.0" + } + }, + "node_modules/ignore": { + "version": "5.1.8", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.1.8.tgz", + "integrity": "sha512-BMpfD7PpiETpBl/A6S498BaIJ6Y/ABT93ETbby2fP00v4EbvPBXWEoaR1UBPKs3iR53pJY7EtZk5KACI57i1Uw==", + "dev": true, + "engines": { + "node": ">= 4" + } + }, + "node_modules/import-fresh": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz", + "integrity": "sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==", + "dev": true, + "dependencies": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/import-fresh/node_modules/resolve-from": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/import-from": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/import-from/-/import-from-3.0.0.tgz", + "integrity": "sha512-CiuXOFFSzkU5x/CR0+z7T91Iht4CXgfCxVOFRhh2Zyhg5wOpWvvDLQUsWl+gcN+QscYBjez8hDCt85O7RLDttQ==", + "dev": true, + "dependencies": { + "resolve-from": "^5.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha1-khi5srkoojixPcT7a21XbyMUU+o=", + "dev": true, + "engines": { + "node": ">=0.8.19" + } + }, + "node_modules/indent-string": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-3.2.0.tgz", + "integrity": "sha1-Sl/W0nzDMvN+VBmlBNu4NxBckok=", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=", + "dev": true, + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "dev": true + }, + "node_modules/ini": { + "version": "1.3.7", + "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.7.tgz", + "integrity": 
"sha512-iKpRpXP+CrP2jyrxvg1kMUpXDyRUFDWurxbnVT1vQPx+Wz9uCYsMIqYuSBLV+PAaZG/d7kRLKRFc9oDMsH+mFQ==", + "dev": true + }, + "node_modules/into-stream": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/into-stream/-/into-stream-5.1.1.tgz", + "integrity": "sha512-krrAJ7McQxGGmvaYbB7Q1mcA+cRwg9Ij2RfWIeVesNBgVDZmzY/Fa4IpZUT3bmdRzMzdf/mzltCG2Dq99IZGBA==", + "dev": true, + "dependencies": { + "from2": "^2.3.0", + "p-is-promise": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-arrayish": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", + "integrity": "sha1-d8mYQFJ6qOyxqLppe4BkWnqSap0=", + "dev": true + }, + "node_modules/is-binary-path": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", + "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + "dev": true, + "dependencies": { + "binary-extensions": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha1-qIwCU1eR8C7TfHahueqXc8gz+MI=", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-glob": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.1.tgz", + "integrity": "sha512-5G0tKtBTFImOqDnLB2hG6Bp2qcKEFduo4tZu9MT/H6NQv/ghhy30o55ufafxJ/LdH79LLs2Kfrn85TLKyA7BUg==", + "dev": true, + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true, + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-obj": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-1.0.1.tgz", + "integrity": "sha1-PkcprB9f3gJc19g6iW2rn09n2w8=", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-plain-obj": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-1.1.0.tgz", + "integrity": "sha1-caUMhCnfync8kqOQpKA7OfzVHT4=", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-plain-object": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-5.0.0.tgz", + "integrity": "sha512-VRSzKkbMm5jMDoKLbltAkFQ5Qr7VDiTFGXxYFXXowVj387GeGNOCsOH6Msy00SGZ3Fp84b1Naa1psqgcCIEP5Q==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.0.tgz", + "integrity": "sha512-XCoy+WlUr7d1+Z8GgSuXmpuUFC9fOhRXglJMx+dwLKTkL44Cjd4W1Z5P+BQZpr+cR93aGP4S/s7Ftw6Nd/kiEw==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-text-path": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-text-path/-/is-text-path-1.0.1.tgz", + "integrity": 
"sha1-Thqg+1G/vLPpJogAE5cgLBd1tm4=", + "dev": true, + "dependencies": { + "text-extensions": "^1.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-typedarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz", + "integrity": "sha1-5HnICFjfDBsR3dppQPlgEfzaSpo=", + "dev": true + }, + "node_modules/is-windows": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-windows/-/is-windows-1.0.2.tgz", + "integrity": "sha512-eXK1UInq2bPmjyX6e3VHIzMLobc4J94i4AWn+Hpq3OU5KkrRC96OAcR3PRJ/pGu6m8TRnBHP9dkXQVsT/COVIA==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/isarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", + "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=", + "dev": true + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=", + "dev": true + }, + "node_modules/issue-parser": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/issue-parser/-/issue-parser-6.0.0.tgz", + "integrity": "sha512-zKa/Dxq2lGsBIXQ7CUZWTHfvxPC2ej0KfO7fIPqLlHB9J2hJ7rGhZ5rilhuufylr4RXYPzJUeFjKxz305OsNlA==", + "dev": true, + "dependencies": { + "lodash.capitalize": "^4.2.1", + "lodash.escaperegexp": "^4.1.2", + "lodash.isplainobject": "^4.0.6", + "lodash.isstring": "^4.0.1", + "lodash.uniqby": "^4.7.0" + }, + "engines": { + "node": ">=10.13" + } + }, + "node_modules/istanbul-lib-coverage": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/istanbul-lib-coverage/-/istanbul-lib-coverage-3.0.0.tgz", + "integrity": "sha512-UiUIqxMgRDET6eR+o5HbfRYP1l0hqkWOs7vNxC/mggutCMUIhWMm8gAHb8tHlyfD3/l6rlgNA5cKdDzEAf6hEg==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/istanbul-lib-hook": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/istanbul-lib-hook/-/istanbul-lib-hook-3.0.0.tgz", + "integrity": "sha512-Pt/uge1Q9s+5VAZ+pCo16TYMWPBIl+oaNIjgLQxcX0itS6ueeaA+pEfThZpH8WxhFgCiEb8sAJY6MdUKgiIWaQ==", + "dev": true, + "dependencies": { + "append-transform": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/istanbul-lib-instrument": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-4.0.3.tgz", + "integrity": "sha512-BXgQl9kf4WTCPCCpmFGoJkz/+uhvm7h7PFKUYxh7qarQd3ER33vHG//qaE8eN25l07YqZPpHXU9I09l/RD5aGQ==", + "dev": true, + "dependencies": { + "@babel/core": "^7.7.5", + "@istanbuljs/schema": "^0.1.2", + "istanbul-lib-coverage": "^3.0.0", + "semver": "^6.3.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/istanbul-lib-instrument/node_modules/semver": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", + "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "dev": true, + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/istanbul-lib-processinfo": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/istanbul-lib-processinfo/-/istanbul-lib-processinfo-2.0.2.tgz", + "integrity": "sha512-kOwpa7z9hme+IBPZMzQ5vdQj8srYgAtaRqeI48NGmAQ+/5yKiHLV0QbYqQpxsdEF0+w14SoB8YbnHKcXE2KnYw==", + "dev": true, + "dependencies": { + "archy": "^1.0.0", + "cross-spawn": "^7.0.0", + "istanbul-lib-coverage": "^3.0.0-alpha.1", + "make-dir": "^3.0.0", + "p-map": "^3.0.0", + "rimraf": "^3.0.0", 
+ "uuid": "^3.3.3" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/istanbul-lib-processinfo/node_modules/cross-spawn": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", + "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "dev": true, + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/istanbul-lib-processinfo/node_modules/p-map": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-map/-/p-map-3.0.0.tgz", + "integrity": "sha512-d3qXVTF/s+W+CdJ5A29wywV2n8CQQYahlgz2bFiA+4eVNJbHJodPZ+/gXwPGh0bOqA+j8S+6+ckmvLGPk1QpxQ==", + "dev": true, + "dependencies": { + "aggregate-error": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/istanbul-lib-processinfo/node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/istanbul-lib-processinfo/node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/istanbul-lib-processinfo/node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/istanbul-lib-processinfo/node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/istanbul-lib-report": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/istanbul-lib-report/-/istanbul-lib-report-3.0.0.tgz", + "integrity": "sha512-wcdi+uAKzfiGT2abPpKZ0hSU1rGQjUQnLvtY5MpQ7QCTahD3VODhcu4wcfY1YtkGaDD5yuydOLINXsfbus9ROw==", + "dev": true, + "dependencies": { + "istanbul-lib-coverage": "^3.0.0", + "make-dir": "^3.0.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/istanbul-lib-report/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/istanbul-lib-report/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + 
"node_modules/istanbul-lib-source-maps": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/istanbul-lib-source-maps/-/istanbul-lib-source-maps-4.0.0.tgz", + "integrity": "sha512-c16LpFRkR8vQXyHZ5nLpY35JZtzj1PQY1iZmesUbf1FZHbIupcWfjgOXBY9YHkLEQ6puz1u4Dgj6qmU/DisrZg==", + "dev": true, + "dependencies": { + "debug": "^4.1.1", + "istanbul-lib-coverage": "^3.0.0", + "source-map": "^0.6.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/istanbul-reports": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-3.0.2.tgz", + "integrity": "sha512-9tZvz7AiR3PEDNGiV9vIouQ/EAcqMXFmkcA1CDFTwOB98OZVDL0PH9glHotf5Ugp6GCOTypfzGWI/OqjWNCRUw==", + "dev": true, + "dependencies": { + "html-escaper": "^2.0.0", + "istanbul-lib-report": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/java-properties": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/java-properties/-/java-properties-1.0.2.tgz", + "integrity": "sha512-qjdpeo2yKlYTH7nFdK0vbZWuTCesk4o63v5iVOlhMQPfuIZQfW/HI35SjfhA+4qpg36rnFSvUK5b1m+ckIblQQ==", + "dev": true, + "engines": { + "node": ">= 0.6.0" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "dev": true + }, + "node_modules/js-yaml": { + "version": "3.14.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.0.tgz", + "integrity": "sha512-/4IbIeHcD9VMHFqDR/gQ7EdZdLimOvW2DdcxFjdyyZ9NsbS+ccrXqVWDtab/lRl5AlUqmpBx8EhPaWR+OtY17A==", + "dev": true, + "dependencies": { + "argparse": "^1.0.7", + "esprima": "^4.0.0" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/jsesc": { + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz", + "integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==", + "dev": true, + "bin": { + "jsesc": "bin/jsesc" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/json-parse-better-errors": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/json-parse-better-errors/-/json-parse-better-errors-1.0.2.tgz", + "integrity": "sha512-mrqyZKfX5EhL7hvqcV6WG1yYjnjeuYDzDhhcAAUrq8Po85NBQBJP+ZDUT75qZQ98IkUoBqdkExkukOU7Ts2wrw==", + "dev": true + }, + "node_modules/json-parse-even-better-errors": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", + "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==", + "dev": true + }, + "node_modules/json-stringify-safe": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz", + "integrity": "sha1-Epai1Y/UXxmg9s4B1lcB4sc1tus=", + "dev": true + }, + "node_modules/json5": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.0.tgz", + "integrity": "sha512-f+8cldu7X/y7RAJurMEJmdoKXGB/X550w2Nr3tTbezL6RwEE/iMcm+tZnXeoZtKuOq6ft8+CqzEkrIgx1fPoQA==", + "dev": true, + "dependencies": { + "minimist": "^1.2.5" + }, + "bin": { + "json5": "lib/cli.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/json5/node_modules/minimist": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz", + "integrity": 
"sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw==", + "dev": true + }, + "node_modules/jsonfile": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.0.1.tgz", + "integrity": "sha512-jR2b5v7d2vIOust+w3wtFKZIfpC2pnRmFAhAC/BuweZFQR8qZzxH1OyrQ10HmdVYiXWkYUqPVsz91cG7EL2FBg==", + "dev": true, + "dependencies": { + "universalify": "^1.0.0" + }, + "optionalDependencies": { + "graceful-fs": "^4.1.6" + } + }, + "node_modules/jsonparse": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/jsonparse/-/jsonparse-1.3.1.tgz", + "integrity": "sha1-P02uSpH6wxX3EGL4UhzCOfE2YoA=", + "dev": true, + "engines": [ + "node >= 0.2.0" + ] + }, + "node_modules/JSONStream": { + "version": "1.3.5", + "resolved": "https://registry.npmjs.org/JSONStream/-/JSONStream-1.3.5.tgz", + "integrity": "sha512-E+iruNOY8VV9s4JEbe1aNEm6MiszPRr/UfcHMz0TQh1BXSxHK+ASV1R6W4HpjBhSeS+54PIsAMCBmwD06LLsqQ==", + "dev": true, + "dependencies": { + "jsonparse": "^1.2.0", + "through": ">=2.2.7 <3" + }, + "bin": { + "JSONStream": "bin.js" + }, + "engines": { + "node": "*" + } + }, + "node_modules/lines-and-columns": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.1.6.tgz", + "integrity": "sha1-HADHQ7QzzQpOgHWPe2SldEDZ/wA=", + "dev": true + }, + "node_modules/load-json-file": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/load-json-file/-/load-json-file-4.0.0.tgz", + "integrity": "sha1-L19Fq5HjMhYjT9U62rZo607AmTs=", + "dev": true, + "dependencies": { + "graceful-fs": "^4.1.2", + "parse-json": "^4.0.0", + "pify": "^3.0.0", + "strip-bom": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/locate-path": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-2.0.0.tgz", + "integrity": "sha1-K1aLJl7slExtnA3pw9u7ygNUzY4=", + "dev": true, + "dependencies": { + "p-locate": "^2.0.0", + "path-exists": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/lodash": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", + "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", + "dev": true + }, + "node_modules/lodash.capitalize": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/lodash.capitalize/-/lodash.capitalize-4.2.1.tgz", + "integrity": "sha1-+CbJtOKoUR2E46yinbBeGk87cqk=", + "dev": true + }, + "node_modules/lodash.escaperegexp": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/lodash.escaperegexp/-/lodash.escaperegexp-4.1.2.tgz", + "integrity": "sha1-ZHYsSGGAglGKw99Mz11YhtriA0c=", + "dev": true + }, + "node_modules/lodash.flattendeep": { + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/lodash.flattendeep/-/lodash.flattendeep-4.4.0.tgz", + "integrity": "sha1-+wMJF/hqMTTlvJvsDWngAT3f7bI=", + "dev": true + }, + "node_modules/lodash.ismatch": { + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/lodash.ismatch/-/lodash.ismatch-4.4.0.tgz", + "integrity": "sha1-dWy1FQyjum8RCFp4hJZF8Yj4Xzc=", + "dev": true + }, + "node_modules/lodash.isplainobject": { + "version": "4.0.6", + "resolved": "https://registry.npmjs.org/lodash.isplainobject/-/lodash.isplainobject-4.0.6.tgz", + "integrity": "sha1-fFJqUtibRcRcxpC4gWO+BJf1UMs=", + "dev": true + }, + "node_modules/lodash.isstring": { + "version": "4.0.1", + "resolved": 
"https://registry.npmjs.org/lodash.isstring/-/lodash.isstring-4.0.1.tgz", + "integrity": "sha1-1SfftUVuynzJu5XV2ur4i6VKVFE=", + "dev": true + }, + "node_modules/lodash.toarray": { + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/lodash.toarray/-/lodash.toarray-4.4.0.tgz", + "integrity": "sha1-JMS/zWsvuji/0FlNsRedjptlZWE=", + "dev": true + }, + "node_modules/lodash.uniqby": { + "version": "4.7.0", + "resolved": "https://registry.npmjs.org/lodash.uniqby/-/lodash.uniqby-4.7.0.tgz", + "integrity": "sha1-2ZwHpmnp5tJOE2Lf4mbGdhavEwI=", + "dev": true + }, + "node_modules/log-symbols": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-4.0.0.tgz", + "integrity": "sha512-FN8JBzLx6CzeMrB0tg6pqlGU1wCrXW+ZXGH481kfsBqer0hToTIiHdjH4Mq8xJUbvATujKCvaREGWpGUionraA==", + "dev": true, + "dependencies": { + "chalk": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/log-symbols/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/log-symbols/node_modules/chalk": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.0.tgz", + "integrity": "sha512-qwx12AxXe2Q5xQ43Ac//I6v5aXTipYrSESdOgzrN+9XjgEpyjpKuvSGaN4qE93f7TQTlerQQ8S+EQ0EyDoVL1A==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/log-symbols/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/log-symbols/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/log-symbols/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/log-symbols/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/loud-rejection": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/loud-rejection/-/loud-rejection-1.6.0.tgz", + "integrity": "sha1-W0b4AUft7leIcPCG0Eghz5mOVR8=", + "dev": true, + "dependencies": { + "currently-unhandled": "^0.4.1", + "signal-exit": "^3.0.0" + }, + "engines": { + "node": ">=0.10.0" 
+ } + }, + "node_modules/lru-cache": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", + "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", + "dev": true, + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/make-dir": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-3.1.0.tgz", + "integrity": "sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw==", + "dev": true, + "dependencies": { + "semver": "^6.0.0" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/make-dir/node_modules/semver": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", + "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "dev": true, + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/map-obj": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/map-obj/-/map-obj-2.0.0.tgz", + "integrity": "sha1-plzSkIepJZi4eRJXpSPgISIqwfk=", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/marked": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/marked/-/marked-2.0.3.tgz", + "integrity": "sha512-5otztIIcJfPc2qGTN8cVtOJEjNJZ0jwa46INMagrYfk0EvqtRuEHLsEe0LrFS0/q+ZRKT0+kXK7P2T1AN5lWRA==", + "dev": true, + "bin": { + "marked": "bin/marked" + }, + "engines": { + "node": ">= 8.16.2" + } + }, + "node_modules/marked-terminal": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/marked-terminal/-/marked-terminal-4.1.1.tgz", + "integrity": "sha512-t7Mdf6T3PvOEyN01c3tYxDzhyKZ8xnkp8Rs6Fohno63L/0pFTJ5Qtwto2AQVuDtbQiWzD+4E5AAu1Z2iLc8miQ==", + "dev": true, + "dependencies": { + "ansi-escapes": "^4.3.1", + "cardinal": "^2.1.1", + "chalk": "^4.1.0", + "cli-table": "^0.3.1", + "node-emoji": "^1.10.0", + "supports-hyperlinks": "^2.1.0" + }, + "peerDependencies": { + "marked": "^1.0.0 || ^2.0.0" + } + }, + "node_modules/marked-terminal/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/marked-terminal/node_modules/chalk": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.1.tgz", + "integrity": "sha512-diHzdDKxcU+bAsUboHLPEDQiw0qEe0qd7SYUn3HgcFlWgbDcfLGswOHYeGrHKzG9z6UYf01d9VFMfZxPM1xZSg==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/marked-terminal/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/marked-terminal/node_modules/color-name": { 
+ "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/marked-terminal/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/marked-terminal/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/meow": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/meow/-/meow-5.0.0.tgz", + "integrity": "sha512-CbTqYU17ABaLefO8vCU153ZZlprKYWDljcndKKDCFcYQITzWCXZAVk4QMFZPgvzrnUQ3uItnIE/LoUOwrT15Ig==", + "dev": true, + "dependencies": { + "camelcase-keys": "^4.0.0", + "decamelize-keys": "^1.0.0", + "loud-rejection": "^1.0.0", + "minimist-options": "^3.0.1", + "normalize-package-data": "^2.3.4", + "read-pkg-up": "^3.0.0", + "redent": "^2.0.0", + "trim-newlines": "^2.0.0", + "yargs-parser": "^10.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/merge-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", + "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==", + "dev": true + }, + "node_modules/merge2": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", + "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", + "dev": true, + "engines": { + "node": ">= 8" + } + }, + "node_modules/micromatch": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.2.tgz", + "integrity": "sha512-y7FpHSbMUMoyPbYUSzO6PaZ6FyRnQOpHuKwbo1G+Knck95XVU4QAiKdGEnj5wwoS7PlOgthX/09u5iFJ+aYf5Q==", + "dev": true, + "dependencies": { + "braces": "^3.0.1", + "picomatch": "^2.0.5" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/mime": { + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/mime/-/mime-2.5.2.tgz", + "integrity": "sha512-tqkh47FzKeCPD2PUiPB6pkbMzsCasjxAfC62/Wap5qrUWcb+sFasXUC5I3gYM5iBM8v/Qpn4UK0x+j0iHyFPDg==", + "dev": true, + "bin": { + "mime": "cli.js" + }, + "engines": { + "node": ">=4.0.0" + } + }, + "node_modules/mimic-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", + "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/minimatch": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", + "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", + "dev": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/minimist-options": { + "version": "3.0.2", + "resolved": 
"https://registry.npmjs.org/minimist-options/-/minimist-options-3.0.2.tgz", + "integrity": "sha512-FyBrT/d0d4+uiZRbqznPXqw3IpZZG3gl3wKWiX784FycUKVwBt0uLBFkQrtE4tZOrgo78nZp2jnKz3L65T5LdQ==", + "dev": true, + "dependencies": { + "arrify": "^1.0.1", + "is-plain-obj": "^1.1.0" + }, + "engines": { + "node": ">= 4" + } + }, + "node_modules/mocha": { + "version": "8.2.1", + "resolved": "https://registry.npmjs.org/mocha/-/mocha-8.2.1.tgz", + "integrity": "sha512-cuLBVfyFfFqbNR0uUKbDGXKGk+UDFe6aR4os78XIrMQpZl/nv7JYHcvP5MFIAb374b2zFXsdgEGwmzMtP0Xg8w==", + "dev": true, + "dependencies": { + "@ungap/promise-all-settled": "1.1.2", + "ansi-colors": "4.1.1", + "browser-stdout": "1.3.1", + "chokidar": "3.4.3", + "debug": "4.2.0", + "diff": "4.0.2", + "escape-string-regexp": "4.0.0", + "find-up": "5.0.0", + "glob": "7.1.6", + "growl": "1.10.5", + "he": "1.2.0", + "js-yaml": "3.14.0", + "log-symbols": "4.0.0", + "minimatch": "3.0.4", + "ms": "2.1.2", + "nanoid": "3.1.12", + "serialize-javascript": "5.0.1", + "strip-json-comments": "3.1.1", + "supports-color": "7.2.0", + "which": "2.0.2", + "wide-align": "1.1.3", + "workerpool": "6.0.2", + "yargs": "13.3.2", + "yargs-parser": "13.1.2", + "yargs-unparser": "2.0.0" + }, + "bin": { + "_mocha": "bin/_mocha", + "mocha": "bin/mocha" + }, + "engines": { + "node": ">= 10.12.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/mochajs" + } + }, + "node_modules/mocha/node_modules/ansi-regex": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz", + "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/mocha/node_modules/camelcase": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", + "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/mocha/node_modules/cliui": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-5.0.0.tgz", + "integrity": "sha512-PYeGSEmmHM6zvoef2w8TPzlrnNpXIjTipYK780YswmIP9vjxmd6Y2a3CB2Ks6/AU8NHjZugXvo8w3oWM2qnwXA==", + "dev": true, + "dependencies": { + "string-width": "^3.1.0", + "strip-ansi": "^5.2.0", + "wrap-ansi": "^5.1.0" + } + }, + "node_modules/mocha/node_modules/emoji-regex": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-7.0.3.tgz", + "integrity": "sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA==", + "dev": true + }, + "node_modules/mocha/node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/mocha/node_modules/find-up": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dev": true, + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": 
"https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/mocha/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/mocha/node_modules/is-fullwidth-code-point": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", + "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/mocha/node_modules/locate-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dev": true, + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/mocha/node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dev": true, + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/mocha/node_modules/p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dev": true, + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/mocha/node_modules/p-try": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/mocha/node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/mocha/node_modules/string-width": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz", + "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==", + "dev": true, + "dependencies": { + "emoji-regex": "^7.0.1", + "is-fullwidth-code-point": "^2.0.0", + "strip-ansi": "^5.1.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/mocha/node_modules/strip-ansi": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz", + "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==", + "dev": true, + "dependencies": { + "ansi-regex": "^4.1.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/mocha/node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": 
"https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "dev": true, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/mocha/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/mocha/node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/mocha/node_modules/wrap-ansi": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-5.1.0.tgz", + "integrity": "sha512-QC1/iN/2/RPVJ5jYK8BGttj5z83LmSKmvbvrXPNCLZSEb32KKVDJDl/MOt2N01qU2H/FkzEa9PKto1BqDjtd7Q==", + "dev": true, + "dependencies": { + "ansi-styles": "^3.2.0", + "string-width": "^3.0.0", + "strip-ansi": "^5.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/mocha/node_modules/yargs": { + "version": "13.3.2", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-13.3.2.tgz", + "integrity": "sha512-AX3Zw5iPruN5ie6xGRIDgqkT+ZhnRlZMLMHAs8tg7nRruy2Nb+i5o9bwghAogtM08q1dpr2LVoS8KSTMYpWXUw==", + "dev": true, + "dependencies": { + "cliui": "^5.0.0", + "find-up": "^3.0.0", + "get-caller-file": "^2.0.1", + "require-directory": "^2.1.1", + "require-main-filename": "^2.0.0", + "set-blocking": "^2.0.0", + "string-width": "^3.0.0", + "which-module": "^2.0.0", + "y18n": "^4.0.0", + "yargs-parser": "^13.1.2" + } + }, + "node_modules/mocha/node_modules/yargs-parser": { + "version": "13.1.2", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-13.1.2.tgz", + "integrity": "sha512-3lbsNRf/j+A4QuSZfDRA7HRSfWrzO0YjqTJd5kjAq37Zep1CEgaYmrH9Q3GwPiB9cHyd1Y1UwggGhJGoxipbzg==", + "dev": true, + "dependencies": { + "camelcase": "^5.0.0", + "decamelize": "^1.2.0" + } + }, + "node_modules/mocha/node_modules/yargs/node_modules/find-up": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", + "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", + "dev": true, + "dependencies": { + "locate-path": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/mocha/node_modules/yargs/node_modules/locate-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", + "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", + "dev": true, + "dependencies": { + "p-locate": "^3.0.0", + "path-exists": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/mocha/node_modules/yargs/node_modules/p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "dev": true, + 
"dependencies": { + "p-try": "^2.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/mocha/node_modules/yargs/node_modules/p-locate": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", + "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", + "dev": true, + "dependencies": { + "p-limit": "^2.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/mocha/node_modules/yargs/node_modules/path-exists": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", + "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/modify-values": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/modify-values/-/modify-values-1.0.1.tgz", + "integrity": "sha512-xV2bxeN6F7oYjZWTe/YPAy6MN2M+sL4u/Rlm2AHCIVGfo2p1yGmBHQ6vHehl4bRTZBdHu3TSkWdYgkwpYzAGSw==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "dev": true + }, + "node_modules/nanoid": { + "version": "3.1.12", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.1.12.tgz", + "integrity": "sha512-1qstj9z5+x491jfiC4Nelk+f8XBad7LN20PmyWINJEMRSf3wcAjAWysw1qaA8z6NSKe2sjq1hRSDpBH5paCb6A==", + "dev": true, + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || >=13.7" + } + }, + "node_modules/neo-async": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.1.tgz", + "integrity": "sha512-iyam8fBuCUpWeKPGpaNMetEocMt364qkCsfL9JuhjXX6dRnguRVOfk2GZaDpPjcOKiiXCPINZC1GczQ7iTq3Zw==", + "dev": true + }, + "node_modules/nerf-dart": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/nerf-dart/-/nerf-dart-1.0.0.tgz", + "integrity": "sha1-5tq3/r9a2Bbqgc9cYpxaDr3nLBo=", + "dev": true + }, + "node_modules/node-emoji": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/node-emoji/-/node-emoji-1.10.0.tgz", + "integrity": "sha512-Yt3384If5H6BYGVHiHwTL+99OzJKHhgp82S8/dktEK73T26BazdgZ4JZh92xSVtGNJvz9UbXdNAc5hcrXV42vw==", + "dev": true, + "dependencies": { + "lodash.toarray": "^4.4.0" + } + }, + "node_modules/node-fetch": { + "version": "2.6.7", + "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.7.tgz", + "integrity": "sha512-ZjMPFEfVx5j+y2yF35Kzx5sF7kDzxuDj6ziH4FFbOp87zKDZNx8yExJIb05OGF4Nlt9IHFIMBkRl41VdvcNdbQ==", + "dev": true, + "dependencies": { + "whatwg-url": "^5.0.0" + }, + "engines": { + "node": "4.x || >=6.0.0" + }, + "peerDependencies": { + "encoding": "^0.1.0" + }, + "peerDependenciesMeta": { + "encoding": { + "optional": true + } + } + }, + "node_modules/node-preload": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/node-preload/-/node-preload-0.2.1.tgz", + "integrity": "sha512-RM5oyBy45cLEoHqCeh+MNuFAxO0vTFBLskvQbOKnEE7YTTSN4tbN8QWDIPQ6L+WvKsB/qLEGpYe2ZZ9d4W9OIQ==", + "dev": true, + "dependencies": { + "process-on-spawn": "^1.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/normalize-package-data": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-2.5.0.tgz", + "integrity": 
"sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA==", + "dev": true, + "dependencies": { + "hosted-git-info": "^2.1.4", + "resolve": "^1.10.0", + "semver": "2 || 3 || 4 || 5", + "validate-npm-package-license": "^3.0.1" + } + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/normalize-url": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-5.3.1.tgz", + "integrity": "sha512-K1c7+vaAP+Yh5bOGmA10PGPpp+6h7WZrl7GwqKhUflBc9flU9pzG27DDeB9+iuhZkE3BJZOcgN1P/2sS5pqrWw==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/npm": { + "version": "6.14.6", + "resolved": "https://registry.npmjs.org/npm/-/npm-6.14.6.tgz", + "integrity": "sha512-axnz6iHFK6WPE0js/+mRp+4IOwpHn5tJEw5KB6FiCU764zmffrhsYHbSHi2kKqNkRBt53XasXjngZfBD3FQzrQ==", + "bundleDependencies": [ + "abbrev", + "ansicolors", + "ansistyles", + "aproba", + "archy", + "bin-links", + "bluebird", + "byte-size", + "cacache", + "call-limit", + "chownr", + "ci-info", + "cli-columns", + "cli-table3", + "cmd-shim", + "columnify", + "config-chain", + "debuglog", + "detect-indent", + "detect-newline", + "dezalgo", + "editor", + "figgy-pudding", + "find-npm-prefix", + "fs-vacuum", + "fs-write-stream-atomic", + "gentle-fs", + "glob", + "graceful-fs", + "has-unicode", + "hosted-git-info", + "iferr", + "imurmurhash", + "infer-owner", + "inflight", + "inherits", + "ini", + "init-package-json", + "is-cidr", + "json-parse-better-errors", + "JSONStream", + "lazy-property", + "libcipm", + "libnpm", + "libnpmaccess", + "libnpmhook", + "libnpmorg", + "libnpmsearch", + "libnpmteam", + "libnpx", + "lock-verify", + "lockfile", + "lodash._baseindexof", + "lodash._baseuniq", + "lodash._bindcallback", + "lodash._cacheindexof", + "lodash._createcache", + "lodash._getnative", + "lodash.clonedeep", + "lodash.restparam", + "lodash.union", + "lodash.uniq", + "lodash.without", + "lru-cache", + "meant", + "mississippi", + "mkdirp", + "move-concurrently", + "node-gyp", + "nopt", + "normalize-package-data", + "npm-audit-report", + "npm-cache-filename", + "npm-install-checks", + "npm-lifecycle", + "npm-package-arg", + "npm-packlist", + "npm-pick-manifest", + "npm-profile", + "npm-registry-fetch", + "npm-user-validate", + "npmlog", + "once", + "opener", + "osenv", + "pacote", + "path-is-inside", + "promise-inflight", + "qrcode-terminal", + "query-string", + "qw", + "read-cmd-shim", + "read-installed", + "read-package-json", + "read-package-tree", + "read", + "readable-stream", + "readdir-scoped-modules", + "request", + "retry", + "rimraf", + "safe-buffer", + "semver", + "sha", + "slide", + "sorted-object", + "sorted-union-stream", + "ssri", + "stringify-package", + "tar", + "text-table", + "tiny-relative-date", + "uid-number", + "umask", + "unique-filename", + "unpipe", + "update-notifier", + "uuid", + "validate-npm-package-license", + "validate-npm-package-name", + "which", + "worker-farm", + "write-file-atomic" + ], + "dev": true, + "dependencies": { + "abbrev": "~1.1.1", + "ansicolors": "~0.3.2", + "ansistyles": "~0.1.3", + "aproba": "^2.0.0", + "archy": "~1.0.0", + "bin-links": "^1.1.7", + "bluebird": "^3.5.5", + 
"byte-size": "^5.0.1", + "cacache": "^12.0.3", + "call-limit": "^1.1.1", + "chownr": "^1.1.4", + "ci-info": "^2.0.0", + "cli-columns": "^3.1.2", + "cli-table3": "^0.5.1", + "cmd-shim": "^3.0.3", + "columnify": "~1.5.4", + "config-chain": "^1.1.12", + "debuglog": "*", + "detect-indent": "~5.0.0", + "detect-newline": "^2.1.0", + "dezalgo": "~1.0.3", + "editor": "~1.0.0", + "figgy-pudding": "^3.5.1", + "find-npm-prefix": "^1.0.2", + "fs-vacuum": "~1.2.10", + "fs-write-stream-atomic": "~1.0.10", + "gentle-fs": "^2.3.0", + "glob": "^7.1.6", + "graceful-fs": "^4.2.4", + "has-unicode": "~2.0.1", + "hosted-git-info": "^2.8.8", + "iferr": "^1.0.2", + "imurmurhash": "*", + "infer-owner": "^1.0.4", + "inflight": "~1.0.6", + "inherits": "^2.0.4", + "ini": "^1.3.5", + "init-package-json": "^1.10.3", + "is-cidr": "^3.0.0", + "json-parse-better-errors": "^1.0.2", + "JSONStream": "^1.3.5", + "lazy-property": "~1.0.0", + "libcipm": "^4.0.7", + "libnpm": "^3.0.1", + "libnpmaccess": "^3.0.2", + "libnpmhook": "^5.0.3", + "libnpmorg": "^1.0.1", + "libnpmsearch": "^2.0.2", + "libnpmteam": "^1.0.2", + "libnpx": "^10.2.2", + "lock-verify": "^2.1.0", + "lockfile": "^1.0.4", + "lodash._baseindexof": "*", + "lodash._baseuniq": "~4.6.0", + "lodash._bindcallback": "*", + "lodash._cacheindexof": "*", + "lodash._createcache": "*", + "lodash._getnative": "*", + "lodash.clonedeep": "~4.5.0", + "lodash.restparam": "*", + "lodash.union": "~4.6.0", + "lodash.uniq": "~4.5.0", + "lodash.without": "~4.4.0", + "lru-cache": "^5.1.1", + "meant": "~1.0.1", + "mississippi": "^3.0.0", + "mkdirp": "^0.5.5", + "move-concurrently": "^1.0.1", + "node-gyp": "^5.1.0", + "nopt": "^4.0.3", + "normalize-package-data": "^2.5.0", + "npm-audit-report": "^1.3.2", + "npm-cache-filename": "~1.0.2", + "npm-install-checks": "^3.0.2", + "npm-lifecycle": "^3.1.4", + "npm-package-arg": "^6.1.1", + "npm-packlist": "^1.4.8", + "npm-pick-manifest": "^3.0.2", + "npm-profile": "^4.0.4", + "npm-registry-fetch": "^4.0.5", + "npm-user-validate": "~1.0.0", + "npmlog": "~4.1.2", + "once": "~1.4.0", + "opener": "^1.5.1", + "osenv": "^0.1.5", + "pacote": "^9.5.12", + "path-is-inside": "~1.0.2", + "promise-inflight": "~1.0.1", + "qrcode-terminal": "^0.12.0", + "query-string": "^6.8.2", + "qw": "~1.0.1", + "read": "~1.0.7", + "read-cmd-shim": "^1.0.5", + "read-installed": "~4.0.3", + "read-package-json": "^2.1.1", + "read-package-tree": "^5.3.1", + "readable-stream": "^3.6.0", + "readdir-scoped-modules": "^1.1.0", + "request": "^2.88.0", + "retry": "^0.12.0", + "rimraf": "^2.7.1", + "safe-buffer": "^5.1.2", + "semver": "^5.7.1", + "sha": "^3.0.0", + "slide": "~1.1.6", + "sorted-object": "~2.0.1", + "sorted-union-stream": "~2.1.3", + "ssri": "^6.0.1", + "stringify-package": "^1.0.1", + "tar": "^4.4.13", + "text-table": "~0.2.0", + "tiny-relative-date": "^1.3.0", + "uid-number": "0.0.6", + "umask": "~1.1.0", + "unique-filename": "^1.1.1", + "unpipe": "~1.0.0", + "update-notifier": "^2.5.0", + "uuid": "^3.3.3", + "validate-npm-package-license": "^3.0.4", + "validate-npm-package-name": "~3.0.0", + "which": "^1.3.1", + "worker-farm": "^1.7.0", + "write-file-atomic": "^2.4.3" + }, + "bin": { + "npm": "bin/npm-cli.js", + "npx": "bin/npx-cli.js" + }, + "engines": { + "node": "6 >=6.2.0 || 8 || >=9.3.0" + } + }, + "node_modules/npm-run-path": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", + "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", + "dev": true, + 
"dependencies": { + "path-key": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/npm/node_modules/abbrev": { + "version": "1.1.1", + "dev": true, + "inBundle": true, + "license": "ISC" + }, + "node_modules/npm/node_modules/agent-base": { + "version": "4.3.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "es6-promisify": "^5.0.0" + }, + "engines": { + "node": ">= 4.0.0" + } + }, + "node_modules/npm/node_modules/agentkeepalive": { + "version": "3.5.2", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "humanize-ms": "^1.2.1" + }, + "engines": { + "node": ">= 4.0.0" + } + }, + "node_modules/npm/node_modules/ajv": { + "version": "5.5.2", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "co": "^4.6.0", + "fast-deep-equal": "^1.0.0", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.3.0" + } + }, + "node_modules/npm/node_modules/ansi-align": { + "version": "2.0.0", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "string-width": "^2.0.0" + } + }, + "node_modules/npm/node_modules/ansi-regex": { + "version": "2.1.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm/node_modules/ansi-styles": { + "version": "3.2.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "color-convert": "^1.9.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/npm/node_modules/ansicolors": { + "version": "0.3.2", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/ansistyles": { + "version": "0.1.3", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/aproba": { + "version": "2.0.0", + "dev": true, + "inBundle": true, + "license": "ISC" + }, + "node_modules/npm/node_modules/archy": { + "version": "1.0.0", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/are-we-there-yet": { + "version": "1.1.4", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "delegates": "^1.0.0", + "readable-stream": "^2.0.6" + } + }, + "node_modules/npm/node_modules/are-we-there-yet/node_modules/readable-stream": { + "version": "2.3.6", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "node_modules/npm/node_modules/are-we-there-yet/node_modules/string_decoder": { + "version": "1.1.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "safe-buffer": "~5.1.0" + } + }, + "node_modules/npm/node_modules/asap": { + "version": "2.0.6", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/asn1": { + "version": "0.2.4", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "safer-buffer": "~2.1.0" + } + }, + "node_modules/npm/node_modules/assert-plus": { + "version": "1.0.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=0.8" + } + }, + "node_modules/npm/node_modules/asynckit": { + "version": "0.4.0", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/aws-sign2": { + "version": "0.7.0", + "dev": true, + "inBundle": true, + "license": "Apache-2.0", + 
"engines": { + "node": "*" + } + }, + "node_modules/npm/node_modules/aws4": { + "version": "1.8.0", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/balanced-match": { + "version": "1.0.0", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/bcrypt-pbkdf": { + "version": "1.0.2", + "dev": true, + "inBundle": true, + "license": "BSD-3-Clause", + "optional": true, + "dependencies": { + "tweetnacl": "^0.14.3" + } + }, + "node_modules/npm/node_modules/bin-links": { + "version": "1.1.7", + "dev": true, + "inBundle": true, + "license": "Artistic-2.0", + "dependencies": { + "bluebird": "^3.5.3", + "cmd-shim": "^3.0.0", + "gentle-fs": "^2.3.0", + "graceful-fs": "^4.1.15", + "npm-normalize-package-bin": "^1.0.0", + "write-file-atomic": "^2.3.0" + } + }, + "node_modules/npm/node_modules/bluebird": { + "version": "3.5.5", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/boxen": { + "version": "1.3.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "ansi-align": "^2.0.0", + "camelcase": "^4.0.0", + "chalk": "^2.0.1", + "cli-boxes": "^1.0.0", + "string-width": "^2.0.0", + "term-size": "^1.2.0", + "widest-line": "^2.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/npm/node_modules/brace-expansion": { + "version": "1.1.11", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/npm/node_modules/buffer-from": { + "version": "1.0.0", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/builtins": { + "version": "1.0.3", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/byline": { + "version": "5.0.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm/node_modules/byte-size": { + "version": "5.0.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/npm/node_modules/cacache": { + "version": "12.0.3", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "bluebird": "^3.5.5", + "chownr": "^1.1.1", + "figgy-pudding": "^3.5.1", + "glob": "^7.1.4", + "graceful-fs": "^4.1.15", + "infer-owner": "^1.0.3", + "lru-cache": "^5.1.1", + "mississippi": "^3.0.0", + "mkdirp": "^0.5.1", + "move-concurrently": "^1.0.1", + "promise-inflight": "^1.0.1", + "rimraf": "^2.6.3", + "ssri": "^6.0.1", + "unique-filename": "^1.1.1", + "y18n": "^4.0.0" + } + }, + "node_modules/npm/node_modules/call-limit": { + "version": "1.1.1", + "dev": true, + "inBundle": true, + "license": "ISC" + }, + "node_modules/npm/node_modules/camelcase": { + "version": "4.1.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/npm/node_modules/capture-stack-trace": { + "version": "1.0.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm/node_modules/caseless": { + "version": "0.12.0", + "dev": true, + "inBundle": true, + "license": "Apache-2.0" + }, + "node_modules/npm/node_modules/chalk": { + "version": "2.4.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + }, + "engines": { + "node": ">=4" + } + }, + 
"node_modules/npm/node_modules/chownr": { + "version": "1.1.4", + "dev": true, + "inBundle": true, + "license": "ISC" + }, + "node_modules/npm/node_modules/ci-info": { + "version": "2.0.0", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/cidr-regex": { + "version": "2.0.10", + "dev": true, + "inBundle": true, + "license": "BSD-2-Clause", + "dependencies": { + "ip-regex": "^2.1.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/npm/node_modules/cli-boxes": { + "version": "1.0.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm/node_modules/cli-columns": { + "version": "3.1.2", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "string-width": "^2.0.0", + "strip-ansi": "^3.0.1" + }, + "engines": { + "node": ">= 4" + } + }, + "node_modules/npm/node_modules/cli-table3": { + "version": "0.5.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "object-assign": "^4.1.0", + "string-width": "^2.1.1" + }, + "engines": { + "node": ">=6" + }, + "optionalDependencies": { + "colors": "^1.1.2" + } + }, + "node_modules/npm/node_modules/cliui": { + "version": "4.1.0", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "string-width": "^2.1.1", + "strip-ansi": "^4.0.0", + "wrap-ansi": "^2.0.0" + } + }, + "node_modules/npm/node_modules/cliui/node_modules/ansi-regex": { + "version": "3.0.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/npm/node_modules/cliui/node_modules/strip-ansi": { + "version": "4.0.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/npm/node_modules/clone": { + "version": "1.0.4", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=0.8" + } + }, + "node_modules/npm/node_modules/cmd-shim": { + "version": "3.0.3", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "graceful-fs": "^4.1.2", + "mkdirp": "~0.5.0" + } + }, + "node_modules/npm/node_modules/co": { + "version": "4.6.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "iojs": ">= 1.0.0", + "node": ">= 0.12.0" + } + }, + "node_modules/npm/node_modules/code-point-at": { + "version": "1.1.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm/node_modules/color-convert": { + "version": "1.9.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "color-name": "^1.1.1" + } + }, + "node_modules/npm/node_modules/color-name": { + "version": "1.1.3", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/colors": { + "version": "1.3.3", + "dev": true, + "inBundle": true, + "license": "MIT", + "optional": true, + "engines": { + "node": ">=0.1.90" + } + }, + "node_modules/npm/node_modules/columnify": { + "version": "1.5.4", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "strip-ansi": "^3.0.0", + "wcwidth": "^1.0.0" + } + }, + "node_modules/npm/node_modules/combined-stream": { + "version": "1.0.6", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "delayed-stream": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/npm/node_modules/concat-map": { + "version": "0.0.1", + "dev": 
true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/concat-stream": { + "version": "1.6.2", + "dev": true, + "engines": [ + "node >= 0.8" + ], + "inBundle": true, + "license": "MIT", + "dependencies": { + "buffer-from": "^1.0.0", + "inherits": "^2.0.3", + "readable-stream": "^2.2.2", + "typedarray": "^0.0.6" + } + }, + "node_modules/npm/node_modules/concat-stream/node_modules/readable-stream": { + "version": "2.3.6", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "node_modules/npm/node_modules/concat-stream/node_modules/string_decoder": { + "version": "1.1.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "safe-buffer": "~5.1.0" + } + }, + "node_modules/npm/node_modules/config-chain": { + "version": "1.1.12", + "dev": true, + "inBundle": true, + "dependencies": { + "ini": "^1.3.4", + "proto-list": "~1.2.1" + } + }, + "node_modules/npm/node_modules/configstore": { + "version": "3.1.2", + "dev": true, + "inBundle": true, + "license": "BSD-2-Clause", + "dependencies": { + "dot-prop": "^4.1.0", + "graceful-fs": "^4.1.2", + "make-dir": "^1.0.0", + "unique-string": "^1.0.0", + "write-file-atomic": "^2.0.0", + "xdg-basedir": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/npm/node_modules/console-control-strings": { + "version": "1.1.0", + "dev": true, + "inBundle": true, + "license": "ISC" + }, + "node_modules/npm/node_modules/copy-concurrently": { + "version": "1.0.5", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "aproba": "^1.1.1", + "fs-write-stream-atomic": "^1.0.8", + "iferr": "^0.1.5", + "mkdirp": "^0.5.1", + "rimraf": "^2.5.4", + "run-queue": "^1.0.0" + } + }, + "node_modules/npm/node_modules/copy-concurrently/node_modules/aproba": { + "version": "1.2.0", + "dev": true, + "inBundle": true, + "license": "ISC" + }, + "node_modules/npm/node_modules/copy-concurrently/node_modules/iferr": { + "version": "0.1.5", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/core-util-is": { + "version": "1.0.2", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/create-error-class": { + "version": "3.0.2", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "capture-stack-trace": "^1.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm/node_modules/cross-spawn": { + "version": "5.1.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "lru-cache": "^4.0.1", + "shebang-command": "^1.2.0", + "which": "^1.2.9" + } + }, + "node_modules/npm/node_modules/cross-spawn/node_modules/lru-cache": { + "version": "4.1.5", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "pseudomap": "^1.0.2", + "yallist": "^2.1.2" + } + }, + "node_modules/npm/node_modules/cross-spawn/node_modules/yallist": { + "version": "2.1.2", + "dev": true, + "inBundle": true, + "license": "ISC" + }, + "node_modules/npm/node_modules/crypto-random-string": { + "version": "1.0.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/npm/node_modules/cyclist": { + "version": "0.2.2", + "dev": true, + "inBundle": true + }, + "node_modules/npm/node_modules/dashdash": { + 
"version": "1.14.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "assert-plus": "^1.0.0" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/npm/node_modules/debug": { + "version": "3.1.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/npm/node_modules/debug/node_modules/ms": { + "version": "2.0.0", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/debuglog": { + "version": "1.0.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": "*" + } + }, + "node_modules/npm/node_modules/decamelize": { + "version": "1.2.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm/node_modules/decode-uri-component": { + "version": "0.2.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=0.10" + } + }, + "node_modules/npm/node_modules/deep-extend": { + "version": "0.6.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=4.0.0" + } + }, + "node_modules/npm/node_modules/defaults": { + "version": "1.0.3", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "clone": "^1.0.2" + } + }, + "node_modules/npm/node_modules/define-properties": { + "version": "1.1.3", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "object-keys": "^1.0.12" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/npm/node_modules/delayed-stream": { + "version": "1.0.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/npm/node_modules/delegates": { + "version": "1.0.0", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/detect-indent": { + "version": "5.0.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/npm/node_modules/detect-newline": { + "version": "2.1.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm/node_modules/dezalgo": { + "version": "1.0.3", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "asap": "^2.0.0", + "wrappy": "1" + } + }, + "node_modules/npm/node_modules/dot-prop": { + "version": "4.2.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "is-obj": "^1.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/npm/node_modules/dotenv": { + "version": "5.0.1", + "dev": true, + "inBundle": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=4.6.0" + } + }, + "node_modules/npm/node_modules/duplexer3": { + "version": "0.1.4", + "dev": true, + "inBundle": true, + "license": "BSD-3-Clause" + }, + "node_modules/npm/node_modules/duplexify": { + "version": "3.6.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "end-of-stream": "^1.0.0", + "inherits": "^2.0.1", + "readable-stream": "^2.0.0", + "stream-shift": "^1.0.0" + } + }, + "node_modules/npm/node_modules/duplexify/node_modules/readable-stream": { + "version": "2.3.6", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + 
"node_modules/npm/node_modules/duplexify/node_modules/string_decoder": { + "version": "1.1.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "safe-buffer": "~5.1.0" + } + }, + "node_modules/npm/node_modules/ecc-jsbn": { + "version": "0.1.2", + "dev": true, + "inBundle": true, + "license": "MIT", + "optional": true, + "dependencies": { + "jsbn": "~0.1.0", + "safer-buffer": "^2.1.0" + } + }, + "node_modules/npm/node_modules/editor": { + "version": "1.0.0", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/encoding": { + "version": "0.1.12", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "iconv-lite": "~0.4.13" + } + }, + "node_modules/npm/node_modules/end-of-stream": { + "version": "1.4.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "once": "^1.4.0" + } + }, + "node_modules/npm/node_modules/env-paths": { + "version": "2.2.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/npm/node_modules/err-code": { + "version": "1.1.2", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/errno": { + "version": "0.1.7", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "prr": "~1.0.1" + }, + "bin": { + "errno": "cli.js" + } + }, + "node_modules/npm/node_modules/es-abstract": { + "version": "1.12.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "es-to-primitive": "^1.1.1", + "function-bind": "^1.1.1", + "has": "^1.0.1", + "is-callable": "^1.1.3", + "is-regex": "^1.0.4" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/npm/node_modules/es-to-primitive": { + "version": "1.2.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "is-callable": "^1.1.4", + "is-date-object": "^1.0.1", + "is-symbol": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/npm/node_modules/es6-promise": { + "version": "4.2.8", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/es6-promisify": { + "version": "5.0.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "es6-promise": "^4.0.3" + } + }, + "node_modules/npm/node_modules/escape-string-regexp": { + "version": "1.0.5", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/npm/node_modules/execa": { + "version": "0.7.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "cross-spawn": "^5.0.1", + "get-stream": "^3.0.0", + "is-stream": "^1.1.0", + "npm-run-path": "^2.0.0", + "p-finally": "^1.0.0", + "signal-exit": "^3.0.0", + "strip-eof": "^1.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/npm/node_modules/execa/node_modules/get-stream": { + "version": "3.0.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/npm/node_modules/extend": { + "version": "3.0.2", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/extsprintf": { + "version": "1.3.0", + "dev": true, + "engines": [ + "node >=0.6.0" + ], + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/fast-deep-equal": { + "version": "1.1.0", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/fast-json-stable-stringify": { + "version": "2.0.0", 
+ "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/figgy-pudding": { + "version": "3.5.1", + "dev": true, + "inBundle": true, + "license": "ISC" + }, + "node_modules/npm/node_modules/find-npm-prefix": { + "version": "1.0.2", + "dev": true, + "inBundle": true, + "license": "ISC" + }, + "node_modules/npm/node_modules/find-up": { + "version": "2.1.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "locate-path": "^2.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/npm/node_modules/flush-write-stream": { + "version": "1.0.3", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "inherits": "^2.0.1", + "readable-stream": "^2.0.4" + } + }, + "node_modules/npm/node_modules/flush-write-stream/node_modules/readable-stream": { + "version": "2.3.6", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "node_modules/npm/node_modules/flush-write-stream/node_modules/string_decoder": { + "version": "1.1.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "safe-buffer": "~5.1.0" + } + }, + "node_modules/npm/node_modules/forever-agent": { + "version": "0.6.1", + "dev": true, + "inBundle": true, + "license": "Apache-2.0", + "engines": { + "node": "*" + } + }, + "node_modules/npm/node_modules/form-data": { + "version": "2.3.2", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "asynckit": "^0.4.0", + "combined-stream": "1.0.6", + "mime-types": "^2.1.12" + }, + "engines": { + "node": ">= 0.12" + } + }, + "node_modules/npm/node_modules/from2": { + "version": "2.3.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "inherits": "^2.0.1", + "readable-stream": "^2.0.0" + } + }, + "node_modules/npm/node_modules/from2/node_modules/readable-stream": { + "version": "2.3.6", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "node_modules/npm/node_modules/from2/node_modules/string_decoder": { + "version": "1.1.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "safe-buffer": "~5.1.0" + } + }, + "node_modules/npm/node_modules/fs-minipass": { + "version": "1.2.7", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "minipass": "^2.6.0" + } + }, + "node_modules/npm/node_modules/fs-minipass/node_modules/minipass": { + "version": "2.9.0", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "safe-buffer": "^5.1.2", + "yallist": "^3.0.0" + } + }, + "node_modules/npm/node_modules/fs-vacuum": { + "version": "1.2.10", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "graceful-fs": "^4.1.2", + "path-is-inside": "^1.0.1", + "rimraf": "^2.5.2" + } + }, + "node_modules/npm/node_modules/fs-write-stream-atomic": { + "version": "1.0.10", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "graceful-fs": "^4.1.2", + "iferr": "^0.1.5", + "imurmurhash": "^0.1.4", + "readable-stream": "1 || 2" + } + }, + 
"node_modules/npm/node_modules/fs-write-stream-atomic/node_modules/iferr": { + "version": "0.1.5", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/fs-write-stream-atomic/node_modules/readable-stream": { + "version": "2.3.6", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "node_modules/npm/node_modules/fs-write-stream-atomic/node_modules/string_decoder": { + "version": "1.1.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "safe-buffer": "~5.1.0" + } + }, + "node_modules/npm/node_modules/fs.realpath": { + "version": "1.0.0", + "dev": true, + "inBundle": true, + "license": "ISC" + }, + "node_modules/npm/node_modules/function-bind": { + "version": "1.1.1", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/gauge": { + "version": "2.7.4", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "aproba": "^1.0.3", + "console-control-strings": "^1.0.0", + "has-unicode": "^2.0.0", + "object-assign": "^4.1.0", + "signal-exit": "^3.0.0", + "string-width": "^1.0.1", + "strip-ansi": "^3.0.1", + "wide-align": "^1.1.0" + } + }, + "node_modules/npm/node_modules/gauge/node_modules/aproba": { + "version": "1.2.0", + "dev": true, + "inBundle": true, + "license": "ISC" + }, + "node_modules/npm/node_modules/gauge/node_modules/string-width": { + "version": "1.0.2", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "code-point-at": "^1.0.0", + "is-fullwidth-code-point": "^1.0.0", + "strip-ansi": "^3.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm/node_modules/genfun": { + "version": "5.0.0", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/gentle-fs": { + "version": "2.3.0", + "dev": true, + "inBundle": true, + "license": "Artistic-2.0", + "dependencies": { + "aproba": "^1.1.2", + "chownr": "^1.1.2", + "cmd-shim": "^3.0.3", + "fs-vacuum": "^1.2.10", + "graceful-fs": "^4.1.11", + "iferr": "^0.1.5", + "infer-owner": "^1.0.4", + "mkdirp": "^0.5.1", + "path-is-inside": "^1.0.2", + "read-cmd-shim": "^1.0.1", + "slide": "^1.1.6" + } + }, + "node_modules/npm/node_modules/gentle-fs/node_modules/aproba": { + "version": "1.2.0", + "dev": true, + "inBundle": true, + "license": "ISC" + }, + "node_modules/npm/node_modules/gentle-fs/node_modules/iferr": { + "version": "0.1.5", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/get-caller-file": { + "version": "1.0.3", + "dev": true, + "inBundle": true, + "license": "ISC" + }, + "node_modules/npm/node_modules/get-stream": { + "version": "4.1.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "pump": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/npm/node_modules/getpass": { + "version": "0.1.7", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "assert-plus": "^1.0.0" + } + }, + "node_modules/npm/node_modules/glob": { + "version": "7.1.6", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.0.4", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, 
+ "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/npm/node_modules/global-dirs": { + "version": "0.1.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "ini": "^1.3.4" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/npm/node_modules/got": { + "version": "6.7.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "create-error-class": "^3.0.0", + "duplexer3": "^0.1.4", + "get-stream": "^3.0.0", + "is-redirect": "^1.0.0", + "is-retry-allowed": "^1.0.0", + "is-stream": "^1.0.0", + "lowercase-keys": "^1.0.0", + "safe-buffer": "^5.0.1", + "timed-out": "^4.0.0", + "unzip-response": "^2.0.1", + "url-parse-lax": "^1.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/npm/node_modules/got/node_modules/get-stream": { + "version": "3.0.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/npm/node_modules/graceful-fs": { + "version": "4.2.4", + "dev": true, + "inBundle": true, + "license": "ISC" + }, + "node_modules/npm/node_modules/har-schema": { + "version": "2.0.0", + "dev": true, + "inBundle": true, + "license": "ISC", + "engines": { + "node": ">=4" + } + }, + "node_modules/npm/node_modules/har-validator": { + "version": "5.1.0", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "ajv": "^5.3.0", + "har-schema": "^2.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/npm/node_modules/has": { + "version": "1.0.3", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.1" + }, + "engines": { + "node": ">= 0.4.0" + } + }, + "node_modules/npm/node_modules/has-flag": { + "version": "3.0.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/npm/node_modules/has-symbols": { + "version": "1.0.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/npm/node_modules/has-unicode": { + "version": "2.0.1", + "dev": true, + "inBundle": true, + "license": "ISC" + }, + "node_modules/npm/node_modules/hosted-git-info": { + "version": "2.8.8", + "dev": true, + "inBundle": true, + "license": "ISC" + }, + "node_modules/npm/node_modules/http-cache-semantics": { + "version": "3.8.1", + "dev": true, + "inBundle": true, + "license": "BSD-2-Clause" + }, + "node_modules/npm/node_modules/http-proxy-agent": { + "version": "2.1.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "agent-base": "4", + "debug": "3.1.0" + }, + "engines": { + "node": ">= 4.5.0" + } + }, + "node_modules/npm/node_modules/http-signature": { + "version": "1.2.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "assert-plus": "^1.0.0", + "jsprim": "^1.2.2", + "sshpk": "^1.7.0" + }, + "engines": { + "node": ">=0.8", + "npm": ">=1.3.7" + } + }, + "node_modules/npm/node_modules/https-proxy-agent": { + "version": "2.2.4", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "agent-base": "^4.3.0", + "debug": "^3.1.0" + }, + "engines": { + "node": ">= 4.5.0" + } + }, + "node_modules/npm/node_modules/humanize-ms": { + "version": "1.2.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "ms": "^2.0.0" + } + }, + "node_modules/npm/node_modules/iconv-lite": { + "version": "0.4.23", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "safer-buffer": 
">= 2.1.2 < 3" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm/node_modules/iferr": { + "version": "1.0.2", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/npm/node_modules/ignore-walk": { + "version": "3.0.3", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "minimatch": "^3.0.4" + } + }, + "node_modules/npm/node_modules/import-lazy": { + "version": "2.1.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/npm/node_modules/imurmurhash": { + "version": "0.1.4", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=0.8.19" + } + }, + "node_modules/npm/node_modules/infer-owner": { + "version": "1.0.4", + "dev": true, + "inBundle": true, + "license": "ISC" + }, + "node_modules/npm/node_modules/inflight": { + "version": "1.0.6", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/npm/node_modules/inherits": { + "version": "2.0.4", + "dev": true, + "inBundle": true, + "license": "ISC" + }, + "node_modules/npm/node_modules/ini": { + "version": "1.3.5", + "dev": true, + "inBundle": true, + "license": "ISC", + "engines": { + "node": "*" + } + }, + "node_modules/npm/node_modules/init-package-json": { + "version": "1.10.3", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "glob": "^7.1.1", + "npm-package-arg": "^4.0.0 || ^5.0.0 || ^6.0.0", + "promzard": "^0.3.0", + "read": "~1.0.1", + "read-package-json": "1 || 2", + "semver": "2.x || 3.x || 4 || 5", + "validate-npm-package-license": "^3.0.1", + "validate-npm-package-name": "^3.0.0" + } + }, + "node_modules/npm/node_modules/invert-kv": { + "version": "2.0.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/npm/node_modules/ip": { + "version": "1.1.5", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/ip-regex": { + "version": "2.1.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/npm/node_modules/is-callable": { + "version": "1.1.4", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/npm/node_modules/is-ci": { + "version": "1.2.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "ci-info": "^1.5.0" + }, + "bin": { + "is-ci": "bin.js" + } + }, + "node_modules/npm/node_modules/is-ci/node_modules/ci-info": { + "version": "1.6.0", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/is-cidr": { + "version": "3.0.0", + "dev": true, + "inBundle": true, + "license": "BSD-2-Clause", + "dependencies": { + "cidr-regex": "^2.0.10" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/npm/node_modules/is-date-object": { + "version": "1.0.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/npm/node_modules/is-fullwidth-code-point": { + "version": "1.0.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "number-is-nan": "^1.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm/node_modules/is-installed-globally": { + "version": "0.1.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "global-dirs": 
"^0.1.0", + "is-path-inside": "^1.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/npm/node_modules/is-npm": { + "version": "1.0.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm/node_modules/is-obj": { + "version": "1.0.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm/node_modules/is-path-inside": { + "version": "1.0.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "path-is-inside": "^1.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm/node_modules/is-redirect": { + "version": "1.0.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm/node_modules/is-regex": { + "version": "1.0.4", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "has": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/npm/node_modules/is-retry-allowed": { + "version": "1.2.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm/node_modules/is-stream": { + "version": "1.1.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm/node_modules/is-symbol": { + "version": "1.0.2", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "has-symbols": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/npm/node_modules/is-typedarray": { + "version": "1.0.0", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/isarray": { + "version": "1.0.0", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/isexe": { + "version": "2.0.0", + "dev": true, + "inBundle": true, + "license": "ISC" + }, + "node_modules/npm/node_modules/isstream": { + "version": "0.1.2", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/jsbn": { + "version": "0.1.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "optional": true + }, + "node_modules/npm/node_modules/json-parse-better-errors": { + "version": "1.0.2", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/json-schema": { + "version": "0.2.3", + "dev": true, + "inBundle": true + }, + "node_modules/npm/node_modules/json-schema-traverse": { + "version": "0.3.1", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/json-stringify-safe": { + "version": "5.0.1", + "dev": true, + "inBundle": true, + "license": "ISC" + }, + "node_modules/npm/node_modules/jsonparse": { + "version": "1.3.1", + "dev": true, + "engines": [ + "node >= 0.2.0" + ], + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/JSONStream": { + "version": "1.3.5", + "dev": true, + "inBundle": true, + "license": "(MIT OR Apache-2.0)", + "dependencies": { + "jsonparse": "^1.2.0", + "through": ">=2.2.7 <3" + }, + "bin": { + "JSONStream": "bin.js" + }, + "engines": { + "node": "*" + } + }, + "node_modules/npm/node_modules/jsprim": { + "version": "1.4.1", + "dev": true, + "engines": [ + "node >=0.6.0" + ], + "inBundle": true, + "license": "MIT", + "dependencies": { + "assert-plus": "1.0.0", + "extsprintf": "1.3.0", + "json-schema": "0.2.3", + "verror": "1.10.0" + } + }, + 
"node_modules/npm/node_modules/latest-version": { + "version": "3.1.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "package-json": "^4.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/npm/node_modules/lazy-property": { + "version": "1.0.0", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/lcid": { + "version": "2.0.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "invert-kv": "^2.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/npm/node_modules/libcipm": { + "version": "4.0.7", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "bin-links": "^1.1.2", + "bluebird": "^3.5.1", + "figgy-pudding": "^3.5.1", + "find-npm-prefix": "^1.0.2", + "graceful-fs": "^4.1.11", + "ini": "^1.3.5", + "lock-verify": "^2.0.2", + "mkdirp": "^0.5.1", + "npm-lifecycle": "^3.0.0", + "npm-logical-tree": "^1.2.1", + "npm-package-arg": "^6.1.0", + "pacote": "^9.1.0", + "read-package-json": "^2.0.13", + "rimraf": "^2.6.2", + "worker-farm": "^1.6.0" + } + }, + "node_modules/npm/node_modules/libnpm": { + "version": "3.0.1", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "bin-links": "^1.1.2", + "bluebird": "^3.5.3", + "find-npm-prefix": "^1.0.2", + "libnpmaccess": "^3.0.2", + "libnpmconfig": "^1.2.1", + "libnpmhook": "^5.0.3", + "libnpmorg": "^1.0.1", + "libnpmpublish": "^1.1.2", + "libnpmsearch": "^2.0.2", + "libnpmteam": "^1.0.2", + "lock-verify": "^2.0.2", + "npm-lifecycle": "^3.0.0", + "npm-logical-tree": "^1.2.1", + "npm-package-arg": "^6.1.0", + "npm-profile": "^4.0.2", + "npm-registry-fetch": "^4.0.0", + "npmlog": "^4.1.2", + "pacote": "^9.5.3", + "read-package-json": "^2.0.13", + "stringify-package": "^1.0.0" + } + }, + "node_modules/npm/node_modules/libnpmaccess": { + "version": "3.0.2", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "aproba": "^2.0.0", + "get-stream": "^4.0.0", + "npm-package-arg": "^6.1.0", + "npm-registry-fetch": "^4.0.0" + } + }, + "node_modules/npm/node_modules/libnpmconfig": { + "version": "1.2.1", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "figgy-pudding": "^3.5.1", + "find-up": "^3.0.0", + "ini": "^1.3.5" + } + }, + "node_modules/npm/node_modules/libnpmconfig/node_modules/find-up": { + "version": "3.0.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "locate-path": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/npm/node_modules/libnpmconfig/node_modules/locate-path": { + "version": "3.0.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "p-locate": "^3.0.0", + "path-exists": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/npm/node_modules/libnpmconfig/node_modules/p-limit": { + "version": "2.2.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "p-try": "^2.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/npm/node_modules/libnpmconfig/node_modules/p-locate": { + "version": "3.0.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "p-limit": "^2.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/npm/node_modules/libnpmconfig/node_modules/p-try": { + "version": "2.2.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/npm/node_modules/libnpmhook": { + "version": "5.0.3", + "dev": 
true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "aproba": "^2.0.0", + "figgy-pudding": "^3.4.1", + "get-stream": "^4.0.0", + "npm-registry-fetch": "^4.0.0" + } + }, + "node_modules/npm/node_modules/libnpmorg": { + "version": "1.0.1", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "aproba": "^2.0.0", + "figgy-pudding": "^3.4.1", + "get-stream": "^4.0.0", + "npm-registry-fetch": "^4.0.0" + } + }, + "node_modules/npm/node_modules/libnpmpublish": { + "version": "1.1.2", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "aproba": "^2.0.0", + "figgy-pudding": "^3.5.1", + "get-stream": "^4.0.0", + "lodash.clonedeep": "^4.5.0", + "normalize-package-data": "^2.4.0", + "npm-package-arg": "^6.1.0", + "npm-registry-fetch": "^4.0.0", + "semver": "^5.5.1", + "ssri": "^6.0.1" + } + }, + "node_modules/npm/node_modules/libnpmsearch": { + "version": "2.0.2", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "figgy-pudding": "^3.5.1", + "get-stream": "^4.0.0", + "npm-registry-fetch": "^4.0.0" + } + }, + "node_modules/npm/node_modules/libnpmteam": { + "version": "1.0.2", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "aproba": "^2.0.0", + "figgy-pudding": "^3.4.1", + "get-stream": "^4.0.0", + "npm-registry-fetch": "^4.0.0" + } + }, + "node_modules/npm/node_modules/libnpx": { + "version": "10.2.2", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "dotenv": "^5.0.1", + "npm-package-arg": "^6.0.0", + "rimraf": "^2.6.2", + "safe-buffer": "^5.1.0", + "update-notifier": "^2.3.0", + "which": "^1.3.0", + "y18n": "^4.0.0", + "yargs": "^11.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/npm/node_modules/locate-path": { + "version": "2.0.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "p-locate": "^2.0.0", + "path-exists": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/npm/node_modules/lock-verify": { + "version": "2.1.0", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "npm-package-arg": "^6.1.0", + "semver": "^5.4.1" + } + }, + "node_modules/npm/node_modules/lockfile": { + "version": "1.0.4", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "signal-exit": "^3.0.2" + } + }, + "node_modules/npm/node_modules/lodash._baseindexof": { + "version": "3.1.0", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/lodash._baseuniq": { + "version": "4.6.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "lodash._createset": "~4.0.0", + "lodash._root": "~3.0.0" + } + }, + "node_modules/npm/node_modules/lodash._bindcallback": { + "version": "3.0.1", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/lodash._cacheindexof": { + "version": "3.0.2", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/lodash._createcache": { + "version": "3.1.2", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "lodash._getnative": "^3.0.0" + } + }, + "node_modules/npm/node_modules/lodash._createset": { + "version": "4.0.3", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/lodash._getnative": { + "version": "3.9.1", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/lodash._root": { + "version": "3.0.1", + 
"dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/lodash.clonedeep": { + "version": "4.5.0", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/lodash.restparam": { + "version": "3.6.1", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/lodash.union": { + "version": "4.6.0", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/lodash.uniq": { + "version": "4.5.0", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/lodash.without": { + "version": "4.4.0", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/lowercase-keys": { + "version": "1.0.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm/node_modules/lru-cache": { + "version": "5.1.1", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "yallist": "^3.0.2" + } + }, + "node_modules/npm/node_modules/make-dir": { + "version": "1.3.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "pify": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/npm/node_modules/make-fetch-happen": { + "version": "5.0.2", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "agentkeepalive": "^3.4.1", + "cacache": "^12.0.0", + "http-cache-semantics": "^3.8.1", + "http-proxy-agent": "^2.1.0", + "https-proxy-agent": "^2.2.3", + "lru-cache": "^5.1.1", + "mississippi": "^3.0.0", + "node-fetch-npm": "^2.0.2", + "promise-retry": "^1.1.1", + "socks-proxy-agent": "^4.0.0", + "ssri": "^6.0.0" + } + }, + "node_modules/npm/node_modules/map-age-cleaner": { + "version": "0.1.3", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "p-defer": "^1.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/npm/node_modules/meant": { + "version": "1.0.1", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/mem": { + "version": "4.3.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "map-age-cleaner": "^0.1.1", + "mimic-fn": "^2.0.0", + "p-is-promise": "^2.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/npm/node_modules/mem/node_modules/mimic-fn": { + "version": "2.1.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/npm/node_modules/mime-db": { + "version": "1.35.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/npm/node_modules/mime-types": { + "version": "2.1.19", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "mime-db": "~1.35.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/npm/node_modules/minimatch": { + "version": "3.0.4", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/npm/node_modules/minizlib": { + "version": "1.3.3", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "minipass": "^2.9.0" + } + }, + "node_modules/npm/node_modules/minizlib/node_modules/minipass": { + "version": "2.9.0", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "safe-buffer": "^5.1.2", + "yallist": "^3.0.0" + } + }, + 
"node_modules/npm/node_modules/mississippi": { + "version": "3.0.0", + "dev": true, + "inBundle": true, + "license": "BSD-2-Clause", + "dependencies": { + "concat-stream": "^1.5.0", + "duplexify": "^3.4.2", + "end-of-stream": "^1.1.0", + "flush-write-stream": "^1.0.0", + "from2": "^2.1.0", + "parallel-transform": "^1.1.0", + "pump": "^3.0.0", + "pumpify": "^1.3.3", + "stream-each": "^1.1.0", + "through2": "^2.0.0" + }, + "engines": { + "node": ">=4.0.0" + } + }, + "node_modules/npm/node_modules/mkdirp": { + "version": "0.5.5", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "minimist": "^1.2.5" + }, + "bin": { + "mkdirp": "bin/cmd.js" + } + }, + "node_modules/npm/node_modules/mkdirp/node_modules/minimist": { + "version": "1.2.5", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/move-concurrently": { + "version": "1.0.1", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "aproba": "^1.1.1", + "copy-concurrently": "^1.0.0", + "fs-write-stream-atomic": "^1.0.8", + "mkdirp": "^0.5.1", + "rimraf": "^2.5.4", + "run-queue": "^1.0.3" + } + }, + "node_modules/npm/node_modules/move-concurrently/node_modules/aproba": { + "version": "1.2.0", + "dev": true, + "inBundle": true, + "license": "ISC" + }, + "node_modules/npm/node_modules/ms": { + "version": "2.1.1", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/mute-stream": { + "version": "0.0.7", + "dev": true, + "inBundle": true, + "license": "ISC" + }, + "node_modules/npm/node_modules/nice-try": { + "version": "1.0.5", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/node-fetch-npm": { + "version": "2.0.2", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "encoding": "^0.1.11", + "json-parse-better-errors": "^1.0.0", + "safe-buffer": "^5.1.1" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/npm/node_modules/node-gyp": { + "version": "5.1.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "env-paths": "^2.2.0", + "glob": "^7.1.4", + "graceful-fs": "^4.2.2", + "mkdirp": "^0.5.1", + "nopt": "^4.0.1", + "npmlog": "^4.1.2", + "request": "^2.88.0", + "rimraf": "^2.6.3", + "semver": "^5.7.1", + "tar": "^4.4.12", + "which": "^1.3.1" + }, + "bin": { + "node-gyp": "bin/node-gyp.js" + }, + "engines": { + "node": ">= 6.0.0" + } + }, + "node_modules/npm/node_modules/nopt": { + "version": "4.0.3", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "abbrev": "1", + "osenv": "^0.1.4" + }, + "bin": { + "nopt": "bin/nopt.js" + } + }, + "node_modules/npm/node_modules/normalize-package-data": { + "version": "2.5.0", + "dev": true, + "inBundle": true, + "license": "BSD-2-Clause", + "dependencies": { + "hosted-git-info": "^2.1.4", + "resolve": "^1.10.0", + "semver": "2 || 3 || 4 || 5", + "validate-npm-package-license": "^3.0.1" + } + }, + "node_modules/npm/node_modules/normalize-package-data/node_modules/resolve": { + "version": "1.10.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "path-parse": "^1.0.6" + } + }, + "node_modules/npm/node_modules/npm-audit-report": { + "version": "1.3.2", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "cli-table3": "^0.5.0", + "console-control-strings": "^1.1.0" + } + }, + "node_modules/npm/node_modules/npm-bundled": { + "version": "1.1.1", + "dev": true, + "inBundle": true, + "license": "ISC", + 
"dependencies": { + "npm-normalize-package-bin": "^1.0.1" + } + }, + "node_modules/npm/node_modules/npm-cache-filename": { + "version": "1.0.2", + "dev": true, + "inBundle": true, + "license": "ISC" + }, + "node_modules/npm/node_modules/npm-install-checks": { + "version": "3.0.2", + "dev": true, + "inBundle": true, + "license": "BSD-2-Clause", + "dependencies": { + "semver": "^2.3.0 || 3.x || 4 || 5" + } + }, + "node_modules/npm/node_modules/npm-lifecycle": { + "version": "3.1.4", + "dev": true, + "inBundle": true, + "license": "Artistic-2.0", + "dependencies": { + "byline": "^5.0.0", + "graceful-fs": "^4.1.15", + "node-gyp": "^5.0.2", + "resolve-from": "^4.0.0", + "slide": "^1.1.6", + "uid-number": "0.0.6", + "umask": "^1.1.0", + "which": "^1.3.1" + } + }, + "node_modules/npm/node_modules/npm-logical-tree": { + "version": "1.2.1", + "dev": true, + "inBundle": true, + "license": "ISC" + }, + "node_modules/npm/node_modules/npm-normalize-package-bin": { + "version": "1.0.1", + "dev": true, + "inBundle": true, + "license": "ISC" + }, + "node_modules/npm/node_modules/npm-package-arg": { + "version": "6.1.1", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "hosted-git-info": "^2.7.1", + "osenv": "^0.1.5", + "semver": "^5.6.0", + "validate-npm-package-name": "^3.0.0" + } + }, + "node_modules/npm/node_modules/npm-packlist": { + "version": "1.4.8", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "ignore-walk": "^3.0.1", + "npm-bundled": "^1.0.1", + "npm-normalize-package-bin": "^1.0.1" + } + }, + "node_modules/npm/node_modules/npm-pick-manifest": { + "version": "3.0.2", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "figgy-pudding": "^3.5.1", + "npm-package-arg": "^6.0.0", + "semver": "^5.4.1" + } + }, + "node_modules/npm/node_modules/npm-profile": { + "version": "4.0.4", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "aproba": "^1.1.2 || 2", + "figgy-pudding": "^3.4.1", + "npm-registry-fetch": "^4.0.0" + } + }, + "node_modules/npm/node_modules/npm-registry-fetch": { + "version": "4.0.5", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "bluebird": "^3.5.1", + "figgy-pudding": "^3.4.1", + "JSONStream": "^1.3.4", + "lru-cache": "^5.1.1", + "make-fetch-happen": "^5.0.0", + "npm-package-arg": "^6.1.0", + "safe-buffer": "^5.2.0" + } + }, + "node_modules/npm/node_modules/npm-registry-fetch/node_modules/safe-buffer": { + "version": "5.2.1", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/npm-run-path": { + "version": "2.0.2", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "path-key": "^2.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/npm/node_modules/npm-user-validate": { + "version": "1.0.0", + "dev": true, + "inBundle": true, + "license": "BSD-2-Clause" + }, + "node_modules/npm/node_modules/npmlog": { + "version": "4.1.2", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "are-we-there-yet": "~1.1.2", + "console-control-strings": "~1.1.0", + "gauge": "~2.7.3", + "set-blocking": "~2.0.0" + } + }, + "node_modules/npm/node_modules/number-is-nan": { + "version": "1.0.1", + "dev": true, + "inBundle": true, + 
"license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm/node_modules/oauth-sign": { + "version": "0.9.0", + "dev": true, + "inBundle": true, + "license": "Apache-2.0", + "engines": { + "node": "*" + } + }, + "node_modules/npm/node_modules/object-assign": { + "version": "4.1.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm/node_modules/object-keys": { + "version": "1.0.12", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/npm/node_modules/object.getownpropertydescriptors": { + "version": "2.0.3", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "define-properties": "^1.1.2", + "es-abstract": "^1.5.1" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/npm/node_modules/once": { + "version": "1.4.0", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/npm/node_modules/opener": { + "version": "1.5.1", + "dev": true, + "inBundle": true, + "license": "(WTFPL OR MIT)", + "bin": { + "opener": "bin/opener-bin.js" + } + }, + "node_modules/npm/node_modules/os-homedir": { + "version": "1.0.2", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm/node_modules/os-locale": { + "version": "3.1.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "execa": "^1.0.0", + "lcid": "^2.0.0", + "mem": "^4.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/npm/node_modules/os-locale/node_modules/cross-spawn": { + "version": "6.0.5", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "nice-try": "^1.0.4", + "path-key": "^2.0.1", + "semver": "^5.5.0", + "shebang-command": "^1.2.0", + "which": "^1.2.9" + }, + "engines": { + "node": ">=4.8" + } + }, + "node_modules/npm/node_modules/os-locale/node_modules/execa": { + "version": "1.0.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "cross-spawn": "^6.0.0", + "get-stream": "^4.0.0", + "is-stream": "^1.1.0", + "npm-run-path": "^2.0.0", + "p-finally": "^1.0.0", + "signal-exit": "^3.0.0", + "strip-eof": "^1.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/npm/node_modules/os-tmpdir": { + "version": "1.0.2", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm/node_modules/osenv": { + "version": "0.1.5", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "os-homedir": "^1.0.0", + "os-tmpdir": "^1.0.0" + } + }, + "node_modules/npm/node_modules/p-defer": { + "version": "1.0.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/npm/node_modules/p-finally": { + "version": "1.0.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/npm/node_modules/p-is-promise": { + "version": "2.1.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/npm/node_modules/p-limit": { + "version": "1.2.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "p-try": "^1.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/npm/node_modules/p-locate": { + "version": "2.0.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { 
+ "p-limit": "^1.1.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/npm/node_modules/p-try": { + "version": "1.0.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/npm/node_modules/package-json": { + "version": "4.0.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "got": "^6.7.1", + "registry-auth-token": "^3.0.1", + "registry-url": "^3.0.3", + "semver": "^5.1.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/npm/node_modules/pacote": { + "version": "9.5.12", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "bluebird": "^3.5.3", + "cacache": "^12.0.2", + "chownr": "^1.1.2", + "figgy-pudding": "^3.5.1", + "get-stream": "^4.1.0", + "glob": "^7.1.3", + "infer-owner": "^1.0.4", + "lru-cache": "^5.1.1", + "make-fetch-happen": "^5.0.0", + "minimatch": "^3.0.4", + "minipass": "^2.3.5", + "mississippi": "^3.0.0", + "mkdirp": "^0.5.1", + "normalize-package-data": "^2.4.0", + "npm-normalize-package-bin": "^1.0.0", + "npm-package-arg": "^6.1.0", + "npm-packlist": "^1.1.12", + "npm-pick-manifest": "^3.0.0", + "npm-registry-fetch": "^4.0.0", + "osenv": "^0.1.5", + "promise-inflight": "^1.0.1", + "promise-retry": "^1.1.1", + "protoduck": "^5.0.1", + "rimraf": "^2.6.2", + "safe-buffer": "^5.1.2", + "semver": "^5.6.0", + "ssri": "^6.0.1", + "tar": "^4.4.10", + "unique-filename": "^1.1.1", + "which": "^1.3.1" + } + }, + "node_modules/npm/node_modules/pacote/node_modules/minipass": { + "version": "2.9.0", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "safe-buffer": "^5.1.2", + "yallist": "^3.0.0" + } + }, + "node_modules/npm/node_modules/parallel-transform": { + "version": "1.1.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "cyclist": "~0.2.2", + "inherits": "^2.0.3", + "readable-stream": "^2.1.5" + } + }, + "node_modules/npm/node_modules/parallel-transform/node_modules/readable-stream": { + "version": "2.3.6", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "node_modules/npm/node_modules/parallel-transform/node_modules/string_decoder": { + "version": "1.1.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "safe-buffer": "~5.1.0" + } + }, + "node_modules/npm/node_modules/path-exists": { + "version": "3.0.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/npm/node_modules/path-is-absolute": { + "version": "1.0.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm/node_modules/path-is-inside": { + "version": "1.0.2", + "dev": true, + "inBundle": true, + "license": "(WTFPL OR MIT)" + }, + "node_modules/npm/node_modules/path-key": { + "version": "2.0.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/npm/node_modules/path-parse": { + "version": "1.0.6", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/performance-now": { + "version": "2.1.0", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/pify": { + "version": "3.0.0", + "dev": true, + "inBundle": true, + 
"license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/npm/node_modules/prepend-http": { + "version": "1.0.4", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm/node_modules/process-nextick-args": { + "version": "2.0.0", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/promise-inflight": { + "version": "1.0.1", + "dev": true, + "inBundle": true, + "license": "ISC" + }, + "node_modules/npm/node_modules/promise-retry": { + "version": "1.1.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "err-code": "^1.0.0", + "retry": "^0.10.0" + }, + "engines": { + "node": ">=0.12" + } + }, + "node_modules/npm/node_modules/promise-retry/node_modules/retry": { + "version": "0.10.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": "*" + } + }, + "node_modules/npm/node_modules/promzard": { + "version": "0.3.0", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "read": "1" + } + }, + "node_modules/npm/node_modules/proto-list": { + "version": "1.2.4", + "dev": true, + "inBundle": true, + "license": "ISC" + }, + "node_modules/npm/node_modules/protoduck": { + "version": "5.0.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "genfun": "^5.0.0" + } + }, + "node_modules/npm/node_modules/prr": { + "version": "1.0.1", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/pseudomap": { + "version": "1.0.2", + "dev": true, + "inBundle": true, + "license": "ISC" + }, + "node_modules/npm/node_modules/psl": { + "version": "1.1.29", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/pump": { + "version": "3.0.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "end-of-stream": "^1.1.0", + "once": "^1.3.1" + } + }, + "node_modules/npm/node_modules/pumpify": { + "version": "1.5.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "duplexify": "^3.6.0", + "inherits": "^2.0.3", + "pump": "^2.0.0" + } + }, + "node_modules/npm/node_modules/pumpify/node_modules/pump": { + "version": "2.0.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "end-of-stream": "^1.1.0", + "once": "^1.3.1" + } + }, + "node_modules/npm/node_modules/punycode": { + "version": "1.4.1", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/qrcode-terminal": { + "version": "0.12.0", + "dev": true, + "inBundle": true, + "bin": { + "qrcode-terminal": "bin/qrcode-terminal.js" + } + }, + "node_modules/npm/node_modules/qs": { + "version": "6.5.2", + "dev": true, + "inBundle": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.6" + } + }, + "node_modules/npm/node_modules/query-string": { + "version": "6.8.2", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "decode-uri-component": "^0.2.0", + "split-on-first": "^1.0.0", + "strict-uri-encode": "^2.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/npm/node_modules/qw": { + "version": "1.0.1", + "dev": true, + "inBundle": true, + "license": "ISC" + }, + "node_modules/npm/node_modules/rc": { + "version": "1.2.8", + "dev": true, + "inBundle": true, + "license": "(BSD-2-Clause OR MIT OR Apache-2.0)", + "dependencies": { + "deep-extend": "^0.6.0", + "ini": "~1.3.0", + "minimist": "^1.2.0", + "strip-json-comments": 
"~2.0.1" + }, + "bin": { + "rc": "cli.js" + } + }, + "node_modules/npm/node_modules/rc/node_modules/minimist": { + "version": "1.2.5", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/read": { + "version": "1.0.7", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "mute-stream": "~0.0.4" + }, + "engines": { + "node": ">=0.8" + } + }, + "node_modules/npm/node_modules/read-cmd-shim": { + "version": "1.0.5", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "graceful-fs": "^4.1.2" + } + }, + "node_modules/npm/node_modules/read-installed": { + "version": "4.0.3", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "debuglog": "^1.0.1", + "read-package-json": "^2.0.0", + "readdir-scoped-modules": "^1.0.0", + "semver": "2 || 3 || 4 || 5", + "slide": "~1.1.3", + "util-extend": "^1.0.1" + }, + "optionalDependencies": { + "graceful-fs": "^4.1.2" + } + }, + "node_modules/npm/node_modules/read-package-json": { + "version": "2.1.1", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "glob": "^7.1.1", + "json-parse-better-errors": "^1.0.1", + "normalize-package-data": "^2.0.0", + "npm-normalize-package-bin": "^1.0.0" + }, + "optionalDependencies": { + "graceful-fs": "^4.1.2" + } + }, + "node_modules/npm/node_modules/read-package-tree": { + "version": "5.3.1", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "read-package-json": "^2.0.0", + "readdir-scoped-modules": "^1.0.0", + "util-promisify": "^2.1.0" + } + }, + "node_modules/npm/node_modules/readable-stream": { + "version": "3.6.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/npm/node_modules/readdir-scoped-modules": { + "version": "1.1.0", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "debuglog": "^1.0.1", + "dezalgo": "^1.0.0", + "graceful-fs": "^4.1.2", + "once": "^1.3.0" + } + }, + "node_modules/npm/node_modules/registry-auth-token": { + "version": "3.4.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "rc": "^1.1.6", + "safe-buffer": "^5.0.1" + } + }, + "node_modules/npm/node_modules/registry-url": { + "version": "3.1.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "rc": "^1.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm/node_modules/request": { + "version": "2.88.0", + "dev": true, + "inBundle": true, + "license": "Apache-2.0", + "dependencies": { + "aws-sign2": "~0.7.0", + "aws4": "^1.8.0", + "caseless": "~0.12.0", + "combined-stream": "~1.0.6", + "extend": "~3.0.2", + "forever-agent": "~0.6.1", + "form-data": "~2.3.2", + "har-validator": "~5.1.0", + "http-signature": "~1.2.0", + "is-typedarray": "~1.0.0", + "isstream": "~0.1.2", + "json-stringify-safe": "~5.0.1", + "mime-types": "~2.1.19", + "oauth-sign": "~0.9.0", + "performance-now": "^2.1.0", + "qs": "~6.5.2", + "safe-buffer": "^5.1.2", + "tough-cookie": "~2.4.3", + "tunnel-agent": "^0.6.0", + "uuid": "^3.3.2" + }, + "engines": { + "node": ">= 4" + } + }, + "node_modules/npm/node_modules/require-directory": { + "version": "2.1.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm/node_modules/require-main-filename": { + "version": "1.0.1", + "dev": true, 
+ "inBundle": true, + "license": "ISC" + }, + "node_modules/npm/node_modules/resolve-from": { + "version": "4.0.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/npm/node_modules/retry": { + "version": "0.12.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + "node_modules/npm/node_modules/rimraf": { + "version": "2.7.1", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "glob": "^7.1.3" + }, + "bin": { + "rimraf": "bin.js" + } + }, + "node_modules/npm/node_modules/run-queue": { + "version": "1.0.3", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "aproba": "^1.1.1" + } + }, + "node_modules/npm/node_modules/run-queue/node_modules/aproba": { + "version": "1.2.0", + "dev": true, + "inBundle": true, + "license": "ISC" + }, + "node_modules/npm/node_modules/safe-buffer": { + "version": "5.1.2", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/safer-buffer": { + "version": "2.1.2", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/semver": { + "version": "5.7.1", + "dev": true, + "inBundle": true, + "license": "ISC", + "bin": { + "semver": "bin/semver" + } + }, + "node_modules/npm/node_modules/semver-diff": { + "version": "2.1.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "semver": "^5.0.3" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm/node_modules/set-blocking": { + "version": "2.0.0", + "dev": true, + "inBundle": true, + "license": "ISC" + }, + "node_modules/npm/node_modules/sha": { + "version": "3.0.0", + "dev": true, + "inBundle": true, + "license": "(BSD-2-Clause OR MIT)", + "dependencies": { + "graceful-fs": "^4.1.2" + } + }, + "node_modules/npm/node_modules/shebang-command": { + "version": "1.2.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "shebang-regex": "^1.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm/node_modules/shebang-regex": { + "version": "1.0.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm/node_modules/signal-exit": { + "version": "3.0.2", + "dev": true, + "inBundle": true, + "license": "ISC" + }, + "node_modules/npm/node_modules/slide": { + "version": "1.1.6", + "dev": true, + "inBundle": true, + "license": "ISC", + "engines": { + "node": "*" + } + }, + "node_modules/npm/node_modules/smart-buffer": { + "version": "4.1.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">= 6.0.0", + "npm": ">= 3.0.0" + } + }, + "node_modules/npm/node_modules/socks": { + "version": "2.3.3", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "ip": "1.1.5", + "smart-buffer": "^4.1.0" + }, + "engines": { + "node": ">= 6.0.0", + "npm": ">= 3.0.0" + } + }, + "node_modules/npm/node_modules/socks-proxy-agent": { + "version": "4.0.2", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "agent-base": "~4.2.1", + "socks": "~2.3.2" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/npm/node_modules/socks-proxy-agent/node_modules/agent-base": { + "version": "4.2.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "es6-promisify": "^5.0.0" + }, + "engines": { + "node": ">= 4.0.0" + } + }, + "node_modules/npm/node_modules/sorted-object": { + 
"version": "2.0.1", + "dev": true, + "inBundle": true, + "license": "(WTFPL OR MIT)" + }, + "node_modules/npm/node_modules/sorted-union-stream": { + "version": "2.1.3", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "from2": "^1.3.0", + "stream-iterate": "^1.1.0" + } + }, + "node_modules/npm/node_modules/sorted-union-stream/node_modules/from2": { + "version": "1.3.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "inherits": "~2.0.1", + "readable-stream": "~1.1.10" + } + }, + "node_modules/npm/node_modules/sorted-union-stream/node_modules/isarray": { + "version": "0.0.1", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/sorted-union-stream/node_modules/readable-stream": { + "version": "1.1.14", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.1", + "isarray": "0.0.1", + "string_decoder": "~0.10.x" + } + }, + "node_modules/npm/node_modules/sorted-union-stream/node_modules/string_decoder": { + "version": "0.10.31", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/spdx-correct": { + "version": "3.0.0", + "dev": true, + "inBundle": true, + "license": "Apache-2.0", + "dependencies": { + "spdx-expression-parse": "^3.0.0", + "spdx-license-ids": "^3.0.0" + } + }, + "node_modules/npm/node_modules/spdx-exceptions": { + "version": "2.1.0", + "dev": true, + "inBundle": true, + "license": "CC-BY-3.0" + }, + "node_modules/npm/node_modules/spdx-expression-parse": { + "version": "3.0.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "spdx-exceptions": "^2.1.0", + "spdx-license-ids": "^3.0.0" + } + }, + "node_modules/npm/node_modules/spdx-license-ids": { + "version": "3.0.5", + "dev": true, + "inBundle": true, + "license": "CC0-1.0" + }, + "node_modules/npm/node_modules/split-on-first": { + "version": "1.1.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/npm/node_modules/sshpk": { + "version": "1.14.2", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "asn1": "~0.2.3", + "assert-plus": "^1.0.0", + "dashdash": "^1.12.0", + "getpass": "^0.1.1", + "safer-buffer": "^2.0.2" + }, + "bin": { + "sshpk-conv": "bin/sshpk-conv", + "sshpk-sign": "bin/sshpk-sign", + "sshpk-verify": "bin/sshpk-verify" + }, + "engines": { + "node": ">=0.10.0" + }, + "optionalDependencies": { + "bcrypt-pbkdf": "^1.0.0", + "ecc-jsbn": "~0.1.1", + "jsbn": "~0.1.0", + "tweetnacl": "~0.14.0" + } + }, + "node_modules/npm/node_modules/ssri": { + "version": "6.0.1", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "figgy-pudding": "^3.5.1" + } + }, + "node_modules/npm/node_modules/stream-each": { + "version": "1.2.2", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "end-of-stream": "^1.1.0", + "stream-shift": "^1.0.0" + } + }, + "node_modules/npm/node_modules/stream-iterate": { + "version": "1.2.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "readable-stream": "^2.1.5", + "stream-shift": "^1.0.0" + } + }, + "node_modules/npm/node_modules/stream-iterate/node_modules/readable-stream": { + "version": "2.3.6", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + 
"string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "node_modules/npm/node_modules/stream-iterate/node_modules/string_decoder": { + "version": "1.1.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "safe-buffer": "~5.1.0" + } + }, + "node_modules/npm/node_modules/stream-shift": { + "version": "1.0.0", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/strict-uri-encode": { + "version": "2.0.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/npm/node_modules/string_decoder": { + "version": "1.3.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "safe-buffer": "~5.2.0" + } + }, + "node_modules/npm/node_modules/string_decoder/node_modules/safe-buffer": { + "version": "5.2.0", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/string-width": { + "version": "2.1.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "is-fullwidth-code-point": "^2.0.0", + "strip-ansi": "^4.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/npm/node_modules/string-width/node_modules/ansi-regex": { + "version": "3.0.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/npm/node_modules/string-width/node_modules/is-fullwidth-code-point": { + "version": "2.0.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/npm/node_modules/string-width/node_modules/strip-ansi": { + "version": "4.0.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/npm/node_modules/stringify-package": { + "version": "1.0.1", + "dev": true, + "inBundle": true, + "license": "ISC" + }, + "node_modules/npm/node_modules/strip-ansi": { + "version": "3.0.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^2.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm/node_modules/strip-eof": { + "version": "1.0.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm/node_modules/strip-json-comments": { + "version": "2.0.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm/node_modules/supports-color": { + "version": "5.4.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "has-flag": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/npm/node_modules/tar": { + "version": "4.4.13", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "chownr": "^1.1.1", + "fs-minipass": "^1.2.5", + "minipass": "^2.8.6", + "minizlib": "^1.2.1", + "mkdirp": "^0.5.0", + "safe-buffer": "^5.1.2", + "yallist": "^3.0.3" + }, + "engines": { + "node": ">=4.5" + } + }, + "node_modules/npm/node_modules/tar/node_modules/minipass": { + "version": "2.9.0", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "safe-buffer": "^5.1.2", + "yallist": "^3.0.0" + } + }, + "node_modules/npm/node_modules/term-size": { + "version": "1.2.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "execa": "^0.7.0" + }, + "engines": { + "node": ">=4" + } + }, + 
"node_modules/npm/node_modules/text-table": { + "version": "0.2.0", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/through": { + "version": "2.3.8", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/through2": { + "version": "2.0.3", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "readable-stream": "^2.1.5", + "xtend": "~4.0.1" + } + }, + "node_modules/npm/node_modules/through2/node_modules/readable-stream": { + "version": "2.3.6", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "node_modules/npm/node_modules/through2/node_modules/string_decoder": { + "version": "1.1.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "safe-buffer": "~5.1.0" + } + }, + "node_modules/npm/node_modules/timed-out": { + "version": "4.0.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm/node_modules/tiny-relative-date": { + "version": "1.3.0", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/tough-cookie": { + "version": "2.4.3", + "dev": true, + "inBundle": true, + "license": "BSD-3-Clause", + "dependencies": { + "psl": "^1.1.24", + "punycode": "^1.4.1" + }, + "engines": { + "node": ">=0.8" + } + }, + "node_modules/npm/node_modules/tunnel-agent": { + "version": "0.6.0", + "dev": true, + "inBundle": true, + "license": "Apache-2.0", + "dependencies": { + "safe-buffer": "^5.0.1" + }, + "engines": { + "node": "*" + } + }, + "node_modules/npm/node_modules/tweetnacl": { + "version": "0.14.5", + "dev": true, + "inBundle": true, + "license": "Unlicense", + "optional": true + }, + "node_modules/npm/node_modules/typedarray": { + "version": "0.0.6", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/uid-number": { + "version": "0.0.6", + "dev": true, + "inBundle": true, + "license": "ISC", + "engines": { + "node": "*" + } + }, + "node_modules/npm/node_modules/umask": { + "version": "1.1.0", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/unique-filename": { + "version": "1.1.1", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "unique-slug": "^2.0.0" + } + }, + "node_modules/npm/node_modules/unique-slug": { + "version": "2.0.0", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "imurmurhash": "^0.1.4" + } + }, + "node_modules/npm/node_modules/unique-string": { + "version": "1.0.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "crypto-random-string": "^1.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/npm/node_modules/unpipe": { + "version": "1.0.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/npm/node_modules/unzip-response": { + "version": "2.0.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/npm/node_modules/update-notifier": { + "version": "2.5.0", + "dev": true, + "inBundle": true, + "license": "BSD-2-Clause", + "dependencies": { + "boxen": "^1.2.1", + "chalk": "^2.0.1", + "configstore": "^3.0.0", + 
"import-lazy": "^2.1.0", + "is-ci": "^1.0.10", + "is-installed-globally": "^0.1.0", + "is-npm": "^1.0.0", + "latest-version": "^3.0.0", + "semver-diff": "^2.0.0", + "xdg-basedir": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/npm/node_modules/url-parse-lax": { + "version": "1.0.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "prepend-http": "^1.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm/node_modules/util-deprecate": { + "version": "1.0.2", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/util-extend": { + "version": "1.0.3", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/util-promisify": { + "version": "2.1.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "object.getownpropertydescriptors": "^2.0.3" + } + }, + "node_modules/npm/node_modules/uuid": { + "version": "3.3.3", + "dev": true, + "inBundle": true, + "license": "MIT", + "bin": { + "uuid": "bin/uuid" + } + }, + "node_modules/npm/node_modules/validate-npm-package-license": { + "version": "3.0.4", + "dev": true, + "inBundle": true, + "license": "Apache-2.0", + "dependencies": { + "spdx-correct": "^3.0.0", + "spdx-expression-parse": "^3.0.0" + } + }, + "node_modules/npm/node_modules/validate-npm-package-name": { + "version": "3.0.0", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "builtins": "^1.0.3" + } + }, + "node_modules/npm/node_modules/verror": { + "version": "1.10.0", + "dev": true, + "engines": [ + "node >=0.6.0" + ], + "inBundle": true, + "license": "MIT", + "dependencies": { + "assert-plus": "^1.0.0", + "core-util-is": "1.0.2", + "extsprintf": "^1.2.0" + } + }, + "node_modules/npm/node_modules/wcwidth": { + "version": "1.0.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "defaults": "^1.0.3" + } + }, + "node_modules/npm/node_modules/which": { + "version": "1.3.1", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "which": "bin/which" + } + }, + "node_modules/npm/node_modules/which-module": { + "version": "2.0.0", + "dev": true, + "inBundle": true, + "license": "ISC" + }, + "node_modules/npm/node_modules/wide-align": { + "version": "1.1.2", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "string-width": "^1.0.2" + } + }, + "node_modules/npm/node_modules/wide-align/node_modules/string-width": { + "version": "1.0.2", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "code-point-at": "^1.0.0", + "is-fullwidth-code-point": "^1.0.0", + "strip-ansi": "^3.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm/node_modules/widest-line": { + "version": "2.0.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "string-width": "^2.1.1" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/npm/node_modules/worker-farm": { + "version": "1.7.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "errno": "~0.1.7" + } + }, + "node_modules/npm/node_modules/wrap-ansi": { + "version": "2.1.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "string-width": "^1.0.1", + "strip-ansi": "^3.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm/node_modules/wrap-ansi/node_modules/string-width": { + "version": "1.0.2", + "dev": true, + 
"inBundle": true, + "license": "MIT", + "dependencies": { + "code-point-at": "^1.0.0", + "is-fullwidth-code-point": "^1.0.0", + "strip-ansi": "^3.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm/node_modules/wrappy": { + "version": "1.0.2", + "dev": true, + "inBundle": true, + "license": "ISC" + }, + "node_modules/npm/node_modules/write-file-atomic": { + "version": "2.4.3", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "graceful-fs": "^4.1.11", + "imurmurhash": "^0.1.4", + "signal-exit": "^3.0.2" + } + }, + "node_modules/npm/node_modules/xdg-basedir": { + "version": "3.0.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/npm/node_modules/xtend": { + "version": "4.0.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=0.4" + } + }, + "node_modules/npm/node_modules/y18n": { + "version": "4.0.0", + "dev": true, + "inBundle": true, + "license": "ISC" + }, + "node_modules/npm/node_modules/yallist": { + "version": "3.0.3", + "dev": true, + "inBundle": true, + "license": "ISC" + }, + "node_modules/npm/node_modules/yargs": { + "version": "11.1.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "cliui": "^4.0.0", + "decamelize": "^1.1.1", + "find-up": "^2.1.0", + "get-caller-file": "^1.0.1", + "os-locale": "^3.1.0", + "require-directory": "^2.1.1", + "require-main-filename": "^1.0.1", + "set-blocking": "^2.0.0", + "string-width": "^2.0.0", + "which-module": "^2.0.0", + "y18n": "^3.2.1", + "yargs-parser": "^9.0.2" + } + }, + "node_modules/npm/node_modules/yargs-parser": { + "version": "9.0.2", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "camelcase": "^4.1.0" + } + }, + "node_modules/npm/node_modules/yargs/node_modules/y18n": { + "version": "3.2.1", + "dev": true, + "inBundle": true, + "license": "ISC" + }, + "node_modules/nyc": { + "version": "15.1.0", + "resolved": "https://registry.npmjs.org/nyc/-/nyc-15.1.0.tgz", + "integrity": "sha512-jMW04n9SxKdKi1ZMGhvUTHBN0EICCRkHemEoE5jm6mTYcqcdas0ATzgUgejlQUHMvpnOZqGB5Xxsv9KxJW1j8A==", + "dev": true, + "dependencies": { + "@istanbuljs/load-nyc-config": "^1.0.0", + "@istanbuljs/schema": "^0.1.2", + "caching-transform": "^4.0.0", + "convert-source-map": "^1.7.0", + "decamelize": "^1.2.0", + "find-cache-dir": "^3.2.0", + "find-up": "^4.1.0", + "foreground-child": "^2.0.0", + "get-package-type": "^0.1.0", + "glob": "^7.1.6", + "istanbul-lib-coverage": "^3.0.0", + "istanbul-lib-hook": "^3.0.0", + "istanbul-lib-instrument": "^4.0.0", + "istanbul-lib-processinfo": "^2.0.2", + "istanbul-lib-report": "^3.0.0", + "istanbul-lib-source-maps": "^4.0.0", + "istanbul-reports": "^3.0.2", + "make-dir": "^3.0.0", + "node-preload": "^0.2.1", + "p-map": "^3.0.0", + "process-on-spawn": "^1.0.0", + "resolve-from": "^5.0.0", + "rimraf": "^3.0.0", + "signal-exit": "^3.0.2", + "spawn-wrap": "^2.0.0", + "test-exclude": "^6.0.0", + "yargs": "^15.0.2" + }, + "bin": { + "nyc": "bin/nyc.js" + }, + "engines": { + "node": ">=8.9" + } + }, + "node_modules/nyc/node_modules/find-up": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", + "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", + "dev": true, + "dependencies": { + "locate-path": "^5.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/nyc/node_modules/locate-path": { + 
"version": "5.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", + "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", + "dev": true, + "dependencies": { + "p-locate": "^4.1.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/nyc/node_modules/p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "dev": true, + "dependencies": { + "p-try": "^2.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/nyc/node_modules/p-locate": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", + "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", + "dev": true, + "dependencies": { + "p-limit": "^2.2.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/nyc/node_modules/p-map": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-map/-/p-map-3.0.0.tgz", + "integrity": "sha512-d3qXVTF/s+W+CdJ5A29wywV2n8CQQYahlgz2bFiA+4eVNJbHJodPZ+/gXwPGh0bOqA+j8S+6+ckmvLGPk1QpxQ==", + "dev": true, + "dependencies": { + "aggregate-error": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/nyc/node_modules/p-try": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/nyc/node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=", + "dev": true, + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/onetime": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.0.tgz", + "integrity": "sha512-5NcSkPHhwTVFIQN+TUqXoS5+dlElHXdpAWu9I0HP20YOtIi+aZ0Ct82jdlILDxjLEAWwvm+qj1m6aEtsDVmm6Q==", + "dev": true, + "dependencies": { + "mimic-fn": "^2.1.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/p-each-series": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-each-series/-/p-each-series-2.2.0.tgz", + "integrity": "sha512-ycIL2+1V32th+8scbpTvyHNaHe02z0sjgh91XXjAk+ZeXoPN4Z46DVUnzdso0aX4KckKw0FNNFHdjZ2UsZvxiA==", + "dev": true, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-filter": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/p-filter/-/p-filter-2.1.0.tgz", + "integrity": "sha512-ZBxxZ5sL2HghephhpGAQdoskxplTwr7ICaehZwLIlfL6acuVgZPm8yBNuRAFBGEqtD/hmUeq9eqLg2ys9Xr/yw==", + "dev": true, + "dependencies": { + "p-map": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/p-is-promise": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-is-promise/-/p-is-promise-3.0.0.tgz", + "integrity": 
"sha512-Wo8VsW4IRQSKVXsJCn7TomUaVtyfjVDn3nUP7kE967BQk0CwFpdbZs0X0uk5sW9mkBa9eNM7hCMaG93WUAwxYQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/p-limit": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-1.3.0.tgz", + "integrity": "sha512-vvcXsLAJ9Dr5rQOPk7toZQZJApBl2K4J6dANSsEuh6QI41JYcsS/qhTGa9ErIUUgK3WNQoJYvylxvjqmiqEA9Q==", + "dev": true, + "dependencies": { + "p-try": "^1.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/p-locate": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-2.0.0.tgz", + "integrity": "sha1-IKAQOyIqcMj9OcwuWAaA893l7EM=", + "dev": true, + "dependencies": { + "p-limit": "^1.1.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/p-map": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/p-map/-/p-map-2.1.0.tgz", + "integrity": "sha512-y3b8Kpd8OAN444hxfBbFfj1FY/RjtTd8tzYwhUqNYXx0fXx2iX4maP4Qr6qhIKbQXI02wTLAda4fYUbDagTUFw==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/p-reduce": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/p-reduce/-/p-reduce-2.1.0.tgz", + "integrity": "sha512-2USApvnsutq8uoxZBGbbWM0JIYLiEMJ9RlaN7fAzVNb9OZN0SHjjTTfIcb667XynS5Y1VhwDJVDa72TnPzAYWw==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/p-retry": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/p-retry/-/p-retry-4.5.0.tgz", + "integrity": "sha512-5Hwh4aVQSu6BEP+w2zKlVXtFAaYQe1qWuVADSgoeVlLjwe/Q/AMSoRR4MDeaAfu8llT+YNbEijWu/YF3m6avkg==", + "dev": true, + "dependencies": { + "@types/retry": "^0.12.0", + "retry": "^0.12.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/p-try": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-1.0.0.tgz", + "integrity": "sha1-y8ec26+P1CKOE/Yh8rGiN8GyB7M=", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/package-hash": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/package-hash/-/package-hash-4.0.0.tgz", + "integrity": "sha512-whdkPIooSu/bASggZ96BWVvZTRMOFxnyUG5PnTSGKoJE2gd5mbVNmR2Nj20QFzxYYgAXpoqC+AiXzl+UMRh7zQ==", + "dev": true, + "dependencies": { + "graceful-fs": "^4.1.15", + "hasha": "^5.0.0", + "lodash.flattendeep": "^4.4.0", + "release-zalgo": "^1.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/parent-module": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", + "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", + "dev": true, + "dependencies": { + "callsites": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/parse-json": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-4.0.0.tgz", + "integrity": "sha1-vjX1Qlvh9/bHRxhPmKeIy5lHfuA=", + "dev": true, + "dependencies": { + "error-ex": "^1.3.1", + "json-parse-better-errors": "^1.0.1" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/path-exists": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", + "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=", + "dev": true, + "engines": { + "node": 
">=0.10.0" + } + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/path-parse": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", + "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", + "dev": true + }, + "node_modules/path-type": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-3.0.0.tgz", + "integrity": "sha512-T2ZUsdZFHgA3u4e5PfPbjd7HDDpxPnQb5jN0SrDsjNSuVXHJqtwTnWqG0B1jZrgmJ/7lj1EmVIByWt1gxGkWvg==", + "dev": true, + "dependencies": { + "pify": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/picomatch": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.2.1.tgz", + "integrity": "sha512-ISBaA8xQNmwELC7eOjqFKMESB2VIqt4PPDD0nsS95b/9dZXvVKOlz9keMSnoGGKcOHXfTvDD6WMaRoSc9UuhRA==", + "dev": true, + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pify": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", + "integrity": "sha1-5aSs0sEB/fPZpNB/DbxNtJ3SgXY=", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/pkg-conf": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/pkg-conf/-/pkg-conf-2.1.0.tgz", + "integrity": "sha1-ISZRTKbyq/69FoWW3xi6V4Z/AFg=", + "dev": true, + "dependencies": { + "find-up": "^2.0.0", + "load-json-file": "^4.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/pkg-dir": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-4.2.0.tgz", + "integrity": "sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==", + "dev": true, + "dependencies": { + "find-up": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/pkg-dir/node_modules/find-up": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", + "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", + "dev": true, + "dependencies": { + "locate-path": "^5.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/pkg-dir/node_modules/locate-path": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", + "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", + "dev": true, + "dependencies": { + "p-locate": "^4.1.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/pkg-dir/node_modules/p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "dev": true, + "dependencies": { + "p-try": "^2.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/pkg-dir/node_modules/p-locate": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", + "integrity": 
"sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", + "dev": true, + "dependencies": { + "p-limit": "^2.2.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/pkg-dir/node_modules/p-try": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/pkg-dir/node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/process-nextick-args": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", + "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==", + "dev": true + }, + "node_modules/process-on-spawn": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/process-on-spawn/-/process-on-spawn-1.0.0.tgz", + "integrity": "sha512-1WsPDsUSMmZH5LeMLegqkPDrsGgsWwk1Exipy2hvB0o/F0ASzbpIctSCcZIK1ykJvtTJULEH+20WOFjMvGnCTg==", + "dev": true, + "dependencies": { + "fromentries": "^1.2.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/pump": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz", + "integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==", + "dev": true, + "dependencies": { + "end-of-stream": "^1.1.0", + "once": "^1.3.1" + } + }, + "node_modules/q": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/q/-/q-1.5.1.tgz", + "integrity": "sha1-fjL3W0E4EpHQRhHxvxQQmsAGUdc=", + "dev": true, + "engines": { + "node": ">=0.6.0", + "teleport": ">=0.2.0" + } + }, + "node_modules/queue-microtask": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", + "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/quick-lru": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/quick-lru/-/quick-lru-1.1.0.tgz", + "integrity": "sha1-Q2CxfGETatOAeDl/8RQW4Ybc+7g=", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/randombytes": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", + "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", + "dev": true, + "dependencies": { + "safe-buffer": "^5.1.0" + } + }, + "node_modules/rc": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz", + "integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==", + "dev": true, + "dependencies": { + "deep-extend": "^0.6.0", + "ini": "~1.3.0", + "minimist": "^1.2.0", + "strip-json-comments": "~2.0.1" + }, + "bin": { + "rc": "cli.js" + } + }, + 
"node_modules/rc/node_modules/minimist": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz", + "integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw==", + "dev": true + }, + "node_modules/read-pkg": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-3.0.0.tgz", + "integrity": "sha1-nLxoaXj+5l0WwA4rGcI3/Pbjg4k=", + "dev": true, + "dependencies": { + "load-json-file": "^4.0.0", + "normalize-package-data": "^2.3.2", + "path-type": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/read-pkg-up": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-3.0.0.tgz", + "integrity": "sha1-PtSWaF26D4/hGNBpHcUfSh/5bwc=", + "dev": true, + "dependencies": { + "find-up": "^2.0.0", + "read-pkg": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/readable-stream": { + "version": "2.3.7", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", + "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", + "dev": true, + "dependencies": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "node_modules/readdirp": { + "version": "3.5.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.5.0.tgz", + "integrity": "sha512-cMhu7c/8rdhkHXWsY+osBhfSy0JikwpHK/5+imo+LpeasTF8ouErHrlYkwT0++njiyuDvc7OFY5T3ukvZ8qmFQ==", + "dev": true, + "dependencies": { + "picomatch": "^2.2.1" + }, + "engines": { + "node": ">=8.10.0" + } + }, + "node_modules/redent": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/redent/-/redent-2.0.0.tgz", + "integrity": "sha1-wbIAe0LVfrE4kHmzyDM2OdXhzKo=", + "dev": true, + "dependencies": { + "indent-string": "^3.0.0", + "strip-indent": "^2.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/redeyed": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/redeyed/-/redeyed-2.1.1.tgz", + "integrity": "sha1-iYS1gV2ZyyIEacme7v/jiRPmzAs=", + "dev": true, + "dependencies": { + "esprima": "~4.0.0" + } + }, + "node_modules/registry-auth-token": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/registry-auth-token/-/registry-auth-token-4.1.1.tgz", + "integrity": "sha512-9bKS7nTl9+/A1s7tnPeGrUpRcVY+LUh7bfFgzpndALdPfXQBfQV77rQVtqgUV3ti4vc/Ik81Ex8UJDWDQ12zQA==", + "dev": true, + "dependencies": { + "rc": "^1.2.8" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/release-zalgo": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/release-zalgo/-/release-zalgo-1.0.0.tgz", + "integrity": "sha1-CXALflB0Mpc5Mw5TXFqQ+2eFFzA=", + "dev": true, + "dependencies": { + "es6-error": "^4.0.1" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/require-directory": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", + "integrity": "sha1-jGStX9MNqxyXbiNE/+f3kqam30I=", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/require-main-filename": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/require-main-filename/-/require-main-filename-2.0.0.tgz", + "integrity": "sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg==", + "dev": true 
+ }, + "node_modules/resolve": { + "version": "1.15.1", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.15.1.tgz", + "integrity": "sha512-84oo6ZTtoTUpjgNEr5SJyzQhzL72gaRodsSfyxC/AXRvwu0Yse9H8eF9IpGo7b8YetZhlI6v7ZQ6bKBFV/6S7w==", + "dev": true, + "dependencies": { + "path-parse": "^1.0.6" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/resolve-from": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", + "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/retry": { + "version": "0.12.0", + "resolved": "https://registry.npmjs.org/retry/-/retry-0.12.0.tgz", + "integrity": "sha1-G0KmJmoh8HQh0bC1S33BZ7AcATs=", + "dev": true, + "engines": { + "node": ">= 4" + } + }, + "node_modules/reusify": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", + "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==", + "dev": true, + "engines": { + "iojs": ">=1.0.0", + "node": ">=0.10.0" + } + }, + "node_modules/rimraf": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", + "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", + "dev": true, + "dependencies": { + "glob": "^7.1.3" + }, + "bin": { + "rimraf": "bin.js" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/run-parallel": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", + "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "dependencies": { + "queue-microtask": "^1.2.2" + } + }, + "node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", + "dev": true + }, + "node_modules/semantic-release": { + "version": "17.4.3", + "resolved": "https://registry.npmjs.org/semantic-release/-/semantic-release-17.4.3.tgz", + "integrity": "sha512-lTOUSrkbaQ+TRs3+BmtJhLtPSyiO7iTGmh5SyuEFqNO8HQbQ4nzXg4UlPrDQasO/C0eFK/V0eCbOzJdjtKBOYw==", + "dev": true, + "dependencies": { + "@semantic-release/commit-analyzer": "^8.0.0", + "@semantic-release/error": "^2.2.0", + "@semantic-release/github": "^7.0.0", + "@semantic-release/npm": "^7.0.0", + "@semantic-release/release-notes-generator": "^9.0.0", + "aggregate-error": "^3.0.0", + "cosmiconfig": "^7.0.0", + "debug": "^4.0.0", + "env-ci": "^5.0.0", + "execa": "^5.0.0", + "figures": "^3.0.0", + "find-versions": "^4.0.0", + "get-stream": "^6.0.0", + "git-log-parser": "^1.2.0", + "hook-std": "^2.0.0", + "hosted-git-info": "^4.0.0", + "lodash": "^4.17.21", + "marked": "^2.0.0", + "marked-terminal": "^4.1.1", + "micromatch": "^4.0.2", + "p-each-series": "^2.1.0", + "p-reduce": "^2.0.0", + "read-pkg-up": "^7.0.0", + "resolve-from": "^5.0.0", + "semver": "^7.3.2", + "semver-diff": "^3.1.1", + "signale": "^1.2.1", + 
"yargs": "^16.2.0" + }, + "bin": { + "semantic-release": "bin/semantic-release.js" + }, + "engines": { + "node": ">=10.18" + } + }, + "node_modules/semantic-release/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/semantic-release/node_modules/cliui": { + "version": "7.0.4", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz", + "integrity": "sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ==", + "dev": true, + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.0", + "wrap-ansi": "^7.0.0" + } + }, + "node_modules/semantic-release/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/semantic-release/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/semantic-release/node_modules/find-up": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", + "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", + "dev": true, + "dependencies": { + "locate-path": "^5.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/semantic-release/node_modules/hosted-git-info": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-4.0.2.tgz", + "integrity": "sha512-c9OGXbZ3guC/xOlCg1Ci/VgWlwsqDv1yMQL1CWqXDL0hDjXuNcq0zuR4xqPSuasI3kqFDhqSyTjREz5gzq0fXg==", + "dev": true, + "dependencies": { + "lru-cache": "^6.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/semantic-release/node_modules/locate-path": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", + "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", + "dev": true, + "dependencies": { + "p-locate": "^4.1.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/semantic-release/node_modules/p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "dev": true, + "dependencies": { + "p-try": "^2.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/semantic-release/node_modules/p-locate": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", + "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", + 
"dev": true, + "dependencies": { + "p-limit": "^2.2.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/semantic-release/node_modules/p-try": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/semantic-release/node_modules/parse-json": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz", + "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==", + "dev": true, + "dependencies": { + "@babel/code-frame": "^7.0.0", + "error-ex": "^1.3.1", + "json-parse-even-better-errors": "^2.3.0", + "lines-and-columns": "^1.1.6" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/semantic-release/node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/semantic-release/node_modules/read-pkg": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-5.2.0.tgz", + "integrity": "sha512-Ug69mNOpfvKDAc2Q8DRpMjjzdtrnv9HcSMX+4VsZxD1aZ6ZzrIE7rlzXBtWTyhULSMKg076AW6WR5iZpD0JiOg==", + "dev": true, + "dependencies": { + "@types/normalize-package-data": "^2.4.0", + "normalize-package-data": "^2.5.0", + "parse-json": "^5.0.0", + "type-fest": "^0.6.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/semantic-release/node_modules/read-pkg-up": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-7.0.1.tgz", + "integrity": "sha512-zK0TB7Xd6JpCLmlLmufqykGE+/TlOePD6qKClNW7hHDKFh/J7/7gCWGR7joEQEW1bKq3a3yUZSObOoWLFQ4ohg==", + "dev": true, + "dependencies": { + "find-up": "^4.1.0", + "read-pkg": "^5.2.0", + "type-fest": "^0.8.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/semantic-release/node_modules/read-pkg/node_modules/type-fest": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.6.0.tgz", + "integrity": "sha512-q+MB8nYR1KDLrgr4G5yemftpMC7/QLqVndBmEEdqzmNj5dcFOO4Oo8qlwZE3ULT3+Zim1F8Kq4cBnikNhlCMlg==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/semantic-release/node_modules/semver": { + "version": "7.3.5", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.3.5.tgz", + "integrity": "sha512-PoeGJYh8HK4BTO/a9Tf6ZG3veo/A7ZVsYrSA6J8ny9nb3B1VrpkuN+z9OE5wfE5p6H4LchYZsegiQgbJD94ZFQ==", + "dev": true, + "dependencies": { + "lru-cache": "^6.0.0" + }, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/semantic-release/node_modules/type-fest": { + "version": "0.8.1", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.8.1.tgz", + "integrity": "sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/semantic-release/node_modules/wrap-ansi": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": 
"sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/semantic-release/node_modules/y18n": { + "version": "5.0.8", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", + "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", + "dev": true, + "engines": { + "node": ">=10" + } + }, + "node_modules/semantic-release/node_modules/yargs": { + "version": "16.2.0", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-16.2.0.tgz", + "integrity": "sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw==", + "dev": true, + "dependencies": { + "cliui": "^7.0.2", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.0", + "y18n": "^5.0.5", + "yargs-parser": "^20.2.2" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/semantic-release/node_modules/yargs-parser": { + "version": "20.2.7", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.7.tgz", + "integrity": "sha512-FiNkvbeHzB/syOjIUxFDCnhSfzAL8R5vs40MgLFBorXACCOAEaWu0gRZl14vG8MR9AOJIZbmkjhusqBYZ3HTHw==", + "dev": true, + "engines": { + "node": ">=10" + } + }, + "node_modules/semver": { + "version": "5.7.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", + "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", + "dev": true, + "bin": { + "semver": "bin/semver" + } + }, + "node_modules/semver-diff": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/semver-diff/-/semver-diff-3.1.1.tgz", + "integrity": "sha512-GX0Ix/CJcHyB8c4ykpHGIAvLyOwOobtM/8d+TQkAd81/bEjgPHrfba41Vpesr7jX/t8Uh+R3EX9eAS5be+jQYg==", + "dev": true, + "dependencies": { + "semver": "^6.3.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/semver-diff/node_modules/semver": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", + "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "dev": true, + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/semver-regex": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/semver-regex/-/semver-regex-3.1.3.tgz", + "integrity": "sha512-Aqi54Mk9uYTjVexLnR67rTyBusmwd04cLkHy9hNvk3+G3nT2Oyg7E0l4XVbOaNwIvQ3hHeYxGcyEy+mKreyBFQ==", + "dev": true, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/serialize-javascript": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-5.0.1.tgz", + "integrity": "sha512-SaaNal9imEO737H2c05Og0/8LUXG7EnsZyMa8MzkmuHoELfT6txuj0cMqRj6zfPKnmQ1yasR4PCJc8x+M4JSPA==", + "dev": true, + "dependencies": { + "randombytes": "^2.1.0" + } + }, + "node_modules/set-blocking": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/set-blocking/-/set-blocking-2.0.0.tgz", + "integrity": "sha1-BF+XgtARrppoA93TgrJDkrPYkPc=", + "dev": true + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": 
"https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/signal-exit": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.2.tgz", + "integrity": "sha1-tf3AjxKH6hF4Yo5BXiUTK3NkbG0=", + "dev": true + }, + "node_modules/signale": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/signale/-/signale-1.4.0.tgz", + "integrity": "sha512-iuh+gPf28RkltuJC7W5MRi6XAjTDCAPC/prJUpQoG4vIP3MJZ+GTydVnodXA7pwvTKb2cA0m9OFZW/cdWy/I/w==", + "dev": true, + "dependencies": { + "chalk": "^2.3.2", + "figures": "^2.0.0", + "pkg-conf": "^2.1.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/signale/node_modules/figures": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/figures/-/figures-2.0.0.tgz", + "integrity": "sha1-OrGi0qYsi/tDGgyUy3l6L84nyWI=", + "dev": true, + "dependencies": { + "escape-string-regexp": "^1.0.5" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/slash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", + "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/spawn-error-forwarder": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/spawn-error-forwarder/-/spawn-error-forwarder-1.0.0.tgz", + "integrity": "sha1-Gv2Uc46ZmwNG17n8NzvlXgdXcCk=", + "dev": true + }, + "node_modules/spawn-wrap": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/spawn-wrap/-/spawn-wrap-2.0.0.tgz", + "integrity": "sha512-EeajNjfN9zMnULLwhZZQU3GWBoFNkbngTUPfaawT4RkMiviTxcX0qfhVbGey39mfctfDHkWtuecgQ8NJcyQWHg==", + "dev": true, + "dependencies": { + "foreground-child": "^2.0.0", + "is-windows": "^1.0.2", + "make-dir": "^3.0.0", + "rimraf": "^3.0.0", + "signal-exit": "^3.0.2", + "which": "^2.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/spawn-wrap/node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/spdx-correct": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/spdx-correct/-/spdx-correct-3.1.0.tgz", + "integrity": "sha512-lr2EZCctC2BNR7j7WzJ2FpDznxky1sjfxvvYEyzxNyb6lZXHODmEoJeFu4JupYlkfha1KZpJyoqiJ7pgA1qq8Q==", + "dev": true, + "dependencies": { + "spdx-expression-parse": "^3.0.0", + "spdx-license-ids": 
"^3.0.0" + } + }, + "node_modules/spdx-exceptions": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/spdx-exceptions/-/spdx-exceptions-2.2.0.tgz", + "integrity": "sha512-2XQACfElKi9SlVb1CYadKDXvoajPgBVPn/gOQLrTvHdElaVhr7ZEbqJaRnJLVNeaI4cMEAgVCeBMKF6MWRDCRA==", + "dev": true + }, + "node_modules/spdx-expression-parse": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/spdx-expression-parse/-/spdx-expression-parse-3.0.0.tgz", + "integrity": "sha512-Yg6D3XpRD4kkOmTpdgbUiEJFKghJH03fiC1OPll5h/0sO6neh2jqRDVHOQ4o/LMea0tgCkbMgea5ip/e+MkWyg==", + "dev": true, + "dependencies": { + "spdx-exceptions": "^2.1.0", + "spdx-license-ids": "^3.0.0" + } + }, + "node_modules/spdx-license-ids": { + "version": "3.0.5", + "resolved": "https://registry.npmjs.org/spdx-license-ids/-/spdx-license-ids-3.0.5.tgz", + "integrity": "sha512-J+FWzZoynJEXGphVIS+XEh3kFSjZX/1i9gFBaWQcB+/tmpe2qUsSBABpcxqxnAxFdiUFEgAX1bjYGQvIZmoz9Q==", + "dev": true + }, + "node_modules/split": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/split/-/split-1.0.1.tgz", + "integrity": "sha512-mTyOoPbrivtXnwnIxZRFYRrPNtEFKlpB2fvjSnCQUiAA6qAZzqwna5envK4uk6OIeP17CsdF3rSBGYVBsU0Tkg==", + "dev": true, + "dependencies": { + "through": "2" + }, + "engines": { + "node": "*" + } + }, + "node_modules/split2": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/split2/-/split2-2.2.0.tgz", + "integrity": "sha512-RAb22TG39LhI31MbreBgIuKiIKhVsawfTgEGqKHTK87aG+ul/PB8Sqoi3I7kVdRWiCfrKxK3uo4/YUkpNvhPbw==", + "dev": true, + "dependencies": { + "through2": "^2.0.2" + } + }, + "node_modules/split2/node_modules/through2": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/through2/-/through2-2.0.5.tgz", + "integrity": "sha512-/mrRod8xqpA+IHSLyGCQ2s8SPHiCDEeQJSep1jqLYeEUClOFG2Qsh+4FU6G9VeqpZnGW/Su8LQGc4YKni5rYSQ==", + "dev": true, + "dependencies": { + "readable-stream": "~2.3.6", + "xtend": "~4.0.1" + } + }, + "node_modules/sprintf-js": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", + "integrity": "sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw=", + "dev": true + }, + "node_modules/stream-combiner2": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/stream-combiner2/-/stream-combiner2-1.1.1.tgz", + "integrity": "sha1-+02KFCDqNidk4hrUeAOXvry0HL4=", + "dev": true, + "dependencies": { + "duplexer2": "~0.1.0", + "readable-stream": "^2.0.2" + } + }, + "node_modules/string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "dev": true, + "dependencies": { + "safe-buffer": "~5.1.0" + } + }, + "node_modules/string-width": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.0.tgz", + "integrity": "sha512-zUz5JD+tgqtuDjMhwIg5uFVV3dtqZ9yQJlZVfq4I01/K5Paj5UHj7VyrQOJvzawSVlKpObApbfD0Ed6yJc+1eg==", + "dev": true, + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz", + "integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==", + "dev": true, + "dependencies": { + "ansi-regex": "^5.0.0" + }, + "engines": { + "node": ">=8" + } + }, + 
"node_modules/strip-bom": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz", + "integrity": "sha1-IzTBjpx1n3vdVv3vfprj1YjmjtM=", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/strip-final-newline": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz", + "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/strip-indent": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/strip-indent/-/strip-indent-2.0.0.tgz", + "integrity": "sha1-XvjbKV0B5u1sv3qrlpmNeCJSe2g=", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/strip-json-comments": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", + "integrity": "sha1-PFMZQukIwml8DsNEhYwobHygpgo=", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/supports-color": { + "version": "5.4.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.4.0.tgz", + "integrity": "sha512-zjaXglF5nnWpsq470jSv6P9DwPvgLkuapYmfDm3JWOm0vkNTVF2tI4UrN2r6jH1qM/uc/WtxYY1hYoA2dOKj5w==", + "dev": true, + "dependencies": { + "has-flag": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/supports-hyperlinks": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/supports-hyperlinks/-/supports-hyperlinks-2.2.0.tgz", + "integrity": "sha512-6sXEzV5+I5j8Bmq9/vUphGRM/RJNT9SCURJLjwfOg51heRtguGWDzcaBlgAzKhQa0EVNpPEKzQuBwZ8S8WaCeQ==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0", + "supports-color": "^7.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/supports-hyperlinks/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/supports-hyperlinks/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/temp-dir": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/temp-dir/-/temp-dir-2.0.0.tgz", + "integrity": "sha512-aoBAniQmmwtcKp/7BzsH8Cxzv8OL736p7v1ihGb5e9DJ9kTwGWHrQrVB5+lfVDzfGrdRzXch+ig7LHaY1JTOrg==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/tempy": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/tempy/-/tempy-0.5.0.tgz", + "integrity": "sha512-VEY96x7gbIRfsxqsafy2l5yVxxp3PhwAGoWMyC2D2Zt5DmEv+2tGiPOrquNRpf21hhGnKLVEsuqleqiZmKG/qw==", + "dev": true, + "dependencies": { + "is-stream": "^2.0.0", + "temp-dir": "^2.0.0", + "type-fest": "^0.12.0", + "unique-string": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/tempy/node_modules/is-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.0.tgz", + "integrity": 
"sha512-XCoy+WlUr7d1+Z8GgSuXmpuUFC9fOhRXglJMx+dwLKTkL44Cjd4W1Z5P+BQZpr+cR93aGP4S/s7Ftw6Nd/kiEw==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/tempy/node_modules/type-fest": { + "version": "0.12.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.12.0.tgz", + "integrity": "sha512-53RyidyjvkGpnWPMF9bQgFtWp+Sl8O2Rp13VavmJgfAP9WWG6q6TkrKU8iyJdnwnfgHI6k2hTlgqH4aSdjoTbg==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/test-exclude": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/test-exclude/-/test-exclude-6.0.0.tgz", + "integrity": "sha512-cAGWPIyOHU6zlmg88jwm7VRyXnMN7iV68OGAbYDk/Mh/xC/pzVPlQtY6ngoIH/5/tciuhGfvESU8GrHrcxD56w==", + "dev": true, + "dependencies": { + "@istanbuljs/schema": "^0.1.2", + "glob": "^7.1.4", + "minimatch": "^3.0.4" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/text-extensions": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/text-extensions/-/text-extensions-1.9.0.tgz", + "integrity": "sha512-wiBrwC1EhBelW12Zy26JeOUkQ5mRu+5o8rpsJk5+2t+Y5vE7e842qtZDQ2g1NpX/29HdyFeJ4nSIhI47ENSxlQ==", + "dev": true, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/through": { + "version": "2.3.8", + "resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz", + "integrity": "sha1-DdTJ/6q8NXlgsbckEV1+Doai4fU=", + "dev": true + }, + "node_modules/through2": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/through2/-/through2-3.0.1.tgz", + "integrity": "sha512-M96dvTalPT3YbYLaKaCuwu+j06D/8Jfib0o/PxbVt6Amhv3dUAtW6rTV1jPgJSBG83I/e04Y6xkVdVhSRhi0ww==", + "dev": true, + "dependencies": { + "readable-stream": "2 || 3" + } + }, + "node_modules/to-fast-properties": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz", + "integrity": "sha1-3F5pjL0HkmW8c+A3doGk5Og/YW4=", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/tr46": { + "version": "0.0.3", + "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", + "integrity": "sha1-gYT9NH2snNwYWZLzpmIuFLnZq2o=", + "dev": true + }, + "node_modules/traverse": { + "version": "0.6.6", + "resolved": "https://registry.npmjs.org/traverse/-/traverse-0.6.6.tgz", + "integrity": "sha1-y99WD9e5r2MlAv7UD5GMFX6pcTc=", + "dev": true + }, + "node_modules/trim-newlines": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/trim-newlines/-/trim-newlines-2.0.0.tgz", + "integrity": "sha1-tAPQuRvlDDMd/EuC7s6yLD3hbSA=", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/trim-off-newlines": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/trim-off-newlines/-/trim-off-newlines-1.0.3.tgz", + "integrity": "sha512-kh6Tu6GbeSNMGfrrZh6Bb/4ZEHV1QlB4xNDBeog8Y9/QwFlKTRyWvY3Fs9tRDAMZliVUwieMgEdIeL/FtqjkJg==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/type-fest": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.6.0.tgz", + "integrity": 
"sha512-q+MB8nYR1KDLrgr4G5yemftpMC7/QLqVndBmEEdqzmNj5dcFOO4Oo8qlwZE3ULT3+Zim1F8Kq4cBnikNhlCMlg==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/typedarray-to-buffer": { + "version": "3.1.5", + "resolved": "https://registry.npmjs.org/typedarray-to-buffer/-/typedarray-to-buffer-3.1.5.tgz", + "integrity": "sha512-zdu8XMNEDepKKR+XYOXAVPtWui0ly0NtohUscw+UmaHiAWT8hrV1rr//H6V+0DvJ3OQ19S979M0laLfX8rm82Q==", + "dev": true, + "dependencies": { + "is-typedarray": "^1.0.0" + } + }, + "node_modules/uglify-js": { + "version": "3.8.0", + "resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-3.8.0.tgz", + "integrity": "sha512-ugNSTT8ierCsDHso2jkBHXYrU8Y5/fY2ZUprfrJUiD7YpuFvV4jODLFmb3h4btQjqr5Nh4TX4XtgDfCU1WdioQ==", + "dev": true, + "optional": true, + "dependencies": { + "commander": "~2.20.3", + "source-map": "~0.6.1" + }, + "bin": { + "uglifyjs": "bin/uglifyjs" + }, + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/uglify-js/node_modules/commander": { + "version": "2.20.3", + "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz", + "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==", + "dev": true, + "optional": true + }, + "node_modules/unique-string": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/unique-string/-/unique-string-2.0.0.tgz", + "integrity": "sha512-uNaeirEPvpZWSgzwsPGtU2zVSTrn/8L5q/IexZmH0eH6SA73CmAA5U4GwORTxQAZs95TAXLNqeLoPPNO5gZfWg==", + "dev": true, + "dependencies": { + "crypto-random-string": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/universal-user-agent": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/universal-user-agent/-/universal-user-agent-6.0.0.tgz", + "integrity": "sha512-isyNax3wXoKaulPDZWHQqbmIx1k2tb9fb3GGDBRxCscfYV2Ch7WxPArBsFEG8s/safwXTT7H4QGhaIkTp9447w==", + "dev": true + }, + "node_modules/universalify": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-1.0.0.tgz", + "integrity": "sha512-rb6X1W158d7pRQBg5gkR8uPaSfiids68LTJQYOtEUhoJUWBdaQHsuT/EUduxXYxcrt4r5PJ4fuHW1MHT6p0qug==", + "dev": true, + "engines": { + "node": ">= 10.0.0" + } + }, + "node_modules/url-join": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/url-join/-/url-join-4.0.1.tgz", + "integrity": "sha512-jk1+QP6ZJqyOiuEI9AEWQfju/nB2Pw466kbA0LEZljHwKeMgd9WrAEgEGxjPDD2+TNbbb37rTyhEfrCXfuKXnA==", + "dev": true + }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=", + "dev": true + }, + "node_modules/uuid": { + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.4.0.tgz", + "integrity": "sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A==", + "deprecated": "Please upgrade to version 7 or higher. Older versions may use Math.random() in certain circumstances, which is known to be problematic. 
See https://v8.dev/blog/math-random for details.", + "dev": true, + "bin": { + "uuid": "bin/uuid" + } + }, + "node_modules/validate-npm-package-license": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/validate-npm-package-license/-/validate-npm-package-license-3.0.4.tgz", + "integrity": "sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew==", + "dev": true, + "dependencies": { + "spdx-correct": "^3.0.0", + "spdx-expression-parse": "^3.0.0" + } + }, + "node_modules/webidl-conversions": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", + "integrity": "sha1-JFNCdeKnvGvnvIZhHMFq4KVlSHE=", + "dev": true + }, + "node_modules/whatwg-url": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", + "integrity": "sha1-lmRU6HZUYuN2RNNib2dCzotwll0=", + "dev": true, + "dependencies": { + "tr46": "~0.0.3", + "webidl-conversions": "^3.0.0" + } + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/which-module": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/which-module/-/which-module-2.0.0.tgz", + "integrity": "sha1-2e8H3Od7mQK4o6j6SzHD4/fm6Ho=", + "dev": true + }, + "node_modules/wide-align": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/wide-align/-/wide-align-1.1.3.tgz", + "integrity": "sha512-QGkOQc8XL6Bt5PwnsExKBPuMKBxnGxWWW3fU55Xt4feHozMUhdUMaBCk290qpm/wG5u/RSKzwdAC4i51YigihA==", + "dev": true, + "dependencies": { + "string-width": "^1.0.2 || 2" + } + }, + "node_modules/wide-align/node_modules/ansi-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-3.0.0.tgz", + "integrity": "sha1-7QMXwyIGT3lGbAKWa922Bas32Zg=", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/wide-align/node_modules/is-fullwidth-code-point": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", + "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/wide-align/node_modules/string-width": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-2.1.1.tgz", + "integrity": "sha512-nOqH59deCq9SRHlxq1Aw85Jnt4w6KvLKqWVik6oA9ZklXLNIOlqg4F2yrT1MVaTjAqvVwdfeZ7w7aCvJD7ugkw==", + "dev": true, + "dependencies": { + "is-fullwidth-code-point": "^2.0.0", + "strip-ansi": "^4.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/wide-align/node_modules/strip-ansi": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-4.0.0.tgz", + "integrity": "sha1-qEeQIusaw2iocTibY1JixQXuNo8=", + "dev": true, + "dependencies": { + "ansi-regex": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/workerpool": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/workerpool/-/workerpool-6.0.2.tgz", + "integrity": "sha512-DSNyvOpFKrNusaaUwk+ej6cBj1bmhLcBfj80elGk+ZIo5JSkq+unB1dLKEOcNfJDZgjGICfhQ0Q5TbP0PvF4+Q==", + "dev": true + }, + "node_modules/wrap-ansi": { + "version": "6.2.0", + "resolved": 
"https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-6.2.0.tgz", + "integrity": "sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/wrap-ansi/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/wrap-ansi/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/wrap-ansi/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=", + "dev": true + }, + "node_modules/write-file-atomic": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-3.0.3.tgz", + "integrity": "sha512-AvHcyZ5JnSfq3ioSyjrBkH9yW4m7Ayk8/9My/DD9onKeu/94fwrMocemO2QAJFAlnnDN+ZDS+ZjAR5ua1/PV/Q==", + "dev": true, + "dependencies": { + "imurmurhash": "^0.1.4", + "is-typedarray": "^1.0.0", + "signal-exit": "^3.0.2", + "typedarray-to-buffer": "^3.1.5" + } + }, + "node_modules/xtend": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz", + "integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==", + "dev": true, + "engines": { + "node": ">=0.4" + } + }, + "node_modules/y18n": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-4.0.1.tgz", + "integrity": "sha512-wNcy4NvjMYL8gogWWYAO7ZFWFfHcbdbE57tZO8e4cbpj8tfUcwrwqSl3ad8HxpYWCdXcJUCeKKZS62Av1affwQ==", + "dev": true + }, + "node_modules/yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", + "dev": true + }, + "node_modules/yaml": { + "version": "1.10.2", + "resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.2.tgz", + "integrity": "sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==", + "dev": true, + "engines": { + "node": ">= 6" + } + }, + "node_modules/yargs": { + "version": "15.4.1", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-15.4.1.tgz", + "integrity": "sha512-aePbxDmcYW++PaqBsJ+HYUFwCdv4LVvdnhBy78E57PIor8/OVvhMrADFFEDh8DHDFRv/O9i3lPhsENjO7QX0+A==", + "dev": true, + "dependencies": { + "cliui": "^6.0.0", + "decamelize": "^1.2.0", + "find-up": "^4.1.0", + "get-caller-file": "^2.0.1", + "require-directory": 
"^2.1.1", + "require-main-filename": "^2.0.0", + "set-blocking": "^2.0.0", + "string-width": "^4.2.0", + "which-module": "^2.0.0", + "y18n": "^4.0.0", + "yargs-parser": "^18.1.2" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/yargs-parser": { + "version": "10.1.0", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-10.1.0.tgz", + "integrity": "sha512-VCIyR1wJoEBZUqk5PA+oOBF6ypbwh5aNB3I50guxAL/quggdfs4TtNHQrSazFA3fYZ+tEqfs0zIGlv0c/rgjbQ==", + "dev": true, + "dependencies": { + "camelcase": "^4.1.0" + } + }, + "node_modules/yargs-unparser": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/yargs-unparser/-/yargs-unparser-2.0.0.tgz", + "integrity": "sha512-7pRTIA9Qc1caZ0bZ6RYRGbHJthJWuakf+WmHK0rVeLkNrrGhfoabBNdue6kdINI6r4if7ocq9aD/n7xwKOdzOA==", + "dev": true, + "dependencies": { + "camelcase": "^6.0.0", + "decamelize": "^4.0.0", + "flat": "^5.0.2", + "is-plain-obj": "^2.1.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/yargs-unparser/node_modules/camelcase": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.2.0.tgz", + "integrity": "sha512-c7wVvbw3f37nuobQNtgsgG9POC9qMbNuMQmTCqZv23b6MIz0fcYpBiOlv9gEN/hdLdnZTDQhg6e9Dq5M1vKvfg==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/yargs-unparser/node_modules/decamelize": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-4.0.0.tgz", + "integrity": "sha512-9iE1PgSik9HeIIw2JO94IidnE3eBoQrFJ3w7sFuzSX4DpmZ3v5sZpUiV5Swcf6mQEF+Y0ru8Neo+p+nyh2J+hQ==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/yargs-unparser/node_modules/is-plain-obj": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-2.1.0.tgz", + "integrity": "sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/yargs/node_modules/camelcase": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", + "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/yargs/node_modules/find-up": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", + "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", + "dev": true, + "dependencies": { + "locate-path": "^5.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/yargs/node_modules/locate-path": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", + "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", + "dev": true, + "dependencies": { + "p-locate": "^4.1.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/yargs/node_modules/p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "dev": true, + "dependencies": { + "p-try": "^2.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { 
+ "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/yargs/node_modules/p-locate": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", + "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", + "dev": true, + "dependencies": { + "p-limit": "^2.2.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/yargs/node_modules/p-try": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/yargs/node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/yargs/node_modules/yargs-parser": { + "version": "18.1.3", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-18.1.3.tgz", + "integrity": "sha512-o50j0JeToy/4K6OZcaQmW6lyXXKhq7csREXcDwk2omFPJEwUNOVtJKvmDr9EI1fAJZUyZcRF7kxGBWmRXudrCQ==", + "dev": true, + "dependencies": { + "camelcase": "^5.0.0", + "decamelize": "^1.2.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + } + }, + "dependencies": { + "@babel/code-frame": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.8.3.tgz", + "integrity": "sha512-a9gxpmdXtZEInkCSHUJDLHZVBgb1QS0jhss4cPP93EW7s+uC5bikET2twEF3KV+7rDblJcmNvTR7VJejqd2C2g==", + "dev": true, + "requires": { + "@babel/highlight": "^7.8.3" + } + }, + "@babel/core": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.12.13.tgz", + "integrity": "sha512-BQKE9kXkPlXHPeqissfxo0lySWJcYdEP0hdtJOH/iJfDdhOCcgtNCjftCJg3qqauB4h+lz2N6ixM++b9DN1Tcw==", + "dev": true, + "requires": { + "@babel/code-frame": "^7.12.13", + "@babel/generator": "^7.12.13", + "@babel/helper-module-transforms": "^7.12.13", + "@babel/helpers": "^7.12.13", + "@babel/parser": "^7.12.13", + "@babel/template": "^7.12.13", + "@babel/traverse": "^7.12.13", + "@babel/types": "^7.12.13", + "convert-source-map": "^1.7.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.1", + "json5": "^2.1.2", + "lodash": "^4.17.19", + "semver": "^5.4.1", + "source-map": "^0.5.0" + }, + "dependencies": { + "@babel/code-frame": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.12.13.tgz", + "integrity": "sha512-HV1Cm0Q3ZrpCR93tkWOYiuYIgLxZXZFVG2VgK+MBWjUqZTundupbfx2aXarXuw5Ko5aMcjtJgbSs4vUGBS5v6g==", + "dev": true, + "requires": { + "@babel/highlight": "^7.12.13" + } + }, + "@babel/highlight": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.12.13.tgz", + "integrity": "sha512-kocDQvIbgMKlWxXe9fof3TQ+gkIPOUSEYhJjqUjvKMez3krV7vbzYCDq39Oj11UAVK7JqPVGQPlgE85dPNlQww==", + "dev": true, + "requires": { + "@babel/helper-validator-identifier": 
"^7.12.11", + "chalk": "^2.0.0", + "js-tokens": "^4.0.0" + } + }, + "source-map": { + "version": "0.5.7", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", + "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=", + "dev": true + } + } + }, + "@babel/generator": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.12.13.tgz", + "integrity": "sha512-9qQ8Fgo8HaSvHEt6A5+BATP7XktD/AdAnObUeTRz5/e2y3kbrxZgz32qUJJsdmwUvBJzF4AeV21nGTNwv05Mpw==", + "dev": true, + "requires": { + "@babel/types": "^7.12.13", + "jsesc": "^2.5.1", + "source-map": "^0.5.0" + }, + "dependencies": { + "source-map": { + "version": "0.5.7", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", + "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=", + "dev": true + } + } + }, + "@babel/helper-function-name": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.12.13.tgz", + "integrity": "sha512-TZvmPn0UOqmvi5G4vvw0qZTpVptGkB1GL61R6lKvrSdIxGm5Pky7Q3fpKiIkQCAtRCBUwB0PaThlx9vebCDSwA==", + "dev": true, + "requires": { + "@babel/helper-get-function-arity": "^7.12.13", + "@babel/template": "^7.12.13", + "@babel/types": "^7.12.13" + } + }, + "@babel/helper-get-function-arity": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/helper-get-function-arity/-/helper-get-function-arity-7.12.13.tgz", + "integrity": "sha512-DjEVzQNz5LICkzN0REdpD5prGoidvbdYk1BVgRUOINaWJP2t6avB27X1guXK1kXNrX0WMfsrm1A/ZBthYuIMQg==", + "dev": true, + "requires": { + "@babel/types": "^7.12.13" + } + }, + "@babel/helper-member-expression-to-functions": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.12.13.tgz", + "integrity": "sha512-B+7nN0gIL8FZ8SvMcF+EPyB21KnCcZHQZFczCxbiNGV/O0rsrSBlWGLzmtBJ3GMjSVMIm4lpFhR+VdVBuIsUcQ==", + "dev": true, + "requires": { + "@babel/types": "^7.12.13" + } + }, + "@babel/helper-module-imports": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.12.13.tgz", + "integrity": "sha512-NGmfvRp9Rqxy0uHSSVP+SRIW1q31a7Ji10cLBcqSDUngGentY4FRiHOFZFE1CLU5eiL0oE8reH7Tg1y99TDM/g==", + "dev": true, + "requires": { + "@babel/types": "^7.12.13" + } + }, + "@babel/helper-module-transforms": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.12.13.tgz", + "integrity": "sha512-acKF7EjqOR67ASIlDTupwkKM1eUisNAjaSduo5Cz+793ikfnpe7p4Q7B7EWU2PCoSTPWsQkR7hRUWEIZPiVLGA==", + "dev": true, + "requires": { + "@babel/helper-module-imports": "^7.12.13", + "@babel/helper-replace-supers": "^7.12.13", + "@babel/helper-simple-access": "^7.12.13", + "@babel/helper-split-export-declaration": "^7.12.13", + "@babel/helper-validator-identifier": "^7.12.11", + "@babel/template": "^7.12.13", + "@babel/traverse": "^7.12.13", + "@babel/types": "^7.12.13", + "lodash": "^4.17.19" + } + }, + "@babel/helper-optimise-call-expression": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.12.13.tgz", + "integrity": "sha512-BdWQhoVJkp6nVjB7nkFWcn43dkprYauqtk++Py2eaf/GRDFm5BxRqEIZCiHlZUGAVmtwKcsVL1dC68WmzeFmiA==", + "dev": true, + "requires": { + "@babel/types": "^7.12.13" + } + }, + "@babel/helper-replace-supers": { + "version": "7.12.13", + "resolved": 
"https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.12.13.tgz", + "integrity": "sha512-pctAOIAMVStI2TMLhozPKbf5yTEXc0OJa0eENheb4w09SrgOWEs+P4nTOZYJQCqs8JlErGLDPDJTiGIp3ygbLg==", + "dev": true, + "requires": { + "@babel/helper-member-expression-to-functions": "^7.12.13", + "@babel/helper-optimise-call-expression": "^7.12.13", + "@babel/traverse": "^7.12.13", + "@babel/types": "^7.12.13" + } + }, + "@babel/helper-simple-access": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.12.13.tgz", + "integrity": "sha512-0ski5dyYIHEfwpWGx5GPWhH35j342JaflmCeQmsPWcrOQDtCN6C1zKAVRFVbK53lPW2c9TsuLLSUDf0tIGJ5hA==", + "dev": true, + "requires": { + "@babel/types": "^7.12.13" + } + }, + "@babel/helper-split-export-declaration": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.12.13.tgz", + "integrity": "sha512-tCJDltF83htUtXx5NLcaDqRmknv652ZWCHyoTETf1CXYJdPC7nohZohjUgieXhv0hTJdRf2FjDueFehdNucpzg==", + "dev": true, + "requires": { + "@babel/types": "^7.12.13" + } + }, + "@babel/helper-validator-identifier": { + "version": "7.12.11", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.12.11.tgz", + "integrity": "sha512-np/lG3uARFybkoHokJUmf1QfEvRVCPbmQeUQpKow5cQ3xWrV9i3rUHodKDJPQfTVX61qKi+UdYk8kik84n7XOw==", + "dev": true + }, + "@babel/helpers": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.12.13.tgz", + "integrity": "sha512-oohVzLRZ3GQEk4Cjhfs9YkJA4TdIDTObdBEZGrd6F/T0GPSnuV6l22eMcxlvcvzVIPH3VTtxbseudM1zIE+rPQ==", + "dev": true, + "requires": { + "@babel/template": "^7.12.13", + "@babel/traverse": "^7.12.13", + "@babel/types": "^7.12.13" + } + }, + "@babel/highlight": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.8.3.tgz", + "integrity": "sha512-PX4y5xQUvy0fnEVHrYOarRPXVWafSjTW9T0Hab8gVIawpl2Sj0ORyrygANq+KjcNlSSTw0YCLSNA8OyZ1I4yEg==", + "dev": true, + "requires": { + "chalk": "^2.0.0", + "esutils": "^2.0.2", + "js-tokens": "^4.0.0" + } + }, + "@babel/parser": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.12.13.tgz", + "integrity": "sha512-z7n7ybOUzaRc3wwqLpAX8UFIXsrVXUJhtNGBwAnLz6d1KUapqyq7ad2La8gZ6CXhHmGAIL32cop8Tst4/PNWLw==", + "dev": true + }, + "@babel/template": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.12.13.tgz", + "integrity": "sha512-/7xxiGA57xMo/P2GVvdEumr8ONhFOhfgq2ihK3h1e6THqzTAkHbkXgB0xI9yeTfIUoH3+oAeHhqm/I43OTbbjA==", + "dev": true, + "requires": { + "@babel/code-frame": "^7.12.13", + "@babel/parser": "^7.12.13", + "@babel/types": "^7.12.13" + }, + "dependencies": { + "@babel/code-frame": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.12.13.tgz", + "integrity": "sha512-HV1Cm0Q3ZrpCR93tkWOYiuYIgLxZXZFVG2VgK+MBWjUqZTundupbfx2aXarXuw5Ko5aMcjtJgbSs4vUGBS5v6g==", + "dev": true, + "requires": { + "@babel/highlight": "^7.12.13" + } + }, + "@babel/highlight": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.12.13.tgz", + "integrity": "sha512-kocDQvIbgMKlWxXe9fof3TQ+gkIPOUSEYhJjqUjvKMez3krV7vbzYCDq39Oj11UAVK7JqPVGQPlgE85dPNlQww==", + "dev": true, + "requires": { + "@babel/helper-validator-identifier": "^7.12.11", + "chalk": "^2.0.0", + "js-tokens": 
"^4.0.0" + } + } + } + }, + "@babel/traverse": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.12.13.tgz", + "integrity": "sha512-3Zb4w7eE/OslI0fTp8c7b286/cQps3+vdLW3UcwC8VSJC6GbKn55aeVVu2QJNuCDoeKyptLOFrPq8WqZZBodyA==", + "dev": true, + "requires": { + "@babel/code-frame": "^7.12.13", + "@babel/generator": "^7.12.13", + "@babel/helper-function-name": "^7.12.13", + "@babel/helper-split-export-declaration": "^7.12.13", + "@babel/parser": "^7.12.13", + "@babel/types": "^7.12.13", + "debug": "^4.1.0", + "globals": "^11.1.0", + "lodash": "^4.17.19" + }, + "dependencies": { + "@babel/code-frame": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.12.13.tgz", + "integrity": "sha512-HV1Cm0Q3ZrpCR93tkWOYiuYIgLxZXZFVG2VgK+MBWjUqZTundupbfx2aXarXuw5Ko5aMcjtJgbSs4vUGBS5v6g==", + "dev": true, + "requires": { + "@babel/highlight": "^7.12.13" + } + }, + "@babel/highlight": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.12.13.tgz", + "integrity": "sha512-kocDQvIbgMKlWxXe9fof3TQ+gkIPOUSEYhJjqUjvKMez3krV7vbzYCDq39Oj11UAVK7JqPVGQPlgE85dPNlQww==", + "dev": true, + "requires": { + "@babel/helper-validator-identifier": "^7.12.11", + "chalk": "^2.0.0", + "js-tokens": "^4.0.0" + } + } + } + }, + "@babel/types": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.12.13.tgz", + "integrity": "sha512-oKrdZTld2im1z8bDwTOQvUbxKwE+854zc16qWZQlcTqMN00pWxHQ4ZeOq0yDMnisOpRykH2/5Qqcrk/OlbAjiQ==", + "dev": true, + "requires": { + "@babel/helper-validator-identifier": "^7.12.11", + "lodash": "^4.17.19", + "to-fast-properties": "^2.0.0" + } + }, + "@istanbuljs/load-nyc-config": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@istanbuljs/load-nyc-config/-/load-nyc-config-1.1.0.tgz", + "integrity": "sha512-VjeHSlIzpv/NyD3N0YuHfXOPDIixcA1q2ZV98wsMqcYlPmv2n3Yb2lYP9XMElnaFVXg5A7YLTeLu6V84uQDjmQ==", + "dev": true, + "requires": { + "camelcase": "^5.3.1", + "find-up": "^4.1.0", + "get-package-type": "^0.1.0", + "js-yaml": "^3.13.1", + "resolve-from": "^5.0.0" + }, + "dependencies": { + "camelcase": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", + "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", + "dev": true + }, + "find-up": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", + "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", + "dev": true, + "requires": { + "locate-path": "^5.0.0", + "path-exists": "^4.0.0" + } + }, + "locate-path": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", + "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", + "dev": true, + "requires": { + "p-locate": "^4.1.0" + } + }, + "p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "dev": true, + "requires": { + "p-try": "^2.0.0" + } + }, + "p-locate": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", + "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", + 
"dev": true, + "requires": { + "p-limit": "^2.2.0" + } + }, + "p-try": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "dev": true + }, + "path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true + } + } + }, + "@istanbuljs/schema": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/@istanbuljs/schema/-/schema-0.1.2.tgz", + "integrity": "sha512-tsAQNx32a8CoFhjhijUIhI4kccIAgmGhy8LZMZgGfmXcpMbPRUqn5LWmgRttILi6yeGmBJd2xsPkFMs0PzgPCw==", + "dev": true + }, + "@nodelib/fs.scandir": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.4.tgz", + "integrity": "sha512-33g3pMJk3bg5nXbL/+CY6I2eJDzZAni49PfJnL5fghPTggPvBd/pFNSgJsdAgWptuFu7qq/ERvOYFlhvsLTCKA==", + "dev": true, + "requires": { + "@nodelib/fs.stat": "2.0.4", + "run-parallel": "^1.1.9" + } + }, + "@nodelib/fs.stat": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.4.tgz", + "integrity": "sha512-IYlHJA0clt2+Vg7bccq+TzRdJvv19c2INqBSsoOLp1je7xjtr7J26+WXR72MCdvU9q1qTzIWDfhMf+DRvQJK4Q==", + "dev": true + }, + "@nodelib/fs.walk": { + "version": "1.2.6", + "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.6.tgz", + "integrity": "sha512-8Broas6vTtW4GIXTAHDoE32hnN2M5ykgCpWGbuXHQ15vEMqr23pB76e/GZcYsZCHALv50ktd24qhEyKr6wBtow==", + "dev": true, + "requires": { + "@nodelib/fs.scandir": "2.1.4", + "fastq": "^1.6.0" + } + }, + "@octokit/auth-token": { + "version": "2.4.5", + "resolved": "https://registry.npmjs.org/@octokit/auth-token/-/auth-token-2.4.5.tgz", + "integrity": "sha512-BpGYsPgJt05M7/L/5FoE1PiAbdxXFZkX/3kDYcsvd1v6UhlnE5e96dTDr0ezX/EFwciQxf3cNV0loipsURU+WA==", + "dev": true, + "requires": { + "@octokit/types": "^6.0.3" + } + }, + "@octokit/core": { + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/@octokit/core/-/core-3.4.0.tgz", + "integrity": "sha512-6/vlKPP8NF17cgYXqucdshWqmMZGXkuvtcrWCgU5NOI0Pl2GjlmZyWgBMrU8zJ3v2MJlM6++CiB45VKYmhiWWg==", + "dev": true, + "requires": { + "@octokit/auth-token": "^2.4.4", + "@octokit/graphql": "^4.5.8", + "@octokit/request": "^5.4.12", + "@octokit/request-error": "^2.0.5", + "@octokit/types": "^6.0.3", + "before-after-hook": "^2.2.0", + "universal-user-agent": "^6.0.0" + } + }, + "@octokit/endpoint": { + "version": "6.0.11", + "resolved": "https://registry.npmjs.org/@octokit/endpoint/-/endpoint-6.0.11.tgz", + "integrity": "sha512-fUIPpx+pZyoLW4GCs3yMnlj2LfoXTWDUVPTC4V3MUEKZm48W+XYpeWSZCv+vYF1ZABUm2CqnDVf1sFtIYrj7KQ==", + "dev": true, + "requires": { + "@octokit/types": "^6.0.3", + "is-plain-object": "^5.0.0", + "universal-user-agent": "^6.0.0" + } + }, + "@octokit/graphql": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/@octokit/graphql/-/graphql-4.6.2.tgz", + "integrity": "sha512-WmsIR1OzOr/3IqfG9JIczI8gMJUMzzyx5j0XXQ4YihHtKlQc+u35VpVoOXhlKAlaBntvry1WpAzPl/a+s3n89Q==", + "dev": true, + "requires": { + "@octokit/request": "^5.3.0", + "@octokit/types": "^6.0.3", + "universal-user-agent": "^6.0.0" + } + }, + "@octokit/openapi-types": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/@octokit/openapi-types/-/openapi-types-7.0.0.tgz", + "integrity": 
"sha512-gV/8DJhAL/04zjTI95a7FhQwS6jlEE0W/7xeYAzuArD0KVAVWDLP2f3vi98hs3HLTczxXdRK/mF0tRoQPpolEw==", + "dev": true + }, + "@octokit/plugin-paginate-rest": { + "version": "2.13.3", + "resolved": "https://registry.npmjs.org/@octokit/plugin-paginate-rest/-/plugin-paginate-rest-2.13.3.tgz", + "integrity": "sha512-46lptzM9lTeSmIBt/sVP/FLSTPGx6DCzAdSX3PfeJ3mTf4h9sGC26WpaQzMEq/Z44cOcmx8VsOhO+uEgE3cjYg==", + "dev": true, + "requires": { + "@octokit/types": "^6.11.0" + } + }, + "@octokit/plugin-request-log": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@octokit/plugin-request-log/-/plugin-request-log-1.0.3.tgz", + "integrity": "sha512-4RFU4li238jMJAzLgAwkBAw+4Loile5haQMQr+uhFq27BmyJXcXSKvoQKqh0agsZEiUlW6iSv3FAgvmGkur7OQ==", + "dev": true, + "requires": {} + }, + "@octokit/plugin-rest-endpoint-methods": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/@octokit/plugin-rest-endpoint-methods/-/plugin-rest-endpoint-methods-5.0.1.tgz", + "integrity": "sha512-vvWbPtPqLyIzJ7A4IPdTl+8IeuKAwMJ4LjvmqWOOdfSuqWQYZXq2CEd0hsnkidff2YfKlguzujHs/reBdAx8Sg==", + "dev": true, + "requires": { + "@octokit/types": "^6.13.1", + "deprecation": "^2.3.1" + } + }, + "@octokit/request": { + "version": "5.4.15", + "resolved": "https://registry.npmjs.org/@octokit/request/-/request-5.4.15.tgz", + "integrity": "sha512-6UnZfZzLwNhdLRreOtTkT9n57ZwulCve8q3IT/Z477vThu6snfdkBuhxnChpOKNGxcQ71ow561Qoa6uqLdPtag==", + "dev": true, + "requires": { + "@octokit/endpoint": "^6.0.1", + "@octokit/request-error": "^2.0.0", + "@octokit/types": "^6.7.1", + "is-plain-object": "^5.0.0", + "node-fetch": "^2.6.1", + "universal-user-agent": "^6.0.0" + } + }, + "@octokit/request-error": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@octokit/request-error/-/request-error-2.0.5.tgz", + "integrity": "sha512-T/2wcCFyM7SkXzNoyVNWjyVlUwBvW3igM3Btr/eKYiPmucXTtkxt2RBsf6gn3LTzaLSLTQtNmvg+dGsOxQrjZg==", + "dev": true, + "requires": { + "@octokit/types": "^6.0.3", + "deprecation": "^2.0.0", + "once": "^1.4.0" + } + }, + "@octokit/rest": { + "version": "18.5.3", + "resolved": "https://registry.npmjs.org/@octokit/rest/-/rest-18.5.3.tgz", + "integrity": "sha512-KPAsUCr1DOdLVbZJgGNuE/QVLWEaVBpFQwDAz/2Cnya6uW2wJ/P5RVGk0itx7yyN1aGa8uXm2pri4umEqG1JBA==", + "dev": true, + "requires": { + "@octokit/core": "^3.2.3", + "@octokit/plugin-paginate-rest": "^2.6.2", + "@octokit/plugin-request-log": "^1.0.2", + "@octokit/plugin-rest-endpoint-methods": "5.0.1" + } + }, + "@octokit/types": { + "version": "6.14.2", + "resolved": "https://registry.npmjs.org/@octokit/types/-/types-6.14.2.tgz", + "integrity": "sha512-wiQtW9ZSy4OvgQ09iQOdyXYNN60GqjCL/UdMsepDr1Gr0QzpW6irIKbH3REuAHXAhxkEk9/F2a3Gcs1P6kW5jA==", + "dev": true, + "requires": { + "@octokit/openapi-types": "^7.0.0" + } + }, + "@semantic-release/commit-analyzer": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/@semantic-release/commit-analyzer/-/commit-analyzer-8.0.1.tgz", + "integrity": "sha512-5bJma/oB7B4MtwUkZC2Bf7O1MHfi4gWe4mA+MIQ3lsEV0b422Bvl1z5HRpplDnMLHH3EXMoRdEng6Ds5wUqA3A==", + "dev": true, + "requires": { + "conventional-changelog-angular": "^5.0.0", + "conventional-commits-filter": "^2.0.0", + "conventional-commits-parser": "^3.0.7", + "debug": "^4.0.0", + "import-from": "^3.0.0", + "lodash": "^4.17.4", + "micromatch": "^4.0.2" + }, + "dependencies": { + "debug": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.1.1.tgz", + "integrity": 
"sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==", + "dev": true, + "requires": { + "ms": "^2.1.1" + } + }, + "ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "dev": true + } + } + }, + "@semantic-release/error": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/@semantic-release/error/-/error-2.2.0.tgz", + "integrity": "sha512-9Tj/qn+y2j+sjCI3Jd+qseGtHjOAeg7dU2/lVcqIQ9TV3QDaDXDYXcoOHU+7o2Hwh8L8ymL4gfuO7KxDs3q2zg==", + "dev": true + }, + "@semantic-release/github": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/@semantic-release/github/-/github-7.2.3.tgz", + "integrity": "sha512-lWjIVDLal+EQBzy697ayUNN8MoBpp+jYIyW2luOdqn5XBH4d9bQGfTnjuLyzARZBHejqh932HVjiH/j4+R7VHw==", + "dev": true, + "requires": { + "@octokit/rest": "^18.0.0", + "@semantic-release/error": "^2.2.0", + "aggregate-error": "^3.0.0", + "bottleneck": "^2.18.1", + "debug": "^4.0.0", + "dir-glob": "^3.0.0", + "fs-extra": "^10.0.0", + "globby": "^11.0.0", + "http-proxy-agent": "^4.0.0", + "https-proxy-agent": "^5.0.0", + "issue-parser": "^6.0.0", + "lodash": "^4.17.4", + "mime": "^2.4.3", + "p-filter": "^2.0.0", + "p-retry": "^4.0.0", + "url-join": "^4.0.0" + }, + "dependencies": { + "fs-extra": { + "version": "10.0.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.0.0.tgz", + "integrity": "sha512-C5owb14u9eJwizKGdchcDUQeFtlSHHthBk8pbX9Vc1PFZrLombudjDnNns88aYslCyF6IY5SUw3Roz6xShcEIQ==", + "dev": true, + "requires": { + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + } + }, + "universalify": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", + "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==", + "dev": true + } + } + }, + "@semantic-release/npm": { + "version": "7.0.5", + "resolved": "https://registry.npmjs.org/@semantic-release/npm/-/npm-7.0.5.tgz", + "integrity": "sha512-D+oEmsx9aHE1q806NFQwSC9KdBO8ri/VO99eEz0wWbX2jyLqVyWr7t0IjKC8aSnkkQswg/4KN/ZjfF6iz1XOpw==", + "dev": true, + "requires": { + "@semantic-release/error": "^2.2.0", + "aggregate-error": "^3.0.0", + "execa": "^4.0.0", + "fs-extra": "^9.0.0", + "lodash": "^4.17.15", + "nerf-dart": "^1.0.0", + "normalize-url": "^5.0.0", + "npm": "^6.10.3", + "rc": "^1.2.8", + "read-pkg": "^5.0.0", + "registry-auth-token": "^4.0.0", + "semver": "^7.1.2", + "tempy": "^0.5.0" + }, + "dependencies": { + "cross-spawn": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.1.tgz", + "integrity": "sha512-u7v4o84SwFpD32Z8IIcPZ6z1/ie24O6RU3RbtL5Y316l3KuHVPx9ItBgWQ6VlfAFnRnTtMUrsQ9MUUTuEZjogg==", + "dev": true, + "requires": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + } + }, + "execa": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/execa/-/execa-4.0.0.tgz", + "integrity": "sha512-JbDUxwV3BoT5ZVXQrSVbAiaXhXUkIwvbhPIwZ0N13kX+5yCzOhUNdocxB/UQRuYOHRYYwAxKYwJYc0T4D12pDA==", + "dev": true, + "requires": { + "cross-spawn": "^7.0.0", + "get-stream": "^5.0.0", + "human-signals": "^1.1.1", + "is-stream": "^2.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^4.0.0", + "onetime": "^5.1.0", + "signal-exit": "^3.0.2", + "strip-final-newline": "^2.0.0" + } + }, + "get-stream": { + "version": "5.1.0", + "resolved": 
"https://registry.npmjs.org/get-stream/-/get-stream-5.1.0.tgz", + "integrity": "sha512-EXr1FOzrzTfGeL0gQdeFEvOMm2mzMOglyiOXSTpPC+iAjAKftbr3jpCMWynogwYnM+eSj9sHGc6wjIcDvYiygw==", + "dev": true, + "requires": { + "pump": "^3.0.0" + } + }, + "is-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.0.tgz", + "integrity": "sha512-XCoy+WlUr7d1+Z8GgSuXmpuUFC9fOhRXglJMx+dwLKTkL44Cjd4W1Z5P+BQZpr+cR93aGP4S/s7Ftw6Nd/kiEw==", + "dev": true + }, + "npm-run-path": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", + "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", + "dev": true, + "requires": { + "path-key": "^3.0.0" + } + }, + "parse-json": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.0.0.tgz", + "integrity": "sha512-OOY5b7PAEFV0E2Fir1KOkxchnZNCdowAJgQ5NuxjpBKTRP3pQhwkrkxqQjeoKJ+fO7bCpmIZaogI4eZGDMEGOw==", + "dev": true, + "requires": { + "@babel/code-frame": "^7.0.0", + "error-ex": "^1.3.1", + "json-parse-better-errors": "^1.0.1", + "lines-and-columns": "^1.1.6" + } + }, + "path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true + }, + "read-pkg": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-5.2.0.tgz", + "integrity": "sha512-Ug69mNOpfvKDAc2Q8DRpMjjzdtrnv9HcSMX+4VsZxD1aZ6ZzrIE7rlzXBtWTyhULSMKg076AW6WR5iZpD0JiOg==", + "dev": true, + "requires": { + "@types/normalize-package-data": "^2.4.0", + "normalize-package-data": "^2.5.0", + "parse-json": "^5.0.0", + "type-fest": "^0.6.0" + } + }, + "semver": { + "version": "7.1.3", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.1.3.tgz", + "integrity": "sha512-ekM0zfiA9SCBlsKa2X1hxyxiI4L3B6EbVJkkdgQXnSEEaHlGdvyodMruTiulSRWMMB4NeIuYNMC9rTKTz97GxA==", + "dev": true + }, + "shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "requires": { + "shebang-regex": "^3.0.0" + } + }, + "shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true + }, + "which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "requires": { + "isexe": "^2.0.0" + } + } + } + }, + "@semantic-release/release-notes-generator": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/@semantic-release/release-notes-generator/-/release-notes-generator-9.0.1.tgz", + "integrity": "sha512-bOoTiH6SiiR0x2uywSNR7uZcRDl22IpZhj+Q5Bn0v+98MFtOMhCxFhbrKQjhbYoZw7vps1mvMRmFkp/g6R9cvQ==", + "dev": true, + "requires": { + "conventional-changelog-angular": "^5.0.0", + "conventional-changelog-writer": "^4.0.0", + "conventional-commits-filter": "^2.0.0", + "conventional-commits-parser": "^3.0.0", + "debug": "^4.0.0", + "get-stream": "^5.0.0", + "import-from": "^3.0.0", + "into-stream": "^5.0.0", + 
"lodash": "^4.17.4", + "read-pkg-up": "^7.0.0" + }, + "dependencies": { + "debug": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.1.1.tgz", + "integrity": "sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==", + "dev": true, + "requires": { + "ms": "^2.1.1" + } + }, + "find-up": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", + "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", + "dev": true, + "requires": { + "locate-path": "^5.0.0", + "path-exists": "^4.0.0" + } + }, + "get-stream": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-5.1.0.tgz", + "integrity": "sha512-EXr1FOzrzTfGeL0gQdeFEvOMm2mzMOglyiOXSTpPC+iAjAKftbr3jpCMWynogwYnM+eSj9sHGc6wjIcDvYiygw==", + "dev": true, + "requires": { + "pump": "^3.0.0" + } + }, + "locate-path": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", + "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", + "dev": true, + "requires": { + "p-locate": "^4.1.0" + } + }, + "ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "dev": true + }, + "p-limit": { + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.2.2.tgz", + "integrity": "sha512-WGR+xHecKTr7EbUEhyLSh5Dube9JtdiG78ufaeLxTgpudf/20KqyMioIUZJAezlTIi6evxuoUs9YXc11cU+yzQ==", + "dev": true, + "requires": { + "p-try": "^2.0.0" + } + }, + "p-locate": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", + "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", + "dev": true, + "requires": { + "p-limit": "^2.2.0" + } + }, + "p-try": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "dev": true + }, + "parse-json": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.0.0.tgz", + "integrity": "sha512-OOY5b7PAEFV0E2Fir1KOkxchnZNCdowAJgQ5NuxjpBKTRP3pQhwkrkxqQjeoKJ+fO7bCpmIZaogI4eZGDMEGOw==", + "dev": true, + "requires": { + "@babel/code-frame": "^7.0.0", + "error-ex": "^1.3.1", + "json-parse-better-errors": "^1.0.1", + "lines-and-columns": "^1.1.6" + } + }, + "path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true + }, + "read-pkg": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-5.2.0.tgz", + "integrity": "sha512-Ug69mNOpfvKDAc2Q8DRpMjjzdtrnv9HcSMX+4VsZxD1aZ6ZzrIE7rlzXBtWTyhULSMKg076AW6WR5iZpD0JiOg==", + "dev": true, + "requires": { + "@types/normalize-package-data": "^2.4.0", + "normalize-package-data": "^2.5.0", + "parse-json": "^5.0.0", + "type-fest": "^0.6.0" + }, + "dependencies": { + "type-fest": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.6.0.tgz", + "integrity": 
"sha512-q+MB8nYR1KDLrgr4G5yemftpMC7/QLqVndBmEEdqzmNj5dcFOO4Oo8qlwZE3ULT3+Zim1F8Kq4cBnikNhlCMlg==", + "dev": true + } + } + }, + "read-pkg-up": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-7.0.1.tgz", + "integrity": "sha512-zK0TB7Xd6JpCLmlLmufqykGE+/TlOePD6qKClNW7hHDKFh/J7/7gCWGR7joEQEW1bKq3a3yUZSObOoWLFQ4ohg==", + "dev": true, + "requires": { + "find-up": "^4.1.0", + "read-pkg": "^5.2.0", + "type-fest": "^0.8.1" + } + }, + "type-fest": { + "version": "0.8.1", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.8.1.tgz", + "integrity": "sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==", + "dev": true + } + } + }, + "@tootallnate/once": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@tootallnate/once/-/once-1.1.2.tgz", + "integrity": "sha512-RbzJvlNzmRq5c3O09UipeuXno4tA1FE6ikOjxZK0tuxVv3412l64l5t1W5pj4+rJq9vpkm/kwiR07aZXnsKPxw==", + "dev": true + }, + "@types/normalize-package-data": { + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/@types/normalize-package-data/-/normalize-package-data-2.4.0.tgz", + "integrity": "sha512-f5j5b/Gf71L+dbqxIpQ4Z2WlmI/mPJ0fOkGGmFgtb6sAu97EPczzbS3/tJKxmcYDj55OX6ssqwDAWOHIYDRDGA==", + "dev": true + }, + "@types/parse-json": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/@types/parse-json/-/parse-json-4.0.0.tgz", + "integrity": "sha512-//oorEZjL6sbPcKUaCdIGlIUeH26mgzimjBB77G6XRgnDl/L5wOnpyBGRe/Mmf5CVW3PwEBE1NjiMZ/ssFh4wA==", + "dev": true + }, + "@types/retry": { + "version": "0.12.0", + "resolved": "https://registry.npmjs.org/@types/retry/-/retry-0.12.0.tgz", + "integrity": "sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA==", + "dev": true + }, + "@ungap/promise-all-settled": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@ungap/promise-all-settled/-/promise-all-settled-1.1.2.tgz", + "integrity": "sha512-sL/cEvJWAnClXw0wHk85/2L0G6Sj8UB0Ctc1TEMbKSsmpRosqhwj9gWgFRZSrBr2f9tiXISwNhCPmlfqUqyb9Q==", + "dev": true + }, + "agent-base": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz", + "integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==", + "dev": true, + "requires": { + "debug": "4" + } + }, + "aggregate-error": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/aggregate-error/-/aggregate-error-3.0.1.tgz", + "integrity": "sha512-quoaXsZ9/BLNae5yiNoUz+Nhkwz83GhWwtYFglcjEQB2NDHCIpApbqXxIFnm4Pq/Nvhrsq5sYJFyohrrxnTGAA==", + "dev": true, + "requires": { + "clean-stack": "^2.0.0", + "indent-string": "^4.0.0" + }, + "dependencies": { + "indent-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", + "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==", + "dev": true + } + } + }, + "ansi-colors": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/ansi-colors/-/ansi-colors-4.1.1.tgz", + "integrity": "sha512-JoX0apGbHaUJBNl6yF+p6JAFYZ666/hhCGKN5t9QFjbJQKUU/g8MNbFDbvfrgKXvI1QpZplPOnwIo99lX/AAmA==", + "dev": true + }, + "ansi-escapes": { + "version": "4.3.2", + "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.2.tgz", + "integrity": "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==", + "dev": true, + "requires": { + "type-fest": 
"^0.21.3" + }, + "dependencies": { + "type-fest": { + "version": "0.21.3", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.21.3.tgz", + "integrity": "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==", + "dev": true + } + } + }, + "ansi-regex": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz", + "integrity": "sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==", + "dev": true + }, + "ansi-styles": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "dev": true, + "requires": { + "color-convert": "^1.9.0" + } + }, + "ansicolors": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/ansicolors/-/ansicolors-0.3.2.tgz", + "integrity": "sha1-ZlWX3oap/+Oqm/vmyuXG6kJrSXk=", + "dev": true + }, + "anymatch": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.1.tgz", + "integrity": "sha512-mM8522psRCqzV+6LhomX5wgp25YVibjh8Wj23I5RPkPppSVSjyKD2A2mBJmWGa+KN7f2D6LNh9jkBCeyLktzjg==", + "dev": true, + "requires": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + } + }, + "append-transform": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/append-transform/-/append-transform-2.0.0.tgz", + "integrity": "sha512-7yeyCEurROLQJFv5Xj4lEGTy0borxepjFv1g22oAdqFu//SrAlDl1O1Nxx15SH1RoliUml6p8dwJW9jvZughhg==", + "dev": true, + "requires": { + "default-require-extensions": "^3.0.0" + } + }, + "archy": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/archy/-/archy-1.0.0.tgz", + "integrity": "sha1-+cjBN1fMHde8N5rHeyxipcKGjEA=", + "dev": true + }, + "argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "dev": true, + "requires": { + "sprintf-js": "~1.0.2" + } + }, + "argv-formatter": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/argv-formatter/-/argv-formatter-1.0.0.tgz", + "integrity": "sha1-oMoMvCmltz6Dbuvhy/bF4OTrgvk=", + "dev": true + }, + "array-find-index": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/array-find-index/-/array-find-index-1.0.2.tgz", + "integrity": "sha1-3wEKoSh+Fku9pvlyOwqWoexBh6E=", + "dev": true + }, + "array-ify": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/array-ify/-/array-ify-1.0.0.tgz", + "integrity": "sha1-nlKHYrSpBmrRY6aWKjZEGOlibs4=", + "dev": true + }, + "array-union": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", + "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==", + "dev": true + }, + "arrify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/arrify/-/arrify-1.0.1.tgz", + "integrity": "sha1-iYUI2iIm84DfkEcoRWhJwVAaSw0=", + "dev": true + }, + "at-least-node": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/at-least-node/-/at-least-node-1.0.0.tgz", + "integrity": "sha512-+q/t7Ekv1EDY2l6Gda6LLiX14rU9TV20Wa3ofeQmwPFZbOMo9DXrLbOjFaaclkXKWidIaopwAObQDqwWtGUjqg==", + "dev": true + }, + "balanced-match": { + "version": "1.0.0", + "resolved": 
"https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.0.tgz", + "integrity": "sha1-ibTRmasr7kneFk6gK4nORi1xt2c=", + "dev": true + }, + "before-after-hook": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/before-after-hook/-/before-after-hook-2.2.1.tgz", + "integrity": "sha512-/6FKxSTWoJdbsLDF8tdIjaRiFXiE6UHsEHE3OPI/cwPURCVi1ukP0gmLn7XWEiFk5TcwQjjY5PWsU+j+tgXgmw==", + "dev": true + }, + "binary-extensions": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz", + "integrity": "sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==", + "dev": true + }, + "bottleneck": { + "version": "2.19.5", + "resolved": "https://registry.npmjs.org/bottleneck/-/bottleneck-2.19.5.tgz", + "integrity": "sha512-VHiNCbI1lKdl44tGrhNfU3lup0Tj/ZBMJB5/2ZbNXRCPuRCO7ed2mgcK4r17y+KB2EfuYuRaVlwNbAeaWGSpbw==", + "dev": true + }, + "brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "requires": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "braces": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", + "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "dev": true, + "requires": { + "fill-range": "^7.0.1" + } + }, + "browser-stdout": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/browser-stdout/-/browser-stdout-1.3.1.tgz", + "integrity": "sha512-qhAVI1+Av2X7qelOfAIYwXONood6XlZE/fXaBSmW/T5SzLAmCgzi+eiWE7fUvbHaeNBQH13UftjpXxsfLkMpgw==", + "dev": true + }, + "caching-transform": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/caching-transform/-/caching-transform-4.0.0.tgz", + "integrity": "sha512-kpqOvwXnjjN44D89K5ccQC+RUrsy7jB/XLlRrx0D7/2HNcTPqzsb6XgYoErwko6QsV184CA2YgS1fxDiiDZMWA==", + "dev": true, + "requires": { + "hasha": "^5.0.0", + "make-dir": "^3.0.0", + "package-hash": "^4.0.0", + "write-file-atomic": "^3.0.0" + } + }, + "callsites": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + "dev": true + }, + "camelcase": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-4.1.0.tgz", + "integrity": "sha1-1UVjW+HjPFQmScaRc+Xeas+uNN0=", + "dev": true + }, + "camelcase-keys": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/camelcase-keys/-/camelcase-keys-4.2.0.tgz", + "integrity": "sha1-oqpfsa9oh1glnDLBQUJteJI7m3c=", + "dev": true, + "requires": { + "camelcase": "^4.1.0", + "map-obj": "^2.0.0", + "quick-lru": "^1.0.0" + } + }, + "cardinal": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/cardinal/-/cardinal-2.1.1.tgz", + "integrity": "sha1-fMEFXYItISlU0HsIXeolHMe8VQU=", + "dev": true, + "requires": { + "ansicolors": "~0.3.2", + "redeyed": "~2.1.0" + } + }, + "chalk": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", + "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + "dev": true, + "requires": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + } + 
}, + "chokidar": { + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.4.3.tgz", + "integrity": "sha512-DtM3g7juCXQxFVSNPNByEC2+NImtBuxQQvWlHunpJIS5Ocr0lG306cC7FCi7cEA0fzmybPUIl4txBIobk1gGOQ==", + "dev": true, + "requires": { + "anymatch": "~3.1.1", + "braces": "~3.0.2", + "fsevents": "~2.1.2", + "glob-parent": "~5.1.0", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.5.0" + } + }, + "clean-stack": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-2.2.0.tgz", + "integrity": "sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A==", + "dev": true + }, + "cli-table": { + "version": "0.3.6", + "resolved": "https://registry.npmjs.org/cli-table/-/cli-table-0.3.6.tgz", + "integrity": "sha512-ZkNZbnZjKERTY5NwC2SeMeLeifSPq/pubeRoTpdr3WchLlnZg6hEgvHkK5zL7KNFdd9PmHN8lxrENUwI3cE8vQ==", + "dev": true, + "requires": { + "colors": "1.0.3" + } + }, + "cliui": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-6.0.0.tgz", + "integrity": "sha512-t6wbgtoCXvAzst7QgXxJYqPt0usEfbgQdftEPbLL/cvv6HPE5VgvqCuAIDR0NgU52ds6rFwqrgakNLrHEjCbrQ==", + "dev": true, + "requires": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.0", + "wrap-ansi": "^6.2.0" + } + }, + "color-convert": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "dev": true, + "requires": { + "color-name": "1.1.3" + } + }, + "color-name": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=", + "dev": true + }, + "colors": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/colors/-/colors-1.0.3.tgz", + "integrity": "sha1-BDP0TYCWgP3rYO0mDxsMJi6CpAs=", + "dev": true + }, + "commondir": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/commondir/-/commondir-1.0.1.tgz", + "integrity": "sha1-3dgA2gxmEnOTzKWVDqloo6rxJTs=", + "dev": true + }, + "compare-func": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/compare-func/-/compare-func-1.3.2.tgz", + "integrity": "sha1-md0LpFfh+bxyKxLAjsM+6rMfpkg=", + "dev": true, + "requires": { + "array-ify": "^1.0.0", + "dot-prop": "^3.0.0" + } + }, + "concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=", + "dev": true + }, + "conventional-changelog-angular": { + "version": "5.0.6", + "resolved": "https://registry.npmjs.org/conventional-changelog-angular/-/conventional-changelog-angular-5.0.6.tgz", + "integrity": "sha512-QDEmLa+7qdhVIv8sFZfVxU1VSyVvnXPsxq8Vam49mKUcO1Z8VTLEJk9uI21uiJUsnmm0I4Hrsdc9TgkOQo9WSA==", + "dev": true, + "requires": { + "compare-func": "^1.3.1", + "q": "^1.5.1" + } + }, + "conventional-changelog-conventionalcommits": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/conventional-changelog-conventionalcommits/-/conventional-changelog-conventionalcommits-4.2.3.tgz", + "integrity": "sha512-atGa+R4vvEhb8N/8v3IoW59gCBJeeFiX6uIbPu876ENAmkMwsenyn0R21kdDHJFLQdy6zW4J6b4xN8KI3b9oww==", + "dev": true, + "requires": { + "compare-func": "^1.3.1", + "lodash": "^4.17.15", + "q": "^1.5.1" + } + }, + "conventional-changelog-writer": { + "version": "4.0.11", + "resolved": 
"https://registry.npmjs.org/conventional-changelog-writer/-/conventional-changelog-writer-4.0.11.tgz", + "integrity": "sha512-g81GQOR392I+57Cw3IyP1f+f42ME6aEkbR+L7v1FBBWolB0xkjKTeCWVguzRrp6UiT1O6gBpJbEy2eq7AnV1rw==", + "dev": true, + "requires": { + "compare-func": "^1.3.1", + "conventional-commits-filter": "^2.0.2", + "dateformat": "^3.0.0", + "handlebars": "^4.4.0", + "json-stringify-safe": "^5.0.1", + "lodash": "^4.17.15", + "meow": "^5.0.0", + "semver": "^6.0.0", + "split": "^1.0.0", + "through2": "^3.0.0" + }, + "dependencies": { + "semver": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", + "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "dev": true + } + } + }, + "conventional-commits-filter": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/conventional-commits-filter/-/conventional-commits-filter-2.0.2.tgz", + "integrity": "sha512-WpGKsMeXfs21m1zIw4s9H5sys2+9JccTzpN6toXtxhpw2VNF2JUXwIakthKBy+LN4DvJm+TzWhxOMWOs1OFCFQ==", + "dev": true, + "requires": { + "lodash.ismatch": "^4.4.0", + "modify-values": "^1.0.0" + } + }, + "conventional-commits-parser": { + "version": "3.0.8", + "resolved": "https://registry.npmjs.org/conventional-commits-parser/-/conventional-commits-parser-3.0.8.tgz", + "integrity": "sha512-YcBSGkZbYp7d+Cr3NWUeXbPDFUN6g3SaSIzOybi8bjHL5IJ5225OSCxJJ4LgziyEJ7AaJtE9L2/EU6H7Nt/DDQ==", + "dev": true, + "requires": { + "is-text-path": "^1.0.1", + "JSONStream": "^1.0.4", + "lodash": "^4.17.15", + "meow": "^5.0.0", + "split2": "^2.0.0", + "through2": "^3.0.0", + "trim-off-newlines": "^1.0.0" + } + }, + "convert-source-map": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.7.0.tgz", + "integrity": "sha512-4FJkXzKXEDB1snCFZlLP4gpC3JILicCpGbzG9f9G7tGqGCzETQ2hWPrcinA9oU4wtf2biUaEH5065UnMeR33oA==", + "dev": true, + "requires": { + "safe-buffer": "~5.1.1" + } + }, + "core-util-is": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", + "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=", + "dev": true + }, + "cosmiconfig": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-7.0.0.tgz", + "integrity": "sha512-pondGvTuVYDk++upghXJabWzL6Kxu6f26ljFw64Swq9v6sQPUL3EUlVDV56diOjpCayKihL6hVe8exIACU4XcA==", + "dev": true, + "requires": { + "@types/parse-json": "^4.0.0", + "import-fresh": "^3.2.1", + "parse-json": "^5.0.0", + "path-type": "^4.0.0", + "yaml": "^1.10.0" + }, + "dependencies": { + "parse-json": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz", + "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==", + "dev": true, + "requires": { + "@babel/code-frame": "^7.0.0", + "error-ex": "^1.3.1", + "json-parse-even-better-errors": "^2.3.0", + "lines-and-columns": "^1.1.6" + } + }, + "path-type": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", + "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", + "dev": true + } + } + }, + "cross-spawn": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", + "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "dev": true, + "requires": { + "path-key": 
"^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + } + }, + "crypto-random-string": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/crypto-random-string/-/crypto-random-string-2.0.0.tgz", + "integrity": "sha512-v1plID3y9r/lPhviJ1wrXpLeyUIGAZ2SHNYTEapm7/8A9nLPoyvVp3RK/EPFqn5kEznyWgYZNsRtYYIWbuG8KA==", + "dev": true + }, + "currently-unhandled": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/currently-unhandled/-/currently-unhandled-0.4.1.tgz", + "integrity": "sha1-mI3zP+qxke95mmE2nddsF635V+o=", + "dev": true, + "requires": { + "array-find-index": "^1.0.1" + } + }, + "dateformat": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/dateformat/-/dateformat-3.0.3.tgz", + "integrity": "sha512-jyCETtSl3VMZMWeRo7iY1FL19ges1t55hMo5yaam4Jrsm5EPL89UQkoQRyiI+Yf4k8r2ZpdngkV8hr1lIdjb3Q==", + "dev": true + }, + "debug": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.2.0.tgz", + "integrity": "sha512-IX2ncY78vDTjZMFUdmsvIRFY2Cf4FnD0wRs+nQwJU8Lu99/tPFdb0VybiiMTPe3I6rQmwsqQqRBvxU+bZ/I8sg==", + "dev": true, + "requires": { + "ms": "2.1.2" + } + }, + "decamelize": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz", + "integrity": "sha1-9lNNFRSCabIDUue+4m9QH5oZEpA=", + "dev": true + }, + "decamelize-keys": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/decamelize-keys/-/decamelize-keys-1.1.0.tgz", + "integrity": "sha1-0XGoeTMlKAfrPLYdwcFEXQeN8tk=", + "dev": true, + "requires": { + "decamelize": "^1.1.0", + "map-obj": "^1.0.0" + }, + "dependencies": { + "map-obj": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/map-obj/-/map-obj-1.0.1.tgz", + "integrity": "sha1-2TPOuSBdgr3PSIb2dCvcK03qFG0=", + "dev": true + } + } + }, + "deep-extend": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz", + "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==", + "dev": true + }, + "default-require-extensions": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/default-require-extensions/-/default-require-extensions-3.0.0.tgz", + "integrity": "sha512-ek6DpXq/SCpvjhpFsLFRVtIxJCRw6fUR42lYMVZuUMK7n8eMz4Uh5clckdBjEpLhn/gEBZo7hDJnJcwdKLKQjg==", + "dev": true, + "requires": { + "strip-bom": "^4.0.0" + }, + "dependencies": { + "strip-bom": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-4.0.0.tgz", + "integrity": "sha512-3xurFv5tEgii33Zi8Jtp55wEIILR9eh34FAW00PZf+JnSsTmV/ioewSgQl97JHvgjoRGwPShsWm+IdrxB35d0w==", + "dev": true + } + } + }, + "deprecation": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/deprecation/-/deprecation-2.3.1.tgz", + "integrity": "sha512-xmHIy4F3scKVwMsQ4WnVaS8bHOx0DmVwRywosKhaILI0ywMDWPtBSku2HNxRvF7jtwDRsoEwYQSfbxj8b7RlJQ==", + "dev": true + }, + "diff": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/diff/-/diff-4.0.2.tgz", + "integrity": "sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A==", + "dev": true + }, + "dir-glob": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", + "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", + "dev": true, + "requires": { + "path-type": "^4.0.0" + }, + "dependencies": { + "path-type": { + "version": "4.0.0", + "resolved": 
"https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", + "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", + "dev": true + } + } + }, + "dot-prop": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/dot-prop/-/dot-prop-3.0.0.tgz", + "integrity": "sha1-G3CK8JSknJoOfbyteQq6U52sEXc=", + "dev": true, + "requires": { + "is-obj": "^1.0.0" + } + }, + "duplexer2": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/duplexer2/-/duplexer2-0.1.4.tgz", + "integrity": "sha1-ixLauHjA1p4+eJEFFmKjL8a93ME=", + "dev": true, + "requires": { + "readable-stream": "^2.0.2" + } + }, + "emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true + }, + "end-of-stream": { + "version": "1.4.4", + "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz", + "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==", + "dev": true, + "requires": { + "once": "^1.4.0" + } + }, + "env-ci": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/env-ci/-/env-ci-5.0.2.tgz", + "integrity": "sha512-Xc41mKvjouTXD3Oy9AqySz1IeyvJvHZ20Twf5ZLYbNpPPIuCnL/qHCmNlD01LoNy0JTunw9HPYVptD19Ac7Mbw==", + "dev": true, + "requires": { + "execa": "^4.0.0", + "java-properties": "^1.0.0" + }, + "dependencies": { + "execa": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/execa/-/execa-4.1.0.tgz", + "integrity": "sha512-j5W0//W7f8UxAn8hXVnwG8tLwdiUy4FJLcSupCg6maBYZDpyBvTApK7KyuI4bKj8KOh1r2YH+6ucuYtJv1bTZA==", + "dev": true, + "requires": { + "cross-spawn": "^7.0.0", + "get-stream": "^5.0.0", + "human-signals": "^1.1.1", + "is-stream": "^2.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^4.0.0", + "onetime": "^5.1.0", + "signal-exit": "^3.0.2", + "strip-final-newline": "^2.0.0" + } + }, + "get-stream": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-5.2.0.tgz", + "integrity": "sha512-nBF+F1rAZVCu/p7rjzgA+Yb4lfYXrpl7a6VmJrU8wF9I1CKvP/QwPNZHnOlwbTkY6dvtFIzFMSyQXbLoTQPRpA==", + "dev": true, + "requires": { + "pump": "^3.0.0" + } + } + } + }, + "error-ex": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", + "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==", + "dev": true, + "requires": { + "is-arrayish": "^0.2.1" + } + }, + "es6-error": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/es6-error/-/es6-error-4.1.1.tgz", + "integrity": "sha512-Um/+FxMr9CISWh0bi5Zv0iOD+4cFh5qLeks1qhAopKVAJw3drgKbKySikp7wGhDL0HPeaja0P5ULZrxLkniUVg==", + "dev": true + }, + "escalade": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", + "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==", + "dev": true + }, + "escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=", + "dev": true + }, + "esprima": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", + "integrity": 
"sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", + "dev": true + }, + "esutils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "dev": true + }, + "execa": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/execa/-/execa-5.0.0.tgz", + "integrity": "sha512-ov6w/2LCiuyO4RLYGdpFGjkcs0wMTgGE8PrkTHikeUy5iJekXyPIKUjifk5CsE0pt7sMCrMZ3YNqoCj6idQOnQ==", + "dev": true, + "requires": { + "cross-spawn": "^7.0.3", + "get-stream": "^6.0.0", + "human-signals": "^2.1.0", + "is-stream": "^2.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^4.0.1", + "onetime": "^5.1.2", + "signal-exit": "^3.0.3", + "strip-final-newline": "^2.0.0" + }, + "dependencies": { + "human-signals": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz", + "integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==", + "dev": true + }, + "onetime": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", + "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", + "dev": true, + "requires": { + "mimic-fn": "^2.1.0" + } + }, + "signal-exit": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.3.tgz", + "integrity": "sha512-VUJ49FC8U1OxwZLxIbTTrDvLnf/6TDgxZcK8wxR8zs13xpx7xbG60ndBlhNrFi2EMuFRoeDoJO7wthSLq42EjA==", + "dev": true + } + } + }, + "fast-glob": { + "version": "3.2.5", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.2.5.tgz", + "integrity": "sha512-2DtFcgT68wiTTiwZ2hNdJfcHNke9XOfnwmBRWXhmeKM8rF0TGwmC/Qto3S7RoZKp5cilZbxzO5iTNTQsJ+EeDg==", + "dev": true, + "requires": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.0", + "merge2": "^1.3.0", + "micromatch": "^4.0.2", + "picomatch": "^2.2.1" + } + }, + "fastq": { + "version": "1.11.0", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.11.0.tgz", + "integrity": "sha512-7Eczs8gIPDrVzT+EksYBcupqMyxSHXXrHOLRRxU2/DicV8789MRBRR8+Hc2uWzUupOs4YS4JzBmBxjjCVBxD/g==", + "dev": true, + "requires": { + "reusify": "^1.0.4" + } + }, + "figures": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/figures/-/figures-3.2.0.tgz", + "integrity": "sha512-yaduQFRKLXYOGgEn6AZau90j3ggSOyiqXU0F9JZfeXYhNa+Jk4X+s45A2zg5jns87GAFa34BBm2kXw4XpNcbdg==", + "dev": true, + "requires": { + "escape-string-regexp": "^1.0.5" + } + }, + "fill-range": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", + "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "dev": true, + "requires": { + "to-regex-range": "^5.0.1" + } + }, + "find-cache-dir": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-3.3.1.tgz", + "integrity": "sha512-t2GDMt3oGC/v+BMwzmllWDuJF/xcDtE5j/fCGbqDD7OLuJkj0cfh1YSA5VKPvwMeLFLNDBkwOKZ2X85jGLVftQ==", + "dev": true, + "requires": { + "commondir": "^1.0.1", + "make-dir": "^3.0.2", + "pkg-dir": "^4.1.0" + } + }, + "find-up": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-2.1.0.tgz", + "integrity": "sha1-RdG35QbHF93UgndaK3eSCjwMV6c=", + "dev": true, + "requires": { + 
"locate-path": "^2.0.0" + } + }, + "find-versions": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/find-versions/-/find-versions-4.0.0.tgz", + "integrity": "sha512-wgpWy002tA+wgmO27buH/9KzyEOQnKsG/R0yrcjPT9BOFm0zRBVQbZ95nRGXWMywS8YR5knRbpohio0bcJABxQ==", + "dev": true, + "requires": { + "semver-regex": "^3.1.2" + } + }, + "flat": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/flat/-/flat-5.0.2.tgz", + "integrity": "sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ==", + "dev": true + }, + "foreground-child": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-2.0.0.tgz", + "integrity": "sha512-dCIq9FpEcyQyXKCkyzmlPTFNgrCzPudOe+mhvJU5zAtlBnGVy2yKxtfsxK2tQBThwq225jcvBjpw1Gr40uzZCA==", + "dev": true, + "requires": { + "cross-spawn": "^7.0.0", + "signal-exit": "^3.0.2" + }, + "dependencies": { + "cross-spawn": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", + "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "dev": true, + "requires": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + } + }, + "path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true + }, + "shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "requires": { + "shebang-regex": "^3.0.0" + } + }, + "shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true + }, + "which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "requires": { + "isexe": "^2.0.0" + } + } + } + }, + "from2": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/from2/-/from2-2.3.0.tgz", + "integrity": "sha1-i/tVAr3kpNNs/e6gB/zKIdfjgq8=", + "dev": true, + "requires": { + "inherits": "^2.0.1", + "readable-stream": "^2.0.0" + } + }, + "fromentries": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/fromentries/-/fromentries-1.3.2.tgz", + "integrity": "sha512-cHEpEQHUg0f8XdtZCc2ZAhrHzKzT0MrFUTcvx+hfxYu7rGMDc5SKoXFh+n4YigxsHXRzc6OrCshdR1bWH6HHyg==", + "dev": true + }, + "fs-extra": { + "version": "9.0.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-9.0.0.tgz", + "integrity": "sha512-pmEYSk3vYsG/bF651KPUXZ+hvjpgWYw/Gc7W9NFUe3ZVLczKKWIij3IKpOrQcdw4TILtibFslZ0UmR8Vvzig4g==", + "dev": true, + "requires": { + "at-least-node": "^1.0.0", + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^1.0.0" + } + }, + "fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=", + "dev": true + }, + "fsevents": { + "version": "2.1.3", + "resolved": 
"https://registry.npmjs.org/fsevents/-/fsevents-2.1.3.tgz", + "integrity": "sha512-Auw9a4AxqWpa9GUfj370BMPzzyncfBABW8Mab7BGWBYDj4Isgq+cDKtx0i6u9jcX9pQDnswsaaOTgTmA5pEjuQ==", + "dev": true, + "optional": true + }, + "gensync": { + "version": "1.0.0-beta.2", + "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", + "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", + "dev": true + }, + "get-caller-file": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + "dev": true + }, + "get-package-type": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/get-package-type/-/get-package-type-0.1.0.tgz", + "integrity": "sha512-pjzuKtY64GYfWizNAJ0fr9VqttZkNiK2iS430LtIHzjBEr6bX8Am2zm4sW4Ro5wjWW5cAlRL1qAMTcXbjNAO2Q==", + "dev": true + }, + "get-stream": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz", + "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", + "dev": true + }, + "git-log-parser": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/git-log-parser/-/git-log-parser-1.2.0.tgz", + "integrity": "sha1-LmpMGxP8AAKCB7p5WnrDFme5/Uo=", + "dev": true, + "requires": { + "argv-formatter": "~1.0.0", + "spawn-error-forwarder": "~1.0.0", + "split2": "~1.0.0", + "stream-combiner2": "~1.1.1", + "through2": "~2.0.0", + "traverse": "~0.6.6" + }, + "dependencies": { + "split2": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/split2/-/split2-1.0.0.tgz", + "integrity": "sha1-UuLiIdiMdfmnP5BVbiY/+WdysxQ=", + "dev": true, + "requires": { + "through2": "~2.0.0" + } + }, + "through2": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/through2/-/through2-2.0.5.tgz", + "integrity": "sha512-/mrRod8xqpA+IHSLyGCQ2s8SPHiCDEeQJSep1jqLYeEUClOFG2Qsh+4FU6G9VeqpZnGW/Su8LQGc4YKni5rYSQ==", + "dev": true, + "requires": { + "readable-stream": "~2.3.6", + "xtend": "~4.0.1" + } + } + } + }, + "glob": { + "version": "7.1.6", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.6.tgz", + "integrity": "sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==", + "dev": true, + "requires": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.0.4", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + } + }, + "glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "requires": { + "is-glob": "^4.0.1" + } + }, + "globals": { + "version": "11.12.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", + "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", + "dev": true + }, + "globby": { + "version": "11.0.3", + "resolved": "https://registry.npmjs.org/globby/-/globby-11.0.3.tgz", + "integrity": "sha512-ffdmosjA807y7+lA1NM0jELARVmYul/715xiILEjo3hBLPTcirgQNnXECn5g3mtR8TOLCVbkfua1Hpen25/Xcg==", + "dev": true, + "requires": { + "array-union": "^2.1.0", + "dir-glob": "^3.0.1", + "fast-glob": "^3.1.1", + "ignore": "^5.1.4", + "merge2": "^1.3.0", + "slash": "^3.0.0" + } + }, + 
"graceful-fs": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.3.tgz", + "integrity": "sha512-a30VEBm4PEdx1dRB7MFK7BejejvCvBronbLjht+sHuGYj8PHs7M/5Z+rt5lw551vZ7yfTCj4Vuyy3mSJytDWRQ==", + "dev": true + }, + "growl": { + "version": "1.10.5", + "resolved": "https://registry.npmjs.org/growl/-/growl-1.10.5.tgz", + "integrity": "sha512-qBr4OuELkhPenW6goKVXiv47US3clb3/IbuWF9KNKEijAy9oeHxU9IgzjvJhHkUzhaj7rOUD7+YGWqUjLp5oSA==", + "dev": true + }, + "handlebars": { + "version": "4.7.7", + "resolved": "https://registry.npmjs.org/handlebars/-/handlebars-4.7.7.tgz", + "integrity": "sha512-aAcXm5OAfE/8IXkcZvCepKU3VzW1/39Fb5ZuqMtgI/hT8X2YgoMvBY5dLhq/cpOvw7Lk1nK/UF71aLG/ZnVYRA==", + "dev": true, + "requires": { + "minimist": "^1.2.5", + "neo-async": "^2.6.0", + "source-map": "^0.6.1", + "uglify-js": "^3.1.4", + "wordwrap": "^1.0.0" + }, + "dependencies": { + "minimist": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz", + "integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw==", + "dev": true + }, + "wordwrap": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/wordwrap/-/wordwrap-1.0.0.tgz", + "integrity": "sha1-J1hIEIkUVqQXHI0CJkQa3pDLyus=", + "dev": true + } + } + }, + "has-flag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", + "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=", + "dev": true + }, + "hasha": { + "version": "5.2.2", + "resolved": "https://registry.npmjs.org/hasha/-/hasha-5.2.2.tgz", + "integrity": "sha512-Hrp5vIK/xr5SkeN2onO32H0MgNZ0f17HRNH39WfL0SYUNOTZ5Lz1TJ8Pajo/87dYGEFlLMm7mIc/k/s6Bvz9HQ==", + "dev": true, + "requires": { + "is-stream": "^2.0.0", + "type-fest": "^0.8.0" + }, + "dependencies": { + "is-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.0.tgz", + "integrity": "sha512-XCoy+WlUr7d1+Z8GgSuXmpuUFC9fOhRXglJMx+dwLKTkL44Cjd4W1Z5P+BQZpr+cR93aGP4S/s7Ftw6Nd/kiEw==", + "dev": true + }, + "type-fest": { + "version": "0.8.1", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.8.1.tgz", + "integrity": "sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==", + "dev": true + } + } + }, + "he": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz", + "integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==", + "dev": true + }, + "hook-std": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/hook-std/-/hook-std-2.0.0.tgz", + "integrity": "sha512-zZ6T5WcuBMIUVh49iPQS9t977t7C0l7OtHrpeMb5uk48JdflRX0NSFvCekfYNmGQETnLq9W/isMyHl69kxGi8g==", + "dev": true + }, + "hosted-git-info": { + "version": "2.8.9", + "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-2.8.9.tgz", + "integrity": "sha512-mxIDAb9Lsm6DoOJ7xH+5+X4y1LU/4Hi50L9C5sIswK3JzULS4bwk1FvjdBgvYR4bzT4tuUQiC15FE2f5HbLvYw==", + "dev": true + }, + "html-escaper": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz", + "integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==", + "dev": true + }, + "http-proxy-agent": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-4.0.1.tgz", + "integrity": 
"sha512-k0zdNgqWTGA6aeIRVpvfVob4fL52dTfaehylg0Y4UvSySvOq/Y+BOyPrgpUrA7HylqvU8vIZGsRuXmspskV0Tg==", + "dev": true, + "requires": { + "@tootallnate/once": "1", + "agent-base": "6", + "debug": "4" + } + }, + "https-proxy-agent": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-5.0.0.tgz", + "integrity": "sha512-EkYm5BcKUGiduxzSt3Eppko+PiNWNEpa4ySk9vTC6wDsQJW9rHSa+UhGNJoRYp7bz6Ht1eaRIa6QaJqO5rCFbA==", + "dev": true, + "requires": { + "agent-base": "6", + "debug": "4" + } + }, + "human-signals": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-1.1.1.tgz", + "integrity": "sha512-SEQu7vl8KjNL2eoGBLF3+wAjpsNfA9XMlXAYj/3EdaNfAlxKthD1xjEQfGOUhllCGGJVNY34bRr6lPINhNjyZw==", + "dev": true + }, + "ignore": { + "version": "5.1.8", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.1.8.tgz", + "integrity": "sha512-BMpfD7PpiETpBl/A6S498BaIJ6Y/ABT93ETbby2fP00v4EbvPBXWEoaR1UBPKs3iR53pJY7EtZk5KACI57i1Uw==", + "dev": true + }, + "import-fresh": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz", + "integrity": "sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==", + "dev": true, + "requires": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + }, + "dependencies": { + "resolve-from": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", + "dev": true + } + } + }, + "import-from": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/import-from/-/import-from-3.0.0.tgz", + "integrity": "sha512-CiuXOFFSzkU5x/CR0+z7T91Iht4CXgfCxVOFRhh2Zyhg5wOpWvvDLQUsWl+gcN+QscYBjez8hDCt85O7RLDttQ==", + "dev": true, + "requires": { + "resolve-from": "^5.0.0" + } + }, + "imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha1-khi5srkoojixPcT7a21XbyMUU+o=", + "dev": true + }, + "indent-string": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-3.2.0.tgz", + "integrity": "sha1-Sl/W0nzDMvN+VBmlBNu4NxBckok=", + "dev": true + }, + "inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=", + "dev": true, + "requires": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "dev": true + }, + "ini": { + "version": "1.3.7", + "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.7.tgz", + "integrity": "sha512-iKpRpXP+CrP2jyrxvg1kMUpXDyRUFDWurxbnVT1vQPx+Wz9uCYsMIqYuSBLV+PAaZG/d7kRLKRFc9oDMsH+mFQ==", + "dev": true + }, + "into-stream": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/into-stream/-/into-stream-5.1.1.tgz", + "integrity": "sha512-krrAJ7McQxGGmvaYbB7Q1mcA+cRwg9Ij2RfWIeVesNBgVDZmzY/Fa4IpZUT3bmdRzMzdf/mzltCG2Dq99IZGBA==", + "dev": true, + "requires": { + "from2": "^2.3.0", + "p-is-promise": "^3.0.0" + } + }, + "is-arrayish": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", + "integrity": "sha1-d8mYQFJ6qOyxqLppe4BkWnqSap0=", + 
"dev": true + }, + "is-binary-path": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", + "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + "dev": true, + "requires": { + "binary-extensions": "^2.0.0" + } + }, + "is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha1-qIwCU1eR8C7TfHahueqXc8gz+MI=", + "dev": true + }, + "is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "dev": true + }, + "is-glob": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.1.tgz", + "integrity": "sha512-5G0tKtBTFImOqDnLB2hG6Bp2qcKEFduo4tZu9MT/H6NQv/ghhy30o55ufafxJ/LdH79LLs2Kfrn85TLKyA7BUg==", + "dev": true, + "requires": { + "is-extglob": "^2.1.1" + } + }, + "is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true + }, + "is-obj": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-1.0.1.tgz", + "integrity": "sha1-PkcprB9f3gJc19g6iW2rn09n2w8=", + "dev": true + }, + "is-plain-obj": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-1.1.0.tgz", + "integrity": "sha1-caUMhCnfync8kqOQpKA7OfzVHT4=", + "dev": true + }, + "is-plain-object": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-5.0.0.tgz", + "integrity": "sha512-VRSzKkbMm5jMDoKLbltAkFQ5Qr7VDiTFGXxYFXXowVj387GeGNOCsOH6Msy00SGZ3Fp84b1Naa1psqgcCIEP5Q==", + "dev": true + }, + "is-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.0.tgz", + "integrity": "sha512-XCoy+WlUr7d1+Z8GgSuXmpuUFC9fOhRXglJMx+dwLKTkL44Cjd4W1Z5P+BQZpr+cR93aGP4S/s7Ftw6Nd/kiEw==", + "dev": true + }, + "is-text-path": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-text-path/-/is-text-path-1.0.1.tgz", + "integrity": "sha1-Thqg+1G/vLPpJogAE5cgLBd1tm4=", + "dev": true, + "requires": { + "text-extensions": "^1.0.0" + } + }, + "is-typedarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz", + "integrity": "sha1-5HnICFjfDBsR3dppQPlgEfzaSpo=", + "dev": true + }, + "is-windows": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-windows/-/is-windows-1.0.2.tgz", + "integrity": "sha512-eXK1UInq2bPmjyX6e3VHIzMLobc4J94i4AWn+Hpq3OU5KkrRC96OAcR3PRJ/pGu6m8TRnBHP9dkXQVsT/COVIA==", + "dev": true + }, + "isarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", + "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=", + "dev": true + }, + "isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=", + "dev": true + }, + "issue-parser": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/issue-parser/-/issue-parser-6.0.0.tgz", + "integrity": "sha512-zKa/Dxq2lGsBIXQ7CUZWTHfvxPC2ej0KfO7fIPqLlHB9J2hJ7rGhZ5rilhuufylr4RXYPzJUeFjKxz305OsNlA==", + "dev": true, + "requires": { + 
"lodash.capitalize": "^4.2.1", + "lodash.escaperegexp": "^4.1.2", + "lodash.isplainobject": "^4.0.6", + "lodash.isstring": "^4.0.1", + "lodash.uniqby": "^4.7.0" + } + }, + "istanbul-lib-coverage": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/istanbul-lib-coverage/-/istanbul-lib-coverage-3.0.0.tgz", + "integrity": "sha512-UiUIqxMgRDET6eR+o5HbfRYP1l0hqkWOs7vNxC/mggutCMUIhWMm8gAHb8tHlyfD3/l6rlgNA5cKdDzEAf6hEg==", + "dev": true + }, + "istanbul-lib-hook": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/istanbul-lib-hook/-/istanbul-lib-hook-3.0.0.tgz", + "integrity": "sha512-Pt/uge1Q9s+5VAZ+pCo16TYMWPBIl+oaNIjgLQxcX0itS6ueeaA+pEfThZpH8WxhFgCiEb8sAJY6MdUKgiIWaQ==", + "dev": true, + "requires": { + "append-transform": "^2.0.0" + } + }, + "istanbul-lib-instrument": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-4.0.3.tgz", + "integrity": "sha512-BXgQl9kf4WTCPCCpmFGoJkz/+uhvm7h7PFKUYxh7qarQd3ER33vHG//qaE8eN25l07YqZPpHXU9I09l/RD5aGQ==", + "dev": true, + "requires": { + "@babel/core": "^7.7.5", + "@istanbuljs/schema": "^0.1.2", + "istanbul-lib-coverage": "^3.0.0", + "semver": "^6.3.0" + }, + "dependencies": { + "semver": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", + "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "dev": true + } + } + }, + "istanbul-lib-processinfo": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/istanbul-lib-processinfo/-/istanbul-lib-processinfo-2.0.2.tgz", + "integrity": "sha512-kOwpa7z9hme+IBPZMzQ5vdQj8srYgAtaRqeI48NGmAQ+/5yKiHLV0QbYqQpxsdEF0+w14SoB8YbnHKcXE2KnYw==", + "dev": true, + "requires": { + "archy": "^1.0.0", + "cross-spawn": "^7.0.0", + "istanbul-lib-coverage": "^3.0.0-alpha.1", + "make-dir": "^3.0.0", + "p-map": "^3.0.0", + "rimraf": "^3.0.0", + "uuid": "^3.3.3" + }, + "dependencies": { + "cross-spawn": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", + "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "dev": true, + "requires": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + } + }, + "p-map": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-map/-/p-map-3.0.0.tgz", + "integrity": "sha512-d3qXVTF/s+W+CdJ5A29wywV2n8CQQYahlgz2bFiA+4eVNJbHJodPZ+/gXwPGh0bOqA+j8S+6+ckmvLGPk1QpxQ==", + "dev": true, + "requires": { + "aggregate-error": "^3.0.0" + } + }, + "path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true + }, + "shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "requires": { + "shebang-regex": "^3.0.0" + } + }, + "shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true + }, + "which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": 
"sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "requires": { + "isexe": "^2.0.0" + } + } + } + }, + "istanbul-lib-report": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/istanbul-lib-report/-/istanbul-lib-report-3.0.0.tgz", + "integrity": "sha512-wcdi+uAKzfiGT2abPpKZ0hSU1rGQjUQnLvtY5MpQ7QCTahD3VODhcu4wcfY1YtkGaDD5yuydOLINXsfbus9ROw==", + "dev": true, + "requires": { + "istanbul-lib-coverage": "^3.0.0", + "make-dir": "^3.0.0", + "supports-color": "^7.1.0" + }, + "dependencies": { + "has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true + }, + "supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "requires": { + "has-flag": "^4.0.0" + } + } + } + }, + "istanbul-lib-source-maps": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/istanbul-lib-source-maps/-/istanbul-lib-source-maps-4.0.0.tgz", + "integrity": "sha512-c16LpFRkR8vQXyHZ5nLpY35JZtzj1PQY1iZmesUbf1FZHbIupcWfjgOXBY9YHkLEQ6puz1u4Dgj6qmU/DisrZg==", + "dev": true, + "requires": { + "debug": "^4.1.1", + "istanbul-lib-coverage": "^3.0.0", + "source-map": "^0.6.1" + } + }, + "istanbul-reports": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-3.0.2.tgz", + "integrity": "sha512-9tZvz7AiR3PEDNGiV9vIouQ/EAcqMXFmkcA1CDFTwOB98OZVDL0PH9glHotf5Ugp6GCOTypfzGWI/OqjWNCRUw==", + "dev": true, + "requires": { + "html-escaper": "^2.0.0", + "istanbul-lib-report": "^3.0.0" + } + }, + "java-properties": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/java-properties/-/java-properties-1.0.2.tgz", + "integrity": "sha512-qjdpeo2yKlYTH7nFdK0vbZWuTCesk4o63v5iVOlhMQPfuIZQfW/HI35SjfhA+4qpg36rnFSvUK5b1m+ckIblQQ==", + "dev": true + }, + "js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "dev": true + }, + "js-yaml": { + "version": "3.14.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.0.tgz", + "integrity": "sha512-/4IbIeHcD9VMHFqDR/gQ7EdZdLimOvW2DdcxFjdyyZ9NsbS+ccrXqVWDtab/lRl5AlUqmpBx8EhPaWR+OtY17A==", + "dev": true, + "requires": { + "argparse": "^1.0.7", + "esprima": "^4.0.0" + } + }, + "jsesc": { + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz", + "integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==", + "dev": true + }, + "json-parse-better-errors": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/json-parse-better-errors/-/json-parse-better-errors-1.0.2.tgz", + "integrity": "sha512-mrqyZKfX5EhL7hvqcV6WG1yYjnjeuYDzDhhcAAUrq8Po85NBQBJP+ZDUT75qZQ98IkUoBqdkExkukOU7Ts2wrw==", + "dev": true + }, + "json-parse-even-better-errors": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", + "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==", + "dev": true + }, + 
"json-stringify-safe": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz", + "integrity": "sha1-Epai1Y/UXxmg9s4B1lcB4sc1tus=", + "dev": true + }, + "json5": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.0.tgz", + "integrity": "sha512-f+8cldu7X/y7RAJurMEJmdoKXGB/X550w2Nr3tTbezL6RwEE/iMcm+tZnXeoZtKuOq6ft8+CqzEkrIgx1fPoQA==", + "dev": true, + "requires": { + "minimist": "^1.2.5" + }, + "dependencies": { + "minimist": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz", + "integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw==", + "dev": true + } + } + }, + "jsonfile": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.0.1.tgz", + "integrity": "sha512-jR2b5v7d2vIOust+w3wtFKZIfpC2pnRmFAhAC/BuweZFQR8qZzxH1OyrQ10HmdVYiXWkYUqPVsz91cG7EL2FBg==", + "dev": true, + "requires": { + "graceful-fs": "^4.1.6", + "universalify": "^1.0.0" + } + }, + "jsonparse": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/jsonparse/-/jsonparse-1.3.1.tgz", + "integrity": "sha1-P02uSpH6wxX3EGL4UhzCOfE2YoA=", + "dev": true + }, + "JSONStream": { + "version": "1.3.5", + "resolved": "https://registry.npmjs.org/JSONStream/-/JSONStream-1.3.5.tgz", + "integrity": "sha512-E+iruNOY8VV9s4JEbe1aNEm6MiszPRr/UfcHMz0TQh1BXSxHK+ASV1R6W4HpjBhSeS+54PIsAMCBmwD06LLsqQ==", + "dev": true, + "requires": { + "jsonparse": "^1.2.0", + "through": ">=2.2.7 <3" + } + }, + "lines-and-columns": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.1.6.tgz", + "integrity": "sha1-HADHQ7QzzQpOgHWPe2SldEDZ/wA=", + "dev": true + }, + "load-json-file": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/load-json-file/-/load-json-file-4.0.0.tgz", + "integrity": "sha1-L19Fq5HjMhYjT9U62rZo607AmTs=", + "dev": true, + "requires": { + "graceful-fs": "^4.1.2", + "parse-json": "^4.0.0", + "pify": "^3.0.0", + "strip-bom": "^3.0.0" + } + }, + "locate-path": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-2.0.0.tgz", + "integrity": "sha1-K1aLJl7slExtnA3pw9u7ygNUzY4=", + "dev": true, + "requires": { + "p-locate": "^2.0.0", + "path-exists": "^3.0.0" + } + }, + "lodash": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", + "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", + "dev": true + }, + "lodash.capitalize": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/lodash.capitalize/-/lodash.capitalize-4.2.1.tgz", + "integrity": "sha1-+CbJtOKoUR2E46yinbBeGk87cqk=", + "dev": true + }, + "lodash.escaperegexp": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/lodash.escaperegexp/-/lodash.escaperegexp-4.1.2.tgz", + "integrity": "sha1-ZHYsSGGAglGKw99Mz11YhtriA0c=", + "dev": true + }, + "lodash.flattendeep": { + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/lodash.flattendeep/-/lodash.flattendeep-4.4.0.tgz", + "integrity": "sha1-+wMJF/hqMTTlvJvsDWngAT3f7bI=", + "dev": true + }, + "lodash.ismatch": { + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/lodash.ismatch/-/lodash.ismatch-4.4.0.tgz", + "integrity": "sha1-dWy1FQyjum8RCFp4hJZF8Yj4Xzc=", + "dev": true + }, + "lodash.isplainobject": { + "version": "4.0.6", + "resolved": 
"https://registry.npmjs.org/lodash.isplainobject/-/lodash.isplainobject-4.0.6.tgz", + "integrity": "sha1-fFJqUtibRcRcxpC4gWO+BJf1UMs=", + "dev": true + }, + "lodash.isstring": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/lodash.isstring/-/lodash.isstring-4.0.1.tgz", + "integrity": "sha1-1SfftUVuynzJu5XV2ur4i6VKVFE=", + "dev": true + }, + "lodash.toarray": { + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/lodash.toarray/-/lodash.toarray-4.4.0.tgz", + "integrity": "sha1-JMS/zWsvuji/0FlNsRedjptlZWE=", + "dev": true + }, + "lodash.uniqby": { + "version": "4.7.0", + "resolved": "https://registry.npmjs.org/lodash.uniqby/-/lodash.uniqby-4.7.0.tgz", + "integrity": "sha1-2ZwHpmnp5tJOE2Lf4mbGdhavEwI=", + "dev": true + }, + "log-symbols": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-4.0.0.tgz", + "integrity": "sha512-FN8JBzLx6CzeMrB0tg6pqlGU1wCrXW+ZXGH481kfsBqer0hToTIiHdjH4Mq8xJUbvATujKCvaREGWpGUionraA==", + "dev": true, + "requires": { + "chalk": "^4.0.0" + }, + "dependencies": { + "ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "requires": { + "color-convert": "^2.0.1" + } + }, + "chalk": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.0.tgz", + "integrity": "sha512-qwx12AxXe2Q5xQ43Ac//I6v5aXTipYrSESdOgzrN+9XjgEpyjpKuvSGaN4qE93f7TQTlerQQ8S+EQ0EyDoVL1A==", + "dev": true, + "requires": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + } + }, + "color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "requires": { + "color-name": "~1.1.4" + } + }, + "color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true + }, + "supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "requires": { + "has-flag": "^4.0.0" + } + } + } + }, + "loud-rejection": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/loud-rejection/-/loud-rejection-1.6.0.tgz", + "integrity": "sha1-W0b4AUft7leIcPCG0Eghz5mOVR8=", + "dev": true, + "requires": { + "currently-unhandled": "^0.4.1", + "signal-exit": "^3.0.0" + } + }, + "lru-cache": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", + "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", + "dev": true, + "requires": { + "yallist": "^4.0.0" + } + }, + "make-dir": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-3.1.0.tgz", + "integrity": 
"sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw==", + "dev": true, + "requires": { + "semver": "^6.0.0" + }, + "dependencies": { + "semver": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", + "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "dev": true + } + } + }, + "map-obj": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/map-obj/-/map-obj-2.0.0.tgz", + "integrity": "sha1-plzSkIepJZi4eRJXpSPgISIqwfk=", + "dev": true + }, + "marked": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/marked/-/marked-2.0.3.tgz", + "integrity": "sha512-5otztIIcJfPc2qGTN8cVtOJEjNJZ0jwa46INMagrYfk0EvqtRuEHLsEe0LrFS0/q+ZRKT0+kXK7P2T1AN5lWRA==", + "dev": true + }, + "marked-terminal": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/marked-terminal/-/marked-terminal-4.1.1.tgz", + "integrity": "sha512-t7Mdf6T3PvOEyN01c3tYxDzhyKZ8xnkp8Rs6Fohno63L/0pFTJ5Qtwto2AQVuDtbQiWzD+4E5AAu1Z2iLc8miQ==", + "dev": true, + "requires": { + "ansi-escapes": "^4.3.1", + "cardinal": "^2.1.1", + "chalk": "^4.1.0", + "cli-table": "^0.3.1", + "node-emoji": "^1.10.0", + "supports-hyperlinks": "^2.1.0" + }, + "dependencies": { + "ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "requires": { + "color-convert": "^2.0.1" + } + }, + "chalk": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.1.tgz", + "integrity": "sha512-diHzdDKxcU+bAsUboHLPEDQiw0qEe0qd7SYUn3HgcFlWgbDcfLGswOHYeGrHKzG9z6UYf01d9VFMfZxPM1xZSg==", + "dev": true, + "requires": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + } + }, + "color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "requires": { + "color-name": "~1.1.4" + } + }, + "color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true + }, + "supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "requires": { + "has-flag": "^4.0.0" + } + } + } + }, + "meow": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/meow/-/meow-5.0.0.tgz", + "integrity": "sha512-CbTqYU17ABaLefO8vCU153ZZlprKYWDljcndKKDCFcYQITzWCXZAVk4QMFZPgvzrnUQ3uItnIE/LoUOwrT15Ig==", + "dev": true, + "requires": { + "camelcase-keys": "^4.0.0", + "decamelize-keys": "^1.0.0", + "loud-rejection": "^1.0.0", + "minimist-options": "^3.0.1", + "normalize-package-data": "^2.3.4", + "read-pkg-up": "^3.0.0", + "redent": "^2.0.0", + "trim-newlines": "^2.0.0", + "yargs-parser": "^10.0.0" + } + 
}, + "merge-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", + "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==", + "dev": true + }, + "merge2": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", + "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", + "dev": true + }, + "micromatch": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.2.tgz", + "integrity": "sha512-y7FpHSbMUMoyPbYUSzO6PaZ6FyRnQOpHuKwbo1G+Knck95XVU4QAiKdGEnj5wwoS7PlOgthX/09u5iFJ+aYf5Q==", + "dev": true, + "requires": { + "braces": "^3.0.1", + "picomatch": "^2.0.5" + } + }, + "mime": { + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/mime/-/mime-2.5.2.tgz", + "integrity": "sha512-tqkh47FzKeCPD2PUiPB6pkbMzsCasjxAfC62/Wap5qrUWcb+sFasXUC5I3gYM5iBM8v/Qpn4UK0x+j0iHyFPDg==", + "dev": true + }, + "mimic-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", + "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", + "dev": true + }, + "minimatch": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", + "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", + "dev": true, + "requires": { + "brace-expansion": "^1.1.7" + } + }, + "minimist-options": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/minimist-options/-/minimist-options-3.0.2.tgz", + "integrity": "sha512-FyBrT/d0d4+uiZRbqznPXqw3IpZZG3gl3wKWiX784FycUKVwBt0uLBFkQrtE4tZOrgo78nZp2jnKz3L65T5LdQ==", + "dev": true, + "requires": { + "arrify": "^1.0.1", + "is-plain-obj": "^1.1.0" + } + }, + "mocha": { + "version": "8.2.1", + "resolved": "https://registry.npmjs.org/mocha/-/mocha-8.2.1.tgz", + "integrity": "sha512-cuLBVfyFfFqbNR0uUKbDGXKGk+UDFe6aR4os78XIrMQpZl/nv7JYHcvP5MFIAb374b2zFXsdgEGwmzMtP0Xg8w==", + "dev": true, + "requires": { + "@ungap/promise-all-settled": "1.1.2", + "ansi-colors": "4.1.1", + "browser-stdout": "1.3.1", + "chokidar": "3.4.3", + "debug": "4.2.0", + "diff": "4.0.2", + "escape-string-regexp": "4.0.0", + "find-up": "5.0.0", + "glob": "7.1.6", + "growl": "1.10.5", + "he": "1.2.0", + "js-yaml": "3.14.0", + "log-symbols": "4.0.0", + "minimatch": "3.0.4", + "ms": "2.1.2", + "nanoid": "3.1.12", + "serialize-javascript": "5.0.1", + "strip-json-comments": "3.1.1", + "supports-color": "7.2.0", + "which": "2.0.2", + "wide-align": "1.1.3", + "workerpool": "6.0.2", + "yargs": "13.3.2", + "yargs-parser": "13.1.2", + "yargs-unparser": "2.0.0" + }, + "dependencies": { + "ansi-regex": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz", + "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==", + "dev": true + }, + "camelcase": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", + "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", + "dev": true + }, + "cliui": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-5.0.0.tgz", + "integrity": "sha512-PYeGSEmmHM6zvoef2w8TPzlrnNpXIjTipYK780YswmIP9vjxmd6Y2a3CB2Ks6/AU8NHjZugXvo8w3oWM2qnwXA==", + 
"dev": true, + "requires": { + "string-width": "^3.1.0", + "strip-ansi": "^5.2.0", + "wrap-ansi": "^5.1.0" + } + }, + "emoji-regex": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-7.0.3.tgz", + "integrity": "sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA==", + "dev": true + }, + "escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "dev": true + }, + "find-up": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dev": true, + "requires": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + } + }, + "has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true + }, + "is-fullwidth-code-point": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", + "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=", + "dev": true + }, + "locate-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dev": true, + "requires": { + "p-locate": "^5.0.0" + } + }, + "p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dev": true, + "requires": { + "yocto-queue": "^0.1.0" + } + }, + "p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dev": true, + "requires": { + "p-limit": "^3.0.2" + } + }, + "p-try": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "dev": true + }, + "path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true + }, + "string-width": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz", + "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==", + "dev": true, + "requires": { + "emoji-regex": "^7.0.1", + "is-fullwidth-code-point": "^2.0.0", + "strip-ansi": "^5.1.0" + } + }, + "strip-ansi": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz", + "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==", + "dev": true, + "requires": { + "ansi-regex": "^4.1.0" + } + }, + "strip-json-comments": { + "version": "3.1.1", + "resolved": 
"https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "dev": true + }, + "supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "requires": { + "has-flag": "^4.0.0" + } + }, + "which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "requires": { + "isexe": "^2.0.0" + } + }, + "wrap-ansi": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-5.1.0.tgz", + "integrity": "sha512-QC1/iN/2/RPVJ5jYK8BGttj5z83LmSKmvbvrXPNCLZSEb32KKVDJDl/MOt2N01qU2H/FkzEa9PKto1BqDjtd7Q==", + "dev": true, + "requires": { + "ansi-styles": "^3.2.0", + "string-width": "^3.0.0", + "strip-ansi": "^5.0.0" + } + }, + "yargs": { + "version": "13.3.2", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-13.3.2.tgz", + "integrity": "sha512-AX3Zw5iPruN5ie6xGRIDgqkT+ZhnRlZMLMHAs8tg7nRruy2Nb+i5o9bwghAogtM08q1dpr2LVoS8KSTMYpWXUw==", + "dev": true, + "requires": { + "cliui": "^5.0.0", + "find-up": "^3.0.0", + "get-caller-file": "^2.0.1", + "require-directory": "^2.1.1", + "require-main-filename": "^2.0.0", + "set-blocking": "^2.0.0", + "string-width": "^3.0.0", + "which-module": "^2.0.0", + "y18n": "^4.0.0", + "yargs-parser": "^13.1.2" + }, + "dependencies": { + "find-up": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", + "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", + "dev": true, + "requires": { + "locate-path": "^3.0.0" + } + }, + "locate-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", + "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", + "dev": true, + "requires": { + "p-locate": "^3.0.0", + "path-exists": "^3.0.0" + } + }, + "p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "dev": true, + "requires": { + "p-try": "^2.0.0" + } + }, + "p-locate": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", + "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", + "dev": true, + "requires": { + "p-limit": "^2.0.0" + } + }, + "path-exists": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", + "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", + "dev": true + } + } + }, + "yargs-parser": { + "version": "13.1.2", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-13.1.2.tgz", + "integrity": "sha512-3lbsNRf/j+A4QuSZfDRA7HRSfWrzO0YjqTJd5kjAq37Zep1CEgaYmrH9Q3GwPiB9cHyd1Y1UwggGhJGoxipbzg==", + "dev": true, + "requires": { + "camelcase": "^5.0.0", + "decamelize": "^1.2.0" + } + } + } + }, + "modify-values": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/modify-values/-/modify-values-1.0.1.tgz", + 
"integrity": "sha512-xV2bxeN6F7oYjZWTe/YPAy6MN2M+sL4u/Rlm2AHCIVGfo2p1yGmBHQ6vHehl4bRTZBdHu3TSkWdYgkwpYzAGSw==", + "dev": true + }, + "ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "dev": true + }, + "nanoid": { + "version": "3.1.12", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.1.12.tgz", + "integrity": "sha512-1qstj9z5+x491jfiC4Nelk+f8XBad7LN20PmyWINJEMRSf3wcAjAWysw1qaA8z6NSKe2sjq1hRSDpBH5paCb6A==", + "dev": true + }, + "neo-async": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.1.tgz", + "integrity": "sha512-iyam8fBuCUpWeKPGpaNMetEocMt364qkCsfL9JuhjXX6dRnguRVOfk2GZaDpPjcOKiiXCPINZC1GczQ7iTq3Zw==", + "dev": true + }, + "nerf-dart": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/nerf-dart/-/nerf-dart-1.0.0.tgz", + "integrity": "sha1-5tq3/r9a2Bbqgc9cYpxaDr3nLBo=", + "dev": true + }, + "node-emoji": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/node-emoji/-/node-emoji-1.10.0.tgz", + "integrity": "sha512-Yt3384If5H6BYGVHiHwTL+99OzJKHhgp82S8/dktEK73T26BazdgZ4JZh92xSVtGNJvz9UbXdNAc5hcrXV42vw==", + "dev": true, + "requires": { + "lodash.toarray": "^4.4.0" + } + }, + "node-fetch": { + "version": "2.6.7", + "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.7.tgz", + "integrity": "sha512-ZjMPFEfVx5j+y2yF35Kzx5sF7kDzxuDj6ziH4FFbOp87zKDZNx8yExJIb05OGF4Nlt9IHFIMBkRl41VdvcNdbQ==", + "dev": true, + "requires": { + "whatwg-url": "^5.0.0" + } + }, + "node-preload": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/node-preload/-/node-preload-0.2.1.tgz", + "integrity": "sha512-RM5oyBy45cLEoHqCeh+MNuFAxO0vTFBLskvQbOKnEE7YTTSN4tbN8QWDIPQ6L+WvKsB/qLEGpYe2ZZ9d4W9OIQ==", + "dev": true, + "requires": { + "process-on-spawn": "^1.0.0" + } + }, + "normalize-package-data": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-2.5.0.tgz", + "integrity": "sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA==", + "dev": true, + "requires": { + "hosted-git-info": "^2.1.4", + "resolve": "^1.10.0", + "semver": "2 || 3 || 4 || 5", + "validate-npm-package-license": "^3.0.1" + } + }, + "normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "dev": true + }, + "normalize-url": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-5.3.1.tgz", + "integrity": "sha512-K1c7+vaAP+Yh5bOGmA10PGPpp+6h7WZrl7GwqKhUflBc9flU9pzG27DDeB9+iuhZkE3BJZOcgN1P/2sS5pqrWw==", + "dev": true + }, + "npm": { + "version": "6.14.6", + "resolved": "https://registry.npmjs.org/npm/-/npm-6.14.6.tgz", + "integrity": "sha512-axnz6iHFK6WPE0js/+mRp+4IOwpHn5tJEw5KB6FiCU764zmffrhsYHbSHi2kKqNkRBt53XasXjngZfBD3FQzrQ==", + "dev": true, + "requires": { + "abbrev": "~1.1.1", + "ansicolors": "~0.3.2", + "ansistyles": "~0.1.3", + "aproba": "^2.0.0", + "archy": "~1.0.0", + "bin-links": "^1.1.7", + "bluebird": "^3.5.5", + "byte-size": "^5.0.1", + "cacache": "^12.0.3", + "call-limit": "^1.1.1", + "chownr": "^1.1.4", + "ci-info": "^2.0.0", + "cli-columns": "^3.1.2", + "cli-table3": "^0.5.1", + "cmd-shim": "^3.0.3", + "columnify": "~1.5.4", + "config-chain": 
"^1.1.12", + "debuglog": "*", + "detect-indent": "~5.0.0", + "detect-newline": "^2.1.0", + "dezalgo": "~1.0.3", + "editor": "~1.0.0", + "figgy-pudding": "^3.5.1", + "find-npm-prefix": "^1.0.2", + "fs-vacuum": "~1.2.10", + "fs-write-stream-atomic": "~1.0.10", + "gentle-fs": "^2.3.0", + "glob": "^7.1.6", + "graceful-fs": "^4.2.4", + "has-unicode": "~2.0.1", + "hosted-git-info": "^2.8.8", + "iferr": "^1.0.2", + "imurmurhash": "*", + "infer-owner": "^1.0.4", + "inflight": "~1.0.6", + "inherits": "^2.0.4", + "ini": "^1.3.5", + "init-package-json": "^1.10.3", + "is-cidr": "^3.0.0", + "json-parse-better-errors": "^1.0.2", + "JSONStream": "^1.3.5", + "lazy-property": "~1.0.0", + "libcipm": "^4.0.7", + "libnpm": "^3.0.1", + "libnpmaccess": "^3.0.2", + "libnpmhook": "^5.0.3", + "libnpmorg": "^1.0.1", + "libnpmsearch": "^2.0.2", + "libnpmteam": "^1.0.2", + "libnpx": "^10.2.2", + "lock-verify": "^2.1.0", + "lockfile": "^1.0.4", + "lodash._baseindexof": "*", + "lodash._baseuniq": "~4.6.0", + "lodash._bindcallback": "*", + "lodash._cacheindexof": "*", + "lodash._createcache": "*", + "lodash._getnative": "*", + "lodash.clonedeep": "~4.5.0", + "lodash.restparam": "*", + "lodash.union": "~4.6.0", + "lodash.uniq": "~4.5.0", + "lodash.without": "~4.4.0", + "lru-cache": "^5.1.1", + "meant": "~1.0.1", + "mississippi": "^3.0.0", + "mkdirp": "^0.5.5", + "move-concurrently": "^1.0.1", + "node-gyp": "^5.1.0", + "nopt": "^4.0.3", + "normalize-package-data": "^2.5.0", + "npm-audit-report": "^1.3.2", + "npm-cache-filename": "~1.0.2", + "npm-install-checks": "^3.0.2", + "npm-lifecycle": "^3.1.4", + "npm-package-arg": "^6.1.1", + "npm-packlist": "^1.4.8", + "npm-pick-manifest": "^3.0.2", + "npm-profile": "^4.0.4", + "npm-registry-fetch": "^4.0.5", + "npm-user-validate": "~1.0.0", + "npmlog": "~4.1.2", + "once": "~1.4.0", + "opener": "^1.5.1", + "osenv": "^0.1.5", + "pacote": "^9.5.12", + "path-is-inside": "~1.0.2", + "promise-inflight": "~1.0.1", + "qrcode-terminal": "^0.12.0", + "query-string": "^6.8.2", + "qw": "~1.0.1", + "read": "~1.0.7", + "read-cmd-shim": "^1.0.5", + "read-installed": "~4.0.3", + "read-package-json": "^2.1.1", + "read-package-tree": "^5.3.1", + "readable-stream": "^3.6.0", + "readdir-scoped-modules": "^1.1.0", + "request": "^2.88.0", + "retry": "^0.12.0", + "rimraf": "^2.7.1", + "safe-buffer": "^5.1.2", + "semver": "^5.7.1", + "sha": "^3.0.0", + "slide": "~1.1.6", + "sorted-object": "~2.0.1", + "sorted-union-stream": "~2.1.3", + "ssri": "^6.0.1", + "stringify-package": "^1.0.1", + "tar": "^4.4.13", + "text-table": "~0.2.0", + "tiny-relative-date": "^1.3.0", + "uid-number": "0.0.6", + "umask": "~1.1.0", + "unique-filename": "^1.1.1", + "unpipe": "~1.0.0", + "update-notifier": "^2.5.0", + "uuid": "^3.3.3", + "validate-npm-package-license": "^3.0.4", + "validate-npm-package-name": "~3.0.0", + "which": "^1.3.1", + "worker-farm": "^1.7.0", + "write-file-atomic": "^2.4.3" + }, + "dependencies": { + "abbrev": { + "version": "1.1.1", + "bundled": true, + "dev": true + }, + "agent-base": { + "version": "4.3.0", + "bundled": true, + "dev": true, + "requires": { + "es6-promisify": "^5.0.0" + } + }, + "agentkeepalive": { + "version": "3.5.2", + "bundled": true, + "dev": true, + "requires": { + "humanize-ms": "^1.2.1" + } + }, + "ajv": { + "version": "5.5.2", + "bundled": true, + "dev": true, + "requires": { + "co": "^4.6.0", + "fast-deep-equal": "^1.0.0", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.3.0" + } + }, + "ansi-align": { + "version": "2.0.0", + "bundled": true, + "dev": 
true, + "requires": { + "string-width": "^2.0.0" + } + }, + "ansi-regex": { + "version": "2.1.1", + "bundled": true, + "dev": true + }, + "ansi-styles": { + "version": "3.2.1", + "bundled": true, + "dev": true, + "requires": { + "color-convert": "^1.9.0" + } + }, + "ansicolors": { + "version": "0.3.2", + "bundled": true, + "dev": true + }, + "ansistyles": { + "version": "0.1.3", + "bundled": true, + "dev": true + }, + "aproba": { + "version": "2.0.0", + "bundled": true, + "dev": true + }, + "archy": { + "version": "1.0.0", + "bundled": true, + "dev": true + }, + "are-we-there-yet": { + "version": "1.1.4", + "bundled": true, + "dev": true, + "requires": { + "delegates": "^1.0.0", + "readable-stream": "^2.0.6" + }, + "dependencies": { + "readable-stream": { + "version": "2.3.6", + "bundled": true, + "dev": true, + "requires": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "string_decoder": { + "version": "1.1.1", + "bundled": true, + "dev": true, + "requires": { + "safe-buffer": "~5.1.0" + } + } + } + }, + "asap": { + "version": "2.0.6", + "bundled": true, + "dev": true + }, + "asn1": { + "version": "0.2.4", + "bundled": true, + "dev": true, + "requires": { + "safer-buffer": "~2.1.0" + } + }, + "assert-plus": { + "version": "1.0.0", + "bundled": true, + "dev": true + }, + "asynckit": { + "version": "0.4.0", + "bundled": true, + "dev": true + }, + "aws-sign2": { + "version": "0.7.0", + "bundled": true, + "dev": true + }, + "aws4": { + "version": "1.8.0", + "bundled": true, + "dev": true + }, + "balanced-match": { + "version": "1.0.0", + "bundled": true, + "dev": true + }, + "bcrypt-pbkdf": { + "version": "1.0.2", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "tweetnacl": "^0.14.3" + } + }, + "bin-links": { + "version": "1.1.7", + "bundled": true, + "dev": true, + "requires": { + "bluebird": "^3.5.3", + "cmd-shim": "^3.0.0", + "gentle-fs": "^2.3.0", + "graceful-fs": "^4.1.15", + "npm-normalize-package-bin": "^1.0.0", + "write-file-atomic": "^2.3.0" + } + }, + "bluebird": { + "version": "3.5.5", + "bundled": true, + "dev": true + }, + "boxen": { + "version": "1.3.0", + "bundled": true, + "dev": true, + "requires": { + "ansi-align": "^2.0.0", + "camelcase": "^4.0.0", + "chalk": "^2.0.1", + "cli-boxes": "^1.0.0", + "string-width": "^2.0.0", + "term-size": "^1.2.0", + "widest-line": "^2.0.0" + } + }, + "brace-expansion": { + "version": "1.1.11", + "bundled": true, + "dev": true, + "requires": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "buffer-from": { + "version": "1.0.0", + "bundled": true, + "dev": true + }, + "builtins": { + "version": "1.0.3", + "bundled": true, + "dev": true + }, + "byline": { + "version": "5.0.0", + "bundled": true, + "dev": true + }, + "byte-size": { + "version": "5.0.1", + "bundled": true, + "dev": true + }, + "cacache": { + "version": "12.0.3", + "bundled": true, + "dev": true, + "requires": { + "bluebird": "^3.5.5", + "chownr": "^1.1.1", + "figgy-pudding": "^3.5.1", + "glob": "^7.1.4", + "graceful-fs": "^4.1.15", + "infer-owner": "^1.0.3", + "lru-cache": "^5.1.1", + "mississippi": "^3.0.0", + "mkdirp": "^0.5.1", + "move-concurrently": "^1.0.1", + "promise-inflight": "^1.0.1", + "rimraf": "^2.6.3", + "ssri": "^6.0.1", + "unique-filename": "^1.1.1", + "y18n": "^4.0.0" + } + }, + "call-limit": { + "version": "1.1.1", + "bundled": true, + "dev": true + }, + 
"camelcase": { + "version": "4.1.0", + "bundled": true, + "dev": true + }, + "capture-stack-trace": { + "version": "1.0.0", + "bundled": true, + "dev": true + }, + "caseless": { + "version": "0.12.0", + "bundled": true, + "dev": true + }, + "chalk": { + "version": "2.4.1", + "bundled": true, + "dev": true, + "requires": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + } + }, + "chownr": { + "version": "1.1.4", + "bundled": true, + "dev": true + }, + "ci-info": { + "version": "2.0.0", + "bundled": true, + "dev": true + }, + "cidr-regex": { + "version": "2.0.10", + "bundled": true, + "dev": true, + "requires": { + "ip-regex": "^2.1.0" + } + }, + "cli-boxes": { + "version": "1.0.0", + "bundled": true, + "dev": true + }, + "cli-columns": { + "version": "3.1.2", + "bundled": true, + "dev": true, + "requires": { + "string-width": "^2.0.0", + "strip-ansi": "^3.0.1" + } + }, + "cli-table3": { + "version": "0.5.1", + "bundled": true, + "dev": true, + "requires": { + "colors": "^1.1.2", + "object-assign": "^4.1.0", + "string-width": "^2.1.1" + } + }, + "cliui": { + "version": "4.1.0", + "bundled": true, + "dev": true, + "requires": { + "string-width": "^2.1.1", + "strip-ansi": "^4.0.0", + "wrap-ansi": "^2.0.0" + }, + "dependencies": { + "ansi-regex": { + "version": "3.0.0", + "bundled": true, + "dev": true + }, + "strip-ansi": { + "version": "4.0.0", + "bundled": true, + "dev": true, + "requires": { + "ansi-regex": "^3.0.0" + } + } + } + }, + "clone": { + "version": "1.0.4", + "bundled": true, + "dev": true + }, + "cmd-shim": { + "version": "3.0.3", + "bundled": true, + "dev": true, + "requires": { + "graceful-fs": "^4.1.2", + "mkdirp": "~0.5.0" + } + }, + "co": { + "version": "4.6.0", + "bundled": true, + "dev": true + }, + "code-point-at": { + "version": "1.1.0", + "bundled": true, + "dev": true + }, + "color-convert": { + "version": "1.9.1", + "bundled": true, + "dev": true, + "requires": { + "color-name": "^1.1.1" + } + }, + "color-name": { + "version": "1.1.3", + "bundled": true, + "dev": true + }, + "colors": { + "version": "1.3.3", + "bundled": true, + "dev": true, + "optional": true + }, + "columnify": { + "version": "1.5.4", + "bundled": true, + "dev": true, + "requires": { + "strip-ansi": "^3.0.0", + "wcwidth": "^1.0.0" + } + }, + "combined-stream": { + "version": "1.0.6", + "bundled": true, + "dev": true, + "requires": { + "delayed-stream": "~1.0.0" + } + }, + "concat-map": { + "version": "0.0.1", + "bundled": true, + "dev": true + }, + "concat-stream": { + "version": "1.6.2", + "bundled": true, + "dev": true, + "requires": { + "buffer-from": "^1.0.0", + "inherits": "^2.0.3", + "readable-stream": "^2.2.2", + "typedarray": "^0.0.6" + }, + "dependencies": { + "readable-stream": { + "version": "2.3.6", + "bundled": true, + "dev": true, + "requires": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "string_decoder": { + "version": "1.1.1", + "bundled": true, + "dev": true, + "requires": { + "safe-buffer": "~5.1.0" + } + } + } + }, + "config-chain": { + "version": "1.1.12", + "bundled": true, + "dev": true, + "requires": { + "ini": "^1.3.4", + "proto-list": "~1.2.1" + } + }, + "configstore": { + "version": "3.1.2", + "bundled": true, + "dev": true, + "requires": { + "dot-prop": "^4.1.0", + "graceful-fs": "^4.1.2", + "make-dir": "^1.0.0", + "unique-string": "^1.0.0", + "write-file-atomic": 
"^2.0.0", + "xdg-basedir": "^3.0.0" + } + }, + "console-control-strings": { + "version": "1.1.0", + "bundled": true, + "dev": true + }, + "copy-concurrently": { + "version": "1.0.5", + "bundled": true, + "dev": true, + "requires": { + "aproba": "^1.1.1", + "fs-write-stream-atomic": "^1.0.8", + "iferr": "^0.1.5", + "mkdirp": "^0.5.1", + "rimraf": "^2.5.4", + "run-queue": "^1.0.0" + }, + "dependencies": { + "aproba": { + "version": "1.2.0", + "bundled": true, + "dev": true + }, + "iferr": { + "version": "0.1.5", + "bundled": true, + "dev": true + } + } + }, + "core-util-is": { + "version": "1.0.2", + "bundled": true, + "dev": true + }, + "create-error-class": { + "version": "3.0.2", + "bundled": true, + "dev": true, + "requires": { + "capture-stack-trace": "^1.0.0" + } + }, + "cross-spawn": { + "version": "5.1.0", + "bundled": true, + "dev": true, + "requires": { + "lru-cache": "^4.0.1", + "shebang-command": "^1.2.0", + "which": "^1.2.9" + }, + "dependencies": { + "lru-cache": { + "version": "4.1.5", + "bundled": true, + "dev": true, + "requires": { + "pseudomap": "^1.0.2", + "yallist": "^2.1.2" + } + }, + "yallist": { + "version": "2.1.2", + "bundled": true, + "dev": true + } + } + }, + "crypto-random-string": { + "version": "1.0.0", + "bundled": true, + "dev": true + }, + "cyclist": { + "version": "0.2.2", + "bundled": true, + "dev": true + }, + "dashdash": { + "version": "1.14.1", + "bundled": true, + "dev": true, + "requires": { + "assert-plus": "^1.0.0" + } + }, + "debug": { + "version": "3.1.0", + "bundled": true, + "dev": true, + "requires": { + "ms": "2.0.0" + }, + "dependencies": { + "ms": { + "version": "2.0.0", + "bundled": true, + "dev": true + } + } + }, + "debuglog": { + "version": "1.0.1", + "bundled": true, + "dev": true + }, + "decamelize": { + "version": "1.2.0", + "bundled": true, + "dev": true + }, + "decode-uri-component": { + "version": "0.2.0", + "bundled": true, + "dev": true + }, + "deep-extend": { + "version": "0.6.0", + "bundled": true, + "dev": true + }, + "defaults": { + "version": "1.0.3", + "bundled": true, + "dev": true, + "requires": { + "clone": "^1.0.2" + } + }, + "define-properties": { + "version": "1.1.3", + "bundled": true, + "dev": true, + "requires": { + "object-keys": "^1.0.12" + } + }, + "delayed-stream": { + "version": "1.0.0", + "bundled": true, + "dev": true + }, + "delegates": { + "version": "1.0.0", + "bundled": true, + "dev": true + }, + "detect-indent": { + "version": "5.0.0", + "bundled": true, + "dev": true + }, + "detect-newline": { + "version": "2.1.0", + "bundled": true, + "dev": true + }, + "dezalgo": { + "version": "1.0.3", + "bundled": true, + "dev": true, + "requires": { + "asap": "^2.0.0", + "wrappy": "1" + } + }, + "dot-prop": { + "version": "4.2.0", + "bundled": true, + "dev": true, + "requires": { + "is-obj": "^1.0.0" + } + }, + "dotenv": { + "version": "5.0.1", + "bundled": true, + "dev": true + }, + "duplexer3": { + "version": "0.1.4", + "bundled": true, + "dev": true + }, + "duplexify": { + "version": "3.6.0", + "bundled": true, + "dev": true, + "requires": { + "end-of-stream": "^1.0.0", + "inherits": "^2.0.1", + "readable-stream": "^2.0.0", + "stream-shift": "^1.0.0" + }, + "dependencies": { + "readable-stream": { + "version": "2.3.6", + "bundled": true, + "dev": true, + "requires": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "string_decoder": { + "version": 
"1.1.1", + "bundled": true, + "dev": true, + "requires": { + "safe-buffer": "~5.1.0" + } + } + } + }, + "ecc-jsbn": { + "version": "0.1.2", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "jsbn": "~0.1.0", + "safer-buffer": "^2.1.0" + } + }, + "editor": { + "version": "1.0.0", + "bundled": true, + "dev": true + }, + "encoding": { + "version": "0.1.12", + "bundled": true, + "dev": true, + "requires": { + "iconv-lite": "~0.4.13" + } + }, + "end-of-stream": { + "version": "1.4.1", + "bundled": true, + "dev": true, + "requires": { + "once": "^1.4.0" + } + }, + "env-paths": { + "version": "2.2.0", + "bundled": true, + "dev": true + }, + "err-code": { + "version": "1.1.2", + "bundled": true, + "dev": true + }, + "errno": { + "version": "0.1.7", + "bundled": true, + "dev": true, + "requires": { + "prr": "~1.0.1" + } + }, + "es-abstract": { + "version": "1.12.0", + "bundled": true, + "dev": true, + "requires": { + "es-to-primitive": "^1.1.1", + "function-bind": "^1.1.1", + "has": "^1.0.1", + "is-callable": "^1.1.3", + "is-regex": "^1.0.4" + } + }, + "es-to-primitive": { + "version": "1.2.0", + "bundled": true, + "dev": true, + "requires": { + "is-callable": "^1.1.4", + "is-date-object": "^1.0.1", + "is-symbol": "^1.0.2" + } + }, + "es6-promise": { + "version": "4.2.8", + "bundled": true, + "dev": true + }, + "es6-promisify": { + "version": "5.0.0", + "bundled": true, + "dev": true, + "requires": { + "es6-promise": "^4.0.3" + } + }, + "escape-string-regexp": { + "version": "1.0.5", + "bundled": true, + "dev": true + }, + "execa": { + "version": "0.7.0", + "bundled": true, + "dev": true, + "requires": { + "cross-spawn": "^5.0.1", + "get-stream": "^3.0.0", + "is-stream": "^1.1.0", + "npm-run-path": "^2.0.0", + "p-finally": "^1.0.0", + "signal-exit": "^3.0.0", + "strip-eof": "^1.0.0" + }, + "dependencies": { + "get-stream": { + "version": "3.0.0", + "bundled": true, + "dev": true + } + } + }, + "extend": { + "version": "3.0.2", + "bundled": true, + "dev": true + }, + "extsprintf": { + "version": "1.3.0", + "bundled": true, + "dev": true + }, + "fast-deep-equal": { + "version": "1.1.0", + "bundled": true, + "dev": true + }, + "fast-json-stable-stringify": { + "version": "2.0.0", + "bundled": true, + "dev": true + }, + "figgy-pudding": { + "version": "3.5.1", + "bundled": true, + "dev": true + }, + "find-npm-prefix": { + "version": "1.0.2", + "bundled": true, + "dev": true + }, + "find-up": { + "version": "2.1.0", + "bundled": true, + "dev": true, + "requires": { + "locate-path": "^2.0.0" + } + }, + "flush-write-stream": { + "version": "1.0.3", + "bundled": true, + "dev": true, + "requires": { + "inherits": "^2.0.1", + "readable-stream": "^2.0.4" + }, + "dependencies": { + "readable-stream": { + "version": "2.3.6", + "bundled": true, + "dev": true, + "requires": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "string_decoder": { + "version": "1.1.1", + "bundled": true, + "dev": true, + "requires": { + "safe-buffer": "~5.1.0" + } + } + } + }, + "forever-agent": { + "version": "0.6.1", + "bundled": true, + "dev": true + }, + "form-data": { + "version": "2.3.2", + "bundled": true, + "dev": true, + "requires": { + "asynckit": "^0.4.0", + "combined-stream": "1.0.6", + "mime-types": "^2.1.12" + } + }, + "from2": { + "version": "2.3.0", + "bundled": true, + "dev": true, + "requires": { + "inherits": "^2.0.1", + 
"readable-stream": "^2.0.0" + }, + "dependencies": { + "readable-stream": { + "version": "2.3.6", + "bundled": true, + "dev": true, + "requires": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "string_decoder": { + "version": "1.1.1", + "bundled": true, + "dev": true, + "requires": { + "safe-buffer": "~5.1.0" + } + } + } + }, + "fs-minipass": { + "version": "1.2.7", + "bundled": true, + "dev": true, + "requires": { + "minipass": "^2.6.0" + }, + "dependencies": { + "minipass": { + "version": "2.9.0", + "bundled": true, + "dev": true, + "requires": { + "safe-buffer": "^5.1.2", + "yallist": "^3.0.0" + } + } + } + }, + "fs-vacuum": { + "version": "1.2.10", + "bundled": true, + "dev": true, + "requires": { + "graceful-fs": "^4.1.2", + "path-is-inside": "^1.0.1", + "rimraf": "^2.5.2" + } + }, + "fs-write-stream-atomic": { + "version": "1.0.10", + "bundled": true, + "dev": true, + "requires": { + "graceful-fs": "^4.1.2", + "iferr": "^0.1.5", + "imurmurhash": "^0.1.4", + "readable-stream": "1 || 2" + }, + "dependencies": { + "iferr": { + "version": "0.1.5", + "bundled": true, + "dev": true + }, + "readable-stream": { + "version": "2.3.6", + "bundled": true, + "dev": true, + "requires": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "string_decoder": { + "version": "1.1.1", + "bundled": true, + "dev": true, + "requires": { + "safe-buffer": "~5.1.0" + } + } + } + }, + "fs.realpath": { + "version": "1.0.0", + "bundled": true, + "dev": true + }, + "function-bind": { + "version": "1.1.1", + "bundled": true, + "dev": true + }, + "gauge": { + "version": "2.7.4", + "bundled": true, + "dev": true, + "requires": { + "aproba": "^1.0.3", + "console-control-strings": "^1.0.0", + "has-unicode": "^2.0.0", + "object-assign": "^4.1.0", + "signal-exit": "^3.0.0", + "string-width": "^1.0.1", + "strip-ansi": "^3.0.1", + "wide-align": "^1.1.0" + }, + "dependencies": { + "aproba": { + "version": "1.2.0", + "bundled": true, + "dev": true + }, + "string-width": { + "version": "1.0.2", + "bundled": true, + "dev": true, + "requires": { + "code-point-at": "^1.0.0", + "is-fullwidth-code-point": "^1.0.0", + "strip-ansi": "^3.0.0" + } + } + } + }, + "genfun": { + "version": "5.0.0", + "bundled": true, + "dev": true + }, + "gentle-fs": { + "version": "2.3.0", + "bundled": true, + "dev": true, + "requires": { + "aproba": "^1.1.2", + "chownr": "^1.1.2", + "cmd-shim": "^3.0.3", + "fs-vacuum": "^1.2.10", + "graceful-fs": "^4.1.11", + "iferr": "^0.1.5", + "infer-owner": "^1.0.4", + "mkdirp": "^0.5.1", + "path-is-inside": "^1.0.2", + "read-cmd-shim": "^1.0.1", + "slide": "^1.1.6" + }, + "dependencies": { + "aproba": { + "version": "1.2.0", + "bundled": true, + "dev": true + }, + "iferr": { + "version": "0.1.5", + "bundled": true, + "dev": true + } + } + }, + "get-caller-file": { + "version": "1.0.3", + "bundled": true, + "dev": true + }, + "get-stream": { + "version": "4.1.0", + "bundled": true, + "dev": true, + "requires": { + "pump": "^3.0.0" + } + }, + "getpass": { + "version": "0.1.7", + "bundled": true, + "dev": true, + "requires": { + "assert-plus": "^1.0.0" + } + }, + "glob": { + "version": "7.1.6", + "bundled": true, + "dev": true, + "requires": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + 
"inherits": "2", + "minimatch": "^3.0.4", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + } + }, + "global-dirs": { + "version": "0.1.1", + "bundled": true, + "dev": true, + "requires": { + "ini": "^1.3.4" + } + }, + "got": { + "version": "6.7.1", + "bundled": true, + "dev": true, + "requires": { + "create-error-class": "^3.0.0", + "duplexer3": "^0.1.4", + "get-stream": "^3.0.0", + "is-redirect": "^1.0.0", + "is-retry-allowed": "^1.0.0", + "is-stream": "^1.0.0", + "lowercase-keys": "^1.0.0", + "safe-buffer": "^5.0.1", + "timed-out": "^4.0.0", + "unzip-response": "^2.0.1", + "url-parse-lax": "^1.0.0" + }, + "dependencies": { + "get-stream": { + "version": "3.0.0", + "bundled": true, + "dev": true + } + } + }, + "graceful-fs": { + "version": "4.2.4", + "bundled": true, + "dev": true + }, + "har-schema": { + "version": "2.0.0", + "bundled": true, + "dev": true + }, + "har-validator": { + "version": "5.1.0", + "bundled": true, + "dev": true, + "requires": { + "ajv": "^5.3.0", + "har-schema": "^2.0.0" + } + }, + "has": { + "version": "1.0.3", + "bundled": true, + "dev": true, + "requires": { + "function-bind": "^1.1.1" + } + }, + "has-flag": { + "version": "3.0.0", + "bundled": true, + "dev": true + }, + "has-symbols": { + "version": "1.0.0", + "bundled": true, + "dev": true + }, + "has-unicode": { + "version": "2.0.1", + "bundled": true, + "dev": true + }, + "hosted-git-info": { + "version": "2.8.8", + "bundled": true, + "dev": true + }, + "http-cache-semantics": { + "version": "3.8.1", + "bundled": true, + "dev": true + }, + "http-proxy-agent": { + "version": "2.1.0", + "bundled": true, + "dev": true, + "requires": { + "agent-base": "4", + "debug": "3.1.0" + } + }, + "http-signature": { + "version": "1.2.0", + "bundled": true, + "dev": true, + "requires": { + "assert-plus": "^1.0.0", + "jsprim": "^1.2.2", + "sshpk": "^1.7.0" + } + }, + "https-proxy-agent": { + "version": "2.2.4", + "bundled": true, + "dev": true, + "requires": { + "agent-base": "^4.3.0", + "debug": "^3.1.0" + } + }, + "humanize-ms": { + "version": "1.2.1", + "bundled": true, + "dev": true, + "requires": { + "ms": "^2.0.0" + } + }, + "iconv-lite": { + "version": "0.4.23", + "bundled": true, + "dev": true, + "requires": { + "safer-buffer": ">= 2.1.2 < 3" + } + }, + "iferr": { + "version": "1.0.2", + "bundled": true, + "dev": true + }, + "ignore-walk": { + "version": "3.0.3", + "bundled": true, + "dev": true, + "requires": { + "minimatch": "^3.0.4" + } + }, + "import-lazy": { + "version": "2.1.0", + "bundled": true, + "dev": true + }, + "imurmurhash": { + "version": "0.1.4", + "bundled": true, + "dev": true + }, + "infer-owner": { + "version": "1.0.4", + "bundled": true, + "dev": true + }, + "inflight": { + "version": "1.0.6", + "bundled": true, + "dev": true, + "requires": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "inherits": { + "version": "2.0.4", + "bundled": true, + "dev": true + }, + "ini": { + "version": "1.3.5", + "bundled": true, + "dev": true + }, + "init-package-json": { + "version": "1.10.3", + "bundled": true, + "dev": true, + "requires": { + "glob": "^7.1.1", + "npm-package-arg": "^4.0.0 || ^5.0.0 || ^6.0.0", + "promzard": "^0.3.0", + "read": "~1.0.1", + "read-package-json": "1 || 2", + "semver": "2.x || 3.x || 4 || 5", + "validate-npm-package-license": "^3.0.1", + "validate-npm-package-name": "^3.0.0" + } + }, + "invert-kv": { + "version": "2.0.0", + "bundled": true, + "dev": true + }, + "ip": { + "version": "1.1.5", + "bundled": true, + "dev": true + }, + "ip-regex": { + "version": "2.1.0", + 
"bundled": true, + "dev": true + }, + "is-callable": { + "version": "1.1.4", + "bundled": true, + "dev": true + }, + "is-ci": { + "version": "1.2.1", + "bundled": true, + "dev": true, + "requires": { + "ci-info": "^1.5.0" + }, + "dependencies": { + "ci-info": { + "version": "1.6.0", + "bundled": true, + "dev": true + } + } + }, + "is-cidr": { + "version": "3.0.0", + "bundled": true, + "dev": true, + "requires": { + "cidr-regex": "^2.0.10" + } + }, + "is-date-object": { + "version": "1.0.1", + "bundled": true, + "dev": true + }, + "is-fullwidth-code-point": { + "version": "1.0.0", + "bundled": true, + "dev": true, + "requires": { + "number-is-nan": "^1.0.0" + } + }, + "is-installed-globally": { + "version": "0.1.0", + "bundled": true, + "dev": true, + "requires": { + "global-dirs": "^0.1.0", + "is-path-inside": "^1.0.0" + } + }, + "is-npm": { + "version": "1.0.0", + "bundled": true, + "dev": true + }, + "is-obj": { + "version": "1.0.1", + "bundled": true, + "dev": true + }, + "is-path-inside": { + "version": "1.0.1", + "bundled": true, + "dev": true, + "requires": { + "path-is-inside": "^1.0.1" + } + }, + "is-redirect": { + "version": "1.0.0", + "bundled": true, + "dev": true + }, + "is-regex": { + "version": "1.0.4", + "bundled": true, + "dev": true, + "requires": { + "has": "^1.0.1" + } + }, + "is-retry-allowed": { + "version": "1.2.0", + "bundled": true, + "dev": true + }, + "is-stream": { + "version": "1.1.0", + "bundled": true, + "dev": true + }, + "is-symbol": { + "version": "1.0.2", + "bundled": true, + "dev": true, + "requires": { + "has-symbols": "^1.0.0" + } + }, + "is-typedarray": { + "version": "1.0.0", + "bundled": true, + "dev": true + }, + "isarray": { + "version": "1.0.0", + "bundled": true, + "dev": true + }, + "isexe": { + "version": "2.0.0", + "bundled": true, + "dev": true + }, + "isstream": { + "version": "0.1.2", + "bundled": true, + "dev": true + }, + "jsbn": { + "version": "0.1.1", + "bundled": true, + "dev": true, + "optional": true + }, + "json-parse-better-errors": { + "version": "1.0.2", + "bundled": true, + "dev": true + }, + "json-schema": { + "version": "0.2.3", + "bundled": true, + "dev": true + }, + "json-schema-traverse": { + "version": "0.3.1", + "bundled": true, + "dev": true + }, + "json-stringify-safe": { + "version": "5.0.1", + "bundled": true, + "dev": true + }, + "jsonparse": { + "version": "1.3.1", + "bundled": true, + "dev": true + }, + "JSONStream": { + "version": "1.3.5", + "bundled": true, + "dev": true, + "requires": { + "jsonparse": "^1.2.0", + "through": ">=2.2.7 <3" + } + }, + "jsprim": { + "version": "1.4.1", + "bundled": true, + "dev": true, + "requires": { + "assert-plus": "1.0.0", + "extsprintf": "1.3.0", + "json-schema": "0.2.3", + "verror": "1.10.0" + } + }, + "latest-version": { + "version": "3.1.0", + "bundled": true, + "dev": true, + "requires": { + "package-json": "^4.0.0" + } + }, + "lazy-property": { + "version": "1.0.0", + "bundled": true, + "dev": true + }, + "lcid": { + "version": "2.0.0", + "bundled": true, + "dev": true, + "requires": { + "invert-kv": "^2.0.0" + } + }, + "libcipm": { + "version": "4.0.7", + "bundled": true, + "dev": true, + "requires": { + "bin-links": "^1.1.2", + "bluebird": "^3.5.1", + "figgy-pudding": "^3.5.1", + "find-npm-prefix": "^1.0.2", + "graceful-fs": "^4.1.11", + "ini": "^1.3.5", + "lock-verify": "^2.0.2", + "mkdirp": "^0.5.1", + "npm-lifecycle": "^3.0.0", + "npm-logical-tree": "^1.2.1", + "npm-package-arg": "^6.1.0", + "pacote": "^9.1.0", + "read-package-json": "^2.0.13", + "rimraf": "^2.6.2", + 
"worker-farm": "^1.6.0" + } + }, + "libnpm": { + "version": "3.0.1", + "bundled": true, + "dev": true, + "requires": { + "bin-links": "^1.1.2", + "bluebird": "^3.5.3", + "find-npm-prefix": "^1.0.2", + "libnpmaccess": "^3.0.2", + "libnpmconfig": "^1.2.1", + "libnpmhook": "^5.0.3", + "libnpmorg": "^1.0.1", + "libnpmpublish": "^1.1.2", + "libnpmsearch": "^2.0.2", + "libnpmteam": "^1.0.2", + "lock-verify": "^2.0.2", + "npm-lifecycle": "^3.0.0", + "npm-logical-tree": "^1.2.1", + "npm-package-arg": "^6.1.0", + "npm-profile": "^4.0.2", + "npm-registry-fetch": "^4.0.0", + "npmlog": "^4.1.2", + "pacote": "^9.5.3", + "read-package-json": "^2.0.13", + "stringify-package": "^1.0.0" + } + }, + "libnpmaccess": { + "version": "3.0.2", + "bundled": true, + "dev": true, + "requires": { + "aproba": "^2.0.0", + "get-stream": "^4.0.0", + "npm-package-arg": "^6.1.0", + "npm-registry-fetch": "^4.0.0" + } + }, + "libnpmconfig": { + "version": "1.2.1", + "bundled": true, + "dev": true, + "requires": { + "figgy-pudding": "^3.5.1", + "find-up": "^3.0.0", + "ini": "^1.3.5" + }, + "dependencies": { + "find-up": { + "version": "3.0.0", + "bundled": true, + "dev": true, + "requires": { + "locate-path": "^3.0.0" + } + }, + "locate-path": { + "version": "3.0.0", + "bundled": true, + "dev": true, + "requires": { + "p-locate": "^3.0.0", + "path-exists": "^3.0.0" + } + }, + "p-limit": { + "version": "2.2.0", + "bundled": true, + "dev": true, + "requires": { + "p-try": "^2.0.0" + } + }, + "p-locate": { + "version": "3.0.0", + "bundled": true, + "dev": true, + "requires": { + "p-limit": "^2.0.0" + } + }, + "p-try": { + "version": "2.2.0", + "bundled": true, + "dev": true + } + } + }, + "libnpmhook": { + "version": "5.0.3", + "bundled": true, + "dev": true, + "requires": { + "aproba": "^2.0.0", + "figgy-pudding": "^3.4.1", + "get-stream": "^4.0.0", + "npm-registry-fetch": "^4.0.0" + } + }, + "libnpmorg": { + "version": "1.0.1", + "bundled": true, + "dev": true, + "requires": { + "aproba": "^2.0.0", + "figgy-pudding": "^3.4.1", + "get-stream": "^4.0.0", + "npm-registry-fetch": "^4.0.0" + } + }, + "libnpmpublish": { + "version": "1.1.2", + "bundled": true, + "dev": true, + "requires": { + "aproba": "^2.0.0", + "figgy-pudding": "^3.5.1", + "get-stream": "^4.0.0", + "lodash.clonedeep": "^4.5.0", + "normalize-package-data": "^2.4.0", + "npm-package-arg": "^6.1.0", + "npm-registry-fetch": "^4.0.0", + "semver": "^5.5.1", + "ssri": "^6.0.1" + } + }, + "libnpmsearch": { + "version": "2.0.2", + "bundled": true, + "dev": true, + "requires": { + "figgy-pudding": "^3.5.1", + "get-stream": "^4.0.0", + "npm-registry-fetch": "^4.0.0" + } + }, + "libnpmteam": { + "version": "1.0.2", + "bundled": true, + "dev": true, + "requires": { + "aproba": "^2.0.0", + "figgy-pudding": "^3.4.1", + "get-stream": "^4.0.0", + "npm-registry-fetch": "^4.0.0" + } + }, + "libnpx": { + "version": "10.2.2", + "bundled": true, + "dev": true, + "requires": { + "dotenv": "^5.0.1", + "npm-package-arg": "^6.0.0", + "rimraf": "^2.6.2", + "safe-buffer": "^5.1.0", + "update-notifier": "^2.3.0", + "which": "^1.3.0", + "y18n": "^4.0.0", + "yargs": "^11.0.0" + } + }, + "locate-path": { + "version": "2.0.0", + "bundled": true, + "dev": true, + "requires": { + "p-locate": "^2.0.0", + "path-exists": "^3.0.0" + } + }, + "lock-verify": { + "version": "2.1.0", + "bundled": true, + "dev": true, + "requires": { + "npm-package-arg": "^6.1.0", + "semver": "^5.4.1" + } + }, + "lockfile": { + "version": "1.0.4", + "bundled": true, + "dev": true, + "requires": { + "signal-exit": "^3.0.2" + 
} + }, + "lodash._baseindexof": { + "version": "3.1.0", + "bundled": true, + "dev": true + }, + "lodash._baseuniq": { + "version": "4.6.0", + "bundled": true, + "dev": true, + "requires": { + "lodash._createset": "~4.0.0", + "lodash._root": "~3.0.0" + } + }, + "lodash._bindcallback": { + "version": "3.0.1", + "bundled": true, + "dev": true + }, + "lodash._cacheindexof": { + "version": "3.0.2", + "bundled": true, + "dev": true + }, + "lodash._createcache": { + "version": "3.1.2", + "bundled": true, + "dev": true, + "requires": { + "lodash._getnative": "^3.0.0" + } + }, + "lodash._createset": { + "version": "4.0.3", + "bundled": true, + "dev": true + }, + "lodash._getnative": { + "version": "3.9.1", + "bundled": true, + "dev": true + }, + "lodash._root": { + "version": "3.0.1", + "bundled": true, + "dev": true + }, + "lodash.clonedeep": { + "version": "4.5.0", + "bundled": true, + "dev": true + }, + "lodash.restparam": { + "version": "3.6.1", + "bundled": true, + "dev": true + }, + "lodash.union": { + "version": "4.6.0", + "bundled": true, + "dev": true + }, + "lodash.uniq": { + "version": "4.5.0", + "bundled": true, + "dev": true + }, + "lodash.without": { + "version": "4.4.0", + "bundled": true, + "dev": true + }, + "lowercase-keys": { + "version": "1.0.1", + "bundled": true, + "dev": true + }, + "lru-cache": { + "version": "5.1.1", + "bundled": true, + "dev": true, + "requires": { + "yallist": "^3.0.2" + } + }, + "make-dir": { + "version": "1.3.0", + "bundled": true, + "dev": true, + "requires": { + "pify": "^3.0.0" + } + }, + "make-fetch-happen": { + "version": "5.0.2", + "bundled": true, + "dev": true, + "requires": { + "agentkeepalive": "^3.4.1", + "cacache": "^12.0.0", + "http-cache-semantics": "^3.8.1", + "http-proxy-agent": "^2.1.0", + "https-proxy-agent": "^2.2.3", + "lru-cache": "^5.1.1", + "mississippi": "^3.0.0", + "node-fetch-npm": "^2.0.2", + "promise-retry": "^1.1.1", + "socks-proxy-agent": "^4.0.0", + "ssri": "^6.0.0" + } + }, + "map-age-cleaner": { + "version": "0.1.3", + "bundled": true, + "dev": true, + "requires": { + "p-defer": "^1.0.0" + } + }, + "meant": { + "version": "1.0.1", + "bundled": true, + "dev": true + }, + "mem": { + "version": "4.3.0", + "bundled": true, + "dev": true, + "requires": { + "map-age-cleaner": "^0.1.1", + "mimic-fn": "^2.0.0", + "p-is-promise": "^2.0.0" + }, + "dependencies": { + "mimic-fn": { + "version": "2.1.0", + "bundled": true, + "dev": true + } + } + }, + "mime-db": { + "version": "1.35.0", + "bundled": true, + "dev": true + }, + "mime-types": { + "version": "2.1.19", + "bundled": true, + "dev": true, + "requires": { + "mime-db": "~1.35.0" + } + }, + "minimatch": { + "version": "3.0.4", + "bundled": true, + "dev": true, + "requires": { + "brace-expansion": "^1.1.7" + } + }, + "minizlib": { + "version": "1.3.3", + "bundled": true, + "dev": true, + "requires": { + "minipass": "^2.9.0" + }, + "dependencies": { + "minipass": { + "version": "2.9.0", + "bundled": true, + "dev": true, + "requires": { + "safe-buffer": "^5.1.2", + "yallist": "^3.0.0" + } + } + } + }, + "mississippi": { + "version": "3.0.0", + "bundled": true, + "dev": true, + "requires": { + "concat-stream": "^1.5.0", + "duplexify": "^3.4.2", + "end-of-stream": "^1.1.0", + "flush-write-stream": "^1.0.0", + "from2": "^2.1.0", + "parallel-transform": "^1.1.0", + "pump": "^3.0.0", + "pumpify": "^1.3.3", + "stream-each": "^1.1.0", + "through2": "^2.0.0" + } + }, + "mkdirp": { + "version": "0.5.5", + "bundled": true, + "dev": true, + "requires": { + "minimist": "^1.2.5" + }, + 
"dependencies": { + "minimist": { + "version": "1.2.5", + "bundled": true, + "dev": true + } + } + }, + "move-concurrently": { + "version": "1.0.1", + "bundled": true, + "dev": true, + "requires": { + "aproba": "^1.1.1", + "copy-concurrently": "^1.0.0", + "fs-write-stream-atomic": "^1.0.8", + "mkdirp": "^0.5.1", + "rimraf": "^2.5.4", + "run-queue": "^1.0.3" + }, + "dependencies": { + "aproba": { + "version": "1.2.0", + "bundled": true, + "dev": true + } + } + }, + "ms": { + "version": "2.1.1", + "bundled": true, + "dev": true + }, + "mute-stream": { + "version": "0.0.7", + "bundled": true, + "dev": true + }, + "nice-try": { + "version": "1.0.5", + "bundled": true, + "dev": true + }, + "node-fetch-npm": { + "version": "2.0.2", + "bundled": true, + "dev": true, + "requires": { + "encoding": "^0.1.11", + "json-parse-better-errors": "^1.0.0", + "safe-buffer": "^5.1.1" + } + }, + "node-gyp": { + "version": "5.1.0", + "bundled": true, + "dev": true, + "requires": { + "env-paths": "^2.2.0", + "glob": "^7.1.4", + "graceful-fs": "^4.2.2", + "mkdirp": "^0.5.1", + "nopt": "^4.0.1", + "npmlog": "^4.1.2", + "request": "^2.88.0", + "rimraf": "^2.6.3", + "semver": "^5.7.1", + "tar": "^4.4.12", + "which": "^1.3.1" + } + }, + "nopt": { + "version": "4.0.3", + "bundled": true, + "dev": true, + "requires": { + "abbrev": "1", + "osenv": "^0.1.4" + } + }, + "normalize-package-data": { + "version": "2.5.0", + "bundled": true, + "dev": true, + "requires": { + "hosted-git-info": "^2.1.4", + "resolve": "^1.10.0", + "semver": "2 || 3 || 4 || 5", + "validate-npm-package-license": "^3.0.1" + }, + "dependencies": { + "resolve": { + "version": "1.10.0", + "bundled": true, + "dev": true, + "requires": { + "path-parse": "^1.0.6" + } + } + } + }, + "npm-audit-report": { + "version": "1.3.2", + "bundled": true, + "dev": true, + "requires": { + "cli-table3": "^0.5.0", + "console-control-strings": "^1.1.0" + } + }, + "npm-bundled": { + "version": "1.1.1", + "bundled": true, + "dev": true, + "requires": { + "npm-normalize-package-bin": "^1.0.1" + } + }, + "npm-cache-filename": { + "version": "1.0.2", + "bundled": true, + "dev": true + }, + "npm-install-checks": { + "version": "3.0.2", + "bundled": true, + "dev": true, + "requires": { + "semver": "^2.3.0 || 3.x || 4 || 5" + } + }, + "npm-lifecycle": { + "version": "3.1.4", + "bundled": true, + "dev": true, + "requires": { + "byline": "^5.0.0", + "graceful-fs": "^4.1.15", + "node-gyp": "^5.0.2", + "resolve-from": "^4.0.0", + "slide": "^1.1.6", + "uid-number": "0.0.6", + "umask": "^1.1.0", + "which": "^1.3.1" + } + }, + "npm-logical-tree": { + "version": "1.2.1", + "bundled": true, + "dev": true + }, + "npm-normalize-package-bin": { + "version": "1.0.1", + "bundled": true, + "dev": true + }, + "npm-package-arg": { + "version": "6.1.1", + "bundled": true, + "dev": true, + "requires": { + "hosted-git-info": "^2.7.1", + "osenv": "^0.1.5", + "semver": "^5.6.0", + "validate-npm-package-name": "^3.0.0" + } + }, + "npm-packlist": { + "version": "1.4.8", + "bundled": true, + "dev": true, + "requires": { + "ignore-walk": "^3.0.1", + "npm-bundled": "^1.0.1", + "npm-normalize-package-bin": "^1.0.1" + } + }, + "npm-pick-manifest": { + "version": "3.0.2", + "bundled": true, + "dev": true, + "requires": { + "figgy-pudding": "^3.5.1", + "npm-package-arg": "^6.0.0", + "semver": "^5.4.1" + } + }, + "npm-profile": { + "version": "4.0.4", + "bundled": true, + "dev": true, + "requires": { + "aproba": "^1.1.2 || 2", + "figgy-pudding": "^3.4.1", + "npm-registry-fetch": "^4.0.0" + } + }, + 
"npm-registry-fetch": { + "version": "4.0.5", + "bundled": true, + "dev": true, + "requires": { + "bluebird": "^3.5.1", + "figgy-pudding": "^3.4.1", + "JSONStream": "^1.3.4", + "lru-cache": "^5.1.1", + "make-fetch-happen": "^5.0.0", + "npm-package-arg": "^6.1.0", + "safe-buffer": "^5.2.0" + }, + "dependencies": { + "safe-buffer": { + "version": "5.2.1", + "bundled": true, + "dev": true + } + } + }, + "npm-run-path": { + "version": "2.0.2", + "bundled": true, + "dev": true, + "requires": { + "path-key": "^2.0.0" + } + }, + "npm-user-validate": { + "version": "1.0.0", + "bundled": true, + "dev": true + }, + "npmlog": { + "version": "4.1.2", + "bundled": true, + "dev": true, + "requires": { + "are-we-there-yet": "~1.1.2", + "console-control-strings": "~1.1.0", + "gauge": "~2.7.3", + "set-blocking": "~2.0.0" + } + }, + "number-is-nan": { + "version": "1.0.1", + "bundled": true, + "dev": true + }, + "oauth-sign": { + "version": "0.9.0", + "bundled": true, + "dev": true + }, + "object-assign": { + "version": "4.1.1", + "bundled": true, + "dev": true + }, + "object-keys": { + "version": "1.0.12", + "bundled": true, + "dev": true + }, + "object.getownpropertydescriptors": { + "version": "2.0.3", + "bundled": true, + "dev": true, + "requires": { + "define-properties": "^1.1.2", + "es-abstract": "^1.5.1" + } + }, + "once": { + "version": "1.4.0", + "bundled": true, + "dev": true, + "requires": { + "wrappy": "1" + } + }, + "opener": { + "version": "1.5.1", + "bundled": true, + "dev": true + }, + "os-homedir": { + "version": "1.0.2", + "bundled": true, + "dev": true + }, + "os-locale": { + "version": "3.1.0", + "bundled": true, + "dev": true, + "requires": { + "execa": "^1.0.0", + "lcid": "^2.0.0", + "mem": "^4.0.0" + }, + "dependencies": { + "cross-spawn": { + "version": "6.0.5", + "bundled": true, + "dev": true, + "requires": { + "nice-try": "^1.0.4", + "path-key": "^2.0.1", + "semver": "^5.5.0", + "shebang-command": "^1.2.0", + "which": "^1.2.9" + } + }, + "execa": { + "version": "1.0.0", + "bundled": true, + "dev": true, + "requires": { + "cross-spawn": "^6.0.0", + "get-stream": "^4.0.0", + "is-stream": "^1.1.0", + "npm-run-path": "^2.0.0", + "p-finally": "^1.0.0", + "signal-exit": "^3.0.0", + "strip-eof": "^1.0.0" + } + } + } + }, + "os-tmpdir": { + "version": "1.0.2", + "bundled": true, + "dev": true + }, + "osenv": { + "version": "0.1.5", + "bundled": true, + "dev": true, + "requires": { + "os-homedir": "^1.0.0", + "os-tmpdir": "^1.0.0" + } + }, + "p-defer": { + "version": "1.0.0", + "bundled": true, + "dev": true + }, + "p-finally": { + "version": "1.0.0", + "bundled": true, + "dev": true + }, + "p-is-promise": { + "version": "2.1.0", + "bundled": true, + "dev": true + }, + "p-limit": { + "version": "1.2.0", + "bundled": true, + "dev": true, + "requires": { + "p-try": "^1.0.0" + } + }, + "p-locate": { + "version": "2.0.0", + "bundled": true, + "dev": true, + "requires": { + "p-limit": "^1.1.0" + } + }, + "p-try": { + "version": "1.0.0", + "bundled": true, + "dev": true + }, + "package-json": { + "version": "4.0.1", + "bundled": true, + "dev": true, + "requires": { + "got": "^6.7.1", + "registry-auth-token": "^3.0.1", + "registry-url": "^3.0.3", + "semver": "^5.1.0" + } + }, + "pacote": { + "version": "9.5.12", + "bundled": true, + "dev": true, + "requires": { + "bluebird": "^3.5.3", + "cacache": "^12.0.2", + "chownr": "^1.1.2", + "figgy-pudding": "^3.5.1", + "get-stream": "^4.1.0", + "glob": "^7.1.3", + "infer-owner": "^1.0.4", + "lru-cache": "^5.1.1", + "make-fetch-happen": "^5.0.0", + 
"minimatch": "^3.0.4", + "minipass": "^2.3.5", + "mississippi": "^3.0.0", + "mkdirp": "^0.5.1", + "normalize-package-data": "^2.4.0", + "npm-normalize-package-bin": "^1.0.0", + "npm-package-arg": "^6.1.0", + "npm-packlist": "^1.1.12", + "npm-pick-manifest": "^3.0.0", + "npm-registry-fetch": "^4.0.0", + "osenv": "^0.1.5", + "promise-inflight": "^1.0.1", + "promise-retry": "^1.1.1", + "protoduck": "^5.0.1", + "rimraf": "^2.6.2", + "safe-buffer": "^5.1.2", + "semver": "^5.6.0", + "ssri": "^6.0.1", + "tar": "^4.4.10", + "unique-filename": "^1.1.1", + "which": "^1.3.1" + }, + "dependencies": { + "minipass": { + "version": "2.9.0", + "bundled": true, + "dev": true, + "requires": { + "safe-buffer": "^5.1.2", + "yallist": "^3.0.0" + } + } + } + }, + "parallel-transform": { + "version": "1.1.0", + "bundled": true, + "dev": true, + "requires": { + "cyclist": "~0.2.2", + "inherits": "^2.0.3", + "readable-stream": "^2.1.5" + }, + "dependencies": { + "readable-stream": { + "version": "2.3.6", + "bundled": true, + "dev": true, + "requires": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "string_decoder": { + "version": "1.1.1", + "bundled": true, + "dev": true, + "requires": { + "safe-buffer": "~5.1.0" + } + } + } + }, + "path-exists": { + "version": "3.0.0", + "bundled": true, + "dev": true + }, + "path-is-absolute": { + "version": "1.0.1", + "bundled": true, + "dev": true + }, + "path-is-inside": { + "version": "1.0.2", + "bundled": true, + "dev": true + }, + "path-key": { + "version": "2.0.1", + "bundled": true, + "dev": true + }, + "path-parse": { + "version": "1.0.6", + "bundled": true, + "dev": true + }, + "performance-now": { + "version": "2.1.0", + "bundled": true, + "dev": true + }, + "pify": { + "version": "3.0.0", + "bundled": true, + "dev": true + }, + "prepend-http": { + "version": "1.0.4", + "bundled": true, + "dev": true + }, + "process-nextick-args": { + "version": "2.0.0", + "bundled": true, + "dev": true + }, + "promise-inflight": { + "version": "1.0.1", + "bundled": true, + "dev": true + }, + "promise-retry": { + "version": "1.1.1", + "bundled": true, + "dev": true, + "requires": { + "err-code": "^1.0.0", + "retry": "^0.10.0" + }, + "dependencies": { + "retry": { + "version": "0.10.1", + "bundled": true, + "dev": true + } + } + }, + "promzard": { + "version": "0.3.0", + "bundled": true, + "dev": true, + "requires": { + "read": "1" + } + }, + "proto-list": { + "version": "1.2.4", + "bundled": true, + "dev": true + }, + "protoduck": { + "version": "5.0.1", + "bundled": true, + "dev": true, + "requires": { + "genfun": "^5.0.0" + } + }, + "prr": { + "version": "1.0.1", + "bundled": true, + "dev": true + }, + "pseudomap": { + "version": "1.0.2", + "bundled": true, + "dev": true + }, + "psl": { + "version": "1.1.29", + "bundled": true, + "dev": true + }, + "pump": { + "version": "3.0.0", + "bundled": true, + "dev": true, + "requires": { + "end-of-stream": "^1.1.0", + "once": "^1.3.1" + } + }, + "pumpify": { + "version": "1.5.1", + "bundled": true, + "dev": true, + "requires": { + "duplexify": "^3.6.0", + "inherits": "^2.0.3", + "pump": "^2.0.0" + }, + "dependencies": { + "pump": { + "version": "2.0.1", + "bundled": true, + "dev": true, + "requires": { + "end-of-stream": "^1.1.0", + "once": "^1.3.1" + } + } + } + }, + "punycode": { + "version": "1.4.1", + "bundled": true, + "dev": true + }, + "qrcode-terminal": { + "version": 
"0.12.0", + "bundled": true, + "dev": true + }, + "qs": { + "version": "6.5.2", + "bundled": true, + "dev": true + }, + "query-string": { + "version": "6.8.2", + "bundled": true, + "dev": true, + "requires": { + "decode-uri-component": "^0.2.0", + "split-on-first": "^1.0.0", + "strict-uri-encode": "^2.0.0" + } + }, + "qw": { + "version": "1.0.1", + "bundled": true, + "dev": true + }, + "rc": { + "version": "1.2.8", + "bundled": true, + "dev": true, + "requires": { + "deep-extend": "^0.6.0", + "ini": "~1.3.0", + "minimist": "^1.2.0", + "strip-json-comments": "~2.0.1" + }, + "dependencies": { + "minimist": { + "version": "1.2.5", + "bundled": true, + "dev": true + } + } + }, + "read": { + "version": "1.0.7", + "bundled": true, + "dev": true, + "requires": { + "mute-stream": "~0.0.4" + } + }, + "read-cmd-shim": { + "version": "1.0.5", + "bundled": true, + "dev": true, + "requires": { + "graceful-fs": "^4.1.2" + } + }, + "read-installed": { + "version": "4.0.3", + "bundled": true, + "dev": true, + "requires": { + "debuglog": "^1.0.1", + "graceful-fs": "^4.1.2", + "read-package-json": "^2.0.0", + "readdir-scoped-modules": "^1.0.0", + "semver": "2 || 3 || 4 || 5", + "slide": "~1.1.3", + "util-extend": "^1.0.1" + } + }, + "read-package-json": { + "version": "2.1.1", + "bundled": true, + "dev": true, + "requires": { + "glob": "^7.1.1", + "graceful-fs": "^4.1.2", + "json-parse-better-errors": "^1.0.1", + "normalize-package-data": "^2.0.0", + "npm-normalize-package-bin": "^1.0.0" + } + }, + "read-package-tree": { + "version": "5.3.1", + "bundled": true, + "dev": true, + "requires": { + "read-package-json": "^2.0.0", + "readdir-scoped-modules": "^1.0.0", + "util-promisify": "^2.1.0" + } + }, + "readable-stream": { + "version": "3.6.0", + "bundled": true, + "dev": true, + "requires": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + } + }, + "readdir-scoped-modules": { + "version": "1.1.0", + "bundled": true, + "dev": true, + "requires": { + "debuglog": "^1.0.1", + "dezalgo": "^1.0.0", + "graceful-fs": "^4.1.2", + "once": "^1.3.0" + } + }, + "registry-auth-token": { + "version": "3.4.0", + "bundled": true, + "dev": true, + "requires": { + "rc": "^1.1.6", + "safe-buffer": "^5.0.1" + } + }, + "registry-url": { + "version": "3.1.0", + "bundled": true, + "dev": true, + "requires": { + "rc": "^1.0.1" + } + }, + "request": { + "version": "2.88.0", + "bundled": true, + "dev": true, + "requires": { + "aws-sign2": "~0.7.0", + "aws4": "^1.8.0", + "caseless": "~0.12.0", + "combined-stream": "~1.0.6", + "extend": "~3.0.2", + "forever-agent": "~0.6.1", + "form-data": "~2.3.2", + "har-validator": "~5.1.0", + "http-signature": "~1.2.0", + "is-typedarray": "~1.0.0", + "isstream": "~0.1.2", + "json-stringify-safe": "~5.0.1", + "mime-types": "~2.1.19", + "oauth-sign": "~0.9.0", + "performance-now": "^2.1.0", + "qs": "~6.5.2", + "safe-buffer": "^5.1.2", + "tough-cookie": "~2.4.3", + "tunnel-agent": "^0.6.0", + "uuid": "^3.3.2" + } + }, + "require-directory": { + "version": "2.1.1", + "bundled": true, + "dev": true + }, + "require-main-filename": { + "version": "1.0.1", + "bundled": true, + "dev": true + }, + "resolve-from": { + "version": "4.0.0", + "bundled": true, + "dev": true + }, + "retry": { + "version": "0.12.0", + "bundled": true, + "dev": true + }, + "rimraf": { + "version": "2.7.1", + "bundled": true, + "dev": true, + "requires": { + "glob": "^7.1.3" + } + }, + "run-queue": { + "version": "1.0.3", + "bundled": true, + "dev": true, + "requires": { + "aproba": "^1.1.1" + }, + 
"dependencies": { + "aproba": { + "version": "1.2.0", + "bundled": true, + "dev": true + } + } + }, + "safe-buffer": { + "version": "5.1.2", + "bundled": true, + "dev": true + }, + "safer-buffer": { + "version": "2.1.2", + "bundled": true, + "dev": true + }, + "semver": { + "version": "5.7.1", + "bundled": true, + "dev": true + }, + "semver-diff": { + "version": "2.1.0", + "bundled": true, + "dev": true, + "requires": { + "semver": "^5.0.3" + } + }, + "set-blocking": { + "version": "2.0.0", + "bundled": true, + "dev": true + }, + "sha": { + "version": "3.0.0", + "bundled": true, + "dev": true, + "requires": { + "graceful-fs": "^4.1.2" + } + }, + "shebang-command": { + "version": "1.2.0", + "bundled": true, + "dev": true, + "requires": { + "shebang-regex": "^1.0.0" + } + }, + "shebang-regex": { + "version": "1.0.0", + "bundled": true, + "dev": true + }, + "signal-exit": { + "version": "3.0.2", + "bundled": true, + "dev": true + }, + "slide": { + "version": "1.1.6", + "bundled": true, + "dev": true + }, + "smart-buffer": { + "version": "4.1.0", + "bundled": true, + "dev": true + }, + "socks": { + "version": "2.3.3", + "bundled": true, + "dev": true, + "requires": { + "ip": "1.1.5", + "smart-buffer": "^4.1.0" + } + }, + "socks-proxy-agent": { + "version": "4.0.2", + "bundled": true, + "dev": true, + "requires": { + "agent-base": "~4.2.1", + "socks": "~2.3.2" + }, + "dependencies": { + "agent-base": { + "version": "4.2.1", + "bundled": true, + "dev": true, + "requires": { + "es6-promisify": "^5.0.0" + } + } + } + }, + "sorted-object": { + "version": "2.0.1", + "bundled": true, + "dev": true + }, + "sorted-union-stream": { + "version": "2.1.3", + "bundled": true, + "dev": true, + "requires": { + "from2": "^1.3.0", + "stream-iterate": "^1.1.0" + }, + "dependencies": { + "from2": { + "version": "1.3.0", + "bundled": true, + "dev": true, + "requires": { + "inherits": "~2.0.1", + "readable-stream": "~1.1.10" + } + }, + "isarray": { + "version": "0.0.1", + "bundled": true, + "dev": true + }, + "readable-stream": { + "version": "1.1.14", + "bundled": true, + "dev": true, + "requires": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.1", + "isarray": "0.0.1", + "string_decoder": "~0.10.x" + } + }, + "string_decoder": { + "version": "0.10.31", + "bundled": true, + "dev": true + } + } + }, + "spdx-correct": { + "version": "3.0.0", + "bundled": true, + "dev": true, + "requires": { + "spdx-expression-parse": "^3.0.0", + "spdx-license-ids": "^3.0.0" + } + }, + "spdx-exceptions": { + "version": "2.1.0", + "bundled": true, + "dev": true + }, + "spdx-expression-parse": { + "version": "3.0.0", + "bundled": true, + "dev": true, + "requires": { + "spdx-exceptions": "^2.1.0", + "spdx-license-ids": "^3.0.0" + } + }, + "spdx-license-ids": { + "version": "3.0.5", + "bundled": true, + "dev": true + }, + "split-on-first": { + "version": "1.1.0", + "bundled": true, + "dev": true + }, + "sshpk": { + "version": "1.14.2", + "bundled": true, + "dev": true, + "requires": { + "asn1": "~0.2.3", + "assert-plus": "^1.0.0", + "bcrypt-pbkdf": "^1.0.0", + "dashdash": "^1.12.0", + "ecc-jsbn": "~0.1.1", + "getpass": "^0.1.1", + "jsbn": "~0.1.0", + "safer-buffer": "^2.0.2", + "tweetnacl": "~0.14.0" + } + }, + "ssri": { + "version": "6.0.1", + "bundled": true, + "dev": true, + "requires": { + "figgy-pudding": "^3.5.1" + } + }, + "stream-each": { + "version": "1.2.2", + "bundled": true, + "dev": true, + "requires": { + "end-of-stream": "^1.1.0", + "stream-shift": "^1.0.0" + } + }, + "stream-iterate": { + "version": "1.2.0", + 
"bundled": true, + "dev": true, + "requires": { + "readable-stream": "^2.1.5", + "stream-shift": "^1.0.0" + }, + "dependencies": { + "readable-stream": { + "version": "2.3.6", + "bundled": true, + "dev": true, + "requires": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "string_decoder": { + "version": "1.1.1", + "bundled": true, + "dev": true, + "requires": { + "safe-buffer": "~5.1.0" + } + } + } + }, + "stream-shift": { + "version": "1.0.0", + "bundled": true, + "dev": true + }, + "strict-uri-encode": { + "version": "2.0.0", + "bundled": true, + "dev": true + }, + "string_decoder": { + "version": "1.3.0", + "bundled": true, + "dev": true, + "requires": { + "safe-buffer": "~5.2.0" + }, + "dependencies": { + "safe-buffer": { + "version": "5.2.0", + "bundled": true, + "dev": true + } + } + }, + "string-width": { + "version": "2.1.1", + "bundled": true, + "dev": true, + "requires": { + "is-fullwidth-code-point": "^2.0.0", + "strip-ansi": "^4.0.0" + }, + "dependencies": { + "ansi-regex": { + "version": "3.0.0", + "bundled": true, + "dev": true + }, + "is-fullwidth-code-point": { + "version": "2.0.0", + "bundled": true, + "dev": true + }, + "strip-ansi": { + "version": "4.0.0", + "bundled": true, + "dev": true, + "requires": { + "ansi-regex": "^3.0.0" + } + } + } + }, + "stringify-package": { + "version": "1.0.1", + "bundled": true, + "dev": true + }, + "strip-ansi": { + "version": "3.0.1", + "bundled": true, + "dev": true, + "requires": { + "ansi-regex": "^2.0.0" + } + }, + "strip-eof": { + "version": "1.0.0", + "bundled": true, + "dev": true + }, + "strip-json-comments": { + "version": "2.0.1", + "bundled": true, + "dev": true + }, + "supports-color": { + "version": "5.4.0", + "bundled": true, + "dev": true, + "requires": { + "has-flag": "^3.0.0" + } + }, + "tar": { + "version": "4.4.13", + "bundled": true, + "dev": true, + "requires": { + "chownr": "^1.1.1", + "fs-minipass": "^1.2.5", + "minipass": "^2.8.6", + "minizlib": "^1.2.1", + "mkdirp": "^0.5.0", + "safe-buffer": "^5.1.2", + "yallist": "^3.0.3" + }, + "dependencies": { + "minipass": { + "version": "2.9.0", + "bundled": true, + "dev": true, + "requires": { + "safe-buffer": "^5.1.2", + "yallist": "^3.0.0" + } + } + } + }, + "term-size": { + "version": "1.2.0", + "bundled": true, + "dev": true, + "requires": { + "execa": "^0.7.0" + } + }, + "text-table": { + "version": "0.2.0", + "bundled": true, + "dev": true + }, + "through": { + "version": "2.3.8", + "bundled": true, + "dev": true + }, + "through2": { + "version": "2.0.3", + "bundled": true, + "dev": true, + "requires": { + "readable-stream": "^2.1.5", + "xtend": "~4.0.1" + }, + "dependencies": { + "readable-stream": { + "version": "2.3.6", + "bundled": true, + "dev": true, + "requires": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "string_decoder": { + "version": "1.1.1", + "bundled": true, + "dev": true, + "requires": { + "safe-buffer": "~5.1.0" + } + } + } + }, + "timed-out": { + "version": "4.0.1", + "bundled": true, + "dev": true + }, + "tiny-relative-date": { + "version": "1.3.0", + "bundled": true, + "dev": true + }, + "tough-cookie": { + "version": "2.4.3", + "bundled": true, + "dev": true, + "requires": { + "psl": "^1.1.24", + "punycode": "^1.4.1" + } + 
}, + "tunnel-agent": { + "version": "0.6.0", + "bundled": true, + "dev": true, + "requires": { + "safe-buffer": "^5.0.1" + } + }, + "tweetnacl": { + "version": "0.14.5", + "bundled": true, + "dev": true, + "optional": true + }, + "typedarray": { + "version": "0.0.6", + "bundled": true, + "dev": true + }, + "uid-number": { + "version": "0.0.6", + "bundled": true, + "dev": true + }, + "umask": { + "version": "1.1.0", + "bundled": true, + "dev": true + }, + "unique-filename": { + "version": "1.1.1", + "bundled": true, + "dev": true, + "requires": { + "unique-slug": "^2.0.0" + } + }, + "unique-slug": { + "version": "2.0.0", + "bundled": true, + "dev": true, + "requires": { + "imurmurhash": "^0.1.4" + } + }, + "unique-string": { + "version": "1.0.0", + "bundled": true, + "dev": true, + "requires": { + "crypto-random-string": "^1.0.0" + } + }, + "unpipe": { + "version": "1.0.0", + "bundled": true, + "dev": true + }, + "unzip-response": { + "version": "2.0.1", + "bundled": true, + "dev": true + }, + "update-notifier": { + "version": "2.5.0", + "bundled": true, + "dev": true, + "requires": { + "boxen": "^1.2.1", + "chalk": "^2.0.1", + "configstore": "^3.0.0", + "import-lazy": "^2.1.0", + "is-ci": "^1.0.10", + "is-installed-globally": "^0.1.0", + "is-npm": "^1.0.0", + "latest-version": "^3.0.0", + "semver-diff": "^2.0.0", + "xdg-basedir": "^3.0.0" + } + }, + "url-parse-lax": { + "version": "1.0.0", + "bundled": true, + "dev": true, + "requires": { + "prepend-http": "^1.0.1" + } + }, + "util-deprecate": { + "version": "1.0.2", + "bundled": true, + "dev": true + }, + "util-extend": { + "version": "1.0.3", + "bundled": true, + "dev": true + }, + "util-promisify": { + "version": "2.1.0", + "bundled": true, + "dev": true, + "requires": { + "object.getownpropertydescriptors": "^2.0.3" + } + }, + "uuid": { + "version": "3.3.3", + "bundled": true, + "dev": true + }, + "validate-npm-package-license": { + "version": "3.0.4", + "bundled": true, + "dev": true, + "requires": { + "spdx-correct": "^3.0.0", + "spdx-expression-parse": "^3.0.0" + } + }, + "validate-npm-package-name": { + "version": "3.0.0", + "bundled": true, + "dev": true, + "requires": { + "builtins": "^1.0.3" + } + }, + "verror": { + "version": "1.10.0", + "bundled": true, + "dev": true, + "requires": { + "assert-plus": "^1.0.0", + "core-util-is": "1.0.2", + "extsprintf": "^1.2.0" + } + }, + "wcwidth": { + "version": "1.0.1", + "bundled": true, + "dev": true, + "requires": { + "defaults": "^1.0.3" + } + }, + "which": { + "version": "1.3.1", + "bundled": true, + "dev": true, + "requires": { + "isexe": "^2.0.0" + } + }, + "which-module": { + "version": "2.0.0", + "bundled": true, + "dev": true + }, + "wide-align": { + "version": "1.1.2", + "bundled": true, + "dev": true, + "requires": { + "string-width": "^1.0.2" + }, + "dependencies": { + "string-width": { + "version": "1.0.2", + "bundled": true, + "dev": true, + "requires": { + "code-point-at": "^1.0.0", + "is-fullwidth-code-point": "^1.0.0", + "strip-ansi": "^3.0.0" + } + } + } + }, + "widest-line": { + "version": "2.0.1", + "bundled": true, + "dev": true, + "requires": { + "string-width": "^2.1.1" + } + }, + "worker-farm": { + "version": "1.7.0", + "bundled": true, + "dev": true, + "requires": { + "errno": "~0.1.7" + } + }, + "wrap-ansi": { + "version": "2.1.0", + "bundled": true, + "dev": true, + "requires": { + "string-width": "^1.0.1", + "strip-ansi": "^3.0.1" + }, + "dependencies": { + "string-width": { + "version": "1.0.2", + "bundled": true, + "dev": true, + "requires": { + 
"code-point-at": "^1.0.0", + "is-fullwidth-code-point": "^1.0.0", + "strip-ansi": "^3.0.0" + } + } + } + }, + "wrappy": { + "version": "1.0.2", + "bundled": true, + "dev": true + }, + "write-file-atomic": { + "version": "2.4.3", + "bundled": true, + "dev": true, + "requires": { + "graceful-fs": "^4.1.11", + "imurmurhash": "^0.1.4", + "signal-exit": "^3.0.2" + } + }, + "xdg-basedir": { + "version": "3.0.0", + "bundled": true, + "dev": true + }, + "xtend": { + "version": "4.0.1", + "bundled": true, + "dev": true + }, + "y18n": { + "version": "4.0.0", + "bundled": true, + "dev": true + }, + "yallist": { + "version": "3.0.3", + "bundled": true, + "dev": true + }, + "yargs": { + "version": "11.1.1", + "bundled": true, + "dev": true, + "requires": { + "cliui": "^4.0.0", + "decamelize": "^1.1.1", + "find-up": "^2.1.0", + "get-caller-file": "^1.0.1", + "os-locale": "^3.1.0", + "require-directory": "^2.1.1", + "require-main-filename": "^1.0.1", + "set-blocking": "^2.0.0", + "string-width": "^2.0.0", + "which-module": "^2.0.0", + "y18n": "^3.2.1", + "yargs-parser": "^9.0.2" + }, + "dependencies": { + "y18n": { + "version": "3.2.1", + "bundled": true, + "dev": true + } + } + }, + "yargs-parser": { + "version": "9.0.2", + "bundled": true, + "dev": true, + "requires": { + "camelcase": "^4.1.0" + } + } + } + }, + "npm-run-path": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", + "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", + "dev": true, + "requires": { + "path-key": "^3.0.0" + } + }, + "nyc": { + "version": "15.1.0", + "resolved": "https://registry.npmjs.org/nyc/-/nyc-15.1.0.tgz", + "integrity": "sha512-jMW04n9SxKdKi1ZMGhvUTHBN0EICCRkHemEoE5jm6mTYcqcdas0ATzgUgejlQUHMvpnOZqGB5Xxsv9KxJW1j8A==", + "dev": true, + "requires": { + "@istanbuljs/load-nyc-config": "^1.0.0", + "@istanbuljs/schema": "^0.1.2", + "caching-transform": "^4.0.0", + "convert-source-map": "^1.7.0", + "decamelize": "^1.2.0", + "find-cache-dir": "^3.2.0", + "find-up": "^4.1.0", + "foreground-child": "^2.0.0", + "get-package-type": "^0.1.0", + "glob": "^7.1.6", + "istanbul-lib-coverage": "^3.0.0", + "istanbul-lib-hook": "^3.0.0", + "istanbul-lib-instrument": "^4.0.0", + "istanbul-lib-processinfo": "^2.0.2", + "istanbul-lib-report": "^3.0.0", + "istanbul-lib-source-maps": "^4.0.0", + "istanbul-reports": "^3.0.2", + "make-dir": "^3.0.0", + "node-preload": "^0.2.1", + "p-map": "^3.0.0", + "process-on-spawn": "^1.0.0", + "resolve-from": "^5.0.0", + "rimraf": "^3.0.0", + "signal-exit": "^3.0.2", + "spawn-wrap": "^2.0.0", + "test-exclude": "^6.0.0", + "yargs": "^15.0.2" + }, + "dependencies": { + "find-up": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", + "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", + "dev": true, + "requires": { + "locate-path": "^5.0.0", + "path-exists": "^4.0.0" + } + }, + "locate-path": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", + "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", + "dev": true, + "requires": { + "p-locate": "^4.1.0" + } + }, + "p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "dev": 
true, + "requires": { + "p-try": "^2.0.0" + } + }, + "p-locate": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", + "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", + "dev": true, + "requires": { + "p-limit": "^2.2.0" + } + }, + "p-map": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-map/-/p-map-3.0.0.tgz", + "integrity": "sha512-d3qXVTF/s+W+CdJ5A29wywV2n8CQQYahlgz2bFiA+4eVNJbHJodPZ+/gXwPGh0bOqA+j8S+6+ckmvLGPk1QpxQ==", + "dev": true, + "requires": { + "aggregate-error": "^3.0.0" + } + }, + "p-try": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "dev": true + }, + "path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true + } + } + }, + "once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=", + "dev": true, + "requires": { + "wrappy": "1" + } + }, + "onetime": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.0.tgz", + "integrity": "sha512-5NcSkPHhwTVFIQN+TUqXoS5+dlElHXdpAWu9I0HP20YOtIi+aZ0Ct82jdlILDxjLEAWwvm+qj1m6aEtsDVmm6Q==", + "dev": true, + "requires": { + "mimic-fn": "^2.1.0" + } + }, + "p-each-series": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-each-series/-/p-each-series-2.2.0.tgz", + "integrity": "sha512-ycIL2+1V32th+8scbpTvyHNaHe02z0sjgh91XXjAk+ZeXoPN4Z46DVUnzdso0aX4KckKw0FNNFHdjZ2UsZvxiA==", + "dev": true + }, + "p-filter": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/p-filter/-/p-filter-2.1.0.tgz", + "integrity": "sha512-ZBxxZ5sL2HghephhpGAQdoskxplTwr7ICaehZwLIlfL6acuVgZPm8yBNuRAFBGEqtD/hmUeq9eqLg2ys9Xr/yw==", + "dev": true, + "requires": { + "p-map": "^2.0.0" + } + }, + "p-is-promise": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-is-promise/-/p-is-promise-3.0.0.tgz", + "integrity": "sha512-Wo8VsW4IRQSKVXsJCn7TomUaVtyfjVDn3nUP7kE967BQk0CwFpdbZs0X0uk5sW9mkBa9eNM7hCMaG93WUAwxYQ==", + "dev": true + }, + "p-limit": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-1.3.0.tgz", + "integrity": "sha512-vvcXsLAJ9Dr5rQOPk7toZQZJApBl2K4J6dANSsEuh6QI41JYcsS/qhTGa9ErIUUgK3WNQoJYvylxvjqmiqEA9Q==", + "dev": true, + "requires": { + "p-try": "^1.0.0" + } + }, + "p-locate": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-2.0.0.tgz", + "integrity": "sha1-IKAQOyIqcMj9OcwuWAaA893l7EM=", + "dev": true, + "requires": { + "p-limit": "^1.1.0" + } + }, + "p-map": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/p-map/-/p-map-2.1.0.tgz", + "integrity": "sha512-y3b8Kpd8OAN444hxfBbFfj1FY/RjtTd8tzYwhUqNYXx0fXx2iX4maP4Qr6qhIKbQXI02wTLAda4fYUbDagTUFw==", + "dev": true + }, + "p-reduce": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/p-reduce/-/p-reduce-2.1.0.tgz", + "integrity": "sha512-2USApvnsutq8uoxZBGbbWM0JIYLiEMJ9RlaN7fAzVNb9OZN0SHjjTTfIcb667XynS5Y1VhwDJVDa72TnPzAYWw==", + "dev": true + }, + "p-retry": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/p-retry/-/p-retry-4.5.0.tgz", + "integrity": 
"sha512-5Hwh4aVQSu6BEP+w2zKlVXtFAaYQe1qWuVADSgoeVlLjwe/Q/AMSoRR4MDeaAfu8llT+YNbEijWu/YF3m6avkg==", + "dev": true, + "requires": { + "@types/retry": "^0.12.0", + "retry": "^0.12.0" + } + }, + "p-try": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-1.0.0.tgz", + "integrity": "sha1-y8ec26+P1CKOE/Yh8rGiN8GyB7M=", + "dev": true + }, + "package-hash": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/package-hash/-/package-hash-4.0.0.tgz", + "integrity": "sha512-whdkPIooSu/bASggZ96BWVvZTRMOFxnyUG5PnTSGKoJE2gd5mbVNmR2Nj20QFzxYYgAXpoqC+AiXzl+UMRh7zQ==", + "dev": true, + "requires": { + "graceful-fs": "^4.1.15", + "hasha": "^5.0.0", + "lodash.flattendeep": "^4.4.0", + "release-zalgo": "^1.0.0" + } + }, + "parent-module": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", + "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", + "dev": true, + "requires": { + "callsites": "^3.0.0" + } + }, + "parse-json": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-4.0.0.tgz", + "integrity": "sha1-vjX1Qlvh9/bHRxhPmKeIy5lHfuA=", + "dev": true, + "requires": { + "error-ex": "^1.3.1", + "json-parse-better-errors": "^1.0.1" + } + }, + "path-exists": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", + "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", + "dev": true + }, + "path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=", + "dev": true + }, + "path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true + }, + "path-parse": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", + "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", + "dev": true + }, + "path-type": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-3.0.0.tgz", + "integrity": "sha512-T2ZUsdZFHgA3u4e5PfPbjd7HDDpxPnQb5jN0SrDsjNSuVXHJqtwTnWqG0B1jZrgmJ/7lj1EmVIByWt1gxGkWvg==", + "dev": true, + "requires": { + "pify": "^3.0.0" + } + }, + "picomatch": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.2.1.tgz", + "integrity": "sha512-ISBaA8xQNmwELC7eOjqFKMESB2VIqt4PPDD0nsS95b/9dZXvVKOlz9keMSnoGGKcOHXfTvDD6WMaRoSc9UuhRA==", + "dev": true + }, + "pify": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", + "integrity": "sha1-5aSs0sEB/fPZpNB/DbxNtJ3SgXY=", + "dev": true + }, + "pkg-conf": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/pkg-conf/-/pkg-conf-2.1.0.tgz", + "integrity": "sha1-ISZRTKbyq/69FoWW3xi6V4Z/AFg=", + "dev": true, + "requires": { + "find-up": "^2.0.0", + "load-json-file": "^4.0.0" + } + }, + "pkg-dir": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-4.2.0.tgz", + "integrity": "sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==", + "dev": true, + "requires": { + "find-up": "^4.0.0" + }, + "dependencies": { + "find-up": { + "version": "4.1.0", + "resolved": 
"https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", + "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", + "dev": true, + "requires": { + "locate-path": "^5.0.0", + "path-exists": "^4.0.0" + } + }, + "locate-path": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", + "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", + "dev": true, + "requires": { + "p-locate": "^4.1.0" + } + }, + "p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "dev": true, + "requires": { + "p-try": "^2.0.0" + } + }, + "p-locate": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", + "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", + "dev": true, + "requires": { + "p-limit": "^2.2.0" + } + }, + "p-try": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "dev": true + }, + "path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true + } + } + }, + "process-nextick-args": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", + "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==", + "dev": true + }, + "process-on-spawn": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/process-on-spawn/-/process-on-spawn-1.0.0.tgz", + "integrity": "sha512-1WsPDsUSMmZH5LeMLegqkPDrsGgsWwk1Exipy2hvB0o/F0ASzbpIctSCcZIK1ykJvtTJULEH+20WOFjMvGnCTg==", + "dev": true, + "requires": { + "fromentries": "^1.2.0" + } + }, + "pump": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz", + "integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==", + "dev": true, + "requires": { + "end-of-stream": "^1.1.0", + "once": "^1.3.1" + } + }, + "q": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/q/-/q-1.5.1.tgz", + "integrity": "sha1-fjL3W0E4EpHQRhHxvxQQmsAGUdc=", + "dev": true + }, + "queue-microtask": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", + "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", + "dev": true + }, + "quick-lru": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/quick-lru/-/quick-lru-1.1.0.tgz", + "integrity": "sha1-Q2CxfGETatOAeDl/8RQW4Ybc+7g=", + "dev": true + }, + "randombytes": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", + "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", + "dev": true, + "requires": { + "safe-buffer": "^5.1.0" + } + }, + "rc": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz", + "integrity": 
"sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==", + "dev": true, + "requires": { + "deep-extend": "^0.6.0", + "ini": "~1.3.0", + "minimist": "^1.2.0", + "strip-json-comments": "~2.0.1" + }, + "dependencies": { + "minimist": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz", + "integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw==", + "dev": true + } + } + }, + "read-pkg": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-3.0.0.tgz", + "integrity": "sha1-nLxoaXj+5l0WwA4rGcI3/Pbjg4k=", + "dev": true, + "requires": { + "load-json-file": "^4.0.0", + "normalize-package-data": "^2.3.2", + "path-type": "^3.0.0" + } + }, + "read-pkg-up": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-3.0.0.tgz", + "integrity": "sha1-PtSWaF26D4/hGNBpHcUfSh/5bwc=", + "dev": true, + "requires": { + "find-up": "^2.0.0", + "read-pkg": "^3.0.0" + } + }, + "readable-stream": { + "version": "2.3.7", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", + "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", + "dev": true, + "requires": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "readdirp": { + "version": "3.5.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.5.0.tgz", + "integrity": "sha512-cMhu7c/8rdhkHXWsY+osBhfSy0JikwpHK/5+imo+LpeasTF8ouErHrlYkwT0++njiyuDvc7OFY5T3ukvZ8qmFQ==", + "dev": true, + "requires": { + "picomatch": "^2.2.1" + } + }, + "redent": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/redent/-/redent-2.0.0.tgz", + "integrity": "sha1-wbIAe0LVfrE4kHmzyDM2OdXhzKo=", + "dev": true, + "requires": { + "indent-string": "^3.0.0", + "strip-indent": "^2.0.0" + } + }, + "redeyed": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/redeyed/-/redeyed-2.1.1.tgz", + "integrity": "sha1-iYS1gV2ZyyIEacme7v/jiRPmzAs=", + "dev": true, + "requires": { + "esprima": "~4.0.0" + } + }, + "registry-auth-token": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/registry-auth-token/-/registry-auth-token-4.1.1.tgz", + "integrity": "sha512-9bKS7nTl9+/A1s7tnPeGrUpRcVY+LUh7bfFgzpndALdPfXQBfQV77rQVtqgUV3ti4vc/Ik81Ex8UJDWDQ12zQA==", + "dev": true, + "requires": { + "rc": "^1.2.8" + } + }, + "release-zalgo": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/release-zalgo/-/release-zalgo-1.0.0.tgz", + "integrity": "sha1-CXALflB0Mpc5Mw5TXFqQ+2eFFzA=", + "dev": true, + "requires": { + "es6-error": "^4.0.1" + } + }, + "require-directory": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", + "integrity": "sha1-jGStX9MNqxyXbiNE/+f3kqam30I=", + "dev": true + }, + "require-main-filename": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/require-main-filename/-/require-main-filename-2.0.0.tgz", + "integrity": "sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg==", + "dev": true + }, + "resolve": { + "version": "1.15.1", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.15.1.tgz", + "integrity": 
"sha512-84oo6ZTtoTUpjgNEr5SJyzQhzL72gaRodsSfyxC/AXRvwu0Yse9H8eF9IpGo7b8YetZhlI6v7ZQ6bKBFV/6S7w==", + "dev": true, + "requires": { + "path-parse": "^1.0.6" + } + }, + "resolve-from": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", + "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", + "dev": true + }, + "retry": { + "version": "0.12.0", + "resolved": "https://registry.npmjs.org/retry/-/retry-0.12.0.tgz", + "integrity": "sha1-G0KmJmoh8HQh0bC1S33BZ7AcATs=", + "dev": true + }, + "reusify": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", + "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==", + "dev": true + }, + "rimraf": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", + "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", + "dev": true, + "requires": { + "glob": "^7.1.3" + } + }, + "run-parallel": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", + "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", + "dev": true, + "requires": { + "queue-microtask": "^1.2.2" + } + }, + "safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", + "dev": true + }, + "semantic-release": { + "version": "17.4.3", + "resolved": "https://registry.npmjs.org/semantic-release/-/semantic-release-17.4.3.tgz", + "integrity": "sha512-lTOUSrkbaQ+TRs3+BmtJhLtPSyiO7iTGmh5SyuEFqNO8HQbQ4nzXg4UlPrDQasO/C0eFK/V0eCbOzJdjtKBOYw==", + "dev": true, + "requires": { + "@semantic-release/commit-analyzer": "^8.0.0", + "@semantic-release/error": "^2.2.0", + "@semantic-release/github": "^7.0.0", + "@semantic-release/npm": "^7.0.0", + "@semantic-release/release-notes-generator": "^9.0.0", + "aggregate-error": "^3.0.0", + "cosmiconfig": "^7.0.0", + "debug": "^4.0.0", + "env-ci": "^5.0.0", + "execa": "^5.0.0", + "figures": "^3.0.0", + "find-versions": "^4.0.0", + "get-stream": "^6.0.0", + "git-log-parser": "^1.2.0", + "hook-std": "^2.0.0", + "hosted-git-info": "^4.0.0", + "lodash": "^4.17.21", + "marked": "^2.0.0", + "marked-terminal": "^4.1.1", + "micromatch": "^4.0.2", + "p-each-series": "^2.1.0", + "p-reduce": "^2.0.0", + "read-pkg-up": "^7.0.0", + "resolve-from": "^5.0.0", + "semver": "^7.3.2", + "semver-diff": "^3.1.1", + "signale": "^1.2.1", + "yargs": "^16.2.0" + }, + "dependencies": { + "ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "requires": { + "color-convert": "^2.0.1" + } + }, + "cliui": { + "version": "7.0.4", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz", + "integrity": "sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ==", + "dev": true, + "requires": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.0", + "wrap-ansi": "^7.0.0" + } + }, + "color-convert": { + "version": "2.0.1", + "resolved": 
"https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "requires": { + "color-name": "~1.1.4" + } + }, + "color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "find-up": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", + "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", + "dev": true, + "requires": { + "locate-path": "^5.0.0", + "path-exists": "^4.0.0" + } + }, + "hosted-git-info": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-4.0.2.tgz", + "integrity": "sha512-c9OGXbZ3guC/xOlCg1Ci/VgWlwsqDv1yMQL1CWqXDL0hDjXuNcq0zuR4xqPSuasI3kqFDhqSyTjREz5gzq0fXg==", + "dev": true, + "requires": { + "lru-cache": "^6.0.0" + } + }, + "locate-path": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", + "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", + "dev": true, + "requires": { + "p-locate": "^4.1.0" + } + }, + "p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "dev": true, + "requires": { + "p-try": "^2.0.0" + } + }, + "p-locate": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", + "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", + "dev": true, + "requires": { + "p-limit": "^2.2.0" + } + }, + "p-try": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "dev": true + }, + "parse-json": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz", + "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==", + "dev": true, + "requires": { + "@babel/code-frame": "^7.0.0", + "error-ex": "^1.3.1", + "json-parse-even-better-errors": "^2.3.0", + "lines-and-columns": "^1.1.6" + } + }, + "path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true + }, + "read-pkg": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-5.2.0.tgz", + "integrity": "sha512-Ug69mNOpfvKDAc2Q8DRpMjjzdtrnv9HcSMX+4VsZxD1aZ6ZzrIE7rlzXBtWTyhULSMKg076AW6WR5iZpD0JiOg==", + "dev": true, + "requires": { + "@types/normalize-package-data": "^2.4.0", + "normalize-package-data": "^2.5.0", + "parse-json": "^5.0.0", + "type-fest": "^0.6.0" + }, + "dependencies": { + "type-fest": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.6.0.tgz", + "integrity": "sha512-q+MB8nYR1KDLrgr4G5yemftpMC7/QLqVndBmEEdqzmNj5dcFOO4Oo8qlwZE3ULT3+Zim1F8Kq4cBnikNhlCMlg==", + "dev": true + } 
+ } + }, + "read-pkg-up": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-7.0.1.tgz", + "integrity": "sha512-zK0TB7Xd6JpCLmlLmufqykGE+/TlOePD6qKClNW7hHDKFh/J7/7gCWGR7joEQEW1bKq3a3yUZSObOoWLFQ4ohg==", + "dev": true, + "requires": { + "find-up": "^4.1.0", + "read-pkg": "^5.2.0", + "type-fest": "^0.8.1" + } + }, + "semver": { + "version": "7.3.5", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.3.5.tgz", + "integrity": "sha512-PoeGJYh8HK4BTO/a9Tf6ZG3veo/A7ZVsYrSA6J8ny9nb3B1VrpkuN+z9OE5wfE5p6H4LchYZsegiQgbJD94ZFQ==", + "dev": true, + "requires": { + "lru-cache": "^6.0.0" + } + }, + "type-fest": { + "version": "0.8.1", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.8.1.tgz", + "integrity": "sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==", + "dev": true + }, + "wrap-ansi": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dev": true, + "requires": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + } + }, + "y18n": { + "version": "5.0.8", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", + "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", + "dev": true + }, + "yargs": { + "version": "16.2.0", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-16.2.0.tgz", + "integrity": "sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw==", + "dev": true, + "requires": { + "cliui": "^7.0.2", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.0", + "y18n": "^5.0.5", + "yargs-parser": "^20.2.2" + } + }, + "yargs-parser": { + "version": "20.2.7", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.7.tgz", + "integrity": "sha512-FiNkvbeHzB/syOjIUxFDCnhSfzAL8R5vs40MgLFBorXACCOAEaWu0gRZl14vG8MR9AOJIZbmkjhusqBYZ3HTHw==", + "dev": true + } + } + }, + "semver": { + "version": "5.7.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", + "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", + "dev": true + }, + "semver-diff": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/semver-diff/-/semver-diff-3.1.1.tgz", + "integrity": "sha512-GX0Ix/CJcHyB8c4ykpHGIAvLyOwOobtM/8d+TQkAd81/bEjgPHrfba41Vpesr7jX/t8Uh+R3EX9eAS5be+jQYg==", + "dev": true, + "requires": { + "semver": "^6.3.0" + }, + "dependencies": { + "semver": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", + "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "dev": true + } + } + }, + "semver-regex": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/semver-regex/-/semver-regex-3.1.3.tgz", + "integrity": "sha512-Aqi54Mk9uYTjVexLnR67rTyBusmwd04cLkHy9hNvk3+G3nT2Oyg7E0l4XVbOaNwIvQ3hHeYxGcyEy+mKreyBFQ==", + "dev": true + }, + "serialize-javascript": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-5.0.1.tgz", + "integrity": "sha512-SaaNal9imEO737H2c05Og0/8LUXG7EnsZyMa8MzkmuHoELfT6txuj0cMqRj6zfPKnmQ1yasR4PCJc8x+M4JSPA==", + "dev": true, + "requires": { + "randombytes": "^2.1.0" 
+ } + }, + "set-blocking": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/set-blocking/-/set-blocking-2.0.0.tgz", + "integrity": "sha1-BF+XgtARrppoA93TgrJDkrPYkPc=", + "dev": true + }, + "shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "requires": { + "shebang-regex": "^3.0.0" + } + }, + "shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true + }, + "signal-exit": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.2.tgz", + "integrity": "sha1-tf3AjxKH6hF4Yo5BXiUTK3NkbG0=", + "dev": true + }, + "signale": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/signale/-/signale-1.4.0.tgz", + "integrity": "sha512-iuh+gPf28RkltuJC7W5MRi6XAjTDCAPC/prJUpQoG4vIP3MJZ+GTydVnodXA7pwvTKb2cA0m9OFZW/cdWy/I/w==", + "dev": true, + "requires": { + "chalk": "^2.3.2", + "figures": "^2.0.0", + "pkg-conf": "^2.1.0" + }, + "dependencies": { + "figures": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/figures/-/figures-2.0.0.tgz", + "integrity": "sha1-OrGi0qYsi/tDGgyUy3l6L84nyWI=", + "dev": true, + "requires": { + "escape-string-regexp": "^1.0.5" + } + } + } + }, + "slash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", + "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "dev": true + }, + "source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "dev": true + }, + "spawn-error-forwarder": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/spawn-error-forwarder/-/spawn-error-forwarder-1.0.0.tgz", + "integrity": "sha1-Gv2Uc46ZmwNG17n8NzvlXgdXcCk=", + "dev": true + }, + "spawn-wrap": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/spawn-wrap/-/spawn-wrap-2.0.0.tgz", + "integrity": "sha512-EeajNjfN9zMnULLwhZZQU3GWBoFNkbngTUPfaawT4RkMiviTxcX0qfhVbGey39mfctfDHkWtuecgQ8NJcyQWHg==", + "dev": true, + "requires": { + "foreground-child": "^2.0.0", + "is-windows": "^1.0.2", + "make-dir": "^3.0.0", + "rimraf": "^3.0.0", + "signal-exit": "^3.0.2", + "which": "^2.0.1" + }, + "dependencies": { + "which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "requires": { + "isexe": "^2.0.0" + } + } + } + }, + "spdx-correct": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/spdx-correct/-/spdx-correct-3.1.0.tgz", + "integrity": "sha512-lr2EZCctC2BNR7j7WzJ2FpDznxky1sjfxvvYEyzxNyb6lZXHODmEoJeFu4JupYlkfha1KZpJyoqiJ7pgA1qq8Q==", + "dev": true, + "requires": { + "spdx-expression-parse": "^3.0.0", + "spdx-license-ids": "^3.0.0" + } + }, + "spdx-exceptions": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/spdx-exceptions/-/spdx-exceptions-2.2.0.tgz", + "integrity": 
"sha512-2XQACfElKi9SlVb1CYadKDXvoajPgBVPn/gOQLrTvHdElaVhr7ZEbqJaRnJLVNeaI4cMEAgVCeBMKF6MWRDCRA==", + "dev": true + }, + "spdx-expression-parse": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/spdx-expression-parse/-/spdx-expression-parse-3.0.0.tgz", + "integrity": "sha512-Yg6D3XpRD4kkOmTpdgbUiEJFKghJH03fiC1OPll5h/0sO6neh2jqRDVHOQ4o/LMea0tgCkbMgea5ip/e+MkWyg==", + "dev": true, + "requires": { + "spdx-exceptions": "^2.1.0", + "spdx-license-ids": "^3.0.0" + } + }, + "spdx-license-ids": { + "version": "3.0.5", + "resolved": "https://registry.npmjs.org/spdx-license-ids/-/spdx-license-ids-3.0.5.tgz", + "integrity": "sha512-J+FWzZoynJEXGphVIS+XEh3kFSjZX/1i9gFBaWQcB+/tmpe2qUsSBABpcxqxnAxFdiUFEgAX1bjYGQvIZmoz9Q==", + "dev": true + }, + "split": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/split/-/split-1.0.1.tgz", + "integrity": "sha512-mTyOoPbrivtXnwnIxZRFYRrPNtEFKlpB2fvjSnCQUiAA6qAZzqwna5envK4uk6OIeP17CsdF3rSBGYVBsU0Tkg==", + "dev": true, + "requires": { + "through": "2" + } + }, + "split2": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/split2/-/split2-2.2.0.tgz", + "integrity": "sha512-RAb22TG39LhI31MbreBgIuKiIKhVsawfTgEGqKHTK87aG+ul/PB8Sqoi3I7kVdRWiCfrKxK3uo4/YUkpNvhPbw==", + "dev": true, + "requires": { + "through2": "^2.0.2" + }, + "dependencies": { + "through2": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/through2/-/through2-2.0.5.tgz", + "integrity": "sha512-/mrRod8xqpA+IHSLyGCQ2s8SPHiCDEeQJSep1jqLYeEUClOFG2Qsh+4FU6G9VeqpZnGW/Su8LQGc4YKni5rYSQ==", + "dev": true, + "requires": { + "readable-stream": "~2.3.6", + "xtend": "~4.0.1" + } + } + } + }, + "sprintf-js": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", + "integrity": "sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw=", + "dev": true + }, + "stream-combiner2": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/stream-combiner2/-/stream-combiner2-1.1.1.tgz", + "integrity": "sha1-+02KFCDqNidk4hrUeAOXvry0HL4=", + "dev": true, + "requires": { + "duplexer2": "~0.1.0", + "readable-stream": "^2.0.2" + } + }, + "string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "dev": true, + "requires": { + "safe-buffer": "~5.1.0" + } + }, + "string-width": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.0.tgz", + "integrity": "sha512-zUz5JD+tgqtuDjMhwIg5uFVV3dtqZ9yQJlZVfq4I01/K5Paj5UHj7VyrQOJvzawSVlKpObApbfD0Ed6yJc+1eg==", + "dev": true, + "requires": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.0" + } + }, + "strip-ansi": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz", + "integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==", + "dev": true, + "requires": { + "ansi-regex": "^5.0.0" + } + }, + "strip-bom": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz", + "integrity": "sha1-IzTBjpx1n3vdVv3vfprj1YjmjtM=", + "dev": true + }, + "strip-final-newline": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz", + "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==", + "dev": 
true + }, + "strip-indent": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/strip-indent/-/strip-indent-2.0.0.tgz", + "integrity": "sha1-XvjbKV0B5u1sv3qrlpmNeCJSe2g=", + "dev": true + }, + "strip-json-comments": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", + "integrity": "sha1-PFMZQukIwml8DsNEhYwobHygpgo=", + "dev": true + }, + "supports-color": { + "version": "5.4.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.4.0.tgz", + "integrity": "sha512-zjaXglF5nnWpsq470jSv6P9DwPvgLkuapYmfDm3JWOm0vkNTVF2tI4UrN2r6jH1qM/uc/WtxYY1hYoA2dOKj5w==", + "dev": true, + "requires": { + "has-flag": "^3.0.0" + } + }, + "supports-hyperlinks": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/supports-hyperlinks/-/supports-hyperlinks-2.2.0.tgz", + "integrity": "sha512-6sXEzV5+I5j8Bmq9/vUphGRM/RJNT9SCURJLjwfOg51heRtguGWDzcaBlgAzKhQa0EVNpPEKzQuBwZ8S8WaCeQ==", + "dev": true, + "requires": { + "has-flag": "^4.0.0", + "supports-color": "^7.0.0" + }, + "dependencies": { + "has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true + }, + "supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "requires": { + "has-flag": "^4.0.0" + } + } + } + }, + "temp-dir": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/temp-dir/-/temp-dir-2.0.0.tgz", + "integrity": "sha512-aoBAniQmmwtcKp/7BzsH8Cxzv8OL736p7v1ihGb5e9DJ9kTwGWHrQrVB5+lfVDzfGrdRzXch+ig7LHaY1JTOrg==", + "dev": true + }, + "tempy": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/tempy/-/tempy-0.5.0.tgz", + "integrity": "sha512-VEY96x7gbIRfsxqsafy2l5yVxxp3PhwAGoWMyC2D2Zt5DmEv+2tGiPOrquNRpf21hhGnKLVEsuqleqiZmKG/qw==", + "dev": true, + "requires": { + "is-stream": "^2.0.0", + "temp-dir": "^2.0.0", + "type-fest": "^0.12.0", + "unique-string": "^2.0.0" + }, + "dependencies": { + "is-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.0.tgz", + "integrity": "sha512-XCoy+WlUr7d1+Z8GgSuXmpuUFC9fOhRXglJMx+dwLKTkL44Cjd4W1Z5P+BQZpr+cR93aGP4S/s7Ftw6Nd/kiEw==", + "dev": true + }, + "type-fest": { + "version": "0.12.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.12.0.tgz", + "integrity": "sha512-53RyidyjvkGpnWPMF9bQgFtWp+Sl8O2Rp13VavmJgfAP9WWG6q6TkrKU8iyJdnwnfgHI6k2hTlgqH4aSdjoTbg==", + "dev": true + } + } + }, + "test-exclude": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/test-exclude/-/test-exclude-6.0.0.tgz", + "integrity": "sha512-cAGWPIyOHU6zlmg88jwm7VRyXnMN7iV68OGAbYDk/Mh/xC/pzVPlQtY6ngoIH/5/tciuhGfvESU8GrHrcxD56w==", + "dev": true, + "requires": { + "@istanbuljs/schema": "^0.1.2", + "glob": "^7.1.4", + "minimatch": "^3.0.4" + } + }, + "text-extensions": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/text-extensions/-/text-extensions-1.9.0.tgz", + "integrity": "sha512-wiBrwC1EhBelW12Zy26JeOUkQ5mRu+5o8rpsJk5+2t+Y5vE7e842qtZDQ2g1NpX/29HdyFeJ4nSIhI47ENSxlQ==", + "dev": true + }, + "through": { + "version": "2.3.8", + "resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz", + "integrity": 
"sha1-DdTJ/6q8NXlgsbckEV1+Doai4fU=", + "dev": true + }, + "through2": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/through2/-/through2-3.0.1.tgz", + "integrity": "sha512-M96dvTalPT3YbYLaKaCuwu+j06D/8Jfib0o/PxbVt6Amhv3dUAtW6rTV1jPgJSBG83I/e04Y6xkVdVhSRhi0ww==", + "dev": true, + "requires": { + "readable-stream": "2 || 3" + } + }, + "to-fast-properties": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz", + "integrity": "sha1-3F5pjL0HkmW8c+A3doGk5Og/YW4=", + "dev": true + }, + "to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "requires": { + "is-number": "^7.0.0" + } + }, + "tr46": { + "version": "0.0.3", + "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", + "integrity": "sha1-gYT9NH2snNwYWZLzpmIuFLnZq2o=", + "dev": true + }, + "traverse": { + "version": "0.6.6", + "resolved": "https://registry.npmjs.org/traverse/-/traverse-0.6.6.tgz", + "integrity": "sha1-y99WD9e5r2MlAv7UD5GMFX6pcTc=", + "dev": true + }, + "trim-newlines": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/trim-newlines/-/trim-newlines-2.0.0.tgz", + "integrity": "sha1-tAPQuRvlDDMd/EuC7s6yLD3hbSA=", + "dev": true + }, + "trim-off-newlines": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/trim-off-newlines/-/trim-off-newlines-1.0.3.tgz", + "integrity": "sha512-kh6Tu6GbeSNMGfrrZh6Bb/4ZEHV1QlB4xNDBeog8Y9/QwFlKTRyWvY3Fs9tRDAMZliVUwieMgEdIeL/FtqjkJg==", + "dev": true + }, + "type-fest": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.6.0.tgz", + "integrity": "sha512-q+MB8nYR1KDLrgr4G5yemftpMC7/QLqVndBmEEdqzmNj5dcFOO4Oo8qlwZE3ULT3+Zim1F8Kq4cBnikNhlCMlg==", + "dev": true + }, + "typedarray-to-buffer": { + "version": "3.1.5", + "resolved": "https://registry.npmjs.org/typedarray-to-buffer/-/typedarray-to-buffer-3.1.5.tgz", + "integrity": "sha512-zdu8XMNEDepKKR+XYOXAVPtWui0ly0NtohUscw+UmaHiAWT8hrV1rr//H6V+0DvJ3OQ19S979M0laLfX8rm82Q==", + "dev": true, + "requires": { + "is-typedarray": "^1.0.0" + } + }, + "uglify-js": { + "version": "3.8.0", + "resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-3.8.0.tgz", + "integrity": "sha512-ugNSTT8ierCsDHso2jkBHXYrU8Y5/fY2ZUprfrJUiD7YpuFvV4jODLFmb3h4btQjqr5Nh4TX4XtgDfCU1WdioQ==", + "dev": true, + "optional": true, + "requires": { + "commander": "~2.20.3", + "source-map": "~0.6.1" + }, + "dependencies": { + "commander": { + "version": "2.20.3", + "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz", + "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==", + "dev": true, + "optional": true + } + } + }, + "unique-string": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/unique-string/-/unique-string-2.0.0.tgz", + "integrity": "sha512-uNaeirEPvpZWSgzwsPGtU2zVSTrn/8L5q/IexZmH0eH6SA73CmAA5U4GwORTxQAZs95TAXLNqeLoPPNO5gZfWg==", + "dev": true, + "requires": { + "crypto-random-string": "^2.0.0" + } + }, + "universal-user-agent": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/universal-user-agent/-/universal-user-agent-6.0.0.tgz", + "integrity": "sha512-isyNax3wXoKaulPDZWHQqbmIx1k2tb9fb3GGDBRxCscfYV2Ch7WxPArBsFEG8s/safwXTT7H4QGhaIkTp9447w==", + "dev": true + }, + "universalify": { + "version": "1.0.0", + 
"resolved": "https://registry.npmjs.org/universalify/-/universalify-1.0.0.tgz", + "integrity": "sha512-rb6X1W158d7pRQBg5gkR8uPaSfiids68LTJQYOtEUhoJUWBdaQHsuT/EUduxXYxcrt4r5PJ4fuHW1MHT6p0qug==", + "dev": true + }, + "url-join": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/url-join/-/url-join-4.0.1.tgz", + "integrity": "sha512-jk1+QP6ZJqyOiuEI9AEWQfju/nB2Pw466kbA0LEZljHwKeMgd9WrAEgEGxjPDD2+TNbbb37rTyhEfrCXfuKXnA==", + "dev": true + }, + "util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=", + "dev": true + }, + "uuid": { + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.4.0.tgz", + "integrity": "sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A==", + "dev": true + }, + "validate-npm-package-license": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/validate-npm-package-license/-/validate-npm-package-license-3.0.4.tgz", + "integrity": "sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew==", + "dev": true, + "requires": { + "spdx-correct": "^3.0.0", + "spdx-expression-parse": "^3.0.0" + } + }, + "webidl-conversions": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", + "integrity": "sha1-JFNCdeKnvGvnvIZhHMFq4KVlSHE=", + "dev": true + }, + "whatwg-url": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", + "integrity": "sha1-lmRU6HZUYuN2RNNib2dCzotwll0=", + "dev": true, + "requires": { + "tr46": "~0.0.3", + "webidl-conversions": "^3.0.0" + } + }, + "which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "requires": { + "isexe": "^2.0.0" + } + }, + "which-module": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/which-module/-/which-module-2.0.0.tgz", + "integrity": "sha1-2e8H3Od7mQK4o6j6SzHD4/fm6Ho=", + "dev": true + }, + "wide-align": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/wide-align/-/wide-align-1.1.3.tgz", + "integrity": "sha512-QGkOQc8XL6Bt5PwnsExKBPuMKBxnGxWWW3fU55Xt4feHozMUhdUMaBCk290qpm/wG5u/RSKzwdAC4i51YigihA==", + "dev": true, + "requires": { + "string-width": "^1.0.2 || 2" + }, + "dependencies": { + "ansi-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-3.0.0.tgz", + "integrity": "sha1-7QMXwyIGT3lGbAKWa922Bas32Zg=", + "dev": true + }, + "is-fullwidth-code-point": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", + "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=", + "dev": true + }, + "string-width": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-2.1.1.tgz", + "integrity": "sha512-nOqH59deCq9SRHlxq1Aw85Jnt4w6KvLKqWVik6oA9ZklXLNIOlqg4F2yrT1MVaTjAqvVwdfeZ7w7aCvJD7ugkw==", + "dev": true, + "requires": { + "is-fullwidth-code-point": "^2.0.0", + "strip-ansi": "^4.0.0" + } + }, + "strip-ansi": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-4.0.0.tgz", + "integrity": "sha1-qEeQIusaw2iocTibY1JixQXuNo8=", + "dev": true, + "requires": { + "ansi-regex": "^3.0.0" + } + } + } + }, + 
"workerpool": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/workerpool/-/workerpool-6.0.2.tgz", + "integrity": "sha512-DSNyvOpFKrNusaaUwk+ej6cBj1bmhLcBfj80elGk+ZIo5JSkq+unB1dLKEOcNfJDZgjGICfhQ0Q5TbP0PvF4+Q==", + "dev": true + }, + "wrap-ansi": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-6.2.0.tgz", + "integrity": "sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA==", + "dev": true, + "requires": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "dependencies": { + "ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "requires": { + "color-convert": "^2.0.1" + } + }, + "color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "requires": { + "color-name": "~1.1.4" + } + }, + "color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + } + } + }, + "wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=", + "dev": true + }, + "write-file-atomic": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-3.0.3.tgz", + "integrity": "sha512-AvHcyZ5JnSfq3ioSyjrBkH9yW4m7Ayk8/9My/DD9onKeu/94fwrMocemO2QAJFAlnnDN+ZDS+ZjAR5ua1/PV/Q==", + "dev": true, + "requires": { + "imurmurhash": "^0.1.4", + "is-typedarray": "^1.0.0", + "signal-exit": "^3.0.2", + "typedarray-to-buffer": "^3.1.5" + } + }, + "xtend": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz", + "integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==", + "dev": true + }, + "y18n": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-4.0.1.tgz", + "integrity": "sha512-wNcy4NvjMYL8gogWWYAO7ZFWFfHcbdbE57tZO8e4cbpj8tfUcwrwqSl3ad8HxpYWCdXcJUCeKKZS62Av1affwQ==", + "dev": true + }, + "yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", + "dev": true + }, + "yaml": { + "version": "1.10.2", + "resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.2.tgz", + "integrity": "sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==", + "dev": true + }, + "yargs": { + "version": "15.4.1", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-15.4.1.tgz", + "integrity": "sha512-aePbxDmcYW++PaqBsJ+HYUFwCdv4LVvdnhBy78E57PIor8/OVvhMrADFFEDh8DHDFRv/O9i3lPhsENjO7QX0+A==", + "dev": true, + "requires": { + "cliui": "^6.0.0", + "decamelize": "^1.2.0", + "find-up": "^4.1.0", + "get-caller-file": "^2.0.1", + "require-directory": "^2.1.1", + "require-main-filename": "^2.0.0", + "set-blocking": "^2.0.0", + "string-width": "^4.2.0", + "which-module": "^2.0.0", + "y18n": "^4.0.0", + 
"yargs-parser": "^18.1.2" + }, + "dependencies": { + "camelcase": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", + "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", + "dev": true + }, + "find-up": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", + "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", + "dev": true, + "requires": { + "locate-path": "^5.0.0", + "path-exists": "^4.0.0" + } + }, + "locate-path": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", + "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", + "dev": true, + "requires": { + "p-locate": "^4.1.0" + } + }, + "p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "dev": true, + "requires": { + "p-try": "^2.0.0" + } + }, + "p-locate": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", + "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", + "dev": true, + "requires": { + "p-limit": "^2.2.0" + } + }, + "p-try": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "dev": true + }, + "path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true + }, + "yargs-parser": { + "version": "18.1.3", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-18.1.3.tgz", + "integrity": "sha512-o50j0JeToy/4K6OZcaQmW6lyXXKhq7csREXcDwk2omFPJEwUNOVtJKvmDr9EI1fAJZUyZcRF7kxGBWmRXudrCQ==", + "dev": true, + "requires": { + "camelcase": "^5.0.0", + "decamelize": "^1.2.0" + } + } + } + }, + "yargs-parser": { + "version": "10.1.0", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-10.1.0.tgz", + "integrity": "sha512-VCIyR1wJoEBZUqk5PA+oOBF6ypbwh5aNB3I50guxAL/quggdfs4TtNHQrSazFA3fYZ+tEqfs0zIGlv0c/rgjbQ==", + "dev": true, + "requires": { + "camelcase": "^4.1.0" + } + }, + "yargs-unparser": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/yargs-unparser/-/yargs-unparser-2.0.0.tgz", + "integrity": "sha512-7pRTIA9Qc1caZ0bZ6RYRGbHJthJWuakf+WmHK0rVeLkNrrGhfoabBNdue6kdINI6r4if7ocq9aD/n7xwKOdzOA==", + "dev": true, + "requires": { + "camelcase": "^6.0.0", + "decamelize": "^4.0.0", + "flat": "^5.0.2", + "is-plain-obj": "^2.1.0" + }, + "dependencies": { + "camelcase": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.2.0.tgz", + "integrity": "sha512-c7wVvbw3f37nuobQNtgsgG9POC9qMbNuMQmTCqZv23b6MIz0fcYpBiOlv9gEN/hdLdnZTDQhg6e9Dq5M1vKvfg==", + "dev": true + }, + "decamelize": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-4.0.0.tgz", + "integrity": "sha512-9iE1PgSik9HeIIw2JO94IidnE3eBoQrFJ3w7sFuzSX4DpmZ3v5sZpUiV5Swcf6mQEF+Y0ru8Neo+p+nyh2J+hQ==", + "dev": true + }, + "is-plain-obj": { + "version": "2.1.0", 
+ "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-2.1.0.tgz", + "integrity": "sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA==", + "dev": true + } + } + }, + "yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "dev": true + } + } +} diff --git a/vendor/github.com/asyncapi/spec-json-schemas/v2/package.json b/vendor/github.com/asyncapi/spec-json-schemas/v2/package.json new file mode 100644 index 00000000000..ef849c1275f --- /dev/null +++ b/vendor/github.com/asyncapi/spec-json-schemas/v2/package.json @@ -0,0 +1,74 @@ +{ + "name": "@asyncapi/specs", + "version": "2.14.0-2022-04-release.3", + "description": "AsyncAPI schema versions", + "main": "index.js", + "scripts": { + "test": "nyc mocha", + "release": "semantic-release", + "generate:assets": "echo 'No additional assets need to be generated at the moment'", + "lint": "echo 'No linter integrated yet'", + "bump:version": "npm --no-git-tag-version --allow-same-version version $VERSION" + }, + "repository": { + "type": "git", + "url": "git+https://github.com/asyncapi/spec-json-schemas.git" + }, + "author": { + "name": "Fran Mendez", + "email": "fmvilas@gmail.com" + }, + "publishConfig": { + "access": "public" + }, + "contributors": [ + { + "name": "Bruno Pedro", + "email": "bpedro@hitchhq.com" + } + ], + "license": "Apache-2.0", + "bugs": { + "url": "https://github.com/asyncapi/spec-json-schemas/issues" + }, + "homepage": "https://github.com/asyncapi/spec-json-schemas#readme", + "devDependencies": { + "mocha": "^8.2.1", + "nyc": "^15.1.0", + "@semantic-release/commit-analyzer": "^8.0.1", + "@semantic-release/github": "7.2.3", + "@semantic-release/npm": "^7.0.3", + "@semantic-release/release-notes-generator": "^9.0.1", + "conventional-changelog-conventionalcommits": "^4.2.3", + "semantic-release": "17.4.3" + }, + "release": { + "branches": [ + "master", + { + "name": "2022-04-release", + "prerelease": true + }, + { + "name": "next-major-spec", + "prerelease": true + } + ], + "plugins": [ + [ + "@semantic-release/commit-analyzer", + { + "preset": "conventionalcommits" + } + ], + [ + "@semantic-release/release-notes-generator", + { + "preset": "conventionalcommits" + } + ], + "@semantic-release/npm", + "@semantic-release/github" + ] + } +} diff --git a/vendor/github.com/asyncapi/spec-json-schemas/v2/schemas/1.0.0.json b/vendor/github.com/asyncapi/spec-json-schemas/v2/schemas/1.0.0.json new file mode 100644 index 00000000000..fe7f768116f --- /dev/null +++ b/vendor/github.com/asyncapi/spec-json-schemas/v2/schemas/1.0.0.json @@ -0,0 +1,849 @@ +{ + "title": "AsyncAPI 1.0 schema.", + "id": "http://asyncapi.hitchhq.com/v1/schema.json#", + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "required": [ + "asyncapi", + "info", + "topics" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "asyncapi": { + "type": "string", + "enum": [ + "1.0.0" + ], + "description": "The AsyncAPI specification version of this document." + }, + "info": { + "$ref": "#/definitions/info" + }, + "baseTopic": { + "type": "string", + "pattern": "^[^/.]", + "description": "The base topic to the API. 
Example: 'hitch'.", + "default": "" + }, + "servers": { + "type": "array", + "items": { + "$ref": "#/definitions/server" + }, + "uniqueItems": true + }, + "topics": { + "$ref": "#/definitions/topics" + }, + "components": { + "$ref": "#/definitions/components" + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "security": { + "type": "array", + "items": { + "$ref": "#/definitions/SecurityRequirement" + } + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "definitions": { + "Reference": { + "type": "object", + "required": [ + "$ref" + ], + "properties": { + "$ref": { + "type": "string", + "format": "uri" + } + } + }, + "info": { + "type": "object", + "description": "General information about the API.", + "required": [ + "version", + "title" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "title": { + "type": "string", + "description": "A unique and precise title of the API." + }, + "version": { + "type": "string", + "description": "A semantic version number of the API." + }, + "description": { + "type": "string", + "description": "A longer description of the API. Should be different from the title. CommonMark is allowed." + }, + "termsOfService": { + "type": "string", + "description": "A URL to the Terms of Service for the API. MUST be in the format of a URL.", + "format": "uri" + }, + "contact": { + "$ref": "#/definitions/contact" + }, + "license": { + "$ref": "#/definitions/license" + } + } + }, + "contact": { + "type": "object", + "description": "Contact information for the owners of the API.", + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The identifying name of the contact person/organization." + }, + "url": { + "type": "string", + "description": "The URL pointing to the contact information.", + "format": "uri" + }, + "email": { + "type": "string", + "description": "The email address of the contact person/organization.", + "format": "email" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "license": { + "type": "object", + "required": [ + "name" + ], + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The name of the license type. It's encouraged to use an OSI compatible license." 
+ }, + "url": { + "type": "string", + "description": "The URL pointing to the license.", + "format": "uri" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "server": { + "type": "object", + "description": "An object representing a Server.", + "required": [ + "url", + "scheme" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "url": { + "type": "string" + }, + "description": { + "type": "string" + }, + "scheme": { + "type": "string", + "description": "The transfer protocol.", + "enum": [ + "kafka", + "kafka-secure", + "amqp", + "amqps", + "mqtt", + "mqtts", + "secure-mqtt", + "ws", + "wss", + "stomp", + "stomps" + ] + }, + "schemeVersion": { + "type": "string" + }, + "variables": { + "$ref": "#/definitions/serverVariables" + } + } + }, + "serverVariables": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/serverVariable" + } + }, + "serverVariable": { + "type": "object", + "description": "An object representing a Server Variable for server URL template substitution.", + "minProperties": 1, + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "enum": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + }, + "default": { + "type": "string" + }, + "description": { + "type": "string" + } + } + }, + "topics": { + "type": "object", + "description": "Relative paths to the individual topics. They must be relative to the 'baseTopic'.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + }, + "^[^.]": { + "$ref": "#/definitions/topicItem" + } + }, + "additionalProperties": false + }, + "components": { + "type": "object", + "description": "An object to hold a set of reusable objects for different aspects of the AsyncAPI Specification.", + "additionalProperties": false, + "properties": { + "schemas": { + "$ref": "#/definitions/schemas" + }, + "messages": { + "$ref": "#/definitions/messages" + }, + "securitySchemes": { + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9\\.\\-_]+$": { + "oneOf": [ + { "$ref": "#/definitions/Reference" }, + { "$ref": "#/definitions/SecurityScheme" } + ] + } + } + } + } + }, + "schemas": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "description": "JSON objects describing schemas the API uses." + }, + "messages": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/message" + }, + "description": "JSON objects describing the messages being consumed and produced by the API." 
+ }, + "schema": { + "type": "object", + "description": "A deterministic version of a JSON Schema object.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "$ref": { + "type": "string" + }, + "format": { + "type": "string" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "multipleOf": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" + }, + "maximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" + }, + "exclusiveMaximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" + }, + "minimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" + }, + "exclusiveMinimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" + }, + "maxLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "pattern": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" + }, + "maxItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "uniqueItems": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" + }, + "maxProperties": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minProperties": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "required": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" + }, + "enum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/enum" + }, + "additionalProperties": { + "anyOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "boolean" + } + ], + "default": {} + }, + "type": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/type" + }, + "items": { + "anyOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + } + ], + "default": {} + }, + "allOf": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + }, + "properties": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "default": {} + }, + "discriminator": { + "type": "string" + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "xml": { + "$ref": "#/definitions/xml" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "example": {} + }, + "additionalProperties": false + }, + "xml": { + "type": "object", + "additionalProperties": false, + "properties": { + "name": { + "type": "string" + }, + "namespace": { + "type": "string" + }, + "prefix": { + "type": "string" + }, + "attribute": { + "type": "boolean", + "default": false + }, + "wrapped": { + "type": "boolean", + "default": false + } + } + }, + "externalDocs": { + "type": "object", + "additionalProperties": false, + "description": "information about external documentation", + "required": [ + "url" + ], + "properties": { + "description": { + "type": "string" + }, + "url": { + 
"type": "string", + "format": "uri" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "topicItem": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "minProperties": 1, + "properties": { + "$ref": { + "type": "string" + }, + "publish": { + "$ref": "#/definitions/message" + }, + "subscribe": { + "$ref": "#/definitions/message" + }, + "deprecated": { + "type": "boolean", + "default": false + } + } + }, + "message": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "$ref": { + "type": "string" + }, + "headers": { + "$ref": "#/definitions/schema" + }, + "payload": { + "$ref": "#/definitions/schema" + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "summary": { + "type": "string", + "description": "A brief summary of the message." + }, + "description": { + "type": "string", + "description": "A longer description of the message. CommonMark is allowed." + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "deprecated": { + "type": "boolean", + "default": false + }, + "example": {} + } + }, + "vendorExtension": { + "description": "Any property starting with x- is valid.", + "additionalProperties": true, + "additionalItems": true + }, + "tag": { + "type": "object", + "additionalProperties": false, + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "description": { + "type": "string" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "SecurityScheme": { + "oneOf": [ + { "$ref": "#/definitions/userPassword" }, + { "$ref": "#/definitions/apiKey" }, + { "$ref": "#/definitions/X509" }, + { "$ref": "#/definitions/symmetricEncryption" }, + { "$ref": "#/definitions/asymmetricEncryption" }, + { "$ref": "#/definitions/HTTPSecurityScheme" } + ] + }, + "userPassword": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "userPassword" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": {} + }, + "additionalProperties": false + }, + "apiKey": { + "type": "object", + "required": [ + "type", + "in" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "apiKey" + ] + }, + "in": { + "type": "string", + "enum": [ + "user", + "password" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": {} + }, + "additionalProperties": false + }, + "X509": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "X509" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": {} + }, + "additionalProperties": false + }, + "symmetricEncryption": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "symmetricEncryption" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": {} + }, + "additionalProperties": false + }, + "asymmetricEncryption": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "asymmetricEncryption" + ] + }, + "description": { + "type": 
"string" + } + }, + "patternProperties": { + "^x-": {} + }, + "additionalProperties": false + }, + "HTTPSecurityScheme": { + "oneOf": [ + { "$ref": "#/definitions/NonBearerHTTPSecurityScheme" }, + { "$ref": "#/definitions/BearerHTTPSecurityScheme" }, + { "$ref": "#/definitions/APIKeyHTTPSecurityScheme" } + ] + }, + "NonBearerHTTPSecurityScheme": { + "not": { + "type": "object", + "properties": { + "scheme": { + "type": "string", + "enum": [ + "bearer" + ] + } + } + }, + "type": "object", + "required": [ + "scheme", + "type" + ], + "properties": { + "scheme": { + "type": "string" + }, + "description": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "http" + ] + } + }, + "patternProperties": { + "^x-": {} + }, + "additionalProperties": false + }, + "BearerHTTPSecurityScheme": { + "type": "object", + "required": [ + "type", + "scheme" + ], + "properties": { + "scheme": { + "type": "string", + "enum": [ + "bearer" + ] + }, + "bearerFormat": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "http" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": {} + }, + "additionalProperties": false + }, + "APIKeyHTTPSecurityScheme": { + "type": "object", + "required": [ + "type", + "name", + "in" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "httpApiKey" + ] + }, + "name": { + "type": "string" + }, + "in": { + "type": "string", + "enum": [ + "header", + "query", + "cookie" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": {} + }, + "additionalProperties": false + }, + "SecurityRequirement": { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "multipleOf": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" + }, + "maximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" + }, + "exclusiveMaximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" + }, + "minimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" + }, + "exclusiveMinimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" + }, + "maxLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "pattern": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" + }, + "maxItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "uniqueItems": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" + }, + "enum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/enum" + } + } +} diff --git a/vendor/github.com/asyncapi/spec-json-schemas/v2/schemas/1.1.0.json b/vendor/github.com/asyncapi/spec-json-schemas/v2/schemas/1.1.0.json new file mode 100644 index 00000000000..a689a7e36a2 --- /dev/null +++ b/vendor/github.com/asyncapi/spec-json-schemas/v2/schemas/1.1.0.json @@ -0,0 +1,921 @@ +{ + 
"title": "AsyncAPI 1.1.0 schema.", + "id": "http://asyncapi.hitchhq.com/v1/schema.json#", + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "required": [ + "asyncapi", + "info", + "topics" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "asyncapi": { + "type": "string", + "enum": [ + "1.0.0", + "1.1.0" + ], + "description": "The AsyncAPI specification version of this document." + }, + "info": { + "$ref": "#/definitions/info" + }, + "baseTopic": { + "type": "string", + "pattern": "^[^/.]", + "description": "The base topic to the API. Example: 'hitch'.", + "default": "" + }, + "servers": { + "type": "array", + "items": { + "$ref": "#/definitions/server" + }, + "uniqueItems": true + }, + "topics": { + "$ref": "#/definitions/topics" + }, + "components": { + "$ref": "#/definitions/components" + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "security": { + "type": "array", + "items": { + "$ref": "#/definitions/SecurityRequirement" + } + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "definitions": { + "Reference": { + "type": "object", + "required": [ + "$ref" + ], + "properties": { + "$ref": { + "type": "string", + "format": "uri" + } + } + }, + "info": { + "type": "object", + "description": "General information about the API.", + "required": [ + "version", + "title" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "title": { + "type": "string", + "description": "A unique and precise title of the API." + }, + "version": { + "type": "string", + "description": "A semantic version number of the API." + }, + "description": { + "type": "string", + "description": "A longer description of the API. Should be different from the title. CommonMark is allowed." + }, + "termsOfService": { + "type": "string", + "description": "A URL to the Terms of Service for the API. MUST be in the format of a URL.", + "format": "uri" + }, + "contact": { + "$ref": "#/definitions/contact" + }, + "license": { + "$ref": "#/definitions/license" + } + } + }, + "contact": { + "type": "object", + "description": "Contact information for the owners of the API.", + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The identifying name of the contact person/organization." + }, + "url": { + "type": "string", + "description": "The URL pointing to the contact information.", + "format": "uri" + }, + "email": { + "type": "string", + "description": "The email address of the contact person/organization.", + "format": "email" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "license": { + "type": "object", + "required": [ + "name" + ], + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The name of the license type. It's encouraged to use an OSI compatible license." 
+ }, + "url": { + "type": "string", + "description": "The URL pointing to the license.", + "format": "uri" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "server": { + "type": "object", + "description": "An object representing a Server.", + "required": [ + "url", + "scheme" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "url": { + "type": "string" + }, + "description": { + "type": "string" + }, + "scheme": { + "type": "string", + "description": "The transfer protocol.", + "enum": [ + "kafka", + "kafka-secure", + "amqp", + "amqps", + "mqtt", + "mqtts", + "secure-mqtt", + "ws", + "wss", + "stomp", + "stomps", + "jms" + ] + }, + "schemeVersion": { + "type": "string" + }, + "variables": { + "$ref": "#/definitions/serverVariables" + } + } + }, + "serverVariables": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/serverVariable" + } + }, + "serverVariable": { + "type": "object", + "description": "An object representing a Server Variable for server URL template substitution.", + "minProperties": 1, + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "enum": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + }, + "default": { + "type": "string" + }, + "description": { + "type": "string" + } + } + }, + "topics": { + "type": "object", + "description": "Relative paths to the individual topics. They must be relative to the 'baseTopic'.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + }, + "^[^.]": { + "$ref": "#/definitions/topicItem" + } + }, + "additionalProperties": false + }, + "components": { + "type": "object", + "description": "An object to hold a set of reusable objects for different aspects of the AsyncAPI Specification.", + "additionalProperties": false, + "properties": { + "schemas": { + "$ref": "#/definitions/schemas" + }, + "messages": { + "$ref": "#/definitions/messages" + }, + "securitySchemes": { + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9\\.\\-_]+$": { + "oneOf": [ + { "$ref": "#/definitions/Reference" }, + { "$ref": "#/definitions/SecurityScheme" } + ] + } + } + } + } + }, + "schemas": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "description": "JSON objects describing schemas the API uses." + }, + "messages": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/message" + }, + "description": "JSON objects describing the messages being consumed and produced by the API." 
+ }, + "schema": { + "type": "object", + "description": "A deterministic version of a JSON Schema object.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "$ref": { + "type": "string" + }, + "format": { + "type": "string" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "multipleOf": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" + }, + "maximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" + }, + "exclusiveMaximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" + }, + "minimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" + }, + "exclusiveMinimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" + }, + "maxLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "pattern": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" + }, + "maxItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "uniqueItems": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" + }, + "maxProperties": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minProperties": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "required": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" + }, + "enum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/enum" + }, + "additionalProperties": { + "anyOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "boolean" + } + ], + "default": {} + }, + "type": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/type" + }, + "items": { + "anyOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + } + ], + "default": {} + }, + "allOf": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + }, + "oneOf": { + "type": "array", + "minItems": 2, + "items": { + "$ref": "#/definitions/schema" + } + }, + "anyOf": { + "type": "array", + "minItems": 2, + "items": { + "$ref": "#/definitions/schema" + } + }, + "not": { + "$ref": "#/definitions/schema" + }, + "properties": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "default": {} + }, + "discriminator": { + "type": "string" + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "xml": { + "$ref": "#/definitions/xml" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "example": {} + }, + "additionalProperties": false + }, + "xml": { + "type": "object", + "additionalProperties": false, + "properties": { + "name": { + "type": "string" + }, + "namespace": { + "type": "string" + }, + "prefix": { + "type": "string" + }, + "attribute": { + "type": "boolean", + "default": false + }, + "wrapped": { + "type": "boolean", + "default": 
false + } + } + }, + "externalDocs": { + "type": "object", + "additionalProperties": false, + "description": "information about external documentation", + "required": [ + "url" + ], + "properties": { + "description": { + "type": "string" + }, + "url": { + "type": "string", + "format": "uri" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "topicItem": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "minProperties": 1, + "properties": { + "$ref": { + "type": "string" + }, + "parameters": { + "type": "array", + "uniqueItems": true, + "minItems": 1, + "items": { + "$ref": "#/definitions/parameter" + } + }, + "publish": { + "$ref": "#/definitions/operation" + }, + "subscribe": { + "$ref": "#/definitions/operation" + }, + "deprecated": { + "type": "boolean", + "default": false + } + } + }, + "parameter": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "schema": { + "$ref": "#/definitions/schema" + } + } + }, + "operation": { + "oneOf": [ + { "$ref": "#/definitions/message" }, + { + "type": "object", + "required": [ "oneOf" ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "oneOf": { + "type": "array", + "minItems": 2, + "items": { + "$ref": "#/definitions/message" + } + } + } + } + ] + }, + "message": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "$ref": { + "type": "string" + }, + "headers": { + "$ref": "#/definitions/schema" + }, + "payload": { + "$ref": "#/definitions/schema" + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "summary": { + "type": "string", + "description": "A brief summary of the message." + }, + "description": { + "type": "string", + "description": "A longer description of the message. CommonMark is allowed." 
+ }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "deprecated": { + "type": "boolean", + "default": false + }, + "example": {} + } + }, + "vendorExtension": { + "description": "Any property starting with x- is valid.", + "additionalProperties": true, + "additionalItems": true + }, + "tag": { + "type": "object", + "additionalProperties": false, + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "description": { + "type": "string" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "SecurityScheme": { + "oneOf": [ + { "$ref": "#/definitions/userPassword" }, + { "$ref": "#/definitions/apiKey" }, + { "$ref": "#/definitions/X509" }, + { "$ref": "#/definitions/symmetricEncryption" }, + { "$ref": "#/definitions/asymmetricEncryption" }, + { "$ref": "#/definitions/HTTPSecurityScheme" } + ] + }, + "userPassword": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "userPassword" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": {} + }, + "additionalProperties": false + }, + "apiKey": { + "type": "object", + "required": [ + "type", + "in" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "apiKey" + ] + }, + "in": { + "type": "string", + "enum": [ + "user", + "password" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": {} + }, + "additionalProperties": false + }, + "X509": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "X509" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": {} + }, + "additionalProperties": false + }, + "symmetricEncryption": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "symmetricEncryption" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": {} + }, + "additionalProperties": false + }, + "asymmetricEncryption": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "asymmetricEncryption" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": {} + }, + "additionalProperties": false + }, + "HTTPSecurityScheme": { + "oneOf": [ + { "$ref": "#/definitions/NonBearerHTTPSecurityScheme" }, + { "$ref": "#/definitions/BearerHTTPSecurityScheme" }, + { "$ref": "#/definitions/APIKeyHTTPSecurityScheme" } + ] + }, + "NonBearerHTTPSecurityScheme": { + "not": { + "type": "object", + "properties": { + "scheme": { + "type": "string", + "enum": [ + "bearer" + ] + } + } + }, + "type": "object", + "required": [ + "scheme", + "type" + ], + "properties": { + "scheme": { + "type": "string" + }, + "description": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "http" + ] + } + }, + "patternProperties": { + "^x-": {} + }, + "additionalProperties": false + }, + "BearerHTTPSecurityScheme": { + "type": "object", + "required": [ + "type", + "scheme" + ], + "properties": { + "scheme": { + "type": "string", + "enum": [ + "bearer" + ] + }, + "bearerFormat": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "http" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": {} + }, + "additionalProperties": 
false + }, + "APIKeyHTTPSecurityScheme": { + "type": "object", + "required": [ + "type", + "name", + "in" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "httpApiKey" + ] + }, + "name": { + "type": "string" + }, + "in": { + "type": "string", + "enum": [ + "header", + "query", + "cookie" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": {} + }, + "additionalProperties": false + }, + "SecurityRequirement": { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "multipleOf": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" + }, + "maximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" + }, + "exclusiveMaximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" + }, + "minimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" + }, + "exclusiveMinimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" + }, + "maxLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "pattern": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" + }, + "maxItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "uniqueItems": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" + }, + "enum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/enum" + } + } +} diff --git a/vendor/github.com/asyncapi/spec-json-schemas/v2/schemas/1.2.0.json b/vendor/github.com/asyncapi/spec-json-schemas/v2/schemas/1.2.0.json new file mode 100644 index 00000000000..8d382d9c181 --- /dev/null +++ b/vendor/github.com/asyncapi/spec-json-schemas/v2/schemas/1.2.0.json @@ -0,0 +1,1084 @@ +{ + "title": "AsyncAPI 1.2.0 schema.", + "id": "http://asyncapi.hitchhq.com/v1/schema.json#", + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "required": [ + "asyncapi", + "info" + ], + "oneOf": [ + { + "required": [ + "topics" + ] + }, + { + "required": [ + "stream" + ] + }, + { + "required": [ + "events" + ] + } + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "asyncapi": { + "type": "string", + "enum": [ + "1.0.0", + "1.1.0", + "1.2.0" + ], + "description": "The AsyncAPI specification version of this document." + }, + "info": { + "$ref": "#/definitions/info" + }, + "baseTopic": { + "type": "string", + "pattern": "^[^/.]", + "description": "The base topic to the API. Example: 'hitch'.", + "default": "" + }, + "servers": { + "type": "array", + "items": { + "$ref": "#/definitions/server" + }, + "uniqueItems": true + }, + "topics": { + "$ref": "#/definitions/topics" + }, + "stream": { + "$ref": "#/definitions/stream", + "description": "The list of messages a consumer can read or write from/to a streaming API." 
+ }, + "events": { + "$ref": "#/definitions/events", + "description": "The list of messages an events API sends and/or receives." + }, + "components": { + "$ref": "#/definitions/components" + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "security": { + "type": "array", + "items": { + "$ref": "#/definitions/SecurityRequirement" + } + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "definitions": { + "Reference": { + "type": "object", + "required": [ + "$ref" + ], + "properties": { + "$ref": { + "type": "string", + "format": "uri" + } + } + }, + "info": { + "type": "object", + "description": "General information about the API.", + "required": [ + "version", + "title" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "title": { + "type": "string", + "description": "A unique and precise title of the API." + }, + "version": { + "type": "string", + "description": "A semantic version number of the API." + }, + "description": { + "type": "string", + "description": "A longer description of the API. Should be different from the title. CommonMark is allowed." + }, + "termsOfService": { + "type": "string", + "description": "A URL to the Terms of Service for the API. MUST be in the format of a URL.", + "format": "uri" + }, + "contact": { + "$ref": "#/definitions/contact" + }, + "license": { + "$ref": "#/definitions/license" + } + } + }, + "contact": { + "type": "object", + "description": "Contact information for the owners of the API.", + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The identifying name of the contact person/organization." + }, + "url": { + "type": "string", + "description": "The URL pointing to the contact information.", + "format": "uri" + }, + "email": { + "type": "string", + "description": "The email address of the contact person/organization.", + "format": "email" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "license": { + "type": "object", + "required": [ + "name" + ], + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The name of the license type. It's encouraged to use an OSI compatible license." 
+ }, + "url": { + "type": "string", + "description": "The URL pointing to the license.", + "format": "uri" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "server": { + "type": "object", + "description": "An object representing a Server.", + "required": [ + "url", + "scheme" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "url": { + "type": "string" + }, + "description": { + "type": "string" + }, + "scheme": { + "type": "string", + "description": "The transfer protocol.", + "enum": [ + "kafka", + "kafka-secure", + "amqp", + "amqps", + "mqtt", + "mqtts", + "secure-mqtt", + "ws", + "wss", + "stomp", + "stomps", + "jms", + "http", + "https" + ] + }, + "schemeVersion": { + "type": "string" + }, + "variables": { + "$ref": "#/definitions/serverVariables" + } + } + }, + "serverVariables": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/serverVariable" + } + }, + "serverVariable": { + "type": "object", + "description": "An object representing a Server Variable for server URL template substitution.", + "minProperties": 1, + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "enum": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + }, + "default": { + "type": "string" + }, + "description": { + "type": "string" + } + } + }, + "topics": { + "type": "object", + "description": "Relative paths to the individual topics. They must be relative to the 'baseTopic'.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + }, + "^[^.]": { + "$ref": "#/definitions/topicItem" + } + }, + "additionalProperties": false + }, + "components": { + "type": "object", + "description": "An object to hold a set of reusable objects for different aspects of the AsyncAPI Specification.", + "additionalProperties": false, + "properties": { + "schemas": { + "$ref": "#/definitions/schemas" + }, + "messages": { + "$ref": "#/definitions/messages" + }, + "securitySchemes": { + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9\\.\\-_]+$": { + "oneOf": [ + { "$ref": "#/definitions/Reference" }, + { "$ref": "#/definitions/SecurityScheme" } + ] + } + } + }, + "parameters": { + "$ref": "#/definitions/parameters" + } + } + }, + "schemas": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "description": "JSON objects describing schemas the API uses." + }, + "messages": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/message" + }, + "description": "JSON objects describing the messages being consumed and produced by the API." + }, + "parameters": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/parameter" + }, + "description": "JSON objects describing re-usable topic parameters." 
+ }, + "schema": { + "type": "object", + "description": "A deterministic version of a JSON Schema object.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "$ref": { + "type": "string" + }, + "format": { + "type": "string" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "multipleOf": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" + }, + "maximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" + }, + "exclusiveMaximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" + }, + "minimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" + }, + "exclusiveMinimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" + }, + "maxLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "pattern": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" + }, + "maxItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "uniqueItems": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" + }, + "maxProperties": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minProperties": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "required": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" + }, + "enum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/enum" + }, + "additionalProperties": { + "anyOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "boolean" + } + ], + "default": {} + }, + "type": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/type" + }, + "items": { + "anyOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + } + ], + "default": {} + }, + "allOf": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + }, + "oneOf": { + "type": "array", + "minItems": 2, + "items": { + "$ref": "#/definitions/schema" + } + }, + "anyOf": { + "type": "array", + "minItems": 2, + "items": { + "$ref": "#/definitions/schema" + } + }, + "not": { + "$ref": "#/definitions/schema" + }, + "properties": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "default": {} + }, + "discriminator": { + "type": "string" + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "xml": { + "$ref": "#/definitions/xml" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "example": {} + }, + "additionalProperties": false + }, + "xml": { + "type": "object", + "additionalProperties": false, + "properties": { + "name": { + "type": "string" + }, + "namespace": { + "type": "string" + }, + "prefix": { + "type": "string" + }, + "attribute": { + "type": "boolean", + "default": false + }, + "wrapped": { + "type": "boolean", + "default": 
false + } + } + }, + "externalDocs": { + "type": "object", + "additionalProperties": false, + "description": "information about external documentation", + "required": [ + "url" + ], + "properties": { + "description": { + "type": "string" + }, + "url": { + "type": "string", + "format": "uri" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "topicItem": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "minProperties": 1, + "properties": { + "$ref": { + "type": "string" + }, + "parameters": { + "type": "array", + "uniqueItems": true, + "minItems": 1, + "items": { + "$ref": "#/definitions/parameter" + } + }, + "publish": { + "$ref": "#/definitions/operation" + }, + "subscribe": { + "$ref": "#/definitions/operation" + }, + "deprecated": { + "type": "boolean", + "default": false + } + } + }, + "parameter": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "schema": { + "$ref": "#/definitions/schema" + }, + "$ref": { + "type": "string" + } + } + }, + "operation": { + "oneOf": [ + { "$ref": "#/definitions/message" }, + { + "type": "object", + "required": [ "oneOf" ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "oneOf": { + "type": "array", + "minItems": 2, + "items": { + "$ref": "#/definitions/message" + } + } + } + } + ] + }, + "stream": { + "title": "Stream Object", + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "minProperties": 1, + "properties": { + "framing": { + "title": "Stream Framing Object", + "type": "object", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "minProperties": 1, + "oneOf": [ + { + "additionalProperties": false, + "properties": { + "type": { + "type": "string", + "enum": [ + "chunked" + ] + }, + "delimiter": { + "type": "string", + "enum": [ + "\\r\\n", + "\\n" + ], + "default": "\\r\\n" + } + } + }, + { + "additionalProperties": false, + "properties": { + "type": { + "type": "string", + "enum": [ + "sse" + ] + }, + "delimiter": { + "type": "string", + "enum": [ + "\\n\\n" + ], + "default": "\\n\\n" + } + } + } + ] + }, + "read": { + "title": "Stream Read Object", + "type": "array", + "uniqueItems": true, + "minItems": 1, + "items": { + "$ref": "#/definitions/message" + } + }, + "write": { + "title": "Stream Write Object", + "type": "array", + "uniqueItems": true, + "minItems": 1, + "items": { + "$ref": "#/definitions/message" + } + } + } + }, + "events": { + "title": "Events Object", + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "minProperties": 1, + "anyOf": [ + { + "required": [ + "receive" + ] + }, + { + "required": [ + "send" + ] + } + ], + "properties": { + "receive": { + "title": "Events Receive Object", + "type": "array", + "uniqueItems": true, + "minItems": 1, + "items": { + "$ref": "#/definitions/message" + } + }, + "send": { + "title": "Events Send 
Object", + "type": "array", + "uniqueItems": true, + "minItems": 1, + "items": { + "$ref": "#/definitions/message" + } + } + } + }, + "message": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "$ref": { + "type": "string" + }, + "headers": { + "$ref": "#/definitions/schema" + }, + "payload": { + "$ref": "#/definitions/schema" + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "summary": { + "type": "string", + "description": "A brief summary of the message." + }, + "description": { + "type": "string", + "description": "A longer description of the message. CommonMark is allowed." + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "deprecated": { + "type": "boolean", + "default": false + }, + "example": {} + } + }, + "vendorExtension": { + "description": "Any property starting with x- is valid.", + "additionalProperties": true, + "additionalItems": true + }, + "tag": { + "type": "object", + "additionalProperties": false, + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "description": { + "type": "string" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "SecurityScheme": { + "oneOf": [ + { "$ref": "#/definitions/userPassword" }, + { "$ref": "#/definitions/apiKey" }, + { "$ref": "#/definitions/X509" }, + { "$ref": "#/definitions/symmetricEncryption" }, + { "$ref": "#/definitions/asymmetricEncryption" }, + { "$ref": "#/definitions/HTTPSecurityScheme" } + ] + }, + "userPassword": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "userPassword" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": {} + }, + "additionalProperties": false + }, + "apiKey": { + "type": "object", + "required": [ + "type", + "in" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "apiKey" + ] + }, + "in": { + "type": "string", + "enum": [ + "user", + "password" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": {} + }, + "additionalProperties": false + }, + "X509": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "X509" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": {} + }, + "additionalProperties": false + }, + "symmetricEncryption": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "symmetricEncryption" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": {} + }, + "additionalProperties": false + }, + "asymmetricEncryption": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "asymmetricEncryption" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": {} + }, + "additionalProperties": false + }, + "HTTPSecurityScheme": { + "oneOf": [ + { "$ref": "#/definitions/NonBearerHTTPSecurityScheme" }, + { "$ref": "#/definitions/BearerHTTPSecurityScheme" }, + { "$ref": "#/definitions/APIKeyHTTPSecurityScheme" } + ] + }, + "NonBearerHTTPSecurityScheme": { + "not": { + "type": "object", + "properties": { + "scheme": { + 
"type": "string", + "enum": [ + "bearer" + ] + } + } + }, + "type": "object", + "required": [ + "scheme", + "type" + ], + "properties": { + "scheme": { + "type": "string" + }, + "description": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "http" + ] + } + }, + "patternProperties": { + "^x-": {} + }, + "additionalProperties": false + }, + "BearerHTTPSecurityScheme": { + "type": "object", + "required": [ + "type", + "scheme" + ], + "properties": { + "scheme": { + "type": "string", + "enum": [ + "bearer" + ] + }, + "bearerFormat": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "http" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": {} + }, + "additionalProperties": false + }, + "APIKeyHTTPSecurityScheme": { + "type": "object", + "required": [ + "type", + "name", + "in" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "httpApiKey" + ] + }, + "name": { + "type": "string" + }, + "in": { + "type": "string", + "enum": [ + "header", + "query", + "cookie" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": {} + }, + "additionalProperties": false + }, + "SecurityRequirement": { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "multipleOf": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" + }, + "maximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" + }, + "exclusiveMaximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" + }, + "minimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" + }, + "exclusiveMinimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" + }, + "maxLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "pattern": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" + }, + "maxItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "uniqueItems": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" + }, + "enum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/enum" + } + } +} diff --git a/vendor/github.com/asyncapi/spec-json-schemas/v2/schemas/2.0.0-rc1.json b/vendor/github.com/asyncapi/spec-json-schemas/v2/schemas/2.0.0-rc1.json new file mode 100644 index 00000000000..34754228d0b --- /dev/null +++ b/vendor/github.com/asyncapi/spec-json-schemas/v2/schemas/2.0.0-rc1.json @@ -0,0 +1,1349 @@ +{ + "title": "AsyncAPI 2.0.0-rc1 schema.", + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "required": [ + "asyncapi", + "id", + "info", + "channels" + ], + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "asyncapi": { + "type": "string", + "enum": [ + "2.0.0-rc1" 
+ ], + "description": "The AsyncAPI specification version of this document." + }, + "id": { + "type": "string", + "description": "A unique id representing the application.", + "format": "uri-reference" + }, + "info": { + "$ref": "#/definitions/info" + }, + "servers": { + "type": "array", + "items": { + "$ref": "#/definitions/server" + }, + "uniqueItems": true + }, + "defaultContentType": { + "type": "string" + }, + "channels": { + "$ref": "#/definitions/channels" + }, + "components": { + "$ref": "#/definitions/components" + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "definitions": { + "Reference": { + "type": "object", + "required": [ + "$ref" + ], + "properties": { + "$ref": { + "$ref": "#/definitions/ReferenceObject" + } + } + }, + "ReferenceObject": { + "type": "string", + "format": "uri" + }, + "info": { + "type": "object", + "description": "General information about the API.", + "required": [ + "version", + "title" + ], + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "title": { + "type": "string", + "description": "A unique and precise title of the API." + }, + "version": { + "type": "string", + "description": "A semantic version number of the API." + }, + "description": { + "type": "string", + "description": "A longer description of the API. Should be different from the title. CommonMark is allowed." + }, + "termsOfService": { + "type": "string", + "description": "A URL to the Terms of Service for the API. MUST be in the format of a URL.", + "format": "uri" + }, + "contact": { + "$ref": "#/definitions/contact" + }, + "license": { + "$ref": "#/definitions/license" + } + } + }, + "contact": { + "type": "object", + "description": "Contact information for the owners of the API.", + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The identifying name of the contact person/organization." + }, + "url": { + "type": "string", + "description": "The URL pointing to the contact information.", + "format": "uri" + }, + "email": { + "type": "string", + "description": "The email address of the contact person/organization.", + "format": "email" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + } + }, + "license": { + "type": "object", + "required": [ + "name" + ], + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The name of the license type. It's encouraged to use an OSI compatible license." + }, + "url": { + "type": "string", + "description": "The URL pointing to the license.", + "format": "uri" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + } + }, + "server": { + "type": "object", + "description": "An object representing a Server.", + "required": [ + "url", + "protocol" + ], + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "url": { + "type": "string" + }, + "description": { + "type": "string" + }, + "protocol": { + "type": "string", + "description": "The transfer protocol." 
+ }, + "protocolVersion": { + "type": "string" + }, + "variables": { + "$ref": "#/definitions/serverVariables" + }, + "baseChannel": { + "type": "string", + "x-format": "uri-path" + }, + "security": { + "type": "array", + "items": { + "$ref": "#/definitions/SecurityRequirement" + } + } + } + }, + "serverVariables": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/serverVariable" + } + }, + "serverVariable": { + "type": "object", + "description": "An object representing a Server Variable for server URL template substitution.", + "minProperties": 1, + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "enum": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + }, + "default": { + "type": "string" + }, + "description": { + "type": "string" + }, + "examples": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "channels": { + "type": "object", + "propertyNames": { + "type": "string", + "format": "uri-template", + "minLength": 1 + }, + "additionalProperties": { + "$ref": "#/definitions/channelItem" + } + }, + "components": { + "type": "object", + "description": "An object to hold a set of reusable objects for different aspects of the AsyncAPI Specification.", + "additionalProperties": false, + "properties": { + "schemas": { + "$ref": "#/definitions/schemas" + }, + "messages": { + "$ref": "#/definitions/messages" + }, + "securitySchemes": { + "type": "object", + "patternProperties": { + "^[\\w\\d\\.\\-_]+$": { + "oneOf": [ + { "$ref": "#/definitions/Reference" }, + { "$ref": "#/definitions/SecurityScheme" } + ] + } + } + }, + "parameters": { + "$ref": "#/definitions/parameters" + }, + "correlationIds": { + "type": "object", + "patternProperties": { + "^[\\w\\d\\.\\-_]+$": { + "oneOf": [ + { "$ref": "#/definitions/Reference" }, + { "$ref": "#/definitions/correlationId" } + ] + } + } + }, + "traits": { + "$ref": "#/definitions/traits" + } + } + }, + "schemas": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "description": "JSON objects describing schemas the API uses." + }, + "messages": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/message" + }, + "description": "JSON objects describing the messages being consumed and produced by the API." + }, + "parameters": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/parameter" + }, + "description": "JSON objects describing re-usable channel parameters." 
+ }, + "schema": { + "type": "object", + "description": "A deterministic version of a JSON Schema object.", + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "$ref": { + "$ref": "#/definitions/ReferenceObject" + }, + "format": { + "type": "string" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "multipleOf": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" + }, + "maximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" + }, + "exclusiveMaximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" + }, + "minimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" + }, + "exclusiveMinimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" + }, + "maxLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "pattern": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" + }, + "maxItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "uniqueItems": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" + }, + "maxProperties": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minProperties": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "required": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" + }, + "enum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/enum" + }, + "deprecated": { + "type": "boolean", + "default": false + }, + "additionalProperties": { + "anyOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "boolean" + } + ], + "default": {} + }, + "type": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/type" + }, + "items": { + "anyOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + } + ], + "default": {} + }, + "allOf": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + }, + "oneOf": { + "type": "array", + "minItems": 2, + "items": { + "$ref": "#/definitions/schema" + } + }, + "anyOf": { + "type": "array", + "minItems": 2, + "items": { + "$ref": "#/definitions/schema" + } + }, + "not": { + "$ref": "#/definitions/schema" + }, + "properties": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "default": {} + }, + "discriminator": { + "type": "string" + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "xml": { + "$ref": "#/definitions/xml" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "example": {}, + "examples": { + "type": "array", + "items": {} + } + }, + "additionalProperties": false + }, + "xml": { + "type": "object", + "additionalProperties": false, + "properties": { + "name": { + "type": "string" + }, + "namespace": { + 
"type": "string" + }, + "prefix": { + "type": "string" + }, + "attribute": { + "type": "boolean", + "default": false + }, + "wrapped": { + "type": "boolean", + "default": false + } + } + }, + "externalDocs": { + "type": "object", + "additionalProperties": false, + "description": "information about external documentation", + "required": [ + "url" + ], + "properties": { + "description": { + "type": "string" + }, + "url": { + "type": "string", + "format": "uri" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + } + }, + "channelItem": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "minProperties": 1, + "properties": { + "$ref": { + "$ref": "#/definitions/ReferenceObject" + }, + "parameters": { + "type": "array", + "uniqueItems": true, + "minItems": 1, + "items": { + "$ref": "#/definitions/parameter" + } + }, + "publish": { + "$ref": "#/definitions/operation" + }, + "subscribe": { + "$ref": "#/definitions/operation" + }, + "deprecated": { + "type": "boolean", + "default": false + }, + "protocolInfo": { + "type": "object", + "additionalProperties": { + "type": "object" + } + } + } + }, + "parameter": { + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "schema": { + "$ref": "#/definitions/schema" + }, + "$ref": { + "$ref": "#/definitions/ReferenceObject" + } + } + }, + "operation": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "traits": { + "type": "array", + "items": { + "oneOf": [ + { "$ref": "#/definitions/Reference" }, + { "$ref": "#/definitions/operationTrait" }, + { + "type": "array", + "items": [ + { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "$ref": "#/definitions/operationTrait" + } + ] + }, + { + "type": "object", + "additionalItems": true + } + ] + } + ] + } + }, + "summary": { + "type": "string" + }, + "description": { + "type": "string" + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "operationId": { + "type": "string" + }, + "protocolInfo": { + "type": "object", + "additionalProperties": { + "type": "object" + } + }, + "message": { + "oneOf": [ + { + "$ref": "#/definitions/message" + }, + { + "type": "object", + "required": [ + "oneOf" + ], + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "oneOf": { + "type": "array", + "minItems": 2, + "items": { + "$ref": "#/definitions/message" + } + } + } + } + ] + } + } + }, + "message": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "schemaFormat": { + "type": "string" + }, + "contentType": { + "type": "string" + }, + "headers": { + "type": "object", 
+ "additionalProperties": { + "oneOf": [ + { "$ref": "#/definitions/Reference" }, + {"$ref": "#/definitions/schema" } + ] + } + }, + "payload": {}, + "correlationId": { + "oneOf": [ + { "$ref": "#/definitions/Reference" }, + { "$ref": "#/definitions/correlationId" } + ] + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "summary": { + "type": "string", + "description": "A brief summary of the message." + }, + "name": { + "type": "string", + "description": "Name of the message." + }, + "title": { + "type": "string", + "description": "A human-friendly title for the message." + }, + "description": { + "type": "string", + "description": "A longer description of the message. CommonMark is allowed." + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "deprecated": { + "type": "boolean", + "default": false + }, + "examples": { + "type": "array", + "items": { + "type": "object" + } + }, + "protocolInfo": { + "type": "object", + "additionalProperties": { + "type": "object" + } + }, + "traits": { + "type": "array", + "items": { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "$ref": "#/definitions/messageTrait" + }, + { + "type": "array", + "items": [ + { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "$ref": "#/definitions/messageTrait" + } + ] + }, + { + "type": "object", + "additionalItems": true + } + ] + } + ] + } + } + } + }, + "correlationId": { + "type": "object", + "required": [ + "location" + ], + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "description": { + "type": "string", + "description": "A optional description of the correlation ID. GitHub Flavored Markdown is allowed." 
+ }, + "location": { + "type": "string", + "description": "A runtime expression that specifies the location of the correlation ID", + "pattern": "^\\$message\\.(header|payload)#(/\\w+)+" + } + } + }, + "specificationExtension": { + "description": "Any property starting with x- is valid.", + "additionalProperties": true, + "additionalItems": true + }, + "tag": { + "type": "object", + "additionalProperties": false, + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "description": { + "type": "string" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + } + }, + "traits": { + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$ref": "#/definitions/operationTrait" }, + { "$ref": "#/definitions/messageTrait" } + ] + } + }, + "operationTrait": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "summary": { + "type": "string" + }, + "description": { + "type": "string" + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "operationId": { + "type": "string" + }, + "protocolInfo": { + "type": "object", + "additionalProperties": { + "type": "object" + } + } + } + }, + "messageTrait": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "schemaFormat": { + "type": "string" + }, + "contentType": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "$ref": "#/definitions/schema" + } + ] + } + }, + "correlationId": { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "$ref": "#/definitions/correlationId" + } + ] + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "summary": { + "type": "string", + "description": "A brief summary of the message." + }, + "name": { + "type": "string", + "description": "Name of the message." + }, + "title": { + "type": "string", + "description": "A human-friendly title for the message." + }, + "description": { + "type": "string", + "description": "A longer description of the message. CommonMark is allowed." 
+ }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "deprecated": { + "type": "boolean", + "default": false + }, + "examples": { + "type": "array", + "items": { + "type": "object" + } + }, + "protocolInfo": { + "type": "object", + "additionalProperties": { + "type": "object" + } + } + } + }, + "SecurityScheme": { + "oneOf": [ + { "$ref": "#/definitions/userPassword" }, + { "$ref": "#/definitions/apiKey" }, + { "$ref": "#/definitions/X509" }, + { "$ref": "#/definitions/symmetricEncryption" }, + { "$ref": "#/definitions/asymmetricEncryption" }, + { "$ref": "#/definitions/HTTPSecurityScheme" }, + { "$ref": "#/definitions/oauth2Flows" }, + { "$ref": "#/definitions/openIdConnect" } + ] + }, + "userPassword": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "userPassword" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "apiKey": { + "type": "object", + "required": [ + "type", + "in" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "apiKey" + ] + }, + "in": { + "type": "string", + "enum": [ + "user", + "password" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "X509": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "X509" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "symmetricEncryption": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "symmetricEncryption" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "asymmetricEncryption": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "asymmetricEncryption" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "HTTPSecurityScheme": { + "oneOf": [ + { "$ref": "#/definitions/NonBearerHTTPSecurityScheme" }, + { "$ref": "#/definitions/BearerHTTPSecurityScheme" }, + { "$ref": "#/definitions/APIKeyHTTPSecurityScheme" } + ] + }, + "NonBearerHTTPSecurityScheme": { + "not": { + "type": "object", + "properties": { + "scheme": { + "type": "string", + "enum": [ + "bearer" + ] + } + } + }, + "type": "object", + "required": [ + "scheme", + "type" + ], + "properties": { + "scheme": { + "type": "string" + }, + "description": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "http" + ] + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "BearerHTTPSecurityScheme": { + "type": "object", + "required": [ + "type", + "scheme" + ], + "properties": { + "scheme": { + "type": "string", + "enum": [ + "bearer" + ] + }, + "bearerFormat": { + "type": "string" + }, + 
"type": { + "type": "string", + "enum": [ + "http" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "APIKeyHTTPSecurityScheme": { + "type": "object", + "required": [ + "type", + "name", + "in" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "httpApiKey" + ] + }, + "name": { + "type": "string" + }, + "in": { + "type": "string", + "enum": [ + "header", + "query", + "cookie" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "oauth2Flows": { + "type": "object", + "required": [ "type", "flows" ], + "properties": { + "type": { + "type": "string", + "enum": [ "oauth2" ] + }, + "description": { + "type": "string" + }, + "flows": { + "type": "object", + "properties": { + "implicit": { + "allOf": [ + { "$ref": "#/definitions/oauth2Flow" }, + { + "required": [ + "authorizationUrl", + "scopes" + ] + }, + { "not": { "required": ["tokenUrl"] } } + ] + }, + "password": { + "allOf": [ + { "$ref": "#/definitions/oauth2Flow" }, + { + "required": [ + "tokenUrl", + "scopes" + ] + }, + { "not": { "required": ["authorizationUrl"] } } + ] + }, + "clientCredentials": { + "allOf": [ + { "$ref": "#/definitions/oauth2Flow" }, + { + "required": [ + "tokenUrl", + "scopes" + ] + }, + { "not": { "required": ["authorizationUrl"] } } + ] + }, + "authorizationCode": { + "allOf": [ + { "$ref": "#/definitions/oauth2Flow" }, + { + "required": [ + "authorizationUrl", + "tokenUrl", + "scopes" + ] + } + ] + } + }, + "additionalProperties": false, + "minProperties": 1 + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + } + }, + "oauth2Flow": { + "type": "object", + "properties": { + "authorizationUrl": { + "type": "string", + "format": "uri" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "refreshUrl": { + "type": "string", + "format": "uri" + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "oauth2Scopes": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "openIdConnect": { + "type": "object", + "required": [ + "type", + "openIdConnectUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "openIdConnect" + ] + }, + "description": { + "type": "string" + }, + "openIdConnectUrl": { + "type": "string", + "format": "uri" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "SecurityRequirement": { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + } + } + } +} diff --git a/vendor/github.com/asyncapi/spec-json-schemas/v2/schemas/2.0.0-rc2.json b/vendor/github.com/asyncapi/spec-json-schemas/v2/schemas/2.0.0-rc2.json new file mode 100644 index 00000000000..06f13716e5c --- /dev/null +++ b/vendor/github.com/asyncapi/spec-json-schemas/v2/schemas/2.0.0-rc2.json @@ -0,0 +1,1350 @@ +{ + "title": "AsyncAPI 2.0.0-rc2 schema.", + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "required": [ + "asyncapi", + 
"info", + "channels" + ], + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "asyncapi": { + "type": "string", + "enum": [ + "2.0.0-rc2" + ], + "description": "The AsyncAPI specification version of this document." + }, + "id": { + "type": "string", + "description": "A unique id representing the application.", + "format": "uri" + }, + "info": { + "$ref": "#/definitions/info" + }, + "servers": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/server" + } + }, + "defaultContentType": { + "type": "string" + }, + "channels": { + "$ref": "#/definitions/channels" + }, + "components": { + "$ref": "#/definitions/components" + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "definitions": { + "Reference": { + "type": "object", + "required": [ + "$ref" + ], + "properties": { + "$ref": { + "$ref": "#/definitions/ReferenceObject" + } + } + }, + "ReferenceObject": { + "type": "string", + "format": "uri-reference" + }, + "info": { + "type": "object", + "description": "General information about the API.", + "required": [ + "version", + "title" + ], + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "title": { + "type": "string", + "description": "A unique and precise title of the API." + }, + "version": { + "type": "string", + "description": "A semantic version number of the API." + }, + "description": { + "type": "string", + "description": "A longer description of the API. Should be different from the title. CommonMark is allowed." + }, + "termsOfService": { + "type": "string", + "description": "A URL to the Terms of Service for the API. MUST be in the format of a URL.", + "format": "uri" + }, + "contact": { + "$ref": "#/definitions/contact" + }, + "license": { + "$ref": "#/definitions/license" + } + } + }, + "contact": { + "type": "object", + "description": "Contact information for the owners of the API.", + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The identifying name of the contact person/organization." + }, + "url": { + "type": "string", + "description": "The URL pointing to the contact information.", + "format": "uri" + }, + "email": { + "type": "string", + "description": "The email address of the contact person/organization.", + "format": "email" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + } + }, + "license": { + "type": "object", + "required": [ + "name" + ], + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The name of the license type. It's encouraged to use an OSI compatible license." 
+ }, + "url": { + "type": "string", + "description": "The URL pointing to the license.", + "format": "uri" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + } + }, + "server": { + "type": "object", + "description": "An object representing a Server.", + "required": [ + "url", + "protocol" + ], + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "url": { + "type": "string" + }, + "description": { + "type": "string" + }, + "protocol": { + "type": "string", + "description": "The transfer protocol." + }, + "protocolVersion": { + "type": "string" + }, + "variables": { + "$ref": "#/definitions/serverVariables" + }, + "security": { + "type": "array", + "items": { + "$ref": "#/definitions/SecurityRequirement" + } + }, + "bindings": { + "$ref": "#/definitions/bindingsObject" + } + } + }, + "serverVariables": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/serverVariable" + } + }, + "serverVariable": { + "type": "object", + "description": "An object representing a Server Variable for server URL template substitution.", + "minProperties": 1, + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "enum": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + }, + "default": { + "type": "string" + }, + "description": { + "type": "string" + }, + "examples": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "channels": { + "type": "object", + "propertyNames": { + "type": "string", + "format": "uri-template", + "minLength": 1 + }, + "additionalProperties": { + "$ref": "#/definitions/channelItem" + } + }, + "components": { + "type": "object", + "description": "An object to hold a set of reusable objects for different aspects of the AsyncAPI Specification.", + "additionalProperties": false, + "properties": { + "schemas": { + "$ref": "#/definitions/schemas" + }, + "messages": { + "$ref": "#/definitions/messages" + }, + "securitySchemes": { + "type": "object", + "patternProperties": { + "^[\\w\\d\\.\\-_]+$": { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "$ref": "#/definitions/SecurityScheme" + } + ] + } + } + }, + "parameters": { + "$ref": "#/definitions/parameters" + }, + "correlationIds": { + "type": "object", + "patternProperties": { + "^[\\w\\d\\.\\-_]+$": { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "$ref": "#/definitions/correlationId" + } + ] + } + } + }, + "operationTraits": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/operationTrait" + } + }, + "messageTraits": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/messageTrait" + } + }, + "serverBindings": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/bindingsObject" + } + }, + "channelBindings": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/bindingsObject" + } + }, + "operationBindings": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/bindingsObject" + } + }, + "messageBindings": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/bindingsObject" + } + } + } + }, + "schemas": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "description": "JSON 
objects describing schemas the API uses." + }, + "messages": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/message" + }, + "description": "JSON objects describing the messages being consumed and produced by the API." + }, + "parameters": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/parameter" + }, + "description": "JSON objects describing re-usable channel parameters." + }, + "schema": { + "allOf": [ + { + "$ref": "http://json-schema.org/draft-07/schema#" + }, + { + "type": "object", + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "items": { + "anyOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + } + ], + "default": {} + }, + "allOf": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + }, + "oneOf": { + "type": "array", + "minItems": 2, + "items": { + "$ref": "#/definitions/schema" + } + }, + "anyOf": { + "type": "array", + "minItems": 2, + "items": { + "$ref": "#/definitions/schema" + } + }, + "not": { + "$ref": "#/definitions/schema" + }, + "properties": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "default": {} + }, + "propertyNames": { + "$ref": "#/definitions/schema" + }, + "contains": { + "$ref": "#/definitions/schema" + }, + "discriminator": { + "type": "string" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "deprecated": { + "type": "boolean", + "default": false + } + } + } + ] + }, + "externalDocs": { + "type": "object", + "additionalProperties": false, + "description": "information about external documentation", + "required": [ + "url" + ], + "properties": { + "description": { + "type": "string" + }, + "url": { + "type": "string", + "format": "uri" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + } + }, + "channelItem": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "minProperties": 1, + "properties": { + "$ref": { + "$ref": "#/definitions/ReferenceObject" + }, + "parameters": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/parameter" + } + }, + "description": { + "type": "string", + "description": "A description of the channel." + }, + "publish": { + "$ref": "#/definitions/operation" + }, + "subscribe": { + "$ref": "#/definitions/operation" + }, + "deprecated": { + "type": "boolean", + "default": false + }, + "bindings": { + "$ref": "#/definitions/bindingsObject" + } + } + }, + "parameter": { + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." 
+ }, + "schema": { + "$ref": "#/definitions/schema" + }, + "location": { + "type": "string", + "description": "A runtime expression that specifies the location of the parameter value", + "pattern": "^\\$message\\.(header|payload)#(\\/(([^\\/~])|(~[01]))*)*" + }, + "$ref": { + "$ref": "#/definitions/ReferenceObject" + } + } + }, + "operation": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "traits": { + "type": "array", + "items": { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "$ref": "#/definitions/operationTrait" + }, + { + "type": "array", + "items": [ + { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "$ref": "#/definitions/operationTrait" + } + ] + }, + { + "type": "object", + "additionalItems": true + } + ] + } + ] + } + }, + "summary": { + "type": "string" + }, + "description": { + "type": "string" + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "operationId": { + "type": "string" + }, + "bindings": { + "$ref": "#/definitions/bindingsObject" + }, + "message": { + "$ref": "#/definitions/message" + } + } + }, + "message": { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "oneOf": [ + { + "type": "object", + "required": [ + "oneOf" + ], + "additionalProperties": false, + "properties": { + "oneOf": { + "type": "array", + "items": { + "$ref": "#/definitions/message" + } + } + } + }, + { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "schemaFormat": { + "type": "string" + }, + "contentType": { + "type": "string" + }, + "headers": { + "$ref": "#/definitions/schema" + }, + "payload": {}, + "correlationId": { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "$ref": "#/definitions/correlationId" + } + ] + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "summary": { + "type": "string", + "description": "A brief summary of the message." + }, + "name": { + "type": "string", + "description": "Name of the message." + }, + "title": { + "type": "string", + "description": "A human-friendly title for the message." + }, + "description": { + "type": "string", + "description": "A longer description of the message. CommonMark is allowed." 
+ }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "deprecated": { + "type": "boolean", + "default": false + }, + "examples": { + "type": "array", + "items": { + "type": "object" + } + }, + "bindings": { + "$ref": "#/definitions/bindingsObject" + }, + "traits": { + "type": "array", + "items": { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "$ref": "#/definitions/messageTrait" + }, + { + "type": "array", + "items": [ + { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "$ref": "#/definitions/messageTrait" + } + ] + }, + { + "type": "object", + "additionalItems": true + } + ] + } + ] + } + } + } + } + ] + } + ] + }, + "bindingsObject": { + "type": "object", + "additionalProperties": true, + "properties": { + "http": {}, + "ws": {}, + "amqp": {}, + "amqp1": {}, + "mqtt": {}, + "mqtt5": {}, + "kafka": {}, + "nats": {}, + "jms": {}, + "sns": {}, + "sqs": {}, + "stomp": {}, + "redis": {} + } + }, + "correlationId": { + "type": "object", + "required": [ + "location" + ], + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "description": { + "type": "string", + "description": "A optional description of the correlation ID. GitHub Flavored Markdown is allowed." + }, + "location": { + "type": "string", + "description": "A runtime expression that specifies the location of the correlation ID", + "pattern": "^\\$message\\.(header|payload)#(\\/(([^\\/~])|(~[01]))*)*" + } + } + }, + "specificationExtension": { + "description": "Any property starting with x- is valid.", + "additionalProperties": true, + "additionalItems": true + }, + "tag": { + "type": "object", + "additionalProperties": false, + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "description": { + "type": "string" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + } + }, + "operationTrait": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "summary": { + "type": "string" + }, + "description": { + "type": "string" + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "operationId": { + "type": "string" + }, + "bindings": { + "$ref": "#/definitions/bindingsObject" + } + } + }, + "messageTrait": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "schemaFormat": { + "type": "string" + }, + "contentType": { + "type": "string" + }, + "headers": { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "$ref": "#/definitions/schema" + } + ] + }, + "correlationId": { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "$ref": "#/definitions/correlationId" + } + ] + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "summary": { + "type": "string", + "description": "A brief summary of the message." + }, + "name": { + "type": "string", + "description": "Name of the message." 
+ }, + "title": { + "type": "string", + "description": "A human-friendly title for the message." + }, + "description": { + "type": "string", + "description": "A longer description of the message. CommonMark is allowed." + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "deprecated": { + "type": "boolean", + "default": false + }, + "examples": { + "type": "array", + "items": { + "type": "object" + } + }, + "bindings": { + "$ref": "#/definitions/bindingsObject" + } + } + }, + "SecurityScheme": { + "oneOf": [ + { + "$ref": "#/definitions/userPassword" + }, + { + "$ref": "#/definitions/apiKey" + }, + { + "$ref": "#/definitions/X509" + }, + { + "$ref": "#/definitions/symmetricEncryption" + }, + { + "$ref": "#/definitions/asymmetricEncryption" + }, + { + "$ref": "#/definitions/HTTPSecurityScheme" + }, + { + "$ref": "#/definitions/oauth2Flows" + }, + { + "$ref": "#/definitions/openIdConnect" + } + ] + }, + "userPassword": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "userPassword" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "apiKey": { + "type": "object", + "required": [ + "type", + "in" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "apiKey" + ] + }, + "in": { + "type": "string", + "enum": [ + "user", + "password" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "X509": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "X509" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "symmetricEncryption": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "symmetricEncryption" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "asymmetricEncryption": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "asymmetricEncryption" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "HTTPSecurityScheme": { + "oneOf": [ + { + "$ref": "#/definitions/NonBearerHTTPSecurityScheme" + }, + { + "$ref": "#/definitions/BearerHTTPSecurityScheme" + }, + { + "$ref": "#/definitions/APIKeyHTTPSecurityScheme" + } + ] + }, + "NonBearerHTTPSecurityScheme": { + "not": { + "type": "object", + "properties": { + "scheme": { + "type": "string", + "enum": [ + "bearer" + ] + } + } + }, + "type": "object", + "required": [ + "scheme", + "type" + ], + "properties": { + "scheme": { + "type": "string" + }, + "description": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "http" + ] + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + 
}, + "BearerHTTPSecurityScheme": { + "type": "object", + "required": [ + "type", + "scheme" + ], + "properties": { + "scheme": { + "type": "string", + "enum": [ + "bearer" + ] + }, + "bearerFormat": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "http" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "APIKeyHTTPSecurityScheme": { + "type": "object", + "required": [ + "type", + "name", + "in" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "httpApiKey" + ] + }, + "name": { + "type": "string" + }, + "in": { + "type": "string", + "enum": [ + "header", + "query", + "cookie" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "oauth2Flows": { + "type": "object", + "required": [ + "type", + "flows" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "description": { + "type": "string" + }, + "flows": { + "type": "object", + "properties": { + "implicit": { + "allOf": [ + { + "$ref": "#/definitions/oauth2Flow" + }, + { + "required": [ + "authorizationUrl", + "scopes" + ] + }, + { + "not": { + "required": [ + "tokenUrl" + ] + } + } + ] + }, + "password": { + "allOf": [ + { + "$ref": "#/definitions/oauth2Flow" + }, + { + "required": [ + "tokenUrl", + "scopes" + ] + }, + { + "not": { + "required": [ + "authorizationUrl" + ] + } + } + ] + }, + "clientCredentials": { + "allOf": [ + { + "$ref": "#/definitions/oauth2Flow" + }, + { + "required": [ + "tokenUrl", + "scopes" + ] + }, + { + "not": { + "required": [ + "authorizationUrl" + ] + } + } + ] + }, + "authorizationCode": { + "allOf": [ + { + "$ref": "#/definitions/oauth2Flow" + }, + { + "required": [ + "authorizationUrl", + "tokenUrl", + "scopes" + ] + } + ] + } + }, + "additionalProperties": false, + "minProperties": 1 + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + } + }, + "oauth2Flow": { + "type": "object", + "properties": { + "authorizationUrl": { + "type": "string", + "format": "uri" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "refreshUrl": { + "type": "string", + "format": "uri" + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "oauth2Scopes": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "openIdConnect": { + "type": "object", + "required": [ + "type", + "openIdConnectUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "openIdConnect" + ] + }, + "description": { + "type": "string" + }, + "openIdConnectUrl": { + "type": "string", + "format": "uri" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "SecurityRequirement": { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + } + } + } +} \ No newline at end of file diff --git a/vendor/github.com/asyncapi/spec-json-schemas/v2/schemas/2.0.0.json b/vendor/github.com/asyncapi/spec-json-schemas/v2/schemas/2.0.0.json 
new file mode 100644 index 00000000000..21e9b20a705 --- /dev/null +++ b/vendor/github.com/asyncapi/spec-json-schemas/v2/schemas/2.0.0.json @@ -0,0 +1,1381 @@ +{ + "title": "AsyncAPI 2.0.0 schema.", + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "required": [ + "asyncapi", + "info", + "channels" + ], + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "asyncapi": { + "type": "string", + "enum": [ + "2.0.0" + ], + "description": "The AsyncAPI specification version of this document." + }, + "id": { + "type": "string", + "description": "A unique id representing the application.", + "format": "uri" + }, + "info": { + "$ref": "#/definitions/info" + }, + "servers": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/server" + } + }, + "defaultContentType": { + "type": "string" + }, + "channels": { + "$ref": "#/definitions/channels" + }, + "components": { + "$ref": "#/definitions/components" + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "definitions": { + "Reference": { + "type": "object", + "required": [ + "$ref" + ], + "properties": { + "$ref": { + "$ref": "#/definitions/ReferenceObject" + } + } + }, + "ReferenceObject": { + "type": "string", + "format": "uri-reference" + }, + "info": { + "type": "object", + "description": "General information about the API.", + "required": [ + "version", + "title" + ], + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "title": { + "type": "string", + "description": "A unique and precise title of the API." + }, + "version": { + "type": "string", + "description": "A semantic version number of the API." + }, + "description": { + "type": "string", + "description": "A longer description of the API. Should be different from the title. CommonMark is allowed." + }, + "termsOfService": { + "type": "string", + "description": "A URL to the Terms of Service for the API. MUST be in the format of a URL.", + "format": "uri" + }, + "contact": { + "$ref": "#/definitions/contact" + }, + "license": { + "$ref": "#/definitions/license" + } + } + }, + "contact": { + "type": "object", + "description": "Contact information for the owners of the API.", + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The identifying name of the contact person/organization." + }, + "url": { + "type": "string", + "description": "The URL pointing to the contact information.", + "format": "uri" + }, + "email": { + "type": "string", + "description": "The email address of the contact person/organization.", + "format": "email" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + } + }, + "license": { + "type": "object", + "required": [ + "name" + ], + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The name of the license type. It's encouraged to use an OSI compatible license." 
+ }, + "url": { + "type": "string", + "description": "The URL pointing to the license.", + "format": "uri" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + } + }, + "server": { + "type": "object", + "description": "An object representing a Server.", + "required": [ + "url", + "protocol" + ], + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "url": { + "type": "string" + }, + "description": { + "type": "string" + }, + "protocol": { + "type": "string", + "description": "The transfer protocol." + }, + "protocolVersion": { + "type": "string" + }, + "variables": { + "$ref": "#/definitions/serverVariables" + }, + "security": { + "type": "array", + "items": { + "$ref": "#/definitions/SecurityRequirement" + } + }, + "bindings": { + "$ref": "#/definitions/bindingsObject" + } + } + }, + "serverVariables": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/serverVariable" + } + }, + "serverVariable": { + "type": "object", + "description": "An object representing a Server Variable for server URL template substitution.", + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "enum": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + }, + "default": { + "type": "string" + }, + "description": { + "type": "string" + }, + "examples": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "channels": { + "type": "object", + "propertyNames": { + "type": "string", + "format": "uri-template", + "minLength": 1 + }, + "additionalProperties": { + "$ref": "#/definitions/channelItem" + } + }, + "components": { + "type": "object", + "description": "An object to hold a set of reusable objects for different aspects of the AsyncAPI Specification.", + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "schemas": { + "$ref": "#/definitions/schemas" + }, + "messages": { + "$ref": "#/definitions/messages" + }, + "securitySchemes": { + "type": "object", + "patternProperties": { + "^[\\w\\d\\.\\-_]+$": { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "$ref": "#/definitions/SecurityScheme" + } + ] + } + } + }, + "parameters": { + "$ref": "#/definitions/parameters" + }, + "correlationIds": { + "type": "object", + "patternProperties": { + "^[\\w\\d\\.\\-_]+$": { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "$ref": "#/definitions/correlationId" + } + ] + } + } + }, + "operationTraits": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/operationTrait" + } + }, + "messageTraits": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/messageTrait" + } + }, + "serverBindings": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/bindingsObject" + } + }, + "channelBindings": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/bindingsObject" + } + }, + "operationBindings": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/bindingsObject" + } + }, + "messageBindings": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/bindingsObject" + } + } + } + }, + "schemas": { + "type": "object", + 
"additionalProperties": { + "$ref": "#/definitions/schema" + }, + "description": "JSON objects describing schemas the API uses." + }, + "messages": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/message" + }, + "description": "JSON objects describing the messages being consumed and produced by the API." + }, + "parameters": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/parameter" + }, + "description": "JSON objects describing re-usable channel parameters." + }, + "schema": { + "allOf": [ + { + "$ref": "http://json-schema.org/draft-07/schema#" + }, + { + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "additionalProperties": { + "anyOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "boolean" + } + ], + "default": {} + }, + "items": { + "anyOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + } + ], + "default": {} + }, + "allOf": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + }, + "oneOf": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + }, + "anyOf": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + }, + "not": { + "$ref": "#/definitions/schema" + }, + "properties": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "default": {} + }, + "propertyNames": { + "$ref": "#/definitions/schema" + }, + "contains": { + "$ref": "#/definitions/schema" + }, + "discriminator": { + "type": "string" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "deprecated": { + "type": "boolean", + "default": false + } + } + } + ] + }, + "externalDocs": { + "type": "object", + "additionalProperties": false, + "description": "information about external documentation", + "required": [ + "url" + ], + "properties": { + "description": { + "type": "string" + }, + "url": { + "type": "string", + "format": "uri" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + } + }, + "channelItem": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "$ref": { + "$ref": "#/definitions/ReferenceObject" + }, + "parameters": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/parameter" + } + }, + "description": { + "type": "string", + "description": "A description of the channel." + }, + "publish": { + "$ref": "#/definitions/operation" + }, + "subscribe": { + "$ref": "#/definitions/operation" + }, + "deprecated": { + "type": "boolean", + "default": false + }, + "bindings": { + "$ref": "#/definitions/bindingsObject" + } + } + }, + "parameter": { + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." 
+ }, + "schema": { + "$ref": "#/definitions/schema" + }, + "location": { + "type": "string", + "description": "A runtime expression that specifies the location of the parameter value", + "pattern": "^\\$message\\.(header|payload)#(\\/(([^\\/~])|(~[01]))*)*" + }, + "$ref": { + "$ref": "#/definitions/ReferenceObject" + } + } + }, + "operation": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "traits": { + "type": "array", + "items": { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "$ref": "#/definitions/operationTrait" + }, + { + "type": "array", + "items": [ + { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "$ref": "#/definitions/operationTrait" + } + ] + }, + { + "type": "object", + "additionalItems": true + } + ] + } + ] + } + }, + "summary": { + "type": "string" + }, + "description": { + "type": "string" + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "operationId": { + "type": "string" + }, + "bindings": { + "$ref": "#/definitions/bindingsObject" + }, + "message": { + "$ref": "#/definitions/message" + } + } + }, + "message": { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "oneOf": [ + { + "type": "object", + "required": [ + "oneOf" + ], + "additionalProperties": false, + "properties": { + "oneOf": { + "type": "array", + "items": { + "$ref": "#/definitions/message" + } + } + } + }, + { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "schemaFormat": { + "type": "string" + }, + "contentType": { + "type": "string" + }, + "headers": { + "allOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "properties": { + "type": { + "const": "object" + } + } + } + ] + }, + "payload": {}, + "correlationId": { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "$ref": "#/definitions/correlationId" + } + ] + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "summary": { + "type": "string", + "description": "A brief summary of the message." + }, + "name": { + "type": "string", + "description": "Name of the message." + }, + "title": { + "type": "string", + "description": "A human-friendly title for the message." + }, + "description": { + "type": "string", + "description": "A longer description of the message. CommonMark is allowed." 
+ }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "deprecated": { + "type": "boolean", + "default": false + }, + "examples": { + "type": "array", + "items": { + "type": "object", + "additionalProperties": false, + "properties": { + "headers": { + "type": "object" + }, + "payload": {} + } + } + }, + "bindings": { + "$ref": "#/definitions/bindingsObject" + }, + "traits": { + "type": "array", + "items": { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "$ref": "#/definitions/messageTrait" + }, + { + "type": "array", + "items": [ + { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "$ref": "#/definitions/messageTrait" + } + ] + }, + { + "type": "object", + "additionalItems": true + } + ] + } + ] + } + } + } + } + ] + } + ] + }, + "bindingsObject": { + "type": "object", + "additionalProperties": true, + "properties": { + "http": {}, + "ws": {}, + "amqp": {}, + "amqp1": {}, + "mqtt": {}, + "mqtt5": {}, + "kafka": {}, + "nats": {}, + "jms": {}, + "sns": {}, + "sqs": {}, + "stomp": {}, + "redis": {} + } + }, + "correlationId": { + "type": "object", + "required": [ + "location" + ], + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "description": { + "type": "string", + "description": "A optional description of the correlation ID. GitHub Flavored Markdown is allowed." + }, + "location": { + "type": "string", + "description": "A runtime expression that specifies the location of the correlation ID", + "pattern": "^\\$message\\.(header|payload)#(\\/(([^\\/~])|(~[01]))*)*" + } + } + }, + "specificationExtension": { + "description": "Any property starting with x- is valid.", + "additionalProperties": true, + "additionalItems": true + }, + "tag": { + "type": "object", + "additionalProperties": false, + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "description": { + "type": "string" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + } + }, + "operationTrait": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "summary": { + "type": "string" + }, + "description": { + "type": "string" + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "operationId": { + "type": "string" + }, + "bindings": { + "$ref": "#/definitions/bindingsObject" + } + } + }, + "messageTrait": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "schemaFormat": { + "type": "string" + }, + "contentType": { + "type": "string" + }, + "headers": { + "allOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "properties": { + "type": { + "const": "object" + } + } + } + ] + }, + "correlationId": { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "$ref": "#/definitions/correlationId" + } + ] + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "summary": { + "type": "string", + "description": "A brief summary of the message." 
+ }, + "name": { + "type": "string", + "description": "Name of the message." + }, + "title": { + "type": "string", + "description": "A human-friendly title for the message." + }, + "description": { + "type": "string", + "description": "A longer description of the message. CommonMark is allowed." + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "deprecated": { + "type": "boolean", + "default": false + }, + "examples": { + "type": "array", + "items": { + "type": "object" + } + }, + "bindings": { + "$ref": "#/definitions/bindingsObject" + } + } + }, + "SecurityScheme": { + "oneOf": [ + { + "$ref": "#/definitions/userPassword" + }, + { + "$ref": "#/definitions/apiKey" + }, + { + "$ref": "#/definitions/X509" + }, + { + "$ref": "#/definitions/symmetricEncryption" + }, + { + "$ref": "#/definitions/asymmetricEncryption" + }, + { + "$ref": "#/definitions/HTTPSecurityScheme" + }, + { + "$ref": "#/definitions/oauth2Flows" + }, + { + "$ref": "#/definitions/openIdConnect" + } + ] + }, + "userPassword": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "userPassword" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "apiKey": { + "type": "object", + "required": [ + "type", + "in" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "apiKey" + ] + }, + "in": { + "type": "string", + "enum": [ + "user", + "password" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "X509": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "X509" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "symmetricEncryption": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "symmetricEncryption" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "asymmetricEncryption": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "asymmetricEncryption" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "HTTPSecurityScheme": { + "oneOf": [ + { + "$ref": "#/definitions/NonBearerHTTPSecurityScheme" + }, + { + "$ref": "#/definitions/BearerHTTPSecurityScheme" + }, + { + "$ref": "#/definitions/APIKeyHTTPSecurityScheme" + } + ] + }, + "NonBearerHTTPSecurityScheme": { + "not": { + "type": "object", + "properties": { + "scheme": { + "type": "string", + "enum": [ + "bearer" + ] + } + } + }, + "type": "object", + "required": [ + "scheme", + "type" + ], + "properties": { + "scheme": { + "type": "string" + }, + "description": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "http" + ] + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": 
"#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "BearerHTTPSecurityScheme": { + "type": "object", + "required": [ + "type", + "scheme" + ], + "properties": { + "scheme": { + "type": "string", + "enum": [ + "bearer" + ] + }, + "bearerFormat": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "http" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "APIKeyHTTPSecurityScheme": { + "type": "object", + "required": [ + "type", + "name", + "in" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "httpApiKey" + ] + }, + "name": { + "type": "string" + }, + "in": { + "type": "string", + "enum": [ + "header", + "query", + "cookie" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "oauth2Flows": { + "type": "object", + "required": [ + "type", + "flows" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "description": { + "type": "string" + }, + "flows": { + "type": "object", + "properties": { + "implicit": { + "allOf": [ + { + "$ref": "#/definitions/oauth2Flow" + }, + { + "required": [ + "authorizationUrl", + "scopes" + ] + }, + { + "not": { + "required": [ + "tokenUrl" + ] + } + } + ] + }, + "password": { + "allOf": [ + { + "$ref": "#/definitions/oauth2Flow" + }, + { + "required": [ + "tokenUrl", + "scopes" + ] + }, + { + "not": { + "required": [ + "authorizationUrl" + ] + } + } + ] + }, + "clientCredentials": { + "allOf": [ + { + "$ref": "#/definitions/oauth2Flow" + }, + { + "required": [ + "tokenUrl", + "scopes" + ] + }, + { + "not": { + "required": [ + "authorizationUrl" + ] + } + } + ] + }, + "authorizationCode": { + "allOf": [ + { + "$ref": "#/definitions/oauth2Flow" + }, + { + "required": [ + "authorizationUrl", + "tokenUrl", + "scopes" + ] + } + ] + } + }, + "additionalProperties": false + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + } + }, + "oauth2Flow": { + "type": "object", + "properties": { + "authorizationUrl": { + "type": "string", + "format": "uri" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "refreshUrl": { + "type": "string", + "format": "uri" + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "oauth2Scopes": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "openIdConnect": { + "type": "object", + "required": [ + "type", + "openIdConnectUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "openIdConnect" + ] + }, + "description": { + "type": "string" + }, + "openIdConnectUrl": { + "type": "string", + "format": "uri" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "SecurityRequirement": { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + } + } + } +} \ No newline at end of file diff --git a/vendor/github.com/asyncapi/spec-json-schemas/v2/schemas/2.1.0.json 
b/vendor/github.com/asyncapi/spec-json-schemas/v2/schemas/2.1.0.json new file mode 100644 index 00000000000..72b351a22fd --- /dev/null +++ b/vendor/github.com/asyncapi/spec-json-schemas/v2/schemas/2.1.0.json @@ -0,0 +1,1499 @@ +{ + "title": "AsyncAPI 2.1.0 schema.", + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "required": [ + "asyncapi", + "info", + "channels" + ], + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "asyncapi": { + "type": "string", + "enum": [ + "2.1.0" + ], + "description": "The AsyncAPI specification version of this document." + }, + "id": { + "type": "string", + "description": "A unique id representing the application.", + "format": "uri" + }, + "info": { + "$ref": "#/definitions/info" + }, + "servers": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/server" + } + }, + "defaultContentType": { + "type": "string" + }, + "channels": { + "$ref": "#/definitions/channels" + }, + "components": { + "$ref": "#/definitions/components" + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "definitions": { + "Reference": { + "type": "object", + "required": [ + "$ref" + ], + "properties": { + "$ref": { + "$ref": "#/definitions/ReferenceObject" + } + } + }, + "ReferenceObject": { + "type": "string", + "format": "uri-reference" + }, + "info": { + "type": "object", + "description": "General information about the API.", + "required": [ + "version", + "title" + ], + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "title": { + "type": "string", + "description": "A unique and precise title of the API." + }, + "version": { + "type": "string", + "description": "A semantic version number of the API." + }, + "description": { + "type": "string", + "description": "A longer description of the API. Should be different from the title. CommonMark is allowed." + }, + "termsOfService": { + "type": "string", + "description": "A URL to the Terms of Service for the API. MUST be in the format of a URL.", + "format": "uri" + }, + "contact": { + "$ref": "#/definitions/contact" + }, + "license": { + "$ref": "#/definitions/license" + } + } + }, + "contact": { + "type": "object", + "description": "Contact information for the owners of the API.", + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The identifying name of the contact person/organization." + }, + "url": { + "type": "string", + "description": "The URL pointing to the contact information.", + "format": "uri" + }, + "email": { + "type": "string", + "description": "The email address of the contact person/organization.", + "format": "email" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + } + }, + "license": { + "type": "object", + "required": [ + "name" + ], + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The name of the license type. It's encouraged to use an OSI compatible license." 
+ }, + "url": { + "type": "string", + "description": "The URL pointing to the license.", + "format": "uri" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + } + }, + "server": { + "type": "object", + "description": "An object representing a Server.", + "required": [ + "url", + "protocol" + ], + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "url": { + "type": "string" + }, + "description": { + "type": "string" + }, + "protocol": { + "type": "string", + "description": "The transfer protocol." + }, + "protocolVersion": { + "type": "string" + }, + "variables": { + "$ref": "#/definitions/serverVariables" + }, + "security": { + "type": "array", + "items": { + "$ref": "#/definitions/SecurityRequirement" + } + }, + "bindings": { + "$ref": "#/definitions/bindingsObject" + } + } + }, + "serverVariables": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/serverVariable" + } + }, + "serverVariable": { + "type": "object", + "description": "An object representing a Server Variable for server URL template substitution.", + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "enum": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + }, + "default": { + "type": "string" + }, + "description": { + "type": "string" + }, + "examples": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "channels": { + "type": "object", + "propertyNames": { + "type": "string", + "format": "uri-template", + "minLength": 1 + }, + "additionalProperties": { + "$ref": "#/definitions/channelItem" + } + }, + "components": { + "type": "object", + "description": "An object to hold a set of reusable objects for different aspects of the AsyncAPI Specification.", + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "schemas": { + "$ref": "#/definitions/schemas" + }, + "messages": { + "$ref": "#/definitions/messages" + }, + "securitySchemes": { + "type": "object", + "patternProperties": { + "^[\\w\\d\\.\\-_]+$": { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "$ref": "#/definitions/SecurityScheme" + } + ] + } + } + }, + "parameters": { + "$ref": "#/definitions/parameters" + }, + "correlationIds": { + "type": "object", + "patternProperties": { + "^[\\w\\d\\.\\-_]+$": { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "$ref": "#/definitions/correlationId" + } + ] + } + } + }, + "operationTraits": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/operationTrait" + } + }, + "messageTraits": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/messageTrait" + } + }, + "serverBindings": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/bindingsObject" + } + }, + "channelBindings": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/bindingsObject" + } + }, + "operationBindings": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/bindingsObject" + } + }, + "messageBindings": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/bindingsObject" + } + } + } + }, + "schemas": { + "type": "object", + 
"additionalProperties": { + "$ref": "#/definitions/schema" + }, + "description": "JSON objects describing schemas the API uses." + }, + "messages": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/message" + }, + "description": "JSON objects describing the messages being consumed and produced by the API." + }, + "parameters": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/parameter" + }, + "description": "JSON objects describing re-usable channel parameters." + }, + "schema": { + "allOf": [ + { + "$ref": "http://json-schema.org/draft-07/schema#" + }, + { + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "additionalProperties": { + "anyOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "boolean" + } + ], + "default": {} + }, + "items": { + "anyOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + } + ], + "default": {} + }, + "allOf": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + }, + "oneOf": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + }, + "anyOf": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + }, + "not": { + "$ref": "#/definitions/schema" + }, + "properties": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "default": {} + }, + "propertyNames": { + "$ref": "#/definitions/schema" + }, + "contains": { + "$ref": "#/definitions/schema" + }, + "discriminator": { + "type": "string" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "deprecated": { + "type": "boolean", + "default": false + } + } + } + ] + }, + "externalDocs": { + "type": "object", + "additionalProperties": false, + "description": "information about external documentation", + "required": [ + "url" + ], + "properties": { + "description": { + "type": "string" + }, + "url": { + "type": "string", + "format": "uri" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + } + }, + "channelItem": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "$ref": { + "$ref": "#/definitions/ReferenceObject" + }, + "parameters": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/parameter" + } + }, + "description": { + "type": "string", + "description": "A description of the channel." + }, + "publish": { + "$ref": "#/definitions/operation" + }, + "subscribe": { + "$ref": "#/definitions/operation" + }, + "deprecated": { + "type": "boolean", + "default": false + }, + "bindings": { + "$ref": "#/definitions/bindingsObject" + } + } + }, + "parameter": { + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." 
+ }, + "schema": { + "$ref": "#/definitions/schema" + }, + "location": { + "type": "string", + "description": "A runtime expression that specifies the location of the parameter value", + "pattern": "^\\$message\\.(header|payload)#(\\/(([^\\/~])|(~[01]))*)*" + }, + "$ref": { + "$ref": "#/definitions/ReferenceObject" + } + } + }, + "operation": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "traits": { + "type": "array", + "items": { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "$ref": "#/definitions/operationTrait" + }, + { + "type": "array", + "items": [ + { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "$ref": "#/definitions/operationTrait" + } + ] + }, + { + "type": "object", + "additionalItems": true + } + ] + } + ] + } + }, + "summary": { + "type": "string" + }, + "description": { + "type": "string" + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "operationId": { + "type": "string" + }, + "bindings": { + "$ref": "#/definitions/bindingsObject" + }, + "message": { + "$ref": "#/definitions/message" + } + } + }, + "message": { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "oneOf": [ + { + "type": "object", + "required": [ + "oneOf" + ], + "additionalProperties": false, + "properties": { + "oneOf": { + "type": "array", + "items": { + "$ref": "#/definitions/message" + } + } + } + }, + { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "schemaFormat": { + "type": "string" + }, + "contentType": { + "type": "string" + }, + "headers": { + "allOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "properties": { + "type": { + "const": "object" + } + } + } + ] + }, + "payload": {}, + "correlationId": { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "$ref": "#/definitions/correlationId" + } + ] + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "summary": { + "type": "string", + "description": "A brief summary of the message." + }, + "name": { + "type": "string", + "description": "Name of the message." + }, + "title": { + "type": "string", + "description": "A human-friendly title for the message." + }, + "description": { + "type": "string", + "description": "A longer description of the message. CommonMark is allowed." + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "deprecated": { + "type": "boolean", + "default": false + }, + "examples": { + "type": "array", + "items": { + "type": "object", + "additionalProperties": false, + "anyOf": [ + {"required": ["payload"] }, + {"required": ["headers"] } + ], + "properties": { + "name": { + "type": "string", + "description": "Machine readable name of the message example." + }, + "summary": { + "type": "string", + "description": "A brief summary of the message example." 
+ }, + "headers": { + "type": "object" + }, + "payload": {} + } + } + }, + "bindings": { + "$ref": "#/definitions/bindingsObject" + }, + "traits": { + "type": "array", + "items": { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "$ref": "#/definitions/messageTrait" + }, + { + "type": "array", + "items": [ + { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "$ref": "#/definitions/messageTrait" + } + ] + }, + { + "type": "object", + "additionalItems": true + } + ] + } + ] + } + } + } + } + ] + } + ] + }, + "bindingsObject": { + "type": "object", + "additionalProperties": true, + "properties": { + "http": {}, + "ws": {}, + "amqp": {}, + "amqp1": {}, + "mqtt": {}, + "mqtt5": {}, + "kafka": {}, + "nats": {}, + "jms": {}, + "sns": {}, + "sqs": {}, + "stomp": {}, + "redis": {}, + "ibmmq": {} + } + }, + "correlationId": { + "type": "object", + "required": [ + "location" + ], + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "description": { + "type": "string", + "description": "A optional description of the correlation ID. GitHub Flavored Markdown is allowed." + }, + "location": { + "type": "string", + "description": "A runtime expression that specifies the location of the correlation ID", + "pattern": "^\\$message\\.(header|payload)#(\\/(([^\\/~])|(~[01]))*)*" + } + } + }, + "specificationExtension": { + "description": "Any property starting with x- is valid.", + "additionalProperties": true, + "additionalItems": true + }, + "tag": { + "type": "object", + "additionalProperties": false, + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "description": { + "type": "string" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + } + }, + "operationTrait": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "summary": { + "type": "string" + }, + "description": { + "type": "string" + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "operationId": { + "type": "string" + }, + "bindings": { + "$ref": "#/definitions/bindingsObject" + } + } + }, + "messageTrait": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "schemaFormat": { + "type": "string" + }, + "contentType": { + "type": "string" + }, + "headers": { + "allOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "properties": { + "type": { + "const": "object" + } + } + } + ] + }, + "correlationId": { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "$ref": "#/definitions/correlationId" + } + ] + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "summary": { + "type": "string", + "description": "A brief summary of the message." + }, + "name": { + "type": "string", + "description": "Name of the message." + }, + "title": { + "type": "string", + "description": "A human-friendly title for the message." 
+ }, + "description": { + "type": "string", + "description": "A longer description of the message. CommonMark is allowed." + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "deprecated": { + "type": "boolean", + "default": false + }, + "examples": { + "type": "array", + "items": { + "type": "object", + "additionalProperties": false, + "anyOf": [ + {"required": ["payload"] }, + {"required": ["headers"] } + ], + "properties": { + "name": { + "type": "string", + "description": "Machine readable name of the message example." + }, + "summary": { + "type": "string", + "description": "A brief summary of the message example." + }, + "headers": { + "type": "object" + }, + "payload": {} + } + } + }, + "bindings": { + "$ref": "#/definitions/bindingsObject" + } + } + }, + "SecurityScheme": { + "oneOf": [ + { + "$ref": "#/definitions/userPassword" + }, + { + "$ref": "#/definitions/apiKey" + }, + { + "$ref": "#/definitions/X509" + }, + { + "$ref": "#/definitions/symmetricEncryption" + }, + { + "$ref": "#/definitions/asymmetricEncryption" + }, + { + "$ref": "#/definitions/HTTPSecurityScheme" + }, + { + "$ref": "#/definitions/oauth2Flows" + }, + { + "$ref": "#/definitions/openIdConnect" + }, + { + "$ref": "#/definitions/SaslSecurityScheme" + } + ] + }, + "userPassword": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "userPassword" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "apiKey": { + "type": "object", + "required": [ + "type", + "in" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "apiKey" + ] + }, + "in": { + "type": "string", + "enum": [ + "user", + "password" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "X509": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "X509" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "symmetricEncryption": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "symmetricEncryption" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "asymmetricEncryption": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "asymmetricEncryption" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "HTTPSecurityScheme": { + "oneOf": [ + { + "$ref": "#/definitions/NonBearerHTTPSecurityScheme" + }, + { + "$ref": "#/definitions/BearerHTTPSecurityScheme" + }, + { + "$ref": "#/definitions/APIKeyHTTPSecurityScheme" + } + ] + }, + "NonBearerHTTPSecurityScheme": { + "not": { + "type": "object", + "properties": { + "scheme": { + "type": "string", + "enum": [ + "bearer" + ] + } + } + }, + "type": "object", + 
"required": [ + "scheme", + "type" + ], + "properties": { + "scheme": { + "type": "string" + }, + "description": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "http" + ] + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "BearerHTTPSecurityScheme": { + "type": "object", + "required": [ + "type", + "scheme" + ], + "properties": { + "scheme": { + "type": "string", + "enum": [ + "bearer" + ] + }, + "bearerFormat": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "http" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "APIKeyHTTPSecurityScheme": { + "type": "object", + "required": [ + "type", + "name", + "in" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "httpApiKey" + ] + }, + "name": { + "type": "string" + }, + "in": { + "type": "string", + "enum": [ + "header", + "query", + "cookie" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "SaslSecurityScheme": { + "oneOf": [ + { + "$ref": "#/definitions/SaslPlainSecurityScheme" + }, + { + "$ref": "#/definitions/SaslScramSecurityScheme" + }, + { + "$ref": "#/definitions/SaslGssapiSecurityScheme" + } + ] + }, + "SaslPlainSecurityScheme": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "plain" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "SaslScramSecurityScheme": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "scramSha256", + "scramSha512" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "SaslGssapiSecurityScheme": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "gssapi" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "oauth2Flows": { + "type": "object", + "required": [ + "type", + "flows" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "description": { + "type": "string" + }, + "flows": { + "type": "object", + "properties": { + "implicit": { + "allOf": [ + { + "$ref": "#/definitions/oauth2Flow" + }, + { + "required": [ + "authorizationUrl", + "scopes" + ] + }, + { + "not": { + "required": [ + "tokenUrl" + ] + } + } + ] + }, + "password": { + "allOf": [ + { + "$ref": "#/definitions/oauth2Flow" + }, + { + "required": [ + "tokenUrl", + "scopes" + ] + }, + { + "not": { + "required": [ + "authorizationUrl" + ] + } + } + ] + }, + "clientCredentials": { + "allOf": [ + { + "$ref": "#/definitions/oauth2Flow" + }, + { + "required": [ + "tokenUrl", + "scopes" + ] + }, + { + "not": { + "required": [ + "authorizationUrl" + ] + } + } + ] + }, + 
"authorizationCode": { + "allOf": [ + { + "$ref": "#/definitions/oauth2Flow" + }, + { + "required": [ + "authorizationUrl", + "tokenUrl", + "scopes" + ] + } + ] + } + }, + "additionalProperties": false + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + } + }, + "oauth2Flow": { + "type": "object", + "properties": { + "authorizationUrl": { + "type": "string", + "format": "uri" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "refreshUrl": { + "type": "string", + "format": "uri" + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "oauth2Scopes": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "openIdConnect": { + "type": "object", + "required": [ + "type", + "openIdConnectUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "openIdConnect" + ] + }, + "description": { + "type": "string" + }, + "openIdConnectUrl": { + "type": "string", + "format": "uri" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "SecurityRequirement": { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + } + } + } +} diff --git a/vendor/github.com/asyncapi/spec-json-schemas/v2/schemas/2.2.0.json b/vendor/github.com/asyncapi/spec-json-schemas/v2/schemas/2.2.0.json new file mode 100644 index 00000000000..8bab82c5f67 --- /dev/null +++ b/vendor/github.com/asyncapi/spec-json-schemas/v2/schemas/2.2.0.json @@ -0,0 +1,1489 @@ +{ + "title": "AsyncAPI 2.2.0 schema.", + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "required": [ + "asyncapi", + "info", + "channels" + ], + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "asyncapi": { + "type": "string", + "enum": [ + "2.2.0" + ], + "description": "The AsyncAPI specification version of this document." + }, + "id": { + "type": "string", + "description": "A unique id representing the application.", + "format": "uri" + }, + "info": { + "$ref": "#/definitions/info" + }, + "servers": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/server" + } + }, + "defaultContentType": { + "type": "string" + }, + "channels": { + "$ref": "#/definitions/channels" + }, + "components": { + "$ref": "#/definitions/components" + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "definitions": { + "Reference": { + "type": "object", + "required": [ + "$ref" + ], + "properties": { + "$ref": { + "$ref": "#/definitions/ReferenceObject" + } + } + }, + "ReferenceObject": { + "type": "string", + "format": "uri-reference" + }, + "info": { + "type": "object", + "description": "General information about the API.", + "required": [ + "version", + "title" + ], + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "title": { + "type": "string", + "description": "A unique and precise title of the API." 
+ }, + "version": { + "type": "string", + "description": "A semantic version number of the API." + }, + "description": { + "type": "string", + "description": "A longer description of the API. Should be different from the title. CommonMark is allowed." + }, + "termsOfService": { + "type": "string", + "description": "A URL to the Terms of Service for the API. MUST be in the format of a URL.", + "format": "uri" + }, + "contact": { + "$ref": "#/definitions/contact" + }, + "license": { + "$ref": "#/definitions/license" + } + } + }, + "contact": { + "type": "object", + "description": "Contact information for the owners of the API.", + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The identifying name of the contact person/organization." + }, + "url": { + "type": "string", + "description": "The URL pointing to the contact information.", + "format": "uri" + }, + "email": { + "type": "string", + "description": "The email address of the contact person/organization.", + "format": "email" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + } + }, + "license": { + "type": "object", + "required": [ + "name" + ], + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The name of the license type. It's encouraged to use an OSI compatible license." + }, + "url": { + "type": "string", + "description": "The URL pointing to the license.", + "format": "uri" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + } + }, + "server": { + "type": "object", + "description": "An object representing a Server.", + "required": [ + "url", + "protocol" + ], + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "url": { + "type": "string" + }, + "description": { + "type": "string" + }, + "protocol": { + "type": "string", + "description": "The transfer protocol." 
+ }, + "protocolVersion": { + "type": "string" + }, + "variables": { + "$ref": "#/definitions/serverVariables" + }, + "security": { + "type": "array", + "items": { + "$ref": "#/definitions/SecurityRequirement" + } + }, + "bindings": { + "$ref": "#/definitions/bindingsObject" + } + } + }, + "serverVariables": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/serverVariable" + } + }, + "serverVariable": { + "type": "object", + "description": "An object representing a Server Variable for server URL template substitution.", + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "enum": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + }, + "default": { + "type": "string" + }, + "description": { + "type": "string" + }, + "examples": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "channels": { + "type": "object", + "propertyNames": { + "type": "string", + "format": "uri-template", + "minLength": 1 + }, + "additionalProperties": { + "$ref": "#/definitions/channelItem" + } + }, + "components": { + "type": "object", + "description": "An object to hold a set of reusable objects for different aspects of the AsyncAPI Specification.", + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "schemas": { + "$ref": "#/definitions/schemas" + }, + "messages": { + "$ref": "#/definitions/messages" + }, + "securitySchemes": { + "type": "object", + "patternProperties": { + "^[\\w\\d\\.\\-_]+$": { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "$ref": "#/definitions/SecurityScheme" + } + ] + } + } + }, + "parameters": { + "$ref": "#/definitions/parameters" + }, + "correlationIds": { + "type": "object", + "patternProperties": { + "^[\\w\\d\\.\\-_]+$": { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "$ref": "#/definitions/correlationId" + } + ] + } + } + }, + "operationTraits": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/operationTrait" + } + }, + "messageTraits": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/messageTrait" + } + }, + "serverBindings": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/bindingsObject" + } + }, + "channelBindings": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/bindingsObject" + } + }, + "operationBindings": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/bindingsObject" + } + }, + "messageBindings": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/bindingsObject" + } + } + } + }, + "schemas": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "description": "JSON objects describing schemas the API uses." + }, + "messages": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/message" + }, + "description": "JSON objects describing the messages being consumed and produced by the API." + }, + "parameters": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/parameter" + }, + "description": "JSON objects describing re-usable channel parameters." 
+ }, + "schema": { + "allOf": [ + { + "$ref": "http://json-schema.org/draft-07/schema#" + }, + { + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "additionalProperties": { + "anyOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "boolean" + } + ], + "default": {} + }, + "items": { + "anyOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + } + ], + "default": {} + }, + "allOf": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + }, + "oneOf": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + }, + "anyOf": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + }, + "not": { + "$ref": "#/definitions/schema" + }, + "properties": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "default": {} + }, + "propertyNames": { + "$ref": "#/definitions/schema" + }, + "contains": { + "$ref": "#/definitions/schema" + }, + "discriminator": { + "type": "string" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "deprecated": { + "type": "boolean", + "default": false + } + } + } + ] + }, + "externalDocs": { + "type": "object", + "additionalProperties": false, + "description": "information about external documentation", + "required": [ + "url" + ], + "properties": { + "description": { + "type": "string" + }, + "url": { + "type": "string", + "format": "uri" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + } + }, + "channelItem": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "$ref": { + "$ref": "#/definitions/ReferenceObject" + }, + "parameters": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/parameter" + } + }, + "description": { + "type": "string", + "description": "A description of the channel." + }, + "servers": { + "type": "array", + "description": "The names of the servers on which this channel is available. If absent or empty then this channel must be available on all servers.", + "items": { + "type": "string" + }, + "uniqueItems": true + }, + "publish": { + "$ref": "#/definitions/operation" + }, + "subscribe": { + "$ref": "#/definitions/operation" + }, + "deprecated": { + "type": "boolean", + "default": false + }, + "bindings": { + "$ref": "#/definitions/bindingsObject" + } + } + }, + "parameter": { + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." 
+ }, + "schema": { + "$ref": "#/definitions/schema" + }, + "location": { + "type": "string", + "description": "A runtime expression that specifies the location of the parameter value", + "pattern": "^\\$message\\.(header|payload)#(\\/(([^\\/~])|(~[01]))*)*" + }, + "$ref": { + "$ref": "#/definitions/ReferenceObject" + } + } + }, + "operation": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "traits": { + "type": "array", + "items": { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "$ref": "#/definitions/operationTrait" + }, + { + "type": "array", + "items": [ + { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "$ref": "#/definitions/operationTrait" + } + ] + }, + { + "type": "object", + "additionalItems": true + } + ] + } + ] + } + }, + "summary": { + "type": "string" + }, + "description": { + "type": "string" + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "operationId": { + "type": "string" + }, + "bindings": { + "$ref": "#/definitions/bindingsObject" + }, + "message": { + "$ref": "#/definitions/message" + } + } + }, + "message": { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "oneOf": [ + { + "type": "object", + "required": [ + "oneOf" + ], + "additionalProperties": false, + "properties": { + "oneOf": { + "type": "array", + "items": { + "$ref": "#/definitions/message" + } + } + } + }, + { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "schemaFormat": { + "type": "string" + }, + "contentType": { + "type": "string" + }, + "headers": { + "allOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "properties": { + "type": { + "const": "object" + } + } + } + ] + }, + "payload": {}, + "correlationId": { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "$ref": "#/definitions/correlationId" + } + ] + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "summary": { + "type": "string", + "description": "A brief summary of the message." + }, + "name": { + "type": "string", + "description": "Name of the message." + }, + "title": { + "type": "string", + "description": "A human-friendly title for the message." + }, + "description": { + "type": "string", + "description": "A longer description of the message. CommonMark is allowed." + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "deprecated": { + "type": "boolean", + "default": false + }, + "examples": { + "type": "array", + "items": { + "type": "object", + "additionalProperties": false, + "anyOf": [ + {"required": ["payload"] }, + {"required": ["headers"] } + ], + "properties": { + "name": { + "type": "string", + "description": "Machine readable name of the message example." + }, + "summary": { + "type": "string", + "description": "A brief summary of the message example." 
+ }, + "headers": { + "type": "object" + }, + "payload": {} + } + } + }, + "bindings": { + "$ref": "#/definitions/bindingsObject" + }, + "traits": { + "type": "array", + "items": { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "$ref": "#/definitions/messageTrait" + }, + { + "type": "array", + "items": [ + { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "$ref": "#/definitions/messageTrait" + } + ] + }, + { + "type": "object", + "additionalItems": true + } + ] + } + ] + } + } + } + } + ] + } + ] + }, + "bindingsObject": { + "type": "object", + "additionalProperties": true, + "properties": { + "http": {}, + "ws": {}, + "amqp": {}, + "amqp1": {}, + "mqtt": {}, + "mqtt5": {}, + "kafka": {}, + "anypointmq": {}, + "nats": {}, + "jms": {}, + "sns": {}, + "sqs": {}, + "stomp": {}, + "redis": {}, + "ibmmq": {} + } + }, + "correlationId": { + "type": "object", + "required": [ + "location" + ], + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "description": { + "type": "string", + "description": "A optional description of the correlation ID. GitHub Flavored Markdown is allowed." + }, + "location": { + "type": "string", + "description": "A runtime expression that specifies the location of the correlation ID", + "pattern": "^\\$message\\.(header|payload)#(\\/(([^\\/~])|(~[01]))*)*" + } + } + }, + "specificationExtension": { + "description": "Any property starting with x- is valid.", + "additionalProperties": true, + "additionalItems": true + }, + "tag": { + "type": "object", + "additionalProperties": false, + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "description": { + "type": "string" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + } + }, + "operationTrait": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "summary": { + "type": "string" + }, + "description": { + "type": "string" + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "operationId": { + "type": "string" + }, + "bindings": { + "$ref": "#/definitions/bindingsObject" + } + } + }, + "messageTrait": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "schemaFormat": { + "type": "string" + }, + "contentType": { + "type": "string" + }, + "headers": { + "allOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "properties": { + "type": { + "const": "object" + } + } + } + ] + }, + "correlationId": { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "$ref": "#/definitions/correlationId" + } + ] + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "summary": { + "type": "string", + "description": "A brief summary of the message." + }, + "name": { + "type": "string", + "description": "Name of the message." + }, + "title": { + "type": "string", + "description": "A human-friendly title for the message." 
+ }, + "description": { + "type": "string", + "description": "A longer description of the message. CommonMark is allowed." + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "deprecated": { + "type": "boolean", + "default": false + }, + "examples": { + "type": "array", + "items": { + "type": "object" + } + }, + "bindings": { + "$ref": "#/definitions/bindingsObject" + } + } + }, + "SecurityScheme": { + "oneOf": [ + { + "$ref": "#/definitions/userPassword" + }, + { + "$ref": "#/definitions/apiKey" + }, + { + "$ref": "#/definitions/X509" + }, + { + "$ref": "#/definitions/symmetricEncryption" + }, + { + "$ref": "#/definitions/asymmetricEncryption" + }, + { + "$ref": "#/definitions/HTTPSecurityScheme" + }, + { + "$ref": "#/definitions/oauth2Flows" + }, + { + "$ref": "#/definitions/openIdConnect" + }, + { + "$ref": "#/definitions/SaslSecurityScheme" + } + ] + }, + "userPassword": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "userPassword" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "apiKey": { + "type": "object", + "required": [ + "type", + "in" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "apiKey" + ] + }, + "in": { + "type": "string", + "enum": [ + "user", + "password" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "X509": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "X509" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "symmetricEncryption": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "symmetricEncryption" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "asymmetricEncryption": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "asymmetricEncryption" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "HTTPSecurityScheme": { + "oneOf": [ + { + "$ref": "#/definitions/NonBearerHTTPSecurityScheme" + }, + { + "$ref": "#/definitions/BearerHTTPSecurityScheme" + }, + { + "$ref": "#/definitions/APIKeyHTTPSecurityScheme" + } + ] + }, + "NonBearerHTTPSecurityScheme": { + "not": { + "type": "object", + "properties": { + "scheme": { + "type": "string", + "enum": [ + "bearer" + ] + } + } + }, + "type": "object", + "required": [ + "scheme", + "type" + ], + "properties": { + "scheme": { + "type": "string" + }, + "description": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "http" + ] + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "BearerHTTPSecurityScheme": { + "type": 
"object", + "required": [ + "type", + "scheme" + ], + "properties": { + "scheme": { + "type": "string", + "enum": [ + "bearer" + ] + }, + "bearerFormat": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "http" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "APIKeyHTTPSecurityScheme": { + "type": "object", + "required": [ + "type", + "name", + "in" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "httpApiKey" + ] + }, + "name": { + "type": "string" + }, + "in": { + "type": "string", + "enum": [ + "header", + "query", + "cookie" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "SaslSecurityScheme": { + "oneOf": [ + { + "$ref": "#/definitions/SaslPlainSecurityScheme" + }, + { + "$ref": "#/definitions/SaslScramSecurityScheme" + }, + { + "$ref": "#/definitions/SaslGssapiSecurityScheme" + } + ] + }, + "SaslPlainSecurityScheme": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "plain" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "SaslScramSecurityScheme": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "scramSha256", + "scramSha512" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "SaslGssapiSecurityScheme": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "gssapi" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "oauth2Flows": { + "type": "object", + "required": [ + "type", + "flows" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "description": { + "type": "string" + }, + "flows": { + "type": "object", + "properties": { + "implicit": { + "allOf": [ + { + "$ref": "#/definitions/oauth2Flow" + }, + { + "required": [ + "authorizationUrl", + "scopes" + ] + }, + { + "not": { + "required": [ + "tokenUrl" + ] + } + } + ] + }, + "password": { + "allOf": [ + { + "$ref": "#/definitions/oauth2Flow" + }, + { + "required": [ + "tokenUrl", + "scopes" + ] + }, + { + "not": { + "required": [ + "authorizationUrl" + ] + } + } + ] + }, + "clientCredentials": { + "allOf": [ + { + "$ref": "#/definitions/oauth2Flow" + }, + { + "required": [ + "tokenUrl", + "scopes" + ] + }, + { + "not": { + "required": [ + "authorizationUrl" + ] + } + } + ] + }, + "authorizationCode": { + "allOf": [ + { + "$ref": "#/definitions/oauth2Flow" + }, + { + "required": [ + "authorizationUrl", + "tokenUrl", + "scopes" + ] + } + ] + } + }, + "additionalProperties": false + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + } + }, + "oauth2Flow": { + "type": "object", + "properties": { + "authorizationUrl": { + 
"type": "string", + "format": "uri" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "refreshUrl": { + "type": "string", + "format": "uri" + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "oauth2Scopes": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "openIdConnect": { + "type": "object", + "required": [ + "type", + "openIdConnectUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "openIdConnect" + ] + }, + "description": { + "type": "string" + }, + "openIdConnectUrl": { + "type": "string", + "format": "uri" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "SecurityRequirement": { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + } + } + } +} diff --git a/vendor/github.com/asyncapi/spec-json-schemas/v2/schemas/2.3.0.json b/vendor/github.com/asyncapi/spec-json-schemas/v2/schemas/2.3.0.json new file mode 100644 index 00000000000..8d4f83078e8 --- /dev/null +++ b/vendor/github.com/asyncapi/spec-json-schemas/v2/schemas/2.3.0.json @@ -0,0 +1,1502 @@ +{ + "title": "AsyncAPI 2.3.0 schema.", + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "required": [ + "asyncapi", + "info", + "channels" + ], + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "asyncapi": { + "type": "string", + "enum": [ + "2.3.0" + ], + "description": "The AsyncAPI specification version of this document." + }, + "id": { + "type": "string", + "description": "A unique id representing the application.", + "format": "uri" + }, + "info": { + "$ref": "#/definitions/info" + }, + "servers": { + "$ref": "#/definitions/servers" + }, + "defaultContentType": { + "type": "string" + }, + "channels": { + "$ref": "#/definitions/channels" + }, + "components": { + "$ref": "#/definitions/components" + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "definitions": { + "Reference": { + "type": "object", + "required": [ + "$ref" + ], + "properties": { + "$ref": { + "$ref": "#/definitions/ReferenceObject" + } + } + }, + "ReferenceObject": { + "type": "string", + "format": "uri-reference" + }, + "info": { + "type": "object", + "description": "General information about the API.", + "required": [ + "version", + "title" + ], + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "title": { + "type": "string", + "description": "A unique and precise title of the API." + }, + "version": { + "type": "string", + "description": "A semantic version number of the API." + }, + "description": { + "type": "string", + "description": "A longer description of the API. Should be different from the title. CommonMark is allowed." + }, + "termsOfService": { + "type": "string", + "description": "A URL to the Terms of Service for the API. 
MUST be in the format of a URL.", + "format": "uri" + }, + "contact": { + "$ref": "#/definitions/contact" + }, + "license": { + "$ref": "#/definitions/license" + } + } + }, + "contact": { + "type": "object", + "description": "Contact information for the owners of the API.", + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The identifying name of the contact person/organization." + }, + "url": { + "type": "string", + "description": "The URL pointing to the contact information.", + "format": "uri" + }, + "email": { + "type": "string", + "description": "The email address of the contact person/organization.", + "format": "email" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + } + }, + "license": { + "type": "object", + "required": [ + "name" + ], + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The name of the license type. It's encouraged to use an OSI compatible license." + }, + "url": { + "type": "string", + "description": "The URL pointing to the license.", + "format": "uri" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + } + }, + "server": { + "type": "object", + "description": "An object representing a Server.", + "anyOf" : [ + { "required" : ["url", "protocol"] }, + { "required" : ["$ref"] } + ], + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "$ref": { + "$ref": "#/definitions/ReferenceObject" + }, + "url": { + "type": "string" + }, + "description": { + "type": "string" + }, + "protocol": { + "type": "string", + "description": "The transfer protocol." 
+ }, + "protocolVersion": { + "type": "string" + }, + "variables": { + "$ref": "#/definitions/serverVariables" + }, + "security": { + "type": "array", + "items": { + "$ref": "#/definitions/SecurityRequirement" + } + }, + "bindings": { + "$ref": "#/definitions/bindingsObject" + } + } + }, + "servers": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/server" + } + }, + "serverVariables": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/serverVariable" + } + }, + "serverVariable": { + "type": "object", + "description": "An object representing a Server Variable for server URL template substitution.", + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "enum": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + }, + "default": { + "type": "string" + }, + "description": { + "type": "string" + }, + "examples": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "channels": { + "type": "object", + "propertyNames": { + "type": "string", + "format": "uri-template", + "minLength": 1 + }, + "additionalProperties": { + "$ref": "#/definitions/channelItem" + } + }, + "components": { + "type": "object", + "description": "An object to hold a set of reusable objects for different aspects of the AsyncAPI Specification.", + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "schemas": { + "$ref": "#/definitions/schemas" + }, + "servers": { + "$ref": "#/definitions/servers" + }, + "channels": { + "$ref": "#/definitions/channels" + }, + "messages": { + "$ref": "#/definitions/messages" + }, + "securitySchemes": { + "type": "object", + "patternProperties": { + "^[\\w\\d\\.\\-_]+$": { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "$ref": "#/definitions/SecurityScheme" + } + ] + } + } + }, + "parameters": { + "$ref": "#/definitions/parameters" + }, + "correlationIds": { + "type": "object", + "patternProperties": { + "^[\\w\\d\\.\\-_]+$": { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "$ref": "#/definitions/correlationId" + } + ] + } + } + }, + "operationTraits": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/operationTrait" + } + }, + "messageTraits": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/messageTrait" + } + }, + "serverBindings": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/bindingsObject" + } + }, + "channelBindings": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/bindingsObject" + } + }, + "operationBindings": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/bindingsObject" + } + }, + "messageBindings": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/bindingsObject" + } + } + } + }, + "schemas": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "description": "JSON objects describing schemas the API uses." + }, + "messages": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/message" + }, + "description": "JSON objects describing the messages being consumed and produced by the API." 
+ }, + "parameters": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/parameter" + }, + "description": "JSON objects describing re-usable channel parameters." + }, + "schema": { + "allOf": [ + { + "$ref": "http://json-schema.org/draft-07/schema#" + }, + { + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "additionalProperties": { + "anyOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "boolean" + } + ], + "default": {} + }, + "items": { + "anyOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + } + ], + "default": {} + }, + "allOf": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + }, + "oneOf": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + }, + "anyOf": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + }, + "not": { + "$ref": "#/definitions/schema" + }, + "properties": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "default": {} + }, + "propertyNames": { + "$ref": "#/definitions/schema" + }, + "contains": { + "$ref": "#/definitions/schema" + }, + "discriminator": { + "type": "string" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "deprecated": { + "type": "boolean", + "default": false + } + } + } + ] + }, + "externalDocs": { + "type": "object", + "additionalProperties": false, + "description": "information about external documentation", + "required": [ + "url" + ], + "properties": { + "description": { + "type": "string" + }, + "url": { + "type": "string", + "format": "uri" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + } + }, + "channelItem": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "$ref": { + "$ref": "#/definitions/ReferenceObject" + }, + "parameters": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/parameter" + } + }, + "description": { + "type": "string", + "description": "A description of the channel." + }, + "servers": { + "type": "array", + "description": "The names of the servers on which this channel is available. If absent or empty then this channel must be available on all servers.", + "items": { + "type": "string" + }, + "uniqueItems": true + }, + "publish": { + "$ref": "#/definitions/operation" + }, + "subscribe": { + "$ref": "#/definitions/operation" + }, + "deprecated": { + "type": "boolean", + "default": false + }, + "bindings": { + "$ref": "#/definitions/bindingsObject" + } + } + }, + "parameter": { + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." 
+ }, + "schema": { + "$ref": "#/definitions/schema" + }, + "location": { + "type": "string", + "description": "A runtime expression that specifies the location of the parameter value", + "pattern": "^\\$message\\.(header|payload)#(\\/(([^\\/~])|(~[01]))*)*" + }, + "$ref": { + "$ref": "#/definitions/ReferenceObject" + } + } + }, + "operation": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "traits": { + "type": "array", + "items": { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "$ref": "#/definitions/operationTrait" + }, + { + "type": "array", + "items": [ + { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "$ref": "#/definitions/operationTrait" + } + ] + }, + { + "type": "object", + "additionalItems": true + } + ] + } + ] + } + }, + "summary": { + "type": "string" + }, + "description": { + "type": "string" + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "operationId": { + "type": "string" + }, + "bindings": { + "$ref": "#/definitions/bindingsObject" + }, + "message": { + "$ref": "#/definitions/message" + } + } + }, + "message": { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "oneOf": [ + { + "type": "object", + "required": [ + "oneOf" + ], + "additionalProperties": false, + "properties": { + "oneOf": { + "type": "array", + "items": { + "$ref": "#/definitions/message" + } + } + } + }, + { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "schemaFormat": { + "type": "string" + }, + "contentType": { + "type": "string" + }, + "headers": { + "allOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "properties": { + "type": { + "const": "object" + } + } + } + ] + }, + "payload": {}, + "correlationId": { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "$ref": "#/definitions/correlationId" + } + ] + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "summary": { + "type": "string", + "description": "A brief summary of the message." + }, + "name": { + "type": "string", + "description": "Name of the message." + }, + "title": { + "type": "string", + "description": "A human-friendly title for the message." + }, + "description": { + "type": "string", + "description": "A longer description of the message. CommonMark is allowed." + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "deprecated": { + "type": "boolean", + "default": false + }, + "examples": { + "type": "array", + "items": { + "type": "object", + "additionalProperties": false, + "anyOf": [ + {"required": ["payload"] }, + {"required": ["headers"] } + ], + "properties": { + "name": { + "type": "string", + "description": "Machine readable name of the message example." + }, + "summary": { + "type": "string", + "description": "A brief summary of the message example." 
+ }, + "headers": { + "type": "object" + }, + "payload": {} + } + } + }, + "bindings": { + "$ref": "#/definitions/bindingsObject" + }, + "traits": { + "type": "array", + "items": { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "$ref": "#/definitions/messageTrait" + }, + { + "type": "array", + "items": [ + { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "$ref": "#/definitions/messageTrait" + } + ] + }, + { + "type": "object", + "additionalItems": true + } + ] + } + ] + } + } + } + } + ] + } + ] + }, + "bindingsObject": { + "type": "object", + "additionalProperties": true, + "properties": { + "http": {}, + "ws": {}, + "amqp": {}, + "amqp1": {}, + "mqtt": {}, + "mqtt5": {}, + "kafka": {}, + "anypointmq": {}, + "nats": {}, + "jms": {}, + "sns": {}, + "sqs": {}, + "stomp": {}, + "redis": {}, + "ibmmq": {}, + "solace": {} + } + }, + "correlationId": { + "type": "object", + "required": [ + "location" + ], + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "description": { + "type": "string", + "description": "A optional description of the correlation ID. GitHub Flavored Markdown is allowed." + }, + "location": { + "type": "string", + "description": "A runtime expression that specifies the location of the correlation ID", + "pattern": "^\\$message\\.(header|payload)#(\\/(([^\\/~])|(~[01]))*)*" + } + } + }, + "specificationExtension": { + "description": "Any property starting with x- is valid.", + "additionalProperties": true, + "additionalItems": true + }, + "tag": { + "type": "object", + "additionalProperties": false, + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "description": { + "type": "string" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + } + }, + "operationTrait": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "summary": { + "type": "string" + }, + "description": { + "type": "string" + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "operationId": { + "type": "string" + }, + "bindings": { + "$ref": "#/definitions/bindingsObject" + } + } + }, + "messageTrait": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "schemaFormat": { + "type": "string" + }, + "contentType": { + "type": "string" + }, + "headers": { + "allOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "properties": { + "type": { + "const": "object" + } + } + } + ] + }, + "correlationId": { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "$ref": "#/definitions/correlationId" + } + ] + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "summary": { + "type": "string", + "description": "A brief summary of the message." + }, + "name": { + "type": "string", + "description": "Name of the message." + }, + "title": { + "type": "string", + "description": "A human-friendly title for the message." 
+ }, + "description": { + "type": "string", + "description": "A longer description of the message. CommonMark is allowed." + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "deprecated": { + "type": "boolean", + "default": false + }, + "examples": { + "type": "array", + "items": { + "type": "object" + } + }, + "bindings": { + "$ref": "#/definitions/bindingsObject" + } + } + }, + "SecurityScheme": { + "oneOf": [ + { + "$ref": "#/definitions/userPassword" + }, + { + "$ref": "#/definitions/apiKey" + }, + { + "$ref": "#/definitions/X509" + }, + { + "$ref": "#/definitions/symmetricEncryption" + }, + { + "$ref": "#/definitions/asymmetricEncryption" + }, + { + "$ref": "#/definitions/HTTPSecurityScheme" + }, + { + "$ref": "#/definitions/oauth2Flows" + }, + { + "$ref": "#/definitions/openIdConnect" + }, + { + "$ref": "#/definitions/SaslSecurityScheme" + } + ] + }, + "userPassword": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "userPassword" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "apiKey": { + "type": "object", + "required": [ + "type", + "in" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "apiKey" + ] + }, + "in": { + "type": "string", + "enum": [ + "user", + "password" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "X509": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "X509" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "symmetricEncryption": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "symmetricEncryption" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "asymmetricEncryption": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "asymmetricEncryption" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "HTTPSecurityScheme": { + "oneOf": [ + { + "$ref": "#/definitions/NonBearerHTTPSecurityScheme" + }, + { + "$ref": "#/definitions/BearerHTTPSecurityScheme" + }, + { + "$ref": "#/definitions/APIKeyHTTPSecurityScheme" + } + ] + }, + "NonBearerHTTPSecurityScheme": { + "not": { + "type": "object", + "properties": { + "scheme": { + "type": "string", + "enum": [ + "bearer" + ] + } + } + }, + "type": "object", + "required": [ + "scheme", + "type" + ], + "properties": { + "scheme": { + "type": "string" + }, + "description": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "http" + ] + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "BearerHTTPSecurityScheme": { + "type": 
"object", + "required": [ + "type", + "scheme" + ], + "properties": { + "scheme": { + "type": "string", + "enum": [ + "bearer" + ] + }, + "bearerFormat": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "http" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "APIKeyHTTPSecurityScheme": { + "type": "object", + "required": [ + "type", + "name", + "in" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "httpApiKey" + ] + }, + "name": { + "type": "string" + }, + "in": { + "type": "string", + "enum": [ + "header", + "query", + "cookie" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "SaslSecurityScheme": { + "oneOf": [ + { + "$ref": "#/definitions/SaslPlainSecurityScheme" + }, + { + "$ref": "#/definitions/SaslScramSecurityScheme" + }, + { + "$ref": "#/definitions/SaslGssapiSecurityScheme" + } + ] + }, + "SaslPlainSecurityScheme": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "plain" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "SaslScramSecurityScheme": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "scramSha256", + "scramSha512" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "SaslGssapiSecurityScheme": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "gssapi" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "oauth2Flows": { + "type": "object", + "required": [ + "type", + "flows" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "description": { + "type": "string" + }, + "flows": { + "type": "object", + "properties": { + "implicit": { + "allOf": [ + { + "$ref": "#/definitions/oauth2Flow" + }, + { + "required": [ + "authorizationUrl", + "scopes" + ] + }, + { + "not": { + "required": [ + "tokenUrl" + ] + } + } + ] + }, + "password": { + "allOf": [ + { + "$ref": "#/definitions/oauth2Flow" + }, + { + "required": [ + "tokenUrl", + "scopes" + ] + }, + { + "not": { + "required": [ + "authorizationUrl" + ] + } + } + ] + }, + "clientCredentials": { + "allOf": [ + { + "$ref": "#/definitions/oauth2Flow" + }, + { + "required": [ + "tokenUrl", + "scopes" + ] + }, + { + "not": { + "required": [ + "authorizationUrl" + ] + } + } + ] + }, + "authorizationCode": { + "allOf": [ + { + "$ref": "#/definitions/oauth2Flow" + }, + { + "required": [ + "authorizationUrl", + "tokenUrl", + "scopes" + ] + } + ] + } + }, + "additionalProperties": false + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + } + }, + "oauth2Flow": { + "type": "object", + "properties": { + "authorizationUrl": { + 
"type": "string", + "format": "uri" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "refreshUrl": { + "type": "string", + "format": "uri" + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "oauth2Scopes": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "openIdConnect": { + "type": "object", + "required": [ + "type", + "openIdConnectUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "openIdConnect" + ] + }, + "description": { + "type": "string" + }, + "openIdConnectUrl": { + "type": "string", + "format": "uri" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "SecurityRequirement": { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + } + } + } +} diff --git a/vendor/github.com/asyncapi/spec-json-schemas/v2/schemas/2.4.0.json b/vendor/github.com/asyncapi/spec-json-schemas/v2/schemas/2.4.0.json new file mode 100644 index 00000000000..3fce2a18e47 --- /dev/null +++ b/vendor/github.com/asyncapi/spec-json-schemas/v2/schemas/2.4.0.json @@ -0,0 +1,1523 @@ +{ + "title": "AsyncAPI 2.4.0 schema.", + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "required": [ + "asyncapi", + "info", + "channels" + ], + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "asyncapi": { + "type": "string", + "enum": [ + "2.4.0" + ], + "description": "The AsyncAPI specification version of this document." + }, + "id": { + "type": "string", + "description": "A unique id representing the application.", + "format": "uri" + }, + "info": { + "$ref": "#/definitions/info" + }, + "servers": { + "$ref": "#/definitions/servers" + }, + "defaultContentType": { + "type": "string" + }, + "channels": { + "$ref": "#/definitions/channels" + }, + "components": { + "$ref": "#/definitions/components" + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "definitions": { + "Reference": { + "type": "object", + "required": [ + "$ref" + ], + "properties": { + "$ref": { + "$ref": "#/definitions/ReferenceObject" + } + } + }, + "ReferenceObject": { + "type": "string", + "format": "uri-reference" + }, + "info": { + "type": "object", + "description": "General information about the API.", + "required": [ + "version", + "title" + ], + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "title": { + "type": "string", + "description": "A unique and precise title of the API." + }, + "version": { + "type": "string", + "description": "A semantic version number of the API." + }, + "description": { + "type": "string", + "description": "A longer description of the API. Should be different from the title. CommonMark is allowed." + }, + "termsOfService": { + "type": "string", + "description": "A URL to the Terms of Service for the API. 
MUST be in the format of a URL.", + "format": "uri" + }, + "contact": { + "$ref": "#/definitions/contact" + }, + "license": { + "$ref": "#/definitions/license" + } + } + }, + "contact": { + "type": "object", + "description": "Contact information for the owners of the API.", + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The identifying name of the contact person/organization." + }, + "url": { + "type": "string", + "description": "The URL pointing to the contact information.", + "format": "uri" + }, + "email": { + "type": "string", + "description": "The email address of the contact person/organization.", + "format": "email" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + } + }, + "license": { + "type": "object", + "required": [ + "name" + ], + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The name of the license type. It's encouraged to use an OSI compatible license." + }, + "url": { + "type": "string", + "description": "The URL pointing to the license.", + "format": "uri" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + } + }, + "server": { + "type": "object", + "description": "An object representing a Server.", + "anyOf" : [ + { "required" : ["url", "protocol"] }, + { "required" : ["$ref"] } + ], + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "$ref": { + "$ref": "#/definitions/ReferenceObject" + }, + "url": { + "type": "string" + }, + "description": { + "type": "string" + }, + "protocol": { + "type": "string", + "description": "The transfer protocol." 
+ }, + "protocolVersion": { + "type": "string" + }, + "variables": { + "$ref": "#/definitions/serverVariables" + }, + "security": { + "type": "array", + "items": { + "$ref": "#/definitions/SecurityRequirement" + } + }, + "bindings": { + "$ref": "#/definitions/bindingsObject" + } + } + }, + "servers": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/server" + } + }, + "serverVariables": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/serverVariable" + } + }, + "serverVariable": { + "type": "object", + "description": "An object representing a Server Variable for server URL template substitution.", + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "enum": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + }, + "default": { + "type": "string" + }, + "description": { + "type": "string" + }, + "examples": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "channels": { + "type": "object", + "propertyNames": { + "type": "string", + "format": "uri-template", + "minLength": 1 + }, + "additionalProperties": { + "$ref": "#/definitions/channelItem" + } + }, + "components": { + "type": "object", + "description": "An object to hold a set of reusable objects for different aspects of the AsyncAPI Specification.", + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "schemas": { + "$ref": "#/definitions/schemas" + }, + "servers": { + "$ref": "#/definitions/servers" + }, + "serverVariables": { + "$ref": "#/definitions/serverVariables" + }, + "channels": { + "$ref": "#/definitions/channels" + }, + "messages": { + "$ref": "#/definitions/messages" + }, + "securitySchemes": { + "type": "object", + "patternProperties": { + "^[\\w\\d\\.\\-_]+$": { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "$ref": "#/definitions/SecurityScheme" + } + ] + } + } + }, + "parameters": { + "$ref": "#/definitions/parameters" + }, + "correlationIds": { + "type": "object", + "patternProperties": { + "^[\\w\\d\\.\\-_]+$": { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "$ref": "#/definitions/correlationId" + } + ] + } + } + }, + "operationTraits": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/operationTrait" + } + }, + "messageTraits": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/messageTrait" + } + }, + "serverBindings": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/bindingsObject" + } + }, + "channelBindings": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/bindingsObject" + } + }, + "operationBindings": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/bindingsObject" + } + }, + "messageBindings": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/bindingsObject" + } + } + } + }, + "schemas": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "description": "JSON objects describing schemas the API uses." + }, + "messages": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/message" + }, + "description": "JSON objects describing the messages being consumed and produced by the API." 
+ }, + "parameters": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/parameter" + }, + "description": "JSON objects describing re-usable channel parameters." + }, + "schema": { + "allOf": [ + { + "$ref": "http://json-schema.org/draft-07/schema#" + }, + { + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "additionalProperties": { + "anyOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "boolean" + } + ], + "default": {} + }, + "items": { + "anyOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + } + ], + "default": {} + }, + "allOf": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + }, + "oneOf": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + }, + "anyOf": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + }, + "not": { + "$ref": "#/definitions/schema" + }, + "properties": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "default": {} + }, + "propertyNames": { + "$ref": "#/definitions/schema" + }, + "contains": { + "$ref": "#/definitions/schema" + }, + "discriminator": { + "type": "string" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "deprecated": { + "type": "boolean", + "default": false + } + } + } + ] + }, + "externalDocs": { + "type": "object", + "additionalProperties": false, + "description": "information about external documentation", + "required": [ + "url" + ], + "properties": { + "description": { + "type": "string" + }, + "url": { + "type": "string", + "format": "uri" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + } + }, + "channelItem": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "$ref": { + "$ref": "#/definitions/ReferenceObject" + }, + "parameters": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/parameter" + } + }, + "description": { + "type": "string", + "description": "A description of the channel." + }, + "servers": { + "type": "array", + "description": "The names of the servers on which this channel is available. If absent or empty then this channel must be available on all servers.", + "items": { + "type": "string" + }, + "uniqueItems": true + }, + "publish": { + "$ref": "#/definitions/operation" + }, + "subscribe": { + "$ref": "#/definitions/operation" + }, + "deprecated": { + "type": "boolean", + "default": false + }, + "bindings": { + "$ref": "#/definitions/bindingsObject" + } + } + }, + "parameter": { + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." 
+ }, + "schema": { + "$ref": "#/definitions/schema" + }, + "location": { + "type": "string", + "description": "A runtime expression that specifies the location of the parameter value", + "pattern": "^\\$message\\.(header|payload)#(\\/(([^\\/~])|(~[01]))*)*" + }, + "$ref": { + "$ref": "#/definitions/ReferenceObject" + } + } + }, + "operation": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "traits": { + "type": "array", + "items": { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "$ref": "#/definitions/operationTrait" + }, + { + "type": "array", + "items": [ + { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "$ref": "#/definitions/operationTrait" + } + ] + }, + { + "type": "object", + "additionalItems": true + } + ] + } + ] + } + }, + "summary": { + "type": "string" + }, + "description": { + "type": "string" + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "operationId": { + "type": "string" + }, + "security": { + "type": "array", + "items": { + "$ref": "#/definitions/SecurityRequirement" + } + }, + "bindings": { + "$ref": "#/definitions/bindingsObject" + }, + "message": { + "$ref": "#/definitions/message" + } + } + }, + "message": { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "oneOf": [ + { + "type": "object", + "required": [ + "oneOf" + ], + "additionalProperties": false, + "properties": { + "oneOf": { + "type": "array", + "items": { + "$ref": "#/definitions/message" + } + } + } + }, + { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "schemaFormat": { + "type": "string" + }, + "contentType": { + "type": "string" + }, + "headers": { + "allOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "properties": { + "type": { + "const": "object" + } + } + } + ] + }, + "messageId": { + "type": "string" + }, + "payload": {}, + "correlationId": { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "$ref": "#/definitions/correlationId" + } + ] + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "summary": { + "type": "string", + "description": "A brief summary of the message." + }, + "name": { + "type": "string", + "description": "Name of the message." + }, + "title": { + "type": "string", + "description": "A human-friendly title for the message." + }, + "description": { + "type": "string", + "description": "A longer description of the message. CommonMark is allowed." + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "deprecated": { + "type": "boolean", + "default": false + }, + "examples": { + "type": "array", + "items": { + "type": "object", + "additionalProperties": false, + "anyOf": [ + {"required": ["payload"] }, + {"required": ["headers"] } + ], + "properties": { + "name": { + "type": "string", + "description": "Machine readable name of the message example." + }, + "summary": { + "type": "string", + "description": "A brief summary of the message example." 
+ }, + "headers": { + "type": "object" + }, + "payload": {} + } + } + }, + "bindings": { + "$ref": "#/definitions/bindingsObject" + }, + "traits": { + "type": "array", + "items": { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "$ref": "#/definitions/messageTrait" + }, + { + "type": "array", + "items": [ + { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "$ref": "#/definitions/messageTrait" + } + ] + }, + { + "type": "object", + "additionalItems": true + } + ] + } + ] + } + } + } + } + ] + } + ] + }, + "bindingsObject": { + "type": "object", + "additionalProperties": true, + "properties": { + "http": {}, + "ws": {}, + "amqp": {}, + "amqp1": {}, + "mqtt": {}, + "mqtt5": {}, + "kafka": {}, + "anypointmq": {}, + "nats": {}, + "jms": {}, + "sns": {}, + "sqs": {}, + "stomp": {}, + "redis": {}, + "ibmmq": {}, + "solace": {} + } + }, + "correlationId": { + "type": "object", + "required": [ + "location" + ], + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "description": { + "type": "string", + "description": "A optional description of the correlation ID. GitHub Flavored Markdown is allowed." + }, + "location": { + "type": "string", + "description": "A runtime expression that specifies the location of the correlation ID", + "pattern": "^\\$message\\.(header|payload)#(\\/(([^\\/~])|(~[01]))*)*" + } + } + }, + "specificationExtension": { + "description": "Any property starting with x- is valid.", + "additionalProperties": true, + "additionalItems": true + }, + "tag": { + "type": "object", + "additionalProperties": false, + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "description": { + "type": "string" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + } + }, + "operationTrait": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "summary": { + "type": "string" + }, + "description": { + "type": "string" + }, + "security": { + "type": "array", + "items": { + "$ref": "#/definitions/SecurityRequirement" + } + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "operationId": { + "type": "string" + }, + "bindings": { + "$ref": "#/definitions/bindingsObject" + } + } + }, + "messageTrait": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "schemaFormat": { + "type": "string" + }, + "contentType": { + "type": "string" + }, + "headers": { + "allOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "properties": { + "type": { + "const": "object" + } + } + } + ] + }, + "messageId": { + "type": "string" + }, + "correlationId": { + "oneOf": [ + { + "$ref": "#/definitions/Reference" + }, + { + "$ref": "#/definitions/correlationId" + } + ] + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "summary": { + "type": "string", + "description": "A brief summary of the message." 
+ }, + "name": { + "type": "string", + "description": "Name of the message." + }, + "title": { + "type": "string", + "description": "A human-friendly title for the message." + }, + "description": { + "type": "string", + "description": "A longer description of the message. CommonMark is allowed." + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "deprecated": { + "type": "boolean", + "default": false + }, + "examples": { + "type": "array", + "items": { + "type": "object" + } + }, + "bindings": { + "$ref": "#/definitions/bindingsObject" + } + } + }, + "SecurityScheme": { + "oneOf": [ + { + "$ref": "#/definitions/userPassword" + }, + { + "$ref": "#/definitions/apiKey" + }, + { + "$ref": "#/definitions/X509" + }, + { + "$ref": "#/definitions/symmetricEncryption" + }, + { + "$ref": "#/definitions/asymmetricEncryption" + }, + { + "$ref": "#/definitions/HTTPSecurityScheme" + }, + { + "$ref": "#/definitions/oauth2Flows" + }, + { + "$ref": "#/definitions/openIdConnect" + }, + { + "$ref": "#/definitions/SaslSecurityScheme" + } + ] + }, + "userPassword": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "userPassword" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "apiKey": { + "type": "object", + "required": [ + "type", + "in" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "apiKey" + ] + }, + "in": { + "type": "string", + "enum": [ + "user", + "password" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "X509": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "X509" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "symmetricEncryption": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "symmetricEncryption" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "asymmetricEncryption": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "asymmetricEncryption" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "HTTPSecurityScheme": { + "oneOf": [ + { + "$ref": "#/definitions/NonBearerHTTPSecurityScheme" + }, + { + "$ref": "#/definitions/BearerHTTPSecurityScheme" + }, + { + "$ref": "#/definitions/APIKeyHTTPSecurityScheme" + } + ] + }, + "NonBearerHTTPSecurityScheme": { + "not": { + "type": "object", + "properties": { + "scheme": { + "type": "string", + "enum": [ + "bearer" + ] + } + } + }, + "type": "object", + "required": [ + "scheme", + "type" + ], + "properties": { + "scheme": { + "type": "string" + }, + "description": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "http" + ] + } + }, + 
"patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "BearerHTTPSecurityScheme": { + "type": "object", + "required": [ + "type", + "scheme" + ], + "properties": { + "scheme": { + "type": "string", + "enum": [ + "bearer" + ] + }, + "bearerFormat": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "http" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "APIKeyHTTPSecurityScheme": { + "type": "object", + "required": [ + "type", + "name", + "in" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "httpApiKey" + ] + }, + "name": { + "type": "string" + }, + "in": { + "type": "string", + "enum": [ + "header", + "query", + "cookie" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "SaslSecurityScheme": { + "oneOf": [ + { + "$ref": "#/definitions/SaslPlainSecurityScheme" + }, + { + "$ref": "#/definitions/SaslScramSecurityScheme" + }, + { + "$ref": "#/definitions/SaslGssapiSecurityScheme" + } + ] + }, + "SaslPlainSecurityScheme": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "plain" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "SaslScramSecurityScheme": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "scramSha256", + "scramSha512" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "SaslGssapiSecurityScheme": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "gssapi" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "oauth2Flows": { + "type": "object", + "required": [ + "type", + "flows" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "description": { + "type": "string" + }, + "flows": { + "type": "object", + "properties": { + "implicit": { + "allOf": [ + { + "$ref": "#/definitions/oauth2Flow" + }, + { + "required": [ + "authorizationUrl", + "scopes" + ] + }, + { + "not": { + "required": [ + "tokenUrl" + ] + } + } + ] + }, + "password": { + "allOf": [ + { + "$ref": "#/definitions/oauth2Flow" + }, + { + "required": [ + "tokenUrl", + "scopes" + ] + }, + { + "not": { + "required": [ + "authorizationUrl" + ] + } + } + ] + }, + "clientCredentials": { + "allOf": [ + { + "$ref": "#/definitions/oauth2Flow" + }, + { + "required": [ + "tokenUrl", + "scopes" + ] + }, + { + "not": { + "required": [ + "authorizationUrl" + ] + } + } + ] + }, + "authorizationCode": { + "allOf": [ + { + "$ref": "#/definitions/oauth2Flow" + }, + { + "required": [ + "authorizationUrl", + "tokenUrl", + "scopes" + ] + } + ] + } + }, + "additionalProperties": false + } + }, + 
"patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + } + }, + "oauth2Flow": { + "type": "object", + "properties": { + "authorizationUrl": { + "type": "string", + "format": "uri" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "refreshUrl": { + "type": "string", + "format": "uri" + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "oauth2Scopes": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "openIdConnect": { + "type": "object", + "required": [ + "type", + "openIdConnectUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "openIdConnect" + ] + }, + "description": { + "type": "string" + }, + "openIdConnectUrl": { + "type": "string", + "format": "uri" + } + }, + "patternProperties": { + "^x-[\\w\\d\\.\\x2d_]+$": { + "$ref": "#/definitions/specificationExtension" + } + }, + "additionalProperties": false + }, + "SecurityRequirement": { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + } + } + } +} diff --git a/vendor/github.com/asyncapi/spec-json-schemas/v2/spec-json-schemas.go b/vendor/github.com/asyncapi/spec-json-schemas/v2/spec-json-schemas.go new file mode 100644 index 00000000000..31082c6e964 --- /dev/null +++ b/vendor/github.com/asyncapi/spec-json-schemas/v2/spec-json-schemas.go @@ -0,0 +1,41 @@ +package spec_json_schemas + +import ( + "embed" + "fmt" + "strings" +) + +//go:embed schemas +var fs embed.FS +var specs = make(map[string][]byte) + +// Get retrieves the Schema for a given version. Nil if not found. +func Get(version string) ([]byte, error) { + if specs[version] == nil { + schemas, err := fs.ReadDir("schemas") + if err != nil { + return nil, err + } + + if len(schemas) == len(specs) { + // No more files to load + return nil, nil + } + + for _, f := range schemas { + if f.IsDir() { + continue + } + + raw, err := fs.ReadFile(fmt.Sprintf("schemas/%s", f.Name())) + if err != nil { + return nil, err + } + + specs[strings.TrimSuffix(f.Name(), ".json")] = raw + } + } + + return specs[version], nil +} \ No newline at end of file diff --git a/vendor/github.com/bshuster-repo/logrus-logstash-hook/.gitignore b/vendor/github.com/bshuster-repo/logrus-logstash-hook/.gitignore new file mode 100644 index 00000000000..42067232927 --- /dev/null +++ b/vendor/github.com/bshuster-repo/logrus-logstash-hook/.gitignore @@ -0,0 +1,26 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test +.idea + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof +*.iml diff --git a/vendor/github.com/bshuster-repo/logrus-logstash-hook/.travis.yml b/vendor/github.com/bshuster-repo/logrus-logstash-hook/.travis.yml new file mode 100644 index 00000000000..60c00ef6629 --- /dev/null +++ b/vendor/github.com/bshuster-repo/logrus-logstash-hook/.travis.yml @@ -0,0 +1,19 @@ +language: go +sudo: false + +matrix: + include: + - go: 1.3 + - go: 1.4 + - go: 1.5 + - go: 1.6 + - go: tip + +install: + - # Skip + +script: + - go get -t -v ./... + - diff -u <(echo -n) <(gofmt -d .) + - go tool vet . + - go test -v -race ./... 
diff --git a/vendor/github.com/bshuster-repo/logrus-logstash-hook/CHANGELOG.md b/vendor/github.com/bshuster-repo/logrus-logstash-hook/CHANGELOG.md
new file mode 100644
index 00000000000..31c8b5f3472
--- /dev/null
+++ b/vendor/github.com/bshuster-repo/logrus-logstash-hook/CHANGELOG.md
@@ -0,0 +1,18 @@
+# Changelog
+
+## 0.4
+
+ * Update the name of the package from `logrus_logstash` to `logrustash`
+ * Add TimeFormat to Hook
+ * Replace the old logrus package path: `github.com/Sirupsen/logrus` with `github.com/sirupsen/logrus`
+
+## 0.3
+
+ * Fix the Logstash format to set `@version` to `"1"`
+ * Add unit-tests to logstash.go
+ * Remove the assert package
+ * Add prefix filtering
+
+## Before that (major changes)
+
+ * Update LICENSE to MIT from GPL
diff --git a/vendor/github.com/bshuster-repo/logrus-logstash-hook/LICENSE b/vendor/github.com/bshuster-repo/logrus-logstash-hook/LICENSE
new file mode 100644
index 00000000000..3fb4442f849
--- /dev/null
+++ b/vendor/github.com/bshuster-repo/logrus-logstash-hook/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Boaz Shuster
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/bshuster-repo/logrus-logstash-hook/README.md b/vendor/github.com/bshuster-repo/logrus-logstash-hook/README.md
new file mode 100644
index 00000000000..9cc4378c504
--- /dev/null
+++ b/vendor/github.com/bshuster-repo/logrus-logstash-hook/README.md
@@ -0,0 +1,106 @@
+# Logstash hook for logrus :walrus: [![Build Status](https://travis-ci.org/bshuster-repo/logrus-logstash-hook.svg?branch=master)](https://travis-ci.org/bshuster-repo/logrus-logstash-hook)
+Use this hook to send the logs to [Logstash](https://www.elastic.co/products/logstash) over both UDP and TCP.
+
+## Usage
+
+```go
+package main
+
+import (
+	"github.com/sirupsen/logrus"
+	"github.com/bshuster-repo/logrus-logstash-hook"
+)
+
+func main() {
+	log := logrus.New()
+	hook, err := logrustash.NewHook("tcp", "172.17.0.2:9999", "myappName")
+
+	if err != nil {
+		log.Fatal(err)
+	}
+	log.Hooks.Add(hook)
+	ctx := log.WithFields(logrus.Fields{
+		"method": "main",
+	})
+	...
+	ctx.Info("Hello World!")
+}
+```
+
+This is what it will look like:
+
+```ruby
+{
+    "@timestamp" => "2016-02-29T16:57:23.000Z",
+      "@version" => "1",
+         "level" => "info",
+       "message" => "Hello World!",
+        "method" => "main",
+          "host" => "172.17.0.1",
+          "port" => 45199,
+          "type" => "myappName"
+}
+```
+## Hook Fields
+Fields can be added to the hook, which will always be in the log context.
+This can be done when creating the hook:
+
+```go
+
+hook, err := logrustash.NewHookWithFields("tcp", "172.17.0.2:9999", "myappName", logrus.Fields{
+	"hostname":    os.Hostname(),
+	"serviceName": "myServiceName",
+})
+```
+
+Or afterwards:
+
+```go
+
+hook.WithFields(logrus.Fields{
+	"hostname":    os.Hostname(),
+	"serviceName": "myServiceName",
+})
+```
+This allows you to set up the hook so logging is available immediately, and add important fields as they become available.
+
+Single fields can be added/updated using `WithField`:
+
+```go
+
+hook.WithField("status", "running")
+```
+
+
+
+## Field prefix
+
+The hook allows you to send logging to logstash and also retain the default std output in text format.
+However, to keep this console output readable, some fields may need to be omitted from the default non-hooked log output.
+Each hook can be configured with a prefix used to identify fields which are only to be logged to the logstash connection.
+For example, if you don't want to see the hostname and serviceName on each log line in the console output, you can add a prefix:
+
+```go
+
+
+hook, err := logrustash.NewHookWithFields("tcp", "172.17.0.2:9999", "myappName", logrus.Fields{
+	"_hostname":    os.Hostname(),
+	"_serviceName": "myServiceName",
+})
+...
+hook.WithPrefix("_")
+```
+
+There are also constructors available which allow you to specify the prefix from the start.
+The std-out will not have the '\_hostname' and '\_servicename' fields, and the logstash output will, but the prefix will be dropped from the name.
+
+
+# Authors
+
+Name | Github | Twitter |
+------------ | --------- | ---------- |
+Boaz Shuster | ripcurld0 | @ripcurld0 |
+
+# License
+
+MIT.
diff --git a/vendor/github.com/bshuster-repo/logrus-logstash-hook/logstash.go b/vendor/github.com/bshuster-repo/logrus-logstash-hook/logstash.go
new file mode 100644
index 00000000000..1f2e5a0cb96
--- /dev/null
+++ b/vendor/github.com/bshuster-repo/logrus-logstash-hook/logstash.go
@@ -0,0 +1,133 @@
+package logrustash
+
+import (
+	"net"
+	"strings"
+
+	"github.com/sirupsen/logrus"
+)
+
+// Hook represents a connection to a Logstash instance
+type Hook struct {
+	conn             net.Conn
+	appName          string
+	alwaysSentFields logrus.Fields
+	hookOnlyPrefix   string
+	TimeFormat       string
+}
+
+// NewHook creates a new hook to a Logstash instance, which listens on
+// `protocol`://`address`.
+func NewHook(protocol, address, appName string) (*Hook, error) {
+	return NewHookWithFields(protocol, address, appName, make(logrus.Fields))
+}
+
+// NewHookWithConn creates a new hook to a Logstash instance, using the supplied connection
+func NewHookWithConn(conn net.Conn, appName string) (*Hook, error) {
+	return NewHookWithFieldsAndConn(conn, appName, make(logrus.Fields))
+}
+
+// NewHookWithFields creates a new hook to a Logstash instance, which listens on
+// `protocol`://`address`. alwaysSentFields will be sent with every log entry.
+func NewHookWithFields(protocol, address, appName string, alwaysSentFields logrus.Fields) (*Hook, error) {
+	return NewHookWithFieldsAndPrefix(protocol, address, appName, alwaysSentFields, "")
+}
+
+// NewHookWithFieldsAndPrefix creates a new hook to a Logstash instance, which listens on
+// `protocol`://`address`. alwaysSentFields will be sent with every log entry.
+// prefix is used to select the fields to filter.
+func NewHookWithFieldsAndPrefix(protocol, address, appName string, alwaysSentFields logrus.Fields, prefix string) (*Hook, error) {
+	conn, err := net.Dial(protocol, address)
+	if err != nil {
+		return nil, err
+	}
+	return NewHookWithFieldsAndConnAndPrefix(conn, appName, alwaysSentFields, prefix)
+}
+
+// NewHookWithFieldsAndConn creates a new hook to a Logstash instance using the supplied connection
+func NewHookWithFieldsAndConn(conn net.Conn, appName string, alwaysSentFields logrus.Fields) (*Hook, error) {
+	return NewHookWithFieldsAndConnAndPrefix(conn, appName, alwaysSentFields, "")
+}
+
+// NewHookWithFieldsAndConnAndPrefix creates a new hook to a Logstash instance using the supplied connection and prefix
+func NewHookWithFieldsAndConnAndPrefix(conn net.Conn, appName string, alwaysSentFields logrus.Fields, prefix string) (*Hook, error) {
+	return &Hook{conn: conn, appName: appName, alwaysSentFields: alwaysSentFields, hookOnlyPrefix: prefix}, nil
+}
+
+// NewFilterHook makes a new hook which does not forward to logstash, but simply enforces the prefix rules
+func NewFilterHook() *Hook {
+	return NewFilterHookWithPrefix("")
+}
+
+// NewFilterHookWithPrefix makes a new hook which does not forward to logstash, but simply enforces the specified prefix
+func NewFilterHookWithPrefix(prefix string) *Hook {
+	return &Hook{conn: nil, appName: "", alwaysSentFields: make(logrus.Fields), hookOnlyPrefix: prefix}
+}
+
+func (h *Hook) filterHookOnly(entry *logrus.Entry) {
+	if h.hookOnlyPrefix != "" {
+		for key := range entry.Data {
+			if strings.HasPrefix(key, h.hookOnlyPrefix) {
+				delete(entry.Data, key)
+			}
+		}
+	}
+
+}
+
+// WithPrefix sets a prefix filter to use in all subsequent logging
+func (h *Hook) WithPrefix(prefix string) {
+	h.hookOnlyPrefix = prefix
+}
+
+// WithField adds a single field that will be sent with every log entry.
+func (h *Hook) WithField(key string, value interface{}) {
+	h.alwaysSentFields[key] = value
+}
+
+// WithFields adds several fields that will be sent with every log entry.
+func (h *Hook) WithFields(fields logrus.Fields) {
+	// Add all the new fields to the 'alwaysSentFields', possibly overwriting existing fields
+	for key, value := range fields {
+		h.alwaysSentFields[key] = value
+	}
+}
+
+// Fire sends the entry to Logstash, merging in the always-sent fields and applying the prefix filter.
+func (h *Hook) Fire(entry *logrus.Entry) error {
+	// make sure we always clear the hook-only fields from the entry
+	defer h.filterHookOnly(entry)
+
+	// Add in the alwaysSentFields. We don't override fields that are already set.
+	for k, v := range h.alwaysSentFields {
+		if _, inMap := entry.Data[k]; !inMap {
+			entry.Data[k] = v
+		}
+	}
+
+	// For a filtering hook, stop here
+	if h.conn == nil {
+		return nil
+	}
+
+	formatter := LogstashFormatter{Type: h.appName}
+	if h.TimeFormat != "" {
+		formatter.TimestampFormat = h.TimeFormat
+	}
+
+	dataBytes, err := formatter.FormatWithPrefix(entry, h.hookOnlyPrefix)
+	if err != nil {
+		return err
+	}
+	if _, err = h.conn.Write(dataBytes); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (h *Hook) Levels() []logrus.Level {
+	return []logrus.Level{
+		logrus.PanicLevel,
+		logrus.FatalLevel,
+		logrus.ErrorLevel,
+		logrus.WarnLevel,
+		logrus.InfoLevel,
+		logrus.DebugLevel,
+	}
+}
diff --git a/vendor/github.com/bshuster-repo/logrus-logstash-hook/logstash_formatter.go b/vendor/github.com/bshuster-repo/logrus-logstash-hook/logstash_formatter.go
new file mode 100644
index 00000000000..64bc5c3899b
--- /dev/null
+++ b/vendor/github.com/bshuster-repo/logrus-logstash-hook/logstash_formatter.go
@@ -0,0 +1,81 @@
+package logrustash
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/sirupsen/logrus"
+)
+
+// LogstashFormatter generates json in logstash format.
+// Logstash site: http://logstash.net/
+type LogstashFormatter struct {
+	Type string // if not empty use for logstash type field.
+
+	// TimestampFormat sets the format used for timestamps.
+	TimestampFormat string
+}
+
+func (f *LogstashFormatter) Format(entry *logrus.Entry) ([]byte, error) {
+	return f.FormatWithPrefix(entry, "")
+}
+
+func (f *LogstashFormatter) FormatWithPrefix(entry *logrus.Entry, prefix string) ([]byte, error) {
+	fields := make(logrus.Fields)
+	for k, v := range entry.Data {
+		// remove the prefix when sending the fields to logstash
+		if prefix != "" && strings.HasPrefix(k, prefix) {
+			k = strings.TrimPrefix(k, prefix)
+		}
+
+		switch v := v.(type) {
+		case error:
+			// Otherwise errors are ignored by `encoding/json`
+			// https://github.com/Sirupsen/logrus/issues/377
+			fields[k] = v.Error()
+		default:
+			fields[k] = v
+		}
+	}
+
+	fields["@version"] = "1"
+
+	timeStampFormat := f.TimestampFormat
+
+	if timeStampFormat == "" {
+		timeStampFormat = time.RFC3339
+	}
+
+	fields["@timestamp"] = entry.Time.Format(timeStampFormat)
+
+	// set message field
+	v, ok := entry.Data["message"]
+	if ok {
+		fields["fields.message"] = v
+	}
+	fields["message"] = entry.Message
+
+	// set level field
+	v, ok = entry.Data["level"]
+	if ok {
+		fields["fields.level"] = v
+	}
+	fields["level"] = entry.Level.String()
+
+	// set type field
+	if f.Type != "" {
+		v, ok = entry.Data["type"]
+		if ok {
+			fields["fields.type"] = v
+		}
+		fields["type"] = f.Type
+	}
+
+	serialized, err := json.Marshal(fields)
+	if err != nil {
+		return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+	}
+	return append(serialized, '\n'), nil
+}
diff --git a/vendor/github.com/buger/jsonparser/.gitignore b/vendor/github.com/buger/jsonparser/.gitignore
new file mode 100644
index 00000000000..5598d8a5691
--- /dev/null
+++ b/vendor/github.com/buger/jsonparser/.gitignore
@@ -0,0 +1,12 @@
+
+*.test
+
+*.out
+
+*.mprof
+
+.idea
+
+vendor/github.com/buger/goterm/
+prof.cpu
+prof.mem
diff --git a/vendor/github.com/buger/jsonparser/.travis.yml b/vendor/github.com/buger/jsonparser/.travis.yml
new file mode 100644
index 00000000000..dbfb7cf9883
--- /dev/null
+++ b/vendor/github.com/buger/jsonparser/.travis.yml
@@ -0,0 +1,11 @@
+language: go
+arch:
+  - amd64
+  - ppc64le
+go:
+  - 1.7.x
+  - 1.8.x
+  - 1.9.x
+  - 1.10.x
+  - 1.11.x
+script: go test -v ./.
diff --git a/vendor/github.com/buger/jsonparser/Dockerfile b/vendor/github.com/buger/jsonparser/Dockerfile
new file mode 100644
index 00000000000..37fc9fd0b4d
--- /dev/null
+++ b/vendor/github.com/buger/jsonparser/Dockerfile
@@ -0,0 +1,12 @@
+FROM golang:1.6
+
+RUN go get github.com/Jeffail/gabs
+RUN go get github.com/bitly/go-simplejson
+RUN go get github.com/pquerna/ffjson
+RUN go get github.com/antonholmquist/jason
+RUN go get github.com/mreiferson/go-ujson
+RUN go get -tags=unsafe -u github.com/ugorji/go/codec
+RUN go get github.com/mailru/easyjson
+
+WORKDIR /go/src/github.com/buger/jsonparser
+ADD . /go/src/github.com/buger/jsonparser
\ No newline at end of file
diff --git a/vendor/github.com/buger/jsonparser/LICENSE b/vendor/github.com/buger/jsonparser/LICENSE
new file mode 100644
index 00000000000..ac25aeb7da2
--- /dev/null
+++ b/vendor/github.com/buger/jsonparser/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2016 Leonid Bugaev
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/buger/jsonparser/Makefile b/vendor/github.com/buger/jsonparser/Makefile
new file mode 100644
index 00000000000..e843368cf10
--- /dev/null
+++ b/vendor/github.com/buger/jsonparser/Makefile
@@ -0,0 +1,36 @@
+SOURCE = parser.go
+CONTAINER = jsonparser
+SOURCE_PATH = /go/src/github.com/buger/jsonparser
+BENCHMARK = JsonParser
+BENCHTIME = 5s
+TEST = .
+DRUN = docker run -v `pwd`:$(SOURCE_PATH) -i -t $(CONTAINER)
+
+build:
+	docker build -t $(CONTAINER) .
+
+race:
+	$(DRUN) --env GORACE="halt_on_error=1" go test ./. $(ARGS) -v -race -timeout 15s
+
+bench:
+	$(DRUN) go test $(LDFLAGS) -test.benchmem -bench $(BENCHMARK) ./benchmark/ $(ARGS) -benchtime $(BENCHTIME) -v
+
+bench_local:
+	$(DRUN) go test $(LDFLAGS) -test.benchmem -bench . $(ARGS) -benchtime $(BENCHTIME) -v
+
+profile:
+	$(DRUN) go test $(LDFLAGS) -test.benchmem -bench $(BENCHMARK) ./benchmark/ $(ARGS) -memprofile mem.mprof -v
+	$(DRUN) go test $(LDFLAGS) -test.benchmem -bench $(BENCHMARK) ./benchmark/ $(ARGS) -cpuprofile cpu.out -v
+	$(DRUN) go test $(LDFLAGS) -test.benchmem -bench $(BENCHMARK) ./benchmark/ $(ARGS) -c
+
+test:
+	$(DRUN) go test $(LDFLAGS) ./ -run $(TEST) -timeout 10s $(ARGS) -v
+
+fmt:
+	$(DRUN) go fmt ./...
+
+vet:
+	$(DRUN) go vet ./.
+
+bash:
+	$(DRUN) /bin/bash
\ No newline at end of file
diff --git a/vendor/github.com/buger/jsonparser/README.md b/vendor/github.com/buger/jsonparser/README.md
new file mode 100644
index 00000000000..d7e0ec397af
--- /dev/null
+++ b/vendor/github.com/buger/jsonparser/README.md
@@ -0,0 +1,365 @@
+[![Go Report Card](https://goreportcard.com/badge/github.com/buger/jsonparser)](https://goreportcard.com/report/github.com/buger/jsonparser) ![License](https://img.shields.io/dub/l/vibe-d.svg)
+# Alternative JSON parser for Go (up to 10x faster than the standard library)
+
+It does not require you to know the structure of the payload (e.g. create structs), and allows accessing fields by providing the path to them. It is up to **10 times faster** than the standard `encoding/json` package (depending on payload size and usage) and **allocates no memory**. See benchmarks below.
+
+## Rationale
+Originally I made this for a project that relies on a lot of 3rd party APIs that can be unpredictable and complex.
+I love simplicity and prefer to avoid external dependencies. `encoding/json` requires you to know your data structures exactly, and if you prefer to use `map[string]interface{}` instead, it will be very slow and hard to manage.
+I investigated what's on the market and found that most libraries are just wrappers around `encoding/json`; there are a few options with their own parsers (`ffjson`, `easyjson`), but they still require you to create data structures.
+
+
+The goal of this project is to push the JSON parser to its performance limits without sacrificing compliance or developer experience.
+
+## Example
+For the given JSON our goal is to extract the user's full name, number of github followers and avatar.
+
+```go
+import "github.com/buger/jsonparser"
+
+...
+
+data := []byte(`{
+  "person": {
+    "name": {
+      "first": "Leonid",
+      "last": "Bugaev",
+      "fullName": "Leonid Bugaev"
+    },
+    "github": {
+      "handle": "buger",
+      "followers": 109
+    },
+    "avatars": [
+      { "url": "https://avatars1.githubusercontent.com/u/14009?v=3&s=460", "type": "thumbnail" }
+    ]
+  },
+  "company": {
+    "name": "Acme"
+  }
+}`)
+
+// You can specify the key path by providing arguments to the Get function
+jsonparser.Get(data, "person", "name", "fullName")
+
+// There are `GetInt` and `GetBoolean` helpers if you know the key's data type exactly
+jsonparser.GetInt(data, "person", "github", "followers")
+
+// When you try to get an object, it will return a []byte slice pointer to the data containing it
+// In `company` it will be `{"name": "Acme"}`
+jsonparser.Get(data, "company")
+
+// If the key doesn't exist it will return an error
+var size int64
+if value, err := jsonparser.GetInt(data, "company", "size"); err == nil {
+	size = value
+}
+
+// You can use the `ArrayEach` helper to iterate items [item1, item2 .... itemN]
+jsonparser.ArrayEach(data, func(value []byte, dataType jsonparser.ValueType, offset int, err error) {
+	fmt.Println(jsonparser.Get(value, "url"))
+}, "person", "avatars")
+
+// Or you can access fields by index!
+jsonparser.GetString(data, "person", "avatars", "[0]", "url")
+
+// You can use the `ObjectEach` helper to iterate objects { "key1":object1, "key2":object2, .... "keyN":objectN }
"keyN":objectN } +jsonparser.ObjectEach(data, func(key []byte, value []byte, dataType jsonparser.ValueType, offset int) error { + fmt.Printf("Key: '%s'\n Value: '%s'\n Type: %s\n", string(key), string(value), dataType) + return nil +}, "person", "name") + +// The most efficient way to extract multiple keys is `EachKey` + +paths := [][]string{ + []string{"person", "name", "fullName"}, + []string{"person", "avatars", "[0]", "url"}, + []string{"company", "url"}, +} +jsonparser.EachKey(data, func(idx int, value []byte, vt jsonparser.ValueType, err error){ + switch idx { + case 0: // []string{"person", "name", "fullName"} + ... + case 1: // []string{"person", "avatars", "[0]", "url"} + ... + case 2: // []string{"company", "url"}, + ... + } +}, paths...) + +// For more information see docs below +``` + +## Need to speedup your app? + +I'm available for consulting and can help you push your app performance to the limits. Ping me at: leonsbox@gmail.com. + +## Reference + +Library API is really simple. You just need the `Get` method to perform any operation. The rest is just helpers around it. + +You also can view API at [godoc.org](https://godoc.org/github.com/buger/jsonparser) + + +### **`Get`** +```go +func Get(data []byte, keys ...string) (value []byte, dataType jsonparser.ValueType, offset int, err error) +``` +Receives data structure, and key path to extract value from. + +Returns: +* `value` - Pointer to original data structure containing key value, or just empty slice if nothing found or error +* `dataType` - Can be: `NotExist`, `String`, `Number`, `Object`, `Array`, `Boolean` or `Null` +* `offset` - Offset from provided data structure where key value ends. Used mostly internally, for example for `ArrayEach` helper. +* `err` - If the key is not found or any other parsing issue, it should return error. If key not found it also sets `dataType` to `NotExist` + +Accepts multiple keys to specify path to JSON value (in case of quering nested structures). +If no keys are provided it will try to extract the closest JSON value (simple ones or object/array), useful for reading streams or arrays, see `ArrayEach` implementation. + +Note that keys can be an array indexes: `jsonparser.GetInt("person", "avatars", "[0]", "url")`, pretty cool, yeah? + +### **`GetString`** +```go +func GetString(data []byte, keys ...string) (val string, err error) +``` +Returns strings properly handing escaped and unicode characters. Note that this will cause additional memory allocations. + +### **`GetUnsafeString`** +If you need string in your app, and ready to sacrifice with support of escaped symbols in favor of speed. It returns string mapped to existing byte slice memory, without any allocations: +```go +s, _, := jsonparser.GetUnsafeString(data, "person", "name", "title") +switch s { + case 'CEO': + ... + case 'Engineer' + ... + ... +} +``` +Note that `unsafe` here means that your string will exist until GC will free underlying byte slice, for most of cases it means that you can use this string only in current context, and should not pass it anywhere externally: through channels or any other way. + + +### **`GetBoolean`**, **`GetInt`** and **`GetFloat`** +```go +func GetBoolean(data []byte, keys ...string) (val bool, err error) + +func GetFloat(data []byte, keys ...string) (val float64, err error) + +func GetInt(data []byte, keys ...string) (val int64, err error) +``` +If you know the key type, you can use the helpers above. +If key data type do not match, it will return error. 
+
+### **`ArrayEach`**
+```go
+func ArrayEach(data []byte, cb func(value []byte, dataType jsonparser.ValueType, offset int, err error), keys ...string)
+```
+Needed for iterating arrays; accepts a callback function with the same return arguments as `Get`.
+
+### **`ObjectEach`**
+```go
+func ObjectEach(data []byte, callback func(key []byte, value []byte, dataType ValueType, offset int) error, keys ...string) (err error)
+```
+Needed for iterating objects; accepts a callback function. Example:
+```go
+var handler func([]byte, []byte, jsonparser.ValueType, int) error
+handler = func(key []byte, value []byte, dataType jsonparser.ValueType, offset int) error {
+	// do stuff here
+	return nil
+}
+jsonparser.ObjectEach(myJson, handler)
+```
+
+
+### **`EachKey`**
+```go
+func EachKey(data []byte, cb func(idx int, value []byte, dataType jsonparser.ValueType, err error), paths ...[]string)
+```
+When you need to read multiple keys and you are not afraid of a low-level API, `EachKey` is your friend. It reads the payload only a single time and calls the callback function once a path is found. By contrast, when you call `Get` multiple times, it has to process the payload each time you call it. Depending on the payload, `EachKey` can be several times faster than `Get`. Paths can use nested keys as well!
+
+```go
+paths := [][]string{
+	[]string{"uuid"},
+	[]string{"tz"},
+	[]string{"ua"},
+	[]string{"st"},
+}
+var data SmallPayload
+
+jsonparser.EachKey(smallFixture, func(idx int, value []byte, vt jsonparser.ValueType, err error) {
+	switch idx {
+	case 0:
+		data.Uuid = string(value)
+	case 1:
+		v, _ := jsonparser.ParseInt(value)
+		data.Tz = int(v)
+	case 2:
+		data.Ua = string(value)
+	case 3:
+		v, _ := jsonparser.ParseInt(value)
+		data.St = int(v)
+	}
+}, paths...)
+```
+
+### **`Set`**
+```go
+func Set(data []byte, setValue []byte, keys ...string) (value []byte, err error)
+```
+Receives an existing data structure, a key path to set, and a value to set at that key. *This functionality is experimental.*
+
+Returns:
+* `value` - Pointer to the original data structure with the key value updated or added.
+* `err` - Set on any parsing issue.
+
+Accepts multiple keys to specify the path to a JSON value (for updating or creating nested structures).
+
+Note that keys can be array indexes: `jsonparser.Set(data, []byte("http://github.com"), "person", "avatars", "[0]", "url")`
+
+### **`Delete`**
+```go
+func Delete(data []byte, keys ...string) (value []byte)
+```
+Receives an existing data structure and a key path to delete. *This functionality is experimental.*
+
+Returns:
+* `value` - Pointer to the original data structure with the key path deleted if it can be found. If there is no key path, the whole data structure is deleted.
+
+Accepts multiple keys to specify the path to a JSON value (for updating or creating nested structures).
+
+Note that keys can be array indexes: `jsonparser.Delete(data, "person", "avatars", "[0]", "url")`
+
+
+## What makes it so fast?
+* It does not rely on `encoding/json`, `reflection` or `interface{}`; the only real package dependency is `bytes`.
+* Operates on the JSON payload at the byte level, giving you pointers into the original data structure: no memory allocation.
+* No automatic type conversions: by default everything is a []byte, but it gives you the value type, so you can convert it yourself (a few helpers are included).
+* Does not parse the full record, only the keys you specify
+
+
+## Benchmarks
+
+There are 3 benchmark types, trying to simulate real-life usage for small, medium and large JSON payloads.
+For each metric, the lower value is better. Time/op is in nanoseconds. Values better than standard encoding/json are marked in bold.
+Benchmarks were run on a standard Linode 1024 box.
+
+Compared libraries:
+* https://golang.org/pkg/encoding/json
+* https://github.com/Jeffail/gabs
+* https://github.com/a8m/djson
+* https://github.com/bitly/go-simplejson
+* https://github.com/antonholmquist/jason
+* https://github.com/mreiferson/go-ujson
+* https://github.com/ugorji/go/codec
+* https://github.com/pquerna/ffjson
+* https://github.com/mailru/easyjson
+* https://github.com/buger/jsonparser
+
+#### TLDR
+If you want to skip the next sections, we have two winners: `jsonparser` and `easyjson`.
+`jsonparser` is up to 10 times faster than the standard `encoding/json` package (depending on payload size and usage), and almost infinitely (literally) better in memory consumption, because it operates on the data at the byte level and provides direct slice pointers.
+`easyjson` wins on CPU in the medium tests, and frankly I'm impressed with this package: these are remarkable results considering that it is almost a drop-in replacement for `encoding/json` (it requires some code generation).
+
+It's hard to fully compare `jsonparser` and `easyjson` (or `ffjson`): the latter are true parsers and fully process the record, unlike `jsonparser`, which parses only the keys you specify.
+
+If you are searching for a replacement for `encoding/json` while keeping structs, `easyjson` is an amazing choice. If you want to process dynamic JSON, have memory constraints, or want more control over your data, you should try `jsonparser`.
+
+`jsonparser` performance heavily depends on usage, and it works best when you do not need to process the full record, only some keys. The more calls you need to make, the slower it will be; in contrast, `easyjson` (or `ffjson`, `encoding/json`) parses the record only once, and then you can make as many calls as you want.
+
+With great power comes great responsibility! :)
+
+
+#### Small payload
+
+Each test processes 190 bytes of http log as a JSON record.
+It should read multiple fields.
+https://github.com/buger/jsonparser/blob/master/benchmark/benchmark_small_payload_test.go
+
+Library | time/op | bytes/op | allocs/op
+ ------ | ------- | -------- | -------
+encoding/json struct | 7879 | 880 | 18
+encoding/json interface{} | 8946 | 1521 | 38
+Jeffail/gabs | 10053 | 1649 | 46
+bitly/go-simplejson | 10128 | 2241 | 36
+antonholmquist/jason | 27152 | 7237 | 101
+github.com/ugorji/go/codec | 8806 | 2176 | 31
+mreiferson/go-ujson | **7008** | **1409** | 37
+a8m/djson | 3862 | 1249 | 30
+pquerna/ffjson | **3769** | **624** | **15**
+mailru/easyjson | **2002** | **192** | **9**
+buger/jsonparser | **1367** | **0** | **0**
+buger/jsonparser (EachKey API) | **809** | **0** | **0**
+
+The winners are `ffjson`, `easyjson` and `jsonparser`, where `jsonparser` is up to 9.8x faster than `encoding/json`, 4.6x faster than `ffjson`, and slightly faster than `easyjson`.
+If you look at memory allocations, `jsonparser` has no rivals, as it makes no data copies and operates on the raw []byte structure with pointers into it.
+
+#### Medium payload
+
+Each test processes a 2.4kb JSON record (based on Clearbit API).
+It should read multiple nested fields and 1 array.
+
+https://github.com/buger/jsonparser/blob/master/benchmark/benchmark_medium_payload_test.go
+
+| Library | time/op | bytes/op | allocs/op |
+| ------- | ------- | -------- | --------- |
+| encoding/json struct | 57749 | 1336 | 29 |
+| encoding/json interface{} | 79297 | 10627 | 215 |
+| Jeffail/gabs | 83807 | 11202 | 235 |
+| bitly/go-simplejson | 88187 | 17187 | 220 |
+| antonholmquist/jason | 94099 | 19013 | 247 |
+| github.com/ugorji/go/codec | 114719 | 6712 | 152 |
+| mreiferson/go-ujson | **56972** | 11547 | 270 |
+| a8m/djson | 28525 | 10196 | 198 |
+| pquerna/ffjson | **20298** | **856** | **20** |
+| mailru/easyjson | **10512** | **336** | **12** |
+| buger/jsonparser | **15955** | **0** | **0** |
+| buger/jsonparser (EachKey API) | **8916** | **0** | **0** |
+
+The difference between `ffjson` and `jsonparser` in CPU usage is smaller, while the difference in memory consumption grows. On the other hand, `easyjson` shows remarkable performance for the medium payload.
+
+`gabs`, `go-simplejson` and `jason` are based on `encoding/json` and `map[string]interface{}`, and are really only helpers for unstructured JSON; their performance correlates with `encoding/json interface{}`, so they skip the next round.
+`go-ujson`, while it has its own parser, shows the same performance as `encoding/json` and also skips the next round. The same goes for `ugorji/go/codec`, which additionally showed unexpectedly bad performance for complex payloads.
+
+
+#### Large payload
+
+Each test processes a 24kb JSON record (based on Discourse API).
+It should read 2 arrays, and for each item in the array get a few fields.
+Basically it means processing a full JSON file.
+
+https://github.com/buger/jsonparser/blob/master/benchmark/benchmark_large_payload_test.go
+
+| Library | time/op | bytes/op | allocs/op |
+| --- | --- | --- | --- |
+| encoding/json struct | 748336 | 8272 | 307 |
+| encoding/json interface{} | 1224271 | 215425 | 3395 |
+| a8m/djson | 510082 | 213682 | 2845 |
+| pquerna/ffjson | **312271** | **7792** | **298** |
+| mailru/easyjson | **154186** | **6992** | **288** |
+| buger/jsonparser | **85308** | **0** | **0** |
+
+`jsonparser` is now the winner, but do not forget that it is a far more lightweight parser than `ffjson` or `easyjson`: they have to parse all the data, while `jsonparser` parses only what you need. All of `ffjson`, `easyjson` and `jsonparser` have their own parsing code and do not depend on `encoding/json` or `interface{}`; that's one of the reasons why they are so fast. `easyjson` also uses a bit of the `unsafe` package to reduce memory consumption (in theory this can lead to some unexpected GC issues, but I have not tested it enough).
+
+Also, the last benchmark did not include the `EachKey` test, because in this particular case we need to read a lot of array values, and using `ArrayEach` is more efficient.
+
+## Questions and support
+
+All bug reports and suggestions should go through GitHub Issues.
+
+## Contributing
+
+1. Fork it
+2. Create your feature branch (git checkout -b my-new-feature)
+3. Commit your changes (git commit -am 'Added some feature')
+4. Push to the branch (git push origin my-new-feature)
+5. Create new Pull Request
+
+## Development
+
+All my development happens using Docker, and the repo includes some Make tasks to simplify development.
+
+* `make build` - builds the docker image, usually only needs to be called once
+* `make test` - run tests
+* `make fmt` - run go fmt
+* `make bench` - run benchmarks (if you need to run only a single benchmark, modify the `BENCHMARK` variable in the Makefile)
+* `make profile` - runs benchmarks and generates 3 files - `cpu.out`, `mem.mprof` and a `benchmark.test` binary, which can be used with `go tool pprof`
+* `make bash` - enter the container (I use it for running `go tool pprof` above)
diff --git a/vendor/github.com/buger/jsonparser/bytes.go b/vendor/github.com/buger/jsonparser/bytes.go
new file mode 100644
index 00000000000..0bb0ff39562
--- /dev/null
+++ b/vendor/github.com/buger/jsonparser/bytes.go
@@ -0,0 +1,47 @@
+package jsonparser
+
+import (
+	bio "bytes"
+)
+
+// minInt64 '-9223372036854775808' is the smallest representable number in int64
+const minInt64 = `9223372036854775808`
+
+// About 2x faster than strconv.ParseInt because it only supports base 10, which is enough for JSON
+func parseInt(bytes []byte) (v int64, ok bool, overflow bool) {
+	if len(bytes) == 0 {
+		return 0, false, false
+	}
+
+	var neg bool = false
+	if bytes[0] == '-' {
+		neg = true
+		bytes = bytes[1:]
+	}
+
+	var b int64 = 0
+	for _, c := range bytes {
+		if c >= '0' && c <= '9' {
+			b = (10 * v) + int64(c-'0')
+		} else {
+			return 0, false, false
+		}
+		if overflow = (b < v); overflow {
+			break
+		}
+		v = b
+	}
+
+	if overflow {
+		if neg && bio.Equal(bytes, []byte(minInt64)) {
+			return b, true, false
+		}
+		return 0, false, true
+	}
+
+	if neg {
+		return -v, true, false
+	} else {
+		return v, true, false
+	}
+}
diff --git a/vendor/github.com/buger/jsonparser/bytes_safe.go b/vendor/github.com/buger/jsonparser/bytes_safe.go
new file mode 100644
index 00000000000..ff16a4a1955
--- /dev/null
+++ b/vendor/github.com/buger/jsonparser/bytes_safe.go
@@ -0,0 +1,25 @@
+// +build appengine appenginevm
+
+package jsonparser
+
+import (
+	"strconv"
+)
+
+// See bytes_unsafe.go for an explanation of why *[]byte is used (signatures must be consistent with those in that file)
+
+func equalStr(b *[]byte, s string) bool {
+	return string(*b) == s
+}
+
+func parseFloat(b *[]byte) (float64, error) {
+	return strconv.ParseFloat(string(*b), 64)
+}
+
+func bytesToString(b *[]byte) string {
+	return string(*b)
+}
+
+func StringToBytes(s string) []byte {
+	return []byte(s)
+}
diff --git a/vendor/github.com/buger/jsonparser/bytes_unsafe.go b/vendor/github.com/buger/jsonparser/bytes_unsafe.go
new file mode 100644
index 00000000000..589fea87eb3
--- /dev/null
+++ b/vendor/github.com/buger/jsonparser/bytes_unsafe.go
@@ -0,0 +1,44 @@
+// +build !appengine,!appenginevm
+
+package jsonparser
+
+import (
+	"reflect"
+	"runtime"
+	"strconv"
+	"unsafe"
+)
+
+//
+// The reason for using *[]byte rather than []byte in parameters is an optimization. As of Go 1.6,
+// the compiler cannot perfectly inline the function when using a non-pointer slice. That is,
+// the non-pointer []byte parameter version is slower than if its function body is manually
+// inlined, whereas the pointer []byte version is equally fast to the manually inlined
+// version. Instruction count in assembly taken from "go tool compile" confirms this difference.
+//
+// TODO: Remove hack after Go 1.7 release
+//
+func equalStr(b *[]byte, s string) bool {
+	return *(*string)(unsafe.Pointer(b)) == s
+}
+
+func parseFloat(b *[]byte) (float64, error) {
+	return strconv.ParseFloat(*(*string)(unsafe.Pointer(b)), 64)
+}
+
+// A hack until issue golang/go#2632 is fixed.
+// See: https://github.com/golang/go/issues/2632 +func bytesToString(b *[]byte) string { + return *(*string)(unsafe.Pointer(b)) +} + +func StringToBytes(s string) []byte { + b := make([]byte, 0, 0) + bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + sh := (*reflect.StringHeader)(unsafe.Pointer(&s)) + bh.Data = sh.Data + bh.Cap = sh.Len + bh.Len = sh.Len + runtime.KeepAlive(s) + return b +} diff --git a/vendor/github.com/buger/jsonparser/escape.go b/vendor/github.com/buger/jsonparser/escape.go new file mode 100644 index 00000000000..49669b94207 --- /dev/null +++ b/vendor/github.com/buger/jsonparser/escape.go @@ -0,0 +1,173 @@ +package jsonparser + +import ( + "bytes" + "unicode/utf8" +) + +// JSON Unicode stuff: see https://tools.ietf.org/html/rfc7159#section-7 + +const supplementalPlanesOffset = 0x10000 +const highSurrogateOffset = 0xD800 +const lowSurrogateOffset = 0xDC00 + +const basicMultilingualPlaneReservedOffset = 0xDFFF +const basicMultilingualPlaneOffset = 0xFFFF + +func combineUTF16Surrogates(high, low rune) rune { + return supplementalPlanesOffset + (high-highSurrogateOffset)<<10 + (low - lowSurrogateOffset) +} + +const badHex = -1 + +func h2I(c byte) int { + switch { + case c >= '0' && c <= '9': + return int(c - '0') + case c >= 'A' && c <= 'F': + return int(c - 'A' + 10) + case c >= 'a' && c <= 'f': + return int(c - 'a' + 10) + } + return badHex +} + +// decodeSingleUnicodeEscape decodes a single \uXXXX escape sequence. The prefix \u is assumed to be present and +// is not checked. +// In JSON, these escapes can either come alone or as part of "UTF16 surrogate pairs" that must be handled together. +// This function only handles one; decodeUnicodeEscape handles this more complex case. +func decodeSingleUnicodeEscape(in []byte) (rune, bool) { + // We need at least 6 characters total + if len(in) < 6 { + return utf8.RuneError, false + } + + // Convert hex to decimal + h1, h2, h3, h4 := h2I(in[2]), h2I(in[3]), h2I(in[4]), h2I(in[5]) + if h1 == badHex || h2 == badHex || h3 == badHex || h4 == badHex { + return utf8.RuneError, false + } + + // Compose the hex digits + return rune(h1<<12 + h2<<8 + h3<<4 + h4), true +} + +// isUTF16EncodedRune checks if a rune is in the range for non-BMP characters, +// which is used to describe UTF16 chars. 
+// Source: https://en.wikipedia.org/wiki/Plane_(Unicode)#Basic_Multilingual_Plane
+func isUTF16EncodedRune(r rune) bool {
+	return highSurrogateOffset <= r && r <= basicMultilingualPlaneReservedOffset
+}
+
+func decodeUnicodeEscape(in []byte) (rune, int) {
+	if r, ok := decodeSingleUnicodeEscape(in); !ok {
+		// Invalid Unicode escape
+		return utf8.RuneError, -1
+	} else if r <= basicMultilingualPlaneOffset && !isUTF16EncodedRune(r) {
+		// Valid Unicode escape in Basic Multilingual Plane
+		return r, 6
+	} else if r2, ok := decodeSingleUnicodeEscape(in[6:]); !ok { // Note: previous decodeSingleUnicodeEscape success guarantees at least 6 bytes remain
+		// UTF16 "high surrogate" without mandatory valid following Unicode escape for the "low surrogate"
+		return utf8.RuneError, -1
+	} else if r2 < lowSurrogateOffset {
+		// Invalid UTF16 "low surrogate"
+		return utf8.RuneError, -1
+	} else {
+		// Valid UTF16 surrogate pair
+		return combineUTF16Surrogates(r, r2), 12
+	}
+}
+
+// backslashCharEscapeTable: when '\X' is found for some byte X, it is to be replaced with backslashCharEscapeTable[X]
+var backslashCharEscapeTable = [...]byte{
+	'"':  '"',
+	'\\': '\\',
+	'/':  '/',
+	'b':  '\b',
+	'f':  '\f',
+	'n':  '\n',
+	'r':  '\r',
+	't':  '\t',
+}
+
+// unescapeToUTF8 unescapes the single escape sequence starting at 'in' into 'out' and returns
+// how many characters were consumed from 'in' and emitted into 'out'.
+// If a valid escape sequence does not appear as a prefix of 'in', (-1, -1) is returned to signal the error.
+func unescapeToUTF8(in, out []byte) (inLen int, outLen int) {
+	if len(in) < 2 || in[0] != '\\' {
+		// Invalid escape due to insufficient characters for any escape or no initial backslash
+		return -1, -1
+	}
+
+	// https://tools.ietf.org/html/rfc7159#section-7
+	switch e := in[1]; e {
+	case '"', '\\', '/', 'b', 'f', 'n', 'r', 't':
+		// Valid basic 2-character escapes (use lookup table)
+		out[0] = backslashCharEscapeTable[e]
+		return 2, 1
+	case 'u':
+		// Unicode escape
+		if r, inLen := decodeUnicodeEscape(in); inLen == -1 {
+			// Invalid Unicode escape
+			return -1, -1
+		} else {
+			// Valid Unicode escape; re-encode as UTF8
+			outLen := utf8.EncodeRune(out, r)
+			return inLen, outLen
+		}
+	}
+
+	return -1, -1
+}
+
+// Unescape unescapes the string contained in 'in' and returns it as a slice.
+// If 'in' contains no escaped characters:
+//   Returns 'in'.
+// Else, if 'out' is of sufficient capacity (guaranteed if cap(out) >= len(in)):
+//   'out' is used to build the unescaped string and is returned with no extra allocation.
+// Else:
+//   A new slice is allocated and returned.
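+// For example (this is how the parser code later in this diff uses it), callers can
+// pass a small stack-allocated scratch buffer so short strings unescape without a
+// heap allocation:
+//   var stackbuf [unescapeStackBufSize]byte
+//   unescaped, err := Unescape(escaped, stackbuf[:])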
+func Unescape(in, out []byte) ([]byte, error) { + firstBackslash := bytes.IndexByte(in, '\\') + if firstBackslash == -1 { + return in, nil + } + + // Get a buffer of sufficient size (allocate if needed) + if cap(out) < len(in) { + out = make([]byte, len(in)) + } else { + out = out[0:len(in)] + } + + // Copy the first sequence of unescaped bytes to the output and obtain a buffer pointer (subslice) + copy(out, in[:firstBackslash]) + in = in[firstBackslash:] + buf := out[firstBackslash:] + + for len(in) > 0 { + // Unescape the next escaped character + inLen, bufLen := unescapeToUTF8(in, buf) + if inLen == -1 { + return nil, MalformedStringEscapeError + } + + in = in[inLen:] + buf = buf[bufLen:] + + // Copy everything up until the next backslash + nextBackslash := bytes.IndexByte(in, '\\') + if nextBackslash == -1 { + copy(buf, in) + buf = buf[len(in):] + break + } else { + copy(buf, in[:nextBackslash]) + buf = buf[nextBackslash:] + in = in[nextBackslash:] + } + } + + // Trim the out buffer to the amount that was actually emitted + return out[:len(out)-len(buf)], nil +} diff --git a/vendor/github.com/buger/jsonparser/fuzz.go b/vendor/github.com/buger/jsonparser/fuzz.go new file mode 100644 index 00000000000..854bd11b2cd --- /dev/null +++ b/vendor/github.com/buger/jsonparser/fuzz.go @@ -0,0 +1,117 @@ +package jsonparser + +func FuzzParseString(data []byte) int { + r, err := ParseString(data) + if err != nil || r == "" { + return 0 + } + return 1 +} + +func FuzzEachKey(data []byte) int { + paths := [][]string{ + {"name"}, + {"order"}, + {"nested", "a"}, + {"nested", "b"}, + {"nested2", "a"}, + {"nested", "nested3", "b"}, + {"arr", "[1]", "b"}, + {"arrInt", "[3]"}, + {"arrInt", "[5]"}, + {"nested"}, + {"arr", "["}, + {"a\n", "b\n"}, + } + EachKey(data, func(idx int, value []byte, vt ValueType, err error) {}, paths...) 
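+	// go-fuzz convention: returning 1 marks the input as interesting (higher priority), 0 as not.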
+ return 1 +} + +func FuzzDelete(data []byte) int { + Delete(data, "test") + return 1 +} + +func FuzzSet(data []byte) int { + _, err := Set(data, []byte(`"new value"`), "test") + if err != nil { + return 0 + } + return 1 +} + +func FuzzObjectEach(data []byte) int { + _ = ObjectEach(data, func(key, value []byte, valueType ValueType, off int) error { + return nil + }) + return 1 +} + +func FuzzParseFloat(data []byte) int { + _, err := ParseFloat(data) + if err != nil { + return 0 + } + return 1 +} + +func FuzzParseInt(data []byte) int { + _, err := ParseInt(data) + if err != nil { + return 0 + } + return 1 +} + +func FuzzParseBool(data []byte) int { + _, err := ParseBoolean(data) + if err != nil { + return 0 + } + return 1 +} + +func FuzzTokenStart(data []byte) int { + _ = tokenStart(data) + return 1 +} + +func FuzzGetString(data []byte) int { + _, err := GetString(data, "test") + if err != nil { + return 0 + } + return 1 +} + +func FuzzGetFloat(data []byte) int { + _, err := GetFloat(data, "test") + if err != nil { + return 0 + } + return 1 +} + +func FuzzGetInt(data []byte) int { + _, err := GetInt(data, "test") + if err != nil { + return 0 + } + return 1 +} + +func FuzzGetBoolean(data []byte) int { + _, err := GetBoolean(data, "test") + if err != nil { + return 0 + } + return 1 +} + +func FuzzGetUnsafeString(data []byte) int { + _, err := GetUnsafeString(data, "test") + if err != nil { + return 0 + } + return 1 +} diff --git a/vendor/github.com/buger/jsonparser/go.mod b/vendor/github.com/buger/jsonparser/go.mod new file mode 100644 index 00000000000..7ede21fb38f --- /dev/null +++ b/vendor/github.com/buger/jsonparser/go.mod @@ -0,0 +1,4 @@ +module github.com/buger/jsonparser + +go 1.13 + diff --git a/vendor/github.com/buger/jsonparser/go.sum b/vendor/github.com/buger/jsonparser/go.sum new file mode 100644 index 00000000000..e69de29bb2d diff --git a/vendor/github.com/buger/jsonparser/oss-fuzz-build.sh b/vendor/github.com/buger/jsonparser/oss-fuzz-build.sh new file mode 100644 index 00000000000..c573b0e2d10 --- /dev/null +++ b/vendor/github.com/buger/jsonparser/oss-fuzz-build.sh @@ -0,0 +1,47 @@ +#!/bin/bash -eu + +git clone https://github.com/dvyukov/go-fuzz-corpus +zip corpus.zip go-fuzz-corpus/json/corpus/* + +cp corpus.zip $OUT/fuzzparsestring_seed_corpus.zip +compile_go_fuzzer github.com/buger/jsonparser FuzzParseString fuzzparsestring + +cp corpus.zip $OUT/fuzzeachkey_seed_corpus.zip +compile_go_fuzzer github.com/buger/jsonparser FuzzEachKey fuzzeachkey + +cp corpus.zip $OUT/fuzzdelete_seed_corpus.zip +compile_go_fuzzer github.com/buger/jsonparser FuzzDelete fuzzdelete + +cp corpus.zip $OUT/fuzzset_seed_corpus.zip +compile_go_fuzzer github.com/buger/jsonparser FuzzSet fuzzset + +cp corpus.zip $OUT/fuzzobjecteach_seed_corpus.zip +compile_go_fuzzer github.com/buger/jsonparser FuzzObjectEach fuzzobjecteach + +cp corpus.zip $OUT/fuzzparsefloat_seed_corpus.zip +compile_go_fuzzer github.com/buger/jsonparser FuzzParseFloat fuzzparsefloat + +cp corpus.zip $OUT/fuzzparseint_seed_corpus.zip +compile_go_fuzzer github.com/buger/jsonparser FuzzParseInt fuzzparseint + +cp corpus.zip $OUT/fuzzparsebool_seed_corpus.zip +compile_go_fuzzer github.com/buger/jsonparser FuzzParseBool fuzzparsebool + +cp corpus.zip $OUT/fuzztokenstart_seed_corpus.zip +compile_go_fuzzer github.com/buger/jsonparser FuzzTokenStart fuzztokenstart + +cp corpus.zip $OUT/fuzzgetstring_seed_corpus.zip +compile_go_fuzzer github.com/buger/jsonparser FuzzGetString fuzzgetstring + +cp corpus.zip $OUT/fuzzgetfloat_seed_corpus.zip 
+compile_go_fuzzer github.com/buger/jsonparser FuzzGetFloat fuzzgetfloat + +cp corpus.zip $OUT/fuzzgetint_seed_corpus.zip +compile_go_fuzzer github.com/buger/jsonparser FuzzGetInt fuzzgetint + +cp corpus.zip $OUT/fuzzgetboolean_seed_corpus.zip +compile_go_fuzzer github.com/buger/jsonparser FuzzGetBoolean fuzzgetboolean + +cp corpus.zip $OUT/fuzzgetunsafestring_seed_corpus.zip +compile_go_fuzzer github.com/buger/jsonparser FuzzGetUnsafeString fuzzgetunsafestring + diff --git a/vendor/github.com/buger/jsonparser/parser.go b/vendor/github.com/buger/jsonparser/parser.go new file mode 100644 index 00000000000..14b80bc4838 --- /dev/null +++ b/vendor/github.com/buger/jsonparser/parser.go @@ -0,0 +1,1283 @@ +package jsonparser + +import ( + "bytes" + "errors" + "fmt" + "strconv" +) + +// Errors +var ( + KeyPathNotFoundError = errors.New("Key path not found") + UnknownValueTypeError = errors.New("Unknown value type") + MalformedJsonError = errors.New("Malformed JSON error") + MalformedStringError = errors.New("Value is string, but can't find closing '\"' symbol") + MalformedArrayError = errors.New("Value is array, but can't find closing ']' symbol") + MalformedObjectError = errors.New("Value looks like object, but can't find closing '}' symbol") + MalformedValueError = errors.New("Value looks like Number/Boolean/None, but can't find its end: ',' or '}' symbol") + OverflowIntegerError = errors.New("Value is number, but overflowed while parsing") + MalformedStringEscapeError = errors.New("Encountered an invalid escape sequence in a string") +) + +// How much stack space to allocate for unescaping JSON strings; if a string longer +// than this needs to be escaped, it will result in a heap allocation +const unescapeStackBufSize = 64 + +func tokenEnd(data []byte) int { + for i, c := range data { + switch c { + case ' ', '\n', '\r', '\t', ',', '}', ']': + return i + } + } + + return len(data) +} + +func findTokenStart(data []byte, token byte) int { + for i := len(data) - 1; i >= 0; i-- { + switch data[i] { + case token: + return i + case '[', '{': + return 0 + } + } + + return 0 +} + +func findKeyStart(data []byte, key string) (int, error) { + i := 0 + ln := len(data) + if ln > 0 && (data[0] == '{' || data[0] == '[') { + i = 1 + } + var stackbuf [unescapeStackBufSize]byte // stack-allocated array for allocation-free unescaping of small strings + + if ku, err := Unescape(StringToBytes(key), stackbuf[:]); err == nil { + key = bytesToString(&ku) + } + + for i < ln { + switch data[i] { + case '"': + i++ + keyBegin := i + + strEnd, keyEscaped := stringEnd(data[i:]) + if strEnd == -1 { + break + } + i += strEnd + keyEnd := i - 1 + + valueOffset := nextToken(data[i:]) + if valueOffset == -1 { + break + } + + i += valueOffset + + // if string is a key, and key level match + k := data[keyBegin:keyEnd] + // for unescape: if there are no escape sequences, this is cheap; if there are, it is a + // bit more expensive, but causes no allocations unless len(key) > unescapeStackBufSize + if keyEscaped { + if ku, err := Unescape(k, stackbuf[:]); err != nil { + break + } else { + k = ku + } + } + + if data[i] == ':' && len(key) == len(k) && bytesToString(&k) == key { + return keyBegin - 1, nil + } + + case '[': + end := blockEnd(data[i:], data[i], ']') + if end != -1 { + i = i + end + } + case '{': + end := blockEnd(data[i:], data[i], '}') + if end != -1 { + i = i + end + } + } + i++ + } + + return -1, KeyPathNotFoundError +} + +func tokenStart(data []byte) int { + for i := len(data) - 1; i >= 0; i-- { + switch data[i] { + 
+		case '\n', '\r', '\t', ',', '{', '[':
+			return i
+		}
+	}
+
+	return 0
+}
+
+// Find position of next character which is not whitespace
+func nextToken(data []byte) int {
+	for i, c := range data {
+		switch c {
+		case ' ', '\n', '\r', '\t':
+			continue
+		default:
+			return i
+		}
+	}
+
+	return -1
+}
+
+// Find position of last character which is not whitespace
+func lastToken(data []byte) int {
+	for i := len(data) - 1; i >= 0; i-- {
+		switch data[i] {
+		case ' ', '\n', '\r', '\t':
+			continue
+		default:
+			return i
+		}
+	}
+
+	return -1
+}
+
+// Tries to find the end of a string.
+// Supports strings that contain escaped quote symbols.
+func stringEnd(data []byte) (int, bool) {
+	escaped := false
+	for i, c := range data {
+		if c == '"' {
+			if !escaped {
+				return i + 1, false
+			} else {
+				j := i - 1
+				for {
+					if j < 0 || data[j] != '\\' {
+						return i + 1, true // even number of backslashes
+					}
+					j--
+					if j < 0 || data[j] != '\\' {
+						break // odd number of backslashes
+					}
+					j--
+
+				}
+			}
+		} else if c == '\\' {
+			escaped = true
+		}
+	}
+
+	return -1, escaped
+}
+
+// Find end of the data structure, array or object.
+// For array openSym and closeSym will be '[' and ']', for object '{' and '}'
+func blockEnd(data []byte, openSym byte, closeSym byte) int {
+	level := 0
+	i := 0
+	ln := len(data)
+
+	for i < ln {
+		switch data[i] {
+		case '"': // If inside string, skip it
+			se, _ := stringEnd(data[i+1:])
+			if se == -1 {
+				return -1
+			}
+			i += se
+		case openSym: // If open symbol, increase level
+			level++
+		case closeSym: // If close symbol, decrease level
+			level--
+
+			// If we have returned to the original level, we're done
+			if level == 0 {
+				return i + 1
+			}
+		}
+		i++
+	}
+
+	return -1
+}
+
+func searchKeys(data []byte, keys ...string) int {
+	keyLevel := 0
+	level := 0
+	i := 0
+	ln := len(data)
+	lk := len(keys)
+	lastMatched := true
+
+	if lk == 0 {
+		return 0
+	}
+
+	var stackbuf [unescapeStackBufSize]byte // stack-allocated array for allocation-free unescaping of small strings
+
+	for i < ln {
+		switch data[i] {
+		case '"':
+			i++
+			keyBegin := i
+
+			strEnd, keyEscaped := stringEnd(data[i:])
+			if strEnd == -1 {
+				return -1
+			}
+			i += strEnd
+			keyEnd := i - 1
+
+			valueOffset := nextToken(data[i:])
+			if valueOffset == -1 {
+				return -1
+			}
+
+			i += valueOffset
+
+			// if string is a key
+			if data[i] == ':' {
+				if level < 1 {
+					return -1
+				}
+
+				key := data[keyBegin:keyEnd]
+
+				// for unescape: if there are no escape sequences, this is cheap; if there are, it is a
+				// bit more expensive, but causes no allocations unless len(key) > unescapeStackBufSize
+				var keyUnesc []byte
+				if !keyEscaped {
+					keyUnesc = key
+				} else if ku, err := Unescape(key, stackbuf[:]); err != nil {
+					return -1
+				} else {
+					keyUnesc = ku
+				}
+
+				if level <= len(keys) {
+					if equalStr(&keyUnesc, keys[level-1]) {
+						lastMatched = true
+
+						// if key level match
+						if keyLevel == level-1 {
+							keyLevel++
+							// If we found all keys in path
+							if keyLevel == lk {
+								return i + 1
+							}
+						}
+					} else {
+						lastMatched = false
+					}
+				} else {
+					return -1
+				}
+			} else {
+				i--
+			}
+		case '{':
+
+			// only increase the level if the parent key matched; otherwise we can
+			// move directly to the end of this block
+			if !lastMatched {
+				end := blockEnd(data[i:], '{', '}')
+				if end == -1 {
+					return -1
+				}
+				i += end - 1
+			} else {
+				level++
+			}
+		case '}':
+			level--
+			if level == keyLevel {
+				keyLevel--
+			}
+		case '[':
+			// If we want to get array element by index
+			if keyLevel == level && keys[level][0] == '[' {
+				var keyLen = len(keys[level])
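+				// An array-index key must look like "[N]": "[", at least one digit, "]".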
+ if keyLen < 3 || keys[level][0] != '[' || keys[level][keyLen-1] != ']' { + return -1 + } + aIdx, err := strconv.Atoi(keys[level][1 : keyLen-1]) + if err != nil { + return -1 + } + var curIdx int + var valueFound []byte + var valueOffset int + var curI = i + ArrayEach(data[i:], func(value []byte, dataType ValueType, offset int, err error) { + if curIdx == aIdx { + valueFound = value + valueOffset = offset + if dataType == String { + valueOffset = valueOffset - 2 + valueFound = data[curI+valueOffset : curI+valueOffset+len(value)+2] + } + } + curIdx += 1 + }) + + if valueFound == nil { + return -1 + } else { + subIndex := searchKeys(valueFound, keys[level+1:]...) + if subIndex < 0 { + return -1 + } + return i + valueOffset + subIndex + } + } else { + // Do not search for keys inside arrays + if arraySkip := blockEnd(data[i:], '[', ']'); arraySkip == -1 { + return -1 + } else { + i += arraySkip - 1 + } + } + case ':': // If encountered, JSON data is malformed + return -1 + } + + i++ + } + + return -1 +} + +func sameTree(p1, p2 []string) bool { + minLen := len(p1) + if len(p2) < minLen { + minLen = len(p2) + } + + for pi_1, p_1 := range p1[:minLen] { + if p2[pi_1] != p_1 { + return false + } + } + + return true +} + +func EachKey(data []byte, cb func(int, []byte, ValueType, error), paths ...[]string) int { + var x struct{} + pathFlags := make([]bool, len(paths)) + var level, pathsMatched, i int + ln := len(data) + + var maxPath int + for _, p := range paths { + if len(p) > maxPath { + maxPath = len(p) + } + } + + pathsBuf := make([]string, maxPath) + + for i < ln { + switch data[i] { + case '"': + i++ + keyBegin := i + + strEnd, keyEscaped := stringEnd(data[i:]) + if strEnd == -1 { + return -1 + } + i += strEnd + + keyEnd := i - 1 + + valueOffset := nextToken(data[i:]) + if valueOffset == -1 { + return -1 + } + + i += valueOffset + + // if string is a key, and key level match + if data[i] == ':' { + match := -1 + key := data[keyBegin:keyEnd] + + // for unescape: if there are no escape sequences, this is cheap; if there are, it is a + // bit more expensive, but causes no allocations unless len(key) > unescapeStackBufSize + var keyUnesc []byte + if !keyEscaped { + keyUnesc = key + } else { + var stackbuf [unescapeStackBufSize]byte + if ku, err := Unescape(key, stackbuf[:]); err != nil { + return -1 + } else { + keyUnesc = ku + } + } + + if maxPath >= level { + if level < 1 { + cb(-1, nil, Unknown, MalformedJsonError) + return -1 + } + + pathsBuf[level-1] = bytesToString(&keyUnesc) + for pi, p := range paths { + if len(p) != level || pathFlags[pi] || !equalStr(&keyUnesc, p[level-1]) || !sameTree(p, pathsBuf[:level]) { + continue + } + + match = pi + + pathsMatched++ + pathFlags[pi] = true + + v, dt, _, e := Get(data[i+1:]) + cb(pi, v, dt, e) + + if pathsMatched == len(paths) { + break + } + } + if pathsMatched == len(paths) { + return i + } + } + + if match == -1 { + tokenOffset := nextToken(data[i+1:]) + i += tokenOffset + + if data[i] == '{' { + blockSkip := blockEnd(data[i:], '{', '}') + i += blockSkip + 1 + } + } + + if i < ln { + switch data[i] { + case '{', '}', '[', '"': + i-- + } + } + } else { + i-- + } + case '{': + level++ + case '}': + level-- + case '[': + var ok bool + arrIdxFlags := make(map[int]struct{}) + pIdxFlags := make([]bool, len(paths)) + + if level < 0 { + cb(-1, nil, Unknown, MalformedJsonError) + return -1 + } + + for pi, p := range paths { + if len(p) < level+1 || pathFlags[pi] || p[level][0] != '[' || !sameTree(p, pathsBuf[:level]) { + continue + } + if len(p[level]) 
>= 2 { + aIdx, _ := strconv.Atoi(p[level][1 : len(p[level])-1]) + arrIdxFlags[aIdx] = x + pIdxFlags[pi] = true + } + } + + if len(arrIdxFlags) > 0 { + level++ + + var curIdx int + arrOff, _ := ArrayEach(data[i:], func(value []byte, dataType ValueType, offset int, err error) { + if _, ok = arrIdxFlags[curIdx]; ok { + for pi, p := range paths { + if pIdxFlags[pi] { + aIdx, _ := strconv.Atoi(p[level-1][1 : len(p[level-1])-1]) + + if curIdx == aIdx { + of := searchKeys(value, p[level:]...) + + pathsMatched++ + pathFlags[pi] = true + + if of != -1 { + v, dt, _, e := Get(value[of:]) + cb(pi, v, dt, e) + } + } + } + } + } + + curIdx += 1 + }) + + if pathsMatched == len(paths) { + return i + } + + i += arrOff - 1 + } else { + // Do not search for keys inside arrays + if arraySkip := blockEnd(data[i:], '[', ']'); arraySkip == -1 { + return -1 + } else { + i += arraySkip - 1 + } + } + case ']': + level-- + } + + i++ + } + + return -1 +} + +// Data types available in valid JSON data. +type ValueType int + +const ( + NotExist = ValueType(iota) + String + Number + Object + Array + Boolean + Null + Unknown +) + +func (vt ValueType) String() string { + switch vt { + case NotExist: + return "non-existent" + case String: + return "string" + case Number: + return "number" + case Object: + return "object" + case Array: + return "array" + case Boolean: + return "boolean" + case Null: + return "null" + default: + return "unknown" + } +} + +var ( + trueLiteral = []byte("true") + falseLiteral = []byte("false") + nullLiteral = []byte("null") +) + +func createInsertComponent(keys []string, setValue []byte, comma, object bool) []byte { + isIndex := string(keys[0][0]) == "[" + offset := 0 + lk := calcAllocateSpace(keys, setValue, comma, object) + buffer := make([]byte, lk, lk) + if comma { + offset += WriteToBuffer(buffer[offset:], ",") + } + if isIndex && !comma { + offset += WriteToBuffer(buffer[offset:], "[") + } else { + if object { + offset += WriteToBuffer(buffer[offset:], "{") + } + if !isIndex { + offset += WriteToBuffer(buffer[offset:], "\"") + offset += WriteToBuffer(buffer[offset:], keys[0]) + offset += WriteToBuffer(buffer[offset:], "\":") + } + } + + for i := 1; i < len(keys); i++ { + if string(keys[i][0]) == "[" { + offset += WriteToBuffer(buffer[offset:], "[") + } else { + offset += WriteToBuffer(buffer[offset:], "{\"") + offset += WriteToBuffer(buffer[offset:], keys[i]) + offset += WriteToBuffer(buffer[offset:], "\":") + } + } + offset += WriteToBuffer(buffer[offset:], string(setValue)) + for i := len(keys) - 1; i > 0; i-- { + if string(keys[i][0]) == "[" { + offset += WriteToBuffer(buffer[offset:], "]") + } else { + offset += WriteToBuffer(buffer[offset:], "}") + } + } + if isIndex && !comma { + offset += WriteToBuffer(buffer[offset:], "]") + } + if object && !isIndex { + offset += WriteToBuffer(buffer[offset:], "}") + } + return buffer +} + +func calcAllocateSpace(keys []string, setValue []byte, comma, object bool) int { + isIndex := string(keys[0][0]) == "[" + lk := 0 + if comma { + // , + lk += 1 + } + if isIndex && !comma { + // [] + lk += 2 + } else { + if object { + // { + lk += 1 + } + if !isIndex { + // "keys[0]" + lk += len(keys[0]) + 3 + } + } + + + lk += len(setValue) + for i := 1; i < len(keys); i++ { + if string(keys[i][0]) == "[" { + // [] + lk += 2 + } else { + // {"keys[i]":setValue} + lk += len(keys[i]) + 5 + } + } + + if object && !isIndex { + // } + lk += 1 + } + + return lk +} + +func WriteToBuffer(buffer []byte, str string) int { + copy(buffer, str) + return len(str) +} + +/* + 
+Delete - Receives existing data structure and a path to delete.
+
+Returns:
+`data` - the modified data
+
+*/
+func Delete(data []byte, keys ...string) []byte {
+	lk := len(keys)
+	if lk == 0 {
+		return data[:0]
+	}
+
+	array := false
+	if len(keys[lk-1]) > 0 && string(keys[lk-1][0]) == "[" {
+		array = true
+	}
+
+	var startOffset, keyOffset int
+	endOffset := len(data)
+	var err error
+	if !array {
+		if len(keys) > 1 {
+			_, _, startOffset, endOffset, err = internalGet(data, keys[:lk-1]...)
+			if err == KeyPathNotFoundError {
+				// problem parsing the data
+				return data
+			}
+		}
+
+		keyOffset, err = findKeyStart(data[startOffset:endOffset], keys[lk-1])
+		if err == KeyPathNotFoundError {
+			// problem parsing the data
+			return data
+		}
+		keyOffset += startOffset
+		_, _, _, subEndOffset, _ := internalGet(data[startOffset:endOffset], keys[lk-1])
+		endOffset = startOffset + subEndOffset
+		tokEnd := tokenEnd(data[endOffset:])
+		tokStart := findTokenStart(data[:keyOffset], ","[0])
+
+		if data[endOffset+tokEnd] == ","[0] {
+			endOffset += tokEnd + 1
+		} else if data[endOffset+tokEnd] == " "[0] && len(data) > endOffset+tokEnd+1 && data[endOffset+tokEnd+1] == ","[0] {
+			endOffset += tokEnd + 2
+		} else if data[endOffset+tokEnd] == "}"[0] && data[tokStart] == ","[0] {
+			keyOffset = tokStart
+		}
+	} else {
+		_, _, keyOffset, endOffset, err = internalGet(data, keys...)
+		if err == KeyPathNotFoundError {
+			// problem parsing the data
+			return data
+		}
+
+		tokEnd := tokenEnd(data[endOffset:])
+		tokStart := findTokenStart(data[:keyOffset], ","[0])
+
+		if data[endOffset+tokEnd] == ","[0] {
+			endOffset += tokEnd + 1
+		} else if data[endOffset+tokEnd] == "]"[0] && data[tokStart] == ","[0] {
+			keyOffset = tokStart
+		}
+	}
+
+	// We need to remove the remaining trailing comma if we delete the last element in the object
+	prevTok := lastToken(data[:keyOffset])
+	remainedValue := data[endOffset:]
+
+	var newOffset int
+	if nextToken(remainedValue) > -1 && remainedValue[nextToken(remainedValue)] == '}' && data[prevTok] == ',' {
+		newOffset = prevTok
+	} else {
+		newOffset = prevTok + 1
+	}
+
+	// We have to make a copy here if we don't want to mangle the original data, because byte slices are
+	// accessed by reference and not by value
+	dataCopy := make([]byte, len(data))
+	copy(dataCopy, data)
+	data = append(dataCopy[:newOffset], dataCopy[endOffset:]...)
+
+	return data
+}
+
+/*
+
+Set - Receives existing data structure, path to set, and data to set at that key.
+
+Returns:
+`value` - the modified byte array
+`err` - an error on any parsing failure
+
+*/
+func Set(data []byte, setValue []byte, keys ...string) (value []byte, err error) {
+	// ensure keys are set
+	if len(keys) == 0 {
+		return nil, KeyPathNotFoundError
+	}
+
+	_, _, startOffset, endOffset, err := internalGet(data, keys...)
+	if err != nil {
+		if err != KeyPathNotFoundError {
+			// problem parsing the data
+			return nil, err
+		}
+		// the full path doesn't exist
+		// does any subpath exist?
+		var depth int
+		for i := range keys {
+			_, _, start, end, sErr := internalGet(data, keys[:i+1]...)
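+			// stop at the first prefix that fails to resolve; everything before it already exists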
+ if sErr != nil { + break + } else { + endOffset = end + startOffset = start + depth++ + } + } + comma := true + object := false + if endOffset == -1 { + firstToken := nextToken(data) + // We can't set a top-level key if data isn't an object + if firstToken < 0 || data[firstToken] != '{' { + return nil, KeyPathNotFoundError + } + // Don't need a comma if the input is an empty object + secondToken := firstToken + 1 + nextToken(data[firstToken+1:]) + if data[secondToken] == '}' { + comma = false + } + // Set the top level key at the end (accounting for any trailing whitespace) + // This assumes last token is valid like '}', could check and return error + endOffset = lastToken(data) + } + depthOffset := endOffset + if depth != 0 { + // if subpath is a non-empty object, add to it + // or if subpath is a non-empty array, add to it + if (data[startOffset] == '{' && data[startOffset+1+nextToken(data[startOffset+1:])] != '}') || + (data[startOffset] == '[' && data[startOffset+1+nextToken(data[startOffset+1:])] == '{') && keys[depth:][0][0] == 91 { + depthOffset-- + startOffset = depthOffset + // otherwise, over-write it with a new object + } else { + comma = false + object = true + } + } else { + startOffset = depthOffset + } + value = append(data[:startOffset], append(createInsertComponent(keys[depth:], setValue, comma, object), data[depthOffset:]...)...) + } else { + // path currently exists + startComponent := data[:startOffset] + endComponent := data[endOffset:] + + value = make([]byte, len(startComponent)+len(endComponent)+len(setValue)) + newEndOffset := startOffset + len(setValue) + copy(value[0:startOffset], startComponent) + copy(value[startOffset:newEndOffset], setValue) + copy(value[newEndOffset:], endComponent) + } + return value, nil +} + +func getType(data []byte, offset int) ([]byte, ValueType, int, error) { + var dataType ValueType + endOffset := offset + + // if string value + if data[offset] == '"' { + dataType = String + if idx, _ := stringEnd(data[offset+1:]); idx != -1 { + endOffset += idx + 1 + } else { + return nil, dataType, offset, MalformedStringError + } + } else if data[offset] == '[' { // if array value + dataType = Array + // break label, for stopping nested loops + endOffset = blockEnd(data[offset:], '[', ']') + + if endOffset == -1 { + return nil, dataType, offset, MalformedArrayError + } + + endOffset += offset + } else if data[offset] == '{' { // if object value + dataType = Object + // break label, for stopping nested loops + endOffset = blockEnd(data[offset:], '{', '}') + + if endOffset == -1 { + return nil, dataType, offset, MalformedObjectError + } + + endOffset += offset + } else { + // Number, Boolean or None + end := tokenEnd(data[endOffset:]) + + if end == -1 { + return nil, dataType, offset, MalformedValueError + } + + value := data[offset : endOffset+end] + + switch data[offset] { + case 't', 'f': // true or false + if bytes.Equal(value, trueLiteral) || bytes.Equal(value, falseLiteral) { + dataType = Boolean + } else { + return nil, Unknown, offset, UnknownValueTypeError + } + case 'u', 'n': // undefined or null + if bytes.Equal(value, nullLiteral) { + dataType = Null + } else { + return nil, Unknown, offset, UnknownValueTypeError + } + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-': + dataType = Number + default: + return nil, Unknown, offset, UnknownValueTypeError + } + + endOffset += end + } + return data[offset:endOffset], dataType, endOffset, nil +} + +/* +Get - Receives data structure, and key path to extract value from. 
+
+Returns:
+`value` - Pointer into the original data structure containing the key's value, or an empty slice if nothing is found or an error occurs
+`dataType` - Can be: `NotExist`, `String`, `Number`, `Object`, `Array`, `Boolean` or `Null`
+`offset` - Offset from the provided data structure where the key's value ends. Used mostly internally, for example by the `ArrayEach` helper.
+`err` - If the key is not found, or on any other parsing issue, an error is returned. If the key is not found, `dataType` is also set to `NotExist`
+
+Accepts multiple keys to specify a path to a JSON value (for querying nested structures).
+If no keys are provided it will try to extract the closest JSON value (simple ones or object/array), useful for reading streams or arrays; see the `ArrayEach` implementation.
+*/
+func Get(data []byte, keys ...string) (value []byte, dataType ValueType, offset int, err error) {
+	a, b, _, d, e := internalGet(data, keys...)
+	return a, b, d, e
+}
+
+func internalGet(data []byte, keys ...string) (value []byte, dataType ValueType, offset, endOffset int, err error) {
+	if len(keys) > 0 {
+		if offset = searchKeys(data, keys...); offset == -1 {
+			return nil, NotExist, -1, -1, KeyPathNotFoundError
+		}
+	}
+
+	// Go to closest value
+	nO := nextToken(data[offset:])
+	if nO == -1 {
+		return nil, NotExist, offset, -1, MalformedJsonError
+	}
+
+	offset += nO
+	value, dataType, endOffset, err = getType(data, offset)
+	if err != nil {
+		return value, dataType, offset, endOffset, err
+	}
+
+	// Strip quotes from string values
+	if dataType == String {
+		value = value[1 : len(value)-1]
+	}
+
+	return value[:len(value):len(value)], dataType, offset, endOffset, nil
+}
+
+// ArrayEach is used when iterating arrays, accepts a callback function with the same return arguments as `Get`.
+func ArrayEach(data []byte, cb func(value []byte, dataType ValueType, offset int, err error), keys ...string) (offset int, err error) {
+	if len(data) == 0 {
+		return -1, MalformedObjectError
+	}
+
+	nT := nextToken(data)
+	if nT == -1 {
+		return -1, MalformedJsonError
+	}
+
+	offset = nT + 1
+
+	if len(keys) > 0 {
+		if offset = searchKeys(data, keys...); offset == -1 {
+			return offset, KeyPathNotFoundError
+		}
+
+		// Go to closest value
+		nO := nextToken(data[offset:])
+		if nO == -1 {
+			return offset, MalformedJsonError
+		}
+
+		offset += nO
+
+		if data[offset] != '[' {
+			return offset, MalformedArrayError
+		}
+
+		offset++
+	}
+
+	nO := nextToken(data[offset:])
+	if nO == -1 {
+		return offset, MalformedJsonError
+	}
+
+	offset += nO
+
+	if data[offset] == ']' {
+		return offset, nil
+	}
+
+	for {
+		v, t, o, e := Get(data[offset:])
+
+		if e != nil {
+			return offset, e
+		}
+
+		if o == 0 {
+			break
+		}
+
+		if t != NotExist {
+			cb(v, t, offset+o-len(v), e)
+		}
+
+		if e != nil {
+			break
+		}
+
+		offset += o
+
+		skipToToken := nextToken(data[offset:])
+		if skipToToken == -1 {
+			return offset, MalformedArrayError
+		}
+		offset += skipToToken
+
+		if data[offset] == ']' {
+			break
+		}
+
+		if data[offset] != ',' {
+			return offset, MalformedArrayError
+		}
+
+		offset++
+	}
+
+	return offset, nil
+}
+
+// ObjectEach iterates over the key-value pairs of a JSON object, invoking a given callback for each such entry
+func ObjectEach(data []byte, callback func(key []byte, value []byte, dataType ValueType, offset int) error, keys ...string) (err error) {
+	offset := 0
+
+	// Descend to the desired key, if requested
+	if len(keys) > 0 {
+		if off := searchKeys(data, keys...); off == -1 {
+			return KeyPathNotFoundError
+		} else {
+			offset = off
+		}
+	}
+
+	// Validate and skip past opening brace
+	if off := nextToken(data[offset:]); off == -1 {
+		return MalformedObjectError
+	} else if offset += off; data[offset] != '{' {
+		return MalformedObjectError
+	} else {
+		offset++
+	}
+
+	// Skip to the first token inside the object, or stop if we find the ending brace
+	if off := nextToken(data[offset:]); off == -1 {
+		return MalformedJsonError
+	} else if offset += off; data[offset] == '}' {
+		return nil
+	}
+
+	// Loop pre-condition: data[offset] points to what should be either the next entry's key, or the closing brace (if it's anything else, the JSON is malformed)
+	for offset < len(data) {
+		// Step 1: find the next key
+		var key []byte
+
+		// Check what the next token is: start of string, end of object, or something else (error)
+		switch data[offset] {
+		case '"':
+			offset++ // accept as string and skip opening quote
+		case '}':
+			return nil // we found the end of the object; stop and return success
+		default:
+			return MalformedObjectError
+		}
+
+		// Find the end of the key string
+		var keyEscaped bool
+		if off, esc := stringEnd(data[offset:]); off == -1 {
+			return MalformedJsonError
+		} else {
+			key, keyEscaped = data[offset:offset+off-1], esc
+			offset += off
+		}
+
+		// Unescape the string if needed
+		if keyEscaped {
+			var stackbuf [unescapeStackBufSize]byte // stack-allocated array for allocation-free unescaping of small strings
+			if keyUnescaped, err := Unescape(key, stackbuf[:]); err != nil {
+				return MalformedStringEscapeError
+			} else {
+				key = keyUnescaped
+			}
+		}
+
+		// Step 2: skip the colon
+		if off := nextToken(data[offset:]); off == -1 {
+			return MalformedJsonError
+		} else if offset += off; data[offset] != ':' {
+			return MalformedJsonError
+		} else {
+			offset++
+		}
+
+		// Step 3: find the associated value, then invoke the callback
+		if value, valueType, off, err := Get(data[offset:]); err != nil {
+			return err
+		} else if err := callback(key, value, valueType, offset+off); err != nil { // Invoke the callback here!
+			return err
+		} else {
+			offset += off
+		}
+
+		// Step 4: skip over the next comma to the following token, or stop if we hit the ending brace
+		if off := nextToken(data[offset:]); off == -1 {
+			return MalformedArrayError
+		} else {
+			offset += off
+			switch data[offset] {
+			case '}':
+				return nil // Stop if we hit the close brace
+			case ',':
+				offset++ // Ignore the comma
+			default:
+				return MalformedObjectError
+			}
+		}
+
+		// Skip to the next token after the comma
+		if off := nextToken(data[offset:]); off == -1 {
+			return MalformedArrayError
+		} else {
+			offset += off
+		}
+	}
+
+	return MalformedObjectError // we shouldn't get here; it's expected that we will return via finding the ending brace
+}
+
+// GetUnsafeString returns the value retrieved by `Get`; it creates the string without any memory allocation by mapping the string directly onto the slice's memory. It does not handle escape symbols.
+func GetUnsafeString(data []byte, keys ...string) (val string, err error) {
+	v, _, _, e := Get(data, keys...)
+
+	if e != nil {
+		return "", e
+	}
+
+	return bytesToString(&v), nil
+}
+
+// GetString returns the value retrieved by `Get`, cast to a string if possible, trying to properly handle escape and utf8 symbols
+// If the key's data type does not match, it returns an error.
+func GetString(data []byte, keys ...string) (val string, err error) {
+	v, t, _, e := Get(data, keys...)
+
+	if e != nil {
+		return "", e
+	}
+
+	if t != String {
+		return "", fmt.Errorf("Value is not a string: %s", string(v))
+	}
+
+	// If no escapes return raw content
+	if bytes.IndexByte(v, '\\') == -1 {
+		return string(v), nil
+	}
+
+	return ParseString(v)
+}
+
+// GetFloat returns the value retrieved by `Get`, cast to a float64 if possible.
+// The offset is the same as in `Get`.
+// If the key's data type does not match, it returns an error.
+func GetFloat(data []byte, keys ...string) (val float64, err error) {
+	v, t, _, e := Get(data, keys...)
+
+	if e != nil {
+		return 0, e
+	}
+
+	if t != Number {
+		return 0, fmt.Errorf("Value is not a number: %s", string(v))
+	}
+
+	return ParseFloat(v)
+}
+
+// GetInt returns the value retrieved by `Get`, cast to an int64 if possible.
+// If the key's data type does not match, it returns an error.
+func GetInt(data []byte, keys ...string) (val int64, err error) {
+	v, t, _, e := Get(data, keys...)
+
+	if e != nil {
+		return 0, e
+	}
+
+	if t != Number {
+		return 0, fmt.Errorf("Value is not a number: %s", string(v))
+	}
+
+	return ParseInt(v)
+}
+
+// GetBoolean returns the value retrieved by `Get`, cast to a bool if possible.
+// The offset is the same as in `Get`.
+// If the key's data type does not match, it returns an error.
+func GetBoolean(data []byte, keys ...string) (val bool, err error) {
+	v, t, _, e := Get(data, keys...)
+
+	if e != nil {
+		return false, e
+	}
+
+	if t != Boolean {
+		return false, fmt.Errorf("Value is not a boolean: %s", string(v))
+	}
+
+	return ParseBoolean(v)
+}
+
+// ParseBoolean parses a Boolean ValueType into a Go bool (not particularly useful, but here for completeness)
+func ParseBoolean(b []byte) (bool, error) {
+	switch {
+	case bytes.Equal(b, trueLiteral):
+		return true, nil
+	case bytes.Equal(b, falseLiteral):
+		return false, nil
+	default:
+		return false, MalformedValueError
+	}
+}
+
+// ParseString parses a String ValueType into a Go string (the main parsing work is unescaping the JSON string)
+func ParseString(b []byte) (string, error) {
+	var stackbuf [unescapeStackBufSize]byte // stack-allocated array for allocation-free unescaping of small strings
+	if bU, err := Unescape(b, stackbuf[:]); err != nil {
+		return "", MalformedValueError
+	} else {
+		return string(bU), nil
+	}
+}
+
+// ParseFloat parses a Number ValueType into a Go float64
+func ParseFloat(b []byte) (float64, error) {
+	if v, err := parseFloat(&b); err != nil {
+		return 0, MalformedValueError
+	} else {
+		return v, nil
+	}
+}
+
+// ParseInt parses a Number ValueType into a Go int64
+func ParseInt(b []byte) (int64, error) {
+	if v, ok, overflow := parseInt(b); !ok {
+		if overflow {
+			return 0, OverflowIntegerError
+		}
+		return 0, MalformedValueError
+	} else {
+		return v, nil
+	}
+}
diff --git a/vendor/github.com/cenk/backoff/.gitignore b/vendor/github.com/cenk/backoff/.gitignore
new file mode 100644
index 00000000000..00268614f04
--- /dev/null
+++ b/vendor/github.com/cenk/backoff/.gitignore
@@ -0,0 +1,22 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
diff --git a/vendor/github.com/cenk/backoff/.travis.yml b/vendor/github.com/cenk/backoff/.travis.yml
new file mode 100644
index 00000000000..47a6a46ec2a
--- /dev/null
+++ b/vendor/github.com/cenk/backoff/.travis.yml
@@ -0,0 +1,10 @@
+language: go
+go:
+  - 1.7
+  - 1.x
+  - tip
+before_install: + - go get github.com/mattn/goveralls + - go get golang.org/x/tools/cmd/cover +script: + - $HOME/gopath/bin/goveralls -service=travis-ci diff --git a/vendor/github.com/cenk/backoff/LICENSE b/vendor/github.com/cenk/backoff/LICENSE new file mode 100644 index 00000000000..89b81799655 --- /dev/null +++ b/vendor/github.com/cenk/backoff/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2014 Cenk Altı + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/cenk/backoff/README.md b/vendor/github.com/cenk/backoff/README.md new file mode 100644 index 00000000000..55ebc98fc25 --- /dev/null +++ b/vendor/github.com/cenk/backoff/README.md @@ -0,0 +1,30 @@ +# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Build Status][travis image]][travis] [![Coverage Status][coveralls image]][coveralls] + +This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client]. + +[Exponential backoff][exponential backoff wiki] +is an algorithm that uses feedback to multiplicatively decrease the rate of some process, +in order to gradually find an acceptable rate. +The retries exponentially increase and stop increasing when a certain threshold is met. + +## Usage + +See https://godoc.org/github.com/cenkalti/backoff#pkg-examples + +## Contributing + +* I would like to keep this library as small as possible. +* Please don't send a PR without opening an issue and discussing it first. +* If proposed change is not a common use case, I will probably not accept it. 
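+
+As a quick, purely illustrative sketch of the common pattern (the `operation`
+body below is a placeholder, not part of the library):
+
+```go
+operation := func() error {
+	// do the work that may fail, e.g. an HTTP request
+	return nil // return an error to trigger another retry
+}
+
+if err := backoff.Retry(operation, backoff.NewExponentialBackOff()); err != nil {
+	// retries were exhausted or a permanent error occurred; handle err
+}
+```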
+ +[godoc]: https://godoc.org/github.com/cenkalti/backoff +[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png +[travis]: https://travis-ci.org/cenkalti/backoff +[travis image]: https://travis-ci.org/cenkalti/backoff.png?branch=master +[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master +[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master + +[google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java +[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff + +[advanced example]: https://godoc.org/github.com/cenkalti/backoff#example_ diff --git a/vendor/github.com/cenk/backoff/backoff.go b/vendor/github.com/cenk/backoff/backoff.go new file mode 100644 index 00000000000..3676ee405d8 --- /dev/null +++ b/vendor/github.com/cenk/backoff/backoff.go @@ -0,0 +1,66 @@ +// Package backoff implements backoff algorithms for retrying operations. +// +// Use Retry function for retrying operations that may fail. +// If Retry does not meet your needs, +// copy/paste the function into your project and modify as you wish. +// +// There is also Ticker type similar to time.Ticker. +// You can use it if you need to work with channels. +// +// See Examples section below for usage examples. +package backoff + +import "time" + +// BackOff is a backoff policy for retrying an operation. +type BackOff interface { + // NextBackOff returns the duration to wait before retrying the operation, + // or backoff. Stop to indicate that no more retries should be made. + // + // Example usage: + // + // duration := backoff.NextBackOff(); + // if (duration == backoff.Stop) { + // // Do not retry operation. + // } else { + // // Sleep for duration and retry operation. + // } + // + NextBackOff() time.Duration + + // Reset to initial state. + Reset() +} + +// Stop indicates that no more retries should be made for use in NextBackOff(). +const Stop time.Duration = -1 + +// ZeroBackOff is a fixed backoff policy whose backoff time is always zero, +// meaning that the operation is retried immediately without waiting, indefinitely. +type ZeroBackOff struct{} + +func (b *ZeroBackOff) Reset() {} + +func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 } + +// StopBackOff is a fixed backoff policy that always returns backoff.Stop for +// NextBackOff(), meaning that the operation should never be retried. +type StopBackOff struct{} + +func (b *StopBackOff) Reset() {} + +func (b *StopBackOff) NextBackOff() time.Duration { return Stop } + +// ConstantBackOff is a backoff policy that always returns the same backoff delay. +// This is in contrast to an exponential backoff policy, +// which returns a delay that grows longer as you call NextBackOff() over and over again. 
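+// For example, NewConstantBackOff(2 * time.Second) yields a policy whose
+// NextBackOff() always returns two seconds.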
+type ConstantBackOff struct { + Interval time.Duration +} + +func (b *ConstantBackOff) Reset() {} +func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval } + +func NewConstantBackOff(d time.Duration) *ConstantBackOff { + return &ConstantBackOff{Interval: d} +} diff --git a/vendor/github.com/cenk/backoff/context.go b/vendor/github.com/cenk/backoff/context.go new file mode 100644 index 00000000000..7706faa2b60 --- /dev/null +++ b/vendor/github.com/cenk/backoff/context.go @@ -0,0 +1,63 @@ +package backoff + +import ( + "context" + "time" +) + +// BackOffContext is a backoff policy that stops retrying after the context +// is canceled. +type BackOffContext interface { + BackOff + Context() context.Context +} + +type backOffContext struct { + BackOff + ctx context.Context +} + +// WithContext returns a BackOffContext with context ctx +// +// ctx must not be nil +func WithContext(b BackOff, ctx context.Context) BackOffContext { + if ctx == nil { + panic("nil context") + } + + if b, ok := b.(*backOffContext); ok { + return &backOffContext{ + BackOff: b.BackOff, + ctx: ctx, + } + } + + return &backOffContext{ + BackOff: b, + ctx: ctx, + } +} + +func ensureContext(b BackOff) BackOffContext { + if cb, ok := b.(BackOffContext); ok { + return cb + } + return WithContext(b, context.Background()) +} + +func (b *backOffContext) Context() context.Context { + return b.ctx +} + +func (b *backOffContext) NextBackOff() time.Duration { + select { + case <-b.ctx.Done(): + return Stop + default: + } + next := b.BackOff.NextBackOff() + if deadline, ok := b.ctx.Deadline(); ok && deadline.Sub(time.Now()) < next { + return Stop + } + return next +} diff --git a/vendor/github.com/cenk/backoff/exponential.go b/vendor/github.com/cenk/backoff/exponential.go new file mode 100644 index 00000000000..a031a659799 --- /dev/null +++ b/vendor/github.com/cenk/backoff/exponential.go @@ -0,0 +1,153 @@ +package backoff + +import ( + "math/rand" + "time" +) + +/* +ExponentialBackOff is a backoff implementation that increases the backoff +period for each retry attempt using a randomization function that grows exponentially. + +NextBackOff() is calculated using the following formula: + + randomized interval = + RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor]) + +In other words NextBackOff() will range between the randomization factor +percentage below and above the retry interval. + +For example, given the following parameters: + + RetryInterval = 2 + RandomizationFactor = 0.5 + Multiplier = 2 + +the actual backoff period used in the next retry attempt will range between 1 and 3 seconds, +multiplied by the exponential, that is, between 2 and 6 seconds. + +Note: MaxInterval caps the RetryInterval and not the randomized interval. + +If the time elapsed since an ExponentialBackOff instance is created goes past the +MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop. + +The elapsed time can be reset by calling Reset(). + +Example: Given the following default arguments, for 10 tries the sequence will be, +and assuming we go over the MaxElapsedTime on the 10th try: + + Request # RetryInterval (seconds) Randomized Interval (seconds) + + 1 0.5 [0.25, 0.75] + 2 0.75 [0.375, 1.125] + 3 1.125 [0.562, 1.687] + 4 1.687 [0.8435, 2.53] + 5 2.53 [1.265, 3.795] + 6 3.795 [1.897, 5.692] + 7 5.692 [2.846, 8.538] + 8 8.538 [4.269, 12.807] + 9 12.807 [6.403, 19.210] + 10 19.210 backoff.Stop + +Note: Implementation is not thread-safe. 
+*/
+type ExponentialBackOff struct {
+	InitialInterval     time.Duration
+	RandomizationFactor float64
+	Multiplier          float64
+	MaxInterval         time.Duration
+	// After MaxElapsedTime the ExponentialBackOff stops.
+	// It never stops if MaxElapsedTime == 0.
+	MaxElapsedTime time.Duration
+	Clock          Clock
+
+	currentInterval time.Duration
+	startTime       time.Time
+}
+
+// Clock is an interface that returns current time for BackOff.
+type Clock interface {
+	Now() time.Time
+}
+
+// Default values for ExponentialBackOff.
+const (
+	DefaultInitialInterval     = 500 * time.Millisecond
+	DefaultRandomizationFactor = 0.5
+	DefaultMultiplier          = 1.5
+	DefaultMaxInterval         = 60 * time.Second
+	DefaultMaxElapsedTime      = 15 * time.Minute
+)
+
+// NewExponentialBackOff creates an instance of ExponentialBackOff using default values.
+func NewExponentialBackOff() *ExponentialBackOff {
+	b := &ExponentialBackOff{
+		InitialInterval:     DefaultInitialInterval,
+		RandomizationFactor: DefaultRandomizationFactor,
+		Multiplier:          DefaultMultiplier,
+		MaxInterval:         DefaultMaxInterval,
+		MaxElapsedTime:      DefaultMaxElapsedTime,
+		Clock:               SystemClock,
+	}
+	b.Reset()
+	return b
+}
+
+type systemClock struct{}
+
+func (t systemClock) Now() time.Time {
+	return time.Now()
+}
+
+// SystemClock implements Clock interface that uses time.Now().
+var SystemClock = systemClock{}
+
+// Reset the interval back to the initial retry interval and restart the timer.
+func (b *ExponentialBackOff) Reset() {
+	b.currentInterval = b.InitialInterval
+	b.startTime = b.Clock.Now()
+}
+
+// NextBackOff calculates the next backoff interval using the formula:
+// Randomized interval = RetryInterval +/- (RandomizationFactor * RetryInterval)
+func (b *ExponentialBackOff) NextBackOff() time.Duration {
+	// Make sure we have not gone over the maximum elapsed time.
+	if b.MaxElapsedTime != 0 && b.GetElapsedTime() > b.MaxElapsedTime {
+		return Stop
+	}
+	defer b.incrementCurrentInterval()
+	return getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval)
+}
+
+// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance
+// is created and is reset when Reset() is called.
+//
+// The elapsed time is computed using time.Now().UnixNano(). It is
+// safe to call even while the backoff policy is used by a running
+// ticker.
+func (b *ExponentialBackOff) GetElapsedTime() time.Duration {
+	return b.Clock.Now().Sub(b.startTime)
+}
+
+// Increments the current interval by multiplying it with the multiplier.
+func (b *ExponentialBackOff) incrementCurrentInterval() {
+	// Check for overflow, if overflow is detected set the current interval to the max interval.
+	if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier {
+		b.currentInterval = b.MaxInterval
+	} else {
+		b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier)
+	}
+}
+
+// Returns a random value from the following interval:
+// [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval].
+func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration {
+	var delta = randomizationFactor * float64(currentInterval)
+	var minInterval = float64(currentInterval) - delta
+	var maxInterval = float64(currentInterval) + delta
+
+	// Get a random value from the range [minInterval, maxInterval].
+	// The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then
+	// we want a 33% chance for selecting either 1, 2 or 3.
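+	// Worked example: currentInterval = 2s and randomizationFactor = 0.5 give delta = 1s,
+	// so minInterval = 1s and maxInterval = 3s; a random value of 0.5 then yields
+	// 1 + 0.5*(3-1+1) = 2.5s.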
+	return time.Duration(minInterval + (random * (maxInterval - minInterval + 1)))
+}
diff --git a/vendor/github.com/cenk/backoff/retry.go b/vendor/github.com/cenk/backoff/retry.go
new file mode 100644
index 00000000000..e936a506f84
--- /dev/null
+++ b/vendor/github.com/cenk/backoff/retry.go
@@ -0,0 +1,82 @@
+package backoff
+
+import "time"
+
+// An Operation is executed by Retry() or RetryNotify().
+// The operation will be retried using a backoff policy if it returns an error.
+type Operation func() error
+
+// Notify is a notify-on-error function. It receives an operation error and
+// backoff delay if the operation failed (with an error).
+//
+// NOTE that if the backoff policy states that no more retries should be made,
+// the notify function isn't called.
+type Notify func(error, time.Duration)
+
+// Retry the operation o until it does not return error or BackOff stops.
+// o is guaranteed to be run at least once.
+//
+// If o returns a *PermanentError, the operation is not retried, and the
+// wrapped error is returned.
+//
+// Retry sleeps the goroutine for the duration returned by BackOff after a
+// failed operation returns.
+func Retry(o Operation, b BackOff) error { return RetryNotify(o, b, nil) }
+
+// RetryNotify calls notify function with the error and wait duration
+// for each failed attempt before sleep.
+func RetryNotify(operation Operation, b BackOff, notify Notify) error {
+	var err error
+	var next time.Duration
+	var t *time.Timer
+
+	cb := ensureContext(b)
+
+	b.Reset()
+	for {
+		if err = operation(); err == nil {
+			return nil
+		}
+
+		if permanent, ok := err.(*PermanentError); ok {
+			return permanent.Err
+		}
+
+		if next = cb.NextBackOff(); next == Stop {
+			return err
+		}
+
+		if notify != nil {
+			notify(err, next)
+		}
+
+		if t == nil {
+			t = time.NewTimer(next)
+			defer t.Stop()
+		} else {
+			t.Reset(next)
+		}
+
+		select {
+		case <-cb.Context().Done():
+			return err
+		case <-t.C:
+		}
+	}
+}
+
+// PermanentError signals that the operation should not be retried.
+type PermanentError struct {
+	Err error
+}
+
+func (e *PermanentError) Error() string {
+	return e.Err.Error()
+}
+
+// Permanent wraps the given err in a *PermanentError.
+func Permanent(err error) *PermanentError {
+	return &PermanentError{
+		Err: err,
+	}
+}
diff --git a/vendor/github.com/cenk/backoff/ticker.go b/vendor/github.com/cenk/backoff/ticker.go
new file mode 100644
index 00000000000..e41084b0eff
--- /dev/null
+++ b/vendor/github.com/cenk/backoff/ticker.go
@@ -0,0 +1,82 @@
+package backoff
+
+import (
+	"sync"
+	"time"
+)
+
+// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff.
+//
+// Ticks will continue to arrive when the previous operation is still running,
+// so operations that take a while to fail could run in quick succession.
+type Ticker struct {
+	C        <-chan time.Time
+	c        chan time.Time
+	b        BackOffContext
+	stop     chan struct{}
+	stopOnce sync.Once
+}
+
+// NewTicker returns a new Ticker containing a channel that will send
+// the time at times specified by the BackOff argument. Ticker is
+// guaranteed to tick at least once. The channel is closed when Stop
+// method is called or BackOff stops. It is not safe to manipulate the
+// provided backoff policy (notably calling NextBackOff or Reset)
+// while the ticker is running.
+func NewTicker(b BackOff) *Ticker {
+	c := make(chan time.Time)
+	t := &Ticker{
+		C:    c,
+		c:    c,
+		b:    ensureContext(b),
+		stop: make(chan struct{}),
+	}
+	t.b.Reset()
+	go t.run()
+	return t
+}
+
+// Stop turns off a ticker.
After Stop, no more ticks will be sent. +func (t *Ticker) Stop() { + t.stopOnce.Do(func() { close(t.stop) }) +} + +func (t *Ticker) run() { + c := t.c + defer close(c) + + // Ticker is guaranteed to tick at least once. + afterC := t.send(time.Now()) + + for { + if afterC == nil { + return + } + + select { + case tick := <-afterC: + afterC = t.send(tick) + case <-t.stop: + t.c = nil // Prevent future ticks from being sent to the channel. + return + case <-t.b.Context().Done(): + return + } + } +} + +func (t *Ticker) send(tick time.Time) <-chan time.Time { + select { + case t.c <- tick: + case <-t.stop: + return nil + } + + next := t.b.NextBackOff() + if next == Stop { + t.Stop() + return nil + } + + return time.After(next) +} diff --git a/vendor/github.com/cenk/backoff/tries.go b/vendor/github.com/cenk/backoff/tries.go new file mode 100644 index 00000000000..cfeefd9b764 --- /dev/null +++ b/vendor/github.com/cenk/backoff/tries.go @@ -0,0 +1,35 @@ +package backoff + +import "time" + +/* +WithMaxRetries creates a wrapper around another BackOff, which will +return Stop if NextBackOff() has been called too many times since +the last time Reset() was called + +Note: Implementation is not thread-safe. +*/ +func WithMaxRetries(b BackOff, max uint64) BackOff { + return &backOffTries{delegate: b, maxTries: max} +} + +type backOffTries struct { + delegate BackOff + maxTries uint64 + numTries uint64 +} + +func (b *backOffTries) NextBackOff() time.Duration { + if b.maxTries > 0 { + if b.maxTries <= b.numTries { + return Stop + } + b.numTries++ + } + return b.delegate.NextBackOff() +} + +func (b *backOffTries) Reset() { + b.numTries = 0 + b.delegate.Reset() +} diff --git a/vendor/github.com/cenkalti/backoff/v4/.gitignore b/vendor/github.com/cenkalti/backoff/v4/.gitignore new file mode 100644 index 00000000000..00268614f04 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/vendor/github.com/cenkalti/backoff/v4/.travis.yml b/vendor/github.com/cenkalti/backoff/v4/.travis.yml new file mode 100644 index 00000000000..871150c4672 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/.travis.yml @@ -0,0 +1,10 @@ +language: go +go: + - 1.12 + - 1.x + - tip +before_install: + - go get github.com/mattn/goveralls + - go get golang.org/x/tools/cmd/cover +script: + - $HOME/gopath/bin/goveralls -service=travis-ci diff --git a/vendor/github.com/cenkalti/backoff/v4/LICENSE b/vendor/github.com/cenkalti/backoff/v4/LICENSE new file mode 100644 index 00000000000..89b81799655 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2014 Cenk Altı + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/cenkalti/backoff/v4/README.md b/vendor/github.com/cenkalti/backoff/v4/README.md new file mode 100644 index 00000000000..cabfc9c7017 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/README.md @@ -0,0 +1,33 @@ +# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Build Status][travis image]][travis] [![Coverage Status][coveralls image]][coveralls] + +This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client]. + +[Exponential backoff][exponential backoff wiki] +is an algorithm that uses feedback to multiplicatively decrease the rate of some process, +in order to gradually find an acceptable rate. +The retries exponentially increase and stop increasing when a certain threshold is met. + +## Usage + +Import path is `github.com/cenkalti/backoff/v4`. Please note the version part at the end. + +godoc.org does not support modules yet, +so you can use https://godoc.org/gopkg.in/cenkalti/backoff.v4 to view the documentation. + +## Contributing + +* I would like to keep this library as small as possible. +* Please don't send a PR without opening an issue and discussing it first. +* If proposed change is not a common use case, I will probably not accept it. + +[godoc]: https://godoc.org/github.com/cenkalti/backoff +[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png +[travis]: https://travis-ci.org/cenkalti/backoff +[travis image]: https://travis-ci.org/cenkalti/backoff.png?branch=master +[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master +[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master + +[google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java +[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff + +[advanced example]: https://godoc.org/github.com/cenkalti/backoff#example_ diff --git a/vendor/github.com/cenkalti/backoff/v4/backoff.go b/vendor/github.com/cenkalti/backoff/v4/backoff.go new file mode 100644 index 00000000000..3676ee405d8 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/backoff.go @@ -0,0 +1,66 @@ +// Package backoff implements backoff algorithms for retrying operations. +// +// Use Retry function for retrying operations that may fail. +// If Retry does not meet your needs, +// copy/paste the function into your project and modify as you wish. +// +// There is also Ticker type similar to time.Ticker. +// You can use it if you need to work with channels. +// +// See Examples section below for usage examples. +package backoff + +import "time" + +// BackOff is a backoff policy for retrying an operation. +type BackOff interface { + // NextBackOff returns the duration to wait before retrying the operation, + // or backoff. Stop to indicate that no more retries should be made. 
+ // + // Example usage: + // + // duration := backoff.NextBackOff(); + // if (duration == backoff.Stop) { + // // Do not retry operation. + // } else { + // // Sleep for duration and retry operation. + // } + // + NextBackOff() time.Duration + + // Reset to initial state. + Reset() +} + +// Stop indicates that no more retries should be made for use in NextBackOff(). +const Stop time.Duration = -1 + +// ZeroBackOff is a fixed backoff policy whose backoff time is always zero, +// meaning that the operation is retried immediately without waiting, indefinitely. +type ZeroBackOff struct{} + +func (b *ZeroBackOff) Reset() {} + +func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 } + +// StopBackOff is a fixed backoff policy that always returns backoff.Stop for +// NextBackOff(), meaning that the operation should never be retried. +type StopBackOff struct{} + +func (b *StopBackOff) Reset() {} + +func (b *StopBackOff) NextBackOff() time.Duration { return Stop } + +// ConstantBackOff is a backoff policy that always returns the same backoff delay. +// This is in contrast to an exponential backoff policy, +// which returns a delay that grows longer as you call NextBackOff() over and over again. +type ConstantBackOff struct { + Interval time.Duration +} + +func (b *ConstantBackOff) Reset() {} +func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval } + +func NewConstantBackOff(d time.Duration) *ConstantBackOff { + return &ConstantBackOff{Interval: d} +} diff --git a/vendor/github.com/cenkalti/backoff/v4/context.go b/vendor/github.com/cenkalti/backoff/v4/context.go new file mode 100644 index 00000000000..fcff86c1b3d --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/context.go @@ -0,0 +1,66 @@ +package backoff + +import ( + "context" + "time" +) + +// BackOffContext is a backoff policy that stops retrying after the context +// is canceled. +type BackOffContext interface { // nolint: golint + BackOff + Context() context.Context +} + +type backOffContext struct { + BackOff + ctx context.Context +} + +// WithContext returns a BackOffContext with context ctx +// +// ctx must not be nil +func WithContext(b BackOff, ctx context.Context) BackOffContext { // nolint: golint + if ctx == nil { + panic("nil context") + } + + if b, ok := b.(*backOffContext); ok { + return &backOffContext{ + BackOff: b.BackOff, + ctx: ctx, + } + } + + return &backOffContext{ + BackOff: b, + ctx: ctx, + } +} + +func getContext(b BackOff) context.Context { + if cb, ok := b.(BackOffContext); ok { + return cb.Context() + } + if tb, ok := b.(*backOffTries); ok { + return getContext(tb.delegate) + } + return context.Background() +} + +func (b *backOffContext) Context() context.Context { + return b.ctx +} + +func (b *backOffContext) NextBackOff() time.Duration { + select { + case <-b.ctx.Done(): + return Stop + default: + } + next := b.BackOff.NextBackOff() + if deadline, ok := b.ctx.Deadline(); ok && deadline.Sub(time.Now()) < next { // nolint: gosimple + return Stop + } + return next +} diff --git a/vendor/github.com/cenkalti/backoff/v4/exponential.go b/vendor/github.com/cenkalti/backoff/v4/exponential.go new file mode 100644 index 00000000000..3d3453215bb --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/exponential.go @@ -0,0 +1,158 @@ +package backoff + +import ( + "math/rand" + "time" +) + +/* +ExponentialBackOff is a backoff implementation that increases the backoff +period for each retry attempt using a randomization function that grows exponentially. 
+ +NextBackOff() is calculated using the following formula: + + randomized interval = + RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor]) + +In other words NextBackOff() will range between the randomization factor +percentage below and above the retry interval. + +For example, given the following parameters: + + RetryInterval = 2 + RandomizationFactor = 0.5 + Multiplier = 2 + +the actual backoff period used in the next retry attempt will range between 1 and 3 seconds, +multiplied by the exponential, that is, between 2 and 6 seconds. + +Note: MaxInterval caps the RetryInterval and not the randomized interval. + +If the time elapsed since an ExponentialBackOff instance is created goes past the +MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop. + +The elapsed time can be reset by calling Reset(). + +Example: Given the following default arguments, for 10 tries the sequence will be, +and assuming we go over the MaxElapsedTime on the 10th try: + + Request # RetryInterval (seconds) Randomized Interval (seconds) + + 1 0.5 [0.25, 0.75] + 2 0.75 [0.375, 1.125] + 3 1.125 [0.562, 1.687] + 4 1.687 [0.8435, 2.53] + 5 2.53 [1.265, 3.795] + 6 3.795 [1.897, 5.692] + 7 5.692 [2.846, 8.538] + 8 8.538 [4.269, 12.807] + 9 12.807 [6.403, 19.210] + 10 19.210 backoff.Stop + +Note: Implementation is not thread-safe. +*/ +type ExponentialBackOff struct { + InitialInterval time.Duration + RandomizationFactor float64 + Multiplier float64 + MaxInterval time.Duration + // After MaxElapsedTime the ExponentialBackOff returns Stop. + // It never stops if MaxElapsedTime == 0. + MaxElapsedTime time.Duration + Stop time.Duration + Clock Clock + + currentInterval time.Duration + startTime time.Time +} + +// Clock is an interface that returns current time for BackOff. +type Clock interface { + Now() time.Time +} + +// Default values for ExponentialBackOff. +const ( + DefaultInitialInterval = 500 * time.Millisecond + DefaultRandomizationFactor = 0.5 + DefaultMultiplier = 1.5 + DefaultMaxInterval = 60 * time.Second + DefaultMaxElapsedTime = 15 * time.Minute +) + +// NewExponentialBackOff creates an instance of ExponentialBackOff using default values. +func NewExponentialBackOff() *ExponentialBackOff { + b := &ExponentialBackOff{ + InitialInterval: DefaultInitialInterval, + RandomizationFactor: DefaultRandomizationFactor, + Multiplier: DefaultMultiplier, + MaxInterval: DefaultMaxInterval, + MaxElapsedTime: DefaultMaxElapsedTime, + Stop: Stop, + Clock: SystemClock, + } + b.Reset() + return b +} + +type systemClock struct{} + +func (t systemClock) Now() time.Time { + return time.Now() +} + +// SystemClock implements Clock interface that uses time.Now(). +var SystemClock = systemClock{} + +// Reset the interval back to the initial retry interval and restarts the timer. +// Reset must be called before using b. +func (b *ExponentialBackOff) Reset() { + b.currentInterval = b.InitialInterval + b.startTime = b.Clock.Now() +} + +// NextBackOff calculates the next backoff interval using the formula: +// Randomized interval = RetryInterval * (1 ± RandomizationFactor) +func (b *ExponentialBackOff) NextBackOff() time.Duration { + // Make sure we have not gone over the maximum elapsed time. 
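+	// (This v4 check also accounts for the upcoming wait: if elapsed+next would
+	// exceed MaxElapsedTime, b.Stop is returned rather than sleeping past the deadline.)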
+	elapsed := b.GetElapsedTime()
+	next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval)
+	b.incrementCurrentInterval()
+	if b.MaxElapsedTime != 0 && elapsed+next > b.MaxElapsedTime {
+		return b.Stop
+	}
+	return next
+}
+
+// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance
+// is created and is reset when Reset() is called.
+//
+// The elapsed time is computed using time.Now().UnixNano(). It is
+// safe to call even while the backoff policy is used by a running
+// ticker.
+func (b *ExponentialBackOff) GetElapsedTime() time.Duration {
+	return b.Clock.Now().Sub(b.startTime)
+}
+
+// Increments the current interval by multiplying it with the multiplier.
+func (b *ExponentialBackOff) incrementCurrentInterval() {
+	// Check for overflow, if overflow is detected set the current interval to the max interval.
+	if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier {
+		b.currentInterval = b.MaxInterval
+	} else {
+		b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier)
+	}
+}
+
+// Returns a random value from the following interval:
+// [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval].
+func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration {
+	var delta = randomizationFactor * float64(currentInterval)
+	var minInterval = float64(currentInterval) - delta
+	var maxInterval = float64(currentInterval) + delta
+
+	// Get a random value from the range [minInterval, maxInterval].
+	// The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then
+	// we want a 33% chance for selecting either 1, 2 or 3.
+	return time.Duration(minInterval + (random * (maxInterval - minInterval + 1)))
+}
diff --git a/vendor/github.com/cenkalti/backoff/v4/go.mod b/vendor/github.com/cenkalti/backoff/v4/go.mod
new file mode 100644
index 00000000000..cef50ea6724
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/go.mod
@@ -0,0 +1,3 @@
+module github.com/cenkalti/backoff/v4
+
+go 1.12
diff --git a/vendor/github.com/cenkalti/backoff/v4/retry.go b/vendor/github.com/cenkalti/backoff/v4/retry.go
new file mode 100644
index 00000000000..6c776ccf8ed
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/retry.go
@@ -0,0 +1,96 @@
+package backoff
+
+import "time"
+
+// An Operation is executed by Retry() or RetryNotify().
+// The operation will be retried using a backoff policy if it returns an error.
+type Operation func() error
+
+// Notify is a notify-on-error function. It receives an operation error and
+// backoff delay if the operation failed (with an error).
+//
+// NOTE that if the backoff policy states that no more retries should be made,
+// the notify function isn't called.
+type Notify func(error, time.Duration)
+
+// Retry the operation o until it does not return error or BackOff stops.
+// o is guaranteed to be run at least once.
+//
+// If o returns a *PermanentError, the operation is not retried, and the
+// wrapped error is returned.
+//
+// Retry sleeps the goroutine for the duration returned by BackOff after a
+// failed operation returns.
+func Retry(o Operation, b BackOff) error {
+	return RetryNotify(o, b, nil)
+}
+
+// RetryNotify calls notify function with the error and wait duration
+// for each failed attempt before sleep.
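+//
+// A typical (purely illustrative) notify callback just logs the failure:
+//
+//	RetryNotify(op, NewExponentialBackOff(), func(err error, d time.Duration) {
+//		log.Printf("retrying in %s after error: %v", d, err)
+//	})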
+func RetryNotify(operation Operation, b BackOff, notify Notify) error {
+	return RetryNotifyWithTimer(operation, b, notify, nil)
+}
+
+// RetryNotifyWithTimer calls the notify function with the error and wait duration
+// for each failed attempt before sleeping, using the given Timer.
+// A default timer that uses the system timer is used when nil is passed.
+func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer) error {
+	var err error
+	var next time.Duration
+	if t == nil {
+		t = &defaultTimer{}
+	}
+
+	defer func() {
+		t.Stop()
+	}()
+
+	ctx := getContext(b)
+
+	b.Reset()
+	for {
+		if err = operation(); err == nil {
+			return nil
+		}
+
+		if permanent, ok := err.(*PermanentError); ok {
+			return permanent.Err
+		}
+
+		if next = b.NextBackOff(); next == Stop {
+			return err
+		}
+
+		if notify != nil {
+			notify(err, next)
+		}
+
+		t.Start(next)
+
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		case <-t.C():
+		}
+	}
+}
+
+// PermanentError signals that the operation should not be retried.
+type PermanentError struct {
+	Err error
+}
+
+func (e *PermanentError) Error() string {
+	return e.Err.Error()
+}
+
+func (e *PermanentError) Unwrap() error {
+	return e.Err
+}
+
+// Permanent wraps the given err in a *PermanentError.
+func Permanent(err error) *PermanentError {
+	return &PermanentError{
+		Err: err,
+	}
+}
diff --git a/vendor/github.com/cenkalti/backoff/v4/ticker.go b/vendor/github.com/cenkalti/backoff/v4/ticker.go
new file mode 100644
index 00000000000..df9d68bce52
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/ticker.go
@@ -0,0 +1,97 @@
+package backoff
+
+import (
+	"context"
+	"sync"
+	"time"
+)
+
+// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff.
+//
+// Ticks will continue to arrive when the previous operation is still running,
+// so operations that take a while to fail could run in quick succession.
+type Ticker struct {
+	C        <-chan time.Time
+	c        chan time.Time
+	b        BackOff
+	ctx      context.Context
+	timer    Timer
+	stop     chan struct{}
+	stopOnce sync.Once
+}
+
+// NewTicker returns a new Ticker containing a channel that will send
+// the time at times specified by the BackOff argument. Ticker is
+// guaranteed to tick at least once. The channel is closed when the Stop
+// method is called or BackOff stops. It is not safe to manipulate the
+// provided backoff policy (notably calling NextBackOff or Reset)
+// while the ticker is running.
+func NewTicker(b BackOff) *Ticker {
+	return NewTickerWithTimer(b, &defaultTimer{})
+}
+
+// NewTickerWithTimer returns a new Ticker with a custom timer.
+// A default timer that uses the system timer is used when nil is passed.
+func NewTickerWithTimer(b BackOff, timer Timer) *Ticker {
+	if timer == nil {
+		timer = &defaultTimer{}
+	}
+	c := make(chan time.Time)
+	t := &Ticker{
+		C:     c,
+		c:     c,
+		b:     b,
+		ctx:   getContext(b),
+		timer: timer,
+		stop:  make(chan struct{}),
+	}
+	t.b.Reset()
+	go t.run()
+	return t
+}
+
+// Stop turns off a ticker. After Stop, no more ticks will be sent.
+func (t *Ticker) Stop() {
+	t.stopOnce.Do(func() { close(t.stop) })
+}
+
+func (t *Ticker) run() {
+	c := t.c
+	defer close(c)
+
+	// Ticker is guaranteed to tick at least once.
+	afterC := t.send(time.Now())
+
+	for {
+		if afterC == nil {
+			return
+		}
+
+		select {
+		case tick := <-afterC:
+			afterC = t.send(tick)
+		case <-t.stop:
+			t.c = nil // Prevent future ticks from being sent to the channel.
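+			// The deferred close(c) above still runs on return: c was captured
+			// before t.c was cleared, so receivers on the public channel C see
+			// it close.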
+			return
+		case <-t.ctx.Done():
+			return
+		}
+	}
+}
+
+func (t *Ticker) send(tick time.Time) <-chan time.Time {
+	select {
+	case t.c <- tick:
+	case <-t.stop:
+		return nil
+	}
+
+	next := t.b.NextBackOff()
+	if next == Stop {
+		t.Stop()
+		return nil
+	}
+
+	t.timer.Start(next)
+	return t.timer.C()
+}
diff --git a/vendor/github.com/cenkalti/backoff/v4/timer.go b/vendor/github.com/cenkalti/backoff/v4/timer.go
new file mode 100644
index 00000000000..8120d0213c5
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/timer.go
@@ -0,0 +1,35 @@
+package backoff
+
+import "time"
+
+type Timer interface {
+	Start(duration time.Duration)
+	Stop()
+	C() <-chan time.Time
+}
+
+// defaultTimer implements the Timer interface using time.Timer.
+type defaultTimer struct {
+	timer *time.Timer
+}
+
+// C returns the timer's channel, which receives the current time when the timer fires.
+func (t *defaultTimer) C() <-chan time.Time {
+	return t.timer.C
+}
+
+// Start starts the timer to fire after the given duration.
+func (t *defaultTimer) Start(duration time.Duration) {
+	if t.timer == nil {
+		t.timer = time.NewTimer(duration)
+	} else {
+		t.timer.Reset(duration)
+	}
+}
+
+// Stop is called when the timer is not used anymore and resources may be freed.
+func (t *defaultTimer) Stop() {
+	if t.timer != nil {
+		t.timer.Stop()
+	}
+}
diff --git a/vendor/github.com/cenkalti/backoff/v4/tries.go b/vendor/github.com/cenkalti/backoff/v4/tries.go
new file mode 100644
index 00000000000..28d58ca37c6
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/tries.go
@@ -0,0 +1,38 @@
+package backoff
+
+import "time"
+
+/*
+WithMaxRetries creates a wrapper around another BackOff, which will
+return Stop if NextBackOff() has been called too many times since
+the last time Reset() was called.
+
+Note: Implementation is not thread-safe.
+*/
+func WithMaxRetries(b BackOff, max uint64) BackOff {
+	return &backOffTries{delegate: b, maxTries: max}
+}
+
+type backOffTries struct {
+	delegate BackOff
+	maxTries uint64
+	numTries uint64
+}
+
+func (b *backOffTries) NextBackOff() time.Duration {
+	if b.maxTries == 0 {
+		return Stop
+	}
+	if b.maxTries > 0 {
+		if b.maxTries <= b.numTries {
+			return Stop
+		}
+		b.numTries++
+	}
+	return b.delegate.NextBackOff()
+}
+
+func (b *backOffTries) Reset() {
+	b.numTries = 0
+	b.delegate.Reset()
+}
diff --git a/vendor/github.com/certifi/gocertifi/LICENSE b/vendor/github.com/certifi/gocertifi/LICENSE
new file mode 100644
index 00000000000..cfd5dcbbb15
--- /dev/null
+++ b/vendor/github.com/certifi/gocertifi/LICENSE
@@ -0,0 +1,3 @@
+This Source Code Form is subject to the terms of the Mozilla Public License,
+v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain
+one at http://mozilla.org/MPL/2.0/.
diff --git a/vendor/github.com/certifi/gocertifi/README.md b/vendor/github.com/certifi/gocertifi/README.md
new file mode 100644
index 00000000000..c8bc9f629ea
--- /dev/null
+++ b/vendor/github.com/certifi/gocertifi/README.md
@@ -0,0 +1,60 @@
+# GoCertifi: SSL Certificates for Golang
+
+This Go package contains a CA bundle that you can reference in your Go code.
+This is useful for systems that do not have CA bundles that Golang can find
+itself, or where a uniform set of CAs is valuable.
+
+This is the same CA bundle that ships with the
+[Python Requests](https://github.com/kennethreitz/requests) library, and is a
+Golang-specific port of [certifi](https://github.com/kennethreitz/certifi). The
+CA bundle is derived from Mozilla's canonical set.
+ +## Usage + +You can use the `gocertifi` package as follows: + +```go +import "github.com/certifi/gocertifi" + +cert_pool, err := gocertifi.CACerts() +``` + +You can use the returned `*x509.CertPool` as part of an HTTP transport, for example: + +```go +import ( + "net/http" + "crypto/tls" +) + +// Setup an HTTP client with a custom transport +transport := &http.Transport{ + TLSClientConfig: &tls.Config{RootCAs: cert_pool}, +} +client := &http.Client{Transport: transport} + +// Make an HTTP request using our custom transport +resp, err := client.Get("https://example.com") +``` + +## Detailed Documentation + +Import as follows: + +```go +import "github.com/certifi/gocertifi" +``` + +### Errors + +```go +var ErrParseFailed = errors.New("gocertifi: error when parsing certificates") +``` + +### Functions + +```go +func CACerts() (*x509.CertPool, error) +``` +CACerts builds an X.509 certificate pool containing the Mozilla CA Certificate +bundle. Returns nil on error along with an appropriate error code. diff --git a/vendor/github.com/certifi/gocertifi/certifi.go b/vendor/github.com/certifi/gocertifi/certifi.go new file mode 100644 index 00000000000..a152a0d4496 --- /dev/null +++ b/vendor/github.com/certifi/gocertifi/certifi.go @@ -0,0 +1,4680 @@ +// Code generated by go generate; DO NOT EDIT. +// 2019-04-09 17:53:46.117677 -0700 PDT m=+1.272386477 +// https://mkcert.org/generate/ + +package gocertifi + +//go:generate go run gen.go + +import "crypto/x509" + +const pemcerts string = ` + +# Issuer: CN=GlobalSign Root CA O=GlobalSign nv-sa OU=Root CA +# Subject: CN=GlobalSign Root CA O=GlobalSign nv-sa OU=Root CA +# Label: "GlobalSign Root CA" +# Serial: 4835703278459707669005204 +# MD5 Fingerprint: 3e:45:52:15:09:51:92:e1:b7:5d:37:9f:b1:87:29:8a +# SHA1 Fingerprint: b1:bc:96:8b:d4:f4:9d:62:2a:a8:9a:81:f2:15:01:52:a4:1d:82:9c +# SHA256 Fingerprint: eb:d4:10:40:e4:bb:3e:c7:42:c9:e3:81:d3:1e:f2:a4:1a:48:b6:68:5c:96:e7:ce:f3:c1:df:6c:d4:33:1c:99 +-----BEGIN CERTIFICATE----- +MIIDdTCCAl2gAwIBAgILBAAAAAABFUtaw5QwDQYJKoZIhvcNAQEFBQAwVzELMAkG +A1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2ExEDAOBgNVBAsTB1Jv +b3QgQ0ExGzAZBgNVBAMTEkdsb2JhbFNpZ24gUm9vdCBDQTAeFw05ODA5MDExMjAw +MDBaFw0yODAxMjgxMjAwMDBaMFcxCzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9i +YWxTaWduIG52LXNhMRAwDgYDVQQLEwdSb290IENBMRswGQYDVQQDExJHbG9iYWxT +aWduIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDaDuaZ +jc6j40+Kfvvxi4Mla+pIH/EqsLmVEQS98GPR4mdmzxzdzxtIK+6NiY6arymAZavp +xy0Sy6scTHAHoT0KMM0VjU/43dSMUBUc71DuxC73/OlS8pF94G3VNTCOXkNz8kHp +1Wrjsok6Vjk4bwY8iGlbKk3Fp1S4bInMm/k8yuX9ifUSPJJ4ltbcdG6TRGHRjcdG +snUOhugZitVtbNV4FpWi6cgKOOvyJBNPc1STE4U6G7weNLWLBYy5d4ux2x8gkasJ +U26Qzns3dLlwR5EiUWMWea6xrkEmCMgZK9FGqkjWZCrXgzT/LCrBbBlDSgeF59N8 +9iFo7+ryUp9/k5DPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8E +BTADAQH/MB0GA1UdDgQWBBRge2YaRQ2XyolQL30EzTSo//z9SzANBgkqhkiG9w0B +AQUFAAOCAQEA1nPnfE920I2/7LqivjTFKDK1fPxsnCwrvQmeU79rXqoRSLblCKOz +yj1hTdNGCbM+w6DjY1Ub8rrvrTnhQ7k4o+YviiY776BQVvnGCv04zcQLcFGUl5gE +38NflNUVyRRBnMRddWQVDf9VMOyGj/8N7yy5Y0b2qvzfvGn9LhJIZJrglfCm7ymP +AbEVtQwdpf5pLGkkeB6zpxxxYu7KyJesF12KwvhHhm4qxFYxldBniYUr+WymXUad +DKqC5JlR3XC321Y9YeRq4VzW9v493kHMB65jUr9TU/Qr6cf9tveCX4XSQRjbgbME +HMUfpIBvFSDJ3gyICh3WZlXi/EjJKSZp4A== +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R2 +# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R2 +# Label: "GlobalSign Root CA - R2" +# Serial: 4835703278459682885658125 +# MD5 Fingerprint: 94:14:77:7e:3e:5e:fd:8f:30:bd:41:b0:cf:e7:d0:30 +# SHA1 Fingerprint: 
75:e0:ab:b6:13:85:12:27:1c:04:f8:5f:dd:de:38:e4:b7:24:2e:fe +# SHA256 Fingerprint: ca:42:dd:41:74:5f:d0:b8:1e:b9:02:36:2c:f9:d8:bf:71:9d:a1:bd:1b:1e:fc:94:6f:5b:4c:99:f4:2c:1b:9e +-----BEGIN CERTIFICATE----- +MIIDujCCAqKgAwIBAgILBAAAAAABD4Ym5g0wDQYJKoZIhvcNAQEFBQAwTDEgMB4G +A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjIxEzARBgNVBAoTCkdsb2JhbFNp +Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDYxMjE1MDgwMDAwWhcNMjExMjE1 +MDgwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMjETMBEG +A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAKbPJA6+Lm8omUVCxKs+IVSbC9N/hHD6ErPL +v4dfxn+G07IwXNb9rfF73OX4YJYJkhD10FPe+3t+c4isUoh7SqbKSaZeqKeMWhG8 +eoLrvozps6yWJQeXSpkqBy+0Hne/ig+1AnwblrjFuTosvNYSuetZfeLQBoZfXklq +tTleiDTsvHgMCJiEbKjNS7SgfQx5TfC4LcshytVsW33hoCmEofnTlEnLJGKRILzd +C9XZzPnqJworc5HGnRusyMvo4KD0L5CLTfuwNhv2GXqF4G3yYROIXJ/gkwpRl4pa +zq+r1feqCapgvdzZX99yqWATXgAByUr6P6TqBwMhAo6CygPCm48CAwEAAaOBnDCB +mTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUm+IH +V2ccHsBqBt5ZtJot39wZhi4wNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL2NybC5n +bG9iYWxzaWduLm5ldC9yb290LXIyLmNybDAfBgNVHSMEGDAWgBSb4gdXZxwewGoG +3lm0mi3f3BmGLjANBgkqhkiG9w0BAQUFAAOCAQEAmYFThxxol4aR7OBKuEQLq4Gs +J0/WwbgcQ3izDJr86iw8bmEbTUsp9Z8FHSbBuOmDAGJFtqkIk7mpM0sYmsL4h4hO +291xNBrBVNpGP+DTKqttVCL1OmLNIG+6KYnX3ZHu01yiPqFbQfXf5WRDLenVOavS +ot+3i9DAgBkcRcAtjOj4LaR0VknFBbVPFd5uRHg5h6h+u/N5GJG79G+dwfCMNYxd +AfvDbbnvRG15RjF+Cv6pgsH/76tuIMRQyV+dTZsXjAzlAcmgQWpzU/qlULRuJQ/7 +TBj0/VLZjmmx6BEP3ojY+x1J96relc8geMJgEtslQIxq/H5COEBkEveegeGTLg== +-----END CERTIFICATE----- + +# Issuer: CN=VeriSign Class 3 Public Primary Certification Authority - G3 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 1999 VeriSign, Inc. - For authorized use only +# Subject: CN=VeriSign Class 3 Public Primary Certification Authority - G3 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 1999 VeriSign, Inc. 
- For authorized use only +# Label: "Verisign Class 3 Public Primary Certification Authority - G3" +# Serial: 206684696279472310254277870180966723415 +# MD5 Fingerprint: cd:68:b6:a7:c7:c4:ce:75:e0:1d:4f:57:44:61:92:09 +# SHA1 Fingerprint: 13:2d:0d:45:53:4b:69:97:cd:b2:d5:c3:39:e2:55:76:60:9b:5c:c6 +# SHA256 Fingerprint: eb:04:cf:5e:b1:f3:9a:fa:76:2f:2b:b1:20:f2:96:cb:a5:20:c1:b9:7d:b1:58:95:65:b8:1c:b9:a1:7b:72:44 +-----BEGIN CERTIFICATE----- +MIIEGjCCAwICEQCbfgZJoz5iudXukEhxKe9XMA0GCSqGSIb3DQEBBQUAMIHKMQsw +CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZl +cmlTaWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWdu +LCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlT +aWduIENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3Jp +dHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQswCQYD +VQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlT +aWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJ +bmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWdu +IENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg +LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMu6nFL8eB8aHm8b +N3O9+MlrlBIwT/A2R/XQkQr1F8ilYcEWQE37imGQ5XYgwREGfassbqb1EUGO+i2t +KmFZpGcmTNDovFJbcCAEWNF6yaRpvIMXZK0Fi7zQWM6NjPXr8EJJC52XJ2cybuGu +kxUccLwgTS8Y3pKI6GyFVxEa6X7jJhFUokWWVYPKMIno3Nij7SqAP395ZVc+FSBm +CC+Vk7+qRy+oRpfwEuL+wgorUeZ25rdGt+INpsyow0xZVYnm6FNcHOqd8GIWC6fJ +Xwzw3sJ2zq/3avL6QaaiMxTJ5Xpj055iN9WFZZ4O5lMkdBteHRJTW8cs54NJOxWu +imi5V5cCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAERSWwauSCPc/L8my/uRan2Te +2yFPhpk0djZX3dAVL8WtfxUfN2JzPtTnX84XA9s1+ivbrmAJXx5fj267Cz3qWhMe +DGBvtcC1IyIuBwvLqXTLR7sdwdela8wv0kL9Sd2nic9TutoAWii/gt/4uhMdUIaC +/Y4wjylGsB49Ndo4YhYYSq3mtlFs3q9i6wHQHiT+eo8SGhJouPtmmRQURVyu565p +F4ErWjfJXir0xuKhXFSbplQAz/DxwceYMBo7Nhbbo27q/a2ywtrvAkcTisDxszGt +TxzhT5yvDwyd93gN2PQ1VoDat20Xj50egWTh/sVFuq1ruQp6Tk9LhO5L8X3dEQ== +-----END CERTIFICATE----- + +# Issuer: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited +# Subject: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. 
(limits liab.)/(c) 1999 Entrust.net Limited +# Label: "Entrust.net Premium 2048 Secure Server CA" +# Serial: 946069240 +# MD5 Fingerprint: ee:29:31:bc:32:7e:9a:e6:e8:b5:f7:51:b4:34:71:90 +# SHA1 Fingerprint: 50:30:06:09:1d:97:d4:f5:ae:39:f7:cb:e7:92:7d:7d:65:2d:34:31 +# SHA256 Fingerprint: 6d:c4:71:72:e0:1c:bc:b0:bf:62:58:0d:89:5f:e2:b8:ac:9a:d4:f8:73:80:1e:0c:10:b9:c8:37:d2:1e:b1:77 +-----BEGIN CERTIFICATE----- +MIIEKjCCAxKgAwIBAgIEOGPe+DANBgkqhkiG9w0BAQUFADCBtDEUMBIGA1UEChML +RW50cnVzdC5uZXQxQDA+BgNVBAsUN3d3dy5lbnRydXN0Lm5ldC9DUFNfMjA0OCBp +bmNvcnAuIGJ5IHJlZi4gKGxpbWl0cyBsaWFiLikxJTAjBgNVBAsTHChjKSAxOTk5 +IEVudHJ1c3QubmV0IExpbWl0ZWQxMzAxBgNVBAMTKkVudHJ1c3QubmV0IENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5ICgyMDQ4KTAeFw05OTEyMjQxNzUwNTFaFw0yOTA3 +MjQxNDE1MTJaMIG0MRQwEgYDVQQKEwtFbnRydXN0Lm5ldDFAMD4GA1UECxQ3d3d3 +LmVudHJ1c3QubmV0L0NQU18yMDQ4IGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxp +YWIuKTElMCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDEzMDEG +A1UEAxMqRW50cnVzdC5uZXQgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgKDIwNDgp +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArU1LqRKGsuqjIAcVFmQq +K0vRvwtKTY7tgHalZ7d4QMBzQshowNtTK91euHaYNZOLGp18EzoOH1u3Hs/lJBQe +sYGpjX24zGtLA/ECDNyrpUAkAH90lKGdCCmziAv1h3edVc3kw37XamSrhRSGlVuX +MlBvPci6Zgzj/L24ScF2iUkZ/cCovYmjZy/Gn7xxGWC4LeksyZB2ZnuU4q941mVT +XTzWnLLPKQP5L6RQstRIzgUyVYr9smRMDuSYB3Xbf9+5CFVghTAp+XtIpGmG4zU/ +HoZdenoVve8AjhUiVBcAkCaTvA5JaJG/+EfTnZVCwQ5N328mz8MYIWJmQ3DW1cAH +4QIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV +HQ4EFgQUVeSB0RGAvtiJuQijMfmhJAkWuXAwDQYJKoZIhvcNAQEFBQADggEBADub +j1abMOdTmXx6eadNl9cZlZD7Bh/KM3xGY4+WZiT6QBshJ8rmcnPyT/4xmf3IDExo +U8aAghOY+rat2l098c5u9hURlIIM7j+VrxGrD9cv3h8Dj1csHsm7mhpElesYT6Yf +zX1XEC+bBAlahLVu2B064dae0Wx5XnkcFMXj0EyTO2U87d89vqbllRrDtRnDvV5b +u/8j72gZyxKTJ1wDLW8w0B62GqzeWvfRqqgnpv55gcR5mTNXuhKwqeBCbJPKVt7+ +bYQLCIt+jerXmCHG8+c8eS9enNFMFY3h7CI3zJpDC5fcgJCNs2ebb0gIFVbPv/Er +fF6adulZkMV8gzURZVE= +-----END CERTIFICATE----- + +# Issuer: CN=Baltimore CyberTrust Root O=Baltimore OU=CyberTrust +# Subject: CN=Baltimore CyberTrust Root O=Baltimore OU=CyberTrust +# Label: "Baltimore CyberTrust Root" +# Serial: 33554617 +# MD5 Fingerprint: ac:b6:94:a5:9c:17:e0:d7:91:52:9b:b1:97:06:a6:e4 +# SHA1 Fingerprint: d4:de:20:d0:5e:66:fc:53:fe:1a:50:88:2c:78:db:28:52:ca:e4:74 +# SHA256 Fingerprint: 16:af:57:a9:f6:76:b0:ab:12:60:95:aa:5e:ba:de:f2:2a:b3:11:19:d6:44:ac:95:cd:4b:93:db:f3:f2:6a:eb +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIEAgAAuTANBgkqhkiG9w0BAQUFADBaMQswCQYDVQQGEwJJ +RTESMBAGA1UEChMJQmFsdGltb3JlMRMwEQYDVQQLEwpDeWJlclRydXN0MSIwIAYD +VQQDExlCYWx0aW1vcmUgQ3liZXJUcnVzdCBSb290MB4XDTAwMDUxMjE4NDYwMFoX +DTI1MDUxMjIzNTkwMFowWjELMAkGA1UEBhMCSUUxEjAQBgNVBAoTCUJhbHRpbW9y +ZTETMBEGA1UECxMKQ3liZXJUcnVzdDEiMCAGA1UEAxMZQmFsdGltb3JlIEN5YmVy +VHJ1c3QgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKMEuyKr +mD1X6CZymrV51Cni4eiVgLGw41uOKymaZN+hXe2wCQVt2yguzmKiYv60iNoS6zjr +IZ3AQSsBUnuId9Mcj8e6uYi1agnnc+gRQKfRzMpijS3ljwumUNKoUMMo6vWrJYeK +mpYcqWe4PwzV9/lSEy/CG9VwcPCPwBLKBsua4dnKM3p31vjsufFoREJIE9LAwqSu +XmD+tqYF/LTdB1kC1FkYmGP1pWPgkAx9XbIGevOF6uvUA65ehD5f/xXtabz5OTZy +dc93Uk3zyZAsuT3lySNTPx8kmCFcB5kpvcY67Oduhjprl3RjM71oGDHweI12v/ye +jl0qhqdNkNwnGjkCAwEAAaNFMEMwHQYDVR0OBBYEFOWdWTCCR1jMrPoIVDaGezq1 +BE3wMBIGA1UdEwEB/wQIMAYBAf8CAQMwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3 +DQEBBQUAA4IBAQCFDF2O5G9RaEIFoN27TyclhAO992T9Ldcw46QQF+vaKSm2eT92 +9hkTI7gQCvlYpNRhcL0EYWoSihfVCr3FvDB81ukMJY2GQE/szKN+OMY3EU/t3Wgx +jkzSswF07r51XgdIGn9w/xZchMB5hbgF/X++ZRGjD8ACtPhSNzkE1akxehi/oCr0 +Epn3o0WC4zxe9Z2etciefC7IpJ5OCBRLbf1wbWsaY71k5h+3zvDyny67G7fyUIhz 
+ksLi4xaNmjICq44Y3ekQEe5+NauQrz4wlHrQMz2nZQ/1/I6eYs9HRCwBXbsdtTLS +R9I4LtD+gdwyah617jzV/OeBHRnDJELqYzmp +-----END CERTIFICATE----- + +# Issuer: CN=AddTrust External CA Root O=AddTrust AB OU=AddTrust External TTP Network +# Subject: CN=AddTrust External CA Root O=AddTrust AB OU=AddTrust External TTP Network +# Label: "AddTrust External Root" +# Serial: 1 +# MD5 Fingerprint: 1d:35:54:04:85:78:b0:3f:42:42:4d:bf:20:73:0a:3f +# SHA1 Fingerprint: 02:fa:f3:e2:91:43:54:68:60:78:57:69:4d:f5:e4:5b:68:85:18:68 +# SHA256 Fingerprint: 68:7f:a4:51:38:22:78:ff:f0:c8:b1:1f:8d:43:d5:76:67:1c:6e:b2:bc:ea:b4:13:fb:83:d9:65:d0:6d:2f:f2 +-----BEGIN CERTIFICATE----- +MIIENjCCAx6gAwIBAgIBATANBgkqhkiG9w0BAQUFADBvMQswCQYDVQQGEwJTRTEU +MBIGA1UEChMLQWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFkZFRydXN0IEV4dGVybmFs +IFRUUCBOZXR3b3JrMSIwIAYDVQQDExlBZGRUcnVzdCBFeHRlcm5hbCBDQSBSb290 +MB4XDTAwMDUzMDEwNDgzOFoXDTIwMDUzMDEwNDgzOFowbzELMAkGA1UEBhMCU0Ux +FDASBgNVBAoTC0FkZFRydXN0IEFCMSYwJAYDVQQLEx1BZGRUcnVzdCBFeHRlcm5h +bCBUVFAgTmV0d29yazEiMCAGA1UEAxMZQWRkVHJ1c3QgRXh0ZXJuYWwgQ0EgUm9v +dDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALf3GjPm8gAELTngTlvt +H7xsD821+iO2zt6bETOXpClMfZOfvUq8k+0DGuOPz+VtUFrWlymUWoCwSXrbLpX9 +uMq/NzgtHj6RQa1wVsfwTz/oMp50ysiQVOnGXw94nZpAPA6sYapeFI+eh6FqUNzX +mk6vBbOmcZSccbNQYArHE504B4YCqOmoaSYYkKtMsE8jqzpPhNjfzp/haW+710LX +a0Tkx63ubUFfclpxCDezeWWkWaCUN/cALw3CknLa0Dhy2xSoRcRdKn23tNbE7qzN +E0S3ySvdQwAl+mG5aWpYIxG3pzOPVnVZ9c0p10a3CitlttNCbxWyuHv77+ldU9U0 +WicCAwEAAaOB3DCB2TAdBgNVHQ4EFgQUrb2YejS0Jvf6xCZU7wO94CTLVBowCwYD +VR0PBAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wgZkGA1UdIwSBkTCBjoAUrb2YejS0 +Jvf6xCZU7wO94CTLVBqhc6RxMG8xCzAJBgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRU +cnVzdCBBQjEmMCQGA1UECxMdQWRkVHJ1c3QgRXh0ZXJuYWwgVFRQIE5ldHdvcmsx +IjAgBgNVBAMTGUFkZFRydXN0IEV4dGVybmFsIENBIFJvb3SCAQEwDQYJKoZIhvcN +AQEFBQADggEBALCb4IUlwtYj4g+WBpKdQZic2YR5gdkeWxQHIzZlj7DYd7usQWxH +YINRsPkyPef89iYTx4AWpb9a/IfPeHmJIZriTAcKhjW88t5RxNKWt9x+Tu5w/Rw5 +6wwCURQtjr0W4MHfRnXnJK3s9EK0hZNwEGe6nQY1ShjTK3rMUUKhemPR5ruhxSvC +Nr4TDea9Y355e6cJDUCrat2PisP29owaQgVR1EX1n6diIWgVIEM8med8vSTYqZEX +c4g/VhsxOBi0cQ+azcgOno4uG+GMmIPLHzHxREzGBHNJdmAPx/i9F4BrLunMTA5a +mnkPIAou1Z5jJh5VkpTYghdae9C8x49OhgQ= +-----END CERTIFICATE----- + +# Issuer: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc. +# Subject: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc. 
+# Label: "Entrust Root Certification Authority" +# Serial: 1164660820 +# MD5 Fingerprint: d6:a5:c3:ed:5d:dd:3e:00:c1:3d:87:92:1f:1d:3f:e4 +# SHA1 Fingerprint: b3:1e:b1:b7:40:e3:6c:84:02:da:dc:37:d4:4d:f5:d4:67:49:52:f9 +# SHA256 Fingerprint: 73:c1:76:43:4f:1b:c6:d5:ad:f4:5b:0e:76:e7:27:28:7c:8d:e5:76:16:c1:e6:e6:14:1a:2b:2c:bc:7d:8e:4c +-----BEGIN CERTIFICATE----- +MIIEkTCCA3mgAwIBAgIERWtQVDANBgkqhkiG9w0BAQUFADCBsDELMAkGA1UEBhMC +VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xOTA3BgNVBAsTMHd3dy5lbnRydXN0 +Lm5ldC9DUFMgaXMgaW5jb3Jwb3JhdGVkIGJ5IHJlZmVyZW5jZTEfMB0GA1UECxMW +KGMpIDIwMDYgRW50cnVzdCwgSW5jLjEtMCsGA1UEAxMkRW50cnVzdCBSb290IENl +cnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA2MTEyNzIwMjM0MloXDTI2MTEyNzIw +NTM0MlowgbAxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMTkw +NwYDVQQLEzB3d3cuZW50cnVzdC5uZXQvQ1BTIGlzIGluY29ycG9yYXRlZCBieSBy +ZWZlcmVuY2UxHzAdBgNVBAsTFihjKSAyMDA2IEVudHJ1c3QsIEluYy4xLTArBgNV +BAMTJEVudHJ1c3QgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASIwDQYJ +KoZIhvcNAQEBBQADggEPADCCAQoCggEBALaVtkNC+sZtKm9I35RMOVcF7sN5EUFo +Nu3s/poBj6E4KPz3EEZmLk0eGrEaTsbRwJWIsMn/MYszA9u3g3s+IIRe7bJWKKf4 +4LlAcTfFy0cOlypowCKVYhXbR9n10Cv/gkvJrT7eTNuQgFA/CYqEAOwwCj0Yzfv9 +KlmaI5UXLEWeH25DeW0MXJj+SKfFI0dcXv1u5x609mhF0YaDW6KKjbHjKYD+JXGI +rb68j6xSlkuqUY3kEzEZ6E5Nn9uss2rVvDlUccp6en+Q3X0dgNmBu1kmwhH+5pPi +94DkZfs0Nw4pgHBNrziGLp5/V6+eF67rHMsoIV+2HNjnogQi+dPa2MsCAwEAAaOB +sDCBrTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zArBgNVHRAEJDAi +gA8yMDA2MTEyNzIwMjM0MlqBDzIwMjYxMTI3MjA1MzQyWjAfBgNVHSMEGDAWgBRo +kORnpKZTgMeGZqTx90tD+4S9bTAdBgNVHQ4EFgQUaJDkZ6SmU4DHhmak8fdLQ/uE +vW0wHQYJKoZIhvZ9B0EABBAwDhsIVjcuMTo0LjADAgSQMA0GCSqGSIb3DQEBBQUA +A4IBAQCT1DCw1wMgKtD5Y+iRDAUgqV8ZyntyTtSx29CW+1RaGSwMCPeyvIWonX9t +O1KzKtvn1ISMY/YPyyYBkVBs9F8U4pN0wBOeMDpQ47RgxRzwIkSNcUesyBrJ6Zua +AGAT/3B+XxFNSRuzFVJ7yVTav52Vr2ua2J7p8eRDjeIRRDq/r72DQnNSi6q7pynP +9WQcCk3RvKqsnyrQ/39/2n3qse0wJcGE2jTSW3iDVuycNsMm4hH2Z0kdkquM++v/ +eu6FSqdQgPCnXEqULl8FmTxSQeDNtGPPAUO6nIPcj2A781q0tHuu2guQOHXvgR1m +0vdXcDazv/wor3ElhVsT/h5/WrQ8 +-----END CERTIFICATE----- + +# Issuer: CN=GeoTrust Global CA O=GeoTrust Inc. +# Subject: CN=GeoTrust Global CA O=GeoTrust Inc. 
+# Label: "GeoTrust Global CA" +# Serial: 144470 +# MD5 Fingerprint: f7:75:ab:29:fb:51:4e:b7:77:5e:ff:05:3c:99:8e:f5 +# SHA1 Fingerprint: de:28:f4:a4:ff:e5:b9:2f:a3:c5:03:d1:a3:49:a7:f9:96:2a:82:12 +# SHA256 Fingerprint: ff:85:6a:2d:25:1d:cd:88:d3:66:56:f4:50:12:67:98:cf:ab:aa:de:40:79:9c:72:2d:e4:d2:b5:db:36:a7:3a +-----BEGIN CERTIFICATE----- +MIIDVDCCAjygAwIBAgIDAjRWMA0GCSqGSIb3DQEBBQUAMEIxCzAJBgNVBAYTAlVT +MRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMRswGQYDVQQDExJHZW9UcnVzdCBHbG9i +YWwgQ0EwHhcNMDIwNTIxMDQwMDAwWhcNMjIwNTIxMDQwMDAwWjBCMQswCQYDVQQG +EwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEbMBkGA1UEAxMSR2VvVHJ1c3Qg +R2xvYmFsIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2swYYzD9 +9BcjGlZ+W988bDjkcbd4kdS8odhM+KhDtgPpTSEHCIjaWC9mOSm9BXiLnTjoBbdq +fnGk5sRgprDvgOSJKA+eJdbtg/OtppHHmMlCGDUUna2YRpIuT8rxh0PBFpVXLVDv +iS2Aelet8u5fa9IAjbkU+BQVNdnARqN7csiRv8lVK83Qlz6cJmTM386DGXHKTubU +1XupGc1V3sjs0l44U+VcT4wt/lAjNvxm5suOpDkZALeVAjmRCw7+OC7RHQWa9k0+ +bw8HHa8sHo9gOeL6NlMTOdReJivbPagUvTLrGAMoUgRx5aszPeE4uwc2hGKceeoW +MPRfwCvocWvk+QIDAQABo1MwUTAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTA +ephojYn7qwVkDBF9qn1luMrMTjAfBgNVHSMEGDAWgBTAephojYn7qwVkDBF9qn1l +uMrMTjANBgkqhkiG9w0BAQUFAAOCAQEANeMpauUvXVSOKVCUn5kaFOSPeCpilKIn +Z57QzxpeR+nBsqTP3UEaBU6bS+5Kb1VSsyShNwrrZHYqLizz/Tt1kL/6cdjHPTfS +tQWVYrmm3ok9Nns4d0iXrKYgjy6myQzCsplFAMfOEVEiIuCl6rYVSAlk6l5PdPcF +PseKUgzbFbS9bZvlxrFUaKnjaZC2mqUPuLk/IH2uSrW4nOQdtqvmlKXBx4Ot2/Un +hw4EbNX/3aBd7YdStysVAq45pmp06drE57xNNB6pXE0zX5IJL4hmXXeXxx12E6nV +5fEWCRE11azbJHFwLJhWC9kXtNHjUStedejV0NxPNO3CBWaAocvmMw== +-----END CERTIFICATE----- + +# Issuer: CN=GeoTrust Universal CA O=GeoTrust Inc. +# Subject: CN=GeoTrust Universal CA O=GeoTrust Inc. +# Label: "GeoTrust Universal CA" +# Serial: 1 +# MD5 Fingerprint: 92:65:58:8b:a2:1a:31:72:73:68:5c:b4:a5:7a:07:48 +# SHA1 Fingerprint: e6:21:f3:35:43:79:05:9a:4b:68:30:9d:8a:2f:74:22:15:87:ec:79 +# SHA256 Fingerprint: a0:45:9b:9f:63:b2:25:59:f5:fa:5d:4c:6d:b3:f9:f7:2f:f1:93:42:03:35:78:f0:73:bf:1d:1b:46:cb:b9:12 +-----BEGIN CERTIFICATE----- +MIIFaDCCA1CgAwIBAgIBATANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJVUzEW +MBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEeMBwGA1UEAxMVR2VvVHJ1c3QgVW5pdmVy +c2FsIENBMB4XDTA0MDMwNDA1MDAwMFoXDTI5MDMwNDA1MDAwMFowRTELMAkGA1UE +BhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xHjAcBgNVBAMTFUdlb1RydXN0 +IFVuaXZlcnNhbCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAKYV +VaCjxuAfjJ0hUNfBvitbtaSeodlyWL0AG0y/YckUHUWCq8YdgNY96xCcOq9tJPi8 +cQGeBvV8Xx7BDlXKg5pZMK4ZyzBIle0iN430SppyZj6tlcDgFgDgEB8rMQ7XlFTT +QjOgNB0eRXbdT8oYN+yFFXoZCPzVx5zw8qkuEKmS5j1YPakWaDwvdSEYfyh3peFh +F7em6fgemdtzbvQKoiFs7tqqhZJmr/Z6a4LauiIINQ/PQvE1+mrufislzDoR5G2v +c7J2Ha3QsnhnGqQ5HFELZ1aD/ThdDc7d8Lsrlh/eezJS/R27tQahsiFepdaVaH/w +mZ7cRQg+59IJDTWU3YBOU5fXtQlEIGQWFwMCTFMNaN7VqnJNk22CDtucvc+081xd +VHppCZbW2xHBjXWotM85yM48vCR85mLK4b19p71XZQvk/iXttmkQ3CgaRr0BHdCX +teGYO8A3ZNY9lO4L4fUorgtWv3GLIylBjobFS1J72HGrH4oVpjuDWtdYAVHGTEHZ +f9hBZ3KiKN9gg6meyHv8U3NyWfWTehd2Ds735VzZC1U0oqpbtWpU5xPKV+yXbfRe +Bi9Fi1jUIxaS5BZuKGNZMN9QAZxjiRqf2xeUgnA3wySemkfWWspOqGmJch+RbNt+ +nhutxx9z3SxPGWX9f5NAEC7S8O08ni4oPmkmM8V7AgMBAAGjYzBhMA8GA1UdEwEB +/wQFMAMBAf8wHQYDVR0OBBYEFNq7LqqwDLiIJlF0XG0D08DYj3rWMB8GA1UdIwQY +MBaAFNq7LqqwDLiIJlF0XG0D08DYj3rWMA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG +9w0BAQUFAAOCAgEAMXjmx7XfuJRAyXHEqDXsRh3ChfMoWIawC/yOsjmPRFWrZIRc +aanQmjg8+uUfNeVE44B5lGiku8SfPeE0zTBGi1QrlaXv9z+ZhP015s8xxtxqv6fX +IwjhmF7DWgh2qaavdy+3YL1ERmrvl/9zlcGO6JP7/TG37FcREUWbMPEaiDnBTzyn +ANXH/KttgCJwpQzgXQQpAvvLoJHRfNbDflDVnVi+QTjruXU8FdmbyUqDWcDaU/0z +uzYYm4UPFd3uLax2k7nZAY1IEKj79TiG8dsKxr2EoyNB3tZ3b4XUhRxQ4K5RirqN 
+Pnbiucon8l+f725ZDQbYKxek0nxru18UGkiPGkzns0ccjkxFKyDuSN/n3QmOGKja +QI2SJhFTYXNd673nxE0pN2HrrDktZy4W1vUAg4WhzH92xH3kt0tm7wNFYGm2DFKW +koRepqO1pD4r2czYG0eq8kTaT/kD6PAUyz/zg97QwVTjt+gKN02LIFkDMBmhLMi9 +ER/frslKxfMnZmaGrGiR/9nmUxwPi1xpZQomyB40w11Re9epnAahNt3ViZS82eQt +DF4JbAiXfKM9fJP/P6EUp8+1Xevb2xzEdt+Iub1FBZUbrvxGakyvSOPOrg/Sfuvm +bJxPgWp6ZKy7PtXny3YuxadIwVyQD8vIP/rmMuGNG2+k5o7Y+SlIis5z/iw= +-----END CERTIFICATE----- + +# Issuer: CN=GeoTrust Universal CA 2 O=GeoTrust Inc. +# Subject: CN=GeoTrust Universal CA 2 O=GeoTrust Inc. +# Label: "GeoTrust Universal CA 2" +# Serial: 1 +# MD5 Fingerprint: 34:fc:b8:d0:36:db:9e:14:b3:c2:f2:db:8f:e4:94:c7 +# SHA1 Fingerprint: 37:9a:19:7b:41:85:45:35:0c:a6:03:69:f3:3c:2e:af:47:4f:20:79 +# SHA256 Fingerprint: a0:23:4f:3b:c8:52:7c:a5:62:8e:ec:81:ad:5d:69:89:5d:a5:68:0d:c9:1d:1c:b8:47:7f:33:f8:78:b9:5b:0b +-----BEGIN CERTIFICATE----- +MIIFbDCCA1SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBHMQswCQYDVQQGEwJVUzEW +MBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEgMB4GA1UEAxMXR2VvVHJ1c3QgVW5pdmVy +c2FsIENBIDIwHhcNMDQwMzA0MDUwMDAwWhcNMjkwMzA0MDUwMDAwWjBHMQswCQYD +VQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEgMB4GA1UEAxMXR2VvVHJ1 +c3QgVW5pdmVyc2FsIENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC +AQCzVFLByT7y2dyxUxpZKeexw0Uo5dfR7cXFS6GqdHtXr0om/Nj1XqduGdt0DE81 +WzILAePb63p3NeqqWuDW6KFXlPCQo3RWlEQwAx5cTiuFJnSCegx2oG9NzkEtoBUG +FF+3Qs17j1hhNNwqCPkuwwGmIkQcTAeC5lvO0Ep8BNMZcyfwqph/Lq9O64ceJHdq +XbboW0W63MOhBW9Wjo8QJqVJwy7XQYci4E+GymC16qFjwAGXEHm9ADwSbSsVsaxL +se4YuU6W3Nx2/zu+z18DwPw76L5GG//aQMJS9/7jOvdqdzXQ2o3rXhhqMcceujwb +KNZrVMaqW9eiLBsZzKIC9ptZvTdrhrVtgrrY6slWvKk2WP0+GfPtDCapkzj4T8Fd +IgbQl+rhrcZV4IErKIM6+vR7IVEAvlI4zs1meaj0gVbi0IMJR1FbUGrP20gaXT73 +y/Zl92zxlfgCOzJWgjl6W70viRu/obTo/3+NjN8D8WBOWBFM66M/ECuDmgFz2ZRt +hAAnZqzwcEAJQpKtT5MNYQlRJNiS1QuUYbKHsu3/mjX/hVTK7URDrBs8FmtISgoc +QIgfksILAAX/8sgCSqSqqcyZlpwvWOB94b67B9xfBHJcMTTD7F8t4D1kkCLm0ey4 +Lt1ZrtmhN79UNdxzMk+MBB4zsslG8dhcyFVQyWi9qLo2CQIDAQABo2MwYTAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR281Xh+qQ2+/CfXGJx7Tz0RzgQKzAfBgNV +HSMEGDAWgBR281Xh+qQ2+/CfXGJx7Tz0RzgQKzAOBgNVHQ8BAf8EBAMCAYYwDQYJ +KoZIhvcNAQEFBQADggIBAGbBxiPz2eAubl/oz66wsCVNK/g7WJtAJDday6sWSf+z +dXkzoS9tcBc0kf5nfo/sm+VegqlVHy/c1FEHEv6sFj4sNcZj/NwQ6w2jqtB8zNHQ +L1EuxBRa3ugZ4T7GzKQp5y6EqgYweHZUcyiYWTjgAA1i00J9IZ+uPTqM1fp3DRgr +Fg5fNuH8KrUwJM/gYwx7WBr+mbpCErGR9Hxo4sjoryzqyX6uuyo9DRXcNJW2GHSo +ag/HtPQTxORb7QrSpJdMKu0vbBKJPfEncKpqA1Ihn0CoZ1Dy81of398j9tx4TuaY +T1U6U+Pv8vSfx3zYWK8pIpe44L2RLrB27FcRz+8pRPPphXpgY+RdM4kX2TGq2tbz +GDVyz4crL2MjhF2EjD9XoIj8mZEoJmmZ1I+XRL6O1UixpCgp8RW04eWe3fiPpm8m +1wk8OhwRDqZsN/etRIcsKMfYdIKz0G9KV7s1KSegi+ghp4dkNl3M2Basx7InQJJV +OCiNUW7dFGdTbHFcJoRNdVq2fmBWqU2t+5sel/MN2dKXVHfaPRK34B7vCAas+YWH +6aLcr34YEoP9VhdBLtUpgn2Z9DH2canPLAEnpQW5qrJITirvn5NSUZU8UnOOVkwX +QMAJKOSLakhT2+zNVVXxxvjpoixMptEmX36vWkzaH6byHCx+rgIW0lbQL1dTR+iS +-----END CERTIFICATE----- + +# Issuer: CN=AAA Certificate Services O=Comodo CA Limited +# Subject: CN=AAA Certificate Services O=Comodo CA Limited +# Label: "Comodo AAA Services root" +# Serial: 1 +# MD5 Fingerprint: 49:79:04:b0:eb:87:19:ac:47:b0:bc:11:51:9b:74:d0 +# SHA1 Fingerprint: d1:eb:23:a4:6d:17:d6:8f:d9:25:64:c2:f1:f1:60:17:64:d8:e3:49 +# SHA256 Fingerprint: d7:a7:a0:fb:5d:7e:27:31:d7:71:e9:48:4e:bc:de:f7:1d:5f:0c:3e:0a:29:48:78:2b:c8:3e:e0:ea:69:9e:f4 +-----BEGIN CERTIFICATE----- +MIIEMjCCAxqgAwIBAgIBATANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJHQjEb +MBkGA1UECAwSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRow +GAYDVQQKDBFDb21vZG8gQ0EgTGltaXRlZDEhMB8GA1UEAwwYQUFBIENlcnRpZmlj +YXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAwMDAwMFoXDTI4MTIzMTIzNTk1OVowezEL 
+MAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE +BwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxITAfBgNVBAMM +GEFBQSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBAL5AnfRu4ep2hxxNRUSOvkbIgwadwSr+GB+O5AL686tdUIoWMQua +BtDFcCLNSS1UY8y2bmhGC1Pqy0wkwLxyTurxFa70VJoSCsN6sjNg4tqJVfMiWPPe +3M/vg4aijJRPn2jymJBGhCfHdr/jzDUsi14HZGWCwEiwqJH5YZ92IFCokcdmtet4 +YgNW8IoaE+oxox6gmf049vYnMlhvB/VruPsUK6+3qszWY19zjNoFmag4qMsXeDZR +rOme9Hg6jc8P2ULimAyrL58OAd7vn5lJ8S3frHRNG5i1R8XlKdH5kBjHYpy+g8cm +ez6KJcfA3Z3mNWgQIJ2P2N7Sw4ScDV7oL8kCAwEAAaOBwDCBvTAdBgNVHQ4EFgQU +oBEKIz6W8Qfs4q8p74Klf9AwpLQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQF +MAMBAf8wewYDVR0fBHQwcjA4oDagNIYyaHR0cDovL2NybC5jb21vZG9jYS5jb20v +QUFBQ2VydGlmaWNhdGVTZXJ2aWNlcy5jcmwwNqA0oDKGMGh0dHA6Ly9jcmwuY29t +b2RvLm5ldC9BQUFDZXJ0aWZpY2F0ZVNlcnZpY2VzLmNybDANBgkqhkiG9w0BAQUF +AAOCAQEACFb8AvCb6P+k+tZ7xkSAzk/ExfYAWMymtrwUSWgEdujm7l3sAg9g1o1Q +GE8mTgHj5rCl7r+8dFRBv/38ErjHT1r0iWAFf2C3BUrz9vHCv8S5dIa2LX1rzNLz +Rt0vxuBqw8M0Ayx9lt1awg6nCpnBBYurDC/zXDrPbDdVCYfeU0BsWO/8tqtlbgT2 +G9w84FoVxp7Z8VlIMCFlA2zs6SFz7JsDoeA3raAVGI/6ugLOpyypEBMs1OUIJqsi +l2D4kF501KKaU73yqWjgom7C12yxow+ev+to51byrvLjKzg6CYG1a4XXvi3tPxq3 +smPi9WIsgtRqAEFQ8TmDn5XpNpaYbg== +-----END CERTIFICATE----- + +# Issuer: CN=QuoVadis Root Certification Authority O=QuoVadis Limited OU=Root Certification Authority +# Subject: CN=QuoVadis Root Certification Authority O=QuoVadis Limited OU=Root Certification Authority +# Label: "QuoVadis Root CA" +# Serial: 985026699 +# MD5 Fingerprint: 27:de:36:fe:72:b7:00:03:00:9d:f4:f0:1e:6c:04:24 +# SHA1 Fingerprint: de:3f:40:bd:50:93:d3:9b:6c:60:f6:da:bc:07:62:01:00:89:76:c9 +# SHA256 Fingerprint: a4:5e:de:3b:bb:f0:9c:8a:e1:5c:72:ef:c0:72:68:d6:93:a2:1c:99:6f:d5:1e:67:ca:07:94:60:fd:6d:88:73 +-----BEGIN CERTIFICATE----- +MIIF0DCCBLigAwIBAgIEOrZQizANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJC +TTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDElMCMGA1UECxMcUm9vdCBDZXJ0 +aWZpY2F0aW9uIEF1dGhvcml0eTEuMCwGA1UEAxMlUXVvVmFkaXMgUm9vdCBDZXJ0 +aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wMTAzMTkxODMzMzNaFw0yMTAzMTcxODMz +MzNaMH8xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMSUw +IwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MS4wLAYDVQQDEyVR +dW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEAv2G1lVO6V/z68mcLOhrfEYBklbTRvM16z/Yp +li4kVEAkOPcahdxYTMukJ0KX0J+DisPkBgNbAKVRHnAEdOLB1Dqr1607BxgFjv2D +rOpm2RgbaIr1VxqYuvXtdj182d6UajtLF8HVj71lODqV0D1VNk7feVcxKh7YWWVJ +WCCYfqtffp/p1k3sg3Spx2zY7ilKhSoGFPlU5tPaZQeLYzcS19Dsw3sgQUSj7cug +F+FxZc4dZjH3dgEZyH0DWLaVSR2mEiboxgx24ONmy+pdpibu5cxfvWenAScOospU +xbF6lR1xHkopigPcakXBpBlebzbNw6Kwt/5cOOJSvPhEQ+aQuwIDAQABo4ICUjCC +Ak4wPQYIKwYBBQUHAQEEMTAvMC0GCCsGAQUFBzABhiFodHRwczovL29jc3AucXVv +dmFkaXNvZmZzaG9yZS5jb20wDwYDVR0TAQH/BAUwAwEB/zCCARoGA1UdIASCAREw +ggENMIIBCQYJKwYBBAG+WAABMIH7MIHUBggrBgEFBQcCAjCBxxqBxFJlbGlhbmNl +IG9uIHRoZSBRdW9WYWRpcyBSb290IENlcnRpZmljYXRlIGJ5IGFueSBwYXJ0eSBh +c3N1bWVzIGFjY2VwdGFuY2Ugb2YgdGhlIHRoZW4gYXBwbGljYWJsZSBzdGFuZGFy +ZCB0ZXJtcyBhbmQgY29uZGl0aW9ucyBvZiB1c2UsIGNlcnRpZmljYXRpb24gcHJh +Y3RpY2VzLCBhbmQgdGhlIFF1b1ZhZGlzIENlcnRpZmljYXRlIFBvbGljeS4wIgYI +KwYBBQUHAgEWFmh0dHA6Ly93d3cucXVvdmFkaXMuYm0wHQYDVR0OBBYEFItLbe3T +KbkGGew5Oanwl4Rqy+/fMIGuBgNVHSMEgaYwgaOAFItLbe3TKbkGGew5Oanwl4Rq +y+/foYGEpIGBMH8xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1p +dGVkMSUwIwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MS4wLAYD +VQQDEyVRdW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggQ6tlCL +MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAitQUtf70mpKnGdSk 
+fnIYj9lofFIk3WdvOXrEql494liwTXCYhGHoG+NpGA7O+0dQoE7/8CQfvbLO9Sf8 +7C9TqnN7Az10buYWnuulLsS/VidQK2K6vkscPFVcQR0kvoIgR13VRH56FmjffU1R +cHhXHTMe/QKZnAzNCgVPx7uOpHX6Sm2xgI4JVrmcGmD+XcHXetwReNDWXcG31a0y +mQM6isxUJTkxgXsTIlG6Rmyhu576BGxJJnSP0nPrzDCi5upZIof4l/UO/erMkqQW +xFIY6iHOsfHmhIHluqmGKPJDWl0Snawe2ajlCmqnf6CHKc/yiU3U7MXi5nrQNiOK +SnQ2+Q== +-----END CERTIFICATE----- + +# Issuer: CN=QuoVadis Root CA 2 O=QuoVadis Limited +# Subject: CN=QuoVadis Root CA 2 O=QuoVadis Limited +# Label: "QuoVadis Root CA 2" +# Serial: 1289 +# MD5 Fingerprint: 5e:39:7b:dd:f8:ba:ec:82:e9:ac:62:ba:0c:54:00:2b +# SHA1 Fingerprint: ca:3a:fb:cf:12:40:36:4b:44:b2:16:20:88:80:48:39:19:93:7c:f7 +# SHA256 Fingerprint: 85:a0:dd:7d:d7:20:ad:b7:ff:05:f8:3d:54:2b:20:9d:c7:ff:45:28:f7:d6:77:b1:83:89:fe:a5:e5:c4:9e:86 +-----BEGIN CERTIFICATE----- +MIIFtzCCA5+gAwIBAgICBQkwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0x +GTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJv +b3QgQ0EgMjAeFw0wNjExMjQxODI3MDBaFw0zMTExMjQxODIzMzNaMEUxCzAJBgNV +BAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMRswGQYDVQQDExJRdW9W +YWRpcyBSb290IENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCa +GMpLlA0ALa8DKYrwD4HIrkwZhR0In6spRIXzL4GtMh6QRr+jhiYaHv5+HBg6XJxg +Fyo6dIMzMH1hVBHL7avg5tKifvVrbxi3Cgst/ek+7wrGsxDp3MJGF/hd/aTa/55J +WpzmM+Yklvc/ulsrHHo1wtZn/qtmUIttKGAr79dgw8eTvI02kfN/+NsRE8Scd3bB +rrcCaoF6qUWD4gXmuVbBlDePSHFjIuwXZQeVikvfj8ZaCuWw419eaxGrDPmF60Tp ++ARz8un+XJiM9XOva7R+zdRcAitMOeGylZUtQofX1bOQQ7dsE/He3fbE+Ik/0XX1 +ksOR1YqI0JDs3G3eicJlcZaLDQP9nL9bFqyS2+r+eXyt66/3FsvbzSUr5R/7mp/i +Ucw6UwxI5g69ybR2BlLmEROFcmMDBOAENisgGQLodKcftslWZvB1JdxnwQ5hYIiz +PtGo/KPaHbDRsSNU30R2be1B2MGyIrZTHN81Hdyhdyox5C315eXbyOD/5YDXC2Og +/zOhD7osFRXql7PSorW+8oyWHhqPHWykYTe5hnMz15eWniN9gqRMgeKh0bpnX5UH +oycR7hYQe7xFSkyyBNKr79X9DFHOUGoIMfmR2gyPZFwDwzqLID9ujWc9Otb+fVuI +yV77zGHcizN300QyNQliBJIWENieJ0f7OyHj+OsdWwIDAQABo4GwMIGtMA8GA1Ud +EwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1UdDgQWBBQahGK8SEwzJQTU7tD2 +A8QZRtGUazBuBgNVHSMEZzBlgBQahGK8SEwzJQTU7tD2A8QZRtGUa6FJpEcwRTEL +MAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMT +ElF1b1ZhZGlzIFJvb3QgQ0EgMoICBQkwDQYJKoZIhvcNAQEFBQADggIBAD4KFk2f +BluornFdLwUvZ+YTRYPENvbzwCYMDbVHZF34tHLJRqUDGCdViXh9duqWNIAXINzn +g/iN/Ae42l9NLmeyhP3ZRPx3UIHmfLTJDQtyU/h2BwdBR5YM++CCJpNVjP4iH2Bl +fF/nJrP3MpCYUNQ3cVX2kiF495V5+vgtJodmVjB3pjd4M1IQWK4/YY7yarHvGH5K +WWPKjaJW1acvvFYfzznB4vsKqBUsfU16Y8Zsl0Q80m/DShcK+JDSV6IZUaUtl0Ha +B0+pUNqQjZRG4T7wlP0QADj1O+hA4bRuVhogzG9Yje0uRY/W6ZM/57Es3zrWIozc +hLsib9D45MY56QSIPMO661V6bYCZJPVsAfv4l7CUW+v90m/xd2gNNWQjrLhVoQPR +TUIZ3Ph1WVaj+ahJefivDrkRoHy3au000LYmYjgahwz46P0u05B/B5EqHdZ+XIWD +mbA4CD/pXvk1B+TJYm5Xf6dQlfe6yJvmjqIBxdZmv3lh8zwc4bmCXF2gw+nYSL0Z +ohEUGW6yhhtoPkg3Goi3XZZenMfvJ2II4pEZXNLxId26F0KCl3GBUzGpn/Z9Yr9y +4aOTHcyKJloJONDO1w2AFrR4pTqHTI2KpdVGl/IsELm8VCLAAVBpQ570su9t+Oza +8eOx79+Rj1QqCyXBJhnEUhAFZdWCEOrCMc0u +-----END CERTIFICATE----- + +# Issuer: CN=QuoVadis Root CA 3 O=QuoVadis Limited +# Subject: CN=QuoVadis Root CA 3 O=QuoVadis Limited +# Label: "QuoVadis Root CA 3" +# Serial: 1478 +# MD5 Fingerprint: 31:85:3c:62:94:97:63:b9:aa:fd:89:4e:af:6f:e0:cf +# SHA1 Fingerprint: 1f:49:14:f7:d8:74:95:1d:dd:ae:02:c0:be:fd:3a:2d:82:75:51:85 +# SHA256 Fingerprint: 18:f1:fc:7f:20:5d:f8:ad:dd:eb:7f:e0:07:dd:57:e3:af:37:5a:9c:4d:8d:73:54:6b:f4:f1:fe:d1:e1:8d:35 +-----BEGIN CERTIFICATE----- +MIIGnTCCBIWgAwIBAgICBcYwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0x +GTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJv +b3QgQ0EgMzAeFw0wNjExMjQxOTExMjNaFw0zMTExMjQxOTA2NDRaMEUxCzAJBgNV 
+BAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMRswGQYDVQQDExJRdW9W +YWRpcyBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDM +V0IWVJzmmNPTTe7+7cefQzlKZbPoFog02w1ZkXTPkrgEQK0CSzGrvI2RaNggDhoB +4hp7Thdd4oq3P5kazethq8Jlph+3t723j/z9cI8LoGe+AaJZz3HmDyl2/7FWeUUr +H556VOijKTVopAFPD6QuN+8bv+OPEKhyq1hX51SGyMnzW9os2l2ObjyjPtr7guXd +8lyyBTNvijbO0BNO/79KDDRMpsMhvVAEVeuxu537RR5kFd5VAYwCdrXLoT9Cabwv +vWhDFlaJKjdhkf2mrk7AyxRllDdLkgbvBNDInIjbC3uBr7E9KsRlOni27tyAsdLT +mZw67mtaa7ONt9XOnMK+pUsvFrGeaDsGb659n/je7Mwpp5ijJUMv7/FfJuGITfhe +btfZFG4ZM2mnO4SJk8RTVROhUXhA+LjJou57ulJCg54U7QVSWllWp5f8nT8KKdjc +T5EOE7zelaTfi5m+rJsziO+1ga8bxiJTyPbH7pcUsMV8eFLI8M5ud2CEpukqdiDt +WAEXMJPpGovgc2PZapKUSU60rUqFxKMiMPwJ7Wgic6aIDFUhWMXhOp8q3crhkODZ +c6tsgLjoC2SToJyMGf+z0gzskSaHirOi4XCPLArlzW1oUevaPwV/izLmE1xr/l9A +4iLItLRkT9a6fUg+qGkM17uGcclzuD87nSVL2v9A6wIDAQABo4IBlTCCAZEwDwYD +VR0TAQH/BAUwAwEB/zCB4QYDVR0gBIHZMIHWMIHTBgkrBgEEAb5YAAMwgcUwgZMG +CCsGAQUFBwICMIGGGoGDQW55IHVzZSBvZiB0aGlzIENlcnRpZmljYXRlIGNvbnN0 +aXR1dGVzIGFjY2VwdGFuY2Ugb2YgdGhlIFF1b1ZhZGlzIFJvb3QgQ0EgMyBDZXJ0 +aWZpY2F0ZSBQb2xpY3kgLyBDZXJ0aWZpY2F0aW9uIFByYWN0aWNlIFN0YXRlbWVu +dC4wLQYIKwYBBQUHAgEWIWh0dHA6Ly93d3cucXVvdmFkaXNnbG9iYWwuY29tL2Nw +czALBgNVHQ8EBAMCAQYwHQYDVR0OBBYEFPLAE+CCQz777i9nMpY1XNu4ywLQMG4G +A1UdIwRnMGWAFPLAE+CCQz777i9nMpY1XNu4ywLQoUmkRzBFMQswCQYDVQQGEwJC +TTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDEbMBkGA1UEAxMSUXVvVmFkaXMg +Um9vdCBDQSAzggIFxjANBgkqhkiG9w0BAQUFAAOCAgEAT62gLEz6wPJv92ZVqyM0 +7ucp2sNbtrCD2dDQ4iH782CnO11gUyeim/YIIirnv6By5ZwkajGxkHon24QRiSem +d1o417+shvzuXYO8BsbRd2sPbSQvS3pspweWyuOEn62Iix2rFo1bZhfZFvSLgNLd ++LJ2w/w4E6oM3kJpK27zPOuAJ9v1pkQNn1pVWQvVDVJIxa6f8i+AxeoyUDUSly7B +4f/xI4hROJ/yZlZ25w9Rl6VSDE1JUZU2Pb+iSwwQHYaZTKrzchGT5Or2m9qoXadN +t54CrnMAyNojA+j56hl0YgCUyyIgvpSnWbWCar6ZeXqp8kokUvd0/bpO5qgdAm6x +DYBEwa7TIzdfu4V8K5Iu6H6li92Z4b8nby1dqnuH/grdS/yO9SbkbnBCbjPsMZ57 +k8HkyWkaPcBrTiJt7qtYTcbQQcEr6k8Sh17rRdhs9ZgC06DYVYoGmRmioHfRMJ6s +zHXug/WwYjnPbFfiTNKRCw51KBuav/0aQ/HKd/s7j2G4aSgWQgRecCocIdiP4b0j +Wy10QJLZYxkNc91pvGJHvOB0K7Lrfb5BG7XARsWhIstfTsEokt4YutUqKLsRixeT +mJlglFwjz1onl14LBQaTNx47aTbrqZ5hHY8y2o4M1nQ+ewkk2gF3R8Q7zTSMmfXK +4SVhM7JZG+Ju1zdXtg2pEto= +-----END CERTIFICATE----- + +# Issuer: O=SECOM Trust.net OU=Security Communication RootCA1 +# Subject: O=SECOM Trust.net OU=Security Communication RootCA1 +# Label: "Security Communication Root CA" +# Serial: 0 +# MD5 Fingerprint: f1:bc:63:6a:54:e0:b5:27:f5:cd:e7:1a:e3:4d:6e:4a +# SHA1 Fingerprint: 36:b1:2b:49:f9:81:9e:d7:4c:9e:bc:38:0f:c6:56:8f:5d:ac:b2:f7 +# SHA256 Fingerprint: e7:5e:72:ed:9f:56:0e:ec:6e:b4:80:00:73:a4:3f:c3:ad:19:19:5a:39:22:82:01:78:95:97:4a:99:02:6b:6c +-----BEGIN CERTIFICATE----- +MIIDWjCCAkKgAwIBAgIBADANBgkqhkiG9w0BAQUFADBQMQswCQYDVQQGEwJKUDEY +MBYGA1UEChMPU0VDT00gVHJ1c3QubmV0MScwJQYDVQQLEx5TZWN1cml0eSBDb21t +dW5pY2F0aW9uIFJvb3RDQTEwHhcNMDMwOTMwMDQyMDQ5WhcNMjMwOTMwMDQyMDQ5 +WjBQMQswCQYDVQQGEwJKUDEYMBYGA1UEChMPU0VDT00gVHJ1c3QubmV0MScwJQYD +VQQLEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTEwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQCzs/5/022x7xZ8V6UMbXaKL0u/ZPtM7orw8yl8 +9f/uKuDp6bpbZCKamm8sOiZpUQWZJtzVHGpxxpp9Hp3dfGzGjGdnSj74cbAZJ6kJ +DKaVv0uMDPpVmDvY6CKhS3E4eayXkmmziX7qIWgGmBSWh9JhNrxtJ1aeV+7AwFb9 +Ms+k2Y7CI9eNqPPYJayX5HA49LY6tJ07lyZDo6G8SVlyTCMwhwFY9k6+HGhWZq/N +QV3Is00qVUarH9oe4kA92819uZKAnDfdDJZkndwi92SL32HeFZRSFaB9UslLqCHJ +xrHty8OVYNEP8Ktw+N/LTX7s1vqr2b1/VPKl6Xn62dZ2JChzAgMBAAGjPzA9MB0G +A1UdDgQWBBSgc0mZaNyFW2XjmygvV5+9M7wHSDALBgNVHQ8EBAMCAQYwDwYDVR0T +AQH/BAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEAaECpqLvkT115swW1F7NgE+vG 
+kl3g0dNq/vu+m22/xwVtWSDEHPC32oRYAmP6SBbvT6UL90qY8j+eG61Ha2POCEfr +Uj94nK9NrvjVT8+amCoQQTlSxN3Zmw7vkwGusi7KaEIkQmywszo+zenaSMQVy+n5 +Bw+SUEmK3TGXX8npN6o7WWWXlDLJs58+OmJYxUmtYg5xpTKqL8aJdkNAExNnPaJU +JRDL8Try2frbSVa7pv6nQTXD4IhhyYjH3zYQIphZ6rBK+1YWc26sTfcioU+tHXot +RSflMMFe8toTyyVCUZVHA4xsIcx0Qu1T/zOLjw9XARYvz6buyXAiFL39vmwLAw== +-----END CERTIFICATE----- + +# Issuer: CN=Sonera Class2 CA O=Sonera +# Subject: CN=Sonera Class2 CA O=Sonera +# Label: "Sonera Class 2 Root CA" +# Serial: 29 +# MD5 Fingerprint: a3:ec:75:0f:2e:88:df:fa:48:01:4e:0b:5c:48:6f:fb +# SHA1 Fingerprint: 37:f7:6d:e6:07:7c:90:c5:b1:3e:93:1a:b7:41:10:b4:f2:e4:9a:27 +# SHA256 Fingerprint: 79:08:b4:03:14:c1:38:10:0b:51:8d:07:35:80:7f:fb:fc:f8:51:8a:00:95:33:71:05:ba:38:6b:15:3d:d9:27 +-----BEGIN CERTIFICATE----- +MIIDIDCCAgigAwIBAgIBHTANBgkqhkiG9w0BAQUFADA5MQswCQYDVQQGEwJGSTEP +MA0GA1UEChMGU29uZXJhMRkwFwYDVQQDExBTb25lcmEgQ2xhc3MyIENBMB4XDTAx +MDQwNjA3Mjk0MFoXDTIxMDQwNjA3Mjk0MFowOTELMAkGA1UEBhMCRkkxDzANBgNV +BAoTBlNvbmVyYTEZMBcGA1UEAxMQU29uZXJhIENsYXNzMiBDQTCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAJAXSjWdyvANlsdE+hY3/Ei9vX+ALTU74W+o +Z6m/AxxNjG8yR9VBaKQTBME1DJqEQ/xcHf+Js+gXGM2RX/uJ4+q/Tl18GybTdXnt +5oTjV+WtKcT0OijnpXuENmmz/V52vaMtmdOQTiMofRhj8VQ7Jp12W5dCsv+u8E7s +3TmVToMGf+dJQMjFAbJUWmYdPfz56TwKnoG4cPABi+QjVHzIrviQHgCWctRUz2Ej +vOr7nQKV0ba5cTppCD8PtOFCx4j1P5iop7oc4HFx71hXgVB6XGt0Rg6DA5jDjqhu +8nYybieDwnPz3BjotJPqdURrBGAgcVeHnfO+oJAjPYok4doh28MCAwEAAaMzMDEw +DwYDVR0TAQH/BAUwAwEB/zARBgNVHQ4ECgQISqCqWITTXjwwCwYDVR0PBAQDAgEG +MA0GCSqGSIb3DQEBBQUAA4IBAQBazof5FnIVV0sd2ZvnoiYw7JNn39Yt0jSv9zil +zqsWuasvfDXLrNAPtEwr/IDva4yRXzZ299uzGxnq9LIR/WFxRL8oszodv7ND6J+/ +3DEIcbCdjdY0RzKQxmUk96BKfARzjzlvF4xytb1LyHr4e4PDKE6cCepnP7JnBBvD +FNr450kkkdAdavphOe9r5yF1BgfYErQhIHBCcYHaPJo2vqZbDWpsmh+Re/n570K6 +Tk6ezAyNlNzZRZxe7EJQY670XcSxEtzKO6gunRRaBXW37Ndj4ro1tgQIkejanZz2 +ZrUYrAqmVCY0M9IbwdR/GjqOC6oybtv8TyWf2TLHllpwrN9M +-----END CERTIFICATE----- + +# Issuer: CN=XRamp Global Certification Authority O=XRamp Security Services Inc OU=www.xrampsecurity.com +# Subject: CN=XRamp Global Certification Authority O=XRamp Security Services Inc OU=www.xrampsecurity.com +# Label: "XRamp Global CA Root" +# Serial: 107108908803651509692980124233745014957 +# MD5 Fingerprint: a1:0b:44:b3:ca:10:d8:00:6e:9d:0f:d8:0f:92:0a:d1 +# SHA1 Fingerprint: b8:01:86:d1:eb:9c:86:a5:41:04:cf:30:54:f3:4c:52:b7:e5:58:c6 +# SHA256 Fingerprint: ce:cd:dc:90:50:99:d8:da:df:c5:b1:d2:09:b7:37:cb:e2:c1:8c:fb:2c:10:c0:ff:0b:cf:0d:32:86:fc:1a:a2 +-----BEGIN CERTIFICATE----- +MIIEMDCCAxigAwIBAgIQUJRs7Bjq1ZxN1ZfvdY+grTANBgkqhkiG9w0BAQUFADCB +gjELMAkGA1UEBhMCVVMxHjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEk +MCIGA1UEChMbWFJhbXAgU2VjdXJpdHkgU2VydmljZXMgSW5jMS0wKwYDVQQDEyRY +UmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQxMTAxMTcx +NDA0WhcNMzUwMTAxMDUzNzE5WjCBgjELMAkGA1UEBhMCVVMxHjAcBgNVBAsTFXd3 +dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2VjdXJpdHkgU2Vy +dmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBB +dXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCYJB69FbS6 +38eMpSe2OAtp87ZOqCwuIR1cRN8hXX4jdP5efrRKt6atH67gBhbim1vZZ3RrXYCP +KZ2GG9mcDZhtdhAoWORlsH9KmHmf4MMxfoArtYzAQDsRhtDLooY2YKTVMIJt2W7Q +DxIEM5dfT2Fa8OT5kavnHTu86M/0ay00fOJIYRyO82FEzG+gSqmUsE3a56k0enI4 +qEHMPJQRfevIpoy3hsvKMzvZPTeL+3o+hiznc9cKV6xkmxnr9A8ECIqsAxcZZPRa +JSKNNCyy9mgdEm3Tih4U2sSPpuIjhdV6Db1q4Ons7Be7QhtnqiXtRYMh/MHJfNVi +PvryxS3T/dRlAgMBAAGjgZ8wgZwwEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0P +BAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMZPoj0GY4QJnM5i5ASs 
+jVy16bYbMDYGA1UdHwQvMC0wK6ApoCeGJWh0dHA6Ly9jcmwueHJhbXBzZWN1cml0 +eS5jb20vWEdDQS5jcmwwEAYJKwYBBAGCNxUBBAMCAQEwDQYJKoZIhvcNAQEFBQAD +ggEBAJEVOQMBG2f7Shz5CmBbodpNl2L5JFMn14JkTpAuw0kbK5rc/Kh4ZzXxHfAR +vbdI4xD2Dd8/0sm2qlWkSLoC295ZLhVbO50WfUfXN+pfTXYSNrsf16GBBEYgoyxt +qZ4Bfj8pzgCT3/3JknOJiWSe5yvkHJEs0rnOfc5vMZnT5r7SHpDwCRR5XCOrTdLa +IR9NmXmd4c8nnxCbHIgNsIpkQTG4DmyQJKSbXHGPurt+HBvbaoAPIbzp26a3QPSy +i6mx5O+aGtA9aZnuqCij4Tyz8LIRnM98QObd50N9otg6tamN8jSZxNQQ4Qb9CYQQ +O+7ETPTsJ3xCwnR8gooJybQDJbw= +-----END CERTIFICATE----- + +# Issuer: O=The Go Daddy Group, Inc. OU=Go Daddy Class 2 Certification Authority +# Subject: O=The Go Daddy Group, Inc. OU=Go Daddy Class 2 Certification Authority +# Label: "Go Daddy Class 2 CA" +# Serial: 0 +# MD5 Fingerprint: 91:de:06:25:ab:da:fd:32:17:0c:bb:25:17:2a:84:67 +# SHA1 Fingerprint: 27:96:ba:e6:3f:18:01:e2:77:26:1b:a0:d7:77:70:02:8f:20:ee:e4 +# SHA256 Fingerprint: c3:84:6b:f2:4b:9e:93:ca:64:27:4c:0e:c6:7c:1e:cc:5e:02:4f:fc:ac:d2:d7:40:19:35:0e:81:fe:54:6a:e4 +-----BEGIN CERTIFICATE----- +MIIEADCCAuigAwIBAgIBADANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEh +MB8GA1UEChMYVGhlIEdvIERhZGR5IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBE +YWRkeSBDbGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA0MDYyOTE3 +MDYyMFoXDTM0MDYyOTE3MDYyMFowYzELMAkGA1UEBhMCVVMxITAfBgNVBAoTGFRo +ZSBHbyBEYWRkeSBHcm91cCwgSW5jLjExMC8GA1UECxMoR28gRGFkZHkgQ2xhc3Mg +MiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASAwDQYJKoZIhvcNAQEBBQADggEN +ADCCAQgCggEBAN6d1+pXGEmhW+vXX0iG6r7d/+TvZxz0ZWizV3GgXne77ZtJ6XCA +PVYYYwhv2vLM0D9/AlQiVBDYsoHUwHU9S3/Hd8M+eKsaA7Ugay9qK7HFiH7Eux6w +wdhFJ2+qN1j3hybX2C32qRe3H3I2TqYXP2WYktsqbl2i/ojgC95/5Y0V4evLOtXi +EqITLdiOr18SPaAIBQi2XKVlOARFmR6jYGB0xUGlcmIbYsUfb18aQr4CUWWoriMY +avx4A6lNf4DD+qta/KFApMoZFv6yyO9ecw3ud72a9nmYvLEHZ6IVDd2gWMZEewo+ +YihfukEHU1jPEX44dMX4/7VpkI+EdOqXG68CAQOjgcAwgb0wHQYDVR0OBBYEFNLE +sNKR1EwRcbNhyz2h/t2oatTjMIGNBgNVHSMEgYUwgYKAFNLEsNKR1EwRcbNhyz2h +/t2oatTjoWekZTBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYVGhlIEdvIERhZGR5 +IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRpZmlj +YXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQAD +ggEBADJL87LKPpH8EsahB4yOd6AzBhRckB4Y9wimPQoZ+YeAEW5p5JYXMP80kWNy +OO7MHAGjHZQopDH2esRU1/blMVgDoszOYtuURXO1v0XJJLXVggKtI3lpjbi2Tc7P +TMozI+gciKqdi0FuFskg5YmezTvacPd+mSYgFFQlq25zheabIZ0KbIIOqPjCDPoQ +HmyW74cNxA9hi63ugyuV+I6ShHI56yDqg+2DzZduCLzrTia2cyvk0/ZM/iZx4mER +dEr/VxqHD3VILs9RaRegAhJhldXRQLIQTO7ErBBDpqWeCtWVYpoNz4iCxTIM5Cuf +ReYNnyicsbkqWletNw+vHX/bvZ8= +-----END CERTIFICATE----- + +# Issuer: O=Starfield Technologies, Inc. OU=Starfield Class 2 Certification Authority +# Subject: O=Starfield Technologies, Inc. 
OU=Starfield Class 2 Certification Authority +# Label: "Starfield Class 2 CA" +# Serial: 0 +# MD5 Fingerprint: 32:4a:4b:bb:c8:63:69:9b:be:74:9a:c6:dd:1d:46:24 +# SHA1 Fingerprint: ad:7e:1c:28:b0:64:ef:8f:60:03:40:20:14:c3:d0:e3:37:0e:b5:8a +# SHA256 Fingerprint: 14:65:fa:20:53:97:b8:76:fa:a6:f0:a9:95:8e:55:90:e4:0f:cc:7f:aa:4f:b7:c2:c8:67:75:21:fb:5f:b6:58 +-----BEGIN CERTIFICATE----- +MIIEDzCCAvegAwIBAgIBADANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJVUzEl +MCMGA1UEChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMp +U3RhcmZpZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQw +NjI5MTczOTE2WhcNMzQwNjI5MTczOTE2WjBoMQswCQYDVQQGEwJVUzElMCMGA1UE +ChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMpU3RhcmZp +ZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEgMA0GCSqGSIb3 +DQEBAQUAA4IBDQAwggEIAoIBAQC3Msj+6XGmBIWtDBFk385N78gDGIc/oav7PKaf +8MOh2tTYbitTkPskpD6E8J7oX+zlJ0T1KKY/e97gKvDIr1MvnsoFAZMej2YcOadN ++lq2cwQlZut3f+dZxkqZJRRU6ybH838Z1TBwj6+wRir/resp7defqgSHo9T5iaU0 +X9tDkYI22WY8sbi5gv2cOj4QyDvvBmVmepsZGD3/cVE8MC5fvj13c7JdBmzDI1aa +K4UmkhynArPkPw2vCHmCuDY96pzTNbO8acr1zJ3o/WSNF4Azbl5KXZnJHoe0nRrA +1W4TNSNe35tfPe/W93bC6j67eA0cQmdrBNj41tpvi/JEoAGrAgEDo4HFMIHCMB0G +A1UdDgQWBBS/X7fRzt0fhvRbVazc1xDCDqmI5zCBkgYDVR0jBIGKMIGHgBS/X7fR +zt0fhvRbVazc1xDCDqmI56FspGowaDELMAkGA1UEBhMCVVMxJTAjBgNVBAoTHFN0 +YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAsTKVN0YXJmaWVsZCBD +bGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8w +DQYJKoZIhvcNAQEFBQADggEBAAWdP4id0ckaVaGsafPzWdqbAYcaT1epoXkJKtv3 +L7IezMdeatiDh6GX70k1PncGQVhiv45YuApnP+yz3SFmH8lU+nLMPUxA2IGvd56D +eruix/U0F47ZEUD0/CwqTRV/p2JdLiXTAAsgGh1o+Re49L2L7ShZ3U0WixeDyLJl +xy16paq8U4Zt3VekyvggQQto8PT7dL5WXXp59fkdheMtlb71cZBDzI0fmgAKhynp +VSJYACPq4xJDKVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEY +WQPJIrSPnNVeKtelttQKbfi3QBFGmh95DmK/D5fs4C8fF5Q= +-----END CERTIFICATE----- + +# Issuer: O=Government Root Certification Authority +# Subject: O=Government Root Certification Authority +# Label: "Taiwan GRCA" +# Serial: 42023070807708724159991140556527066870 +# MD5 Fingerprint: 37:85:44:53:32:45:1f:20:f0:f3:95:e1:25:c4:43:4e +# SHA1 Fingerprint: f4:8b:11:bf:de:ab:be:94:54:20:71:e6:41:de:6b:be:88:2b:40:b9 +# SHA256 Fingerprint: 76:00:29:5e:ef:e8:5b:9e:1f:d6:24:db:76:06:2a:aa:ae:59:81:8a:54:d2:77:4c:d4:c0:b2:c0:11:31:e1:b3 +-----BEGIN CERTIFICATE----- +MIIFcjCCA1qgAwIBAgIQH51ZWtcvwgZEpYAIaeNe9jANBgkqhkiG9w0BAQUFADA/ +MQswCQYDVQQGEwJUVzEwMC4GA1UECgwnR292ZXJubWVudCBSb290IENlcnRpZmlj +YXRpb24gQXV0aG9yaXR5MB4XDTAyMTIwNTEzMjMzM1oXDTMyMTIwNTEzMjMzM1ow +PzELMAkGA1UEBhMCVFcxMDAuBgNVBAoMJ0dvdmVybm1lbnQgUm9vdCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB +AJoluOzMonWoe/fOW1mKydGGEghU7Jzy50b2iPN86aXfTEc2pBsBHH8eV4qNw8XR +IePaJD9IK/ufLqGU5ywck9G/GwGHU5nOp/UKIXZ3/6m3xnOUT0b3EEk3+qhZSV1q +gQdW8or5BtD3cCJNtLdBuTK4sfCxw5w/cP1T3YGq2GN49thTbqGsaoQkclSGxtKy +yhwOeYHWtXBiCAEuTk8O1RGvqa/lmr/czIdtJuTJV6L7lvnM4T9TjGxMfptTCAts +F/tnyMKtsc2AtJfcdgEWFelq16TheEfOhtX7MfP6Mb40qij7cEwdScevLJ1tZqa2 +jWR+tSBqnTuBto9AAGdLiYa4zGX+FVPpBMHWXx1E1wovJ5pGfaENda1UhhXcSTvx +ls4Pm6Dso3pdvtUqdULle96ltqqvKKyskKw4t9VoNSZ63Pc78/1Fm9G7Q3hub/FC +VGqY8A2tl+lSXunVanLeavcbYBT0peS2cWeqH+riTcFCQP5nRhc4L0c/cZyu5SHK +YS1tB6iEfC3uUSXxY5Ce/eFXiGvviiNtsea9P63RPZYLhY3Naye7twWb7LuRqQoH +EgKXTiCQ8P8NHuJBO9NAOueNXdpm5AKwB1KYXA6OM5zCppX7VRluTI6uSw+9wThN +Xo+EHWbNxWCWtFJaBYmOlXqYwZE8lSOyDvR5tMl8wUohAgMBAAGjajBoMB0GA1Ud +DgQWBBTMzO/MKWCkO7GStjz6MmKPrCUVOzAMBgNVHRMEBTADAQH/MDkGBGcqBwAE +MTAvMC0CAQAwCQYFKw4DAhoFADAHBgVnKgMAAAQUA5vwIhP/lSg209yewDL7MTqK 
+UWUwDQYJKoZIhvcNAQEFBQADggIBAECASvomyc5eMN1PhnR2WPWus4MzeKR6dBcZ +TulStbngCnRiqmjKeKBMmo4sIy7VahIkv9Ro04rQ2JyftB8M3jh+Vzj8jeJPXgyf +qzvS/3WXy6TjZwj/5cAWtUgBfen5Cv8b5Wppv3ghqMKnI6mGq3ZW6A4M9hPdKmaK +ZEk9GhiHkASfQlK3T8v+R0F2Ne//AHY2RTKbxkaFXeIksB7jSJaYV0eUVXoPQbFE +JPPB/hprv4j9wabak2BegUqZIJxIZhm1AHlUD7gsL0u8qV1bYH+Mh6XgUmMqvtg7 +hUAV/h62ZT/FS9p+tXo1KaMuephgIqP0fSdOLeq0dDzpD6QzDxARvBMB1uUO07+1 +EqLhRSPAzAhuYbeJq4PjJB7mXQfnHyA+z2fI56wwbSdLaG5LKlwCCDTb+HbkZ6Mm +nD+iMsJKxYEYMRBWqoTvLQr/uB930r+lWKBi5NdLkXWNiYCYfm3LU05er/ayl4WX +udpVBrkk7tfGOB5jGxI7leFYrPLfhNVfmS8NVVvmONsuP3LpSIXLuykTjx44Vbnz +ssQwmSNOXfJIoRIM3BKQCZBUkQM8R+XVyWXgt0t97EfTsws+rZ7QdAAO671RrcDe +LMDDav7v3Aun+kbfYNucpllQdSNpc5Oy+fwC00fmcc4QAu4njIT/rEUNE1yDMuAl +pYYsfPQS +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Assured ID Root CA O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Assured ID Root CA O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Assured ID Root CA" +# Serial: 17154717934120587862167794914071425081 +# MD5 Fingerprint: 87:ce:0b:7b:2a:0e:49:00:e1:58:71:9b:37:a8:93:72 +# SHA1 Fingerprint: 05:63:b8:63:0d:62:d7:5a:bb:c8:ab:1e:4b:df:b5:a8:99:b2:4d:43 +# SHA256 Fingerprint: 3e:90:99:b5:01:5e:8f:48:6c:00:bc:ea:9d:11:1e:e7:21:fa:ba:35:5a:89:bc:f1:df:69:56:1e:3d:c6:32:5c +-----BEGIN CERTIFICATE----- +MIIDtzCCAp+gAwIBAgIQDOfg5RfYRv6P5WD8G/AwOTANBgkqhkiG9w0BAQUFADBl +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv +b3QgQ0EwHhcNMDYxMTEwMDAwMDAwWhcNMzExMTEwMDAwMDAwWjBlMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl +cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0EwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtDhXO5EOAXLGH87dg+XESpa7c +JpSIqvTO9SA5KFhgDPiA2qkVlTJhPLWxKISKityfCgyDF3qPkKyK53lTXDGEKvYP +mDI2dsze3Tyoou9q+yHyUmHfnyDXH+Kx2f4YZNISW1/5WBg1vEfNoTb5a3/UsDg+ +wRvDjDPZ2C8Y/igPs6eD1sNuRMBhNZYW/lmci3Zt1/GiSw0r/wty2p5g0I6QNcZ4 +VYcgoc/lbQrISXwxmDNsIumH0DJaoroTghHtORedmTpyoeb6pNnVFzF1roV9Iq4/ +AUaG9ih5yLHa5FcXxH4cDrC0kqZWs72yl+2qp/C3xag/lRbQ/6GW6whfGHdPAgMB +AAGjYzBhMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW +BBRF66Kv9JLLgjEtUYunpyGd823IDzAfBgNVHSMEGDAWgBRF66Kv9JLLgjEtUYun +pyGd823IDzANBgkqhkiG9w0BAQUFAAOCAQEAog683+Lt8ONyc3pklL/3cmbYMuRC +dWKuh+vy1dneVrOfzM4UKLkNl2BcEkxY5NM9g0lFWJc1aRqoR+pWxnmrEthngYTf +fwk8lOa4JiwgvT2zKIn3X/8i4peEH+ll74fg38FnSbNd67IJKusm7Xi+fT8r87cm +NW1fiQG2SVufAQWbqz0lwcy2f8Lxb4bG+mRo64EtlOtCt/qMHt1i8b5QZ7dsvfPx +H2sMNgcWfzd8qVttevESRmCD1ycEvkvOl77DZypoEd+A5wwzZr8TDRRu838fYxAe ++o0bJW1sj6W3YQGx0qMmoRBxna3iw/nDmVG3KwcIzi7mULKn+gpFL6Lw8g== +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Global Root CA O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Global Root CA O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Global Root CA" +# Serial: 10944719598952040374951832963794454346 +# MD5 Fingerprint: 79:e4:a9:84:0d:7d:3a:96:d7:c0:4f:e2:43:4c:89:2e +# SHA1 Fingerprint: a8:98:5d:3a:65:e5:e5:c4:b2:d7:d6:6d:40:c6:dd:2f:b1:9c:54:36 +# SHA256 Fingerprint: 43:48:a0:e9:44:4c:78:cb:26:5e:05:8d:5e:89:44:b4:d8:4f:96:62:bd:26:db:25:7f:89:34:a4:43:c7:01:61 +-----BEGIN CERTIFICATE----- +MIIDrzCCApegAwIBAgIQCDvgVpBCRrGhdWrJWZHHSjANBgkqhkiG9w0BAQUFADBh +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBD +QTAeFw0wNjExMTAwMDAwMDBaFw0zMTExMTAwMDAwMDBaMGExCzAJBgNVBAYTAlVT +MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j 
+b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IENBMIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4jvhEXLeqKTTo1eqUKKPC3eQyaKl7hLOllsB +CSDMAZOnTjC3U/dDxGkAV53ijSLdhwZAAIEJzs4bg7/fzTtxRuLWZscFs3YnFo97 +nh6Vfe63SKMI2tavegw5BmV/Sl0fvBf4q77uKNd0f3p4mVmFaG5cIzJLv07A6Fpt +43C/dxC//AH2hdmoRBBYMql1GNXRor5H4idq9Joz+EkIYIvUX7Q6hL+hqkpMfT7P +T19sdl6gSzeRntwi5m3OFBqOasv+zbMUZBfHWymeMr/y7vrTC0LUq7dBMtoM1O/4 +gdW7jVg/tRvoSSiicNoxBN33shbyTApOB6jtSj1etX+jkMOvJwIDAQABo2MwYTAO +BgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUA95QNVbR +TLtm8KPiGxvDl7I90VUwHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUw +DQYJKoZIhvcNAQEFBQADggEBAMucN6pIExIK+t1EnE9SsPTfrgT1eXkIoyQY/Esr +hMAtudXH/vTBH1jLuG2cenTnmCmrEbXjcKChzUyImZOMkXDiqw8cvpOp/2PV5Adg +06O/nVsJ8dWO41P0jmP6P6fbtGbfYmbW0W5BjfIttep3Sp+dWOIrWcBAI+0tKIJF +PnlUkiaY4IBIqDfv8NZ5YBberOgOzW6sRBc4L0na4UU+Krk2U886UAb3LujEV0ls +YSEY1QSteDwsOoBrp+uvFRTp2InBuThs4pFsiv9kuXclVzDAGySj4dzp30d8tbQk +CAUw7C29C79Fv1C5qfPrmAESrciIxpg0X40KPMbp1ZWVbd4= +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert High Assurance EV Root CA O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert High Assurance EV Root CA O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert High Assurance EV Root CA" +# Serial: 3553400076410547919724730734378100087 +# MD5 Fingerprint: d4:74:de:57:5c:39:b2:d3:9c:85:83:c5:c0:65:49:8a +# SHA1 Fingerprint: 5f:b7:ee:06:33:e2:59:db:ad:0c:4c:9a:e6:d3:8f:1a:61:c7:dc:25 +# SHA256 Fingerprint: 74:31:e5:f4:c3:c1:ce:46:90:77:4f:0b:61:e0:54:40:88:3b:a9:a0:1e:d0:0b:a6:ab:d7:80:6e:d3:b1:18:cf +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBs +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSswKQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5j +ZSBFViBSb290IENBMB4XDTA2MTExMDAwMDAwMFoXDTMxMTExMDAwMDAwMFowbDEL +MAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZMBcGA1UECxMQd3d3 +LmRpZ2ljZXJ0LmNvbTErMCkGA1UEAxMiRGlnaUNlcnQgSGlnaCBBc3N1cmFuY2Ug +RVYgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMbM5XPm ++9S75S0tMqbf5YE/yc0lSbZxKsPVlDRnogocsF9ppkCxxLeyj9CYpKlBWTrT3JTW +PNt0OKRKzE0lgvdKpVMSOO7zSW1xkX5jtqumX8OkhPhPYlG++MXs2ziS4wblCJEM +xChBVfvLWokVfnHoNb9Ncgk9vjo4UFt3MRuNs8ckRZqnrG0AFFoEt7oT61EKmEFB +Ik5lYYeBQVCmeVyJ3hlKV9Uu5l0cUyx+mM0aBhakaHPQNAQTXKFx01p8VdteZOE3 +hzBWBOURtCmAEvF5OYiiAhF8J2a3iLd48soKqDirCmTCv2ZdlYTBoSUeh10aUAsg +EsxBu24LUTi4S8sCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQF +MAMBAf8wHQYDVR0OBBYEFLE+w2kD+L9HAdSYJhoIAu9jZCvDMB8GA1UdIwQYMBaA +FLE+w2kD+L9HAdSYJhoIAu9jZCvDMA0GCSqGSIb3DQEBBQUAA4IBAQAcGgaX3Nec +nzyIZgYIVyHbIUf4KmeqvxgydkAQV8GK83rZEWWONfqe/EW1ntlMMUu4kehDLI6z +eM7b41N5cdblIZQB2lWHmiRk9opmzN6cN82oNLFpmyPInngiK3BD41VHMWEZ71jF +hS9OMPagMRYjyOfiZRYzy78aG6A9+MpeizGLYAiJLQwGXFK3xPkKmNEVX58Svnw2 +Yzi9RKR/5CYrCsSXaQ3pjOLAEFe4yHYSkVXySGnYvCoCWw9E1CAx2/S6cCZdkGCe +vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep ++OkuE6N36B9K +-----END CERTIFICATE----- + +# Issuer: CN=Class 2 Primary CA O=Certplus +# Subject: CN=Class 2 Primary CA O=Certplus +# Label: "Certplus Class 2 Primary CA" +# Serial: 177770208045934040241468760488327595043 +# MD5 Fingerprint: 88:2c:8c:52:b8:a2:3c:f3:f7:bb:03:ea:ae:ac:42:0b +# SHA1 Fingerprint: 74:20:74:41:72:9c:dd:92:ec:79:31:d8:23:10:8d:c2:81:92:e2:bb +# SHA256 Fingerprint: 0f:99:3c:8a:ef:97:ba:af:56:87:14:0e:d5:9a:d1:82:1b:b4:af:ac:f0:aa:9a:58:b5:d5:7a:33:8a:3a:fb:cb +-----BEGIN CERTIFICATE----- +MIIDkjCCAnqgAwIBAgIRAIW9S/PY2uNp9pTXX8OlRCMwDQYJKoZIhvcNAQEFBQAw +PTELMAkGA1UEBhMCRlIxETAPBgNVBAoTCENlcnRwbHVzMRswGQYDVQQDExJDbGFz 
+cyAyIFByaW1hcnkgQ0EwHhcNOTkwNzA3MTcwNTAwWhcNMTkwNzA2MjM1OTU5WjA9 +MQswCQYDVQQGEwJGUjERMA8GA1UEChMIQ2VydHBsdXMxGzAZBgNVBAMTEkNsYXNz +IDIgUHJpbWFyeSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANxQ +ltAS+DXSCHh6tlJw/W/uz7kRy1134ezpfgSN1sxvc0NXYKwzCkTsA18cgCSR5aiR +VhKC9+Ar9NuuYS6JEI1rbLqzAr3VNsVINyPi8Fo3UjMXEuLRYE2+L0ER4/YXJQyL +kcAbmXuZVg2v7tK8R1fjeUl7NIknJITesezpWE7+Tt9avkGtrAjFGA7v0lPubNCd +EgETjdyAYveVqUSISnFOYFWe2yMZeVYHDD9jC1yw4r5+FfyUM1hBOHTE4Y+L3yas +H7WLO7dDWWuwJKZtkIvEcupdM5i3y95ee++U8Rs+yskhwcWYAqqi9lt3m/V+llU0 +HGdpwPFC40es/CgcZlUCAwEAAaOBjDCBiTAPBgNVHRMECDAGAQH/AgEKMAsGA1Ud +DwQEAwIBBjAdBgNVHQ4EFgQU43Mt38sOKAze3bOkynm4jrvoMIkwEQYJYIZIAYb4 +QgEBBAQDAgEGMDcGA1UdHwQwMC4wLKAqoCiGJmh0dHA6Ly93d3cuY2VydHBsdXMu +Y29tL0NSTC9jbGFzczIuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQCnVM+IRBnL39R/ +AN9WM2K191EBkOvDP9GIROkkXe/nFL0gt5o8AP5tn9uQ3Nf0YtaLcF3n5QRIqWh8 +yfFC82x/xXp8HVGIutIKPidd3i1RTtMTZGnkLuPT55sJmabglZvOGtd/vjzOUrMR +FcEPF80Du5wlFbqidon8BvEY0JNLDnyCt6X09l/+7UCmnYR0ObncHoUW2ikbhiMA +ybuJfm6AiB4vFLQDJKgybwOaRywwvlbGp0ICcBvqQNi6BQNwB6SW//1IMwrh3KWB +kJtN3X3n57LNXMhqlfil9o3EXXgIvnsG1knPGTZQIy4I5p4FTUcY1Rbpsda2ENW7 +l7+ijrRU +-----END CERTIFICATE----- + +# Issuer: CN=DST Root CA X3 O=Digital Signature Trust Co. +# Subject: CN=DST Root CA X3 O=Digital Signature Trust Co. +# Label: "DST Root CA X3" +# Serial: 91299735575339953335919266965803778155 +# MD5 Fingerprint: 41:03:52:dc:0f:f7:50:1b:16:f0:02:8e:ba:6f:45:c5 +# SHA1 Fingerprint: da:c9:02:4f:54:d8:f6:df:94:93:5f:b1:73:26:38:ca:6a:d7:7c:13 +# SHA256 Fingerprint: 06:87:26:03:31:a7:24:03:d9:09:f1:05:e6:9b:cf:0d:32:e1:bd:24:93:ff:c6:d9:20:6d:11:bc:d6:77:07:39 +-----BEGIN CERTIFICATE----- +MIIDSjCCAjKgAwIBAgIQRK+wgNajJ7qJMDmGLvhAazANBgkqhkiG9w0BAQUFADA/ +MSQwIgYDVQQKExtEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdCBDby4xFzAVBgNVBAMT +DkRTVCBSb290IENBIFgzMB4XDTAwMDkzMDIxMTIxOVoXDTIxMDkzMDE0MDExNVow +PzEkMCIGA1UEChMbRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3QgQ28uMRcwFQYDVQQD +Ew5EU1QgUm9vdCBDQSBYMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +AN+v6ZdQCINXtMxiZfaQguzH0yxrMMpb7NnDfcdAwRgUi+DoM3ZJKuM/IUmTrE4O +rz5Iy2Xu/NMhD2XSKtkyj4zl93ewEnu1lcCJo6m67XMuegwGMoOifooUMM0RoOEq +OLl5CjH9UL2AZd+3UWODyOKIYepLYYHsUmu5ouJLGiifSKOeDNoJjj4XLh7dIN9b +xiqKqy69cK3FCxolkHRyxXtqqzTWMIn/5WgTe1QLyNau7Fqckh49ZLOMxt+/yUFw +7BZy1SbsOFU5Q9D8/RhcQPGX69Wam40dutolucbY38EVAjqr2m7xPi71XAicPNaD +aeQQmxkqtilX4+U9m5/wAl0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNV +HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFMSnsaR7LHH62+FLkHX/xBVghYkQMA0GCSqG +SIb3DQEBBQUAA4IBAQCjGiybFwBcqR7uKGY3Or+Dxz9LwwmglSBd49lZRNI+DT69 +ikugdB/OEIKcdBodfpga3csTS7MgROSR6cz8faXbauX+5v3gTt23ADq1cEmv8uXr +AvHRAosZy5Q6XkjEGB5YGV8eAlrwDPGxrancWYaLbumR9YbK+rlmM6pZW87ipxZz +R8srzJmwN0jP41ZL9c8PDHIyh8bwRLtTcm1D9SZImlJnt1ir/md2cXjbDaJWFBM5 +JDGFoqgCWjBH4d1QB7wCCZAA62RjYJsWvIjJEubSfZGL+T0yjWW06XyxV3bqxbYo +Ob8VZRzI9neWagqNdwvYkQsEjgfbKbYK7p2CNTUQ +-----END CERTIFICATE----- + +# Issuer: CN=SwissSign Gold CA - G2 O=SwissSign AG +# Subject: CN=SwissSign Gold CA - G2 O=SwissSign AG +# Label: "SwissSign Gold CA - G2" +# Serial: 13492815561806991280 +# MD5 Fingerprint: 24:77:d9:a8:91:d1:3b:fa:88:2d:c2:ff:f8:cd:33:93 +# SHA1 Fingerprint: d8:c5:38:8a:b7:30:1b:1b:6e:d4:7a:e6:45:25:3a:6f:9f:1a:27:61 +# SHA256 Fingerprint: 62:dd:0b:e9:b9:f5:0a:16:3e:a0:f8:e7:5c:05:3b:1e:ca:57:ea:55:c8:68:8f:64:7c:68:81:f2:c8:35:7b:95 +-----BEGIN CERTIFICATE----- +MIIFujCCA6KgAwIBAgIJALtAHEP1Xk+wMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNV +BAYTAkNIMRUwEwYDVQQKEwxTd2lzc1NpZ24gQUcxHzAdBgNVBAMTFlN3aXNzU2ln +biBHb2xkIENBIC0gRzIwHhcNMDYxMDI1MDgzMDM1WhcNMzYxMDI1MDgzMDM1WjBF 
+MQswCQYDVQQGEwJDSDEVMBMGA1UEChMMU3dpc3NTaWduIEFHMR8wHQYDVQQDExZT +d2lzc1NpZ24gR29sZCBDQSAtIEcyMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC +CgKCAgEAr+TufoskDhJuqVAtFkQ7kpJcyrhdhJJCEyq8ZVeCQD5XJM1QiyUqt2/8 +76LQwB8CJEoTlo8jE+YoWACjR8cGp4QjK7u9lit/VcyLwVcfDmJlD909Vopz2q5+ +bbqBHH5CjCA12UNNhPqE21Is8w4ndwtrvxEvcnifLtg+5hg3Wipy+dpikJKVyh+c +6bM8K8vzARO/Ws/BtQpgvd21mWRTuKCWs2/iJneRjOBiEAKfNA+k1ZIzUd6+jbqE +emA8atufK+ze3gE/bk3lUIbLtK/tREDFylqM2tIrfKjuvqblCqoOpd8FUrdVxyJd +MmqXl2MT28nbeTZ7hTpKxVKJ+STnnXepgv9VHKVxaSvRAiTysybUa9oEVeXBCsdt +MDeQKuSeFDNeFhdVxVu1yzSJkvGdJo+hB9TGsnhQ2wwMC3wLjEHXuendjIj3o02y +MszYF9rNt85mndT9Xv+9lz4pded+p2JYryU0pUHHPbwNUMoDAw8IWh+Vc3hiv69y +FGkOpeUDDniOJihC8AcLYiAQZzlG+qkDzAQ4embvIIO1jEpWjpEA/I5cgt6IoMPi +aG59je883WX0XaxR7ySArqpWl2/5rX3aYT+YdzylkbYcjCbaZaIJbcHiVOO5ykxM +gI93e2CaHt+28kgeDrpOVG2Y4OGiGqJ3UM/EY5LsRxmd6+ZrzsECAwEAAaOBrDCB +qTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUWyV7 +lqRlUX64OfPAeGZe6Drn8O4wHwYDVR0jBBgwFoAUWyV7lqRlUX64OfPAeGZe6Drn +8O4wRgYDVR0gBD8wPTA7BglghXQBWQECAQEwLjAsBggrBgEFBQcCARYgaHR0cDov +L3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBACe6 +45R88a7A3hfm5djV9VSwg/S7zV4Fe0+fdWavPOhWfvxyeDgD2StiGwC5+OlgzczO +UYrHUDFu4Up+GC9pWbY9ZIEr44OE5iKHjn3g7gKZYbge9LgriBIWhMIxkziWMaa5 +O1M/wySTVltpkuzFwbs4AOPsF6m43Md8AYOfMke6UiI0HTJ6CVanfCU2qT1L2sCC +bwq7EsiHSycR+R4tx5M/nttfJmtS2S6K8RTGRI0Vqbe/vd6mGu6uLftIdxf+u+yv +GPUqUfA5hJeVbG4bwyvEdGB5JbAKJ9/fXtI5z0V9QkvfsywexcZdylU6oJxpmo/a +77KwPJ+HbBIrZXAVUjEaJM9vMSNQH4xPjyPDdEFjHFWoFN0+4FFQz/EbMFYOkrCC +hdiDyyJkvC24JdVUorgG6q2SpCSgwYa1ShNqR88uC1aVVMvOmttqtKay20EIhid3 +92qgQmwLOM7XdVAyksLfKzAiSNDVQTglXaTpXZ/GlHXQRf0wl0OPkKsKx4ZzYEpp +Ld6leNcG2mqeSz53OiATIgHQv2ieY2BrNU0LbbqhPcCT4H8js1WtciVORvnSFu+w +ZMEBnunKoGqYDs/YYPIvSbjkQuE4NRb0yG5P94FW6LqjviOvrv1vA+ACOzB2+htt +Qc8Bsem4yWb02ybzOqR08kkkW8mw0FfB+j564ZfJ +-----END CERTIFICATE----- + +# Issuer: CN=SwissSign Silver CA - G2 O=SwissSign AG +# Subject: CN=SwissSign Silver CA - G2 O=SwissSign AG +# Label: "SwissSign Silver CA - G2" +# Serial: 5700383053117599563 +# MD5 Fingerprint: e0:06:a1:c9:7d:cf:c9:fc:0d:c0:56:75:96:d8:62:13 +# SHA1 Fingerprint: 9b:aa:e5:9f:56:ee:21:cb:43:5a:be:25:93:df:a7:f0:40:d1:1d:cb +# SHA256 Fingerprint: be:6c:4d:a2:bb:b9:ba:59:b6:f3:93:97:68:37:42:46:c3:c0:05:99:3f:a9:8f:02:0d:1d:ed:be:d4:8a:81:d5 +-----BEGIN CERTIFICATE----- +MIIFvTCCA6WgAwIBAgIITxvUL1S7L0swDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UE +BhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWdu +IFNpbHZlciBDQSAtIEcyMB4XDTA2MTAyNTA4MzI0NloXDTM2MTAyNTA4MzI0Nlow +RzELMAkGA1UEBhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMY +U3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMIICIjANBgkqhkiG9w0BAQEFAAOCAg8A +MIICCgKCAgEAxPGHf9N4Mfc4yfjDmUO8x/e8N+dOcbpLj6VzHVxumK4DV644N0Mv +Fz0fyM5oEMF4rhkDKxD6LHmD9ui5aLlV8gREpzn5/ASLHvGiTSf5YXu6t+WiE7br +YT7QbNHm+/pe7R20nqA1W6GSy/BJkv6FCgU+5tkL4k+73JU3/JHpMjUi0R86TieF +nbAVlDLaYQ1HTWBCrpJH6INaUFjpiou5XaHc3ZlKHzZnu0jkg7Y360g6rw9njxcH +6ATK72oxh9TAtvmUcXtnZLi2kUpCe2UuMGoM9ZDulebyzYLs2aFK7PayS+VFheZt +eJMELpyCbTapxDFkH4aDCyr0NQp4yVXPQbBH6TCfmb5hqAaEuSh6XzjZG6k4sIN/ +c8HDO0gqgg8hm7jMqDXDhBuDsz6+pJVpATqJAHgE2cn0mRmrVn5bi4Y5FZGkECwJ +MoBgs5PAKrYYC51+jUnyEEp/+dVGLxmSo5mnJqy7jDzmDrxHB9xzUfFwZC8I+bRH +HTBsROopN4WSaGa8gzj+ezku01DwH/teYLappvonQfGbGHLy9YR0SslnxFSuSGTf +jNFusB3hB48IHpmccelM2KX3RxIfdNFRnobzwqIjQAtz20um53MGjMGg6cFZrEb6 +5i/4z3GcRm25xBWNOHkDRUjvxF3XCO6HOSKGsg0PWEP3calILv3q1h8CAwEAAaOB +rDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU +F6DNweRBtjpbO8tFnb0cwpj6hlgwHwYDVR0jBBgwFoAUF6DNweRBtjpbO8tFnb0c 
+wpj6hlgwRgYDVR0gBD8wPTA7BglghXQBWQEDAQEwLjAsBggrBgEFBQcCARYgaHR0
+cDovL3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIB
+AHPGgeAn0i0P4JUw4ppBf1AsX19iYamGamkYDHRJ1l2E6kFSGG9YrVBWIGrGvShp
+WJHckRE1qTodvBqlYJ7YH39FkWnZfrt4csEGDyrOj4VwYaygzQu4OSlWhDJOhrs9
+xCrZ1x9y7v5RoSJBsXECYxqCsGKrXlcSH9/L3XWgwF15kIwb4FDm3jH+mHtwX6WQ
+2K34ArZv02DdQEsixT2tOnqfGhpHkXkzuoLcMmkDlm4fS/Bx/uNncqCxv1yL5PqZ
+IseEuRuNI5c/7SXgz2W79WEE790eslpBIlqhn10s6FvJbakMDHiqYMZWjwFaDGi8
+aRl5xB9+lwW/xekkUV7U1UtT7dkjWjYDZaPBA61BMPNGG4WQr2W11bHkFlt4dR2X
+em1ZqSqPe97Dh4kQmUlzeMg9vVE1dCrV8X5pGyq7O70luJpaPXJhkGaH7gzWTdQR
+dAtq/gsD/KNVV4n+SsuuWxcFyPKNIzFTONItaj+CuY0IavdeQXRuwxF+B6wpYJE/
+OMpXEA29MC/HpeZBoNquBYeaoKRlbEwJDIm6uNO5wJOKMPqN5ZprFQFOZ6raYlY+
+hAhm0sQ2fac+EPyI4NSA5QC9qvNOBqN6avlicuMJT+ubDgEj8Z+7fNzcbBGXJbLy
+tGMU0gYqZ4yD9c7qB9iaah7s5Aq7KkzrCWA5zspi2C5u
+-----END CERTIFICATE-----
+
+# Issuer: CN=GeoTrust Primary Certification Authority O=GeoTrust Inc.
+# Subject: CN=GeoTrust Primary Certification Authority O=GeoTrust Inc.
+# Label: "GeoTrust Primary Certification Authority"
+# Serial: 32798226551256963324313806436981982369
+# MD5 Fingerprint: 02:26:c3:01:5e:08:30:37:43:a9:d0:7d:cf:37:e6:bf
+# SHA1 Fingerprint: 32:3c:11:8e:1b:f7:b8:b6:52:54:e2:e2:10:0d:d6:02:90:37:f0:96
+# SHA256 Fingerprint: 37:d5:10:06:c5:12:ea:ab:62:64:21:f1:ec:8c:92:01:3f:c5:f8:2a:e9:8e:e5:33:eb:46:19:b8:de:b4:d0:6c
+-----BEGIN CERTIFICATE-----
+MIIDfDCCAmSgAwIBAgIQGKy1av1pthU6Y2yv2vrEoTANBgkqhkiG9w0BAQUFADBY
+MQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjExMC8GA1UEAxMo
+R2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjEx
+MjcwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMFgxCzAJBgNVBAYTAlVTMRYwFAYDVQQK
+Ew1HZW9UcnVzdCBJbmMuMTEwLwYDVQQDEyhHZW9UcnVzdCBQcmltYXJ5IENlcnRp
+ZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
+AQEAvrgVe//UfH1nrYNke8hCUy3f9oQIIGHWAVlqnEQRr+92/ZV+zmEwu3qDXwK9
+AWbK7hWNb6EwnL2hhZ6UOvNWiAAxz9juapYC2e0DjPt1befquFUWBRaa9OBesYjA
+ZIVcFU2Ix7e64HXprQU9nceJSOC7KMgD4TCTZF5SwFlwIjVXiIrxlQqD17wxcwE0
+7e9GceBrAqg1cmuXm2bgyxx5X9gaBGgeRwLmnWDiNpcB3841kt++Z8dtd1k7j53W
+kBWUvEI0EME5+bEnPn7WinXFsq+W06Lem+SYvn3h6YGttm/81w7a4DSwDRp35+MI
+mO9Y+pyEtzavwt+s0vQQBnBxNQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4G
+A1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQULNVQQZcVi/CPNmFbSvtr2ZnJM5IwDQYJ
+KoZIhvcNAQEFBQADggEBAFpwfyzdtzRP9YZRqSa+S7iq8XEN3GHHoOo0Hnp3DwQ1
+6CePbJC/kRYkRj5KTs4rFtULUh38H2eiAkUxT87z+gOneZ1TatnaYzr4gNfTmeGl
+4b7UVXGYNTq+k+qurUKykG/g/CFNNWMziUnWm07Kx+dOCQD32sfvmWKZd7aVIl6K
+oKv0uHiYyjgZmclynnjNS6yvGaBzEi38wkG6gZHaFloxt/m0cYASSJlyc1pZU8Fj
+UjPtp8nSOQJw+uCxQmYpqptR7TBUIhRf2asdweSU8Pj1K/fqynhG1riR/aYNKxoU
+AT6A8EKglQdebc3MS6RFjasS6LPeWuWgfOgPIh1a6Vk=
+-----END CERTIFICATE-----
+
+# Issuer: CN=thawte Primary Root CA O=thawte, Inc. OU=Certification Services Division/(c) 2006 thawte, Inc. - For authorized use only
+# Subject: CN=thawte Primary Root CA O=thawte, Inc. OU=Certification Services Division/(c) 2006 thawte, Inc. - For authorized use only
+# Label: "thawte Primary Root CA"
+# Serial: 69529181992039203566298953787712940909
+# MD5 Fingerprint: 8c:ca:dc:0b:22:ce:f5:be:72:ac:41:1a:11:a8:d8:12
+# SHA1 Fingerprint: 91:c6:d6:ee:3e:8a:c8:63:84:e5:48:c2:99:29:5c:75:6c:81:7b:81
+# SHA256 Fingerprint: 8d:72:2f:81:a9:c1:13:c0:79:1d:f1:36:a2:96:6d:b2:6c:95:0a:97:1d:b4:6b:41:99:f4:ea:54:b7:8b:fb:9f
+-----BEGIN CERTIFICATE-----
+MIIEIDCCAwigAwIBAgIQNE7VVyDV7exJ9C/ON9srbTANBgkqhkiG9w0BAQUFADCB
+qTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMf
+Q2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIw
+MDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxHzAdBgNV
+BAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwHhcNMDYxMTE3MDAwMDAwWhcNMzYw
+NzE2MjM1OTU5WjCBqTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5j
+LjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYG
+A1UECxMvKGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl
+IG9ubHkxHzAdBgNVBAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwggEiMA0GCSqG
+SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCsoPD7gFnUnMekz52hWXMJEEUMDSxuaPFs
+W0hoSVk3/AszGcJ3f8wQLZU0HObrTQmnHNK4yZc2AreJ1CRfBsDMRJSUjQJib+ta
+3RGNKJpchJAQeg29dGYvajig4tVUROsdB58Hum/u6f1OCyn1PoSgAfGcq/gcfomk
+6KHYcWUNo1F77rzSImANuVud37r8UVsLr5iy6S7pBOhih94ryNdOwUxkHt3Ph1i6
+Sk/KaAcdHJ1KxtUvkcx8cXIcxcBn6zL9yZJclNqFwJu/U30rCfSMnZEfl2pSy94J
+NqR32HuHUETVPm4pafs5SSYeCaWAe0At6+gnhcn+Yf1+5nyXHdWdAgMBAAGjQjBA
+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBR7W0XP
+r87Lev0xkhpqtvNG61dIUDANBgkqhkiG9w0BAQUFAAOCAQEAeRHAS7ORtvzw6WfU
+DW5FvlXok9LOAz/t2iWwHVfLHjp2oEzsUHboZHIMpKnxuIvW1oeEuzLlQRHAd9mz
+YJ3rG9XRbkREqaYB7FViHXe4XI5ISXycO1cRrK1zN44veFyQaEfZYGDm/Ac9IiAX
+xPcW6cTYcvnIc3zfFi8VqT79aie2oetaupgf1eNNZAqdE8hhuvU5HIe6uL17In/2
+/qxAeeWsEG89jxt5dovEN7MhGITlNgDrYyCZuen+MwS7QcjBAvlEYyCegc5C09Y/
+LHbTY5xZ3Y+m4Q6gLkH3LpVHz7z9M/P2C2F+fpErgUfCJzDupxBdN49cOSvkBPB7
+jVaMaA==
+-----END CERTIFICATE-----
+
+# Issuer: CN=VeriSign Class 3 Public Primary Certification Authority - G5 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2006 VeriSign, Inc. - For authorized use only
+# Subject: CN=VeriSign Class 3 Public Primary Certification Authority - G5 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2006 VeriSign, Inc. - For authorized use only
+# Label: "VeriSign Class 3 Public Primary Certification Authority - G5"
+# Serial: 33037644167568058970164719475676101450
+# MD5 Fingerprint: cb:17:e4:31:67:3e:e2:09:fe:45:57:93:f3:0a:fa:1c
+# SHA1 Fingerprint: 4e:b6:d5:78:49:9b:1c:cf:5f:58:1e:ad:56:be:3d:9b:67:44:a5:e5
+# SHA256 Fingerprint: 9a:cf:ab:7e:43:c8:d8:80:d0:6b:26:2a:94:de:ee:e4:b4:65:99:89:c3:d0:ca:f1:9b:af:64:05:e4:1a:b7:df
+-----BEGIN CERTIFICATE-----
+MIIE0zCCA7ugAwIBAgIQGNrRniZ96LtKIVjNzGs7SjANBgkqhkiG9w0BAQUFADCB
+yjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQL
+ExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJp
+U2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxW
+ZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0
+aG9yaXR5IC0gRzUwHhcNMDYxMTA4MDAwMDAwWhcNMzYwNzE2MjM1OTU5WjCByjEL
+MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZW
+ZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJpU2ln
+biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJp
+U2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9y
+aXR5IC0gRzUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvJAgIKXo1
+nmAMqudLO07cfLw8RRy7K+D+KQL5VwijZIUVJ/XxrcgxiV0i6CqqpkKzj/i5Vbex
+t0uz/o9+B1fs70PbZmIVYc9gDaTY3vjgw2IIPVQT60nKWVSFJuUrjxuf6/WhkcIz
+SdhDY2pSS9KP6HBRTdGJaXvHcPaz3BJ023tdS1bTlr8Vd6Gw9KIl8q8ckmcY5fQG
+BO+QueQA5N06tRn/Arr0PO7gi+s3i+z016zy9vA9r911kTMZHRxAy3QkGSGT2RT+
+rCpSx4/VBEnkjWNHiDxpg8v+R70rfk/Fla4OndTRQ8Bnc+MUCH7lP59zuDMKz10/
+NIeWiu5T6CUVAgMBAAGjgbIwga8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8E
+BAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJaW1hZ2UvZ2lmMCEwHzAH
+BgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYjaHR0cDovL2xvZ28udmVy
+aXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFH/TZafC3ey78DAJ80M5+gKv
+MzEzMA0GCSqGSIb3DQEBBQUAA4IBAQCTJEowX2LP2BqYLz3q3JktvXf2pXkiOOzE
+p6B4Eq1iDkVwZMXnl2YtmAl+X6/WzChl8gGqCBpH3vn5fJJaCGkgDdk+bW48DW7Y
+5gaRQBi5+MHt39tBquCWIMnNZBU4gcmU7qKEKQsTb47bDN0lAtukixlE0kF6BWlK
+WE9gyn6CagsCqiUXObXbf+eEZSqVir2G3l6BFoMtEMze/aiCKm0oHw0LxOXnGiYZ
+4fQRbxC1lfznQgUy286dUV4otp6F01vvpX1FQHKOtw5rDgb7MzVIcbidJ4vEZV8N
+hnacRHr2lVz2XTIIM6RUthg/aFzyQkqFOFSDX9HoLPKsEdao7WNq
+-----END CERTIFICATE-----
+
+# Issuer: CN=SecureTrust CA O=SecureTrust Corporation
+# Subject: CN=SecureTrust CA O=SecureTrust Corporation
+# Label: "SecureTrust CA"
+# Serial: 17199774589125277788362757014266862032
+# MD5 Fingerprint: dc:32:c3:a7:6d:25:57:c7:68:09:9d:ea:2d:a9:a2:d1
+# SHA1 Fingerprint: 87:82:c6:c3:04:35:3b:cf:d2:96:92:d2:59:3e:7d:44:d9:34:ff:11
+# SHA256 Fingerprint: f1:c1:b5:0a:e5:a2:0d:d8:03:0e:c9:f6:bc:24:82:3d:d3:67:b5:25:57:59:b4:e7:1b:61:fc:e9:f7:37:5d:73
+-----BEGIN CERTIFICATE-----
+MIIDuDCCAqCgAwIBAgIQDPCOXAgWpa1Cf/DrJxhZ0DANBgkqhkiG9w0BAQUFADBI
+MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x
+FzAVBgNVBAMTDlNlY3VyZVRydXN0IENBMB4XDTA2MTEwNzE5MzExOFoXDTI5MTIz
+MTE5NDA1NVowSDELMAkGA1UEBhMCVVMxIDAeBgNVBAoTF1NlY3VyZVRydXN0IENv
+cnBvcmF0aW9uMRcwFQYDVQQDEw5TZWN1cmVUcnVzdCBDQTCCASIwDQYJKoZIhvcN
+AQEBBQADggEPADCCAQoCggEBAKukgeWVzfX2FI7CT8rU4niVWJxB4Q2ZQCQXOZEz
+Zum+4YOvYlyJ0fwkW2Gz4BERQRwdbvC4u/jep4G6pkjGnx29vo6pQT64lO0pGtSO
+0gMdA+9tDWccV9cGrcrI9f4Or2YlSASWC12juhbDCE/RRvgUXPLIXgGZbf2IzIao
+wW8xQmxSPmjL8xk037uHGFaAJsTQ3MBv396gwpEWoGQRS0S8Hvbn+mPeZqx2pHGj
+7DaUaHp3pLHnDi+BeuK1cobvomuL8A/b01k/unK8RCSc43Oz969XL0Imnal0ugBS
+8kvNU3xHCzaFDmapCJcWNFfBZveA4+1wVMeT4C4oFVmHursCAwEAAaOBnTCBmjAT
+BgkrBgEEAYI3FAIEBh4EAEMAQTALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB
+/zAdBgNVHQ4EFgQUQjK2FvoE/f5dS3rD/fdMQB1aQ68wNAYDVR0fBC0wKzApoCeg
+JYYjaHR0cDovL2NybC5zZWN1cmV0cnVzdC5jb20vU1RDQS5jcmwwEAYJKwYBBAGC +NxUBBAMCAQAwDQYJKoZIhvcNAQEFBQADggEBADDtT0rhWDpSclu1pqNlGKa7UTt3 +6Z3q059c4EVlew3KW+JwULKUBRSuSceNQQcSc5R+DCMh/bwQf2AQWnL1mA6s7Ll/ +3XpvXdMc9P+IBWlCqQVxyLesJugutIxq/3HcuLHfmbx8IVQr5Fiiu1cprp6poxkm +D5kuCLDv/WnPmRoJjeOnnyvJNjR7JLN4TJUXpAYmHrZkUjZfYGfZnMUFdAvnZyPS +CPyI6a6Lf+Ew9Dd+/cYy2i2eRDAwbO4H3tI0/NL/QPZL9GZGBlSm8jIKYyYwa5vR +3ItHuuG51WLQoqD0ZwV4KWMabwTW+MZMo5qxN7SN5ShLHZ4swrhovO0C7jE= +-----END CERTIFICATE----- + +# Issuer: CN=Secure Global CA O=SecureTrust Corporation +# Subject: CN=Secure Global CA O=SecureTrust Corporation +# Label: "Secure Global CA" +# Serial: 9751836167731051554232119481456978597 +# MD5 Fingerprint: cf:f4:27:0d:d4:ed:dc:65:16:49:6d:3d:da:bf:6e:de +# SHA1 Fingerprint: 3a:44:73:5a:e5:81:90:1f:24:86:61:46:1e:3b:9c:c4:5f:f5:3a:1b +# SHA256 Fingerprint: 42:00:f5:04:3a:c8:59:0e:bb:52:7d:20:9e:d1:50:30:29:fb:cb:d4:1c:a1:b5:06:ec:27:f1:5a:de:7d:ac:69 +-----BEGIN CERTIFICATE----- +MIIDvDCCAqSgAwIBAgIQB1YipOjUiolN9BPI8PjqpTANBgkqhkiG9w0BAQUFADBK +MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x +GTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwgQ0EwHhcNMDYxMTA3MTk0MjI4WhcNMjkx +MjMxMTk1MjA2WjBKMQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3Qg +Q29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwgQ0EwggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvNS7YrGxVaQZx5RNoJLNP2MwhR/jxYDiJ +iQPpvepeRlMJ3Fz1Wuj3RSoC6zFh1ykzTM7HfAo3fg+6MpjhHZevj8fcyTiW89sa +/FHtaMbQbqR8JNGuQsiWUGMu4P51/pinX0kuleM5M2SOHqRfkNJnPLLZ/kG5VacJ +jnIFHovdRIWCQtBJwB1g8NEXLJXr9qXBkqPFwqcIYA1gBBCWeZ4WNOaptvolRTnI +HmX5k/Wq8VLcmZg9pYYaDDUz+kulBAYVHDGA76oYa8J719rO+TMg1fW9ajMtgQT7 +sFzUnKPiXB3jqUJ1XnvUd+85VLrJChgbEplJL4hL/VBi0XPnj3pDAgMBAAGjgZ0w +gZowEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1UdEwEB/wQF +MAMBAf8wHQYDVR0OBBYEFK9EBMJBfkiD2045AuzshHrmzsmkMDQGA1UdHwQtMCsw +KaAnoCWGI2h0dHA6Ly9jcmwuc2VjdXJldHJ1c3QuY29tL1NHQ0EuY3JsMBAGCSsG +AQQBgjcVAQQDAgEAMA0GCSqGSIb3DQEBBQUAA4IBAQBjGghAfaReUw132HquHw0L +URYD7xh8yOOvaliTFGCRsoTciE6+OYo68+aCiV0BN7OrJKQVDpI1WkpEXk5X+nXO +H0jOZvQ8QCaSmGwb7iRGDBezUqXbpZGRzzfTb+cnCDpOGR86p1hcF895P4vkp9Mm +I50mD1hp/Ed+stCNi5O/KU9DaXR2Z0vPB4zmAve14bRDtUstFJ/53CYNv6ZHdAbY +iNE6KTCEztI5gGIbqMdXSbxqVVFnFUq+NQfk1XWYN3kwFNspnWzFacxHVaIw98xc +f8LDmBxrThaA63p4ZUWiABqvDA1VZDRIuJK58bRQKfJPIx/abKwfROHdI3hRW8cW +-----END CERTIFICATE----- + +# Issuer: CN=COMODO Certification Authority O=COMODO CA Limited +# Subject: CN=COMODO Certification Authority O=COMODO CA Limited +# Label: "COMODO Certification Authority" +# Serial: 104350513648249232941998508985834464573 +# MD5 Fingerprint: 5c:48:dc:f7:42:72:ec:56:94:6d:1c:cc:71:35:80:75 +# SHA1 Fingerprint: 66:31:bf:9e:f7:4f:9e:b6:c9:d5:a6:0c:ba:6a:be:d1:f7:bd:ef:7b +# SHA256 Fingerprint: 0c:2c:d6:3d:f7:80:6f:a3:99:ed:e8:09:11:6b:57:5b:f8:79:89:f0:65:18:f9:80:8c:86:05:03:17:8b:af:66 +-----BEGIN CERTIFICATE----- +MIIEHTCCAwWgAwIBAgIQToEtioJl4AsC7j41AkblPTANBgkqhkiG9w0BAQUFADCB +gTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G +A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxJzAlBgNV +BAMTHkNPTU9ETyBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjEyMDEwMDAw +MDBaFw0yOTEyMzEyMzU5NTlaMIGBMQswCQYDVQQGEwJHQjEbMBkGA1UECBMSR3Jl +YXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHEwdTYWxmb3JkMRowGAYDVQQKExFDT01P +RE8gQ0EgTGltaXRlZDEnMCUGA1UEAxMeQ09NT0RPIENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0ECLi3LjkRv3 +UcEbVASY06m/weaKXTuH+7uIzg3jLz8GlvCiKVCZrts7oVewdFFxze1CkU1B/qnI +2GqGd0S7WWaXUF601CxwRM/aN5VCaTwwxHGzUvAhTaHYujl8HJ6jJJ3ygxaYqhZ8 
+Q5sVW7euNJH+1GImGEaaP+vB+fGQV+useg2L23IwambV4EajcNxo2f8ESIl33rXp ++2dtQem8Ob0y2WIC8bGoPW43nOIv4tOiJovGuFVDiOEjPqXSJDlqR6sA1KGzqSX+ +DT+nHbrTUcELpNqsOO9VUCQFZUaTNE8tja3G1CEZ0o7KBWFxB3NH5YoZEr0ETc5O +nKVIrLsm9wIDAQABo4GOMIGLMB0GA1UdDgQWBBQLWOWLxkwVN6RAqTCpIb5HNlpW +/zAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zBJBgNVHR8EQjBAMD6g +PKA6hjhodHRwOi8vY3JsLmNvbW9kb2NhLmNvbS9DT01PRE9DZXJ0aWZpY2F0aW9u +QXV0aG9yaXR5LmNybDANBgkqhkiG9w0BAQUFAAOCAQEAPpiem/Yb6dc5t3iuHXIY +SdOH5EOC6z/JqvWote9VfCFSZfnVDeFs9D6Mk3ORLgLETgdxb8CPOGEIqB6BCsAv +IC9Bi5HcSEW88cbeunZrM8gALTFGTO3nnc+IlP8zwFboJIYmuNg4ON8qa90SzMc/ +RxdMosIGlgnW2/4/PEZB31jiVg88O8EckzXZOFKs7sjsLjBOlDW0JB9LeGna8gI4 +zJVSk/BwJVmcIGfE7vmLV2H0knZ9P4SNVbfo5azV8fUZVqZa+5Acr5Pr5RzUZ5dd +BA6+C4OmF4O5MBKgxTMVBbkN+8cFduPYSo38NBejxiEovjBFMR7HeL5YYTisO+IB +ZQ== +-----END CERTIFICATE----- + +# Issuer: CN=Network Solutions Certificate Authority O=Network Solutions L.L.C. +# Subject: CN=Network Solutions Certificate Authority O=Network Solutions L.L.C. +# Label: "Network Solutions Certificate Authority" +# Serial: 116697915152937497490437556386812487904 +# MD5 Fingerprint: d3:f3:a6:16:c0:fa:6b:1d:59:b1:2d:96:4d:0e:11:2e +# SHA1 Fingerprint: 74:f8:a3:c3:ef:e7:b3:90:06:4b:83:90:3c:21:64:60:20:e5:df:ce +# SHA256 Fingerprint: 15:f0:ba:00:a3:ac:7a:f3:ac:88:4c:07:2b:10:11:a0:77:bd:77:c0:97:f4:01:64:b2:f8:59:8a:bd:83:86:0c +-----BEGIN CERTIFICATE----- +MIID5jCCAs6gAwIBAgIQV8szb8JcFuZHFhfjkDFo4DANBgkqhkiG9w0BAQUFADBi +MQswCQYDVQQGEwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMu +MTAwLgYDVQQDEydOZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3Jp +dHkwHhcNMDYxMjAxMDAwMDAwWhcNMjkxMjMxMjM1OTU5WjBiMQswCQYDVQQGEwJV +UzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMuMTAwLgYDVQQDEydO +ZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDkvH6SMG3G2I4rC7xGzuAnlt7e+foS0zwz +c7MEL7xxjOWftiJgPl9dzgn/ggwbmlFQGiaJ3dVhXRncEg8tCqJDXRfQNJIg6nPP +OCwGJgl6cvf6UDL4wpPTaaIjzkGxzOTVHzbRijr4jGPiFFlp7Q3Tf2vouAPlT2rl +mGNpSAW+Lv8ztumXWWn4Zxmuk2GWRBXTcrA/vGp97Eh/jcOrqnErU2lBUzS1sLnF +BgrEsEX1QV1uiUV7PTsmjHTC5dLRfbIR1PtYMiKagMnc/Qzpf14Dl847ABSHJ3A4 +qY5usyd2mFHgBeMhqxrVhSI8KbWaFsWAqPS7azCPL0YCorEMIuDTAgMBAAGjgZcw +gZQwHQYDVR0OBBYEFCEwyfsA106Y2oeqKtCnLrFAMadMMA4GA1UdDwEB/wQEAwIB +BjAPBgNVHRMBAf8EBTADAQH/MFIGA1UdHwRLMEkwR6BFoEOGQWh0dHA6Ly9jcmwu +bmV0c29sc3NsLmNvbS9OZXR3b3JrU29sdXRpb25zQ2VydGlmaWNhdGVBdXRob3Jp +dHkuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQC7rkvnt1frf6ott3NHhWrB5KUd5Oc8 +6fRZZXe1eltajSU24HqXLjjAV2CDmAaDn7l2em5Q4LqILPxFzBiwmZVRDuwduIj/ +h1AcgsLj4DKAv6ALR8jDMe+ZZzKATxcheQxpXN5eNK4CtSbqUN9/GGUsyfJj4akH +/nxxH2szJGoeBfcFaMBqEssuXmHLrijTfsK0ZpEmXzwuJF/LWA/rKOyvEZbz3Htv +wKeI8lN3s2Berq4o2jUsbzRF0ybh3uxbTydrFny9RAQYgrOJeRcQcT16ohZO9QHN +pGxlaKFJdlxDydi8NmdspZS11My5vWo1ViHe2MPr+8ukYEywVaCge1ey +-----END CERTIFICATE----- + +# Issuer: CN=COMODO ECC Certification Authority O=COMODO CA Limited +# Subject: CN=COMODO ECC Certification Authority O=COMODO CA Limited +# Label: "COMODO ECC Certification Authority" +# Serial: 41578283867086692638256921589707938090 +# MD5 Fingerprint: 7c:62:ff:74:9d:31:53:5e:68:4a:d5:78:aa:1e:bf:23 +# SHA1 Fingerprint: 9f:74:4e:9f:2b:4d:ba:ec:0f:31:2c:50:b6:56:3b:8e:2d:93:c3:11 +# SHA256 Fingerprint: 17:93:92:7a:06:14:54:97:89:ad:ce:2f:8f:34:f7:f0:b6:6d:0f:3a:e3:a3:b8:4d:21:ec:15:db:ba:4f:ad:c7 +-----BEGIN CERTIFICATE----- +MIICiTCCAg+gAwIBAgIQH0evqmIAcFBUTAGem2OZKjAKBggqhkjOPQQDAzCBhTEL +MAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE +BxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMT 
+IkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwMzA2MDAw +MDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdy +ZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09N +T0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlv +biBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQDR3svdcmCFYX7deSR +FtSrYpn1PlILBs5BAH+X4QokPB0BBO490o0JlwzgdeT6+3eKKvUDYEs2ixYjFq0J +cfRK9ChQtP6IHG4/bC8vCVlbpVsLM5niwz2J+Wos77LTBumjQjBAMB0GA1UdDgQW +BBR1cacZSBm8nZ3qQUfflMRId5nTeTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjEA7wNbeqy3eApyt4jf/7VGFAkK+qDm +fQjGGoe9GKhzvSbKYAydzpmfz1wPMOG+FDHqAjAU9JM8SaczepBGR7NjfRObTrdv +GDeAU/7dIOA1mjbRxwG55tzd8/8dLDoWV9mSOdY= +-----END CERTIFICATE----- + +# Issuer: CN=OISTE WISeKey Global Root GA CA O=WISeKey OU=Copyright (c) 2005/OISTE Foundation Endorsed +# Subject: CN=OISTE WISeKey Global Root GA CA O=WISeKey OU=Copyright (c) 2005/OISTE Foundation Endorsed +# Label: "OISTE WISeKey Global Root GA CA" +# Serial: 86718877871133159090080555911823548314 +# MD5 Fingerprint: bc:6c:51:33:a7:e9:d3:66:63:54:15:72:1b:21:92:93 +# SHA1 Fingerprint: 59:22:a1:e1:5a:ea:16:35:21:f8:98:39:6a:46:46:b0:44:1b:0f:a9 +# SHA256 Fingerprint: 41:c9:23:86:6a:b4:ca:d6:b7:ad:57:80:81:58:2e:02:07:97:a6:cb:df:4f:ff:78:ce:83:96:b3:89:37:d7:f5 +-----BEGIN CERTIFICATE----- +MIID8TCCAtmgAwIBAgIQQT1yx/RrH4FDffHSKFTfmjANBgkqhkiG9w0BAQUFADCB +ijELMAkGA1UEBhMCQ0gxEDAOBgNVBAoTB1dJU2VLZXkxGzAZBgNVBAsTEkNvcHly +aWdodCAoYykgMjAwNTEiMCAGA1UECxMZT0lTVEUgRm91bmRhdGlvbiBFbmRvcnNl +ZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9iYWwgUm9vdCBHQSBDQTAeFw0w +NTEyMTExNjAzNDRaFw0zNzEyMTExNjA5NTFaMIGKMQswCQYDVQQGEwJDSDEQMA4G +A1UEChMHV0lTZUtleTEbMBkGA1UECxMSQ29weXJpZ2h0IChjKSAyMDA1MSIwIAYD +VQQLExlPSVNURSBGb3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBX +SVNlS2V5IEdsb2JhbCBSb290IEdBIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAy0+zAJs9Nt350UlqaxBJH+zYK7LG+DKBKUOVTJoZIyEVRd7jyBxR +VVuuk+g3/ytr6dTqvirdqFEr12bDYVxgAsj1znJ7O7jyTmUIms2kahnBAbtzptf2 +w93NvKSLtZlhuAGio9RN1AU9ka34tAhxZK9w8RxrfvbDd50kc3vkDIzh2TbhmYsF +mQvtRTEJysIA2/dyoJaqlYfQjse2YXMNdmaM3Bu0Y6Kff5MTMPGhJ9vZ/yxViJGg +4E8HsChWjBgbl0SOid3gF27nKu+POQoxhILYQBRJLnpB5Kf+42TMwVlxSywhp1t9 +4B3RLoGbw9ho972WG6xwsRYUC9tguSYBBQIDAQABo1EwTzALBgNVHQ8EBAMCAYYw +DwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUswN+rja8sHnR3JQmthG+IbJphpQw +EAYJKwYBBAGCNxUBBAMCAQAwDQYJKoZIhvcNAQEFBQADggEBAEuh/wuHbrP5wUOx +SPMowB0uyQlB+pQAHKSkq0lPjz0e701vvbyk9vImMMkQyh2I+3QZH4VFvbBsUfk2 +ftv1TDI6QU9bR8/oCy22xBmddMVHxjtqD6wU2zz0c5ypBd8A3HR4+vg1YFkCExh8 +vPtNsCBtQ7tgMHpnM1zFmdH4LTlSc/uMqpclXHLZCB6rTjzjgTGfA6b7wP4piFXa +hNVQA7bihKOmNqoROgHhGEvWRGizPflTdISzRpFGlgC3gCy24eMQ4tui5yiPAZZi +Fj4A4xylNoEYokxSdsARo27mHbrjWr42U8U+dY+GaSlYU7Wcu2+fXMUY7N0v4ZjJ +/L7fCg0= +-----END CERTIFICATE----- + +# Issuer: CN=Certigna O=Dhimyotis +# Subject: CN=Certigna O=Dhimyotis +# Label: "Certigna" +# Serial: 18364802974209362175 +# MD5 Fingerprint: ab:57:a6:5b:7d:42:82:19:b5:d8:58:26:28:5e:fd:ff +# SHA1 Fingerprint: b1:2e:13:63:45:86:a4:6f:1a:b2:60:68:37:58:2d:c4:ac:fd:94:97 +# SHA256 Fingerprint: e3:b6:a2:db:2e:d7:ce:48:84:2f:7a:c5:32:41:c7:b7:1d:54:14:4b:fb:40:c1:1f:3f:1d:0b:42:f5:ee:a1:2d +-----BEGIN CERTIFICATE----- +MIIDqDCCApCgAwIBAgIJAP7c4wEPyUj/MA0GCSqGSIb3DQEBBQUAMDQxCzAJBgNV +BAYTAkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hMB4X +DTA3MDYyOTE1MTMwNVoXDTI3MDYyOTE1MTMwNVowNDELMAkGA1UEBhMCRlIxEjAQ +BgNVBAoMCURoaW15b3RpczERMA8GA1UEAwwIQ2VydGlnbmEwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQDIaPHJ1tazNHUmgh7stL7qXOEm7RFHYeGifBZ4 
+QCHkYJ5ayGPhxLGWkv8YbWkj4Sti993iNi+RB7lIzw7sebYs5zRLcAglozyHGxny +gQcPOJAZ0xH+hrTy0V4eHpbNgGzOOzGTtvKg0KmVEn2lmsxryIRWijOp5yIVUxbw +zBfsV1/pogqYCd7jX5xv3EjjhQsVWqa6n6xI4wmy9/Qy3l40vhx4XUJbzg4ij02Q +130yGLMLLGq/jj8UEYkgDncUtT2UCIf3JR7VsmAA7G8qKCVuKj4YYxclPz5EIBb2 +JsglrgVKtOdjLPOMFlN+XPsRGgjBRmKfIrjxwo1p3Po6WAbfAgMBAAGjgbwwgbkw +DwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUGu3+QTmQtCRZvgHyUtVF9lo53BEw +ZAYDVR0jBF0wW4AUGu3+QTmQtCRZvgHyUtVF9lo53BGhOKQ2MDQxCzAJBgNVBAYT +AkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hggkA/tzj +AQ/JSP8wDgYDVR0PAQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzANBgkqhkiG +9w0BAQUFAAOCAQEAhQMeknH2Qq/ho2Ge6/PAD/Kl1NqV5ta+aDY9fm4fTIrv0Q8h +bV6lUmPOEvjvKtpv6zf+EwLHyzs+ImvaYS5/1HI93TDhHkxAGYwP15zRgzB7mFnc +fca5DClMoTOi62c6ZYTTluLtdkVwj7Ur3vkj1kluPBS1xp81HlDQwY9qcEQCYsuu +HWhBp6pX6FOqB9IG9tUUBguRA3UsbHK1YZWaDYu5Def131TN3ubY1gkIl2PlwS6w +t0QmwCbAr1UwnjvVNioZBPRcHv/PLLf/0P2HQBHVESO7SMAhqaQoLf0V+LBOK/Qw +WyH8EZE0vkHve52Xdf+XlcCWWC/qu0bXu+TZLg== +-----END CERTIFICATE----- + +# Issuer: CN=Deutsche Telekom Root CA 2 O=Deutsche Telekom AG OU=T-TeleSec Trust Center +# Subject: CN=Deutsche Telekom Root CA 2 O=Deutsche Telekom AG OU=T-TeleSec Trust Center +# Label: "Deutsche Telekom Root CA 2" +# Serial: 38 +# MD5 Fingerprint: 74:01:4a:91:b1:08:c4:58:ce:47:cd:f0:dd:11:53:08 +# SHA1 Fingerprint: 85:a4:08:c0:9c:19:3e:5d:51:58:7d:cd:d6:13:30:fd:8c:de:37:bf +# SHA256 Fingerprint: b6:19:1a:50:d0:c3:97:7f:7d:a9:9b:cd:aa:c8:6a:22:7d:ae:b9:67:9e:c7:0b:a3:b0:c9:d9:22:71:c1:70:d3 +-----BEGIN CERTIFICATE----- +MIIDnzCCAoegAwIBAgIBJjANBgkqhkiG9w0BAQUFADBxMQswCQYDVQQGEwJERTEc +MBoGA1UEChMTRGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0GA1UECxMWVC1UZWxlU2Vj +IFRydXN0IENlbnRlcjEjMCEGA1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBSb290IENB +IDIwHhcNOTkwNzA5MTIxMTAwWhcNMTkwNzA5MjM1OTAwWjBxMQswCQYDVQQGEwJE +RTEcMBoGA1UEChMTRGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0GA1UECxMWVC1UZWxl +U2VjIFRydXN0IENlbnRlcjEjMCEGA1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBSb290 +IENBIDIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCrC6M14IspFLEU +ha88EOQ5bzVdSq7d6mGNlUn0b2SjGmBmpKlAIoTZ1KXleJMOaAGtuU1cOs7TuKhC +QN/Po7qCWWqSG6wcmtoIKyUn+WkjR/Hg6yx6m/UTAtB+NHzCnjwAWav12gz1Mjwr +rFDa1sPeg5TKqAyZMg4ISFZbavva4VhYAUlfckE8FQYBjl2tqriTtM2e66foai1S +NNs671x1Udrb8zH57nGYMsRUFUQM+ZtV7a3fGAigo4aKSe5TBY8ZTNXeWHmb0moc +QqvF1afPaA+W5OFhmHZhyJF81j4A4pFQh+GdCuatl9Idxjp9y7zaAzTVjlsB9WoH +txa2bkp/AgMBAAGjQjBAMB0GA1UdDgQWBBQxw3kbuvVT1xfgiXotF2wKsyudMzAP +BgNVHRMECDAGAQH/AgEFMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOC +AQEAlGRZrTlk5ynrE/5aw4sTV8gEJPB0d8Bg42f76Ymmg7+Wgnxu1MM9756Abrsp +tJh6sTtU6zkXR34ajgv8HzFZMQSyzhfzLMdiNlXiItiJVbSYSKpk+tYcNthEeFpa +IzpXl/V6ME+un2pMSyuOoAPjPuCp1NJ70rOo4nI8rZ7/gFnkm0W09juwzTkZmDLl +6iFhkOQxIY40sfcvNUqFENrnijchvllj4PKFiDFT1FQUhXB59C4Gdyd1Lx+4ivn+ +xbrYNuSD7Odlt79jWvNGr4GUN9RBjNYj1h7P9WgbRGOiWrqnNVmh5XAFmw4jV5mU +Cm26OWMohpLzGITY+9HPBVZkVw== +-----END CERTIFICATE----- + +# Issuer: CN=Cybertrust Global Root O=Cybertrust, Inc +# Subject: CN=Cybertrust Global Root O=Cybertrust, Inc +# Label: "Cybertrust Global Root" +# Serial: 4835703278459682877484360 +# MD5 Fingerprint: 72:e4:4a:87:e3:69:40:80:77:ea:bc:e3:f4:ff:f0:e1 +# SHA1 Fingerprint: 5f:43:e5:b1:bf:f8:78:8c:ac:1c:c7:ca:4a:9a:c6:22:2b:cc:34:c6 +# SHA256 Fingerprint: 96:0a:df:00:63:e9:63:56:75:0c:29:65:dd:0a:08:67:da:0b:9c:bd:6e:77:71:4a:ea:fb:23:49:ab:39:3d:a3 +-----BEGIN CERTIFICATE----- +MIIDoTCCAomgAwIBAgILBAAAAAABD4WqLUgwDQYJKoZIhvcNAQEFBQAwOzEYMBYG +A1UEChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2Jh +bCBSb290MB4XDTA2MTIxNTA4MDAwMFoXDTIxMTIxNTA4MDAwMFowOzEYMBYGA1UE 
+ChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2JhbCBS +b290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA+Mi8vRRQZhP/8NN5 +7CPytxrHjoXxEnOmGaoQ25yiZXRadz5RfVb23CO21O1fWLE3TdVJDm71aofW0ozS +J8bi/zafmGWgE07GKmSb1ZASzxQG9Dvj1Ci+6A74q05IlG2OlTEQXO2iLb3VOm2y +HLtgwEZLAfVJrn5GitB0jaEMAs7u/OePuGtm839EAL9mJRQr3RAwHQeWP032a7iP +t3sMpTjr3kfb1V05/Iin89cqdPHoWqI7n1C6poxFNcJQZZXcY4Lv3b93TZxiyWNz +FtApD0mpSPCzqrdsxacwOUBdrsTiXSZT8M4cIwhhqJQZugRiQOwfOHB3EgZxpzAY +XSUnpQIDAQABo4GlMIGiMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/ +MB0GA1UdDgQWBBS2CHsNesysIEyGVjJez6tuhS1wVzA/BgNVHR8EODA2MDSgMqAw +hi5odHRwOi8vd3d3Mi5wdWJsaWMtdHJ1c3QuY29tL2NybC9jdC9jdHJvb3QuY3Js +MB8GA1UdIwQYMBaAFLYIew16zKwgTIZWMl7Pq26FLXBXMA0GCSqGSIb3DQEBBQUA +A4IBAQBW7wojoFROlZfJ+InaRcHUowAl9B8Tq7ejhVhpwjCt2BWKLePJzYFa+HMj +Wqd8BfP9IjsO0QbE2zZMcwSO5bAi5MXzLqXZI+O4Tkogp24CJJ8iYGd7ix1yCcUx +XOl5n4BHPa2hCwcUPUf/A2kaDAtE52Mlp3+yybh2hO0j9n0Hq0V+09+zv+mKts2o +omcrUtW3ZfA5TGOgkXmTUg9U3YO7n9GPp1Nzw8v/MOx8BLjYRB+TX3EJIrduPuoc +A06dGiBh+4E37F78CkWr1+cXVdCg6mCbpvbjjFspwgZgFJ0tl0ypkxWdYcQBX0jW +WL1WMRJOEcgh4LMRkWXbtKaIOM5V +-----END CERTIFICATE----- + +# Issuer: O=Chunghwa Telecom Co., Ltd. OU=ePKI Root Certification Authority +# Subject: O=Chunghwa Telecom Co., Ltd. OU=ePKI Root Certification Authority +# Label: "ePKI Root Certification Authority" +# Serial: 28956088682735189655030529057352760477 +# MD5 Fingerprint: 1b:2e:00:ca:26:06:90:3d:ad:fe:6f:15:68:d3:6b:b3 +# SHA1 Fingerprint: 67:65:0d:f1:7e:8e:7e:5b:82:40:a4:f4:56:4b:cf:e2:3d:69:c6:f0 +# SHA256 Fingerprint: c0:a6:f4:dc:63:a2:4b:fd:cf:54:ef:2a:6a:08:2a:0a:72:de:35:80:3e:2f:f5:ff:52:7a:e5:d8:72:06:df:d5 +-----BEGIN CERTIFICATE----- +MIIFsDCCA5igAwIBAgIQFci9ZUdcr7iXAF7kBtK8nTANBgkqhkiG9w0BAQUFADBe +MQswCQYDVQQGEwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0 +ZC4xKjAoBgNVBAsMIWVQS0kgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAe +Fw0wNDEyMjAwMjMxMjdaFw0zNDEyMjAwMjMxMjdaMF4xCzAJBgNVBAYTAlRXMSMw +IQYDVQQKDBpDaHVuZ2h3YSBUZWxlY29tIENvLiwgTHRkLjEqMCgGA1UECwwhZVBL +SSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIICIjANBgkqhkiG9w0BAQEF +AAOCAg8AMIICCgKCAgEA4SUP7o3biDN1Z82tH306Tm2d0y8U82N0ywEhajfqhFAH +SyZbCUNsIZ5qyNUD9WBpj8zwIuQf5/dqIjG3LBXy4P4AakP/h2XGtRrBp0xtInAh +ijHyl3SJCRImHJ7K2RKilTza6We/CKBk49ZCt0Xvl/T29de1ShUCWH2YWEtgvM3X +DZoTM1PRYfl61dd4s5oz9wCGzh1NlDivqOx4UXCKXBCDUSH3ET00hl7lSM2XgYI1 +TBnsZfZrxQWh7kcT1rMhJ5QQCtkkO7q+RBNGMD+XPNjX12ruOzjjK9SXDrkb5wdJ +fzcq+Xd4z1TtW0ado4AOkUPB1ltfFLqfpo0kR0BZv3I4sjZsN/+Z0V0OWQqraffA +sgRFelQArr5T9rXn4fg8ozHSqf4hUmTFpmfwdQcGlBSBVcYn5AGPF8Fqcde+S/uU +WH1+ETOxQvdibBjWzwloPn9s9h6PYq2lY9sJpx8iQkEeb5mKPtf5P0B6ebClAZLS +nT0IFaUQAS2zMnaolQ2zepr7BxB4EW/hj8e6DyUadCrlHJhBmd8hh+iVBmoKs2pH +dmX2Os+PYhcZewoozRrSgx4hxyy/vv9haLdnG7t4TY3OZ+XkwY63I2binZB1NJip +NiuKmpS5nezMirH4JYlcWrYvjB9teSSnUmjDhDXiZo1jDiVN1Rmy5nk3pyKdVDEC +AwEAAaNqMGgwHQYDVR0OBBYEFB4M97Zn8uGSJglFwFU5Lnc/QkqiMAwGA1UdEwQF +MAMBAf8wOQYEZyoHAAQxMC8wLQIBADAJBgUrDgMCGgUAMAcGBWcqAwAABBRFsMLH +ClZ87lt4DJX5GFPBphzYEDANBgkqhkiG9w0BAQUFAAOCAgEACbODU1kBPpVJufGB +uvl2ICO1J2B01GqZNF5sAFPZn/KmsSQHRGoqxqWOeBLoR9lYGxMqXnmbnwoqZ6Yl +PwZpVnPDimZI+ymBV3QGypzqKOg4ZyYr8dW1P2WT+DZdjo2NQCCHGervJ8A9tDkP +JXtoUHRVnAxZfVo9QZQlUgjgRywVMRnVvwdVxrsStZf0X4OFunHB2WyBEXYKCrC/ +gpf36j36+uwtqSiUO1bd0lEursC9CBWMd1I0ltabrNMdjmEPNXubrjlpC2JgQCA2 +j6/7Nu4tCEoduL+bXPjqpRugc6bY+G7gMwRfaKonh+3ZwZCc7b3jajWvY9+rGNm6 +5ulK6lCKD2GTHuItGeIwlDWSXQ62B68ZgI9HkFFLLk3dheLSClIKF5r8GrBQAuUB +o2M3IUxExJtRmREOc5wGj1QupyheRDmHVi03vYVElOEMSyycw5KFNGHLD7ibSkNS +/jQ6fbjpKdx2qcgw+BRxgMYeNkh0IkFch4LoGHGLQYlE535YW6i4jRPpp2zDR+2z 
+Gp1iro2C6pSe3VkQw63d4k3jMdXH7OjysP6SHhYKGvzZ8/gntsm+HbRsZJB/9OTE
+W9c3rkIO3aQab3yIVMUWbuF6aC74Or8NpDyJO3inTmODBCEIZ43ygknQW/2xzQ+D
+hNQ+IIX3Sj0rnP0qCglN6oH4EZw=
+-----END CERTIFICATE-----
+
+# Issuer: O=certSIGN OU=certSIGN ROOT CA
+# Subject: O=certSIGN OU=certSIGN ROOT CA
+# Label: "certSIGN ROOT CA"
+# Serial: 35210227249154
+# MD5 Fingerprint: 18:98:c0:d6:e9:3a:fc:f9:b0:f5:0c:f7:4b:01:44:17
+# SHA1 Fingerprint: fa:b7:ee:36:97:26:62:fb:2d:b0:2a:f6:bf:03:fd:e8:7c:4b:2f:9b
+# SHA256 Fingerprint: ea:a9:62:c4:fa:4a:6b:af:eb:e4:15:19:6d:35:1c:cd:88:8d:4f:53:f3:fa:8a:e6:d7:c4:66:a9:4e:60:42:bb
+-----BEGIN CERTIFICATE-----
+MIIDODCCAiCgAwIBAgIGIAYFFnACMA0GCSqGSIb3DQEBBQUAMDsxCzAJBgNVBAYT
+AlJPMREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBD
+QTAeFw0wNjA3MDQxNzIwMDRaFw0zMTA3MDQxNzIwMDRaMDsxCzAJBgNVBAYTAlJP
+MREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBDQTCC
+ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALczuX7IJUqOtdu0KBuqV5Do
+0SLTZLrTk+jUrIZhQGpgV2hUhE28alQCBf/fm5oqrl0Hj0rDKH/v+yv6efHHrfAQ
+UySQi2bJqIirr1qjAOm+ukbuW3N7LBeCgV5iLKECZbO9xSsAfsT8AzNXDe3i+s5d
+RdY4zTW2ssHQnIFKquSyAVwdj1+ZxLGt24gh65AIgoDzMKND5pCCrlUoSe1b16kQ
+OA7+j0xbm0bqQfWwCHTD0IgztnzXdN/chNFDDnU5oSVAKOp4yw4sLjmdjItuFhwv
+JoIQ4uNllAoEwF73XVv4EOLQunpL+943AAAaWyjj0pxzPjKHmKHJUS/X3qwzs08C
+AwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAcYwHQYDVR0O
+BBYEFOCMm9slSbPxfIbWskKHC9BroNnkMA0GCSqGSIb3DQEBBQUAA4IBAQA+0hyJ
+LjX8+HXd5n9liPRyTMks1zJO890ZeUe9jjtbkw9QSSQTaxQGcu8J06Gh40CEyecY
+MnQ8SG4Pn0vU9x7Tk4ZkVJdjclDVVc/6IJMCopvDI5NOFlV2oHB5bc0hH88vLbwZ
+44gx+FkagQnIl6Z0x2DEW8xXjrJ1/RsCCdtZb3KTafcxQdaIOL+Hsr0Wefmq5L6I
+Jd1hJyMctTEHBDa0GpC9oHRxUIltvBTjD4au8as+x6AJzKNI0eDbZOeStc+vckNw
+i/nDhDwTqn6Sm1dTk/pwwpEOMfmbZ13pljheX7NzTogVZ96edhBiIL5VaZVDADlN
+9u6wWk5JRFRYX0KD
+-----END CERTIFICATE-----
+
+# Issuer: CN=GeoTrust Primary Certification Authority - G3 O=GeoTrust Inc. OU=(c) 2008 GeoTrust Inc. - For authorized use only
+# Subject: CN=GeoTrust Primary Certification Authority - G3 O=GeoTrust Inc. OU=(c) 2008 GeoTrust Inc. - For authorized use only
+# Label: "GeoTrust Primary Certification Authority - G3"
+# Serial: 28809105769928564313984085209975885599
+# MD5 Fingerprint: b5:e8:34:36:c9:10:44:58:48:70:6d:2e:83:d4:b8:05
+# SHA1 Fingerprint: 03:9e:ed:b8:0b:e7:a0:3c:69:53:89:3b:20:d2:d9:32:3a:4c:2a:fd
+# SHA256 Fingerprint: b4:78:b8:12:25:0d:f8:78:63:5c:2a:a7:ec:7d:15:5e:aa:62:5e:e8:29:16:e2:cd:29:43:61:88:6c:d1:fb:d4
+-----BEGIN CERTIFICATE-----
+MIID/jCCAuagAwIBAgIQFaxulBmyeUtB9iepwxgPHzANBgkqhkiG9w0BAQsFADCB
+mDELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsT
+MChjKSAyMDA4IEdlb1RydXN0IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25s
+eTE2MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhv
+cml0eSAtIEczMB4XDTA4MDQwMjAwMDAwMFoXDTM3MTIwMTIzNTk1OVowgZgxCzAJ
+BgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykg
+MjAwOCBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0
+BgNVBAMTLUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg
+LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANziXmJYHTNXOTIz
++uvLh4yn1ErdBojqZI4xmKU4kB6Yzy5jK/BGvESyiaHAKAxJcCGVn2TAppMSAmUm
+hsalifD614SgcK9PGpc/BkTVyetyEH3kMSj7HGHmKAdEc5IiaacDiGydY8hS2pgn
+5whMcD60yRLBxWeDXTPzAxHsatBT4tG6NmCUgLthY2xbF37fQJQeqw3CIShwiP/W
+JmxsYAQlTlV+fe+/lEjetx3dcI0FX4ilm/LC7urRQEFtYjgdVgbFA0dRIBn8exAL
+DmKudlW/X3e+PkkBUz2YJQN2JFodtNuJ6nnltrM7P7pMKEF/BqxqjsHQ9gUdfeZC
+huOl1UcCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYw
+HQYDVR0OBBYEFMR5yo6hTgMdHNxr2zFblD4/MH8tMA0GCSqGSIb3DQEBCwUAA4IB
+AQAtxRPPVoB7eni9n64smefv2t+UXglpp+duaIy9cr5HqQ6XErhK8WTTOd8lNNTB
+zU6B8A8ExCSzNJbGpqow32hhc9f5joWJ7w5elShKKiePEI4ufIbEAp7aDHdlDkQN
+kv39sxY2+hENHYwOB4lqKVb3cvTdFZx3NWZXqxNT2I7BQMXXExZacse3aQHEerGD
+AWh9jUGhlBjBJVz88P6DAod8DQ3PLghcSkANPuyBYeYk28rgDi0Hsj5W3I31QYUH
+SJsMC8tJP33st/3LjWeJGqvtux6jAAgIFyqCXDFdRootD4abdNlF+9RAsXqqaC2G
+spki4cErx5z481+oghLrGREt
+-----END CERTIFICATE-----
+
+# Issuer: CN=thawte Primary Root CA - G2 O=thawte, Inc. OU=(c) 2007 thawte, Inc. - For authorized use only
+# Subject: CN=thawte Primary Root CA - G2 O=thawte, Inc. OU=(c) 2007 thawte, Inc. - For authorized use only
+# Label: "thawte Primary Root CA - G2"
+# Serial: 71758320672825410020661621085256472406
+# MD5 Fingerprint: 74:9d:ea:60:24:c4:fd:22:53:3e:cc:3a:72:d9:29:4f
+# SHA1 Fingerprint: aa:db:bc:22:23:8f:c4:01:a1:27:bb:38:dd:f4:1d:db:08:9e:f0:12
+# SHA256 Fingerprint: a4:31:0d:50:af:18:a6:44:71:90:37:2a:86:af:af:8b:95:1f:fb:43:1d:83:7f:1e:56:88:b4:59:71:ed:15:57
+-----BEGIN CERTIFICATE-----
+MIICiDCCAg2gAwIBAgIQNfwmXNmET8k9Jj1Xm67XVjAKBggqhkjOPQQDAzCBhDEL
+MAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjE4MDYGA1UECxMvKGMp
+IDIwMDcgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAi
+BgNVBAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMjAeFw0wNzExMDUwMDAw
+MDBaFw0zODAxMTgyMzU5NTlaMIGEMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhh
+d3RlLCBJbmMuMTgwNgYDVQQLEy8oYykgMjAwNyB0aGF3dGUsIEluYy4gLSBGb3Ig
+YXV0aG9yaXplZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9v
+dCBDQSAtIEcyMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEotWcgnuVnfFSeIf+iha/
+BebfowJPDQfGAFG6DAJSLSKkQjnE/o/qycG+1E3/n3qe4rF8mq2nhglzh9HnmuN6
+papu+7qzcMBniKI11KOasf2twu8x+qi58/sIxpHR+ymVo0IwQDAPBgNVHRMBAf8E
+BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUmtgAMADna3+FGO6Lts6K
+DPgR4bswCgYIKoZIzj0EAwMDaQAwZgIxAN344FdHW6fmCsO99YCKlzUNG4k8VIZ3
+KMqh9HneteY4sPBlcIx/AlTCv//YoT7ZzwIxAMSNlPzcU9LcnXgWHxUzI1NS41ox
+XZ3Krr0TKUQNJ1uo52icEvdYPy5yAlejj6EULg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=thawte Primary Root CA - G3 O=thawte, Inc. OU=Certification Services Division/(c) 2008 thawte, Inc. - For authorized use only
+# Subject: CN=thawte Primary Root CA - G3 O=thawte, Inc. OU=Certification Services Division/(c) 2008 thawte, Inc. - For authorized use only
+# Label: "thawte Primary Root CA - G3"
+# Serial: 127614157056681299805556476275995414779
+# MD5 Fingerprint: fb:1b:5d:43:8a:94:cd:44:c6:76:f2:43:4b:47:e7:31
+# SHA1 Fingerprint: f1:8b:53:8d:1b:e9:03:b6:a6:f0:56:43:5b:17:15:89:ca:f3:6b:f2
+# SHA256 Fingerprint: 4b:03:f4:58:07:ad:70:f2:1b:fc:2c:ae:71:c9:fd:e4:60:4c:06:4c:f5:ff:b6:86:ba:e5:db:aa:d7:fd:d3:4c
+-----BEGIN CERTIFICATE-----
+MIIEKjCCAxKgAwIBAgIQYAGXt0an6rS0mtZLL/eQ+zANBgkqhkiG9w0BAQsFADCB
+rjELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMf
+Q2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIw
+MDggdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAiBgNV
+BAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMzAeFw0wODA0MDIwMDAwMDBa
+Fw0zNzEyMDEyMzU5NTlaMIGuMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhhd3Rl
+LCBJbmMuMSgwJgYDVQQLEx9DZXJ0aWZpY2F0aW9uIFNlcnZpY2VzIERpdmlzaW9u
+MTgwNgYDVQQLEy8oYykgMjAwOCB0aGF3dGUsIEluYy4gLSBGb3IgYXV0aG9yaXpl
+ZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9vdCBDQSAtIEcz
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsr8nLPvb2FvdeHsbnndm
+gcs+vHyu86YnmjSjaDFxODNi5PNxZnmxqWWjpYvVj2AtP0LMqmsywCPLLEHd5N/8
+YZzic7IilRFDGF/Eth9XbAoFWCLINkw6fKXRz4aviKdEAhN0cXMKQlkC+BsUa0Lf
+b1+6a4KinVvnSr0eAXLbS3ToO39/fR8EtCab4LRarEc9VbjXsCZSKAExQGbY2SS9
+9irY7CFJXJv2eul/VTV+lmuNk5Mny5K76qxAwJ/C+IDPXfRa3M50hqY+bAtTyr2S
+zhkGcuYMXDhpxwTWvGzOW/b3aJzcJRVIiKHpqfiYnODz1TEoYRFsZ5aNOZnLwkUk
+OQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNV
+HQ4EFgQUrWyqlGCc7eT/+j4KdCtjA/e2Wb8wDQYJKoZIhvcNAQELBQADggEBABpA
+2JVlrAmSicY59BDlqQ5mU1143vokkbvnRFHfxhY0Cu9qRFHqKweKA3rD6z8KLFIW
+oCtDuSWQP3CpMyVtRRooOyfPqsMpQhvfO0zAMzRbQYi/aytlryjvsvXDqmbOe1bu
+t8jLZ8HJnBoYuMTDSQPxYA5QzUbF83d597YV4Djbxy8ooAw/dyZ02SUS2jHaGh7c
+KUGRIjxpp7sC8rZcJwOJ9Abqm+RyguOhCcHpABnTPtRwa7pxpqpYrvS76Wy274fM
+m7v/OeZWYdMKp8RcTGB7BXcmer/YB1IsYvdwY9k5vG8cwnncdimvzsUsZAReiDZu
+MdRAGmI0Nj81Aa6sY6A=
+-----END CERTIFICATE-----
+
+# Issuer: CN=GeoTrust Primary Certification Authority - G2 O=GeoTrust Inc. OU=(c) 2007 GeoTrust Inc. - For authorized use only
+# Subject: CN=GeoTrust Primary Certification Authority - G2 O=GeoTrust Inc. OU=(c) 2007 GeoTrust Inc. - For authorized use only
+# Label: "GeoTrust Primary Certification Authority - G2"
+# Serial: 80682863203381065782177908751794619243
+# MD5 Fingerprint: 01:5e:d8:6b:bd:6f:3d:8e:a1:31:f8:12:e0:98:73:6a
+# SHA1 Fingerprint: 8d:17:84:d5:37:f3:03:7d:ec:70:fe:57:8b:51:9a:99:e6:10:d7:b0
+# SHA256 Fingerprint: 5e:db:7a:c4:3b:82:a0:6a:87:61:e8:d7:be:49:79:eb:f2:61:1f:7d:d7:9b:f9:1c:1c:6b:56:6a:21:9e:d7:66
+-----BEGIN CERTIFICATE-----
+MIICrjCCAjWgAwIBAgIQPLL0SAoA4v7rJDteYD7DazAKBggqhkjOPQQDAzCBmDEL
+MAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsTMChj
+KSAyMDA3IEdlb1RydXN0IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTE2
+MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0
+eSAtIEcyMB4XDTA3MTEwNTAwMDAwMFoXDTM4MDExODIzNTk1OVowgZgxCzAJBgNV
+BAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykgMjAw
+NyBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0BgNV
+BAMTLUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBH
+MjB2MBAGByqGSM49AgEGBSuBBAAiA2IABBWx6P0DFUPlrOuHNxFi79KDNlJ9RVcL
+So17VDs6bl8VAsBQps8lL33KSLjHUGMcKiEIfJo22Av+0SbFWDEwKCXzXV2juLal
+tJLtbCyf691DiaI8S0iRHVDsJt/WYC69IaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO
+BgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBVfNVdRVfslsq0DafwBo/q+EVXVMAoG
+CCqGSM49BAMDA2cAMGQCMGSWWaboCd6LuvpaiIjwH5HTRqjySkwCY/tsXzjbLkGT
+qQ7mndwxHLKgpxgceeHHNgIwOlavmnRs9vuD4DPTCF+hnMJbn0bWtsuRBmOiBucz
+rD6ogRLQy7rQkgu2npaqBA+K
+-----END CERTIFICATE-----
+
+# Issuer: CN=VeriSign Universal Root Certification Authority O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2008 VeriSign, Inc. - For authorized use only
+# Subject: CN=VeriSign Universal Root Certification Authority O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2008 VeriSign, Inc. - For authorized use only
+# Label: "VeriSign Universal Root Certification Authority"
+# Serial: 85209574734084581917763752644031726877
+# MD5 Fingerprint: 8e:ad:b5:01:aa:4d:81:e4:8c:1d:d1:e1:14:00:95:19
+# SHA1 Fingerprint: 36:79:ca:35:66:87:72:30:4d:30:a5:fb:87:3b:0f:a7:7b:b7:0d:54
+# SHA256 Fingerprint: 23:99:56:11:27:a5:71:25:de:8c:ef:ea:61:0d:df:2f:a0:78:b5:c8:06:7f:4e:82:82:90:bf:b8:60:e8:4b:3c
+-----BEGIN CERTIFICATE-----
+MIIEuTCCA6GgAwIBAgIQQBrEZCGzEyEDDrvkEhrFHTANBgkqhkiG9w0BAQsFADCB
+vTELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQL
+ExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwOCBWZXJp
+U2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MTgwNgYDVQQDEy9W
+ZXJpU2lnbiBVbml2ZXJzYWwgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAe
+Fw0wODA0MDIwMDAwMDBaFw0zNzEyMDEyMzU5NTlaMIG9MQswCQYDVQQGEwJVUzEX
+MBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0
+IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAyMDA4IFZlcmlTaWduLCBJbmMuIC0gRm9y
+IGF1dGhvcml6ZWQgdXNlIG9ubHkxODA2BgNVBAMTL1ZlcmlTaWduIFVuaXZlcnNh
+bCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEF
+AAOCAQ8AMIIBCgKCAQEAx2E3XrEBNNti1xWb/1hajCMj1mCOkdeQmIN65lgZOIzF
+9uVkhbSicfvtvbnazU0AtMgtc6XHaXGVHzk8skQHnOgO+k1KxCHfKWGPMiJhgsWH
+H26MfF8WIFFE0XBPV+rjHOPMee5Y2A7Cs0WTwCznmhcrewA3ekEzeOEz4vMQGn+H
+LL729fdC4uW/h2KJXwBL38Xd5HVEMkE6HnFuacsLdUYI0crSK5XQz/u5QGtkjFdN
+/BMReYTtXlT2NJ8IAfMQJQYXStrxHXpma5hgZqTZ79IugvHw7wnqRMkVauIDbjPT
+rJ9VAMf2CGqUuV/c4DPxhGD5WycRtPwW8rtWaoAljQIDAQABo4GyMIGvMA8GA1Ud
+EwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMG0GCCsGAQUFBwEMBGEwX6FdoFsw
+WTBXMFUWCWltYWdlL2dpZjAhMB8wBwYFKw4DAhoEFI/l0xqGrI2Oa8PPgGrUSBgs
+exkuMCUWI2h0dHA6Ly9sb2dvLnZlcmlzaWduLmNvbS92c2xvZ28uZ2lmMB0GA1Ud
+DgQWBBS2d/ppSEefUxLVwuoHMnYH0ZcHGTANBgkqhkiG9w0BAQsFAAOCAQEASvj4
+sAPmLGd75JR3Y8xuTPl9Dg3cyLk1uXBPY/ok+myDjEedO2Pzmvl2MpWRsXe8rJq+
+seQxIcaBlVZaDrHC1LGmWazxY8u4TB1ZkErvkBYoH1quEPuBUDgMbMzxPcP1Y+Oz
+4yHJJDnp/RVmRvQbEdBNc6N9Rvk97ahfYtTxP/jgdFcrGJ2BtMQo2pSXpXDrrB2+
+BxHw1dvd5Yzw1TKwg+ZX4o+/vqGqvz0dtdQ46tewXDpPaj+PwGZsY6rp2aQW9IHR
+lRQOfc2VNNnSj3BzgXucfr2YYdhFh5iQxeuGMMY1v/D/w1WIg0vvBZIGcfK4mJO3
+7M2CYfE45k+XmCpajQ==
+-----END CERTIFICATE-----
+
+# Issuer: CN=VeriSign Class 3 Public Primary Certification Authority - G4 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2007 VeriSign, Inc. - For authorized use only
+# Subject: CN=VeriSign Class 3 Public Primary Certification Authority - G4 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2007 VeriSign, Inc. - For authorized use only
+# Label: "VeriSign Class 3 Public Primary Certification Authority - G4"
+# Serial: 63143484348153506665311985501458640051
+# MD5 Fingerprint: 3a:52:e1:e7:fd:6f:3a:e3:6f:f3:6f:99:1b:f9:22:41
+# SHA1 Fingerprint: 22:d5:d8:df:8f:02:31:d1:8d:f7:9d:b7:cf:8a:2d:64:c9:3f:6c:3a
+# SHA256 Fingerprint: 69:dd:d7:ea:90:bb:57:c9:3e:13:5d:c8:5e:a6:fc:d5:48:0b:60:32:39:bd:c4:54:fc:75:8b:2a:26:cf:7f:79
+-----BEGIN CERTIFICATE-----
+MIIDhDCCAwqgAwIBAgIQL4D+I4wOIg9IZxIokYesszAKBggqhkjOPQQDAzCByjEL
+MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZW
+ZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2ln
+biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJp
+U2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9y
+aXR5IC0gRzQwHhcNMDcxMTA1MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCByjELMAkG
+A1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJp
+U2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2lnbiwg
+SW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2ln
+biBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5
+IC0gRzQwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAASnVnp8Utpkmw4tXNherJI9/gHm
+GUo9FANL+mAnINmDiWn6VMaaGF5VKmTeBvaNSjutEDxlPZCIBIngMGGzrl0Bp3ve
+fLK+ymVhAIau2o970ImtTR1ZmkGxvEeA3J5iw/mjgbIwga8wDwYDVR0TAQH/BAUw
+AwEB/zAOBgNVHQ8BAf8EBAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJ
+aW1hZ2UvZ2lmMCEwHzAHBgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYj
+aHR0cDovL2xvZ28udmVyaXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFLMW
+kf3upm7ktS5Jj4d4gYDs5bG1MAoGCCqGSM49BAMDA2gAMGUCMGYhDBgmYFo4e1ZC
+4Kf8NoRRkSAsdk1DPcQdhCPQrNZ8NQbOzWm9kA3bbEhCHQ6qQgIxAJw9SDkjOVga
+FRJZap7v1VmyHVIsmXHNxynfGyphe3HR3vPA5Q06Sqotp9iGKt0uEA==
+-----END CERTIFICATE-----
+
+# Issuer: CN=NetLock Arany (Class Gold) Főtanúsítvány O=NetLock Kft. OU=Tanúsítványkiadók (Certification Services)
+# Subject: CN=NetLock Arany (Class Gold) Főtanúsítvány O=NetLock Kft. OU=Tanúsítványkiadók (Certification Services)
+# Label: "NetLock Arany (Class Gold) Főtanúsítvány"
+# Serial: 80544274841616
+# MD5 Fingerprint: c5:a1:b7:ff:73:dd:d6:d7:34:32:18:df:fc:3c:ad:88
+# SHA1 Fingerprint: 06:08:3f:59:3f:15:a1:04:a0:69:a4:6b:a9:03:d0:06:b7:97:09:91
+# SHA256 Fingerprint: 6c:61:da:c3:a2:de:f0:31:50:6b:e0:36:d2:a6:fe:40:19:94:fb:d1:3d:f9:c8:d4:66:59:92:74:c4:46:ec:98
+-----BEGIN CERTIFICATE-----
+MIIEFTCCAv2gAwIBAgIGSUEs5AAQMA0GCSqGSIb3DQEBCwUAMIGnMQswCQYDVQQG
+EwJIVTERMA8GA1UEBwwIQnVkYXBlc3QxFTATBgNVBAoMDE5ldExvY2sgS2Z0LjE3
+MDUGA1UECwwuVGFuw7pzw610dsOhbnlraWFkw7NrIChDZXJ0aWZpY2F0aW9uIFNl
+cnZpY2VzKTE1MDMGA1UEAwwsTmV0TG9jayBBcmFueSAoQ2xhc3MgR29sZCkgRsWR
+dGFuw7pzw610dsOhbnkwHhcNMDgxMjExMTUwODIxWhcNMjgxMjA2MTUwODIxWjCB
+pzELMAkGA1UEBhMCSFUxETAPBgNVBAcMCEJ1ZGFwZXN0MRUwEwYDVQQKDAxOZXRM
+b2NrIEtmdC4xNzA1BgNVBAsMLlRhbsO6c8OtdHbDoW55a2lhZMOzayAoQ2VydGlm
+aWNhdGlvbiBTZXJ2aWNlcykxNTAzBgNVBAMMLE5ldExvY2sgQXJhbnkgKENsYXNz
+IEdvbGQpIEbFkXRhbsO6c8OtdHbDoW55MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
+MIIBCgKCAQEAxCRec75LbRTDofTjl5Bu0jBFHjzuZ9lk4BqKf8owyoPjIMHj9DrT
+lF8afFttvzBPhCf2nx9JvMaZCpDyD/V/Q4Q3Y1GLeqVw/HpYzY6b7cNGbIRwXdrz
+AZAj/E4wqX7hJ2Pn7WQ8oLjJM2P+FpD/sLj916jAwJRDC7bVWaaeVtAkH3B5r9s5
+VA1lddkVQZQBr17s9o3x/61k/iCa11zr/qYfCGSji3ZVrR47KGAuhyXoqq8fxmRG
+ILdwfzzeSNuWU7c5d+Qa4scWhHaXWy+7GRWF+GmF9ZmnqfI0p6m2pgP8b4Y9VHx2
+BJtr+UBdADTHLpl1neWIA6pN+APSQnbAGwIDAKiLo0UwQzASBgNVHRMBAf8ECDAG
+AQH/AgEEMA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUzPpnk/C2uNClwB7zU/2M
+U9+D15YwDQYJKoZIhvcNAQELBQADggEBAKt/7hwWqZw8UQCgwBEIBaeZ5m8BiFRh
+bvG5GK1Krf6BQCOUL/t1fC8oS2IkgYIL9WHxHG64YTjrgfpioTtaYtOUZcTh5m2C
++C8lcLIhJsFyUR+MLMOEkMNaj7rP9KdlpeuY0fsFskZ1FSNqb4VjMIDw1Z4fKRzC
+bLBQWV2QWzuoDTDPv31/zvGdg73JRm4gpvlhUbohL3u+pRVjodSVh/GeufOJ8z2F
+uLjbvrW5KfnaNwUASZQDhETnv0Mxz3WLJdH0pmT1kvarBes96aULNmLazAZfNou2
+XjG4Kvte9nHfRCaexOYNkbQudZWAUWpLMKawYqGT8ZvYzsRjdT9ZR7E=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Staat der Nederlanden Root CA - G2 O=Staat der Nederlanden
+# Subject: CN=Staat der Nederlanden Root CA - G2 O=Staat der Nederlanden
+# Label: "Staat der Nederlanden Root CA - G2"
+# Serial: 10000012
+# MD5 Fingerprint: 7c:a5:0f:f8:5b:9a:7d:6d:30:ae:54:5a:e3:42:a2:8a
+# SHA1 Fingerprint: 59:af:82:79:91:86:c7:b4:75:07:cb:cf:03:57:46:eb:04:dd:b7:16
+# SHA256 Fingerprint: 66:8c:83:94:7d:a6:3b:72:4b:ec:e1:74:3c:31:a0:e6:ae:d0:db:8e:c5:b3:1b:e3:77:bb:78:4f:91:b6:71:6f
+-----BEGIN CERTIFICATE-----
+MIIFyjCCA7KgAwIBAgIEAJiWjDANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJO
+TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFh
+dCBkZXIgTmVkZXJsYW5kZW4gUm9vdCBDQSAtIEcyMB4XDTA4MDMyNjExMTgxN1oX
+DTIwMDMyNTExMDMxMFowWjELMAkGA1UEBhMCTkwxHjAcBgNVBAoMFVN0YWF0IGRl
+ciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5lZGVybGFuZGVuIFJv
+b3QgQ0EgLSBHMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMVZ5291
+qj5LnLW4rJ4L5PnZyqtdj7U5EILXr1HgO+EASGrP2uEGQxGZqhQlEq0i6ABtQ8Sp
+uOUfiUtnvWFI7/3S4GCI5bkYYCjDdyutsDeqN95kWSpGV+RLufg3fNU254DBtvPU
+Z5uW6M7XxgpT0GtJlvOjCwV3SPcl5XCsMBQgJeN/dVrlSPhOewMHBPqCYYdu8DvE
+pMfQ9XQ+pV0aCPKbJdL2rAQmPlU6Yiile7Iwr/g3wtG61jj99O9JMDeZJiFIhQGp
+5Rbn3JBV3w/oOM2ZNyFPXfUib2rFEhZgF1XyZWampzCROME4HYYEhLoaJXhena/M
+UGDWE4dS7WMfbWV9whUYdMrhfmQpjHLYFhN9C0lK8SgbIHRrxT3dsKpICT0ugpTN
+GmXZK4iambwYfp/ufWZ8Pr2UuIHOzZgweMFvZ9C+X+Bo7d7iscksWXiSqt8rYGPy
+5V6548r6f1CGPqI0GAwJaCgRHOThuVw+R7oyPxjMW4T182t0xHJ04eOLoEq9jWYv
+6q012iDTiIJh8BIitrzQ1aTsr1SIJSQ8p22xcik/Plemf1WvbibG/ufMQFxRRIEK
+eN5KzlW/HdXZt1bv8Hb/C3m1r737qWmRRpdogBQ2HbN/uymYNqUg+oJgYjOk7Na6
+B6duxc8UpufWkjTYgfX8HV2qXB72o007uPc5AgMBAAGjgZcwgZQwDwYDVR0TAQH/
+BAUwAwEB/zBSBgNVHSAESzBJMEcGBFUdIAAwPzA9BggrBgEFBQcCARYxaHR0cDov +L3d3dy5wa2lvdmVyaGVpZC5ubC9wb2xpY2llcy9yb290LXBvbGljeS1HMjAOBgNV +HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJFoMocVHYnitfGsNig0jQt8YojrMA0GCSqG +SIb3DQEBCwUAA4ICAQCoQUpnKpKBglBu4dfYszk78wIVCVBR7y29JHuIhjv5tLyS +CZa59sCrI2AGeYwRTlHSeYAz+51IvuxBQ4EffkdAHOV6CMqqi3WtFMTC6GY8ggen +5ieCWxjmD27ZUD6KQhgpxrRW/FYQoAUXvQwjf/ST7ZwaUb7dRUG/kSS0H4zpX897 +IZmflZ85OkYcbPnNe5yQzSipx6lVu6xiNGI1E0sUOlWDuYaNkqbG9AclVMwWVxJK +gnjIFNkXgiYtXSAfea7+1HAWFpWD2DU5/1JddRwWxRNVz0fMdWVSSt7wsKfkCpYL ++63C4iWEst3kvX5ZbJvw8NjnyvLplzh+ib7M+zkXYT9y2zqR2GUBGR2tUKRXCnxL +vJxxcypFURmFzI79R6d0lR2o0a9OF7FpJsKqeFdbxU2n5Z4FF5TKsl+gSRiNNOkm +bEgeqmiSBeGCc1qb3AdbCG19ndeNIdn8FCCqwkXfP+cAslHkwvgFuXkajDTznlvk +N1trSt8sV4pAWja63XVECDdCcAz+3F4hoKOKwJCcaNpQ5kUQR3i2TtJlycM33+FC +Y7BXN0Ute4qcvwXqZVUz9zkQxSgqIXobisQk+T8VyJoVIPVVYpbtbZNQvOSqeK3Z +ywplh6ZmwcSBo3c6WB4L7oOLnR7SUqTMHW+wmG2UMbX4cQrcufx9MmDm66+KAQ== +-----END CERTIFICATE----- + +# Issuer: CN=Hongkong Post Root CA 1 O=Hongkong Post +# Subject: CN=Hongkong Post Root CA 1 O=Hongkong Post +# Label: "Hongkong Post Root CA 1" +# Serial: 1000 +# MD5 Fingerprint: a8:0d:6f:39:78:b9:43:6d:77:42:6d:98:5a:cc:23:ca +# SHA1 Fingerprint: d6:da:a8:20:8d:09:d2:15:4d:24:b5:2f:cb:34:6e:b2:58:b2:8a:58 +# SHA256 Fingerprint: f9:e6:7d:33:6c:51:00:2a:c0:54:c6:32:02:2d:66:dd:a2:e7:e3:ff:f1:0a:d0:61:ed:31:d8:bb:b4:10:cf:b2 +-----BEGIN CERTIFICATE----- +MIIDMDCCAhigAwIBAgICA+gwDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCSEsx +FjAUBgNVBAoTDUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3Qg +Um9vdCBDQSAxMB4XDTAzMDUxNTA1MTMxNFoXDTIzMDUxNTA0NTIyOVowRzELMAkG +A1UEBhMCSEsxFjAUBgNVBAoTDUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdr +b25nIFBvc3QgUm9vdCBDQSAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEArP84tulmAknjorThkPlAj3n54r15/gK97iSSHSL22oVyaf7XPwnU3ZG1ApzQ +jVrhVcNQhrkpJsLj2aDxaQMoIIBFIi1WpztUlVYiWR8o3x8gPW2iNr4joLFutbEn +PzlTCeqrauh0ssJlXI6/fMN4hM2eFvz1Lk8gKgifd/PFHsSaUmYeSF7jEAaPIpjh +ZY4bXSNmO7ilMlHIhqqhqZ5/dpTCpmy3QfDVyAY45tQM4vM7TG1QjMSDJ8EThFk9 +nnV0ttgCXjqQesBCNnLsak3c78QA3xMYV18meMjWCnl3v/evt3a5pQuEF10Q6m/h +q5URX208o1xNg1vysxmKgIsLhwIDAQABoyYwJDASBgNVHRMBAf8ECDAGAQH/AgED +MA4GA1UdDwEB/wQEAwIBxjANBgkqhkiG9w0BAQUFAAOCAQEADkbVPK7ih9legYsC +mEEIjEy82tvuJxuC52pF7BaLT4Wg87JwvVqWuspube5Gi27nKi6Wsxkz67SfqLI3 +7piol7Yutmcn1KZJ/RyTZXaeQi/cImyaT/JaFTmxcdcrUehtHJjA2Sr0oYJ71clB +oiMBdDhViw+5LmeiIAQ32pwL0xch4I+XeTRvhEgCIDMb5jREn5Fw9IBehEPCKdJs +EhTkYY2sEJCehFC78JZvRZ+K88psT/oROhUVRsPNH4NbLUES7VBnQRM9IauUiqpO +fMGx+6fWtScvl6tu4B3i0RwsH0Ti/L6RoZz71ilTc4afU9hDDl3WY4JxHYB0yvbi +AmvZWg== +-----END CERTIFICATE----- + +# Issuer: CN=SecureSign RootCA11 O=Japan Certification Services, Inc. +# Subject: CN=SecureSign RootCA11 O=Japan Certification Services, Inc. 
+# Label: "SecureSign RootCA11"
+# Serial: 1
+# MD5 Fingerprint: b7:52:74:e2:92:b4:80:93:f2:75:e4:cc:d7:f2:ea:26
+# SHA1 Fingerprint: 3b:c4:9f:48:f8:f3:73:a0:9c:1e:bd:f8:5b:b1:c3:65:c7:d8:11:b3
+# SHA256 Fingerprint: bf:0f:ee:fb:9e:3a:58:1a:d5:f9:e9:db:75:89:98:57:43:d2:61:08:5c:4d:31:4f:6f:5d:72:59:aa:42:16:12
+-----BEGIN CERTIFICATE-----
+MIIDbTCCAlWgAwIBAgIBATANBgkqhkiG9w0BAQUFADBYMQswCQYDVQQGEwJKUDEr
+MCkGA1UEChMiSmFwYW4gQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcywgSW5jLjEcMBoG
+A1UEAxMTU2VjdXJlU2lnbiBSb290Q0ExMTAeFw0wOTA0MDgwNDU2NDdaFw0yOTA0
+MDgwNDU2NDdaMFgxCzAJBgNVBAYTAkpQMSswKQYDVQQKEyJKYXBhbiBDZXJ0aWZp
+Y2F0aW9uIFNlcnZpY2VzLCBJbmMuMRwwGgYDVQQDExNTZWN1cmVTaWduIFJvb3RD
+QTExMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA/XeqpRyQBTvLTJsz
+i1oURaTnkBbR31fSIRCkF/3frNYfp+TbfPfs37gD2pRY/V1yfIw/XwFndBWW4wI8
+h9uuywGOwvNmxoVF9ALGOrVisq/6nL+k5tSAMJjzDbaTj6nU2DbysPyKyiyhFTOV
+MdrAG/LuYpmGYz+/3ZMqg6h2uRMft85OQoWPIucuGvKVCbIFtUROd6EgvanyTgp9
+UK31BQ1FT0Zx/Sg+U/sE2C3XZR1KG/rPO7AxmjVuyIsG0wCR8pQIZUyxNAYAeoni
+8McDWc/V1uinMrPmmECGxc0nEovMe863ETxiYAcjPitAbpSACW22s293bzUIUPsC
+h8U+iQIDAQABo0IwQDAdBgNVHQ4EFgQUW/hNT7KlhtQ60vFjmqC+CfZXt94wDgYD
+VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEB
+AKChOBZmLqdWHyGcBvod7bkixTgm2E5P7KN/ed5GIaGHd48HCJqypMWvDzKYC3xm
+KbabfSVSSUOrTC4rbnpwrxYO4wJs+0LmGJ1F2FXI6Dvd5+H0LgscNFxsWEr7jIhQ
+X5Ucv+2rIrVls4W6ng+4reV6G4pQOh29Dbx7VFALuUKvVaAYga1lme++5Jy/xIWr
+QbJUb9wlze144o4MjQlJ3WN7WmmWAiGovVJZ6X01y8hSyn+B/tlr0/cR7SXf+Of5
+pPpyl4RTDaXQMhhRdlkUbA/r7F+AjHVDg8OFmP9Mni0N5HeDk061lgeLKBObjBmN
+QSdJQO7e5iNEOdyhIta6A/I=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Microsec e-Szigno Root CA 2009 O=Microsec Ltd.
+# Subject: CN=Microsec e-Szigno Root CA 2009 O=Microsec Ltd.
+# Label: "Microsec e-Szigno Root CA 2009"
+# Serial: 14014712776195784473
+# MD5 Fingerprint: f8:49:f4:03:bc:44:2d:83:be:48:69:7d:29:64:fc:b1
+# SHA1 Fingerprint: 89:df:74:fe:5c:f4:0f:4a:80:f9:e3:37:7d:54:da:91:e1:01:31:8e
+# SHA256 Fingerprint: 3c:5f:81:fe:a5:fa:b8:2c:64:bf:a2:ea:ec:af:cd:e8:e0:77:fc:86:20:a7:ca:e5:37:16:3d:f3:6e:db:f3:78
+-----BEGIN CERTIFICATE-----
+MIIECjCCAvKgAwIBAgIJAMJ+QwRORz8ZMA0GCSqGSIb3DQEBCwUAMIGCMQswCQYD
+VQQGEwJIVTERMA8GA1UEBwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0
+ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUtU3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0G
+CSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5odTAeFw0wOTA2MTYxMTMwMThaFw0y
+OTEyMzAxMTMwMThaMIGCMQswCQYDVQQGEwJIVTERMA8GA1UEBwwIQnVkYXBlc3Qx
+FjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUtU3pp
+Z25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5o
+dTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOn4j/NjrdqG2KfgQvvP
+kd6mJviZpWNwrZuuyjNAfW2WbqEORO7hE52UQlKavXWFdCyoDh2Tthi3jCyoz/tc
+cbna7P7ofo/kLx2yqHWH2Leh5TvPmUpG0IMZfcChEhyVbUr02MelTTMuhTlAdX4U
+fIASmFDHQWe4oIBhVKZsTh/gnQ4H6cm6M+f+wFUoLAKApxn1ntxVUwOXewdI/5n7
+N4okxFnMUBBjjqqpGrCEGob5X7uxUG6k0QrM1XF+H6cbfPVTbiJfyyvm1HxdrtbC
+xkzlBQHZ7Vf8wSN5/PrIJIOV87VqUQHQd9bpEqH5GoP7ghu5sJf0dgYzQ0mg/wu1
++rUCAwEAAaOBgDB+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0G
+A1UdDgQWBBTLD8bfQkPMPcu1SCOhGnqmKrs0aDAfBgNVHSMEGDAWgBTLD8bfQkPM
+Pcu1SCOhGnqmKrs0aDAbBgNVHREEFDASgRBpbmZvQGUtc3ppZ25vLmh1MA0GCSqG
+SIb3DQEBCwUAA4IBAQDJ0Q5eLtXMs3w+y/w9/w0olZMEyL/azXm4Q5DwpL7v8u8h
+mLzU1F0G9u5C7DBsoKqpyvGvivo/C3NqPuouQH4frlRheesuCDfXI/OMn74dseGk
+ddug4lQUsbocKaQY9hK6ohQU4zE1yED/t+AFdlfBHFny+L/k7SViXITwfn4fs775
+tyERzAMBVnCnEJIeGzSBHq2cGsMEPO0CYdYeBvNfOofyK/FFh+U9rNHHV4S9a67c
+2Pm2G2JwCz02yULyMtd6YebS2z3PyKnJm9zbWETXbzivf3jTo60adbocwTZ8jx5t
+HMN1Rq41Bab2XD0h7lbwyYIiLXpUq3DDfSJlgnCW
+-----END CERTIFICATE-----
+
+# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R3
+# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R3
+# Label: "GlobalSign Root CA - R3"
+# Serial: 4835703278459759426209954
+# MD5 Fingerprint: c5:df:b8:49:ca:05:13:55:ee:2d:ba:1a:c3:3e:b0:28
+# SHA1 Fingerprint: d6:9b:56:11:48:f0:1c:77:c5:45:78:c1:09:26:df:5b:85:69:76:ad
+# SHA256 Fingerprint: cb:b5:22:d7:b7:f1:27:ad:6a:01:13:86:5b:df:1c:d4:10:2e:7d:07:59:af:63:5a:7c:f4:72:0d:c9:63:c5:3b
+-----BEGIN CERTIFICATE-----
+MIIDXzCCAkegAwIBAgILBAAAAAABIVhTCKIwDQYJKoZIhvcNAQELBQAwTDEgMB4G
+A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjMxEzARBgNVBAoTCkdsb2JhbFNp
+Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDkwMzE4MTAwMDAwWhcNMjkwMzE4
+MTAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMzETMBEG
+A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBAMwldpB5BngiFvXAg7aEyiie/QV2EcWtiHL8
+RgJDx7KKnQRfJMsuS+FggkbhUqsMgUdwbN1k0ev1LKMPgj0MK66X17YUhhB5uzsT
+gHeMCOFJ0mpiLx9e+pZo34knlTifBtc+ycsmWQ1z3rDI6SYOgxXG71uL0gRgykmm
+KPZpO/bLyCiR5Z2KYVc3rHQU3HTgOu5yLy6c+9C7v/U9AOEGM+iCK65TpjoWc4zd
+QQ4gOsC0p6Hpsk+QLjJg6VfLuQSSaGjlOCZgdbKfd/+RFO+uIEn8rUAVSNECMWEZ
+XriX7613t2Saer9fwRPvm2L7DWzgVGkWqQPabumDk3F2xmmFghcCAwEAAaNCMEAw
+DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFI/wS3+o
+LkUkrk1Q+mOai97i3Ru8MA0GCSqGSIb3DQEBCwUAA4IBAQBLQNvAUKr+yAzv95ZU
+RUm7lgAJQayzE4aGKAczymvmdLm6AC2upArT9fHxD4q/c2dKg8dEe3jgr25sbwMp
+jjM5RcOO5LlXbKr8EpbsU8Yt5CRsuZRj+9xTaGdWPoO4zzUhw8lo/s7awlOqzJCK
+6fBdRoyV3XpYKBovHd7NADdBj+1EbddTKJd+82cEHhXXipa0095MJ6RMG3NzdvQX
+mcIfeg7jLQitChws/zyrVQ4PkX4268NXSb7hLi18YIvDQVETI53O9zJrlAGomecs
+Mx86OyXShkDOOyyGeMlhLxS67ttVb9+E7gUJTb0o2HLO02JQZR7rkpeDMdmztcpH
+WD9f
+-----END CERTIFICATE-----
+
+# Issuer: CN=Autoridad de Certificacion Firmaprofesional CIF A62634068
+# Subject: CN=Autoridad de Certificacion Firmaprofesional CIF A62634068
+# Label: "Autoridad de Certificacion Firmaprofesional CIF A62634068"
+# Serial: 6047274297262753887
+# MD5 Fingerprint: 73:3a:74:7a:ec:bb:a3:96:a6:c2:e4:e2:c8:9b:c0:c3
+# SHA1 Fingerprint: ae:c5:fb:3f:c8:e1:bf:c4:e5:4f:03:07:5a:9a:e8:00:b7:f7:b6:fa
+# SHA256 Fingerprint: 04:04:80:28:bf:1f:28:64:d4:8f:9a:d4:d8:32:94:36:6a:82:88:56:55:3f:3b:14:30:3f:90:14:7f:5d:40:ef
+-----BEGIN CERTIFICATE-----
+MIIGFDCCA/ygAwIBAgIIU+w77vuySF8wDQYJKoZIhvcNAQEFBQAwUTELMAkGA1UE
+BhMCRVMxQjBABgNVBAMMOUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1h
+cHJvZmVzaW9uYWwgQ0lGIEE2MjYzNDA2ODAeFw0wOTA1MjAwODM4MTVaFw0zMDEy
+MzEwODM4MTVaMFExCzAJBgNVBAYTAkVTMUIwQAYDVQQDDDlBdXRvcmlkYWQgZGUg
+Q2VydGlmaWNhY2lvbiBGaXJtYXByb2Zlc2lvbmFsIENJRiBBNjI2MzQwNjgwggIi
+MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKlmuO6vj78aI14H9M2uDDUtd9
+thDIAl6zQyrET2qyyhxdKJp4ERppWVevtSBC5IsP5t9bpgOSL/UR5GLXMnE42QQM
+cas9UX4PB99jBVzpv5RvwSmCwLTaUbDBPLutN0pcyvFLNg4kq7/DhHf9qFD0sefG
+L9ItWY16Ck6WaVICqjaY7Pz6FIMMNx/Jkjd/14Et5cS54D40/mf0PmbR0/RAz15i
+NA9wBj4gGFrO93IbJWyTdBSTo3OxDqqHECNZXyAFGUftaI6SEspd/NYrspI8IM/h
+X68gvqB2f3bl7BqGYTM+53u0P6APjqK5am+5hyZvQWyIplD9amML9ZMWGxmPsu2b
+m8mQ9QEM3xk9Dz44I8kvjwzRAv4bVdZO0I08r0+k8/6vKtMFnXkIoctXMbScyJCy
+Z/QYFpM6/EfY0XiWMR+6KwxfXZmtY4laJCB22N/9q06mIqqdXuYnin1oKaPnirja
+EbsXLZmdEyRG98Xi2J+Of8ePdG1asuhy9azuJBCtLxTa/y2aRnFHvkLfuwHb9H/T
+KI8xWVvTyQKmtFLKbpf7Q8UIJm+K9Lv9nyiqDdVF8xM6HdjAeI9BZzwelGSuewvF
+6NkBiDkal4ZkQdU7hwxu+g/GvUgUvzlN1J5Bto+WHWOWk9mVBngxaJ43BjuAiUVh
+OSPHG0SjFeUc+JIwuwIDAQABo4HvMIHsMBIGA1UdEwEB/wQIMAYBAf8CAQEwDgYD
+VR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRlzeurNR4APn7VdMActHNHDhpkLzCBpgYD
+VR0gBIGeMIGbMIGYBgRVHSAAMIGPMC8GCCsGAQUFBwIBFiNodHRwOi8vd3d3LmZp
+cm1hcHJvZmVzaW9uYWwuY29tL2NwczBcBggrBgEFBQcCAjBQHk4AUABhAHMAZQBv +ACAAZABlACAAbABhACAAQgBvAG4AYQBuAG8AdgBhACAANAA3ACAAQgBhAHIAYwBl +AGwAbwBuAGEAIAAwADgAMAAxADcwDQYJKoZIhvcNAQEFBQADggIBABd9oPm03cXF +661LJLWhAqvdpYhKsg9VSytXjDvlMd3+xDLx51tkljYyGOylMnfX40S2wBEqgLk9 +am58m9Ot/MPWo+ZkKXzR4Tgegiv/J2Wv+xYVxC5xhOW1//qkR71kMrv2JYSiJ0L1 +ILDCExARzRAVukKQKtJE4ZYm6zFIEv0q2skGz3QeqUvVhyj5eTSSPi5E6PaPT481 +PyWzOdxjKpBrIF/EUhJOlywqrJ2X3kjyo2bbwtKDlaZmp54lD+kLM5FlClrD2VQS +3a/DTg4fJl4N3LON7NWBcN7STyQF82xO9UxJZo3R/9ILJUFI/lGExkKvgATP0H5k +SeTy36LssUzAKh3ntLFlosS88Zj0qnAHY7S42jtM+kAiMFsRpvAFDsYCA0irhpuF +3dvd6qJ2gHN99ZwExEWN57kci57q13XRcrHedUTnQn3iV2t93Jm8PYMo6oCTjcVM +ZcFwgbg4/EMxsvYDNEeyrPsiBsse3RdHHF9mudMaotoRsaS8I8nkvof/uZS2+F0g +StRf571oe2XyFR7SOqkt6dhrJKyXWERHrVkY8SFlcN7ONGCoQPHzPKTDKCOM/icz +Q0CgFzzr6juwcqajuUpLXhZI9LK8yIySxZ2frHI2vDSANGupi5LAuBft7HZT9SQB +jLMi6Et8Vcad+qMUu2WFbm5PEn4KPJ2V +-----END CERTIFICATE----- + +# Issuer: CN=Izenpe.com O=IZENPE S.A. +# Subject: CN=Izenpe.com O=IZENPE S.A. +# Label: "Izenpe.com" +# Serial: 917563065490389241595536686991402621 +# MD5 Fingerprint: a6:b0:cd:85:80:da:5c:50:34:a3:39:90:2f:55:67:73 +# SHA1 Fingerprint: 2f:78:3d:25:52:18:a7:4a:65:39:71:b5:2c:a2:9c:45:15:6f:e9:19 +# SHA256 Fingerprint: 25:30:cc:8e:98:32:15:02:ba:d9:6f:9b:1f:ba:1b:09:9e:2d:29:9e:0f:45:48:bb:91:4f:36:3b:c0:d4:53:1f +-----BEGIN CERTIFICATE----- +MIIF8TCCA9mgAwIBAgIQALC3WhZIX7/hy/WL1xnmfTANBgkqhkiG9w0BAQsFADA4 +MQswCQYDVQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6 +ZW5wZS5jb20wHhcNMDcxMjEzMTMwODI4WhcNMzcxMjEzMDgyNzI1WjA4MQswCQYD +VQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6ZW5wZS5j +b20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDJ03rKDx6sp4boFmVq +scIbRTJxldn+EFvMr+eleQGPicPK8lVx93e+d5TzcqQsRNiekpsUOqHnJJAKClaO +xdgmlOHZSOEtPtoKct2jmRXagaKH9HtuJneJWK3W6wyyQXpzbm3benhB6QiIEn6H +LmYRY2xU+zydcsC8Lv/Ct90NduM61/e0aL6i9eOBbsFGb12N4E3GVFWJGjMxCrFX +uaOKmMPsOzTFlUFpfnXCPCDFYbpRR6AgkJOhkEvzTnyFRVSa0QUmQbC1TR0zvsQD +yCV8wXDbO/QJLVQnSKwv4cSsPsjLkkxTOTcj7NMB+eAJRE1NZMDhDVqHIrytG6P+ +JrUV86f8hBnp7KGItERphIPzidF0BqnMC9bC3ieFUCbKF7jJeodWLBoBHmy+E60Q +rLUk9TiRodZL2vG70t5HtfG8gfZZa88ZU+mNFctKy6lvROUbQc/hhqfK0GqfvEyN +BjNaooXlkDWgYlwWTvDjovoDGrQscbNYLN57C9saD+veIR8GdwYDsMnvmfzAuU8L +hij+0rnq49qlw0dpEuDb8PYZi+17cNcC1u2HGCgsBCRMd+RIihrGO5rUD8r6ddIB +QFqNeb+Lz0vPqhbBleStTIo+F5HUsWLlguWABKQDfo2/2n+iD5dPDNMN+9fR5XJ+ +HMh3/1uaD7euBUbl8agW7EekFwIDAQABo4H2MIHzMIGwBgNVHREEgagwgaWBD2lu +Zm9AaXplbnBlLmNvbaSBkTCBjjFHMEUGA1UECgw+SVpFTlBFIFMuQS4gLSBDSUYg +QTAxMzM3MjYwLVJNZXJjLlZpdG9yaWEtR2FzdGVpeiBUMTA1NSBGNjIgUzgxQzBB +BgNVBAkMOkF2ZGEgZGVsIE1lZGl0ZXJyYW5lbyBFdG9yYmlkZWEgMTQgLSAwMTAx +MCBWaXRvcmlhLUdhc3RlaXowDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwHQYDVR0OBBYEFB0cZQ6o8iV7tJHP5LGx5r1VdGwFMA0GCSqGSIb3DQEBCwUA +A4ICAQB4pgwWSp9MiDrAyw6lFn2fuUhfGI8NYjb2zRlrrKvV9pF9rnHzP7MOeIWb +laQnIUdCSnxIOvVFfLMMjlF4rJUT3sb9fbgakEyrkgPH7UIBzg/YsfqikuFgba56 +awmqxinuaElnMIAkejEWOVt+8Rwu3WwJrfIxwYJOubv5vr8qhT/AQKM6WfxZSzwo +JNu0FXWuDYi6LnPAvViH5ULy617uHjAimcs30cQhbIHsvm0m5hzkQiCeR7Csg1lw +LDXWrzY0tM07+DKo7+N4ifuNRSzanLh+QBxh5z6ikixL8s36mLYp//Pye6kfLqCT +VyvehQP5aTfLnnhqBbTFMXiJ7HqnheG5ezzevh55hM6fcA5ZwjUukCox2eRFekGk +LhObNA5me0mrZJfQRsN5nXJQY6aYWwa9SG3YOYNw6DXwBdGqvOPbyALqfP2C2sJb +UjWumDqtujWTI6cfSN01RpiyEGjkpTHCClguGYEQyVB1/OpaFs4R1+7vUIgtYf8/ +QnMFlEPVjjxOAToZpR9GTnfQXeWBIiGH/pR9hNiTrdZoQ0iy2+tzJOeRf1SktoA+ +naM8THLCV8Sg1Mw4J87VBp6iSNnpn86CcDaTmjvfliHjWbcM2pE38P1ZWrOZyGls +QyYBNWNgVYkDOnXYukrZVP/u3oDYLdE41V4tC5h9Pmzb/CaIxw== +-----END CERTIFICATE----- + +# Issuer: CN=Chambers of Commerce Root - 2008 O=AC 
Camerfirma S.A. +# Subject: CN=Chambers of Commerce Root - 2008 O=AC Camerfirma S.A. +# Label: "Chambers of Commerce Root - 2008" +# Serial: 11806822484801597146 +# MD5 Fingerprint: 5e:80:9e:84:5a:0e:65:0b:17:02:f3:55:18:2a:3e:d7 +# SHA1 Fingerprint: 78:6a:74:ac:76:ab:14:7f:9c:6a:30:50:ba:9e:a8:7e:fe:9a:ce:3c +# SHA256 Fingerprint: 06:3e:4a:fa:c4:91:df:d3:32:f3:08:9b:85:42:e9:46:17:d8:93:d7:fe:94:4e:10:a7:93:7e:e2:9d:96:93:c0 +-----BEGIN CERTIFICATE----- +MIIHTzCCBTegAwIBAgIJAKPaQn6ksa7aMA0GCSqGSIb3DQEBBQUAMIGuMQswCQYD +VQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0 +IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3 +MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xKTAnBgNVBAMTIENoYW1iZXJz +IG9mIENvbW1lcmNlIFJvb3QgLSAyMDA4MB4XDTA4MDgwMTEyMjk1MFoXDTM4MDcz +MTEyMjk1MFowga4xCzAJBgNVBAYTAkVVMUMwQQYDVQQHEzpNYWRyaWQgKHNlZSBj +dXJyZW50IGFkZHJlc3MgYXQgd3d3LmNhbWVyZmlybWEuY29tL2FkZHJlc3MpMRIw +EAYDVQQFEwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENhbWVyZmlybWEgUy5BLjEp +MCcGA1UEAxMgQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdCAtIDIwMDgwggIiMA0G +CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCvAMtwNyuAWko6bHiUfaN/Gh/2NdW9 +28sNRHI+JrKQUrpjOyhYb6WzbZSm891kDFX29ufyIiKAXuFixrYp4YFs8r/lfTJq +VKAyGVn+H4vXPWCGhSRv4xGzdz4gljUha7MI2XAuZPeEklPWDrCQiorjh40G072Q +DuKZoRuGDtqaCrsLYVAGUvGef3bsyw/QHg3PmTA9HMRFEFis1tPo1+XqxQEHd9ZR +5gN/ikilTWh1uem8nk4ZcfUyS5xtYBkL+8ydddy/Js2Pk3g5eXNeJQ7KXOt3EgfL +ZEFHcpOrUMPrCXZkNNI5t3YRCQ12RcSprj1qr7V9ZS+UWBDsXHyvfuK2GNnQm05a +Sd+pZgvMPMZ4fKecHePOjlO+Bd5gD2vlGts/4+EhySnB8esHnFIbAURRPHsl18Tl +UlRdJQfKFiC4reRB7noI/plvg6aRArBsNlVq5331lubKgdaX8ZSD6e2wsWsSaR6s ++12pxZjptFtYer49okQ6Y1nUCyXeG0+95QGezdIp1Z8XGQpvvwyQ0wlf2eOKNcx5 +Wk0ZN5K3xMGtr/R5JJqyAQuxr1yW84Ay+1w9mPGgP0revq+ULtlVmhduYJ1jbLhj +ya6BXBg14JC7vjxPNyK5fuvPnnchpj04gftI2jE9K+OJ9dC1vX7gUMQSibMjmhAx +hduub+84Mxh2EQIDAQABo4IBbDCCAWgwEgYDVR0TAQH/BAgwBgEB/wIBDDAdBgNV +HQ4EFgQU+SSsD7K1+HnA+mCIG8TZTQKeFxkwgeMGA1UdIwSB2zCB2IAU+SSsD7K1 ++HnA+mCIG8TZTQKeFxmhgbSkgbEwga4xCzAJBgNVBAYTAkVVMUMwQQYDVQQHEzpN +YWRyaWQgKHNlZSBjdXJyZW50IGFkZHJlc3MgYXQgd3d3LmNhbWVyZmlybWEuY29t +L2FkZHJlc3MpMRIwEAYDVQQFEwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENhbWVy +ZmlybWEgUy5BLjEpMCcGA1UEAxMgQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdCAt +IDIwMDiCCQCj2kJ+pLGu2jAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRV +HSAAMCowKAYIKwYBBQUHAgEWHGh0dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20w +DQYJKoZIhvcNAQEFBQADggIBAJASryI1wqM58C7e6bXpeHxIvj99RZJe6dqxGfwW +PJ+0W2aeaufDuV2I6A+tzyMP3iU6XsxPpcG1Lawk0lgH3qLPaYRgM+gQDROpI9CF +5Y57pp49chNyM/WqfcZjHwj0/gF/JM8rLFQJ3uIrbZLGOU8W6jx+ekbURWpGqOt1 +glanq6B8aBMz9p0w8G8nOSQjKpD9kCk18pPfNKXG9/jvjA9iSnyu0/VU+I22mlaH +FoI6M6taIgj3grrqLuBHmrS1RaMFO9ncLkVAO+rcf+g769HsJtg1pDDFOqxXnrN2 +pSB7+R5KBWIBpih1YJeSDW4+TTdDDZIVnBgizVGZoCkaPF+KMjNbMMeJL0eYD6MD +xvbxrN8y8NmBGuScvfaAFPDRLLmF9dijscilIeUcE5fuDr3fKanvNFNb0+RqE4QG +tjICxFKuItLcsiFCGtpA8CnJ7AoMXOLQusxI0zcKzBIKinmwPQN/aUv0NCB9szTq +jktk9T79syNnFQ0EuPAtwQlRPLJsFfClI9eDdOTlLsn+mCdCxqvGnrDQWzilm1De +fhiYtUU79nm06PcaewaD+9CL2rvHvRirCG88gGtAPxkZumWK5r7VXNM21+9AUiRg +OGcEMeyP84LG3rlV8zsxkVrctQgVrXYlCg17LofiDKYGvCYQbTed7N14jHyAxfDZ +d0jQ +-----END CERTIFICATE----- + +# Issuer: CN=Global Chambersign Root - 2008 O=AC Camerfirma S.A. +# Subject: CN=Global Chambersign Root - 2008 O=AC Camerfirma S.A. 
+# Label: "Global Chambersign Root - 2008" +# Serial: 14541511773111788494 +# MD5 Fingerprint: 9e:80:ff:78:01:0c:2e:c1:36:bd:fe:96:90:6e:08:f3 +# SHA1 Fingerprint: 4a:bd:ee:ec:95:0d:35:9c:89:ae:c7:52:a1:2c:5b:29:f6:d6:aa:0c +# SHA256 Fingerprint: 13:63:35:43:93:34:a7:69:80:16:a0:d3:24:de:72:28:4e:07:9d:7b:52:20:bb:8f:bd:74:78:16:ee:be:ba:ca +-----BEGIN CERTIFICATE----- +MIIHSTCCBTGgAwIBAgIJAMnN0+nVfSPOMA0GCSqGSIb3DQEBBQUAMIGsMQswCQYD +VQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0 +IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3 +MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xJzAlBgNVBAMTHkdsb2JhbCBD +aGFtYmVyc2lnbiBSb290IC0gMjAwODAeFw0wODA4MDExMjMxNDBaFw0zODA3MzEx +MjMxNDBaMIGsMQswCQYDVQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUgY3Vy +cmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAG +A1UEBRMJQTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xJzAl +BgNVBAMTHkdsb2JhbCBDaGFtYmVyc2lnbiBSb290IC0gMjAwODCCAiIwDQYJKoZI +hvcNAQEBBQADggIPADCCAgoCggIBAMDfVtPkOpt2RbQT2//BthmLN0EYlVJH6xed +KYiONWwGMi5HYvNJBL99RDaxccy9Wglz1dmFRP+RVyXfXjaOcNFccUMd2drvXNL7 +G706tcuto8xEpw2uIRU/uXpbknXYpBI4iRmKt4DS4jJvVpyR1ogQC7N0ZJJ0YPP2 +zxhPYLIj0Mc7zmFLmY/CDNBAspjcDahOo7kKrmCgrUVSY7pmvWjg+b4aqIG7HkF4 +ddPB/gBVsIdU6CeQNR1MM62X/JcumIS/LMmjv9GYERTtY/jKmIhYF5ntRQOXfjyG +HoiMvvKRhI9lNNgATH23MRdaKXoKGCQwoze1eqkBfSbW+Q6OWfH9GzO1KTsXO0G2 +Id3UwD2ln58fQ1DJu7xsepeY7s2MH/ucUa6LcL0nn3HAa6x9kGbo1106DbDVwo3V +yJ2dwW3Q0L9R5OP4wzg2rtandeavhENdk5IMagfeOx2YItaswTXbo6Al/3K1dh3e +beksZixShNBFks4c5eUzHdwHU1SjqoI7mjcv3N2gZOnm3b2u/GSFHTynyQbehP9r +6GsaPMWis0L7iwk+XwhSx2LE1AVxv8Rk5Pihg+g+EpuoHtQ2TS9x9o0o9oOpE9Jh +wZG7SMA0j0GMS0zbaRL/UJScIINZc+18ofLx/d33SdNDWKBWY8o9PeU1VlnpDsog +zCtLkykPAgMBAAGjggFqMIIBZjASBgNVHRMBAf8ECDAGAQH/AgEMMB0GA1UdDgQW +BBS5CcqcHtvTbDprru1U8VuTBjUuXjCB4QYDVR0jBIHZMIHWgBS5CcqcHtvTbDpr +ru1U8VuTBjUuXqGBsqSBrzCBrDELMAkGA1UEBhMCRVUxQzBBBgNVBAcTOk1hZHJp +ZCAoc2VlIGN1cnJlbnQgYWRkcmVzcyBhdCB3d3cuY2FtZXJmaXJtYS5jb20vYWRk +cmVzcykxEjAQBgNVBAUTCUE4Mjc0MzI4NzEbMBkGA1UEChMSQUMgQ2FtZXJmaXJt +YSBTLkEuMScwJQYDVQQDEx5HbG9iYWwgQ2hhbWJlcnNpZ24gUm9vdCAtIDIwMDiC +CQDJzdPp1X0jzjAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRVHSAAMCow +KAYIKwYBBQUHAgEWHGh0dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20wDQYJKoZI +hvcNAQEFBQADggIBAICIf3DekijZBZRG/5BXqfEv3xoNa/p8DhxJJHkn2EaqbylZ +UohwEurdPfWbU1Rv4WCiqAm57OtZfMY18dwY6fFn5a+6ReAJ3spED8IXDneRRXoz +X1+WLGiLwUePmJs9wOzL9dWCkoQ10b42OFZyMVtHLaoXpGNR6woBrX/sdZ7LoR/x +fxKxueRkf2fWIyr0uDldmOghp+G9PUIadJpwr2hsUF1Jz//7Dl3mLEfXgTpZALVz +a2Mg9jFFCDkO9HB+QHBaP9BrQql0PSgvAm11cpUJjUhjxsYjV5KTXjXBjfkK9yyd +Yhz2rXzdpjEetrHHfoUm+qRqtdpjMNHvkzeyZi99Bffnt0uYlDXA2TopwZ2yUDMd +SqlapskD7+3056huirRXhOukP9DuqqqHW2Pok+JrqNS4cnhrG+055F3Lm6qH1U9O +AP7Zap88MQ8oAgF9mOinsKJknnn4SPIVqczmyETrP3iZ8ntxPjzxmKfFGBI/5rso +M0LpRQp8bfKGeS/Fghl9CYl8slR2iK7ewfPM4W7bMdaTrpmg7yVqc5iJWzouE4ge +v8CSlDQb4ye3ix5vQv/n6TebUB0tovkC7stYWDpxvGjjqsGvHCgfotwjZT+B6q6Z +09gwzxMNTxXJhLynSC34MCN32EZLeW32jO06f2ARePTpm67VVMB0gNELQp/B +-----END CERTIFICATE----- + +# Issuer: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc. +# Subject: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc. 
+# Label: "Go Daddy Root Certificate Authority - G2" +# Serial: 0 +# MD5 Fingerprint: 80:3a:bc:22:c1:e6:fb:8d:9b:3b:27:4a:32:1b:9a:01 +# SHA1 Fingerprint: 47:be:ab:c9:22:ea:e8:0e:78:78:34:62:a7:9f:45:c2:54:fd:e6:8b +# SHA256 Fingerprint: 45:14:0b:32:47:eb:9c:c8:c5:b4:f0:d7:b5:30:91:f7:32:92:08:9e:6e:5a:63:e2:74:9d:d3:ac:a9:19:8e:da +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIBADANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMx +EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoT +EUdvRGFkZHkuY29tLCBJbmMuMTEwLwYDVQQDEyhHbyBEYWRkeSBSb290IENlcnRp +ZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIz +NTk1OVowgYMxCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQH +EwpTY290dHNkYWxlMRowGAYDVQQKExFHb0RhZGR5LmNvbSwgSW5jLjExMC8GA1UE +AxMoR28gRGFkZHkgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL9xYgjx+lk09xvJGKP3gElY6SKD +E6bFIEMBO4Tx5oVJnyfq9oQbTqC023CYxzIBsQU+B07u9PpPL1kwIuerGVZr4oAH +/PMWdYA5UXvl+TW2dE6pjYIT5LY/qQOD+qK+ihVqf94Lw7YZFAXK6sOoBJQ7Rnwy +DfMAZiLIjWltNowRGLfTshxgtDj6AozO091GB94KPutdfMh8+7ArU6SSYmlRJQVh +GkSBjCypQ5Yj36w6gZoOKcUcqeldHraenjAKOc7xiID7S13MMuyFYkMlNAJWJwGR +tDtwKj9useiciAF9n9T521NtYJ2/LOdYq7hfRvzOxBsDPAnrSTFcaUaz4EcCAwEA +AaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYE +FDqahQcQZyi27/a9BUFuIMGU2g/eMA0GCSqGSIb3DQEBCwUAA4IBAQCZ21151fmX +WWcDYfF+OwYxdS2hII5PZYe096acvNjpL9DbWu7PdIxztDhC2gV7+AJ1uP2lsdeu +9tfeE8tTEH6KRtGX+rcuKxGrkLAngPnon1rpN5+r5N9ss4UXnT3ZJE95kTXWXwTr +gIOrmgIttRD02JDHBHNA7XIloKmf7J6raBKZV8aPEjoJpL1E/QYVN8Gb5DKj7Tjo +2GTzLH4U/ALqn83/B2gX2yKQOC16jdFU8WnjXzPKej17CuPKf1855eJ1usV2GDPO +LPAvTK33sefOT6jEm0pUBsV/fdUID+Ic/n4XuKxe9tQWskMJDE32p2u0mYRlynqI +4uJEvlz36hz1 +-----END CERTIFICATE----- + +# Issuer: CN=Starfield Root Certificate Authority - G2 O=Starfield Technologies, Inc. +# Subject: CN=Starfield Root Certificate Authority - G2 O=Starfield Technologies, Inc. 
+# Label: "Starfield Root Certificate Authority - G2" +# Serial: 0 +# MD5 Fingerprint: d6:39:81:c6:52:7e:96:69:fc:fc:ca:66:ed:05:f2:96 +# SHA1 Fingerprint: b5:1c:06:7c:ee:2b:0c:3d:f8:55:ab:2d:92:f4:fe:39:d4:e7:0f:0e +# SHA256 Fingerprint: 2c:e1:cb:0b:f9:d2:f9:e1:02:99:3f:be:21:51:52:c3:b2:dd:0c:ab:de:1c:68:e5:31:9b:83:91:54:db:b7:f5 +-----BEGIN CERTIFICATE----- +MIID3TCCAsWgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBjzELMAkGA1UEBhMCVVMx +EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT +HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAMTKVN0YXJmaWVs +ZCBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAw +MFoXDTM3MTIzMTIzNTk1OVowgY8xCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6 +b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFyZmllbGQgVGVj +aG5vbG9naWVzLCBJbmMuMTIwMAYDVQQDEylTdGFyZmllbGQgUm9vdCBDZXJ0aWZp +Y2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAL3twQP89o/8ArFvW59I2Z154qK3A2FWGMNHttfKPTUuiUP3oWmb3ooa/RMg +nLRJdzIpVv257IzdIvpy3Cdhl+72WoTsbhm5iSzchFvVdPtrX8WJpRBSiUZV9Lh1 +HOZ/5FSuS/hVclcCGfgXcVnrHigHdMWdSL5stPSksPNkN3mSwOxGXn/hbVNMYq/N +Hwtjuzqd+/x5AJhhdM8mgkBj87JyahkNmcrUDnXMN/uLicFZ8WJ/X7NfZTD4p7dN +dloedl40wOiWVpmKs/B/pM293DIxfJHP4F8R+GuqSVzRmZTRouNjWwl2tVZi4Ut0 +HZbUJtQIBFnQmA4O5t78w+wfkPECAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO +BgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFHwMMh+n2TB/xH1oo2Kooc6rB1snMA0G +CSqGSIb3DQEBCwUAA4IBAQARWfolTwNvlJk7mh+ChTnUdgWUXuEok21iXQnCoKjU +sHU48TRqneSfioYmUeYs0cYtbpUgSpIB7LiKZ3sx4mcujJUDJi5DnUox9g61DLu3 +4jd/IroAow57UvtruzvE03lRTs2Q9GcHGcg8RnoNAX3FWOdt5oUwF5okxBDgBPfg +8n/Uqgr/Qh037ZTlZFkSIHc40zI+OIF1lnP6aI+xy84fxez6nH7PfrHxBy22/L/K +pL/QlwVKvOoYKAKQvVR4CSFx09F9HdkWsKlhPdAKACL8x3vLCWRFCztAgfd9fDL1 +mMpYjn0q7pBZc2T5NnReJaH1ZgUufzkVqSr7UIuOhWn0 +-----END CERTIFICATE----- + +# Issuer: CN=Starfield Services Root Certificate Authority - G2 O=Starfield Technologies, Inc. +# Subject: CN=Starfield Services Root Certificate Authority - G2 O=Starfield Technologies, Inc. 
+# Label: "Starfield Services Root Certificate Authority - G2" +# Serial: 0 +# MD5 Fingerprint: 17:35:74:af:7b:61:1c:eb:f4:f9:3c:e2:ee:40:f9:a2 +# SHA1 Fingerprint: 92:5a:8f:8d:2c:6d:04:e0:66:5f:59:6a:ff:22:d8:63:e8:25:6f:3f +# SHA256 Fingerprint: 56:8d:69:05:a2:c8:87:08:a4:b3:02:51:90:ed:cf:ed:b1:97:4a:60:6a:13:c6:e5:29:0f:cb:2a:e6:3e:da:b5 +-----BEGIN CERTIFICATE----- +MIID7zCCAtegAwIBAgIBADANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UEBhMCVVMx +EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT +HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xOzA5BgNVBAMTMlN0YXJmaWVs +ZCBTZXJ2aWNlcyBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5 +MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgZgxCzAJBgNVBAYTAlVTMRAwDgYD +VQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFy +ZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTswOQYDVQQDEzJTdGFyZmllbGQgU2Vy +dmljZXMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBANUMOsQq+U7i9b4Zl1+OiFOxHz/Lz58gE20p +OsgPfTz3a3Y4Y9k2YKibXlwAgLIvWX/2h/klQ4bnaRtSmpDhcePYLQ1Ob/bISdm2 +8xpWriu2dBTrz/sm4xq6HZYuajtYlIlHVv8loJNwU4PahHQUw2eeBGg6345AWh1K +Ts9DkTvnVtYAcMtS7nt9rjrnvDH5RfbCYM8TWQIrgMw0R9+53pBlbQLPLJGmpufe +hRhJfGZOozptqbXuNC66DQO4M99H67FrjSXZm86B0UVGMpZwh94CDklDhbZsc7tk +6mFBrMnUVN+HL8cisibMn1lUaJ/8viovxFUcdUBgF4UCVTmLfwUCAwEAAaNCMEAw +DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJxfAN+q +AdcwKziIorhtSpzyEZGDMA0GCSqGSIb3DQEBCwUAA4IBAQBLNqaEd2ndOxmfZyMI +bw5hyf2E3F/YNoHN2BtBLZ9g3ccaaNnRbobhiCPPE95Dz+I0swSdHynVv/heyNXB +ve6SbzJ08pGCL72CQnqtKrcgfU28elUSwhXqvfdqlS5sdJ/PHLTyxQGjhdByPq1z +qwubdQxtRbeOlKyWN7Wg0I8VRw7j6IPdj/3vQQF3zCepYoUz8jcI73HPdwbeyBkd +iEDPfUYd/x7H4c7/I9vG+o1VTqkC50cRRj70/b17KSa7qWFiNyi2LSr2EIZkyXCn +0q23KXB56jzaYyWf/Wi3MOxw+3WKt21gZ7IeyLnp2KhvAotnDU0mV3HaIPzBSlCN +sSi6 +-----END CERTIFICATE----- + +# Issuer: CN=AffirmTrust Commercial O=AffirmTrust +# Subject: CN=AffirmTrust Commercial O=AffirmTrust +# Label: "AffirmTrust Commercial" +# Serial: 8608355977964138876 +# MD5 Fingerprint: 82:92:ba:5b:ef:cd:8a:6f:a6:3d:55:f9:84:f6:d6:b7 +# SHA1 Fingerprint: f9:b5:b6:32:45:5f:9c:be:ec:57:5f:80:dc:e9:6e:2c:c7:b2:78:b7 +# SHA256 Fingerprint: 03:76:ab:1d:54:c5:f9:80:3c:e4:b2:e2:01:a0:ee:7e:ef:7b:57:b6:36:e8:a9:3c:9b:8d:48:60:c9:6f:5f:a7 +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIId3cGJyapsXwwDQYJKoZIhvcNAQELBQAwRDELMAkGA1UE +BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz +dCBDb21tZXJjaWFsMB4XDTEwMDEyOTE0MDYwNloXDTMwMTIzMTE0MDYwNlowRDEL +MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp +cm1UcnVzdCBDb21tZXJjaWFsMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEA9htPZwcroRX1BiLLHwGy43NFBkRJLLtJJRTWzsO3qyxPxkEylFf6EqdbDuKP +Hx6GGaeqtS25Xw2Kwq+FNXkyLbscYjfysVtKPcrNcV/pQr6U6Mje+SJIZMblq8Yr +ba0F8PrVC8+a5fBQpIs7R6UjW3p6+DM/uO+Zl+MgwdYoic+U+7lF7eNAFxHUdPAL +MeIrJmqbTFeurCA+ukV6BfO9m2kVrn1OIGPENXY6BwLJN/3HR+7o8XYdcxXyl6S1 +yHp52UKqK39c/s4mT6NmgTWvRLpUHhwwMmWd5jyTXlBOeuM61G7MGvv50jeuJCqr +VwMiKA1JdX+3KNp1v47j3A55MQIDAQABo0IwQDAdBgNVHQ4EFgQUnZPGU4teyq8/ +nx4P5ZmVvCT2lI8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ +KoZIhvcNAQELBQADggEBAFis9AQOzcAN/wr91LoWXym9e2iZWEnStB03TX8nfUYG +XUPGhi4+c7ImfU+TqbbEKpqrIZcUsd6M06uJFdhrJNTxFq7YpFzUf1GO7RgBsZNj +vbz4YYCanrHOQnDiqX0GJX0nof5v7LMeJNrjS1UaADs1tDvZ110w/YETifLCBivt +Z8SOyUOyXGsViQK8YvxO8rUzqrJv0wqiUOP2O+guRMLbZjipM1ZI8W0bM40NjD9g +N53Tym1+NH4Nn3J2ixufcv1SNUFFApYvHLKac0khsUlHRUe072o0EclNmsxZt9YC +nlpOZbWUrhvfKbAW8b8Angc6F2S1BLUjIZkKlTuXfO8= +-----END CERTIFICATE----- + +# Issuer: CN=AffirmTrust Networking O=AffirmTrust +# Subject: CN=AffirmTrust Networking 
O=AffirmTrust +# Label: "AffirmTrust Networking" +# Serial: 8957382827206547757 +# MD5 Fingerprint: 42:65:ca:be:01:9a:9a:4c:a9:8c:41:49:cd:c0:d5:7f +# SHA1 Fingerprint: 29:36:21:02:8b:20:ed:02:f5:66:c5:32:d1:d6:ed:90:9f:45:00:2f +# SHA256 Fingerprint: 0a:81:ec:5a:92:97:77:f1:45:90:4a:f3:8d:5d:50:9f:66:b5:e2:c5:8f:cd:b5:31:05:8b:0e:17:f3:f0:b4:1b +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIIfE8EORzUmS0wDQYJKoZIhvcNAQEFBQAwRDELMAkGA1UE +BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz +dCBOZXR3b3JraW5nMB4XDTEwMDEyOTE0MDgyNFoXDTMwMTIzMTE0MDgyNFowRDEL +MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp +cm1UcnVzdCBOZXR3b3JraW5nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEAtITMMxcua5Rsa2FSoOujz3mUTOWUgJnLVWREZY9nZOIG41w3SfYvm4SEHi3y +YJ0wTsyEheIszx6e/jarM3c1RNg1lho9Nuh6DtjVR6FqaYvZ/Ls6rnla1fTWcbua +kCNrmreIdIcMHl+5ni36q1Mr3Lt2PpNMCAiMHqIjHNRqrSK6mQEubWXLviRmVSRL +QESxG9fhwoXA3hA/Pe24/PHxI1Pcv2WXb9n5QHGNfb2V1M6+oF4nI979ptAmDgAp +6zxG8D1gvz9Q0twmQVGeFDdCBKNwV6gbh+0t+nvujArjqWaJGctB+d1ENmHP4ndG +yH329JKBNv3bNPFyfvMMFr20FQIDAQABo0IwQDAdBgNVHQ4EFgQUBx/S55zawm6i +QLSwelAQUHTEyL0wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ +KoZIhvcNAQEFBQADggEBAIlXshZ6qML91tmbmzTCnLQyFE2npN/svqe++EPbkTfO +tDIuUFUaNU52Q3Eg75N3ThVwLofDwR1t3Mu1J9QsVtFSUzpE0nPIxBsFZVpikpzu +QY0x2+c06lkh1QF612S4ZDnNye2v7UsDSKegmQGA3GWjNq5lWUhPgkvIZfFXHeVZ +Lgo/bNjR9eUJtGxUAArgFU2HdW23WJZa3W3SAKD0m0i+wzekujbgfIeFlxoVot4u +olu9rxj5kFDNcFn4J2dHy8egBzp90SxdbBk6ZrV9/ZFvgrG+CJPbFEfxojfHRZ48 +x3evZKiT3/Zpg4Jg8klCNO1aAFSFHBY2kgxc+qatv9s= +-----END CERTIFICATE----- + +# Issuer: CN=AffirmTrust Premium O=AffirmTrust +# Subject: CN=AffirmTrust Premium O=AffirmTrust +# Label: "AffirmTrust Premium" +# Serial: 7893706540734352110 +# MD5 Fingerprint: c4:5d:0e:48:b6:ac:28:30:4e:0a:bc:f9:38:16:87:57 +# SHA1 Fingerprint: d8:a6:33:2c:e0:03:6f:b1:85:f6:63:4f:7d:6a:06:65:26:32:28:27 +# SHA256 Fingerprint: 70:a7:3f:7f:37:6b:60:07:42:48:90:45:34:b1:14:82:d5:bf:0e:69:8e:cc:49:8d:f5:25:77:eb:f2:e9:3b:9a +-----BEGIN CERTIFICATE----- +MIIFRjCCAy6gAwIBAgIIbYwURrGmCu4wDQYJKoZIhvcNAQEMBQAwQTELMAkGA1UE +BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVz +dCBQcmVtaXVtMB4XDTEwMDEyOTE0MTAzNloXDTQwMTIzMTE0MTAzNlowQTELMAkG +A1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1U +cnVzdCBQcmVtaXVtMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxBLf +qV/+Qd3d9Z+K4/as4Tx4mrzY8H96oDMq3I0gW64tb+eT2TZwamjPjlGjhVtnBKAQ +JG9dKILBl1fYSCkTtuG+kU3fhQxTGJoeJKJPj/CihQvL9Cl/0qRY7iZNyaqoe5rZ ++jjeRFcV5fiMyNlI4g0WJx0eyIOFJbe6qlVBzAMiSy2RjYvmia9mx+n/K+k8rNrS +s8PhaJyJ+HoAVt70VZVs+7pk3WKL3wt3MutizCaam7uqYoNMtAZ6MMgpv+0GTZe5 +HMQxK9VfvFMSF5yZVylmd2EhMQcuJUmdGPLu8ytxjLW6OQdJd/zvLpKQBY0tL3d7 +70O/Nbua2Plzpyzy0FfuKE4mX4+QaAkvuPjcBukumj5Rp9EixAqnOEhss/n/fauG +V+O61oV4d7pD6kh/9ti+I20ev9E2bFhc8e6kGVQa9QPSdubhjL08s9NIS+LI+H+S +qHZGnEJlPqQewQcDWkYtuJfzt9WyVSHvutxMAJf7FJUnM7/oQ0dG0giZFmA7mn7S +5u046uwBHjxIVkkJx0w3AJ6IDsBz4W9m6XJHMD4Q5QsDyZpCAGzFlH5hxIrff4Ia +C1nEWTJ3s7xgaVY5/bQGeyzWZDbZvUjthB9+pSKPKrhC9IK31FOQeE4tGv2Bb0TX +OwF0lkLgAOIua+rF7nKsu7/+6qqo+Nz2snmKtmcCAwEAAaNCMEAwHQYDVR0OBBYE +FJ3AZ6YMItkm9UWrpmVSESfYRaxjMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/ +BAQDAgEGMA0GCSqGSIb3DQEBDAUAA4ICAQCzV00QYk465KzquByvMiPIs0laUZx2 +KI15qldGF9X1Uva3ROgIRL8YhNILgM3FEv0AVQVhh0HctSSePMTYyPtwni94loMg +Nt58D2kTiKV1NpgIpsbfrM7jWNa3Pt668+s0QNiigfV4Py/VpfzZotReBA4Xrf5B +8OWycvpEgjNC6C1Y91aMYj+6QrCcDFx+LmUmXFNPALJ4fqENmS2NuB2OosSw/WDQ +MKSOyARiqcTtNd56l+0OOF6SL5Nwpamcb6d9Ex1+xghIsV5n61EIJenmJWtSKZGc +0jlzCFfemQa0W50QBuHCAKi4HEoCChTQwUHK+4w1IX2COPKpVJEZNZOUbWo6xbLQ 
+u4mGk+ibyQ86p3q4ofB4Rvr8Ny/lioTz3/4E2aFooC8k4gmVBtWVyuEklut89pMF +u+1z6S3RdTnX5yTb2E5fQ4+e0BQ5v1VwSJlXMbSc7kqYA5YwH2AG7hsj/oFgIxpH +YoWlzBk0gG+zrBrjn/B7SK3VAdlntqlyk+otZrWyuOQ9PLLvTIzq6we/qzWaVYa8 +GKa1qF60g2xraUDTn9zxw2lrueFtCfTxqlB2Cnp9ehehVZZCmTEJ3WARjQUwfuaO +RtGdFNrHF+QFlozEJLUbzxQHskD4o55BhrwE0GuWyCqANP2/7waj3VjFhT0+j/6e +KeC2uAloGRwYQw== +-----END CERTIFICATE----- + +# Issuer: CN=AffirmTrust Premium ECC O=AffirmTrust +# Subject: CN=AffirmTrust Premium ECC O=AffirmTrust +# Label: "AffirmTrust Premium ECC" +# Serial: 8401224907861490260 +# MD5 Fingerprint: 64:b0:09:55:cf:b1:d5:99:e2:be:13:ab:a6:5d:ea:4d +# SHA1 Fingerprint: b8:23:6b:00:2f:1d:16:86:53:01:55:6c:11:a4:37:ca:eb:ff:c3:bb +# SHA256 Fingerprint: bd:71:fd:f6:da:97:e4:cf:62:d1:64:7a:dd:25:81:b0:7d:79:ad:f8:39:7e:b4:ec:ba:9c:5e:84:88:82:14:23 +-----BEGIN CERTIFICATE----- +MIIB/jCCAYWgAwIBAgIIdJclisc/elQwCgYIKoZIzj0EAwMwRTELMAkGA1UEBhMC +VVMxFDASBgNVBAoMC0FmZmlybVRydXN0MSAwHgYDVQQDDBdBZmZpcm1UcnVzdCBQ +cmVtaXVtIEVDQzAeFw0xMDAxMjkxNDIwMjRaFw00MDEyMzExNDIwMjRaMEUxCzAJ +BgNVBAYTAlVTMRQwEgYDVQQKDAtBZmZpcm1UcnVzdDEgMB4GA1UEAwwXQWZmaXJt +VHJ1c3QgUHJlbWl1bSBFQ0MwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQNMF4bFZ0D +0KF5Nbc6PJJ6yhUczWLznCZcBz3lVPqj1swS6vQUX+iOGasvLkjmrBhDeKzQN8O9 +ss0s5kfiGuZjuD0uL3jET9v0D6RoTFVya5UdThhClXjMNzyR4ptlKymjQjBAMB0G +A1UdDgQWBBSaryl6wBE1NSZRMADDav5A1a7WPDAPBgNVHRMBAf8EBTADAQH/MA4G +A1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNnADBkAjAXCfOHiFBar8jAQr9HX/Vs +aobgxCd05DhT1wV/GzTjxi+zygk8N53X57hG8f2h4nECMEJZh0PUUd+60wkyWs6I +flc9nF9Ca/UHLbXwgpP5WW+uZPpY5Yse42O+tYHNbwKMeQ== +-----END CERTIFICATE----- + +# Issuer: CN=Certum Trusted Network CA O=Unizeto Technologies S.A. OU=Certum Certification Authority +# Subject: CN=Certum Trusted Network CA O=Unizeto Technologies S.A. OU=Certum Certification Authority +# Label: "Certum Trusted Network CA" +# Serial: 279744 +# MD5 Fingerprint: d5:e9:81:40:c5:18:69:fc:46:2c:89:75:62:0f:aa:78 +# SHA1 Fingerprint: 07:e0:32:e0:20:b7:2c:3f:19:2f:06:28:a2:59:3a:19:a7:0f:06:9e +# SHA256 Fingerprint: 5c:58:46:8d:55:f5:8e:49:7e:74:39:82:d2:b5:00:10:b6:d1:65:37:4a:cf:83:a7:d4:a3:2d:b7:68:c4:40:8e +-----BEGIN CERTIFICATE----- +MIIDuzCCAqOgAwIBAgIDBETAMA0GCSqGSIb3DQEBBQUAMH4xCzAJBgNVBAYTAlBM +MSIwIAYDVQQKExlVbml6ZXRvIFRlY2hub2xvZ2llcyBTLkEuMScwJQYDVQQLEx5D +ZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxIjAgBgNVBAMTGUNlcnR1bSBU +cnVzdGVkIE5ldHdvcmsgQ0EwHhcNMDgxMDIyMTIwNzM3WhcNMjkxMjMxMTIwNzM3 +WjB+MQswCQYDVQQGEwJQTDEiMCAGA1UEChMZVW5pemV0byBUZWNobm9sb2dpZXMg +Uy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MSIw +IAYDVQQDExlDZXJ0dW0gVHJ1c3RlZCBOZXR3b3JrIENBMIIBIjANBgkqhkiG9w0B +AQEFAAOCAQ8AMIIBCgKCAQEA4/t9o3K6wvDJFIf1awFO4W5AB7ptJ11/91sts1rH +UV+rpDKmYYe2bg+G0jACl/jXaVehGDldamR5xgFZrDwxSjh80gTSSyjoIF87B6LM +TXPb865Px1bVWqeWifrzq2jUI4ZZJ88JJ7ysbnKDHDBy3+Ci6dLhdHUZvSqeexVU +BBvXQzmtVSjF4hq79MDkrjhJM8x2hZ85RdKknvISjFH4fOQtf/WsX+sWn7Et0brM +kUJ3TCXJkDhv2/DM+44el1k+1WBO5gUo7Ul5E0u6SNsv+XLTOcr+H9g0cvW0QM8x +AcPs3hEtF10fuFDRXhmnad4HMyjKUJX5p1TLVIZQRan5SQIDAQABo0IwQDAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBQIds3LB/8k9sXN7buQvOKEN0Z19zAOBgNV +HQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQEFBQADggEBAKaorSLOAT2mo/9i0Eidi15y +sHhE49wcrwn9I0j6vSrEuVUEtRCjjSfeC4Jj0O7eDDd5QVsisrCaQVymcODU0HfL +I9MA4GxWL+FpDQ3Zqr8hgVDZBqWo/5U30Kr+4rP1mS1FhIrlQgnXdAIv94nYmem8 +J9RHjboNRhx3zxSkHLmkMcScKHQDNP8zGSal6Q10tz6XxnboJ5ajZt3hrvJBW8qY +VoNzcOSGGtIxQbovvi0TWnZvTuhOgQ4/WwMioBK+ZlgRSssDxLQqKi2WF+A5VLxI +03YnnZotBqbJ7DnSq9ufmgsnAjUpsUCV5/nonFWIGUbWtzT1fs45mtk48VH3Tyw= +-----END CERTIFICATE----- + +# Issuer: CN=TWCA Root Certification Authority O=TAIWAN-CA 
OU=Root CA +# Subject: CN=TWCA Root Certification Authority O=TAIWAN-CA OU=Root CA +# Label: "TWCA Root Certification Authority" +# Serial: 1 +# MD5 Fingerprint: aa:08:8f:f6:f9:7b:b7:f2:b1:a7:1e:9b:ea:ea:bd:79 +# SHA1 Fingerprint: cf:9e:87:6d:d3:eb:fc:42:26:97:a3:b5:a3:7a:a0:76:a9:06:23:48 +# SHA256 Fingerprint: bf:d8:8f:e1:10:1c:41:ae:3e:80:1b:f8:be:56:35:0e:e9:ba:d1:a6:b9:bd:51:5e:dc:5c:6d:5b:87:11:ac:44 +-----BEGIN CERTIFICATE----- +MIIDezCCAmOgAwIBAgIBATANBgkqhkiG9w0BAQUFADBfMQswCQYDVQQGEwJUVzES +MBAGA1UECgwJVEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFU +V0NBIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwODI4MDcyNDMz +WhcNMzAxMjMxMTU1OTU5WjBfMQswCQYDVQQGEwJUVzESMBAGA1UECgwJVEFJV0FO +LUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NBIFJvb3QgQ2VydGlm +aWNhdGlvbiBBdXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB +AQCwfnK4pAOU5qfeCTiRShFAh6d8WWQUe7UREN3+v9XAu1bihSX0NXIP+FPQQeFE +AcK0HMMxQhZHhTMidrIKbw/lJVBPhYa+v5guEGcevhEFhgWQxFnQfHgQsIBct+HH +K3XLfJ+utdGdIzdjp9xCoi2SBBtQwXu4PhvJVgSLL1KbralW6cH/ralYhzC2gfeX +RfwZVzsrb+RH9JlF/h3x+JejiB03HFyP4HYlmlD4oFT/RJB2I9IyxsOrBr/8+7/z +rX2SYgJbKdM1o5OaQ2RgXbL6Mv87BK9NQGr5x+PvI/1ry+UPizgN7gr8/g+YnzAx +3WxSZfmLgb4i4RxYA7qRG4kHAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqOFsmjd6LWvJPelSDGRjjCDWmujANBgkq +hkiG9w0BAQUFAAOCAQEAPNV3PdrfibqHDAhUaiBQkr6wQT25JmSDCi/oQMCXKCeC +MErJk/9q56YAf4lCmtYR5VPOL8zy2gXE/uJQxDqGfczafhAJO5I1KlOy/usrBdls +XebQ79NqZp4VKIV66IIArB6nCWlWQtNoURi+VJq/REG6Sb4gumlc7rh3zc5sH62D +lhh9DrUUOYTxKOkto557HnpyWoOzeW/vtPzQCqVYT0bf+215WfKEIlKuD8z7fDvn +aspHYcN6+NOSBB+4IIThNlQWx0DeO4pz3N/GCUzf7Nr/1FNCocnyYh0igzyXxfkZ +YiesZSLX0zzG5Y6yU8xJzrww/nsOM5D77dIUkR8Hrw== +-----END CERTIFICATE----- + +# Issuer: O=SECOM Trust Systems CO.,LTD. OU=Security Communication RootCA2 +# Subject: O=SECOM Trust Systems CO.,LTD. 
OU=Security Communication RootCA2 +# Label: "Security Communication RootCA2" +# Serial: 0 +# MD5 Fingerprint: 6c:39:7d:a4:0e:55:59:b2:3f:d6:41:b1:12:50:de:43 +# SHA1 Fingerprint: 5f:3b:8c:f2:f8:10:b3:7d:78:b4:ce:ec:19:19:c3:73:34:b9:c7:74 +# SHA256 Fingerprint: 51:3b:2c:ec:b8:10:d4:cd:e5:dd:85:39:1a:df:c6:c2:dd:60:d8:7b:b7:36:d2:b5:21:48:4a:a4:7a:0e:be:f6 +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIBADANBgkqhkiG9w0BAQsFADBdMQswCQYDVQQGEwJKUDEl +MCMGA1UEChMcU0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEnMCUGA1UECxMe +U2VjdXJpdHkgQ29tbXVuaWNhdGlvbiBSb290Q0EyMB4XDTA5MDUyOTA1MDAzOVoX +DTI5MDUyOTA1MDAzOVowXTELMAkGA1UEBhMCSlAxJTAjBgNVBAoTHFNFQ09NIFRy +dXN0IFN5c3RlbXMgQ08uLExURC4xJzAlBgNVBAsTHlNlY3VyaXR5IENvbW11bmlj +YXRpb24gUm9vdENBMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANAV +OVKxUrO6xVmCxF1SrjpDZYBLx/KWvNs2l9amZIyoXvDjChz335c9S672XewhtUGr +zbl+dp+++T42NKA7wfYxEUV0kz1XgMX5iZnK5atq1LXaQZAQwdbWQonCv/Q4EpVM +VAX3NuRFg3sUZdbcDE3R3n4MqzvEFb46VqZab3ZpUql6ucjrappdUtAtCms1FgkQ +hNBqyjoGADdH5H5XTz+L62e4iKrFvlNVspHEfbmwhRkGeC7bYRr6hfVKkaHnFtWO +ojnflLhwHyg/i/xAXmODPIMqGplrz95Zajv8bxbXH/1KEOtOghY6rCcMU/Gt1SSw +awNQwS08Ft1ENCcadfsCAwEAAaNCMEAwHQYDVR0OBBYEFAqFqXdlBZh8QIH4D5cs +OPEK7DzPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3 +DQEBCwUAA4IBAQBMOqNErLlFsceTfsgLCkLfZOoc7llsCLqJX2rKSpWeeo8HxdpF +coJxDjrSzG+ntKEju/Ykn8sX/oymzsLS28yN/HH8AynBbF0zX2S2ZTuJbxh2ePXc +okgfGT+Ok+vx+hfuzU7jBBJV1uXk3fs+BXziHV7Gp7yXT2g69ekuCkO2r1dcYmh8 +t/2jioSgrGK+KwmHNPBqAbubKVY8/gA3zyNs8U6qtnRGEmyR7jTV7JqR50S+kDFy +1UkC9gLl9B/rfNmWVan/7Ir5mUf/NVoCqgTLiluHcSmRvaS0eg29mvVXIwAHIRc/ +SjnRBUkLp7Y3gaVdjKozXoEofKd9J+sAro03 +-----END CERTIFICATE----- + +# Issuer: CN=Hellenic Academic and Research Institutions RootCA 2011 O=Hellenic Academic and Research Institutions Cert. Authority +# Subject: CN=Hellenic Academic and Research Institutions RootCA 2011 O=Hellenic Academic and Research Institutions Cert. 
Authority +# Label: "Hellenic Academic and Research Institutions RootCA 2011" +# Serial: 0 +# MD5 Fingerprint: 73:9f:4c:4b:73:5b:79:e9:fa:ba:1c:ef:6e:cb:d5:c9 +# SHA1 Fingerprint: fe:45:65:9b:79:03:5b:98:a1:61:b5:51:2e:ac:da:58:09:48:22:4d +# SHA256 Fingerprint: bc:10:4f:15:a4:8b:e7:09:dc:a5:42:a7:e1:d4:b9:df:6f:05:45:27:e8:02:ea:a9:2d:59:54:44:25:8a:fe:71 +-----BEGIN CERTIFICATE----- +MIIEMTCCAxmgAwIBAgIBADANBgkqhkiG9w0BAQUFADCBlTELMAkGA1UEBhMCR1Ix +RDBCBgNVBAoTO0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1 +dGlvbnMgQ2VydC4gQXV0aG9yaXR5MUAwPgYDVQQDEzdIZWxsZW5pYyBBY2FkZW1p +YyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25zIFJvb3RDQSAyMDExMB4XDTExMTIw +NjEzNDk1MloXDTMxMTIwMTEzNDk1MlowgZUxCzAJBgNVBAYTAkdSMUQwQgYDVQQK +EztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25zIENl +cnQuIEF1dGhvcml0eTFAMD4GA1UEAxM3SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl +c2VhcmNoIEluc3RpdHV0aW9ucyBSb290Q0EgMjAxMTCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBAKlTAOMupvaO+mDYLZU++CwqVE7NuYRhlFhPjz2L5EPz +dYmNUeTDN9KKiE15HrcS3UN4SoqS5tdI1Q+kOilENbgH9mgdVc04UfCMJDGFr4PJ +fel3r+0ae50X+bOdOFAPplp5kYCvN66m0zH7tSYJnTxa71HFK9+WXesyHgLacEns +bgzImjeN9/E2YEsmLIKe0HjzDQ9jpFEw4fkrJxIH2Oq9GGKYsFk3fb7u8yBRQlqD +75O6aRXxYp2fmTmCobd0LovUxQt7L/DICto9eQqakxylKHJzkUOap9FNhYS5qXSP +FEDH3N6sQWRstBmbAmNtJGSPRLIl6s5ddAxjMlyNh+UCAwEAAaOBiTCBhjAPBgNV +HRMBAf8EBTADAQH/MAsGA1UdDwQEAwIBBjAdBgNVHQ4EFgQUppFC/RNhSiOeCKQp +5dgTBCPuQSUwRwYDVR0eBEAwPqA8MAWCAy5ncjAFggMuZXUwBoIELmVkdTAGggQu +b3JnMAWBAy5ncjAFgQMuZXUwBoEELmVkdTAGgQQub3JnMA0GCSqGSIb3DQEBBQUA +A4IBAQAf73lB4XtuP7KMhjdCSk4cNx6NZrokgclPEg8hwAOXhiVtXdMiKahsog2p +6z0GW5k6x8zDmjR/qw7IThzh+uTczQ2+vyT+bOdrwg3IBp5OjWEopmr95fZi6hg8 +TqBTnbI6nOulnJEWtk2C4AwFSKls9cz4y51JtPACpf1wA+2KIaWuE4ZJwzNzvoc7 +dIsXRSZMFpGD/md9zU1jZ/rzAxKWeAaNsWftjj++n08C9bMJL/NMh98qy5V8Acys +Nnq/onN694/BtZqhFLKPM58N7yLcZnuEvUUXBj08yrl3NI/K6s8/MT7jiOOASSXI +l7WdmplNsDz4SgCbZN2fOUvRJ9e4 +-----END CERTIFICATE----- + +# Issuer: CN=Actalis Authentication Root CA O=Actalis S.p.A./03358520967 +# Subject: CN=Actalis Authentication Root CA O=Actalis S.p.A./03358520967 +# Label: "Actalis Authentication Root CA" +# Serial: 6271844772424770508 +# MD5 Fingerprint: 69:c1:0d:4f:07:a3:1b:c3:fe:56:3d:04:bc:11:f6:a6 +# SHA1 Fingerprint: f3:73:b3:87:06:5a:28:84:8a:f2:f3:4a:ce:19:2b:dd:c7:8e:9c:ac +# SHA256 Fingerprint: 55:92:60:84:ec:96:3a:64:b9:6e:2a:be:01:ce:0b:a8:6a:64:fb:fe:bc:c7:aa:b5:af:c1:55:b3:7f:d7:60:66 +-----BEGIN CERTIFICATE----- +MIIFuzCCA6OgAwIBAgIIVwoRl0LE48wwDQYJKoZIhvcNAQELBQAwazELMAkGA1UE +BhMCSVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8w +MzM1ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290 +IENBMB4XDTExMDkyMjExMjIwMloXDTMwMDkyMjExMjIwMlowazELMAkGA1UEBhMC +SVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8wMzM1 +ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290IENB +MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAp8bEpSmkLO/lGMWwUKNv +UTufClrJwkg4CsIcoBh/kbWHuUA/3R1oHwiD1S0eiKD4j1aPbZkCkpAW1V8IbInX +4ay8IMKx4INRimlNAJZaby/ARH6jDuSRzVju3PvHHkVH3Se5CAGfpiEd9UEtL0z9 +KK3giq0itFZljoZUj5NDKd45RnijMCO6zfB9E1fAXdKDa0hMxKufgFpbOr3JpyI/ +gCczWw63igxdBzcIy2zSekciRDXFzMwujt0q7bd9Zg1fYVEiVRvjRuPjPdA1Yprb +rxTIW6HMiRvhMCb8oJsfgadHHwTrozmSBp+Z07/T6k9QnBn+locePGX2oxgkg4YQ +51Q+qDp2JE+BIcXjDwL4k5RHILv+1A7TaLndxHqEguNTVHnd25zS8gebLra8Pu2F +be8lEfKXGkJh90qX6IuxEAf6ZYGyojnP9zz/GPvG8VqLWeICrHuS0E4UT1lF9gxe +KF+w6D9Fz8+vm2/7hNN3WpVvrJSEnu68wEqPSpP4RCHiMUVhUE4Q2OM1fEwZtN4F +v6MGn8i1zeQf1xcGDXqVdFUNaBr8EBtiZJ1t4JWgw5QHVw0U5r0F+7if5t+L4sbn +fpb2U8WANFAoWPASUHEXMLrmeGO89LKtmyuy/uE5jF66CyCU3nuDuP/jVo23Eek7 
+jPKxwV2dpAtMK9myGPW1n0sCAwEAAaNjMGEwHQYDVR0OBBYEFFLYiDrIn3hm7Ynz +ezhwlMkCAjbQMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUUtiIOsifeGbt +ifN7OHCUyQICNtAwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQAL +e3KHwGCmSUyIWOYdiPcUZEim2FgKDk8TNd81HdTtBjHIgT5q1d07GjLukD0R0i70 +jsNjLiNmsGe+b7bAEzlgqqI0JZN1Ut6nna0Oh4lScWoWPBkdg/iaKWW+9D+a2fDz +WochcYBNy+A4mz+7+uAwTc+G02UQGRjRlwKxK3JCaKygvU5a2hi/a5iB0P2avl4V +SM0RFbnAKVy06Ij3Pjaut2L9HmLecHgQHEhb2rykOLpn7VU+Xlff1ANATIGk0k9j +pwlCCRT8AKnCgHNPLsBA2RF7SOp6AsDT6ygBJlh0wcBzIm2Tlf05fbsq4/aC4yyX +X04fkZT6/iyj2HYauE2yOE+b+h1IYHkm4vP9qdCa6HCPSXrW5b0KDtst842/6+Ok +fcvHlXHo2qN8xcL4dJIEG4aspCJTQLas/kx2z/uUMsA1n3Y/buWQbqCmJqK4LL7R +K4X9p2jIugErsWx0Hbhzlefut8cl8ABMALJ+tguLHPPAUJ4lueAI3jZm/zel0btU +ZCzJJ7VLkn5l/9Mt4blOvH+kQSGQQXemOR/qnuOf0GZvBeyqdn6/axag67XH/JJU +LysRJyU3eExRarDzzFhdFPFqSBX/wge2sY0PjlxQRrM9vwGYT7JZVEc+NHt4bVaT +LnPqZih4zR0Uv6CPLy64Lo7yFIrM6bV8+2ydDKXhlg== +-----END CERTIFICATE----- + +# Issuer: O=Trustis Limited OU=Trustis FPS Root CA +# Subject: O=Trustis Limited OU=Trustis FPS Root CA +# Label: "Trustis FPS Root CA" +# Serial: 36053640375399034304724988975563710553 +# MD5 Fingerprint: 30:c9:e7:1e:6b:e6:14:eb:65:b2:16:69:20:31:67:4d +# SHA1 Fingerprint: 3b:c0:38:0b:33:c3:f6:a6:0c:86:15:22:93:d9:df:f5:4b:81:c0:04 +# SHA256 Fingerprint: c1:b4:82:99:ab:a5:20:8f:e9:63:0a:ce:55:ca:68:a0:3e:da:5a:51:9c:88:02:a0:d3:a6:73:be:8f:8e:55:7d +-----BEGIN CERTIFICATE----- +MIIDZzCCAk+gAwIBAgIQGx+ttiD5JNM2a/fH8YygWTANBgkqhkiG9w0BAQUFADBF +MQswCQYDVQQGEwJHQjEYMBYGA1UEChMPVHJ1c3RpcyBMaW1pdGVkMRwwGgYDVQQL +ExNUcnVzdGlzIEZQUyBSb290IENBMB4XDTAzMTIyMzEyMTQwNloXDTI0MDEyMTEx +MzY1NFowRTELMAkGA1UEBhMCR0IxGDAWBgNVBAoTD1RydXN0aXMgTGltaXRlZDEc +MBoGA1UECxMTVHJ1c3RpcyBGUFMgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBAMVQe547NdDfxIzNjpvto8A2mfRC6qc+gIMPpqdZh8mQRUN+ +AOqGeSoDvT03mYlmt+WKVoaTnGhLaASMk5MCPjDSNzoiYYkchU59j9WvezX2fihH +iTHcDnlkH5nSW7r+f2C/revnPDgpai/lkQtV/+xvWNUtyd5MZnGPDNcE2gfmHhjj +vSkCqPoc4Vu5g6hBSLwacY3nYuUtsuvffM/bq1rKMfFMIvMFE/eC+XN5DL7XSxzA +0RU8k0Fk0ea+IxciAIleH2ulrG6nS4zto3Lmr2NNL4XSFDWaLk6M6jKYKIahkQlB +OrTh4/L68MkKokHdqeMDx4gVOxzUGpTXn2RZEm0CAwEAAaNTMFEwDwYDVR0TAQH/ +BAUwAwEB/zAfBgNVHSMEGDAWgBS6+nEleYtXQSUhhgtx67JkDoshZzAdBgNVHQ4E +FgQUuvpxJXmLV0ElIYYLceuyZA6LIWcwDQYJKoZIhvcNAQEFBQADggEBAH5Y//01 +GX2cGE+esCu8jowU/yyg2kdbw++BLa8F6nRIW/M+TgfHbcWzk88iNVy2P3UnXwmW +zaD+vkAMXBJV+JOCyinpXj9WV4s4NvdFGkwozZ5BuO1WTISkQMi4sKUraXAEasP4 +1BIy+Q7DsdwyhEQsb8tGD+pmQQ9P8Vilpg0ND2HepZ5dfWWhPBfnqFVO76DH7cZE +f1T1o+CP8HxVIo8ptoGj4W1OLBuAZ+ytIJ8MYmHVl/9D7S3B2l0pKoU/rGXuhg8F +jZBf3+6f9L/uHfuY5H+QK4R4EA5sSVPvFVtlRkpdr7r7OnIdzfYliB6XzCGcKQEN +ZetX2fNXlrtIzYE= +-----END CERTIFICATE----- + +# Issuer: CN=Buypass Class 2 Root CA O=Buypass AS-983163327 +# Subject: CN=Buypass Class 2 Root CA O=Buypass AS-983163327 +# Label: "Buypass Class 2 Root CA" +# Serial: 2 +# MD5 Fingerprint: 46:a7:d2:fe:45:fb:64:5a:a8:59:90:9b:78:44:9b:29 +# SHA1 Fingerprint: 49:0a:75:74:de:87:0a:47:fe:58:ee:f6:c7:6b:eb:c6:0b:12:40:99 +# SHA256 Fingerprint: 9a:11:40:25:19:7c:5b:b9:5d:94:e6:3d:55:cd:43:79:08:47:b6:46:b2:3c:df:11:ad:a4:a0:0e:ff:15:fb:48 +-----BEGIN CERTIFICATE----- +MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEd +MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3Mg +Q2xhc3MgMiBSb290IENBMB4XDTEwMTAyNjA4MzgwM1oXDTQwMTAyNjA4MzgwM1ow +TjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MSAw +HgYDVQQDDBdCdXlwYXNzIENsYXNzIDIgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEB +BQADggIPADCCAgoCggIBANfHXvfBB9R3+0Mh9PT1aeTuMgHbo4Yf5FkNuud1g1Lr 
+6hxhFUi7HQfKjK6w3Jad6sNgkoaCKHOcVgb/S2TwDCo3SbXlzwx87vFKu3MwZfPV +L4O2fuPn9Z6rYPnT8Z2SdIrkHJasW4DptfQxh6NR/Md+oW+OU3fUl8FVM5I+GC91 +1K2GScuVr1QGbNgGE41b/+EmGVnAJLqBcXmQRFBoJJRfuLMR8SlBYaNByyM21cHx +MlAQTn/0hpPshNOOvEu/XAFOBz3cFIqUCqTqc/sLUegTBxj6DvEr0VQVfTzh97QZ +QmdiXnfgolXsttlpF9U6r0TtSsWe5HonfOV116rLJeffawrbD02TTqigzXsu8lkB +arcNuAeBfos4GzjmCleZPe4h6KP1DBbdi+w0jpwqHAAVF41og9JwnxgIzRFo1clr +Us3ERo/ctfPYV3Me6ZQ5BL/T3jjetFPsaRyifsSP5BtwrfKi+fv3FmRmaZ9JUaLi +FRhnBkp/1Wy1TbMz4GHrXb7pmA8y1x1LPC5aAVKRCfLf6o3YBkBjqhHk/sM3nhRS +P/TizPJhk9H9Z2vXUq6/aKtAQ6BXNVN48FP4YUIHZMbXb5tMOA1jrGKvNouicwoN +9SG9dKpN6nIDSdvHXx1iY8f93ZHsM+71bbRuMGjeyNYmsHVee7QHIJihdjK4TWxP +AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMmAd+BikoL1Rpzz +uvdMw964o605MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAU18h +9bqwOlI5LJKwbADJ784g7wbylp7ppHR/ehb8t/W2+xUbP6umwHJdELFx7rxP462s +A20ucS6vxOOto70MEae0/0qyexAQH6dXQbLArvQsWdZHEIjzIVEpMMpghq9Gqx3t +OluwlN5E40EIosHsHdb9T7bWR9AUC8rmyrV7d35BH16Dx7aMOZawP5aBQW9gkOLo ++fsicdl9sz1Gv7SEr5AcD48Saq/v7h56rgJKihcrdv6sVIkkLE8/trKnToyokZf7 +KcZ7XC25y2a2t6hbElGFtQl+Ynhw/qlqYLYdDnkM/crqJIByw5c/8nerQyIKx+u2 +DISCLIBrQYoIwOula9+ZEsuK1V6ADJHgJgg2SMX6OBE1/yWDLfJ6v9r9jv6ly0Us +H8SIU653DtmadsWOLB2jutXsMq7Aqqz30XpN69QH4kj3Io6wpJ9qzo6ysmD0oyLQ +I+uUWnpp3Q+/QFesa1lQ2aOZ4W7+jQF5JyMV3pKdewlNWudLSDBaGOYKbeaP4NK7 +5t98biGCwWg5TbSYWGZizEqQXsP6JwSxeRV0mcy+rSDeJmAc61ZRpqPq5KM/p/9h +3PFaTWwyI0PurKju7koSCTxdccK+efrCh2gdC/1cacwG0Jp9VJkqyTkaGa9LKkPz +Y11aWOIv4x3kqdbQCtCev9eBCfHJxyYNrJgWVqA= +-----END CERTIFICATE----- + +# Issuer: CN=Buypass Class 3 Root CA O=Buypass AS-983163327 +# Subject: CN=Buypass Class 3 Root CA O=Buypass AS-983163327 +# Label: "Buypass Class 3 Root CA" +# Serial: 2 +# MD5 Fingerprint: 3d:3b:18:9e:2c:64:5a:e8:d5:88:ce:0e:f9:37:c2:ec +# SHA1 Fingerprint: da:fa:f7:fa:66:84:ec:06:8f:14:50:bd:c7:c2:81:a5:bc:a9:64:57 +# SHA256 Fingerprint: ed:f7:eb:bc:a2:7a:2a:38:4d:38:7b:7d:40:10:c6:66:e2:ed:b4:84:3e:4c:29:b4:ae:1d:5b:93:32:e6:b2:4d +-----BEGIN CERTIFICATE----- +MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEd +MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3Mg +Q2xhc3MgMyBSb290IENBMB4XDTEwMTAyNjA4Mjg1OFoXDTQwMTAyNjA4Mjg1OFow +TjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MSAw +HgYDVQQDDBdCdXlwYXNzIENsYXNzIDMgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEB +BQADggIPADCCAgoCggIBAKXaCpUWUOOV8l6ddjEGMnqb8RB2uACatVI2zSRHsJ8Y +ZLya9vrVediQYkwiL944PdbgqOkcLNt4EemOaFEVcsfzM4fkoF0LXOBXByow9c3E +N3coTRiR5r/VUv1xLXA+58bEiuPwKAv0dpihi4dVsjoT/Lc+JzeOIuOoTyrvYLs9 +tznDDgFHmV0ST9tD+leh7fmdvhFHJlsTmKtdFoqwNxxXnUX/iJY2v7vKB3tvh2PX +0DJq1l1sDPGzbjniazEuOQAnFN44wOwZZoYS6J1yFhNkUsepNxz9gjDthBgd9K5c +/3ATAOux9TN6S9ZV+AWNS2mw9bMoNlwUxFFzTWsL8TQH2xc519woe2v1n/MuwU8X +KhDzzMro6/1rqy6any2CbgTUUgGTLT2G/H783+9CHaZr77kgxve9oKeV/afmiSTY +zIw0bOIjL9kSGiG5VZFvC5F5GQytQIgLcOJ60g7YaEi7ghM5EFjp2CoHxhLbWNvS +O1UQRwUVZ2J+GGOmRj8JDlQyXr8NYnon74Do29lLBlo3WiXQCBJ31G8JUJc9yB3D +34xFMFbG02SrZvPAXpacw8Tvw3xrizp5f7NJzz3iiZ+gMEuFuZyUJHmPfWupRWgP +K9Dx2hzLabjKSWJtyNBjYt1gD1iqj6G8BaVmos8bdrKEZLFMOVLAMLrwjEsCsLa3 +AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFEe4zf/lb+74suwv +Tg75JbCOPGvDMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAACAj +QTUEkMJAYmDv4jVM1z+s4jSQuKFvdvoWFqRINyzpkMLyPPgKn9iB5btb2iUspKdV +cSQy9sgL8rxq+JOssgfCX5/bzMiKqr5qb+FJEMwx14C7u8jYog5kV+qi9cKpMRXS +IGrs/CIBKM+GuIAeqcwRpTzyFrNHnfzSgCHEy9BHcEGhyoMZCCxt8l13nIoUE9Q2 +HJLw5QY33KbmkJs4j1xrG0aGQ0JfPgEHU1RdZX33inOhmlRaHylDFCfChQ+1iHsa +O5S3HWCntZznKWlXWpuTekMwGwPXYshApqr8ZORK15FTAaggiG6cX0S5y2CBNOxv 
+033aSF/rtJC8LakcC6wc1aJoIIAE1vyxjy+7SjENSoYc6+I2KSb12tjE8nVhz36u +dmNKekBlk4f4HoCMhuWG1o8O/FMsYOgWYRqiPkN7zTlgVGr18okmAWiDSKIz6MkE +kbIRNBE+6tBDGR8Dk5AM/1E9V/RBbuHLoL7ryWPNbczk+DaqaJ3tvV2XcEQNtg41 +3OEMXbugUZTLfhbrES+jkkXITHHZvMmZUldGL1DPvTVp9D0VzgalLA8+9oG6lLvD +u79leNKGef9JOxqDDPDeeOzI8k1MGt6CKfjBWtrt7uYnXuhF0J0cUahoq0Tj0Itq +4/g7u9xN12TyUb7mqqta6THuBrxzvxNiCp/HuZc= +-----END CERTIFICATE----- + +# Issuer: CN=T-TeleSec GlobalRoot Class 3 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center +# Subject: CN=T-TeleSec GlobalRoot Class 3 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center +# Label: "T-TeleSec GlobalRoot Class 3" +# Serial: 1 +# MD5 Fingerprint: ca:fb:40:a8:4e:39:92:8a:1d:fe:8e:2f:c4:27:ea:ef +# SHA1 Fingerprint: 55:a6:72:3e:cb:f2:ec:cd:c3:23:74:70:19:9d:2a:be:11:e3:81:d1 +# SHA256 Fingerprint: fd:73:da:d3:1c:64:4f:f1:b4:3b:ef:0c:cd:da:96:71:0b:9c:d9:87:5e:ca:7e:31:70:7a:f3:e9:6d:52:2b:bd +-----BEGIN CERTIFICATE----- +MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUx +KzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAd +BgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNl +YyBHbG9iYWxSb290IENsYXNzIDMwHhcNMDgxMDAxMTAyOTU2WhcNMzMxMDAxMjM1 +OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnBy +aXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50 +ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC9dZPwYiJvJK7genasfb3ZJNW4t/zN +8ELg63iIVl6bmlQdTQyK9tPPcPRStdiTBONGhnFBSivwKixVA9ZIw+A5OO3yXDw/ +RLyTPWGrTs0NvvAgJ1gORH8EGoel15YUNpDQSXuhdfsaa3Ox+M6pCSzyU9XDFES4 +hqX2iys52qMzVNn6chr3IhUciJFrf2blw2qAsCTz34ZFiP0Zf3WHHx+xGwpzJFu5 +ZeAsVMhg02YXP+HMVDNzkQI6pn97djmiH5a2OK61yJN0HZ65tOVgnS9W0eDrXltM +EnAMbEQgqxHY9Bn20pxSN+f6tsIxO0rUFJmtxxr1XV/6B7h8DR/Wgx6zAgMBAAGj +QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS1 +A/d2O2GCahKqGFPrAyGUv/7OyjANBgkqhkiG9w0BAQsFAAOCAQEAVj3vlNW92nOy +WL6ukK2YJ5f+AbGwUgC4TeQbIXQbfsDuXmkqJa9c1h3a0nnJ85cp4IaH3gRZD/FZ +1GSFS5mvJQQeyUapl96Cshtwn5z2r3Ex3XsFpSzTucpH9sry9uetuUg/vBa3wW30 +6gmv7PO15wWeph6KU1HWk4HMdJP2udqmJQV0eVp+QD6CSyYRMG7hP0HHRwA11fXT +91Q+gT3aSWqas+8QPebrb9HIIkfLzM8BMZLZGOMivgkeGj5asuRrDFR6fUNOuIml +e9eiPZaGzPImNC1qkp2aGtAw4l1OBLBfiyB+d8E9lYLRRpo7PHi4b6HQDWSieB4p +TpPDpFQUWw== +-----END CERTIFICATE----- + +# Issuer: CN=EE Certification Centre Root CA O=AS Sertifitseerimiskeskus +# Subject: CN=EE Certification Centre Root CA O=AS Sertifitseerimiskeskus +# Label: "EE Certification Centre Root CA" +# Serial: 112324828676200291871926431888494945866 +# MD5 Fingerprint: 43:5e:88:d4:7d:1a:4a:7e:fd:84:2e:52:eb:01:d4:6f +# SHA1 Fingerprint: c9:a8:b9:e7:55:80:5e:58:e3:53:77:a7:25:eb:af:c3:7b:27:cc:d7 +# SHA256 Fingerprint: 3e:84:ba:43:42:90:85:16:e7:75:73:c0:99:2f:09:79:ca:08:4e:46:85:68:1f:f1:95:cc:ba:8a:22:9b:8a:76 +-----BEGIN CERTIFICATE----- +MIIEAzCCAuugAwIBAgIQVID5oHPtPwBMyonY43HmSjANBgkqhkiG9w0BAQUFADB1 +MQswCQYDVQQGEwJFRTEiMCAGA1UECgwZQVMgU2VydGlmaXRzZWVyaW1pc2tlc2t1 +czEoMCYGA1UEAwwfRUUgQ2VydGlmaWNhdGlvbiBDZW50cmUgUm9vdCBDQTEYMBYG +CSqGSIb3DQEJARYJcGtpQHNrLmVlMCIYDzIwMTAxMDMwMTAxMDMwWhgPMjAzMDEy +MTcyMzU5NTlaMHUxCzAJBgNVBAYTAkVFMSIwIAYDVQQKDBlBUyBTZXJ0aWZpdHNl +ZXJpbWlza2Vza3VzMSgwJgYDVQQDDB9FRSBDZXJ0aWZpY2F0aW9uIENlbnRyZSBS +b290IENBMRgwFgYJKoZIhvcNAQkBFglwa2lAc2suZWUwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQDIIMDs4MVLqwd4lfNE7vsLDP90jmG7sWLqI9iroWUy +euuOF0+W2Ap7kaJjbMeMTC55v6kF/GlclY1i+blw7cNRfdCT5mzrMEvhvH2/UpvO +bntl8jixwKIy72KyaOBhU8E2lf/slLo2rpwcpzIP5Xy0xm90/XsY6KxX7QYgSzIw 
+WFv9zajmofxwvI6Sc9uXp3whrj3B9UiHbCe9nyV0gVWw93X2PaRka9ZP585ArQ/d +MtO8ihJTmMmJ+xAdTX7Nfh9WDSFwhfYggx/2uh8Ej+p3iDXE/+pOoYtNP2MbRMNE +1CV2yreN1x5KZmTNXMWcg+HCCIia7E6j8T4cLNlsHaFLAgMBAAGjgYowgYcwDwYD +VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBLyWj7qVhy/ +zQas8fElyalL1BSZMEUGA1UdJQQ+MDwGCCsGAQUFBwMCBggrBgEFBQcDAQYIKwYB +BQUHAwMGCCsGAQUFBwMEBggrBgEFBQcDCAYIKwYBBQUHAwkwDQYJKoZIhvcNAQEF +BQADggEBAHv25MANqhlHt01Xo/6tu7Fq1Q+e2+RjxY6hUFaTlrg4wCQiZrxTFGGV +v9DHKpY5P30osxBAIWrEr7BSdxjhlthWXePdNl4dp1BUoMUq5KqMlIpPnTX/dqQG +E5Gion0ARD9V04I8GtVbvFZMIi5GQ4okQC3zErg7cBqklrkar4dBGmoYDQZPxz5u +uSlNDUmJEYcyW+ZLBMjkXOZ0c5RdFpgTlf7727FE5TpwrDdr5rMzcijJs1eg9gIW +iAYLtqZLICjU3j2LrTcFU3T+bsy8QxdxXvnFzBqpYe73dgzzcvRyrc9yAjYHR8/v +GVCJYMzpJJUPwssd8m92kMfMdcGWxZ0= +-----END CERTIFICATE----- + +# Issuer: CN=D-TRUST Root Class 3 CA 2 2009 O=D-Trust GmbH +# Subject: CN=D-TRUST Root Class 3 CA 2 2009 O=D-Trust GmbH +# Label: "D-TRUST Root Class 3 CA 2 2009" +# Serial: 623603 +# MD5 Fingerprint: cd:e0:25:69:8d:47:ac:9c:89:35:90:f7:fd:51:3d:2f +# SHA1 Fingerprint: 58:e8:ab:b0:36:15:33:fb:80:f7:9b:1b:6d:29:d3:ff:8d:5f:00:f0 +# SHA256 Fingerprint: 49:e7:a4:42:ac:f0:ea:62:87:05:00:54:b5:25:64:b6:50:e4:f4:9e:42:e3:48:d6:aa:38:e0:39:e9:57:b1:c1 +-----BEGIN CERTIFICATE----- +MIIEMzCCAxugAwIBAgIDCYPzMA0GCSqGSIb3DQEBCwUAME0xCzAJBgNVBAYTAkRF +MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMMHkQtVFJVU1QgUm9vdCBD +bGFzcyAzIENBIDIgMjAwOTAeFw0wOTExMDUwODM1NThaFw0yOTExMDUwODM1NTha +ME0xCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMM +HkQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgMjAwOTCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBANOySs96R+91myP6Oi/WUEWJNTrGa9v+2wBoqOADER03 +UAifTUpolDWzU9GUY6cgVq/eUXjsKj3zSEhQPgrfRlWLJ23DEE0NkVJD2IfgXU42 +tSHKXzlABF9bfsyjxiupQB7ZNoTWSPOSHjRGICTBpFGOShrvUD9pXRl/RcPHAY9R +ySPocq60vFYJfxLLHLGvKZAKyVXMD9O0Gu1HNVpK7ZxzBCHQqr0ME7UAyiZsxGsM +lFqVlNpQmvH/pStmMaTJOKDfHR+4CS7zp+hnUquVH+BGPtikw8paxTGA6Eian5Rp +/hnd2HN8gcqW3o7tszIFZYQ05ub9VxC1X3a/L7AQDcUCAwEAAaOCARowggEWMA8G +A1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFP3aFMSfMN4hvR5COfyrYyNJ4PGEMA4G +A1UdDwEB/wQEAwIBBjCB0wYDVR0fBIHLMIHIMIGAoH6gfIZ6bGRhcDovL2RpcmVj +dG9yeS5kLXRydXN0Lm5ldC9DTj1ELVRSVVNUJTIwUm9vdCUyMENsYXNzJTIwMyUy +MENBJTIwMiUyMDIwMDksTz1ELVRydXN0JTIwR21iSCxDPURFP2NlcnRpZmljYXRl +cmV2b2NhdGlvbmxpc3QwQ6BBoD+GPWh0dHA6Ly93d3cuZC10cnVzdC5uZXQvY3Js +L2QtdHJ1c3Rfcm9vdF9jbGFzc18zX2NhXzJfMjAwOS5jcmwwDQYJKoZIhvcNAQEL +BQADggEBAH+X2zDI36ScfSF6gHDOFBJpiBSVYEQBrLLpME+bUMJm2H6NMLVwMeni +acfzcNsgFYbQDfC+rAF1hM5+n02/t2A7nPPKHeJeaNijnZflQGDSNiH+0LS4F9p0 +o3/U37CYAqxva2ssJSRyoWXuJVrl5jLn8t+rSfrzkGkj2wTZ51xY/GXUl77M/C4K +zCUqNQT4YJEVdT1B/yMfGchs64JTBKbkTCJNjYy6zltz7GRUUG3RnFX7acM2w4y8 +PIWmawomDeCTmGCufsYkl4phX5GOZpIJhzbNi5stPvZR1FDUWSi9g/LMKHtThm3Y +Johw1+qRzT65ysCQblrGXnRl11z+o+I= +-----END CERTIFICATE----- + +# Issuer: CN=D-TRUST Root Class 3 CA 2 EV 2009 O=D-Trust GmbH +# Subject: CN=D-TRUST Root Class 3 CA 2 EV 2009 O=D-Trust GmbH +# Label: "D-TRUST Root Class 3 CA 2 EV 2009" +# Serial: 623604 +# MD5 Fingerprint: aa:c6:43:2c:5e:2d:cd:c4:34:c0:50:4f:11:02:4f:b6 +# SHA1 Fingerprint: 96:c9:1b:0b:95:b4:10:98:42:fa:d0:d8:22:79:fe:60:fa:b9:16:83 +# SHA256 Fingerprint: ee:c5:49:6b:98:8c:e9:86:25:b9:34:09:2e:ec:29:08:be:d0:b0:f3:16:c2:d4:73:0c:84:ea:f1:f3:d3:48:81 +-----BEGIN CERTIFICATE----- +MIIEQzCCAyugAwIBAgIDCYP0MA0GCSqGSIb3DQEBCwUAMFAxCzAJBgNVBAYTAkRF +MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxKjAoBgNVBAMMIUQtVFJVU1QgUm9vdCBD +bGFzcyAzIENBIDIgRVYgMjAwOTAeFw0wOTExMDUwODUwNDZaFw0yOTExMDUwODUw +NDZaMFAxCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxKjAoBgNV 
+BAMMIUQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgRVYgMjAwOTCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAJnxhDRwui+3MKCOvXwEz75ivJn9gpfSegpn +ljgJ9hBOlSJzmY3aFS3nBfwZcyK3jpgAvDw9rKFs+9Z5JUut8Mxk2og+KbgPCdM0 +3TP1YtHhzRnp7hhPTFiu4h7WDFsVWtg6uMQYZB7jM7K1iXdODL/ZlGsTl28So/6Z +qQTMFexgaDbtCHu39b+T7WYxg4zGcTSHThfqr4uRjRxWQa4iN1438h3Z0S0NL2lR +p75mpoo6Kr3HGrHhFPC+Oh25z1uxav60sUYgovseO3Dvk5h9jHOW8sXvhXCtKSb8 +HgQ+HKDYD8tSg2J87otTlZCpV6LqYQXY+U3EJ/pure3511H3a6UCAwEAAaOCASQw +ggEgMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNOUikxiEyoZLsyvcop9Ntea +HNxnMA4GA1UdDwEB/wQEAwIBBjCB3QYDVR0fBIHVMIHSMIGHoIGEoIGBhn9sZGFw +Oi8vZGlyZWN0b3J5LmQtdHJ1c3QubmV0L0NOPUQtVFJVU1QlMjBSb290JTIwQ2xh +c3MlMjAzJTIwQ0ElMjAyJTIwRVYlMjAyMDA5LE89RC1UcnVzdCUyMEdtYkgsQz1E +RT9jZXJ0aWZpY2F0ZXJldm9jYXRpb25saXN0MEagRKBChkBodHRwOi8vd3d3LmQt +dHJ1c3QubmV0L2NybC9kLXRydXN0X3Jvb3RfY2xhc3NfM19jYV8yX2V2XzIwMDku +Y3JsMA0GCSqGSIb3DQEBCwUAA4IBAQA07XtaPKSUiO8aEXUHL7P+PPoeUSbrh/Yp +3uDx1MYkCenBz1UbtDDZzhr+BlGmFaQt77JLvyAoJUnRpjZ3NOhk31KxEcdzes05 +nsKtjHEh8lprr988TlWvsoRlFIm5d8sqMb7Po23Pb0iUMkZv53GMoKaEGTcH8gNF +CSuGdXzfX2lXANtu2KZyIktQ1HWYVt+3GP9DQ1CuekR78HlR10M9p9OB0/DJT7na +xpeG0ILD5EJt/rDiZE4OJudANCa1CInXCGNjOCd1HjPqbqjdn5lPdE2BiYBL3ZqX +KVwvvoFBuYz/6n1gBp7N1z3TLqMVvKjmJuVvw9y4AyHqnxbxLFS1 +-----END CERTIFICATE----- + +# Issuer: CN=CA Disig Root R2 O=Disig a.s. +# Subject: CN=CA Disig Root R2 O=Disig a.s. +# Label: "CA Disig Root R2" +# Serial: 10572350602393338211 +# MD5 Fingerprint: 26:01:fb:d8:27:a7:17:9a:45:54:38:1a:43:01:3b:03 +# SHA1 Fingerprint: b5:61:eb:ea:a4:de:e4:25:4b:69:1a:98:a5:57:47:c2:34:c7:d9:71 +# SHA256 Fingerprint: e2:3d:4a:03:6d:7b:70:e9:f5:95:b1:42:20:79:d2:b9:1e:df:bb:1f:b6:51:a0:63:3e:aa:8a:9d:c5:f8:07:03 +-----BEGIN CERTIFICATE----- +MIIFaTCCA1GgAwIBAgIJAJK4iNuwisFjMA0GCSqGSIb3DQEBCwUAMFIxCzAJBgNV +BAYTAlNLMRMwEQYDVQQHEwpCcmF0aXNsYXZhMRMwEQYDVQQKEwpEaXNpZyBhLnMu +MRkwFwYDVQQDExBDQSBEaXNpZyBSb290IFIyMB4XDTEyMDcxOTA5MTUzMFoXDTQy +MDcxOTA5MTUzMFowUjELMAkGA1UEBhMCU0sxEzARBgNVBAcTCkJyYXRpc2xhdmEx +EzARBgNVBAoTCkRpc2lnIGEucy4xGTAXBgNVBAMTEENBIERpc2lnIFJvb3QgUjIw +ggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCio8QACdaFXS1tFPbCw3Oe +NcJxVX6B+6tGUODBfEl45qt5WDza/3wcn9iXAng+a0EE6UG9vgMsRfYvZNSrXaNH +PWSb6WiaxswbP7q+sos0Ai6YVRn8jG+qX9pMzk0DIaPY0jSTVpbLTAwAFjxfGs3I +x2ymrdMxp7zo5eFm1tL7A7RBZckQrg4FY8aAamkw/dLukO8NJ9+flXP04SXabBbe +QTg06ov80egEFGEtQX6sx3dOy1FU+16SGBsEWmjGycT6txOgmLcRK7fWV8x8nhfR +yyX+hk4kLlYMeE2eARKmK6cBZW58Yh2EhN/qwGu1pSqVg8NTEQxzHQuyRpDRQjrO +QG6Vrf/GlK1ul4SOfW+eioANSW1z4nuSHsPzwfPrLgVv2RvPN3YEyLRa5Beny912 +H9AZdugsBbPWnDTYltxhh5EF5EQIM8HauQhl1K6yNg3ruji6DOWbnuuNZt2Zz9aJ +QfYEkoopKW1rOhzndX0CcQ7zwOe9yxndnWCywmZgtrEE7snmhrmaZkCo5xHtgUUD +i/ZnWejBBhG93c+AAk9lQHhcR1DIm+YfgXvkRKhbhZri3lrVx/k6RGZL5DJUfORs +nLMOPReisjQS1n6yqEm70XooQL6iFh/f5DcfEXP7kAplQ6INfPgGAVUzfbANuPT1 +rqVCV3w2EYx7XsQDnYx5nQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1Ud +DwEB/wQEAwIBBjAdBgNVHQ4EFgQUtZn4r7CU9eMg1gqtzk5WpC5uQu0wDQYJKoZI +hvcNAQELBQADggIBACYGXnDnZTPIgm7ZnBc6G3pmsgH2eDtpXi/q/075KMOYKmFM +tCQSin1tERT3nLXK5ryeJ45MGcipvXrA1zYObYVybqjGom32+nNjf7xueQgcnYqf +GopTpti72TVVsRHFqQOzVju5hJMiXn7B9hJSi+osZ7z+Nkz1uM/Rs0mSO9MpDpkb +lvdhuDvEK7Z4bLQjb/D907JedR+Zlais9trhxTF7+9FGs9K8Z7RiVLoJ92Owk6Ka ++elSLotgEqv89WBW7xBci8QaQtyDW2QOy7W81k/BfDxujRNt+3vrMNDcTa/F1bal +TFtxyegxvug4BkihGuLq0t4SOVga/4AOgnXmt8kHbA7v/zjxmHHEt38OFdAlab0i +nSvtBfZGR6ztwPDUO+Ls7pZbkBNOHlY667DvlruWIxG68kOGdGSVyCh13x01utI3 +gzhTODY7z2zp+WsO0PsE6E9312UBeIYMej4hYvF/Y3EMyZ9E26gnonW+boE+18Dr +G5gPcFw0sorMwIUY6256s/daoQe/qUKS82Ail+QUoQebTnbAjn39pCXHR+3/H3Os 
+zMOl6W8KjptlwlCFtaOgUxLMVYdh84GuEEZhvUQhuMI9dM9+JDX6HAcOmz0iyu8x +L4ysEr3vQCj8KWefshNPZiTEUxnpHikV7+ZtsH8tZ/3zbBt1RqPlShfppNcL +-----END CERTIFICATE----- + +# Issuer: CN=ACCVRAIZ1 O=ACCV OU=PKIACCV +# Subject: CN=ACCVRAIZ1 O=ACCV OU=PKIACCV +# Label: "ACCVRAIZ1" +# Serial: 6828503384748696800 +# MD5 Fingerprint: d0:a0:5a:ee:05:b6:09:94:21:a1:7d:f1:b2:29:82:02 +# SHA1 Fingerprint: 93:05:7a:88:15:c6:4f:ce:88:2f:fa:91:16:52:28:78:bc:53:64:17 +# SHA256 Fingerprint: 9a:6e:c0:12:e1:a7:da:9d:be:34:19:4d:47:8a:d7:c0:db:18:22:fb:07:1d:f1:29:81:49:6e:d1:04:38:41:13 +-----BEGIN CERTIFICATE----- +MIIH0zCCBbugAwIBAgIIXsO3pkN/pOAwDQYJKoZIhvcNAQEFBQAwQjESMBAGA1UE +AwwJQUNDVlJBSVoxMRAwDgYDVQQLDAdQS0lBQ0NWMQ0wCwYDVQQKDARBQ0NWMQsw +CQYDVQQGEwJFUzAeFw0xMTA1MDUwOTM3MzdaFw0zMDEyMzEwOTM3MzdaMEIxEjAQ +BgNVBAMMCUFDQ1ZSQUlaMTEQMA4GA1UECwwHUEtJQUNDVjENMAsGA1UECgwEQUND +VjELMAkGA1UEBhMCRVMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCb +qau/YUqXry+XZpp0X9DZlv3P4uRm7x8fRzPCRKPfmt4ftVTdFXxpNRFvu8gMjmoY +HtiP2Ra8EEg2XPBjs5BaXCQ316PWywlxufEBcoSwfdtNgM3802/J+Nq2DoLSRYWo +G2ioPej0RGy9ocLLA76MPhMAhN9KSMDjIgro6TenGEyxCQ0jVn8ETdkXhBilyNpA +lHPrzg5XPAOBOp0KoVdDaaxXbXmQeOW1tDvYvEyNKKGno6e6Ak4l0Squ7a4DIrhr +IA8wKFSVf+DuzgpmndFALW4ir50awQUZ0m/A8p/4e7MCQvtQqR0tkw8jq8bBD5L/ +0KIV9VMJcRz/RROE5iZe+OCIHAr8Fraocwa48GOEAqDGWuzndN9wrqODJerWx5eH +k6fGioozl2A3ED6XPm4pFdahD9GILBKfb6qkxkLrQaLjlUPTAYVtjrs78yM2x/47 +4KElB0iryYl0/wiPgL/AlmXz7uxLaL2diMMxs0Dx6M/2OLuc5NF/1OVYm3z61PMO +m3WR5LpSLhl+0fXNWhn8ugb2+1KoS5kE3fj5tItQo05iifCHJPqDQsGH+tUtKSpa +cXpkatcnYGMN285J9Y0fkIkyF/hzQ7jSWpOGYdbhdQrqeWZ2iE9x6wQl1gpaepPl +uUsXQA+xtrn13k/c4LOsOxFwYIRKQ26ZIMApcQrAZQIDAQABo4ICyzCCAscwfQYI +KwYBBQUHAQEEcTBvMEwGCCsGAQUFBzAChkBodHRwOi8vd3d3LmFjY3YuZXMvZmls +ZWFkbWluL0FyY2hpdm9zL2NlcnRpZmljYWRvcy9yYWl6YWNjdjEuY3J0MB8GCCsG +AQUFBzABhhNodHRwOi8vb2NzcC5hY2N2LmVzMB0GA1UdDgQWBBTSh7Tj3zcnk1X2 +VuqB5TbMjB4/vTAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFNKHtOPfNyeT +VfZW6oHlNsyMHj+9MIIBcwYDVR0gBIIBajCCAWYwggFiBgRVHSAAMIIBWDCCASIG +CCsGAQUFBwICMIIBFB6CARAAQQB1AHQAbwByAGkAZABhAGQAIABkAGUAIABDAGUA +cgB0AGkAZgBpAGMAYQBjAGkA8wBuACAAUgBhAO0AegAgAGQAZQAgAGwAYQAgAEEA +QwBDAFYAIAAoAEEAZwBlAG4AYwBpAGEAIABkAGUAIABUAGUAYwBuAG8AbABvAGcA +7QBhACAAeQAgAEMAZQByAHQAaQBmAGkAYwBhAGMAaQDzAG4AIABFAGwAZQBjAHQA +cgDzAG4AaQBjAGEALAAgAEMASQBGACAAUQA0ADYAMAAxADEANQA2AEUAKQAuACAA +QwBQAFMAIABlAG4AIABoAHQAdABwADoALwAvAHcAdwB3AC4AYQBjAGMAdgAuAGUA +czAwBggrBgEFBQcCARYkaHR0cDovL3d3dy5hY2N2LmVzL2xlZ2lzbGFjaW9uX2Mu +aHRtMFUGA1UdHwROMEwwSqBIoEaGRGh0dHA6Ly93d3cuYWNjdi5lcy9maWxlYWRt +aW4vQXJjaGl2b3MvY2VydGlmaWNhZG9zL3JhaXphY2N2MV9kZXIuY3JsMA4GA1Ud +DwEB/wQEAwIBBjAXBgNVHREEEDAOgQxhY2N2QGFjY3YuZXMwDQYJKoZIhvcNAQEF +BQADggIBAJcxAp/n/UNnSEQU5CmH7UwoZtCPNdpNYbdKl02125DgBS4OxnnQ8pdp +D70ER9m+27Up2pvZrqmZ1dM8MJP1jaGo/AaNRPTKFpV8M9xii6g3+CfYCS0b78gU +JyCpZET/LtZ1qmxNYEAZSUNUY9rizLpm5U9EelvZaoErQNV/+QEnWCzI7UiRfD+m +AM/EKXMRNt6GGT6d7hmKG9Ww7Y49nCrADdg9ZuM8Db3VlFzi4qc1GwQA9j9ajepD +vV+JHanBsMyZ4k0ACtrJJ1vnE5Bc5PUzolVt3OAJTS+xJlsndQAJxGJ3KQhfnlms +tn6tn1QwIgPBHnFk/vk4CpYY3QIUrCPLBhwepH2NDd4nQeit2hW3sCPdK6jT2iWH +7ehVRE2I9DZ+hJp4rPcOVkkO1jMl1oRQQmwgEh0q1b688nCBpHBgvgW1m54ERL5h +I6zppSSMEYCUWqKiuUnSwdzRp+0xESyeGabu4VXhwOrPDYTkF7eifKXeVSUG7szA +h1xA2syVP1XgNce4hL60Xc16gwFy7ofmXx2utYXGJt/mwZrpHgJHnyqobalbz+xF +d3+YJ5oyXSrjhO7FmGYvliAd3djDJ9ew+f7Zfc3Qn48LFFhRny+Lwzgt3uiP1o2H +pPVWQxaZLPSkVrQ0uGE3ycJYgBugl6H8WY3pEfbRD0tVNEYqi4Y7 +-----END CERTIFICATE----- + +# Issuer: CN=TWCA Global Root CA O=TAIWAN-CA OU=Root CA +# Subject: CN=TWCA Global Root CA O=TAIWAN-CA OU=Root CA +# Label: "TWCA Global Root CA" +# Serial: 3262 +# MD5 
Fingerprint: f9:03:7e:cf:e6:9e:3c:73:7a:2a:90:07:69:ff:2b:96 +# SHA1 Fingerprint: 9c:bb:48:53:f6:a4:f6:d3:52:a4:e8:32:52:55:60:13:f5:ad:af:65 +# SHA256 Fingerprint: 59:76:90:07:f7:68:5d:0f:cd:50:87:2f:9f:95:d5:75:5a:5b:2b:45:7d:81:f3:69:2b:61:0a:98:67:2f:0e:1b +-----BEGIN CERTIFICATE----- +MIIFQTCCAymgAwIBAgICDL4wDQYJKoZIhvcNAQELBQAwUTELMAkGA1UEBhMCVFcx +EjAQBgNVBAoTCVRBSVdBTi1DQTEQMA4GA1UECxMHUm9vdCBDQTEcMBoGA1UEAxMT +VFdDQSBHbG9iYWwgUm9vdCBDQTAeFw0xMjA2MjcwNjI4MzNaFw0zMDEyMzExNTU5 +NTlaMFExCzAJBgNVBAYTAlRXMRIwEAYDVQQKEwlUQUlXQU4tQ0ExEDAOBgNVBAsT +B1Jvb3QgQ0ExHDAaBgNVBAMTE1RXQ0EgR2xvYmFsIFJvb3QgQ0EwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCwBdvI64zEbooh745NnHEKH1Jw7W2CnJfF +10xORUnLQEK1EjRsGcJ0pDFfhQKX7EMzClPSnIyOt7h52yvVavKOZsTuKwEHktSz +0ALfUPZVr2YOy+BHYC8rMjk1Ujoog/h7FsYYuGLWRyWRzvAZEk2tY/XTP3VfKfCh +MBwqoJimFb3u/Rk28OKRQ4/6ytYQJ0lM793B8YVwm8rqqFpD/G2Gb3PpN0Wp8DbH +zIh1HrtsBv+baz4X7GGqcXzGHaL3SekVtTzWoWH1EfcFbx39Eb7QMAfCKbAJTibc +46KokWofwpFFiFzlmLhxpRUZyXx1EcxwdE8tmx2RRP1WKKD+u4ZqyPpcC1jcxkt2 +yKsi2XMPpfRaAok/T54igu6idFMqPVMnaR1sjjIsZAAmY2E2TqNGtz99sy2sbZCi +laLOz9qC5wc0GZbpuCGqKX6mOL6OKUohZnkfs8O1CWfe1tQHRvMq2uYiN2DLgbYP +oA/pyJV/v1WRBXrPPRXAb94JlAGD1zQbzECl8LibZ9WYkTunhHiVJqRaCPgrdLQA +BDzfuBSO6N+pjWxnkjMdwLfS7JLIvgm/LCkFbwJrnu+8vyq8W8BQj0FwcYeyTbcE +qYSjMq+u7msXi7Kx/mzhkIyIqJdIzshNy/MGz19qCkKxHh53L46g5pIOBvwFItIm +4TFRfTLcDwIDAQABoyMwITAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zANBgkqhkiG9w0BAQsFAAOCAgEAXzSBdu+WHdXltdkCY4QWwa6gcFGn90xHNcgL +1yg9iXHZqjNB6hQbbCEAwGxCGX6faVsgQt+i0trEfJdLjbDorMjupWkEmQqSpqsn +LhpNgb+E1HAerUf+/UqdM+DyucRFCCEK2mlpc3INvjT+lIutwx4116KD7+U4x6WF +H6vPNOw/KP4M8VeGTslV9xzU2KV9Bnpv1d8Q34FOIWWxtuEXeZVFBs5fzNxGiWNo +RI2T9GRwoD2dKAXDOXC4Ynsg/eTb6QihuJ49CcdP+yz4k3ZB3lLg4VfSnQO8d57+ +nile98FRYB/e2guyLXW3Q0iT5/Z5xoRdgFlglPx4mI88k1HtQJAH32RjJMtOcQWh +15QaiDLxInQirqWm2BJpTGCjAu4r7NRjkgtevi92a6O2JryPA9gK8kxkRr05YuWW +6zRjESjMlfGt7+/cgFhI6Uu46mWs6fyAtbXIRfmswZ/ZuepiiI7E8UuDEq3mi4TW +nsLrgxifarsbJGAzcMzs9zLzXNl5fe+epP7JI8Mk7hWSsT2RTyaGvWZzJBPqpK5j +wa19hAM8EHiGG3njxPPyBJUgriOCxLM6AGK/5jYk4Ve6xx6QddVfP5VhK8E7zeWz +aGHQRiapIVJpLesux+t3zqY6tQMzT3bR51xUAV3LePTJDL/PEo4XLSNolOer/qmy +KwbQBM0= +-----END CERTIFICATE----- + +# Issuer: CN=TeliaSonera Root CA v1 O=TeliaSonera +# Subject: CN=TeliaSonera Root CA v1 O=TeliaSonera +# Label: "TeliaSonera Root CA v1" +# Serial: 199041966741090107964904287217786801558 +# MD5 Fingerprint: 37:41:49:1b:18:56:9a:26:f5:ad:c2:66:fb:40:a5:4c +# SHA1 Fingerprint: 43:13:bb:96:f1:d5:86:9b:c1:4e:6a:92:f6:cf:f6:34:69:87:82:37 +# SHA256 Fingerprint: dd:69:36:fe:21:f8:f0:77:c1:23:a1:a5:21:c1:22:24:f7:22:55:b7:3e:03:a7:26:06:93:e8:a2:4b:0f:a3:89 +-----BEGIN CERTIFICATE----- +MIIFODCCAyCgAwIBAgIRAJW+FqD3LkbxezmCcvqLzZYwDQYJKoZIhvcNAQEFBQAw +NzEUMBIGA1UECgwLVGVsaWFTb25lcmExHzAdBgNVBAMMFlRlbGlhU29uZXJhIFJv +b3QgQ0EgdjEwHhcNMDcxMDE4MTIwMDUwWhcNMzIxMDE4MTIwMDUwWjA3MRQwEgYD +VQQKDAtUZWxpYVNvbmVyYTEfMB0GA1UEAwwWVGVsaWFTb25lcmEgUm9vdCBDQSB2 +MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMK+6yfwIaPzaSZVfp3F +VRaRXP3vIb9TgHot0pGMYzHw7CTww6XScnwQbfQ3t+XmfHnqjLWCi65ItqwA3GV1 +7CpNX8GH9SBlK4GoRz6JI5UwFpB/6FcHSOcZrr9FZ7E3GwYq/t75rH2D+1665I+X +Z75Ljo1kB1c4VWk0Nj0TSO9P4tNmHqTPGrdeNjPUtAa9GAH9d4RQAEX1jF3oI7x+ +/jXh7VB7qTCNGdMJjmhnXb88lxhTuylixcpecsHHltTbLaC0H2kD7OriUPEMPPCs +81Mt8Bz17Ww5OXOAFshSsCPN4D7c3TxHoLs1iuKYaIu+5b9y7tL6pe0S7fyYGKkm +dtwoSxAgHNN/Fnct7W+A90m7UwW7XWjH1Mh1Fj+JWov3F0fUTPHSiXk+TT2YqGHe +Oh7S+F4D4MHJHIzTjU3TlTazN19jY5szFPAtJmtTfImMMsJu7D0hADnJoWjiUIMu +sDor8zagrC/kb2HCUQk5PotTubtn2txTuXZZNp1D5SDgPTJghSJRt8czu90VL6R4 
+pgd7gUY2BIbdeTXHlSw7sKMXNeVzH7RcWe/a6hBle3rQf5+ztCo3O3CLm1u5K7fs +slESl1MpWtTwEhDcTwK7EpIvYtQ/aUN8Ddb8WHUBiJ1YFkveupD/RwGJBmr2X7KQ +arMCpgKIv7NHfirZ1fpoeDVNAgMBAAGjPzA9MA8GA1UdEwEB/wQFMAMBAf8wCwYD +VR0PBAQDAgEGMB0GA1UdDgQWBBTwj1k4ALP1j5qWDNXr+nuqF+gTEjANBgkqhkiG +9w0BAQUFAAOCAgEAvuRcYk4k9AwI//DTDGjkk0kiP0Qnb7tt3oNmzqjMDfz1mgbl +dxSR651Be5kqhOX//CHBXfDkH1e3damhXwIm/9fH907eT/j3HEbAek9ALCI18Bmx +0GtnLLCo4MBANzX2hFxc469CeP6nyQ1Q6g2EdvZR74NTxnr/DlZJLo961gzmJ1Tj +TQpgcmLNkQfWpb/ImWvtxBnmq0wROMVvMeJuScg/doAmAyYp4Db29iBT4xdwNBed +Y2gea+zDTYa4EzAvXUYNR0PVG6pZDrlcjQZIrXSHX8f8MVRBE+LHIQ6e4B4N4cB7 +Q4WQxYpYxmUKeFfyxiMPAdkgS94P+5KFdSpcc41teyWRyu5FrgZLAMzTsVlQ2jqI +OylDRl6XK1TOU2+NSueW+r9xDkKLfP0ooNBIytrEgUy7onOTJsjrDNYmiLbAJM+7 +vVvrdX3pCI6GMyx5dwlppYn8s3CQh3aP0yK7Qs69cwsgJirQmz1wHiRszYd2qReW +t88NkvuOGKmYSdGe/mBEciG5Ge3C9THxOUiIkCR1VBatzvT4aRRkOfujuLpwQMcn +HL/EVlP6Y2XQ8xwOFvVrhlhNGNTkDY6lnVuR3HYkUD/GKvvZt5y11ubQ2egZixVx +SK236thZiNSQvxaz2emsWWFUyBy6ysHK4bkgTI86k4mloMy/0/Z1pHWWbVY= +-----END CERTIFICATE----- + +# Issuer: CN=E-Tugra Certification Authority O=E-Tuğra EBG Bilişim Teknolojileri ve Hizmetleri A.Ş. OU=E-Tugra Sertifikasyon Merkezi +# Subject: CN=E-Tugra Certification Authority O=E-Tuğra EBG Bilişim Teknolojileri ve Hizmetleri A.Ş. OU=E-Tugra Sertifikasyon Merkezi +# Label: "E-Tugra Certification Authority" +# Serial: 7667447206703254355 +# MD5 Fingerprint: b8:a1:03:63:b0:bd:21:71:70:8a:6f:13:3a:bb:79:49 +# SHA1 Fingerprint: 51:c6:e7:08:49:06:6e:f3:92:d4:5c:a0:0d:6d:a3:62:8f:c3:52:39 +# SHA256 Fingerprint: b0:bf:d5:2b:b0:d7:d9:bd:92:bf:5d:4d:c1:3d:a2:55:c0:2c:54:2f:37:83:65:ea:89:39:11:f5:5e:55:f2:3c +-----BEGIN CERTIFICATE----- +MIIGSzCCBDOgAwIBAgIIamg+nFGby1MwDQYJKoZIhvcNAQELBQAwgbIxCzAJBgNV +BAYTAlRSMQ8wDQYDVQQHDAZBbmthcmExQDA+BgNVBAoMN0UtVHXEn3JhIEVCRyBC +aWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhpem1ldGxlcmkgQS7Fni4xJjAkBgNV +BAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBNZXJrZXppMSgwJgYDVQQDDB9FLVR1 +Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTEzMDMwNTEyMDk0OFoXDTIz +MDMwMzEyMDk0OFowgbIxCzAJBgNVBAYTAlRSMQ8wDQYDVQQHDAZBbmthcmExQDA+ +BgNVBAoMN0UtVHXEn3JhIEVCRyBCaWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhp +em1ldGxlcmkgQS7Fni4xJjAkBgNVBAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBN +ZXJrZXppMSgwJgYDVQQDDB9FLVR1Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5 +MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA4vU/kwVRHoViVF56C/UY +B4Oufq9899SKa6VjQzm5S/fDxmSJPZQuVIBSOTkHS0vdhQd2h8y/L5VMzH2nPbxH +D5hw+IyFHnSOkm0bQNGZDbt1bsipa5rAhDGvykPL6ys06I+XawGb1Q5KCKpbknSF +Q9OArqGIW66z6l7LFpp3RMih9lRozt6Plyu6W0ACDGQXwLWTzeHxE2bODHnv0ZEo +q1+gElIwcxmOj+GMB6LDu0rw6h8VqO4lzKRG+Bsi77MOQ7osJLjFLFzUHPhdZL3D +k14opz8n8Y4e0ypQBaNV2cvnOVPAmJ6MVGKLJrD3fY185MaeZkJVgkfnsliNZvcH +fC425lAcP9tDJMW/hkd5s3kc91r0E+xs+D/iWR+V7kI+ua2oMoVJl0b+SzGPWsut +dEcf6ZG33ygEIqDUD13ieU/qbIWGvaimzuT6w+Gzrt48Ue7LE3wBf4QOXVGUnhMM +ti6lTPk5cDZvlsouDERVxcr6XQKj39ZkjFqzAQqptQpHF//vkUAqjqFGOjGY5RH8 +zLtJVor8udBhmm9lbObDyz51Sf6Pp+KJxWfXnUYTTjF2OySznhFlhqt/7x3U+Lzn +rFpct1pHXFXOVbQicVtbC/DP3KBhZOqp12gKY6fgDT+gr9Oq0n7vUaDmUStVkhUX +U8u3Zg5mTPj5dUyQ5xJwx0UCAwEAAaNjMGEwHQYDVR0OBBYEFC7j27JJ0JxUeVz6 +Jyr+zE7S6E5UMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAULuPbsknQnFR5 +XPonKv7MTtLoTlQwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQAF +Nzr0TbdF4kV1JI+2d1LoHNgQk2Xz8lkGpD4eKexd0dCrfOAKkEh47U6YA5n+KGCR +HTAduGN8qOY1tfrTYXbm1gdLymmasoR6d5NFFxWfJNCYExL/u6Au/U5Mh/jOXKqY +GwXgAEZKgoClM4so3O0409/lPun++1ndYYRP0lSWE2ETPo+Aab6TR7U1Q9Jauz1c +77NCR807VRMGsAnb/WP2OogKmW9+4c4bU2pEZiNRCHu8W1Ki/QY3OEBhj0qWuJA3 ++GbHeJAAFS6LrVE1Uweoa2iu+U48BybNCAVwzDk/dr2l02cmAYamU9JgO3xDf1WK 
+vJUawSg5TB9D0pH0clmKuVb8P7Sd2nCcdlqMQ1DujjByTd//SffGqWfZbawCEeI6 +FiWnWAjLb1NBnEg4R2gz0dfHj9R0IdTDBZB6/86WiLEVKV0jq9BgoRJP3vQXzTLl +yb/IQ639Lo7xr+L0mPoSHyDYwKcMhcWQ9DstliaxLL5Mq+ux0orJ23gTDx4JnW2P +AJ8C2sH6H3p6CcRK5ogql5+Ji/03X186zjhZhkuvcQu02PJwT58yE+Owp1fl2tpD +y4Q08ijE6m30Ku/Ba3ba+367hTzSU8JNvnHhRdH9I2cNE3X7z2VnIp2usAnRCf8d +NL/+I5c30jn6PQ0GC7TbO6Orb1wdtn7os4I07QZcJA== +-----END CERTIFICATE----- + +# Issuer: CN=T-TeleSec GlobalRoot Class 2 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center +# Subject: CN=T-TeleSec GlobalRoot Class 2 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center +# Label: "T-TeleSec GlobalRoot Class 2" +# Serial: 1 +# MD5 Fingerprint: 2b:9b:9e:e4:7b:6c:1f:00:72:1a:cc:c1:77:79:df:6a +# SHA1 Fingerprint: 59:0d:2d:7d:88:4f:40:2e:61:7e:a5:62:32:17:65:cf:17:d8:94:e9 +# SHA256 Fingerprint: 91:e2:f5:78:8d:58:10:eb:a7:ba:58:73:7d:e1:54:8a:8e:ca:cd:01:45:98:bc:0b:14:3e:04:1b:17:05:25:52 +-----BEGIN CERTIFICATE----- +MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUx +KzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAd +BgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNl +YyBHbG9iYWxSb290IENsYXNzIDIwHhcNMDgxMDAxMTA0MDE0WhcNMzMxMDAxMjM1 +OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnBy +aXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50 +ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDIwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCqX9obX+hzkeXaXPSi5kfl82hVYAUd +AqSzm1nzHoqvNK38DcLZSBnuaY/JIPwhqgcZ7bBcrGXHX+0CfHt8LRvWurmAwhiC +FoT6ZrAIxlQjgeTNuUk/9k9uN0goOA/FvudocP05l03Sx5iRUKrERLMjfTlH6VJi +1hKTXrcxlkIF+3anHqP1wvzpesVsqXFP6st4vGCvx9702cu+fjOlbpSD8DT6Iavq +jnKgP6TeMFvvhk1qlVtDRKgQFRzlAVfFmPHmBiiRqiDFt1MmUUOyCxGVWOHAD3bZ +wI18gfNycJ5v/hqO2V81xrJvNHy+SE/iWjnX2J14np+GPgNeGYtEotXHAgMBAAGj +QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS/ +WSA2AHmgoCJrjNXyYdK4LMuCSjANBgkqhkiG9w0BAQsFAAOCAQEAMQOiYQsfdOhy +NsZt+U2e+iKo4YFWz827n+qrkRk4r6p8FU3ztqONpfSO9kSpp+ghla0+AGIWiPAC +uvxhI+YzmzB6azZie60EI4RYZeLbK4rnJVM3YlNfvNoBYimipidx5joifsFvHZVw +IEoHNN/q/xWA5brXethbdXwFeilHfkCoMRN3zUA7tFFHei4R40cR3p1m0IvVVGb6 +g1XqfMIpiRvpb7PO4gWEyS8+eIVibslfwXhjdFjASBgMmTnrpMwatXlajRWc2BQN +9noHV8cigwUtPJslJj0Ys6lDfMjIq2SPDqO/nBudMNva0Bkuqjzx+zOAduTNrRlP +BSeOE6Fuwg== +-----END CERTIFICATE----- + +# Issuer: CN=Atos TrustedRoot 2011 O=Atos +# Subject: CN=Atos TrustedRoot 2011 O=Atos +# Label: "Atos TrustedRoot 2011" +# Serial: 6643877497813316402 +# MD5 Fingerprint: ae:b9:c4:32:4b:ac:7f:5d:66:cc:77:94:bb:2a:77:56 +# SHA1 Fingerprint: 2b:b1:f5:3e:55:0c:1d:c5:f1:d4:e6:b7:6a:46:4b:55:06:02:ac:21 +# SHA256 Fingerprint: f3:56:be:a2:44:b7:a9:1e:b3:5d:53:ca:9a:d7:86:4a:ce:01:8e:2d:35:d5:f8:f9:6d:df:68:a6:f4:1a:a4:74 +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIIXDPLYixfszIwDQYJKoZIhvcNAQELBQAwPDEeMBwGA1UE +AwwVQXRvcyBUcnVzdGVkUm9vdCAyMDExMQ0wCwYDVQQKDARBdG9zMQswCQYDVQQG +EwJERTAeFw0xMTA3MDcxNDU4MzBaFw0zMDEyMzEyMzU5NTlaMDwxHjAcBgNVBAMM +FUF0b3MgVHJ1c3RlZFJvb3QgMjAxMTENMAsGA1UECgwEQXRvczELMAkGA1UEBhMC +REUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCVhTuXbyo7LjvPpvMp +Nb7PGKw+qtn4TaA+Gke5vJrf8v7MPkfoepbCJI419KkM/IL9bcFyYie96mvr54rM +VD6QUM+A1JX76LWC1BTFtqlVJVfbsVD2sGBkWXppzwO3bw2+yj5vdHLqqjAqc2K+ +SZFhyBH+DgMq92og3AIVDV4VavzjgsG1xZ1kCWyjWZgHJ8cblithdHFsQ/H3NYkQ +4J7sVaE3IqKHBAUsR320HLliKWYoyrfhk/WklAOZuXCFteZI6o1Q/NnezG8HDt0L +cp2AMBYHlT8oDv3FdU9T1nSatCQujgKRz3bFmx5VdJx4IbHwLfELn8LVlhgf8FQi +eowHAgMBAAGjfTB7MB0GA1UdDgQWBBSnpQaxLKYJYO7Rl+lwrrw7GWzbITAPBgNV 
+HRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKelBrEspglg7tGX6XCuvDsZbNshMBgG +A1UdIAQRMA8wDQYLKwYBBAGwLQMEAQEwDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3 +DQEBCwUAA4IBAQAmdzTblEiGKkGdLD4GkGDEjKwLVLgfuXvTBznk+j57sj1O7Z8j +vZfza1zv7v1Apt+hk6EKhqzvINB5Ab149xnYJDE0BAGmuhWawyfc2E8PzBhj/5kP +DpFrdRbhIfzYJsdHt6bPWHJxfrrhTZVHO8mvbaG0weyJ9rQPOLXiZNwlz6bb65pc +maHFCN795trV1lpFDMS3wrUU77QR/w4VtfX128a961qn8FYiqTxlVMYVqL2Gns2D +lmh6cYGJ4Qvh6hEbaAjMaZ7snkGeRDImeuKHCnE96+RapNLbxc3G3mB/ufNPRJLv +KrcYPqcZ2Qt9sTdBQrC6YB3y/gkRsPCHe6ed +-----END CERTIFICATE----- + +# Issuer: CN=QuoVadis Root CA 1 G3 O=QuoVadis Limited +# Subject: CN=QuoVadis Root CA 1 G3 O=QuoVadis Limited +# Label: "QuoVadis Root CA 1 G3" +# Serial: 687049649626669250736271037606554624078720034195 +# MD5 Fingerprint: a4:bc:5b:3f:fe:37:9a:fa:64:f0:e2:fa:05:3d:0b:ab +# SHA1 Fingerprint: 1b:8e:ea:57:96:29:1a:c9:39:ea:b8:0a:81:1a:73:73:c0:93:79:67 +# SHA256 Fingerprint: 8a:86:6f:d1:b2:76:b5:7e:57:8e:92:1c:65:82:8a:2b:ed:58:e9:f2:f2:88:05:41:34:b7:f1:f4:bf:c9:cc:74 +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIUeFhfLq0sGUvjNwc1NBMotZbUZZMwDQYJKoZIhvcNAQEL +BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc +BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMSBHMzAeFw0xMjAxMTIxNzI3NDRaFw00 +MjAxMTIxNzI3NDRaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDEgRzMwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCgvlAQjunybEC0BJyFuTHK3C3kEakEPBtV +wedYMB0ktMPvhd6MLOHBPd+C5k+tR4ds7FtJwUrVu4/sh6x/gpqG7D0DmVIB0jWe +rNrwU8lmPNSsAgHaJNM7qAJGr6Qc4/hzWHa39g6QDbXwz8z6+cZM5cOGMAqNF341 +68Xfuw6cwI2H44g4hWf6Pser4BOcBRiYz5P1sZK0/CPTz9XEJ0ngnjybCKOLXSoh +4Pw5qlPafX7PGglTvF0FBM+hSo+LdoINofjSxxR3W5A2B4GbPgb6Ul5jxaYA/qXp +UhtStZI5cgMJYr2wYBZupt0lwgNm3fME0UDiTouG9G/lg6AnhF4EwfWQvTA9xO+o +abw4m6SkltFi2mnAAZauy8RRNOoMqv8hjlmPSlzkYZqn0ukqeI1RPToV7qJZjqlc +3sX5kCLliEVx3ZGZbHqfPT2YfF72vhZooF6uCyP8Wg+qInYtyaEQHeTTRCOQiJ/G +KubX9ZqzWB4vMIkIG1SitZgj7Ah3HJVdYdHLiZxfokqRmu8hqkkWCKi9YSgxyXSt +hfbZxbGL0eUQMk1fiyA6PEkfM4VZDdvLCXVDaXP7a3F98N/ETH3Goy7IlXnLc6KO +Tk0k+17kBL5yG6YnLUlamXrXXAkgt3+UuU/xDRxeiEIbEbfnkduebPRq34wGmAOt +zCjvpUfzUwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQUo5fW816iEOGrRZ88F2Q87gFwnMwwDQYJKoZIhvcNAQELBQAD +ggIBABj6W3X8PnrHX3fHyt/PX8MSxEBd1DKquGrX1RUVRpgjpeaQWxiZTOOtQqOC +MTaIzen7xASWSIsBx40Bz1szBpZGZnQdT+3Btrm0DWHMY37XLneMlhwqI2hrhVd2 +cDMT/uFPpiN3GPoajOi9ZcnPP/TJF9zrx7zABC4tRi9pZsMbj/7sPtPKlL92CiUN +qXsCHKnQO18LwIE6PWThv6ctTr1NxNgpxiIY0MWscgKCP6o6ojoilzHdCGPDdRS5 +YCgtW2jgFqlmgiNR9etT2DGbe+m3nUvriBbP+V04ikkwj+3x6xn0dxoxGE1nVGwv +b2X52z3sIexe9PSLymBlVNFxZPT5pqOBMzYzcfCkeF9OrYMh3jRJjehZrJ3ydlo2 +8hP0r+AJx2EqbPfgna67hkooby7utHnNkDPDs3b69fBsnQGQ+p6Q9pxyz0fawx/k +NSBT8lTR32GDpgLiJTjehTItXnOQUl1CxM49S+H5GYQd1aJQzEH7QRTDvdbJWqNj +ZgKAvQU6O0ec7AAmTPWIUb+oI38YB7AL7YsmoWTTYUrrXJ/es69nA7Mf3W1daWhp +q1467HxpvMc7hU6eFbm0FU/DlXpY18ls6Wy58yljXrQs8C097Vpl4KlbQMJImYFt +nh8GKjwStIsPm6Ik8KaN1nrgS7ZklmOVhMJKzRwuJIczYOXD +-----END CERTIFICATE----- + +# Issuer: CN=QuoVadis Root CA 2 G3 O=QuoVadis Limited +# Subject: CN=QuoVadis Root CA 2 G3 O=QuoVadis Limited +# Label: "QuoVadis Root CA 2 G3" +# Serial: 390156079458959257446133169266079962026824725800 +# MD5 Fingerprint: af:0c:86:6e:bf:40:2d:7f:0b:3e:12:50:ba:12:3d:06 +# SHA1 Fingerprint: 09:3c:61:f3:8b:8b:dc:7d:55:df:75:38:02:05:00:e1:25:f5:c8:36 +# SHA256 Fingerprint: 8f:e4:fb:0a:f9:3a:4d:0d:67:db:0b:eb:b2:3e:37:c7:1b:f3:25:dc:bc:dd:24:0e:a0:4d:af:58:b4:7e:18:40 +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIURFc0JFuBiZs18s64KztbpybwdSgwDQYJKoZIhvcNAQEL 
+BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc +BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMiBHMzAeFw0xMjAxMTIxODU5MzJaFw00 +MjAxMTIxODU5MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDIgRzMwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQChriWyARjcV4g/Ruv5r+LrI3HimtFhZiFf +qq8nUeVuGxbULX1QsFN3vXg6YOJkApt8hpvWGo6t/x8Vf9WVHhLL5hSEBMHfNrMW +n4rjyduYNM7YMxcoRvynyfDStNVNCXJJ+fKH46nafaF9a7I6JaltUkSs+L5u+9ym +c5GQYaYDFCDy54ejiK2toIz/pgslUiXnFgHVy7g1gQyjO/Dh4fxaXc6AcW34Sas+ +O7q414AB+6XrW7PFXmAqMaCvN+ggOp+oMiwMzAkd056OXbxMmO7FGmh77FOm6RQ1 +o9/NgJ8MSPsc9PG/Srj61YxxSscfrf5BmrODXfKEVu+lV0POKa2Mq1W/xPtbAd0j +IaFYAI7D0GoT7RPjEiuA3GfmlbLNHiJuKvhB1PLKFAeNilUSxmn1uIZoL1NesNKq +IcGY5jDjZ1XHm26sGahVpkUG0CM62+tlXSoREfA7T8pt9DTEceT/AFr2XK4jYIVz +8eQQsSWu1ZK7E8EM4DnatDlXtas1qnIhO4M15zHfeiFuuDIIfR0ykRVKYnLP43eh +vNURG3YBZwjgQQvD6xVu+KQZ2aKrr+InUlYrAoosFCT5v0ICvybIxo/gbjh9Uy3l +7ZizlWNof/k19N+IxWA1ksB8aRxhlRbQ694Lrz4EEEVlWFA4r0jyWbYW8jwNkALG +cC4BrTwV1wIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQU7edvdlq/YOxJW8ald7tyFnGbxD0wDQYJKoZIhvcNAQELBQAD +ggIBAJHfgD9DCX5xwvfrs4iP4VGyvD11+ShdyLyZm3tdquXK4Qr36LLTn91nMX66 +AarHakE7kNQIXLJgapDwyM4DYvmL7ftuKtwGTTwpD4kWilhMSA/ohGHqPHKmd+RC +roijQ1h5fq7KpVMNqT1wvSAZYaRsOPxDMuHBR//47PERIjKWnML2W2mWeyAMQ0Ga +W/ZZGYjeVYg3UQt4XAoeo0L9x52ID8DyeAIkVJOviYeIyUqAHerQbj5hLja7NQ4n +lv1mNDthcnPxFlxHBlRJAHpYErAK74X9sbgzdWqTHBLmYF5vHX/JHyPLhGGfHoJE ++V+tYlUkmlKY7VHnoX6XOuYvHxHaU4AshZ6rNRDbIl9qxV6XU/IyAgkwo1jwDQHV +csaxfGl7w/U2Rcxhbl5MlMVerugOXou/983g7aEOGzPuVBj+D77vfoRrQ+NwmNtd +dbINWQeFFSM51vHfqSYP1kjHs6Yi9TM3WpVHn3u6GBVv/9YUZINJ0gpnIdsPNWNg +KCLjsZWDzYWm3S8P52dSbrsvhXz1SnPnxT7AvSESBT/8twNJAlvIJebiVDj1eYeM +HVOyToV7BjjHLPj4sHKNJeV3UvQDHEimUF+IIDBu8oJDqz2XhOdT+yHBTw8imoa4 +WSr2Rz0ZiC3oheGe7IUIarFsNMkd7EgrO3jtZsSOeWmD3n+M +-----END CERTIFICATE----- + +# Issuer: CN=QuoVadis Root CA 3 G3 O=QuoVadis Limited +# Subject: CN=QuoVadis Root CA 3 G3 O=QuoVadis Limited +# Label: "QuoVadis Root CA 3 G3" +# Serial: 268090761170461462463995952157327242137089239581 +# MD5 Fingerprint: df:7d:b9:ad:54:6f:68:a1:df:89:57:03:97:43:b0:d7 +# SHA1 Fingerprint: 48:12:bd:92:3c:a8:c4:39:06:e7:30:6d:27:96:e6:a4:cf:22:2e:7d +# SHA256 Fingerprint: 88:ef:81:de:20:2e:b0:18:45:2e:43:f8:64:72:5c:ea:5f:bd:1f:c2:d9:d2:05:73:07:09:c5:d8:b8:69:0f:46 +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIULvWbAiin23r/1aOp7r0DoM8Sah0wDQYJKoZIhvcNAQEL +BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc +BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMyBHMzAeFw0xMjAxMTIyMDI2MzJaFw00 +MjAxMTIyMDI2MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDMgRzMwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCzyw4QZ47qFJenMioKVjZ/aEzHs286IxSR +/xl/pcqs7rN2nXrpixurazHb+gtTTK/FpRp5PIpM/6zfJd5O2YIyC0TeytuMrKNu +FoM7pmRLMon7FhY4futD4tN0SsJiCnMK3UmzV9KwCoWdcTzeo8vAMvMBOSBDGzXR +U7Ox7sWTaYI+FrUoRqHe6okJ7UO4BUaKhvVZR74bbwEhELn9qdIoyhA5CcoTNs+c +ra1AdHkrAj80//ogaX3T7mH1urPnMNA3I4ZyYUUpSFlob3emLoG+B01vr87ERROR +FHAGjx+f+IdpsQ7vw4kZ6+ocYfx6bIrc1gMLnia6Et3UVDmrJqMz6nWB2i3ND0/k +A9HvFZcba5DFApCTZgIhsUfei5pKgLlVj7WiL8DWM2fafsSntARE60f75li59wzw +eyuxwHApw0BiLTtIadwjPEjrewl5qW3aqDCYz4ByA4imW0aucnl8CAMhZa634Ryl +sSqiMd5mBPfAdOhx3v89WcyWJhKLhZVXGqtrdQtEPREoPHtht+KPZ0/l7DxMYIBp +VzgeAVuNVejH38DMdyM0SXV89pgR6y3e7UEuFAUCf+D+IOs15xGsIs5XPd7JMG0Q +A4XN8f+MFrXBsj6IbGB/kE+V9/YtrQE5BwT6dYB9v0lQ7e/JxHwc64B+27bQ3RP+ +ydOc17KXqQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQUxhfQvKjqAkPyGwaZXSuQILnXnOQwDQYJKoZIhvcNAQELBQAD 
+ggIBADRh2Va1EodVTd2jNTFGu6QHcrxfYWLopfsLN7E8trP6KZ1/AvWkyaiTt3px +KGmPc+FSkNrVvjrlt3ZqVoAh313m6Tqe5T72omnHKgqwGEfcIHB9UqM+WXzBusnI +FUBhynLWcKzSt/Ac5IYp8M7vaGPQtSCKFWGafoaYtMnCdvvMujAWzKNhxnQT5Wvv +oxXqA/4Ti2Tk08HS6IT7SdEQTXlm66r99I0xHnAUrdzeZxNMgRVhvLfZkXdxGYFg +u/BYpbWcC/ePIlUnwEsBbTuZDdQdm2NnL9DuDcpmvJRPpq3t/O5jrFc/ZSXPsoaP +0Aj/uHYUbt7lJ+yreLVTubY/6CD50qi+YUbKh4yE8/nxoGibIh6BJpsQBJFxwAYf +3KDTuVan45gtf4Od34wrnDKOMpTwATwiKp9Dwi7DmDkHOHv8XgBCH/MyJnmDhPbl +8MFREsALHgQjDFSlTC9JxUrRtm5gDWv8a4uFJGS3iQ6rJUdbPM9+Sb3H6QrG2vd+ +DhcI00iX0HGS8A85PjRqHH3Y8iKuu2n0M7SmSFXRDw4m6Oy2Cy2nhTXN/VnIn9HN +PlopNLk9hM6xZdRZkZFWdSHBd575euFgndOtBBj0fOtek49TSiIp+EgrPk2GrFt/ +ywaZWWDYWGWVjUTR939+J399roD1B0y2PpxxVJkES/1Y+Zj0 +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Assured ID Root G2 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Assured ID Root G2 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Assured ID Root G2" +# Serial: 15385348160840213938643033620894905419 +# MD5 Fingerprint: 92:38:b9:f8:63:24:82:65:2c:57:33:e6:fe:81:8f:9d +# SHA1 Fingerprint: a1:4b:48:d9:43:ee:0a:0e:40:90:4f:3c:e0:a4:c0:91:93:51:5d:3f +# SHA256 Fingerprint: 7d:05:eb:b6:82:33:9f:8c:94:51:ee:09:4e:eb:fe:fa:79:53:a1:14:ed:b2:f4:49:49:45:2f:ab:7d:2f:c1:85 +-----BEGIN CERTIFICATE----- +MIIDljCCAn6gAwIBAgIQC5McOtY5Z+pnI7/Dr5r0SzANBgkqhkiG9w0BAQsFADBl +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv +b3QgRzIwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl +cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzIwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDZ5ygvUj82ckmIkzTz+GoeMVSA +n61UQbVH35ao1K+ALbkKz3X9iaV9JPrjIgwrvJUXCzO/GU1BBpAAvQxNEP4Htecc +biJVMWWXvdMX0h5i89vqbFCMP4QMls+3ywPgym2hFEwbid3tALBSfK+RbLE4E9Hp +EgjAALAcKxHad3A2m67OeYfcgnDmCXRwVWmvo2ifv922ebPynXApVfSr/5Vh88lA +bx3RvpO704gqu52/clpWcTs/1PPRCv4o76Pu2ZmvA9OPYLfykqGxvYmJHzDNw6Yu +YjOuFgJ3RFrngQo8p0Quebg/BLxcoIfhG69Rjs3sLPr4/m3wOnyqi+RnlTGNAgMB +AAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQW +BBTOw0q5mVXyuNtgv6l+vVa1lzan1jANBgkqhkiG9w0BAQsFAAOCAQEAyqVVjOPI +QW5pJ6d1Ee88hjZv0p3GeDgdaZaikmkuOGybfQTUiaWxMTeKySHMq2zNixya1r9I +0jJmwYrA8y8678Dj1JGG0VDjA9tzd29KOVPt3ibHtX2vK0LRdWLjSisCx1BL4Gni +lmwORGYQRI+tBev4eaymG+g3NJ1TyWGqolKvSnAWhsI6yLETcDbYz+70CjTVW0z9 +B5yiutkBclzzTcHdDrEcDcRjvq30FPuJ7KJBDkzMyFdA0G4Dqs0MjomZmWzwPDCv +ON9vvKO+KSAnq3T/EyJ43pdSVR6DtVQgA+6uwE9W3jfMw3+qBCe703e4YtsXfJwo +IhNzbM8m9Yop5w== +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Assured ID Root G3 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Assured ID Root G3 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Assured ID Root G3" +# Serial: 15459312981008553731928384953135426796 +# MD5 Fingerprint: 7c:7f:65:31:0c:81:df:8d:ba:3e:99:e2:5c:ad:6e:fb +# SHA1 Fingerprint: f5:17:a2:4f:9a:48:c6:c9:f8:a2:00:26:9f:dc:0f:48:2c:ab:30:89 +# SHA256 Fingerprint: 7e:37:cb:8b:4c:47:09:0c:ab:36:55:1b:a6:f4:5d:b8:40:68:0f:ba:16:6a:95:2d:b1:00:71:7f:43:05:3f:c2 +-----BEGIN CERTIFICATE----- +MIICRjCCAc2gAwIBAgIQC6Fa+h3foLVJRK/NJKBs7DAKBggqhkjOPQQDAzBlMQsw +CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu +ZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3Qg +RzMwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQGEwJV +UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu +Y29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzMwdjAQBgcq 
+hkjOPQIBBgUrgQQAIgNiAAQZ57ysRGXtzbg/WPuNsVepRC0FFfLvC/8QdJ+1YlJf +Zn4f5dwbRXkLzMZTCp2NXQLZqVneAlr2lSoOjThKiknGvMYDOAdfVdp+CW7if17Q +RSAPWXYQ1qAk8C3eNvJsKTmjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/ +BAQDAgGGMB0GA1UdDgQWBBTL0L2p4ZgFUaFNN6KDec6NHSrkhDAKBggqhkjOPQQD +AwNnADBkAjAlpIFFAmsSS3V0T8gj43DydXLefInwz5FyYZ5eEJJZVrmDxxDnOOlY +JjZ91eQ0hjkCMHw2U/Aw5WJjOpnitqM7mzT6HtoQknFekROn3aRukswy1vUhZscv +6pZjamVFkpUBtA== +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Global Root G2 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Global Root G2 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Global Root G2" +# Serial: 4293743540046975378534879503202253541 +# MD5 Fingerprint: e4:a6:8a:c8:54:ac:52:42:46:0a:fd:72:48:1b:2a:44 +# SHA1 Fingerprint: df:3c:24:f9:bf:d6:66:76:1b:26:80:73:fe:06:d1:cc:8d:4f:82:a4 +# SHA256 Fingerprint: cb:3c:cb:b7:60:31:e5:e0:13:8f:8d:d3:9a:23:f9:de:47:ff:c3:5e:43:c1:14:4c:ea:27:d4:6a:5a:b1:cb:5f +-----BEGIN CERTIFICATE----- +MIIDjjCCAnagAwIBAgIQAzrx5qcRqaC7KGSxHQn65TANBgkqhkiG9w0BAQsFADBh +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBH +MjAeFw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVT +MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j +b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEcyMIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuzfNNNx7a8myaJCtSnX/RrohCgiN9RlUyfuI +2/Ou8jqJkTx65qsGGmvPrC3oXgkkRLpimn7Wo6h+4FR1IAWsULecYxpsMNzaHxmx +1x7e/dfgy5SDN67sH0NO3Xss0r0upS/kqbitOtSZpLYl6ZtrAGCSYP9PIUkY92eQ +q2EGnI/yuum06ZIya7XzV+hdG82MHauVBJVJ8zUtluNJbd134/tJS7SsVQepj5Wz +tCO7TG1F8PapspUwtP1MVYwnSlcUfIKdzXOS0xZKBgyMUNGPHgm+F6HmIcr9g+UQ +vIOlCsRnKPZzFBQ9RnbDhxSJITRNrw9FDKZJobq7nMWxM4MphQIDAQABo0IwQDAP +BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUTiJUIBiV +5uNu5g/6+rkS7QYXjzkwDQYJKoZIhvcNAQELBQADggEBAGBnKJRvDkhj6zHd6mcY +1Yl9PMWLSn/pvtsrF9+wX3N3KjITOYFnQoQj8kVnNeyIv/iPsGEMNKSuIEyExtv4 +NeF22d+mQrvHRAiGfzZ0JFrabA0UWTW98kndth/Jsw1HKj2ZL7tcu7XUIOGZX1NG +Fdtom/DzMNU+MeKNhJ7jitralj41E6Vf8PlwUHBHQRFXGU7Aj64GxJUTFy8bJZ91 +8rGOmaFvE7FBcf6IKshPECBV1/MUReXgRPTqh5Uykw7+U0b6LJ3/iyK5S9kJRaTe +pLiaWN0bfVKfjllDiIGknibVb63dDcY3fe0Dkhvld1927jyNxF1WW6LZZm6zNTfl +MrY= +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Global Root G3 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Global Root G3 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Global Root G3" +# Serial: 7089244469030293291760083333884364146 +# MD5 Fingerprint: f5:5d:a4:50:a5:fb:28:7e:1e:0f:0d:cc:96:57:56:ca +# SHA1 Fingerprint: 7e:04:de:89:6a:3e:66:6d:00:e6:87:d3:3f:fa:d9:3b:e8:3d:34:9e +# SHA256 Fingerprint: 31:ad:66:48:f8:10:41:38:c7:38:f3:9e:a4:32:01:33:39:3e:3a:18:cc:02:29:6e:f9:7c:2a:c9:ef:67:31:d0 +-----BEGIN CERTIFICATE----- +MIICPzCCAcWgAwIBAgIQBVVWvPJepDU1w6QP1atFcjAKBggqhkjOPQQDAzBhMQsw +CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu +ZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBHMzAe +Fw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVTMRUw +EwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5jb20x +IDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEczMHYwEAYHKoZIzj0CAQYF +K4EEACIDYgAE3afZu4q4C/sLfyHS8L6+c/MzXRq8NOrexpu80JX28MzQC7phW1FG +fp4tn+6OYwwX7Adw9c+ELkCDnOg/QW07rdOkFFk2eJ0DQ+4QE2xy3q6Ip6FrtUPO +Z9wj/wMco+I+o0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAd +BgNVHQ4EFgQUs9tIpPmhxdiuNkHMEWNpYim8S8YwCgYIKoZIzj0EAwMDaAAwZQIx +AK288mw/EkrRLTnDCgmXc/SINoyIJ7vmiI1Qhadj+Z4y3maTD/HMsQmP3Wyr+mt/ 
+oAIwOWZbwmSNuJ5Q3KjVSaLtx9zRSX8XAbjIho9OjIgrqJqpisXRAL34VOKa5Vt8 +sycX +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Trusted Root G4 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Trusted Root G4 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Trusted Root G4" +# Serial: 7451500558977370777930084869016614236 +# MD5 Fingerprint: 78:f2:fc:aa:60:1f:2f:b4:eb:c9:37:ba:53:2e:75:49 +# SHA1 Fingerprint: dd:fb:16:cd:49:31:c9:73:a2:03:7d:3f:c8:3a:4d:7d:77:5d:05:e4 +# SHA256 Fingerprint: 55:2f:7b:dc:f1:a7:af:9e:6c:e6:72:01:7f:4f:12:ab:f7:72:40:c7:8e:76:1a:c2:03:d1:d9:d2:0a:c8:99:88 +-----BEGIN CERTIFICATE----- +MIIFkDCCA3igAwIBAgIQBZsbV56OITLiOQe9p3d1XDANBgkqhkiG9w0BAQwFADBi +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3Qg +RzQwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBiMQswCQYDVQQGEwJV +UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu +Y29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3QgRzQwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQC/5pBzaN675F1KPDAiMGkz7MKnJS7JIT3y +ithZwuEppz1Yq3aaza57G4QNxDAf8xukOBbrVsaXbR2rsnnyyhHS5F/WBTxSD1If +xp4VpX6+n6lXFllVcq9ok3DCsrp1mWpzMpTREEQQLt+C8weE5nQ7bXHiLQwb7iDV +ySAdYyktzuxeTsiT+CFhmzTrBcZe7FsavOvJz82sNEBfsXpm7nfISKhmV1efVFiO +DCu3T6cw2Vbuyntd463JT17lNecxy9qTXtyOj4DatpGYQJB5w3jHtrHEtWoYOAMQ +jdjUN6QuBX2I9YI+EJFwq1WCQTLX2wRzKm6RAXwhTNS8rhsDdV14Ztk6MUSaM0C/ +CNdaSaTC5qmgZ92kJ7yhTzm1EVgX9yRcRo9k98FpiHaYdj1ZXUJ2h4mXaXpI8OCi +EhtmmnTK3kse5w5jrubU75KSOp493ADkRSWJtppEGSt+wJS00mFt6zPZxd9LBADM +fRyVw4/3IbKyEbe7f/LVjHAsQWCqsWMYRJUadmJ+9oCw++hkpjPRiQfhvbfmQ6QY +uKZ3AeEPlAwhHbJUKSWJbOUOUlFHdL4mrLZBdd56rF+NP8m800ERElvlEFDrMcXK +chYiCd98THU/Y+whX8QgUWtvsauGi0/C1kVfnSD8oR7FwI+isX4KJpn15GkvmB0t +9dmpsh3lGwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +hjAdBgNVHQ4EFgQU7NfjgtJxXWRM3y5nP+e6mK4cD08wDQYJKoZIhvcNAQEMBQAD +ggIBALth2X2pbL4XxJEbw6GiAI3jZGgPVs93rnD5/ZpKmbnJeFwMDF/k5hQpVgs2 +SV1EY+CtnJYYZhsjDT156W1r1lT40jzBQ0CuHVD1UvyQO7uYmWlrx8GnqGikJ9yd ++SeuMIW59mdNOj6PWTkiU0TryF0Dyu1Qen1iIQqAyHNm0aAFYF/opbSnr6j3bTWc +fFqK1qI4mfN4i/RN0iAL3gTujJtHgXINwBQy7zBZLq7gcfJW5GqXb5JQbZaNaHqa +sjYUegbyJLkJEVDXCLG4iXqEI2FCKeWjzaIgQdfRnGTZ6iahixTXTBmyUEFxPT9N +cCOGDErcgdLMMpSEDQgJlxxPwO5rIHQw0uA5NBCFIRUBCOhVMt5xSdkoF1BN5r5N +0XWs0Mr7QbhDparTwwVETyw2m+L64kW4I1NsBm9nVX9GtUw/bihaeSbSpKhil9Ie +4u1Ki7wb/UdKDd9nZn6yW0HQO+T0O/QEY+nvwlQAUaCKKsnOeMzV6ocEGLPOr0mI +r/OSmbaz5mEP0oUA51Aa5BuVnRmhuZyxm7EAHu/QD09CbMkKvO5D+jpxpchNJqU1 +/YldvIViHTLSoCtU7ZpXwdv6EM8Zt4tKG48BtieVU+i2iW1bvGjUI+iLUaJW+fCm +gKDWHrO8Dw9TdSmq6hN35N6MgSGtBxBHEa2HPQfRdbzP82Z+ +-----END CERTIFICATE----- + +# Issuer: CN=COMODO RSA Certification Authority O=COMODO CA Limited +# Subject: CN=COMODO RSA Certification Authority O=COMODO CA Limited +# Label: "COMODO RSA Certification Authority" +# Serial: 101909084537582093308941363524873193117 +# MD5 Fingerprint: 1b:31:b0:71:40:36:cc:14:36:91:ad:c4:3e:fd:ec:18 +# SHA1 Fingerprint: af:e5:d2:44:a8:d1:19:42:30:ff:47:9f:e2:f8:97:bb:cd:7a:8c:b4 +# SHA256 Fingerprint: 52:f0:e1:c4:e5:8e:c6:29:29:1b:60:31:7f:07:46:71:b8:5d:7e:a8:0d:5b:07:27:34:63:53:4b:32:b4:02:34 +-----BEGIN CERTIFICATE----- +MIIF2DCCA8CgAwIBAgIQTKr5yttjb+Af907YWwOGnTANBgkqhkiG9w0BAQwFADCB +hTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G +A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNV +BAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMTE5 +MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgT 
+EkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMR +Q09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCR +6FSS0gpWsawNJN3Fz0RndJkrN6N9I3AAcbxT38T6KhKPS38QVr2fcHK3YX/JSw8X +pz3jsARh7v8Rl8f0hj4K+j5c+ZPmNHrZFGvnnLOFoIJ6dq9xkNfs/Q36nGz637CC +9BR++b7Epi9Pf5l/tfxnQ3K9DADWietrLNPtj5gcFKt+5eNu/Nio5JIk2kNrYrhV +/erBvGy2i/MOjZrkm2xpmfh4SDBF1a3hDTxFYPwyllEnvGfDyi62a+pGx8cgoLEf +Zd5ICLqkTqnyg0Y3hOvozIFIQ2dOciqbXL1MGyiKXCJ7tKuY2e7gUYPDCUZObT6Z ++pUX2nwzV0E8jVHtC7ZcryxjGt9XyD+86V3Em69FmeKjWiS0uqlWPc9vqv9JWL7w +qP/0uK3pN/u6uPQLOvnoQ0IeidiEyxPx2bvhiWC4jChWrBQdnArncevPDt09qZah +SL0896+1DSJMwBGB7FY79tOi4lu3sgQiUpWAk2nojkxl8ZEDLXB0AuqLZxUpaVIC +u9ffUGpVRr+goyhhf3DQw6KqLCGqR84onAZFdr+CGCe01a60y1Dma/RMhnEw6abf +Fobg2P9A3fvQQoh/ozM6LlweQRGBY84YcWsr7KaKtzFcOmpH4MN5WdYgGq/yapiq +crxXStJLnbsQ/LBMQeXtHT1eKJ2czL+zUdqnR+WEUwIDAQABo0IwQDAdBgNVHQ4E +FgQUu69+Aj36pvE8hI6t7jiY7NkyMtQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB +/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAArx1UaEt65Ru2yyTUEUAJNMnMvl +wFTPoCWOAvn9sKIN9SCYPBMtrFaisNZ+EZLpLrqeLppysb0ZRGxhNaKatBYSaVqM +4dc+pBroLwP0rmEdEBsqpIt6xf4FpuHA1sj+nq6PK7o9mfjYcwlYRm6mnPTXJ9OV +2jeDchzTc+CiR5kDOF3VSXkAKRzH7JsgHAckaVd4sjn8OoSgtZx8jb8uk2Intzna +FxiuvTwJaP+EmzzV1gsD41eeFPfR60/IvYcjt7ZJQ3mFXLrrkguhxuhoqEwWsRqZ +CuhTLJK7oQkYdQxlqHvLI7cawiiFwxv/0Cti76R7CZGYZ4wUAc1oBmpjIXUDgIiK +boHGhfKppC3n9KUkEEeDys30jXlYsQab5xoq2Z0B15R97QNKyvDb6KkBPvVWmcke +jkk9u+UJueBPSZI9FoJAzMxZxuY67RIuaTxslbH9qh17f4a+Hg4yRvv7E491f0yL +S0Zj/gA0QHDBw7mh3aZw4gSzQbzpgJHqZJx64SIDqZxubw5lT2yHh17zbqD5daWb +QOhTsiedSrnAdyGN/4fy3ryM7xfft0kL0fJuMAsaDk527RH89elWsn2/x20Kk4yl +0MC2Hb46TpSi125sC8KKfPog88Tk5c0NqMuRkrF8hey1FGlmDoLnzc7ILaZRfyHB +NVOFBkpdn627G190 +-----END CERTIFICATE----- + +# Issuer: CN=USERTrust RSA Certification Authority O=The USERTRUST Network +# Subject: CN=USERTrust RSA Certification Authority O=The USERTRUST Network +# Label: "USERTrust RSA Certification Authority" +# Serial: 2645093764781058787591871645665788717 +# MD5 Fingerprint: 1b:fe:69:d1:91:b7:19:33:a3:72:a8:0f:e1:55:e5:b5 +# SHA1 Fingerprint: 2b:8f:1b:57:33:0d:bb:a2:d0:7a:6c:51:f7:0e:e9:0d:da:b9:ad:8e +# SHA256 Fingerprint: e7:93:c9:b0:2f:d8:aa:13:e2:1c:31:22:8a:cc:b0:81:19:64:3b:74:9c:89:89:64:b1:74:6d:46:c3:d4:cb:d2 +-----BEGIN CERTIFICATE----- +MIIF3jCCA8agAwIBAgIQAf1tMPyjylGoG7xkDjUDLTANBgkqhkiG9w0BAQwFADCB +iDELMAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0pl +cnNleSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNV +BAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAw +MjAxMDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNV +BAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVU +aGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBSU0EgQ2Vy +dGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQCAEmUXNg7D2wiz0KxXDXbtzSfTTK1Qg2HiqiBNCS1kCdzOiZ/MPans9s/B +3PHTsdZ7NygRK0faOca8Ohm0X6a9fZ2jY0K2dvKpOyuR+OJv0OwWIJAJPuLodMkY +tJHUYmTbf6MG8YgYapAiPLz+E/CHFHv25B+O1ORRxhFnRghRy4YUVD+8M/5+bJz/ +Fp0YvVGONaanZshyZ9shZrHUm3gDwFA66Mzw3LyeTP6vBZY1H1dat//O+T23LLb2 +VN3I5xI6Ta5MirdcmrS3ID3KfyI0rn47aGYBROcBTkZTmzNg95S+UzeQc0PzMsNT +79uq/nROacdrjGCT3sTHDN/hMq7MkztReJVni+49Vv4M0GkPGw/zJSZrM233bkf6 +c0Plfg6lZrEpfDKEY1WJxA3Bk1QwGROs0303p+tdOmw1XNtB1xLaqUkL39iAigmT +Yo61Zs8liM2EuLE/pDkP2QKe6xJMlXzzawWpXhaDzLhn4ugTncxbgtNMs+1b/97l +c6wjOy0AvzVVdAlJ2ElYGn+SNuZRkg7zJn0cTRe8yexDJtC/QV9AqURE9JnnV4ee +UB9XVKg+/XRjL7FQZQnmWEIuQxpMtPAlR1n6BB6T1CZGSlCBst6+eLf8ZxXhyVeE 
+Hg9j1uliutZfVS7qXMYoCAQlObgOK6nyTJccBz8NUvXt7y+CDwIDAQABo0IwQDAd +BgNVHQ4EFgQUU3m/WqorSs9UgOHYm8Cd8rIDZsswDgYDVR0PAQH/BAQDAgEGMA8G +A1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAFzUfA3P9wF9QZllDHPF +Up/L+M+ZBn8b2kMVn54CVVeWFPFSPCeHlCjtHzoBN6J2/FNQwISbxmtOuowhT6KO +VWKR82kV2LyI48SqC/3vqOlLVSoGIG1VeCkZ7l8wXEskEVX/JJpuXior7gtNn3/3 +ATiUFJVDBwn7YKnuHKsSjKCaXqeYalltiz8I+8jRRa8YFWSQEg9zKC7F4iRO/Fjs +8PRF/iKz6y+O0tlFYQXBl2+odnKPi4w2r78NBc5xjeambx9spnFixdjQg3IM8WcR +iQycE0xyNN+81XHfqnHd4blsjDwSXWXavVcStkNr/+XeTWYRUc+ZruwXtuhxkYze +Sf7dNXGiFSeUHM9h4ya7b6NnJSFd5t0dCy5oGzuCr+yDZ4XUmFF0sbmZgIn/f3gZ +XHlKYC6SQK5MNyosycdiyA5d9zZbyuAlJQG03RoHnHcAP9Dc1ew91Pq7P8yF1m9/ +qS3fuQL39ZeatTXaw2ewh0qpKJ4jjv9cJ2vhsE/zB+4ALtRZh8tSQZXq9EfX7mRB +VXyNWQKV3WKdwrnuWih0hKWbt5DHDAff9Yk2dDLWKMGwsAvgnEzDHNb842m1R0aB +L6KCq9NjRHDEjf8tM7qtj3u1cIiuPhnPQCjY/MiQu12ZIvVS5ljFH4gxQ+6IHdfG +jjxDah2nGN59PRbxYvnKkKj9 +-----END CERTIFICATE----- + +# Issuer: CN=USERTrust ECC Certification Authority O=The USERTRUST Network +# Subject: CN=USERTrust ECC Certification Authority O=The USERTRUST Network +# Label: "USERTrust ECC Certification Authority" +# Serial: 123013823720199481456569720443997572134 +# MD5 Fingerprint: fa:68:bc:d9:b5:7f:ad:fd:c9:1d:06:83:28:cc:24:c1 +# SHA1 Fingerprint: d1:cb:ca:5d:b2:d5:2a:7f:69:3b:67:4d:e5:f0:5a:1d:0c:95:7d:f0 +# SHA256 Fingerprint: 4f:f4:60:d5:4b:9c:86:da:bf:bc:fc:57:12:e0:40:0d:2b:ed:3f:bc:4d:4f:bd:aa:86:e0:6a:dc:d2:a9:ad:7a +-----BEGIN CERTIFICATE----- +MIICjzCCAhWgAwIBAgIQXIuZxVqUxdJxVt7NiYDMJjAKBggqhkjOPQQDAzCBiDEL +MAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNl +eSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMT +JVVTRVJUcnVzdCBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMjAx +MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNVBAgT +Ck5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVUaGUg +VVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBFQ0MgQ2VydGlm +aWNhdGlvbiBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQarFRaqflo +I+d61SRvU8Za2EurxtW20eZzca7dnNYMYf3boIkDuAUU7FfO7l0/4iGzzvfUinng +o4N+LZfQYcTxmdwlkWOrfzCjtHDix6EznPO/LlxTsV+zfTJ/ijTjeXmjQjBAMB0G +A1UdDgQWBBQ64QmG1M8ZwpZ2dEl23OA1xmNjmjAOBgNVHQ8BAf8EBAMCAQYwDwYD +VR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjA2Z6EWCNzklwBBHU6+4WMB +zzuqQhFkoJ2UOQIReVx7Hfpkue4WQrO/isIJxOzksU0CMQDpKmFHjFJKS04YcPbW +RNZu9YO6bVi9JNlWSOrvxKJGgYhqOkbRqZtNyWHa0V1Xahg= +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R4 +# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R4 +# Label: "GlobalSign ECC Root CA - R4" +# Serial: 14367148294922964480859022125800977897474 +# MD5 Fingerprint: 20:f0:27:68:d1:7e:a0:9d:0e:e6:2a:ca:df:5c:89:8e +# SHA1 Fingerprint: 69:69:56:2e:40:80:f4:24:a1:e7:19:9f:14:ba:f3:ee:58:ab:6a:bb +# SHA256 Fingerprint: be:c9:49:11:c2:95:56:76:db:6c:0a:55:09:86:d7:6e:3b:a0:05:66:7c:44:2c:97:62:b4:fb:b7:73:de:22:8c +-----BEGIN CERTIFICATE----- +MIIB4TCCAYegAwIBAgIRKjikHJYKBN5CsiilC+g0mAIwCgYIKoZIzj0EAwIwUDEk +MCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI0MRMwEQYDVQQKEwpH +bG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoX +DTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBD +QSAtIFI0MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWdu +MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuMZ5049sJQ6fLjkZHAOkrprlOQcJ +FspjsbmG+IpXwVfOQvpzofdlQv8ewQCybnMO/8ch5RikqtlxP6jUuc6MHaNCMEAw +DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFFSwe61F +uOJAf/sKbvu+M8k8o4TVMAoGCCqGSM49BAMCA0gAMEUCIQDckqGgE6bPA7DmxCGX 
+kPoUVy0D7O48027KqGx2vKLeuwIgJ6iFJzWbVsaj8kfSt24bAgAXqmemFZHe+pTs +ewv4n4Q= +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R5 +# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R5 +# Label: "GlobalSign ECC Root CA - R5" +# Serial: 32785792099990507226680698011560947931244 +# MD5 Fingerprint: 9f:ad:3b:1c:02:1e:8a:ba:17:74:38:81:0c:a2:bc:08 +# SHA1 Fingerprint: 1f:24:c6:30:cd:a4:18:ef:20:69:ff:ad:4f:dd:5f:46:3a:1b:69:aa +# SHA256 Fingerprint: 17:9f:bc:14:8a:3d:d0:0f:d2:4e:a1:34:58:cc:43:bf:a7:f5:9c:81:82:d7:83:a5:13:f6:eb:ec:10:0c:89:24 +-----BEGIN CERTIFICATE----- +MIICHjCCAaSgAwIBAgIRYFlJ4CYuu1X5CneKcflK2GwwCgYIKoZIzj0EAwMwUDEk +MCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI1MRMwEQYDVQQKEwpH +bG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoX +DTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBD +QSAtIFI1MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWdu +MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAER0UOlvt9Xb/pOdEh+J8LttV7HpI6SFkc +8GIxLcB6KP4ap1yztsyX50XUWPrRd21DosCHZTQKH3rd6zwzocWdTaRvQZU4f8ke +hOvRnkmSh5SHDDqFSmafnVmTTZdhBoZKo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYD +VR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUPeYpSJvqB8ohREom3m7e0oPQn1kwCgYI +KoZIzj0EAwMDaAAwZQIxAOVpEslu28YxuglB4Zf4+/2a4n0Sye18ZNPLBSWLVtmg +515dTguDnFt2KaAJJiFqYgIwcdK1j1zqO+F4CYWodZI7yFz9SO8NdCKoCOJuxUnO +xwy8p2Fp8fc74SrL+SvzZpA3 +-----END CERTIFICATE----- + +# Issuer: CN=Staat der Nederlanden Root CA - G3 O=Staat der Nederlanden +# Subject: CN=Staat der Nederlanden Root CA - G3 O=Staat der Nederlanden +# Label: "Staat der Nederlanden Root CA - G3" +# Serial: 10003001 +# MD5 Fingerprint: 0b:46:67:07:db:10:2f:19:8c:35:50:60:d1:0b:f4:37 +# SHA1 Fingerprint: d8:eb:6b:41:51:92:59:e0:f3:e7:85:00:c0:3d:b6:88:97:c9:ee:fc +# SHA256 Fingerprint: 3c:4f:b0:b9:5a:b8:b3:00:32:f4:32:b8:6f:53:5f:e1:72:c1:85:d0:fd:39:86:58:37:cf:36:18:7f:a6:f4:28 +-----BEGIN CERTIFICATE----- +MIIFdDCCA1ygAwIBAgIEAJiiOTANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJO +TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFh +dCBkZXIgTmVkZXJsYW5kZW4gUm9vdCBDQSAtIEczMB4XDTEzMTExNDExMjg0MloX +DTI4MTExMzIzMDAwMFowWjELMAkGA1UEBhMCTkwxHjAcBgNVBAoMFVN0YWF0IGRl +ciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5lZGVybGFuZGVuIFJv +b3QgQ0EgLSBHMzCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAL4yolQP +cPssXFnrbMSkUeiFKrPMSjTysF/zDsccPVMeiAho2G89rcKezIJnByeHaHE6n3WW +IkYFsO2tx1ueKt6c/DrGlaf1F2cY5y9JCAxcz+bMNO14+1Cx3Gsy8KL+tjzk7FqX +xz8ecAgwoNzFs21v0IJyEavSgWhZghe3eJJg+szeP4TrjTgzkApyI/o1zCZxMdFy +KJLZWyNtZrVtB0LrpjPOktvA9mxjeM3KTj215VKb8b475lRgsGYeCasH/lSJEULR +9yS6YHgamPfJEf0WwTUaVHXvQ9Plrk7O53vDxk5hUUurmkVLoR9BvUhTFXFkC4az +5S6+zqQbwSmEorXLCCN2QyIkHxcE1G6cxvx/K2Ya7Irl1s9N9WMJtxU51nus6+N8 +6U78dULI7ViVDAZCopz35HCz33JvWjdAidiFpNfxC95DGdRKWCyMijmev4SH8RY7 +Ngzp07TKbBlBUgmhHbBqv4LvcFEhMtwFdozL92TkA1CvjJFnq8Xy7ljY3r735zHP +bMk7ccHViLVlvMDoFxcHErVc0qsgk7TmgoNwNsXNo42ti+yjwUOH5kPiNL6VizXt +BznaqB16nzaeErAMZRKQFWDZJkBE41ZgpRDUajz9QdwOWke275dhdU/Z/seyHdTt +XUmzqWrLZoQT1Vyg3N9udwbRcXXIV2+vD3dbAgMBAAGjQjBAMA8GA1UdEwEB/wQF +MAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRUrfrHkleuyjWcLhL75Lpd +INyUVzANBgkqhkiG9w0BAQsFAAOCAgEAMJmdBTLIXg47mAE6iqTnB/d6+Oea31BD +U5cqPco8R5gu4RV78ZLzYdqQJRZlwJ9UXQ4DO1t3ApyEtg2YXzTdO2PCwyiBwpwp +LiniyMMB8jPqKqrMCQj3ZWfGzd/TtiunvczRDnBfuCPRy5FOCvTIeuXZYzbB1N/8 +Ipf3YF3qKS9Ysr1YvY2WTxB1v0h7PVGHoTx0IsL8B3+A3MSs/mrBcDCw6Y5p4ixp +gZQJut3+TcCDjJRYwEYgr5wfAvg1VUkvRtTA8KCWAg8zxXHzniN9lLf9OtMJgwYh +/WA9rjLA0u6NpvDntIJ8CsxwyXmA+P5M9zWEGYox+wrZ13+b8KKaa8MFSu1BYBQw 
+0aoRQm7TIwIEC8Zl3d1Sd9qBa7Ko+gE4uZbqKmxnl4mUnrzhVNXkanjvSr0rmj1A +fsbAddJu+2gw7OyLnflJNZoaLNmzlTnVHpL3prllL+U9bTpITAjc5CgSKL59NVzq +4BZ+Extq1z7XnvwtdbLBFNUjA9tbbws+eC8N3jONFrdI54OagQ97wUNNVQQXOEpR +1VmiiXTTn74eS9fGbbeIJG9gkaSChVtWQbzQRKtqE77RLFi3EjNYsjdj3BP1lB0/ +QFH1T/U67cjF68IeHRaVesd+QnGTbksVtzDfqu1XhUisHWrdOWnk4Xl4vs4Fv6EM +94B7IWcnMFk= +-----END CERTIFICATE----- + +# Issuer: CN=Staat der Nederlanden EV Root CA O=Staat der Nederlanden +# Subject: CN=Staat der Nederlanden EV Root CA O=Staat der Nederlanden +# Label: "Staat der Nederlanden EV Root CA" +# Serial: 10000013 +# MD5 Fingerprint: fc:06:af:7b:e8:1a:f1:9a:b4:e8:d2:70:1f:c0:f5:ba +# SHA1 Fingerprint: 76:e2:7e:c1:4f:db:82:c1:c0:a6:75:b5:05:be:3d:29:b4:ed:db:bb +# SHA256 Fingerprint: 4d:24:91:41:4c:fe:95:67:46:ec:4c:ef:a6:cf:6f:72:e2:8a:13:29:43:2f:9d:8a:90:7a:c4:cb:5d:ad:c1:5a +-----BEGIN CERTIFICATE----- +MIIFcDCCA1igAwIBAgIEAJiWjTANBgkqhkiG9w0BAQsFADBYMQswCQYDVQQGEwJO +TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSkwJwYDVQQDDCBTdGFh +dCBkZXIgTmVkZXJsYW5kZW4gRVYgUm9vdCBDQTAeFw0xMDEyMDgxMTE5MjlaFw0y +MjEyMDgxMTEwMjhaMFgxCzAJBgNVBAYTAk5MMR4wHAYDVQQKDBVTdGFhdCBkZXIg +TmVkZXJsYW5kZW4xKTAnBgNVBAMMIFN0YWF0IGRlciBOZWRlcmxhbmRlbiBFViBS +b290IENBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA48d+ifkkSzrS +M4M1LGns3Amk41GoJSt5uAg94JG6hIXGhaTK5skuU6TJJB79VWZxXSzFYGgEt9nC +UiY4iKTWO0Cmws0/zZiTs1QUWJZV1VD+hq2kY39ch/aO5ieSZxeSAgMs3NZmdO3d +Z//BYY1jTw+bbRcwJu+r0h8QoPnFfxZpgQNH7R5ojXKhTbImxrpsX23Wr9GxE46p +rfNeaXUmGD5BKyF/7otdBwadQ8QpCiv8Kj6GyzyDOvnJDdrFmeK8eEEzduG/L13l +pJhQDBXd4Pqcfzho0LKmeqfRMb1+ilgnQ7O6M5HTp5gVXJrm0w912fxBmJc+qiXb +j5IusHsMX/FjqTf5m3VpTCgmJdrV8hJwRVXj33NeN/UhbJCONVrJ0yPr08C+eKxC +KFhmpUZtcALXEPlLVPxdhkqHz3/KRawRWrUgUY0viEeXOcDPusBCAUCZSCELa6fS +/ZbV0b5GnUngC6agIk440ME8MLxwjyx1zNDFjFE7PZQIZCZhfbnDZY8UnCHQqv0X +cgOPvZuM5l5Tnrmd74K74bzickFbIZTTRTeU0d8JOV3nI6qaHcptqAqGhYqCvkIH +1vI4gnPah1vlPNOePqc7nvQDs/nxfRN0Av+7oeX6AHkcpmZBiFxgV6YuCcS6/ZrP +px9Aw7vMWgpVSzs4dlG4Y4uElBbmVvMCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB +/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFP6rAJCYniT8qcwaivsnuL8wbqg7 +MA0GCSqGSIb3DQEBCwUAA4ICAQDPdyxuVr5Os7aEAJSrR8kN0nbHhp8dB9O2tLsI +eK9p0gtJ3jPFrK3CiAJ9Brc1AsFgyb/E6JTe1NOpEyVa/m6irn0F3H3zbPB+po3u +2dfOWBfoqSmuc0iH55vKbimhZF8ZE/euBhD/UcabTVUlT5OZEAFTdfETzsemQUHS +v4ilf0X8rLiltTMMgsT7B/Zq5SWEXwbKwYY5EdtYzXc7LMJMD16a4/CrPmEbUCTC +wPTxGfARKbalGAKb12NMcIxHowNDXLldRqANb/9Zjr7dn3LDWyvfjFvO5QxGbJKy +CqNMVEIYFRIYvdr8unRu/8G2oGTYqV9Vrp9canaW2HNnh/tNf1zuacpzEPuKqf2e +vTY4SUmH9A4U8OmHuD+nT3pajnnUk+S7aFKErGzp85hwVXIy+TSrK0m1zSBi5Dp6 +Z2Orltxtrpfs/J92VoguZs9btsmksNcFuuEnL5O7Jiqik7Ab846+HUCjuTaPPoIa +Gl6I6lD4WeKDRikL40Rc4ZW2aZCaFG+XroHPaO+Zmr615+F/+PoTRxZMzG0IQOeL +eG9QgkRQP2YGiqtDhFZKDyAthg710tvSeopLzaXoTvFeJiUBWSOgftL2fiFX1ye8 +FVdMpEbB4IMeDExNH08GGeL5qPQ6gqGyeUN51q1veieQA6TqJIc/2b3Z6fJfUEkc +7uzXLg== +-----END CERTIFICATE----- + +# Issuer: CN=IdenTrust Commercial Root CA 1 O=IdenTrust +# Subject: CN=IdenTrust Commercial Root CA 1 O=IdenTrust +# Label: "IdenTrust Commercial Root CA 1" +# Serial: 13298821034946342390520003877796839426 +# MD5 Fingerprint: b3:3e:77:73:75:ee:a0:d3:e3:7e:49:63:49:59:bb:c7 +# SHA1 Fingerprint: df:71:7e:aa:4a:d9:4e:c9:55:84:99:60:2d:48:de:5f:bc:f0:3a:25 +# SHA256 Fingerprint: 5d:56:49:9b:e4:d2:e0:8b:cf:ca:d0:8a:3e:38:72:3d:50:50:3b:de:70:69:48:e4:2f:55:60:30:19:e5:28:ae +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIQCgFCgAAAAUUjyES1AAAAAjANBgkqhkiG9w0BAQsFADBK +MQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MScwJQYDVQQDEx5JZGVu +VHJ1c3QgQ29tbWVyY2lhbCBSb290IENBIDEwHhcNMTQwMTE2MTgxMjIzWhcNMzQw 
+MTE2MTgxMjIzWjBKMQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MScw +JQYDVQQDEx5JZGVuVHJ1c3QgQ29tbWVyY2lhbCBSb290IENBIDEwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCnUBneP5k91DNG8W9RYYKyqU+PZ4ldhNlT +3Qwo2dfw/66VQ3KZ+bVdfIrBQuExUHTRgQ18zZshq0PirK1ehm7zCYofWjK9ouuU ++ehcCuz/mNKvcbO0U59Oh++SvL3sTzIwiEsXXlfEU8L2ApeN2WIrvyQfYo3fw7gp +S0l4PJNgiCL8mdo2yMKi1CxUAGc1bnO/AljwpN3lsKImesrgNqUZFvX9t++uP0D1 +bVoE/c40yiTcdCMbXTMTEl3EASX2MN0CXZ/g1Ue9tOsbobtJSdifWwLziuQkkORi +T0/Br4sOdBeo0XKIanoBScy0RnnGF7HamB4HWfp1IYVl3ZBWzvurpWCdxJ35UrCL +vYf5jysjCiN2O/cz4ckA82n5S6LgTrx+kzmEB/dEcH7+B1rlsazRGMzyNeVJSQjK +Vsk9+w8YfYs7wRPCTY/JTw436R+hDmrfYi7LNQZReSzIJTj0+kuniVyc0uMNOYZK +dHzVWYfCP04MXFL0PfdSgvHqo6z9STQaKPNBiDoT7uje/5kdX7rL6B7yuVBgwDHT +c+XvvqDtMwt0viAgxGds8AgDelWAf0ZOlqf0Hj7h9tgJ4TNkK2PXMl6f+cB7D3hv +l7yTmvmcEpB4eoCHFddydJxVdHixuuFucAS6T6C6aMN7/zHwcz09lCqxC0EOoP5N +iGVreTO01wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zAdBgNVHQ4EFgQU7UQZwNPwBovupHu+QucmVMiONnYwDQYJKoZIhvcNAQELBQAD +ggIBAA2ukDL2pkt8RHYZYR4nKM1eVO8lvOMIkPkp165oCOGUAFjvLi5+U1KMtlwH +6oi6mYtQlNeCgN9hCQCTrQ0U5s7B8jeUeLBfnLOic7iPBZM4zY0+sLj7wM+x8uwt +LRvM7Kqas6pgghstO8OEPVeKlh6cdbjTMM1gCIOQ045U8U1mwF10A0Cj7oV+wh93 +nAbowacYXVKV7cndJZ5t+qntozo00Fl72u1Q8zW/7esUTTHHYPTa8Yec4kjixsU3 ++wYQ+nVZZjFHKdp2mhzpgq7vmrlR94gjmmmVYjzlVYA211QC//G5Xc7UI2/YRYRK +W2XviQzdFKcgyxilJbQN+QHwotL0AMh0jqEqSI5l2xPE4iUXfeu+h1sXIFRRk0pT +AwvsXcoz7WL9RccvW9xYoIA55vrX/hMUpu09lEpCdNTDd1lzzY9GvlU47/rokTLq +l1gEIt44w8y8bckzOmoKaT+gyOpyj4xjhiO9bTyWnpXgSUyqorkqG5w2gXjtw+hG +4iZZRHUe2XWJUc0QhJ1hYMtd+ZciTY6Y5uN/9lu7rs3KSoFrXgvzUeF0K+l+J6fZ +mUlO+KWA2yUPHGNiiskzZ2s8EIPGrd6ozRaOjfAHN3Gf8qv8QfXBi+wAN10J5U6A +7/qxXDgGpRtK4dw4LTzcqx+QGtVKnO7RcGzM7vRX+Bi6hG6H +-----END CERTIFICATE----- + +# Issuer: CN=IdenTrust Public Sector Root CA 1 O=IdenTrust +# Subject: CN=IdenTrust Public Sector Root CA 1 O=IdenTrust +# Label: "IdenTrust Public Sector Root CA 1" +# Serial: 13298821034946342390521976156843933698 +# MD5 Fingerprint: 37:06:a5:b0:fc:89:9d:ba:f4:6b:8c:1a:64:cd:d5:ba +# SHA1 Fingerprint: ba:29:41:60:77:98:3f:f4:f3:ef:f2:31:05:3b:2e:ea:6d:4d:45:fd +# SHA256 Fingerprint: 30:d0:89:5a:9a:44:8a:26:20:91:63:55:22:d1:f5:20:10:b5:86:7a:ca:e1:2c:78:ef:95:8f:d4:f4:38:9f:2f +-----BEGIN CERTIFICATE----- +MIIFZjCCA06gAwIBAgIQCgFCgAAAAUUjz0Z8AAAAAjANBgkqhkiG9w0BAQsFADBN +MQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MSowKAYDVQQDEyFJZGVu +VHJ1c3QgUHVibGljIFNlY3RvciBSb290IENBIDEwHhcNMTQwMTE2MTc1MzMyWhcN +MzQwMTE2MTc1MzMyWjBNMQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0 +MSowKAYDVQQDEyFJZGVuVHJ1c3QgUHVibGljIFNlY3RvciBSb290IENBIDEwggIi +MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2IpT8pEiv6EdrCvsnduTyP4o7 +ekosMSqMjbCpwzFrqHd2hCa2rIFCDQjrVVi7evi8ZX3yoG2LqEfpYnYeEe4IFNGy +RBb06tD6Hi9e28tzQa68ALBKK0CyrOE7S8ItneShm+waOh7wCLPQ5CQ1B5+ctMlS +bdsHyo+1W/CD80/HLaXIrcuVIKQxKFdYWuSNG5qrng0M8gozOSI5Cpcu81N3uURF +/YTLNiCBWS2ab21ISGHKTN9T0a9SvESfqy9rg3LvdYDaBjMbXcjaY8ZNzaxmMc3R +3j6HEDbhuaR672BQssvKplbgN6+rNBM5Jeg5ZuSYeqoSmJxZZoY+rfGwyj4GD3vw +EUs3oERte8uojHH01bWRNszwFcYr3lEXsZdMUD2xlVl8BX0tIdUAvwFnol57plzy +9yLxkA2T26pEUWbMfXYD62qoKjgZl3YNa4ph+bz27nb9cCvdKTz4Ch5bQhyLVi9V +GxyhLrXHFub4qjySjmm2AcG1hp2JDws4lFTo6tyePSW8Uybt1as5qsVATFSrsrTZ +2fjXctscvG29ZV/viDUqZi/u9rNl8DONfJhBaUYPQxxp+pu10GFqzcpL2UyQRqsV +WaFHVCkugyhfHMKiq3IXAAaOReyL4jM9f9oZRORicsPfIsbyVtTdX5Vy7W1f90gD +W/3FKqD2cyOEEBsB5wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zAdBgNVHQ4EFgQU43HgntinQtnbcZFrlJPrw6PRFKMwDQYJKoZIhvcN +AQELBQADggIBAEf63QqwEZE4rU1d9+UOl1QZgkiHVIyqZJnYWv6IAcVYpZmxI1Qj 
+t2odIFflAWJBF9MJ23XLblSQdf4an4EKwt3X9wnQW3IV5B4Jaj0z8yGa5hV+rVHV
+DRDtfULAj+7AmgjVQdZcDiFpboBhDhXAuM/FSRJSzL46zNQuOAXeNf0fb7iAaJg9
+TaDKQGXSc3z1i9kKlT/YPyNtGtEqJBnZhbMX73huqVjRI9PHE+1yJX9dsXNw0H8G
+lwmEKYBhHfpe/3OsoOOJuBxxFcbeMX8S3OFtm6/n6J91eEyrRjuazr8FGF1NFTwW
+mhlQBJqymm9li1JfPFgEKCXAZmExfrngdbkaqIHWchezxQMxNRF4eKLg6TCMf4Df
+WN88uieW4oA0beOY02QnrEh+KHdcxiVhJfiFDGX6xDIvpZgF5PgLZxYWxoK4Mhn5
++bl53B/N66+rDt0b20XkeucC4pVd/GnwU2lhlXV5C15V5jgclKlZM57IcXR5f1GJ
+tshquDDIajjDbp7hNxbqBWJMWxJH7ae0s1hWx0nzfxJoCTFx8G34Tkf71oXuxVhA
+GaQdp/lLQzfcaFpPz+vCZHTetBXZ9FRUGi8c15dxVJCO2SCdUyt/q4/i6jC8UDfv
+8Ue1fXwsBOxonbRJRBD0ckscZOf85muQ3Wl9af0AVqW3rLatt8o+Ae+c
+-----END CERTIFICATE-----
+
+# Issuer: CN=Entrust Root Certification Authority - G2 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2009 Entrust, Inc. - for authorized use only
+# Subject: CN=Entrust Root Certification Authority - G2 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2009 Entrust, Inc. - for authorized use only
+# Label: "Entrust Root Certification Authority - G2"
+# Serial: 1246989352
+# MD5 Fingerprint: 4b:e2:c9:91:96:65:0c:f4:0e:5a:93:92:a0:0a:fe:b2
+# SHA1 Fingerprint: 8c:f4:27:fd:79:0c:3a:d1:66:06:8d:e8:1e:57:ef:bb:93:22:72:d4
+# SHA256 Fingerprint: 43:df:57:74:b0:3e:7f:ef:5f:e4:0d:93:1a:7b:ed:f1:bb:2e:6b:42:73:8c:4e:6d:38:41:10:3d:3a:a7:f3:39
+-----BEGIN CERTIFICATE-----
+MIIEPjCCAyagAwIBAgIESlOMKDANBgkqhkiG9w0BAQsFADCBvjELMAkGA1UEBhMC
+VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50
+cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3Qs
+IEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVz
+dCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzIwHhcNMDkwNzA3MTcy
+NTU0WhcNMzAxMjA3MTc1NTU0WjCBvjELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUVu
+dHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50cnVzdC5uZXQvbGVnYWwt
+dGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3QsIEluYy4gLSBmb3IgYXV0
+aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVzdCBSb290IENlcnRpZmlj
+YXRpb24gQXV0aG9yaXR5IC0gRzIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
+AoIBAQC6hLZy254Ma+KZ6TABp3bqMriVQRrJ2mFOWHLP/vaCeb9zYQYKpSfYs1/T
+RU4cctZOMvJyig/3gxnQaoCAAEUesMfnmr8SVycco2gvCoe9amsOXmXzHHfV1IWN
+cCG0szLni6LVhjkCsbjSR87kyUnEO6fe+1R9V77w6G7CebI6C1XiUJgWMhNcL3hW
+wcKUs/Ja5CeanyTXxuzQmyWC48zCxEXFjJd6BmsqEZ+pCm5IO2/b1BEZQvePB7/1
+U1+cPvQXLOZprE4yTGJ36rfo5bs0vBmLrpxR57d+tVOxMyLlbc9wPBr64ptntoP0
+jaWvYkxN4FisZDQSA/i2jZRjJKRxAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAP
+BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqciZ60B7vfec7aVHUbI2fkBJmqzAN
+BgkqhkiG9w0BAQsFAAOCAQEAeZ8dlsa2eT8ijYfThwMEYGprmi5ZiXMRrEPR9RP/
+jTkrwPK9T3CMqS/qF8QLVJ7UG5aYMzyorWKiAHarWWluBh1+xLlEjZivEtRh2woZ
+Rkfz6/djwUAFQKXSt/S1mja/qYh2iARVBCuch38aNzx+LaUa2NSJXsq9rD1s2G2v
+1fN2D807iDginWyTmsQ9v4IbZT+mD12q/OWyFcq1rca8PdCE6OoGcrBNOTJ4vz4R
+nAuknZoh8/CbCzB428Hch0P+vGOaysXCHMnHjf87ElgI5rY97HosTvuDls4MPGmH
+VHOkc8KT/1EQrBVUAdj8BbGJoX90g5pJ19xOe4pIb4tF9g==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Entrust Root Certification Authority - EC1 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2012 Entrust, Inc. - for authorized use only
+# Subject: CN=Entrust Root Certification Authority - EC1 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2012 Entrust, Inc. - for authorized use only
+# Label: "Entrust Root Certification Authority - EC1"
+# Serial: 51543124481930649114116133369
+# MD5 Fingerprint: b6:7e:1d:f0:58:c5:49:6c:24:3b:3d:ed:98:18:ed:bc
+# SHA1 Fingerprint: 20:d8:06:40:df:9b:25:f5:12:25:3a:11:ea:f7:59:8a:eb:14:b5:47
+# SHA256 Fingerprint: 02:ed:0e:b2:8c:14:da:45:16:5c:56:67:91:70:0d:64:51:d7:fb:56:f0:b2:ab:1d:3b:8e:b0:70:e5:6e:df:f5
+-----BEGIN CERTIFICATE-----
+MIIC+TCCAoCgAwIBAgINAKaLeSkAAAAAUNCR+TAKBggqhkjOPQQDAzCBvzELMAkG
+A1UEBhMCVVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3
+d3cuZW50cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDEyIEVu
+dHJ1c3QsIEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEzMDEGA1UEAxMq
+RW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRUMxMB4XDTEy
+MTIxODE1MjUzNloXDTM3MTIxODE1NTUzNlowgb8xCzAJBgNVBAYTAlVTMRYwFAYD
+VQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQLEx9TZWUgd3d3LmVudHJ1c3QubmV0
+L2xlZ2FsLXRlcm1zMTkwNwYDVQQLEzAoYykgMjAxMiBFbnRydXN0LCBJbmMuIC0g
+Zm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxMzAxBgNVBAMTKkVudHJ1c3QgUm9vdCBD
+ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEVDMTB2MBAGByqGSM49AgEGBSuBBAAi
+A2IABIQTydC6bUF74mzQ61VfZgIaJPRbiWlH47jCffHyAsWfoPZb1YsGGYZPUxBt
+ByQnoaD41UcZYUx9ypMn6nQM72+WCf5j7HBdNq1nd67JnXxVRDqiY1Ef9eNi1KlH
+Bz7MIKNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0O
+BBYEFLdj5xrdjekIplWDpOBqUEFlEUJJMAoGCCqGSM49BAMDA2cAMGQCMGF52OVC
+R98crlOZF7ZvHH3hvxGU0QOIdeSNiaSKd0bebWHvAvX7td/M/k7//qnmpwIwW5nX
+hTcGtXsI/esni0qU+eH6p44mCOh8kmhtc9hvJqwhAriZtyZBWyVgrtBIGu4G
+-----END CERTIFICATE-----
+
+# Issuer: CN=CFCA EV ROOT O=China Financial Certification Authority
+# Subject: CN=CFCA EV ROOT O=China Financial Certification Authority
+# Label: "CFCA EV ROOT"
+# Serial: 407555286
+# MD5 Fingerprint: 74:e1:b6:ed:26:7a:7a:44:30:33:94:ab:7b:27:81:30
+# SHA1 Fingerprint: e2:b8:29:4b:55:84:ab:6b:58:c2:90:46:6c:ac:3f:b8:39:8f:84:83
+# SHA256 Fingerprint: 5c:c3:d7:8e:4e:1d:5e:45:54:7a:04:e6:87:3e:64:f9:0c:f9:53:6d:1c:cc:2e:f8:00:f3:55:c4:c5:fd:70:fd
+-----BEGIN CERTIFICATE-----
+MIIFjTCCA3WgAwIBAgIEGErM1jANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJD
+TjEwMC4GA1UECgwnQ2hpbmEgRmluYW5jaWFsIENlcnRpZmljYXRpb24gQXV0aG9y
+aXR5MRUwEwYDVQQDDAxDRkNBIEVWIFJPT1QwHhcNMTIwODA4MDMwNzAxWhcNMjkx
+MjMxMDMwNzAxWjBWMQswCQYDVQQGEwJDTjEwMC4GA1UECgwnQ2hpbmEgRmluYW5j
+aWFsIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MRUwEwYDVQQDDAxDRkNBIEVWIFJP
+T1QwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDXXWvNED8fBVnVBU03
+sQ7smCuOFR36k0sXgiFxEFLXUWRwFsJVaU2OFW2fvwwbwuCjZ9YMrM8irq93VCpL
+TIpTUnrD7i7es3ElweldPe6hL6P3KjzJIx1qqx2hp/Hz7KDVRM8Vz3IvHWOX6Jn5
+/ZOkVIBMUtRSqy5J35DNuF++P96hyk0g1CXohClTt7GIH//62pCfCqktQT+x8Rgp
+7hZZLDRJGqgG16iI0gNyejLi6mhNbiyWZXvKWfry4t3uMCz7zEasxGPrb382KzRz
+EpR/38wmnvFyXVBlWY9ps4deMm/DGIq1lY+wejfeWkU7xzbh72fROdOXW3NiGUgt
+hxwG+3SYIElz8AXSG7Ggo7cbcNOIabla1jj0Ytwli3i/+Oh+uFzJlU9fpy25IGvP
+a931DfSCt/SyZi4QKPaXWnuWFo8BGS1sbn85WAZkgwGDg8NNkt0yxoekN+kWzqot
+aK8KgWU6cMGbrU1tVMoqLUuFG7OA5nBFDWteNfB/O7ic5ARwiRIlk9oKmSJgamNg
+TnYGmE69g60dWIolhdLHZR4tjsbftsbhf4oEIRUpdPA+nJCdDC7xij5aqgwJHsfV
+PKPtl8MeNPo4+QgO48BdK4PRVmrJtqhUUy54Mmc9gn900PvhtgVguXDbjgv5E1hv
+cWAQUhC5wUEJ73IfZzF4/5YFjQIDAQABo2MwYTAfBgNVHSMEGDAWgBTj/i39KNAL
+tbq2osS/BqoFjJP7LzAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAd
+BgNVHQ4EFgQU4/4t/SjQC7W6tqLEvwaqBYyT+y8wDQYJKoZIhvcNAQELBQADggIB
+ACXGumvrh8vegjmWPfBEp2uEcwPenStPuiB/vHiyz5ewG5zz13ku9Ui20vsXiObT
+ej/tUxPQ4i9qecsAIyjmHjdXNYmEwnZPNDatZ8POQQaIxffu2Bq41gt/UP+TqhdL
+jOztUmCypAbqTuv0axn96/Ua4CUqmtzHQTb3yHQFhDmVOdYLO6Qn+gjYXB74BGBS
+ESgoA//vU2YApUo0FmZ8/Qmkrp5nGm9BC2sGE5uPhnEFtC+NiWYzKXZUmhH4J/qy
+P5Hgzg0b8zAarb8iXRvTvyUFTeGSGn+ZnzxEk8rUQElsgIfXBDrDMlI1Dlb4pd19 +xIsNER9Tyx6yF7Zod1rg1MvIB671Oi6ON7fQAUtDKXeMOZePglr4UeWJoBjnaH9d +Ci77o0cOPaYjesYBx4/IXr9tgFa+iiS6M+qf4TIRnvHST4D2G0CvOJ4RUHlzEhLN +5mydLIhyPDCBBpEi6lmt2hkuIsKNuYyH4Ga8cyNfIWRjgEj1oDwYPZTISEEdQLpe +/v5WOaHIz16eGWRGENoXkbcFgKyLmZJ956LYBws2J+dIeWCKw9cTXPhyQN9Ky8+Z +AAoACxGV2lZFA4gKn2fQ1XmxqI1AbQ3CekD6819kR5LLU7m7Wc5P/dAVUwHY3+vZ +5nbv0CO7O6l5s9UCKc2Jo5YPSjXnTkLAdc0Hz+Ys63su +-----END CERTIFICATE----- + +# Issuer: CN=Certinomis - Root CA O=Certinomis OU=0002 433998903 +# Subject: CN=Certinomis - Root CA O=Certinomis OU=0002 433998903 +# Label: "Certinomis - Root CA" +# Serial: 1 +# MD5 Fingerprint: 14:0a:fd:8d:a8:28:b5:38:69:db:56:7e:61:22:03:3f +# SHA1 Fingerprint: 9d:70:bb:01:a5:a4:a0:18:11:2e:f7:1c:01:b9:32:c5:34:e7:88:a8 +# SHA256 Fingerprint: 2a:99:f5:bc:11:74:b7:3c:bb:1d:62:08:84:e0:1c:34:e5:1c:cb:39:78:da:12:5f:0e:33:26:88:83:bf:41:58 +-----BEGIN CERTIFICATE----- +MIIFkjCCA3qgAwIBAgIBATANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJGUjET +MBEGA1UEChMKQ2VydGlub21pczEXMBUGA1UECxMOMDAwMiA0MzM5OTg5MDMxHTAb +BgNVBAMTFENlcnRpbm9taXMgLSBSb290IENBMB4XDTEzMTAyMTA5MTcxOFoXDTMz +MTAyMTA5MTcxOFowWjELMAkGA1UEBhMCRlIxEzARBgNVBAoTCkNlcnRpbm9taXMx +FzAVBgNVBAsTDjAwMDIgNDMzOTk4OTAzMR0wGwYDVQQDExRDZXJ0aW5vbWlzIC0g +Um9vdCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANTMCQosP5L2 +fxSeC5yaah1AMGT9qt8OHgZbn1CF6s2Nq0Nn3rD6foCWnoR4kkjW4znuzuRZWJfl +LieY6pOod5tK8O90gC3rMB+12ceAnGInkYjwSond3IjmFPnVAy//ldu9n+ws+hQV +WZUKxkd8aRi5pwP5ynapz8dvtF4F/u7BUrJ1Mofs7SlmO/NKFoL21prbcpjp3vDF +TKWrteoB4owuZH9kb/2jJZOLyKIOSY008B/sWEUuNKqEUL3nskoTuLAPrjhdsKkb +5nPJWqHZZkCqqU2mNAKthH6yI8H7KsZn9DS2sJVqM09xRLWtwHkziOC/7aOgFLSc +CbAK42C++PhmiM1b8XcF4LVzbsF9Ri6OSyemzTUK/eVNfaoqoynHWmgE6OXWk6Ri +wsXm9E/G+Z8ajYJJGYrKWUM66A0ywfRMEwNvbqY/kXPLynNvEiCL7sCCeN5LLsJJ +wx3tFvYk9CcbXFcx3FXuqB5vbKziRcxXV4p1VxngtViZSTYxPDMBbRZKzbgqg4SG +m/lg0h9tkQPTYKbVPZrdd5A9NaSfD171UkRpucC63M9933zZxKyGIjK8e2uR73r4 +F2iw4lNVYC2vPsKD2NkJK/DAZNuHi5HMkesE/Xa0lZrmFAYb1TQdvtj/dBxThZng +WVJKYe2InmtJiUZ+IFrZ50rlau7SZRFDAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIB +BjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTvkUz1pcMw6C8I6tNxIqSSaHh0 +2TAfBgNVHSMEGDAWgBTvkUz1pcMw6C8I6tNxIqSSaHh02TANBgkqhkiG9w0BAQsF +AAOCAgEAfj1U2iJdGlg+O1QnurrMyOMaauo++RLrVl89UM7g6kgmJs95Vn6RHJk/ +0KGRHCwPT5iVWVO90CLYiF2cN/z7ZMF4jIuaYAnq1fohX9B0ZedQxb8uuQsLrbWw +F6YSjNRieOpWauwK0kDDPAUwPk2Ut59KA9N9J0u2/kTO+hkzGm2kQtHdzMjI1xZS +g081lLMSVX3l4kLr5JyTCcBMWwerx20RoFAXlCOotQqSD7J6wWAsOMwaplv/8gzj +qh8c3LigkyfeY+N/IZ865Z764BNqdeuWXGKRlI5nU7aJ+BIJy29SWwNyhlCVCNSN +h4YVH5Uk2KRvms6knZtt0rJ2BobGVgjF6wnaNsIbW0G+YSrjcOa4pvi2WsS9Iff/ +ql+hbHY5ZtbqTFXhADObE5hjyW/QASAJN1LnDE8+zbz1X5YnpyACleAu6AdBBR8V +btaw5BngDwKTACdyxYvRVB9dSsNAl35VpnzBMwQUAR1JIGkLGZOdblgi90AMRgwj +Y/M50n92Uaf0yKHxDHYiI0ZSKS3io0EHVmmY0gUJvGnHWmHNj4FgFU2A3ZDifcRQ +8ow7bkrHxuaAKzyBvBGAFhAn1/DNP3nMcyrDflOR1m749fPH0FFNjkulW+YZFzvW +gQncItzujrnEj1PhZ7szuIgVRs/taTX/dQ1G885x4cVrhkIGuUE= +-----END CERTIFICATE----- + +# Issuer: CN=OISTE WISeKey Global Root GB CA O=WISeKey OU=OISTE Foundation Endorsed +# Subject: CN=OISTE WISeKey Global Root GB CA O=WISeKey OU=OISTE Foundation Endorsed +# Label: "OISTE WISeKey Global Root GB CA" +# Serial: 157768595616588414422159278966750757568 +# MD5 Fingerprint: a4:eb:b9:61:28:2e:b7:2f:98:b0:35:26:90:99:51:1d +# SHA1 Fingerprint: 0f:f9:40:76:18:d3:d7:6a:4b:98:f0:a8:35:9e:0c:fd:27:ac:cc:ed +# SHA256 Fingerprint: 6b:9c:08:e8:6e:b0:f7:67:cf:ad:65:cd:98:b6:21:49:e5:49:4a:67:f5:84:5e:7b:d1:ed:01:9f:27:b8:6b:d6 +-----BEGIN CERTIFICATE----- 
+MIIDtTCCAp2gAwIBAgIQdrEgUnTwhYdGs/gjGvbCwDANBgkqhkiG9w0BAQsFADBt
+MQswCQYDVQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEiMCAGA1UECxMZT0lTVEUg
+Rm91bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9i
+YWwgUm9vdCBHQiBDQTAeFw0xNDEyMDExNTAwMzJaFw0zOTEyMDExNTEwMzFaMG0x
+CzAJBgNVBAYTAkNIMRAwDgYDVQQKEwdXSVNlS2V5MSIwIAYDVQQLExlPSVNURSBG
+b3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5IEdsb2Jh
+bCBSb290IEdCIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2Be3
+HEokKtaXscriHvt9OO+Y9bI5mE4nuBFde9IllIiCFSZqGzG7qFshISvYD06fWvGx
+WuR51jIjK+FTzJlFXHtPrby/h0oLS5daqPZI7H17Dc0hBt+eFf1Biki3IPShehtX
+1F1Q/7pn2COZH8g/497/b1t3sWtuuMlk9+HKQUYOKXHQuSP8yYFfTvdv37+ErXNk
+u7dCjmn21HYdfp2nuFeKUWdy19SouJVUQHMD9ur06/4oQnc/nSMbsrY9gBQHTC5P
+99UKFg29ZkM3fiNDecNAhvVMKdqOmq0NpQSHiB6F4+lT1ZvIiwNjeOvgGUpuuy9r
+M2RYk61pv48b74JIxwIDAQABo1EwTzALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUw
+AwEB/zAdBgNVHQ4EFgQUNQ/INmNe4qPs+TtmFc5RUuORmj0wEAYJKwYBBAGCNxUB
+BAMCAQAwDQYJKoZIhvcNAQELBQADggEBAEBM+4eymYGQfp3FsLAmzYh7KzKNbrgh
+cViXfa43FK8+5/ea4n32cZiZBKpDdHij40lhPnOMTZTg+XHEthYOU3gf1qKHLwI5
+gSk8rxWYITD+KJAAjNHhy/peyP34EEY7onhCkRd0VQreUGdNZtGn//3ZwLWoo4rO
+ZvUPQ82nK1d7Y0Zqqi5S2PTt4W2tKZB4SLrhI6qjiey1q5bAtEuiHZeeevJuQHHf
+aPFlTc58Bd9TZaml8LGXBHAVRgOY1NK/VLSgWH1Sb9pWJmLU2NuJMW8c8CLC02Ic
+Nc1MaRVUGpCY3useX8p3x8uOPUNpnJpY0CQ73xtAln41rYHHTnG6iBM=
+-----END CERTIFICATE-----
+
+# Issuer: CN=SZAFIR ROOT CA2 O=Krajowa Izba Rozliczeniowa S.A.
+# Subject: CN=SZAFIR ROOT CA2 O=Krajowa Izba Rozliczeniowa S.A.
+# Label: "SZAFIR ROOT CA2"
+# Serial: 357043034767186914217277344587386743377558296292
+# MD5 Fingerprint: 11:64:c1:89:b0:24:b1:8c:b1:07:7e:89:9e:51:9e:99
+# SHA1 Fingerprint: e2:52:fa:95:3f:ed:db:24:60:bd:6e:28:f3:9c:cc:cf:5e:b3:3f:de
+# SHA256 Fingerprint: a1:33:9d:33:28:1a:0b:56:e5:57:d3:d3:2b:1c:e7:f9:36:7e:b0:94:bd:5f:a7:2a:7e:50:04:c8:de:d7:ca:fe
+-----BEGIN CERTIFICATE-----
+MIIDcjCCAlqgAwIBAgIUPopdB+xV0jLVt+O2XwHrLdzk1uQwDQYJKoZIhvcNAQEL
+BQAwUTELMAkGA1UEBhMCUEwxKDAmBgNVBAoMH0tyYWpvd2EgSXpiYSBSb3psaWN6
+ZW5pb3dhIFMuQS4xGDAWBgNVBAMMD1NaQUZJUiBST09UIENBMjAeFw0xNTEwMTkw
+NzQzMzBaFw0zNTEwMTkwNzQzMzBaMFExCzAJBgNVBAYTAlBMMSgwJgYDVQQKDB9L
+cmFqb3dhIEl6YmEgUm96bGljemVuaW93YSBTLkEuMRgwFgYDVQQDDA9TWkFGSVIg
+Uk9PVCBDQTIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC3vD5QqEvN
+QLXOYeeWyrSh2gwisPq1e3YAd4wLz32ohswmUeQgPYUM1ljj5/QqGJ3a0a4m7utT
+3PSQ1hNKDJA8w/Ta0o4NkjrcsbH/ON7Dui1fgLkCvUqdGw+0w8LBZwPd3BucPbOw
+3gAeqDRHu5rr/gsUvTaE2g0gv/pby6kWIK05YO4vdbbnl5z5Pv1+TW9NL++IDWr6
+3fE9biCloBK0TXC5ztdyO4mTp4CEHCdJckm1/zuVnsHMyAHs6A6KCpbns6aH5db5
+BSsNl0BwPLqsdVqc1U2dAgrSS5tmS0YHF2Wtn2yIANwiieDhZNRnvDF5YTy7ykHN
+XGoAyDw4jlivAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQD
+AgEGMB0GA1UdDgQWBBQuFqlKGLXLzPVvUPMjX/hd56zwyDANBgkqhkiG9w0BAQsF
+AAOCAQEAtXP4A9xZWx126aMqe5Aosk3AM0+qmrHUuOQn/6mWmc5G4G18TKI4pAZw
+8PRBEew/R40/cof5O/2kbytTAOD/OblqBw7rHRz2onKQy4I9EYKL0rufKq8h5mOG
+nXkZ7/e7DDWQw4rtTw/1zBLZpD67oPwglV9PJi8RI4NOdQcPv5vRtB3pEAT+ymCP
+oky4rc/hkA/NrgrHXXu3UNLUYfrVFdvXn4dRVOul4+vJhaAlIDf7js4MNIThPIGy
+d05DpYhfhmehPea0XGG2Ptv+tyjFogeutcrKjSoS75ftwjCkySp6+/NNIxuZMzSg
+LvWpCz/UXeHPhJ/iGcJfitYgHuNztw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Certum Trusted Network CA 2 O=Unizeto Technologies S.A. OU=Certum Certification Authority
+# Subject: CN=Certum Trusted Network CA 2 O=Unizeto Technologies S.A. OU=Certum Certification Authority
+# Label: "Certum Trusted Network CA 2"
+# Serial: 44979900017204383099463764357512596969
+# MD5 Fingerprint: 6d:46:9e:d9:25:6d:08:23:5b:5e:74:7d:1e:27:db:f2
+# SHA1 Fingerprint: d3:dd:48:3e:2b:bf:4c:05:e8:af:10:f5:fa:76:26:cf:d3:dc:30:92
+# SHA256 Fingerprint: b6:76:f2:ed:da:e8:77:5c:d3:6c:b0:f6:3c:d1:d4:60:39:61:f4:9e:62:65:ba:01:3a:2f:03:07:b6:d0:b8:04
+-----BEGIN CERTIFICATE-----
+MIIF0jCCA7qgAwIBAgIQIdbQSk8lD8kyN/yqXhKN6TANBgkqhkiG9w0BAQ0FADCB
+gDELMAkGA1UEBhMCUEwxIjAgBgNVBAoTGVVuaXpldG8gVGVjaG5vbG9naWVzIFMu
+QS4xJzAlBgNVBAsTHkNlcnR1bSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTEkMCIG
+A1UEAxMbQ2VydHVtIFRydXN0ZWQgTmV0d29yayBDQSAyMCIYDzIwMTExMDA2MDgz
+OTU2WhgPMjA0NjEwMDYwODM5NTZaMIGAMQswCQYDVQQGEwJQTDEiMCAGA1UEChMZ
+VW5pemV0byBUZWNobm9sb2dpZXMgUy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRp
+ZmljYXRpb24gQXV0aG9yaXR5MSQwIgYDVQQDExtDZXJ0dW0gVHJ1c3RlZCBOZXR3
+b3JrIENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC9+Xj45tWA
+DGSdhhuWZGc/IjoedQF97/tcZ4zJzFxrqZHmuULlIEub2pt7uZld2ZuAS9eEQCsn
+0+i6MLs+CRqnSZXvK0AkwpfHp+6bJe+oCgCXhVqqndwpyeI1B+twTUrWwbNWuKFB
+OJvR+zF/j+Bf4bE/D44WSWDXBo0Y+aomEKsq09DRZ40bRr5HMNUuctHFY9rnY3lE
+fktjJImGLjQ/KUxSiyqnwOKRKIm5wFv5HdnnJ63/mgKXwcZQkpsCLL2puTRZCr+E
+Sv/f/rOf69me4Jgj7KZrdxYq28ytOxykh9xGc14ZYmhFV+SQgkK7QtbwYeDBoz1m
+o130GO6IyY0XRSmZMnUCMe4pJshrAua1YkV/NxVaI2iJ1D7eTiew8EAMvE0Xy02i
+sx7QBlrd9pPPV3WZ9fqGGmd4s7+W/jTcvedSVuWz5XV710GRBdxdaeOVDUO5/IOW
+OZV7bIBaTxNyxtd9KXpEulKkKtVBRgkg/iKgtlswjbyJDNXXcPiHUv3a76xRLgez
+Tv7QCdpw75j6VuZt27VXS9zlLCUVyJ4ueE742pyehizKV/Ma5ciSixqClnrDvFAS
+adgOWkaLOusm+iPJtrCBvkIApPjW/jAux9JG9uWOdf3yzLnQh1vMBhBgu4M1t15n
+3kfsmUjxpKEV/q2MYo45VU85FrmxY53/twIDAQABo0IwQDAPBgNVHRMBAf8EBTAD
+AQH/MB0GA1UdDgQWBBS2oVQ5AsOgP46KvPrU+Bym0ToO/TAOBgNVHQ8BAf8EBAMC
+AQYwDQYJKoZIhvcNAQENBQADggIBAHGlDs7k6b8/ONWJWsQCYftMxRQXLYtPU2sQ
+F/xlhMcQSZDe28cmk4gmb3DWAl45oPePq5a1pRNcgRRtDoGCERuKTsZPpd1iHkTf
+CVn0W3cLN+mLIMb4Ck4uWBzrM9DPhmDJ2vuAL55MYIR4PSFk1vtBHxgP58l1cb29
+XN40hz5BsA72udY/CROWFC/emh1auVbONTqwX3BNXuMp8SMoclm2q8KMZiYcdywm
+djWLKKdpoPk79SPdhRB0yZADVpHnr7pH1BKXESLjokmUbOe3lEu6LaTaM4tMpkT/
+WjzGHWTYtTHkpjx6qFcL2+1hGsvxznN3Y6SHb0xRONbkX8eftoEq5IVIeVheO/jb
+AoJnwTnbw3RLPTYe+SmTiGhbqEQZIfCn6IENLOiTNrQ3ssqwGyZ6miUfmpqAnksq
+P/ujmv5zMnHCnsZy4YpoJ/HkD7TETKVhk/iXEAcqMCWpuchxuO9ozC1+9eB+D4Ko
+b7a6bINDd82Kkhehnlt4Fj1F4jNy3eFmypnTycUm/Q1oBEauttmbjL4ZvrHG8hnj
+XALKLNhvSgfZyTXaQHXyxKcZb55CEJh15pWLYLztxRLXis7VmFxWlgPF7ncGNf/P
+5O4/E2Hu29othfDNrp2yGAlFw5Khchf8R7agCyzxxN5DaAhqXzvwdmP7zAYspsbi
+DrW5viSP
+-----END CERTIFICATE-----
+
+# Issuer: CN=Hellenic Academic and Research Institutions RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority
+# Subject: CN=Hellenic Academic and Research Institutions RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority
+# Label: "Hellenic Academic and Research Institutions RootCA 2015"
+# Serial: 0
+# MD5 Fingerprint: ca:ff:e2:db:03:d9:cb:4b:e9:0f:ad:84:fd:7b:18:ce
+# SHA1 Fingerprint: 01:0c:06:95:a6:98:19:14:ff:bf:5f:c6:b0:b6:95:ea:29:e9:12:a6
+# SHA256 Fingerprint: a0:40:92:9a:02:ce:53:b4:ac:f4:f2:ff:c6:98:1c:e4:49:6f:75:5e:6d:45:fe:0b:2a:69:2b:cd:52:52:3f:36
+-----BEGIN CERTIFICATE-----
+MIIGCzCCA/OgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBpjELMAkGA1UEBhMCR1Ix
+DzANBgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNhZGVtaWMgYW5k
+IFJlc2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkxQDA+BgNVBAMT
+N0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgUm9v
+dENBIDIwMTUwHhcNMTUwNzA3MTAxMTIxWhcNNDAwNjMwMTAxMTIxWjCBpjELMAkG
+A1UEBhMCR1IxDzANBgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNh
+ZGVtaWMgYW5kIFJlc2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkx
+QDA+BgNVBAMTN0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1
+dGlvbnMgUm9vdENBIDIwMTUwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC
+AQDC+Kk/G4n8PDwEXT2QNrCROnk8ZlrvbTkBSRq0t89/TSNTt5AA4xMqKKYx8ZEA
+4yjsriFBzh/a/X0SWwGDD7mwX5nh8hKDgE0GPt+sr+ehiGsxr/CL0BgzuNtFajT0
+AoAkKAoCFZVedioNmToUW/bLy1O8E00BiDeUJRtCvCLYjqOWXjrZMts+6PAQZe10
+4S+nfK8nNLspfZu2zwnI5dMK/IhlZXQK3HMcXM1AsRzUtoSMTFDPaI6oWa7CJ06C
+ojXdFPQf/7J31Ycvqm59JCfnxssm5uX+Zwdj2EUN3TpZZTlYepKZcj2chF6IIbjV
+9Cz82XBST3i4vTwri5WY9bPRaM8gFH5MXF/ni+X1NYEZN9cRCLdmvtNKzoNXADrD
+gfgXy5I2XdGj2HUb4Ysn6npIQf1FGQatJ5lOwXBH3bWfgVMS5bGMSF0xQxfjjMZ6
+Y5ZLKTBOhE5iGV48zpeQpX8B653g+IuJ3SWYPZK2fu/Z8VFRfS0myGlZYeCsargq
+NhEEelC9MoS+L9xy1dcdFkfkR2YgP/SWxa+OAXqlD3pk9Q0Yh9muiNX6hME6wGko
+LfINaFGq46V3xqSQDqE3izEjR8EJCOtu93ib14L8hCCZSRm2Ekax+0VVFqmjZayc
+Bw/qa9wfLgZy7IaIEuQt218FL+TwA9MmM+eAws1CoRc0CwIDAQABo0IwQDAPBgNV
+HRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUcRVnyMjJvXVd
+ctA4GGqd83EkVAswDQYJKoZIhvcNAQELBQADggIBAHW7bVRLqhBYRjTyYtcWNl0I
+XtVsyIe9tC5G8jH4fOpCtZMWVdyhDBKg2mF+D1hYc2Ryx+hFjtyp8iY/xnmMsVMI
+M4GwVhO+5lFc2JsKT0ucVlMC6U/2DWDqTUJV6HwbISHTGzrMd/K4kPFox/la/vot
+9L/J9UUbzjgQKjeKeaO04wlshYaT/4mWJ3iBj2fjRnRUjtkNaeJK9E10A/+yd+2V
+Z5fkscWrv2oj6NSU4kQoYsRL4vDY4ilrGnB+JGGTe08DMiUNRSQrlrRGar9KC/ea
+j8GsGsVn82800vpzY4zvFrCopEYq+OsS7HK07/grfoxSwIuEVPkvPuNVqNxmsdnh
+X9izjFk0WaSrT2y7HxjbdavYy5LNlDhhDgcGH0tGEPEVvo2FXDtKK4F5D7Rpn0lQ
+l033DlZdwJVqwjbDG2jJ9SrcR5q+ss7FJej6A7na+RZukYT1HCjI/CbM1xyQVqdf
+bzoEvM14iQuODy+jqk+iGxI9FghAD/FGTNeqewjBCvVtJ94Cj8rDtSvK6evIIVM4
+pcw72Hc3MKJP2W/R8kCtQXoXxdZKNYm3QdV8hn9VTYNKpXMgwDqvkPGaJI7ZjnHK
+e7iG2rKPmT4dEw0SEe7Uq/DpFXYC5ODfqiAeW2GFZECpkJcNrVPSWh2HagCXZWK0
+vm9qp/UsQu0yrbYhnr68
+-----END CERTIFICATE-----
+
+# Issuer: CN=Hellenic Academic and Research Institutions ECC RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority
+# Subject: CN=Hellenic Academic and Research Institutions ECC RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority
+# Label: "Hellenic Academic and Research Institutions ECC RootCA 2015"
+# Serial: 0
+# MD5 Fingerprint: 81:e5:b4:17:eb:c2:f5:e1:4b:0d:41:7b:49:92:fe:ef
+# SHA1 Fingerprint: 9f:f1:71:8d:92:d5:9a:f3:7d:74:97:b4:bc:6f:84:68:0b:ba:b6:66
+# SHA256 Fingerprint: 44:b5:45:aa:8a:25:e6:5a:73:ca:15:dc:27:fc:36:d2:4c:1c:b9:95:3a:06:65:39:b1:15:82:dc:48:7b:48:33
+-----BEGIN CERTIFICATE-----
+MIICwzCCAkqgAwIBAgIBADAKBggqhkjOPQQDAjCBqjELMAkGA1UEBhMCR1IxDzAN
+BgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl
+c2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkxRDBCBgNVBAMTO0hl
+bGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgRUNDIFJv
+b3RDQSAyMDE1MB4XDTE1MDcwNzEwMzcxMloXDTQwMDYzMDEwMzcxMlowgaoxCzAJ
+BgNVBAYTAkdSMQ8wDQYDVQQHEwZBdGhlbnMxRDBCBgNVBAoTO0hlbGxlbmljIEFj
+YWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgQ2VydC4gQXV0aG9yaXR5
+MUQwQgYDVQQDEztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0
+dXRpb25zIEVDQyBSb290Q0EgMjAxNTB2MBAGByqGSM49AgEGBSuBBAAiA2IABJKg
+QehLgoRc4vgxEZmGZE4JJS+dQS8KrjVPdJWyUWRrjWvmP3CV8AVER6ZyOFB2lQJa
+jq4onvktTpnvLEhvTCUp6NFxW98dwXU3tNf6e3pCnGoKVlp8aQuqgAkkbH7BRqNC
+MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFLQi
+C4KZJAEOnLvkDv2/+5cgk5kqMAoGCCqGSM49BAMCA2cAMGQCMGfOFmI4oqxiRaep
+lSTAGiecMjvAwNW6qef4BENThe5SId6d9SWDPp5YSy/XZxMOIQIwBeF1Ad5o7Sof
+TUwJCA3sS61kFyjndc5FZXIhF8siQQ6ME5g4mlRtm8rifOoCWCKR
+-----END CERTIFICATE-----
+
+# Issuer: CN=ISRG Root X1 O=Internet Security Research Group
+# Subject: CN=ISRG Root X1 O=Internet Security Research Group
+# Label: "ISRG Root X1"
+# Serial: 172886928669790476064670243504169061120
+# MD5 Fingerprint: 0c:d2:f9:e0:da:17:73:e9:ed:86:4d:a5:e3:70:e7:4e
+# SHA1 Fingerprint: ca:bd:2a:79:a1:07:6a:31:f2:1d:25:36:35:cb:03:9d:43:29:a5:e8
+# SHA256 Fingerprint: 96:bc:ec:06:26:49:76:f3:74:60:77:9a:cf:28:c5:a7:cf:e8:a3:c0:aa:e1:1a:8f:fc:ee:05:c0:bd:df:08:c6
+-----BEGIN CERTIFICATE-----
+MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
+TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
+cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
+WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
+ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
+MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
+h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
+0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
+A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
+T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
+B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
+B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
+KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
+OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
+jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
+qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
+rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
+HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
+hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
+ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
+3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
+NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
+ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
+TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
+jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
+oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq +4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA +mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d +emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc= +-----END CERTIFICATE----- + +# Issuer: O=FNMT-RCM OU=AC RAIZ FNMT-RCM +# Subject: O=FNMT-RCM OU=AC RAIZ FNMT-RCM +# Label: "AC RAIZ FNMT-RCM" +# Serial: 485876308206448804701554682760554759 +# MD5 Fingerprint: e2:09:04:b4:d3:bd:d1:a0:14:fd:1a:d2:47:c4:57:1d +# SHA1 Fingerprint: ec:50:35:07:b2:15:c4:95:62:19:e2:a8:9a:5b:42:99:2c:4c:2c:20 +# SHA256 Fingerprint: eb:c5:57:0c:29:01:8c:4d:67:b1:aa:12:7b:af:12:f7:03:b4:61:1e:bc:17:b7:da:b5:57:38:94:17:9b:93:fa +-----BEGIN CERTIFICATE----- +MIIFgzCCA2ugAwIBAgIPXZONMGc2yAYdGsdUhGkHMA0GCSqGSIb3DQEBCwUAMDsx +CzAJBgNVBAYTAkVTMREwDwYDVQQKDAhGTk1ULVJDTTEZMBcGA1UECwwQQUMgUkFJ +WiBGTk1ULVJDTTAeFw0wODEwMjkxNTU5NTZaFw0zMDAxMDEwMDAwMDBaMDsxCzAJ +BgNVBAYTAkVTMREwDwYDVQQKDAhGTk1ULVJDTTEZMBcGA1UECwwQQUMgUkFJWiBG +Tk1ULVJDTTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBALpxgHpMhm5/ +yBNtwMZ9HACXjywMI7sQmkCpGreHiPibVmr75nuOi5KOpyVdWRHbNi63URcfqQgf +BBckWKo3Shjf5TnUV/3XwSyRAZHiItQDwFj8d0fsjz50Q7qsNI1NOHZnjrDIbzAz +WHFctPVrbtQBULgTfmxKo0nRIBnuvMApGGWn3v7v3QqQIecaZ5JCEJhfTzC8PhxF +tBDXaEAUwED653cXeuYLj2VbPNmaUtu1vZ5Gzz3rkQUCwJaydkxNEJY7kvqcfw+Z +374jNUUeAlz+taibmSXaXvMiwzn15Cou08YfxGyqxRxqAQVKL9LFwag0Jl1mpdIC +IfkYtwb1TplvqKtMUejPUBjFd8g5CSxJkjKZqLsXF3mwWsXmo8RZZUc1g16p6DUL +mbvkzSDGm0oGObVo/CK67lWMK07q87Hj/LaZmtVC+nFNCM+HHmpxffnTtOmlcYF7 +wk5HlqX2doWjKI/pgG6BU6VtX7hI+cL5NqYuSf+4lsKMB7ObiFj86xsc3i1w4peS +MKGJ47xVqCfWS+2QrYv6YyVZLag13cqXM7zlzced0ezvXg5KkAYmY6252TUtB7p2 +ZSysV4999AeU14ECll2jB0nVetBX+RvnU0Z1qrB5QstocQjpYL05ac70r8NWQMet +UqIJ5G+GR4of6ygnXYMgrwTJbFaai0b1AgMBAAGjgYMwgYAwDwYDVR0TAQH/BAUw +AwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFPd9xf3E6Jobd2Sn9R2gzL+H +YJptMD4GA1UdIAQ3MDUwMwYEVR0gADArMCkGCCsGAQUFBwIBFh1odHRwOi8vd3d3 +LmNlcnQuZm5tdC5lcy9kcGNzLzANBgkqhkiG9w0BAQsFAAOCAgEAB5BK3/MjTvDD +nFFlm5wioooMhfNzKWtN/gHiqQxjAb8EZ6WdmF/9ARP67Jpi6Yb+tmLSbkyU+8B1 +RXxlDPiyN8+sD8+Nb/kZ94/sHvJwnvDKuO+3/3Y3dlv2bojzr2IyIpMNOmqOFGYM +LVN0V2Ue1bLdI4E7pWYjJ2cJj+F3qkPNZVEI7VFY/uY5+ctHhKQV8Xa7pO6kO8Rf +77IzlhEYt8llvhjho6Tc+hj507wTmzl6NLrTQfv6MooqtyuGC2mDOL7Nii4LcK2N +JpLuHvUBKwrZ1pebbuCoGRw6IYsMHkCtA+fdZn71uSANA+iW+YJF1DngoABd15jm +fZ5nc8OaKveri6E6FO80vFIOiZiaBECEHX5FaZNXzuvO+FB8TxxuBEOb+dY7Ixjp +6o7RTUaN8Tvkasq6+yO3m/qZASlaWFot4/nUbQ4mrcFuNLwy+AwF+mWj2zs3gyLp +1txyM/1d8iC9djwj2ij3+RvrWWTV3F9yfiD8zYm1kGdNYno/Tq0dwzn+evQoFt9B +9kiABdcPUXmsEKvU7ANm5mqwujGSQkBqvjrTcuFqN1W8rB2Vt2lh8kORdOag0wok +RqEIr9baRRmW1FMdW4R58MD3R++Lj8UGrp1MYp3/RgT408m2ECVAdf4WqslKYIYv +uu8wd+RU4riEmViAqhOLUTpPSPaLtrM= +-----END CERTIFICATE----- + +# Issuer: CN=Amazon Root CA 1 O=Amazon +# Subject: CN=Amazon Root CA 1 O=Amazon +# Label: "Amazon Root CA 1" +# Serial: 143266978916655856878034712317230054538369994 +# MD5 Fingerprint: 43:c6:bf:ae:ec:fe:ad:2f:18:c6:88:68:30:fc:c8:e6 +# SHA1 Fingerprint: 8d:a7:f9:65:ec:5e:fc:37:91:0f:1c:6e:59:fd:c1:cc:6a:6e:de:16 +# SHA256 Fingerprint: 8e:cd:e6:88:4f:3d:87:b1:12:5b:a3:1a:c3:fc:b1:3d:70:16:de:7f:57:cc:90:4f:e1:cb:97:c6:ae:98:19:6e +-----BEGIN CERTIFICATE----- +MIIDQTCCAimgAwIBAgITBmyfz5m/jAo54vB4ikPmljZbyjANBgkqhkiG9w0BAQsF +ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6 +b24gUm9vdCBDQSAxMB4XDTE1MDUyNjAwMDAwMFoXDTM4MDExNzAwMDAwMFowOTEL +MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJv +b3QgQ0EgMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALJ4gHHKeNXj 
+ca9HgFB0fW7Y14h29Jlo91ghYPl0hAEvrAIthtOgQ3pOsqTQNroBvo3bSMgHFzZM +9O6II8c+6zf1tRn4SWiw3te5djgdYZ6k/oI2peVKVuRF4fn9tBb6dNqcmzU5L/qw +IFAGbHrQgLKm+a/sRxmPUDgH3KKHOVj4utWp+UhnMJbulHheb4mjUcAwhmahRWa6 +VOujw5H5SNz/0egwLX0tdHA114gk957EWW67c4cX8jJGKLhD+rcdqsq08p8kDi1L +93FcXmn/6pUCyziKrlA4b9v7LWIbxcceVOF34GfID5yHI9Y/QCB/IIDEgEw+OyQm +jgSubJrIqg0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AYYwHQYDVR0OBBYEFIQYzIU07LwMlJQuCFmcx7IQTgoIMA0GCSqGSIb3DQEBCwUA +A4IBAQCY8jdaQZChGsV2USggNiMOruYou6r4lK5IpDB/G/wkjUu0yKGX9rbxenDI +U5PMCCjjmCXPI6T53iHTfIUJrU6adTrCC2qJeHZERxhlbI1Bjjt/msv0tadQ1wUs +N+gDS63pYaACbvXy8MWy7Vu33PqUXHeeE6V/Uq2V8viTO96LXFvKWlJbYK8U90vv +o/ufQJVtMVT8QtPHRh8jrdkPSHCa2XV4cdFyQzR1bldZwgJcJmApzyMZFo6IQ6XU +5MsI+yMRQ+hDKXJioaldXgjUkK642M4UwtBV8ob2xJNDd2ZhwLnoQdeXeGADbkpy +rqXRfboQnoZsG4q5WTP468SQvvG5 +-----END CERTIFICATE----- + +# Issuer: CN=Amazon Root CA 2 O=Amazon +# Subject: CN=Amazon Root CA 2 O=Amazon +# Label: "Amazon Root CA 2" +# Serial: 143266982885963551818349160658925006970653239 +# MD5 Fingerprint: c8:e5:8d:ce:a8:42:e2:7a:c0:2a:5c:7c:9e:26:bf:66 +# SHA1 Fingerprint: 5a:8c:ef:45:d7:a6:98:59:76:7a:8c:8b:44:96:b5:78:cf:47:4b:1a +# SHA256 Fingerprint: 1b:a5:b2:aa:8c:65:40:1a:82:96:01:18:f8:0b:ec:4f:62:30:4d:83:ce:c4:71:3a:19:c3:9c:01:1e:a4:6d:b4 +-----BEGIN CERTIFICATE----- +MIIFQTCCAymgAwIBAgITBmyf0pY1hp8KD+WGePhbJruKNzANBgkqhkiG9w0BAQwF +ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6 +b24gUm9vdCBDQSAyMB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTEL +MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJv +b3QgQ0EgMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK2Wny2cSkxK +gXlRmeyKy2tgURO8TW0G/LAIjd0ZEGrHJgw12MBvIITplLGbhQPDW9tK6Mj4kHbZ +W0/jTOgGNk3Mmqw9DJArktQGGWCsN0R5hYGCrVo34A3MnaZMUnbqQ523BNFQ9lXg +1dKmSYXpN+nKfq5clU1Imj+uIFptiJXZNLhSGkOQsL9sBbm2eLfq0OQ6PBJTYv9K +8nu+NQWpEjTj82R0Yiw9AElaKP4yRLuH3WUnAnE72kr3H9rN9yFVkE8P7K6C4Z9r +2UXTu/Bfh+08LDmG2j/e7HJV63mjrdvdfLC6HM783k81ds8P+HgfajZRRidhW+me +z/CiVX18JYpvL7TFz4QuK/0NURBs+18bvBt+xa47mAExkv8LV/SasrlX6avvDXbR +8O70zoan4G7ptGmh32n2M8ZpLpcTnqWHsFcQgTfJU7O7f/aS0ZzQGPSSbtqDT6Zj +mUyl+17vIWR6IF9sZIUVyzfpYgwLKhbcAS4y2j5L9Z469hdAlO+ekQiG+r5jqFoz +7Mt0Q5X5bGlSNscpb/xVA1wf+5+9R+vnSUeVC06JIglJ4PVhHvG/LopyboBZ/1c6 ++XUyo05f7O0oYtlNc/LMgRdg7c3r3NunysV+Ar3yVAhU/bQtCSwXVEqY0VThUWcI +0u1ufm8/0i2BWSlmy5A5lREedCf+3euvAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMB +Af8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBSwDPBMMPQFWAJI/TPlUq9LhONm +UjANBgkqhkiG9w0BAQwFAAOCAgEAqqiAjw54o+Ci1M3m9Zh6O+oAA7CXDpO8Wqj2 +LIxyh6mx/H9z/WNxeKWHWc8w4Q0QshNabYL1auaAn6AFC2jkR2vHat+2/XcycuUY ++gn0oJMsXdKMdYV2ZZAMA3m3MSNjrXiDCYZohMr/+c8mmpJ5581LxedhpxfL86kS +k5Nrp+gvU5LEYFiwzAJRGFuFjWJZY7attN6a+yb3ACfAXVU3dJnJUH/jWS5E4ywl +7uxMMne0nxrpS10gxdr9HIcWxkPo1LsmmkVwXqkLN1PiRnsn/eBG8om3zEK2yygm +btmlyTrIQRNg91CMFa6ybRoVGld45pIq2WWQgj9sAq+uEjonljYE1x2igGOpm/Hl +urR8FLBOybEfdF849lHqm/osohHUqS0nGkWxr7JOcQ3AWEbWaQbLU8uz/mtBzUF+ +fUwPfHJ5elnNXkoOrJupmHN5fLT0zLm4BwyydFy4x2+IoZCn9Kr5v2c69BoVYh63 +n749sSmvZ6ES8lgQGVMDMBu4Gon2nL2XA46jCfMdiyHxtN/kHNGfZQIG6lzWE7OE +76KlXIx3KadowGuuQNKotOrN8I1LOJwZmhsoVLiJkO/KdYE+HvJkJMcYr07/R54H +9jVlpNMKVv/1F2Rs76giJUmTtt8AF9pYfl3uxRuw0dFfIRDH+fO6AgonB8Xx1sfT +4PsJYGw= +-----END CERTIFICATE----- + +# Issuer: CN=Amazon Root CA 3 O=Amazon +# Subject: CN=Amazon Root CA 3 O=Amazon +# Label: "Amazon Root CA 3" +# Serial: 143266986699090766294700635381230934788665930 +# MD5 Fingerprint: a0:d4:ef:0b:f7:b5:d8:49:95:2a:ec:f5:c4:fc:81:87 +# SHA1 Fingerprint: 0d:44:dd:8c:3c:8c:1a:1a:58:75:64:81:e9:0f:2e:2a:ff:b3:d2:6e +# SHA256 Fingerprint: 
18:ce:6c:fe:7b:f1:4e:60:b2:e3:47:b8:df:e8:68:cb:31:d0:2e:bb:3a:da:27:15:69:f5:03:43:b4:6d:b3:a4 +-----BEGIN CERTIFICATE----- +MIIBtjCCAVugAwIBAgITBmyf1XSXNmY/Owua2eiedgPySjAKBggqhkjOPQQDAjA5 +MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24g +Um9vdCBDQSAzMB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkG +A1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJvb3Qg +Q0EgMzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABCmXp8ZBf8ANm+gBG1bG8lKl +ui2yEujSLtf6ycXYqm0fc4E7O5hrOXwzpcVOho6AF2hiRVd9RFgdszflZwjrZt6j +QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBSr +ttvXBp43rDCGB5Fwx5zEGbF4wDAKBggqhkjOPQQDAgNJADBGAiEA4IWSoxe3jfkr +BqWTrBqYaGFy+uGh0PsceGCmQ5nFuMQCIQCcAu/xlJyzlvnrxir4tiz+OpAUFteM +YyRIHN8wfdVoOw== +-----END CERTIFICATE----- + +# Issuer: CN=Amazon Root CA 4 O=Amazon +# Subject: CN=Amazon Root CA 4 O=Amazon +# Label: "Amazon Root CA 4" +# Serial: 143266989758080763974105200630763877849284878 +# MD5 Fingerprint: 89:bc:27:d5:eb:17:8d:06:6a:69:d5:fd:89:47:b4:cd +# SHA1 Fingerprint: f6:10:84:07:d6:f8:bb:67:98:0c:c2:e2:44:c2:eb:ae:1c:ef:63:be +# SHA256 Fingerprint: e3:5d:28:41:9e:d0:20:25:cf:a6:90:38:cd:62:39:62:45:8d:a5:c6:95:fb:de:a3:c2:2b:0b:fb:25:89:70:92 +-----BEGIN CERTIFICATE----- +MIIB8jCCAXigAwIBAgITBmyf18G7EEwpQ+Vxe3ssyBrBDjAKBggqhkjOPQQDAzA5 +MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24g +Um9vdCBDQSA0MB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkG +A1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJvb3Qg +Q0EgNDB2MBAGByqGSM49AgEGBSuBBAAiA2IABNKrijdPo1MN/sGKe0uoe0ZLY7Bi +9i0b2whxIdIA6GO9mif78DluXeo9pcmBqqNbIJhFXRbb/egQbeOc4OO9X4Ri83Bk +M6DLJC9wuoihKqB1+IGuYgbEgds5bimwHvouXKNCMEAwDwYDVR0TAQH/BAUwAwEB +/zAOBgNVHQ8BAf8EBAMCAYYwHQYDVR0OBBYEFNPsxzplbszh2naaVvuc84ZtV+WB +MAoGCCqGSM49BAMDA2gAMGUCMDqLIfG9fhGt0O9Yli/W651+kI0rz2ZVwyzjKKlw +CkcO8DdZEv8tmZQoTipPNU0zWgIxAOp1AE47xDqUEpHJWEadIRNyp4iciuRMStuW +1KyLa2tJElMzrdfkviT8tQp21KW8EA== +-----END CERTIFICATE----- + +# Issuer: CN=LuxTrust Global Root 2 O=LuxTrust S.A. +# Subject: CN=LuxTrust Global Root 2 O=LuxTrust S.A. 
+# Label: "LuxTrust Global Root 2" +# Serial: 59914338225734147123941058376788110305822489521 +# MD5 Fingerprint: b2:e1:09:00:61:af:f7:f1:91:6f:c4:ad:8d:5e:3b:7c +# SHA1 Fingerprint: 1e:0e:56:19:0a:d1:8b:25:98:b2:04:44:ff:66:8a:04:17:99:5f:3f +# SHA256 Fingerprint: 54:45:5f:71:29:c2:0b:14:47:c4:18:f9:97:16:8f:24:c5:8f:c5:02:3b:f5:da:5b:e2:eb:6e:1d:d8:90:2e:d5 +-----BEGIN CERTIFICATE----- +MIIFwzCCA6ugAwIBAgIUCn6m30tEntpqJIWe5rgV0xZ/u7EwDQYJKoZIhvcNAQEL +BQAwRjELMAkGA1UEBhMCTFUxFjAUBgNVBAoMDUx1eFRydXN0IFMuQS4xHzAdBgNV +BAMMFkx1eFRydXN0IEdsb2JhbCBSb290IDIwHhcNMTUwMzA1MTMyMTU3WhcNMzUw +MzA1MTMyMTU3WjBGMQswCQYDVQQGEwJMVTEWMBQGA1UECgwNTHV4VHJ1c3QgUy5B +LjEfMB0GA1UEAwwWTHV4VHJ1c3QgR2xvYmFsIFJvb3QgMjCCAiIwDQYJKoZIhvcN +AQEBBQADggIPADCCAgoCggIBANeFl78RmOnwYoNMPIf5U2o3C/IPPIfOb9wmKb3F +ibrJgz337spbxm1Jc7TJRqMbNBM/wYlFV/TZsfs2ZUv7COJIcRHIbjuend+JZTem +hfY7RBi2xjcwYkSSl2l9QjAk5A0MiWtj3sXh306pFGxT4GHO9hcvHTy95iJMHZP1 +EMShduxq3sVs35a0VkBCwGKSMKEtFZSg0iAGCW5qbeXrt77U8PEVfIvmTroTzEsn +Xpk8F12PgX8zPU/TPxvsXD/wPEx1bvKm1Z3aLQdjAsZy6ZS8TEmVT4hSyNvoaYL4 +zDRbIvCGp4m9SAptZoFtyMhk+wHh9OHe2Z7d21vUKpkmFRseTJIpgp7VkoGSQXAZ +96Tlk0u8d2cx3Rz9MXANF5kM+Qw5GSoXtTBxVdUPrljhPS80m8+f9niFwpN6cj5m +j5wWEWCPnolvZ77gR1o7DJpni89Gxq44o/KnvObWhWszJHAiS8sIm7vI+AIpHb4g +DEa/a4ebsypmQjVGbKq6rfmYe+lQVRQxv7HaLe2ArWgk+2mr2HETMOZns4dA/Yl+ +8kPREd8vZS9kzl8UubG/Mb2HeFpZZYiq/FkySIbWTLkpS5XTdvN3JW1CHDiDTf2j +X5t/Lax5Gw5CMZdjpPuKadUiDTSQMC6otOBttpSsvItO13D8xTiOZCXhTTmQzsmH +hFhxAgMBAAGjgagwgaUwDwYDVR0TAQH/BAUwAwEB/zBCBgNVHSAEOzA5MDcGByuB +KwEBAQowLDAqBggrBgEFBQcCARYeaHR0cHM6Ly9yZXBvc2l0b3J5Lmx1eHRydXN0 +Lmx1MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBT/GCh2+UgFLKGu8SsbK7JT ++Et8szAdBgNVHQ4EFgQU/xgodvlIBSyhrvErGyuyU/hLfLMwDQYJKoZIhvcNAQEL +BQADggIBAGoZFO1uecEsh9QNcH7X9njJCwROxLHOk3D+sFTAMs2ZMGQXvw/l4jP9 +BzZAcg4atmpZ1gDlaCDdLnINH2pkMSCEfUmmWjfrRcmF9dTHF5kH5ptV5AzoqbTO +jFu1EVzPig4N1qx3gf4ynCSecs5U89BvolbW7MM3LGVYvlcAGvI1+ut7MV3CwRI9 +loGIlonBWVx65n9wNOeD4rHh4bhY79SV5GCc8JaXcozrhAIuZY+kt9J/Z93I055c +qqmkoCUUBpvsT34tC38ddfEz2O3OuHVtPlu5mB0xDVbYQw8wkbIEa91WvpWAVWe+ +2M2D2RjuLg+GLZKecBPs3lHJQ3gCpU3I+V/EkVhGFndadKpAvAefMLmx9xIX3eP/ +JEAdemrRTxgKqpAd60Ae36EeRJIQmvKN4dFLRp7oRUKX6kWZ8+xm1QL68qZKJKre +zrnK+T+Tb/mjuuqlPpmt/f97mfVl7vBZKGfXkJWkE4SphMHozs51k2MavDzq1WQf +LSoSOcbDWjLtR5EWDrw4wVDej8oqkDQc7kGUnF4ZLvhFSZl0kbAEb+MEWrGrKqv+ +x9CWttrhSmQGbmBNvUJO/3jaJMobtNeWOWyu8Q6qp31IiyBMz2TWuJdGsE7RKlY6 +oJO9r4Ak4Ap+58rVyuiFVdw2KuGUaJPHZnJED4AhMmwlxyOAgwrr +-----END CERTIFICATE----- + +# Issuer: CN=TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1 O=Turkiye Bilimsel ve Teknolojik Arastirma Kurumu - TUBITAK OU=Kamu Sertifikasyon Merkezi - Kamu SM +# Subject: CN=TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1 O=Turkiye Bilimsel ve Teknolojik Arastirma Kurumu - TUBITAK OU=Kamu Sertifikasyon Merkezi - Kamu SM +# Label: "TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1" +# Serial: 1 +# MD5 Fingerprint: dc:00:81:dc:69:2f:3e:2f:b0:3b:f6:3d:5a:91:8e:49 +# SHA1 Fingerprint: 31:43:64:9b:ec:ce:27:ec:ed:3a:3f:0b:8f:0d:e4:e8:91:dd:ee:ca +# SHA256 Fingerprint: 46:ed:c3:68:90:46:d5:3a:45:3f:b3:10:4a:b8:0d:ca:ec:65:8b:26:60:ea:16:29:dd:7e:86:79:90:64:87:16 +-----BEGIN CERTIFICATE----- +MIIEYzCCA0ugAwIBAgIBATANBgkqhkiG9w0BAQsFADCB0jELMAkGA1UEBhMCVFIx +GDAWBgNVBAcTD0dlYnplIC0gS29jYWVsaTFCMEAGA1UEChM5VHVya2l5ZSBCaWxp +bXNlbCB2ZSBUZWtub2xvamlrIEFyYXN0aXJtYSBLdXJ1bXUgLSBUVUJJVEFLMS0w +KwYDVQQLEyRLYW11IFNlcnRpZmlrYXN5b24gTWVya2V6aSAtIEthbXUgU00xNjA0 +BgNVBAMTLVRVQklUQUsgS2FtdSBTTSBTU0wgS29rIFNlcnRpZmlrYXNpIC0gU3Vy +dW0gMTAeFw0xMzExMjUwODI1NTVaFw00MzEwMjUwODI1NTVaMIHSMQswCQYDVQQG 
+EwJUUjEYMBYGA1UEBxMPR2ViemUgLSBLb2NhZWxpMUIwQAYDVQQKEzlUdXJraXll +IEJpbGltc2VsIHZlIFRla25vbG9qaWsgQXJhc3Rpcm1hIEt1cnVtdSAtIFRVQklU +QUsxLTArBgNVBAsTJEthbXUgU2VydGlmaWthc3lvbiBNZXJrZXppIC0gS2FtdSBT +TTE2MDQGA1UEAxMtVFVCSVRBSyBLYW11IFNNIFNTTCBLb2sgU2VydGlmaWthc2kg +LSBTdXJ1bSAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAr3UwM6q7 +a9OZLBI3hNmNe5eA027n/5tQlT6QlVZC1xl8JoSNkvoBHToP4mQ4t4y86Ij5iySr +LqP1N+RAjhgleYN1Hzv/bKjFxlb4tO2KRKOrbEz8HdDc72i9z+SqzvBV96I01INr +N3wcwv61A+xXzry0tcXtAA9TNypN9E8Mg/uGz8v+jE69h/mniyFXnHrfA2eJLJ2X +YacQuFWQfw4tJzh03+f92k4S400VIgLI4OD8D62K18lUUMw7D8oWgITQUVbDjlZ/ +iSIzL+aFCr2lqBs23tPcLG07xxO9WSMs5uWk99gL7eqQQESolbuT1dCANLZGeA4f +AJNG4e7p+exPFwIDAQABo0IwQDAdBgNVHQ4EFgQUZT/HiobGPN08VFw1+DrtUgxH +V8gwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEL +BQADggEBACo/4fEyjq7hmFxLXs9rHmoJ0iKpEsdeV31zVmSAhHqT5Am5EM2fKifh +AHe+SMg1qIGf5LgsyX8OsNJLN13qudULXjS99HMpw+0mFZx+CFOKWI3QSyjfwbPf +IPP54+M638yclNhOT8NrF7f3cuitZjO1JVOr4PhMqZ398g26rrnZqsZr+ZO7rqu4 +lzwDGrpDxpa5RXI4s6ehlj2Re37AIVNMh+3yC1SVUZPVIqUNivGTDj5UDrDYyU7c +8jEyVupk+eq1nRZmQnLzf9OxMUP8pI4X8W0jq5Rm+K37DwhuJi1/FwcJsoz7UMCf +lo3Ptv0AnVoUmr8CRPXBwp8iXqIPoeM= +-----END CERTIFICATE----- + +# Issuer: CN=GDCA TrustAUTH R5 ROOT O=GUANG DONG CERTIFICATE AUTHORITY CO.,LTD. +# Subject: CN=GDCA TrustAUTH R5 ROOT O=GUANG DONG CERTIFICATE AUTHORITY CO.,LTD. +# Label: "GDCA TrustAUTH R5 ROOT" +# Serial: 9009899650740120186 +# MD5 Fingerprint: 63:cc:d9:3d:34:35:5c:6f:53:a3:e2:08:70:48:1f:b4 +# SHA1 Fingerprint: 0f:36:38:5b:81:1a:25:c3:9b:31:4e:83:ca:e9:34:66:70:cc:74:b4 +# SHA256 Fingerprint: bf:ff:8f:d0:44:33:48:7d:6a:8a:a6:0c:1a:29:76:7a:9f:c2:bb:b0:5e:42:0f:71:3a:13:b9:92:89:1d:38:93 +-----BEGIN CERTIFICATE----- +MIIFiDCCA3CgAwIBAgIIfQmX/vBH6nowDQYJKoZIhvcNAQELBQAwYjELMAkGA1UE +BhMCQ04xMjAwBgNVBAoMKUdVQU5HIERPTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZ +IENPLixMVEQuMR8wHQYDVQQDDBZHRENBIFRydXN0QVVUSCBSNSBST09UMB4XDTE0 +MTEyNjA1MTMxNVoXDTQwMTIzMTE1NTk1OVowYjELMAkGA1UEBhMCQ04xMjAwBgNV +BAoMKUdVQU5HIERPTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZIENPLixMVEQuMR8w +HQYDVQQDDBZHRENBIFRydXN0QVVUSCBSNSBST09UMIICIjANBgkqhkiG9w0BAQEF +AAOCAg8AMIICCgKCAgEA2aMW8Mh0dHeb7zMNOwZ+Vfy1YI92hhJCfVZmPoiC7XJj +Dp6L3TQsAlFRwxn9WVSEyfFrs0yw6ehGXTjGoqcuEVe6ghWinI9tsJlKCvLriXBj +TnnEt1u9ol2x8kECK62pOqPseQrsXzrj/e+APK00mxqriCZ7VqKChh/rNYmDf1+u +KU49tm7srsHwJ5uu4/Ts765/94Y9cnrrpftZTqfrlYwiOXnhLQiPzLyRuEH3FMEj +qcOtmkVEs7LXLM3GKeJQEK5cy4KOFxg2fZfmiJqwTTQJ9Cy5WmYqsBebnh52nUpm +MUHfP/vFBu8btn4aRjb3ZGM74zkYI+dndRTVdVeSN72+ahsmUPI2JgaQxXABZG12 +ZuGR224HwGGALrIuL4xwp9E7PLOR5G62xDtw8mySlwnNR30YwPO7ng/Wi64HtloP +zgsMR6flPri9fcebNaBhlzpBdRfMK5Z3KpIhHtmVdiBnaM8Nvd/WHwlqmuLMc3Gk +L30SgLdTMEZeS1SZD2fJpcjyIMGC7J0R38IC+xo70e0gmu9lZJIQDSri3nDxGGeC +jGHeuLzRL5z7D9Ar7Rt2ueQ5Vfj4oR24qoAATILnsn8JuLwwoC8N9VKejveSswoA +HQBUlwbgsQfZxw9cZX08bVlX5O2ljelAU58VS6Bx9hoh49pwBiFYFIeFd3mqgnkC +AwEAAaNCMEAwHQYDVR0OBBYEFOLJQJ9NzuiaoXzPDj9lxSmIahlRMA8GA1UdEwEB +/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4ICAQDRSVfg +p8xoWLoBDysZzY2wYUWsEe1jUGn4H3++Fo/9nesLqjJHdtJnJO29fDMylyrHBYZm +DRd9FBUb1Ov9H5r2XpdptxolpAqzkT9fNqyL7FeoPueBihhXOYV0GkLH6VsTX4/5 +COmSdI31R9KrO9b7eGZONn356ZLpBN79SWP8bfsUcZNnL0dKt7n/HipzcEYwv1ry +L3ml4Y0M2fmyYzeMN2WFcGpcWwlyua1jPLHd+PwyvzeG5LuOmCd+uh8W4XAR8gPf +JWIyJyYYMoSf/wA6E7qaTfRPuBRwIrHKK5DOKcFw9C+df/KQHtZa37dG/OaG+svg +IHZ6uqbL9XzeYqWxi+7egmaKTjowHz+Ay60nugxe19CxVsp3cbK1daFQqUBDF8Io +2c9Si1vIY9RCPqAzekYu9wogRlR+ak8x8YF+QnQ4ZXMn7sZ8uI7XpTrXmKGcjBBV +09tL7ECQ8s1uV9JiDnxXk7Gnbc2dg7sq5+W2O3FYrf3RRbxake5TFW/TRQl1brqQ 
+XR4EzzffHqhmsYzmIGrv/EhOdJhCrylvLmrH+33RZjEizIYAfmaDDEL0vTSSwxrq +T8p+ck0LcIymSLumoRT2+1hEmRSuqguTaaApJUqlyyvdimYHFngVV3Eb7PVHhPOe +MTd61X8kreS8/f3MboPoDKi3QWwH3b08hpcv0g== +-----END CERTIFICATE----- + +# Issuer: CN=TrustCor RootCert CA-1 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority +# Subject: CN=TrustCor RootCert CA-1 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority +# Label: "TrustCor RootCert CA-1" +# Serial: 15752444095811006489 +# MD5 Fingerprint: 6e:85:f1:dc:1a:00:d3:22:d5:b2:b2:ac:6b:37:05:45 +# SHA1 Fingerprint: ff:bd:cd:e7:82:c8:43:5e:3c:6f:26:86:5c:ca:a8:3a:45:5b:c3:0a +# SHA256 Fingerprint: d4:0e:9c:86:cd:8f:e4:68:c1:77:69:59:f4:9e:a7:74:fa:54:86:84:b6:c4:06:f3:90:92:61:f4:dc:e2:57:5c +-----BEGIN CERTIFICATE----- +MIIEMDCCAxigAwIBAgIJANqb7HHzA7AZMA0GCSqGSIb3DQEBCwUAMIGkMQswCQYD +VQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEgQ2l0eTEk +MCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYDVQQLDB5U +cnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxHzAdBgNVBAMMFlRydXN0Q29y +IFJvb3RDZXJ0IENBLTEwHhcNMTYwMjA0MTIzMjE2WhcNMjkxMjMxMTcyMzE2WjCB +pDELMAkGA1UEBhMCUEExDzANBgNVBAgMBlBhbmFtYTEUMBIGA1UEBwwLUGFuYW1h +IENpdHkxJDAiBgNVBAoMG1RydXN0Q29yIFN5c3RlbXMgUy4gZGUgUi5MLjEnMCUG +A1UECwweVHJ1c3RDb3IgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MR8wHQYDVQQDDBZU +cnVzdENvciBSb290Q2VydCBDQS0xMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAv463leLCJhJrMxnHQFgKq1mqjQCj/IDHUHuO1CAmujIS2CNUSSUQIpid +RtLByZ5OGy4sDjjzGiVoHKZaBeYei0i/mJZ0PmnK6bV4pQa81QBeCQryJ3pS/C3V +seq0iWEk8xoT26nPUu0MJLq5nux+AHT6k61sKZKuUbS701e/s/OojZz0JEsq1pme +9J7+wH5COucLlVPat2gOkEz7cD+PSiyU8ybdY2mplNgQTsVHCJCZGxdNuWxu72CV +EY4hgLW9oHPY0LJ3xEXqWib7ZnZ2+AYfYW0PVcWDtxBWcgYHpfOxGgMFZA6dWorW +hnAbJN7+KIor0Gqw/Hqi3LJ5DotlDwIDAQABo2MwYTAdBgNVHQ4EFgQU7mtJPHo/ +DeOxCbeKyKsZn3MzUOcwHwYDVR0jBBgwFoAU7mtJPHo/DeOxCbeKyKsZn3MzUOcw +DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQELBQAD +ggEBACUY1JGPE+6PHh0RU9otRCkZoB5rMZ5NDp6tPVxBb5UrJKF5mDo4Nvu7Zp5I +/5CQ7z3UuJu0h3U/IJvOcs+hVcFNZKIZBqEHMwwLKeXx6quj7LUKdJDHfXLy11yf +ke+Ri7fc7Waiz45mO7yfOgLgJ90WmMCV1Aqk5IGadZQ1nJBfiDcGrVmVCrDRZ9MZ +yonnMlo2HD6CqFqTvsbQZJG2z9m2GM/bftJlo6bEjhcxwft+dtvTheNYsnd6djts +L1Ac59v2Z3kf9YKVmgenFK+P3CghZwnS1k1aHBkcjndcw5QkPTJrS37UeJSDvjdN +zl/HHk484IkzlQsPpTLWPFp5LBk= +-----END CERTIFICATE----- + +# Issuer: CN=TrustCor RootCert CA-2 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority +# Subject: CN=TrustCor RootCert CA-2 O=TrustCor Systems S. de R.L. 
OU=TrustCor Certificate Authority +# Label: "TrustCor RootCert CA-2" +# Serial: 2711694510199101698 +# MD5 Fingerprint: a2:e1:f8:18:0b:ba:45:d5:c7:41:2a:bb:37:52:45:64 +# SHA1 Fingerprint: b8:be:6d:cb:56:f1:55:b9:63:d4:12:ca:4e:06:34:c7:94:b2:1c:c0 +# SHA256 Fingerprint: 07:53:e9:40:37:8c:1b:d5:e3:83:6e:39:5d:ae:a5:cb:83:9e:50:46:f1:bd:0e:ae:19:51:cf:10:fe:c7:c9:65 +-----BEGIN CERTIFICATE----- +MIIGLzCCBBegAwIBAgIIJaHfyjPLWQIwDQYJKoZIhvcNAQELBQAwgaQxCzAJBgNV +BAYTAlBBMQ8wDQYDVQQIDAZQYW5hbWExFDASBgNVBAcMC1BhbmFtYSBDaXR5MSQw +IgYDVQQKDBtUcnVzdENvciBTeXN0ZW1zIFMuIGRlIFIuTC4xJzAlBgNVBAsMHlRy +dXN0Q29yIENlcnRpZmljYXRlIEF1dGhvcml0eTEfMB0GA1UEAwwWVHJ1c3RDb3Ig +Um9vdENlcnQgQ0EtMjAeFw0xNjAyMDQxMjMyMjNaFw0zNDEyMzExNzI2MzlaMIGk +MQswCQYDVQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEg +Q2l0eTEkMCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYD +VQQLDB5UcnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxHzAdBgNVBAMMFlRy +dXN0Q29yIFJvb3RDZXJ0IENBLTIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQCnIG7CKqJiJJWQdsg4foDSq8GbZQWU9MEKENUCrO2fk8eHyLAnK0IMPQo+ +QVqedd2NyuCb7GgypGmSaIwLgQ5WoD4a3SwlFIIvl9NkRvRUqdw6VC0xK5mC8tkq +1+9xALgxpL56JAfDQiDyitSSBBtlVkxs1Pu2YVpHI7TYabS3OtB0PAx1oYxOdqHp +2yqlO/rOsP9+aij9JxzIsekp8VduZLTQwRVtDr4uDkbIXvRR/u8OYzo7cbrPb1nK +DOObXUm4TOJXsZiKQlecdu/vvdFoqNL0Cbt3Nb4lggjEFixEIFapRBF37120Hape +az6LMvYHL1cEksr1/p3C6eizjkxLAjHZ5DxIgif3GIJ2SDpxsROhOdUuxTTCHWKF +3wP+TfSvPd9cW436cOGlfifHhi5qjxLGhF5DUVCcGZt45vz27Ud+ez1m7xMTiF88 +oWP7+ayHNZ/zgp6kPwqcMWmLmaSISo5uZk3vFsQPeSghYA2FFn3XVDjxklb9tTNM +g9zXEJ9L/cb4Qr26fHMC4P99zVvh1Kxhe1fVSntb1IVYJ12/+CtgrKAmrhQhJ8Z3 +mjOAPF5GP/fDsaOGM8boXg25NSyqRsGFAnWAoOsk+xWq5Gd/bnc/9ASKL3x74xdh +8N0JqSDIvgmk0H5Ew7IwSjiqqewYmgeCK9u4nBit2uBGF6zPXQIDAQABo2MwYTAd +BgNVHQ4EFgQU2f4hQG6UnrybPZx9mCAZ5YwwYrIwHwYDVR0jBBgwFoAU2f4hQG6U +nrybPZx9mCAZ5YwwYrIwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYw +DQYJKoZIhvcNAQELBQADggIBAJ5Fngw7tu/hOsh80QA9z+LqBrWyOrsGS2h60COX +dKcs8AjYeVrXWoSK2BKaG9l9XE1wxaX5q+WjiYndAfrs3fnpkpfbsEZC89NiqpX+ +MWcUaViQCqoL7jcjx1BRtPV+nuN79+TMQjItSQzL/0kMmx40/W5ulop5A7Zv2wnL +/V9lFDfhOPXzYRZY5LVtDQsEGz9QLX+zx3oaFoBg+Iof6Rsqxvm6ARppv9JYx1RX +CI/hOWB3S6xZhBqI8d3LT3jX5+EzLfzuQfogsL7L9ziUwOHQhQ+77Sxzq+3+knYa +ZH9bDTMJBzN7Bj8RpFxwPIXAz+OQqIN3+tvmxYxoZxBnpVIt8MSZj3+/0WvitUfW +2dCFmU2Umw9Lje4AWkcdEQOsQRivh7dvDDqPys/cA8GiCcjl/YBeyGBCARsaU1q7 +N6a3vLqE6R5sGtRk2tRD/pOLS/IseRYQ1JMLiI+h2IYURpFHmygk71dSTlxCnKr3 +Sewn6EAes6aJInKc9Q0ztFijMDvd1GpUk74aTfOTlPf8hAs/hCBcNANExdqtvArB +As8e5ZTZ845b2EzwnexhF7sUMlQMAimTHpKG9n/v55IFDlndmQguLvqcAFLTxWYp +5KeXRKQOKIETNcX2b2TmQcTVL8w0RSXPQQCWPUouwpaYT05KnJe32x+SMsj/D1Fu +1uwJ +-----END CERTIFICATE----- + +# Issuer: CN=TrustCor ECA-1 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority +# Subject: CN=TrustCor ECA-1 O=TrustCor Systems S. de R.L. 
OU=TrustCor Certificate Authority +# Label: "TrustCor ECA-1" +# Serial: 9548242946988625984 +# MD5 Fingerprint: 27:92:23:1d:0a:f5:40:7c:e9:e6:6b:9d:d8:f5:e7:6c +# SHA1 Fingerprint: 58:d1:df:95:95:67:6b:63:c0:f0:5b:1c:17:4d:8b:84:0b:c8:78:bd +# SHA256 Fingerprint: 5a:88:5d:b1:9c:01:d9:12:c5:75:93:88:93:8c:af:bb:df:03:1a:b2:d4:8e:91:ee:15:58:9b:42:97:1d:03:9c +-----BEGIN CERTIFICATE----- +MIIEIDCCAwigAwIBAgIJAISCLF8cYtBAMA0GCSqGSIb3DQEBCwUAMIGcMQswCQYD +VQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEgQ2l0eTEk +MCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYDVQQLDB5U +cnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxFzAVBgNVBAMMDlRydXN0Q29y +IEVDQS0xMB4XDTE2MDIwNDEyMzIzM1oXDTI5MTIzMTE3MjgwN1owgZwxCzAJBgNV +BAYTAlBBMQ8wDQYDVQQIDAZQYW5hbWExFDASBgNVBAcMC1BhbmFtYSBDaXR5MSQw +IgYDVQQKDBtUcnVzdENvciBTeXN0ZW1zIFMuIGRlIFIuTC4xJzAlBgNVBAsMHlRy +dXN0Q29yIENlcnRpZmljYXRlIEF1dGhvcml0eTEXMBUGA1UEAwwOVHJ1c3RDb3Ig +RUNBLTEwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDPj+ARtZ+odnbb +3w9U73NjKYKtR8aja+3+XzP4Q1HpGjORMRegdMTUpwHmspI+ap3tDvl0mEDTPwOA +BoJA6LHip1GnHYMma6ve+heRK9jGrB6xnhkB1Zem6g23xFUfJ3zSCNV2HykVh0A5 +3ThFEXXQmqc04L/NyFIduUd+Dbi7xgz2c1cWWn5DkR9VOsZtRASqnKmcp0yJF4Ou +owReUoCLHhIlERnXDH19MURB6tuvsBzvgdAsxZohmz3tQjtQJvLsznFhBmIhVE5/ +wZ0+fyCMgMsq2JdiyIMzkX2woloPV+g7zPIlstR8L+xNxqE6FXrntl019fZISjZF +ZtS6mFjBAgMBAAGjYzBhMB0GA1UdDgQWBBREnkj1zG1I1KBLf/5ZJC+Dl5mahjAf +BgNVHSMEGDAWgBREnkj1zG1I1KBLf/5ZJC+Dl5mahjAPBgNVHRMBAf8EBTADAQH/ +MA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsFAAOCAQEABT41XBVwm8nHc2Fv +civUwo/yQ10CzsSUuZQRg2dd4mdsdXa/uwyqNsatR5Nj3B5+1t4u/ukZMjgDfxT2 +AHMsWbEhBuH7rBiVDKP/mZb3Kyeb1STMHd3BOuCYRLDE5D53sXOpZCz2HAF8P11F +hcCF5yWPldwX8zyfGm6wyuMdKulMY/okYWLW2n62HGz1Ah3UKt1VkOsqEUc8Ll50 +soIipX1TH0XsJ5F95yIW6MBoNtjG8U+ARDL54dHRHareqKucBK+tIA5kmE2la8BI +WJZpTdwHjFGTot+fDz2LYLSCjaoITmJF4PkL0uDgPFveXHEnJcLmA4GLEFPjx1Wi +tJ/X5g== +-----END CERTIFICATE----- + +# Issuer: CN=SSL.com Root Certification Authority RSA O=SSL Corporation +# Subject: CN=SSL.com Root Certification Authority RSA O=SSL Corporation +# Label: "SSL.com Root Certification Authority RSA" +# Serial: 8875640296558310041 +# MD5 Fingerprint: 86:69:12:c0:70:f1:ec:ac:ac:c2:d5:bc:a5:5b:a1:29 +# SHA1 Fingerprint: b7:ab:33:08:d1:ea:44:77:ba:14:80:12:5a:6f:bd:a9:36:49:0c:bb +# SHA256 Fingerprint: 85:66:6a:56:2e:e0:be:5c:e9:25:c1:d8:89:0a:6f:76:a8:7e:c1:6d:4d:7d:5f:29:ea:74:19:cf:20:12:3b:69 +-----BEGIN CERTIFICATE----- +MIIF3TCCA8WgAwIBAgIIeyyb0xaAMpkwDQYJKoZIhvcNAQELBQAwfDELMAkGA1UE +BhMCVVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQK +DA9TU0wgQ29ycG9yYXRpb24xMTAvBgNVBAMMKFNTTC5jb20gUm9vdCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eSBSU0EwHhcNMTYwMjEyMTczOTM5WhcNNDEwMjEyMTcz +OTM5WjB8MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hv +dXN0b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjExMC8GA1UEAwwoU1NMLmNv +bSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IFJTQTCCAiIwDQYJKoZIhvcN +AQEBBQADggIPADCCAgoCggIBAPkP3aMrfcvQKv7sZ4Wm5y4bunfh4/WvpOz6Sl2R +xFdHaxh3a3by/ZPkPQ/CFp4LZsNWlJ4Xg4XOVu/yFv0AYvUiCVToZRdOQbngT0aX +qhvIuG5iXmmxX9sqAn78bMrzQdjt0Oj8P2FI7bADFB0QDksZ4LtO7IZl/zbzXmcC +C52GVWH9ejjt/uIZALdvoVBidXQ8oPrIJZK0bnoix/geoeOy3ZExqysdBP+lSgQ3 +6YWkMyv94tZVNHwZpEpox7Ko07fKoZOI68GXvIz5HdkihCR0xwQ9aqkpk8zruFvh +/l8lqjRYyMEjVJ0bmBHDOJx+PYZspQ9AhnwC9FwCTyjLrnGfDzrIM/4RJTXq/LrF +YD3ZfBjVsqnTdXgDciLKOsMf7yzlLqn6niy2UUb9rwPW6mBo6oUWNmuF6R7As93E +JNyAKoFBbZQ+yODJgUEAnl6/f8UImKIYLEJAs/lvOCdLToD0PYFH4Ih86hzOtXVc +US4cK38acijnALXRdMbX5J+tB5O2UzU1/Dfkw/ZdFr4hc96SCvigY2q8lpJqPvi8 +ZVWb3vUNiSYE/CUapiVpy8JtynziWV+XrOvvLsi81xtZPCvM8hnIk2snYxnP/Okm 
++Mpxm3+T/jRnhE6Z6/yzeAkzcLpmpnbtG3PrGqUNxCITIJRWCk4sbE6x/c+cCbqi +M+2HAgMBAAGjYzBhMB0GA1UdDgQWBBTdBAkHovV6fVJTEpKV7jiAJQ2mWTAPBgNV +HRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFN0ECQei9Xp9UlMSkpXuOIAlDaZZMA4G +A1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsFAAOCAgEAIBgRlCn7Jp0cHh5wYfGV +cpNxJK1ok1iOMq8bs3AD/CUrdIWQPXhq9LmLpZc7tRiRux6n+UBbkflVma8eEdBc +Hadm47GUBwwyOabqG7B52B2ccETjit3E+ZUfijhDPwGFpUenPUayvOUiaPd7nNgs +PgohyC0zrL/FgZkxdMF1ccW+sfAjRfSda/wZY52jvATGGAslu1OJD7OAUN5F7kR/ +q5R4ZJjT9ijdh9hwZXT7DrkT66cPYakylszeu+1jTBi7qUD3oFRuIIhxdRjqerQ0 +cuAjJ3dctpDqhiVAq+8zD8ufgr6iIPv2tS0a5sKFsXQP+8hlAqRSAUfdSSLBv9jr +a6x+3uxjMxW3IwiPxg+NQVrdjsW5j+VFP3jbutIbQLH+cU0/4IGiul607BXgk90I +H37hVZkLId6Tngr75qNJvTYw/ud3sqB1l7UtgYgXZSD32pAAn8lSzDLKNXz1PQ/Y +K9f1JmzJBjSWFupwWRoyeXkLtoh/D1JIPb9s2KJELtFOt3JY04kTlf5Eq/jXixtu +nLwsoFvVagCvXzfh1foQC5ichucmj87w7G6KVwuA406ywKBjYZC6VWg3dGq2ktuf +oYYitmUnDuy2n0Jg5GfCtdpBC8TTi2EbvPofkSvXRAdeuims2cXp71NIWuuA8ShY +Ic2wBlX7Jz9TkHCpBB5XJ7k= +-----END CERTIFICATE----- + +# Issuer: CN=SSL.com Root Certification Authority ECC O=SSL Corporation +# Subject: CN=SSL.com Root Certification Authority ECC O=SSL Corporation +# Label: "SSL.com Root Certification Authority ECC" +# Serial: 8495723813297216424 +# MD5 Fingerprint: 2e:da:e4:39:7f:9c:8f:37:d1:70:9f:26:17:51:3a:8e +# SHA1 Fingerprint: c3:19:7c:39:24:e6:54:af:1b:c4:ab:20:95:7a:e2:c3:0e:13:02:6a +# SHA256 Fingerprint: 34:17:bb:06:cc:60:07:da:1b:96:1c:92:0b:8a:b4:ce:3f:ad:82:0e:4a:a3:0b:9a:cb:c4:a7:4e:bd:ce:bc:65 +-----BEGIN CERTIFICATE----- +MIICjTCCAhSgAwIBAgIIdebfy8FoW6gwCgYIKoZIzj0EAwIwfDELMAkGA1UEBhMC +VVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQKDA9T +U0wgQ29ycG9yYXRpb24xMTAvBgNVBAMMKFNTTC5jb20gUm9vdCBDZXJ0aWZpY2F0 +aW9uIEF1dGhvcml0eSBFQ0MwHhcNMTYwMjEyMTgxNDAzWhcNNDEwMjEyMTgxNDAz +WjB8MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hvdXN0 +b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjExMC8GA1UEAwwoU1NMLmNvbSBS +b290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IEVDQzB2MBAGByqGSM49AgEGBSuB +BAAiA2IABEVuqVDEpiM2nl8ojRfLliJkP9x6jh3MCLOicSS6jkm5BBtHllirLZXI +7Z4INcgn64mMU1jrYor+8FsPazFSY0E7ic3s7LaNGdM0B9y7xgZ/wkWV7Mt/qCPg +CemB+vNH06NjMGEwHQYDVR0OBBYEFILRhXMw5zUE044CkvvlpNHEIejNMA8GA1Ud +EwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUgtGFczDnNQTTjgKS++Wk0cQh6M0wDgYD +VR0PAQH/BAQDAgGGMAoGCCqGSM49BAMCA2cAMGQCMG/n61kRpGDPYbCWe+0F+S8T +kdzt5fxQaxFGRrMcIQBiu77D5+jNB5n5DQtdcj7EqgIwH7y6C+IwJPt8bYBVCpk+ +gA0z5Wajs6O7pdWLjwkspl1+4vAHCGht0nxpbl/f5Wpl +-----END CERTIFICATE----- + +# Issuer: CN=SSL.com EV Root Certification Authority RSA R2 O=SSL Corporation +# Subject: CN=SSL.com EV Root Certification Authority RSA R2 O=SSL Corporation +# Label: "SSL.com EV Root Certification Authority RSA R2" +# Serial: 6248227494352943350 +# MD5 Fingerprint: e1:1e:31:58:1a:ae:54:53:02:f6:17:6a:11:7b:4d:95 +# SHA1 Fingerprint: 74:3a:f0:52:9b:d0:32:a0:f4:4a:83:cd:d4:ba:a9:7b:7c:2e:c4:9a +# SHA256 Fingerprint: 2e:7b:f1:6c:c2:24:85:a7:bb:e2:aa:86:96:75:07:61:b0:ae:39:be:3b:2f:e9:d0:cc:6d:4e:f7:34:91:42:5c +-----BEGIN CERTIFICATE----- +MIIF6zCCA9OgAwIBAgIIVrYpzTS8ePYwDQYJKoZIhvcNAQELBQAwgYIxCzAJBgNV +BAYTAlVTMQ4wDAYDVQQIDAVUZXhhczEQMA4GA1UEBwwHSG91c3RvbjEYMBYGA1UE +CgwPU1NMIENvcnBvcmF0aW9uMTcwNQYDVQQDDC5TU0wuY29tIEVWIFJvb3QgQ2Vy +dGlmaWNhdGlvbiBBdXRob3JpdHkgUlNBIFIyMB4XDTE3MDUzMTE4MTQzN1oXDTQy +MDUzMDE4MTQzN1owgYIxCzAJBgNVBAYTAlVTMQ4wDAYDVQQIDAVUZXhhczEQMA4G +A1UEBwwHSG91c3RvbjEYMBYGA1UECgwPU1NMIENvcnBvcmF0aW9uMTcwNQYDVQQD +DC5TU0wuY29tIEVWIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgUlNBIFIy +MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAjzZlQOHWTcDXtOlG2mvq 
+M0fNTPl9fb69LT3w23jhhqXZuglXaO1XPqDQCEGD5yhBJB/jchXQARr7XnAjssuf +OePPxU7Gkm0mxnu7s9onnQqG6YE3Bf7wcXHswxzpY6IXFJ3vG2fThVUCAtZJycxa +4bH3bzKfydQ7iEGonL3Lq9ttewkfokxykNorCPzPPFTOZw+oz12WGQvE43LrrdF9 +HSfvkusQv1vrO6/PgN3B0pYEW3p+pKk8OHakYo6gOV7qd89dAFmPZiw+B6KjBSYR +aZfqhbcPlgtLyEDhULouisv3D5oi53+aNxPN8k0TayHRwMwi8qFG9kRpnMphNQcA +b9ZhCBHqurj26bNg5U257J8UZslXWNvNh2n4ioYSA0e/ZhN2rHd9NCSFg83XqpyQ +Gp8hLH94t2S42Oim9HizVcuE0jLEeK6jj2HdzghTreyI/BXkmg3mnxp3zkyPuBQV +PWKchjgGAGYS5Fl2WlPAApiiECtoRHuOec4zSnaqW4EWG7WK2NAAe15itAnWhmMO +pgWVSbooi4iTsjQc2KRVbrcc0N6ZVTsj9CLg+SlmJuwgUHfbSguPvuUCYHBBXtSu +UDkiFCbLsjtzdFVHB3mBOagwE0TlBIqulhMlQg+5U8Sb/M3kHN48+qvWBkofZ6aY +MBzdLNvcGJVXZsb/XItW9XcCAwEAAaNjMGEwDwYDVR0TAQH/BAUwAwEB/zAfBgNV +HSMEGDAWgBT5YLvU49U09rj1BoAlp3PbRmmonjAdBgNVHQ4EFgQU+WC71OPVNPa4 +9QaAJadz20ZpqJ4wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4ICAQBW +s47LCp1Jjr+kxJG7ZhcFUZh1++VQLHqe8RT6q9OKPv+RKY9ji9i0qVQBDb6Thi/5 +Sm3HXvVX+cpVHBK+Rw82xd9qt9t1wkclf7nxY/hoLVUE0fKNsKTPvDxeH3jnpaAg +cLAExbf3cqfeIg29MyVGjGSSJuM+LmOW2puMPfgYCdcDzH2GguDKBAdRUNf/ktUM +79qGn5nX67evaOI5JpS6aLe/g9Pqemc9YmeuJeVy6OLk7K4S9ksrPJ/psEDzOFSz +/bdoyNrGj1E8svuR3Bznm53htw1yj+KkxKl4+esUrMZDBcJlOSgYAsOCsp0FvmXt +ll9ldDz7CTUue5wT/RsPXcdtgTpWD8w74a8CLyKsRspGPKAcTNZEtF4uXBVmCeEm +Kf7GUmG6sXP/wwyc5WxqlD8UykAWlYTzWamsX0xhk23RO8yilQwipmdnRC652dKK +QbNmC1r7fSOl8hqw/96bg5Qu0T/fkreRrwU7ZcegbLHNYhLDkBvjJc40vG93drEQ +w/cFGsDWr3RiSBd3kmmQYRzelYB0VI8YHMPzA9C/pEN1hlMYegouCRw2n5H9gooi +S9EOUCXdywMMF8mDAAhONU2Ki+3wApRmLER/y5UnlhetCTCstnEXbosX9hwJ1C07 +mKVx01QT2WDz9UtmT/rx7iASjbSsV7FFY6GsdqnC+w== +-----END CERTIFICATE----- + +# Issuer: CN=SSL.com EV Root Certification Authority ECC O=SSL Corporation +# Subject: CN=SSL.com EV Root Certification Authority ECC O=SSL Corporation +# Label: "SSL.com EV Root Certification Authority ECC" +# Serial: 3182246526754555285 +# MD5 Fingerprint: 59:53:22:65:83:42:01:54:c0:ce:42:b9:5a:7c:f2:90 +# SHA1 Fingerprint: 4c:dd:51:a3:d1:f5:20:32:14:b0:c6:c5:32:23:03:91:c7:46:42:6d +# SHA256 Fingerprint: 22:a2:c1:f7:bd:ed:70:4c:c1:e7:01:b5:f4:08:c3:10:88:0f:e9:56:b5:de:2a:4a:44:f9:9c:87:3a:25:a7:c8 +-----BEGIN CERTIFICATE----- +MIIClDCCAhqgAwIBAgIILCmcWxbtBZUwCgYIKoZIzj0EAwIwfzELMAkGA1UEBhMC +VVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQKDA9T +U0wgQ29ycG9yYXRpb24xNDAyBgNVBAMMK1NTTC5jb20gRVYgUm9vdCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eSBFQ0MwHhcNMTYwMjEyMTgxNTIzWhcNNDEwMjEyMTgx +NTIzWjB/MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hv +dXN0b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjE0MDIGA1UEAwwrU1NMLmNv +bSBFViBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IEVDQzB2MBAGByqGSM49 +AgEGBSuBBAAiA2IABKoSR5CYG/vvw0AHgyBO8TCCogbR8pKGYfL2IWjKAMTH6kMA +VIbc/R/fALhBYlzccBYy3h+Z1MzFB8gIH2EWB1E9fVwHU+M1OIzfzZ/ZLg1Kthku +WnBaBu2+8KGwytAJKaNjMGEwHQYDVR0OBBYEFFvKXuXe0oGqzagtZFG22XKbl+ZP +MA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUW8pe5d7SgarNqC1kUbbZcpuX +5k8wDgYDVR0PAQH/BAQDAgGGMAoGCCqGSM49BAMCA2gAMGUCMQCK5kCJN+vp1RPZ +ytRrJPOwPYdGWBrssd9v+1a6cGvHOMzosYxPD/fxZ3YOg9AeUY8CMD32IygmTMZg +h5Mmm7I1HrrW9zzRHM76JTymGoEVW/MSD2zuZYrJh6j5B+BimoxcSg== +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R6 +# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R6 +# Label: "GlobalSign Root CA - R6" +# Serial: 1417766617973444989252670301619537 +# MD5 Fingerprint: 4f:dd:07:e4:d4:22:64:39:1e:0c:37:42:ea:d1:c6:ae +# SHA1 Fingerprint: 80:94:64:0e:b5:a7:a1:ca:11:9c:1f:dd:d5:9f:81:02:63:a7:fb:d1 +# SHA256 Fingerprint: 
2c:ab:ea:fe:37:d0:6c:a2:2a:ba:73:91:c0:03:3d:25:98:29:52:c4:53:64:73:49:76:3a:3a:b5:ad:6c:cf:69 +-----BEGIN CERTIFICATE----- +MIIFgzCCA2ugAwIBAgIORea7A4Mzw4VlSOb/RVEwDQYJKoZIhvcNAQEMBQAwTDEg +MB4GA1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjYxEzARBgNVBAoTCkdsb2Jh +bFNpZ24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMTQxMjEwMDAwMDAwWhcNMzQx +MjEwMDAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSNjET +MBEGA1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCAiIwDQYJ +KoZIhvcNAQEBBQADggIPADCCAgoCggIBAJUH6HPKZvnsFMp7PPcNCPG0RQssgrRI +xutbPK6DuEGSMxSkb3/pKszGsIhrxbaJ0cay/xTOURQh7ErdG1rG1ofuTToVBu1k +ZguSgMpE3nOUTvOniX9PeGMIyBJQbUJmL025eShNUhqKGoC3GYEOfsSKvGRMIRxD +aNc9PIrFsmbVkJq3MQbFvuJtMgamHvm566qjuL++gmNQ0PAYid/kD3n16qIfKtJw +LnvnvJO7bVPiSHyMEAc4/2ayd2F+4OqMPKq0pPbzlUoSB239jLKJz9CgYXfIWHSw +1CM69106yqLbnQneXUQtkPGBzVeS+n68UARjNN9rkxi+azayOeSsJDa38O+2HBNX +k7besvjihbdzorg1qkXy4J02oW9UivFyVm4uiMVRQkQVlO6jxTiWm05OWgtH8wY2 +SXcwvHE35absIQh1/OZhFj931dmRl4QKbNQCTXTAFO39OfuD8l4UoQSwC+n+7o/h +bguyCLNhZglqsQY6ZZZZwPA1/cnaKI0aEYdwgQqomnUdnjqGBQCe24DWJfncBZ4n +WUx2OVvq+aWh2IMP0f/fMBH5hc8zSPXKbWQULHpYT9NLCEnFlWQaYw55PfWzjMpY +rZxCRXluDocZXFSxZba/jJvcE+kNb7gu3GduyYsRtYQUigAZcIN5kZeR1Bonvzce +MgfYFGM8KEyvAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTAD +AQH/MB0GA1UdDgQWBBSubAWjkxPioufi1xzWx/B/yGdToDAfBgNVHSMEGDAWgBSu +bAWjkxPioufi1xzWx/B/yGdToDANBgkqhkiG9w0BAQwFAAOCAgEAgyXt6NH9lVLN +nsAEoJFp5lzQhN7craJP6Ed41mWYqVuoPId8AorRbrcWc+ZfwFSY1XS+wc3iEZGt +Ixg93eFyRJa0lV7Ae46ZeBZDE1ZXs6KzO7V33EByrKPrmzU+sQghoefEQzd5Mr61 +55wsTLxDKZmOMNOsIeDjHfrYBzN2VAAiKrlNIC5waNrlU/yDXNOd8v9EDERm8tLj +vUYAGm0CuiVdjaExUd1URhxN25mW7xocBFymFe944Hn+Xds+qkxV/ZoVqW/hpvvf +cDDpw+5CRu3CkwWJ+n1jez/QcYF8AOiYrg54NMMl+68KnyBr3TsTjxKM4kEaSHpz +oHdpx7Zcf4LIHv5YGygrqGytXm3ABdJ7t+uA/iU3/gKbaKxCXcPu9czc8FB10jZp +nOZ7BN9uBmm23goJSFmH63sUYHpkqmlD75HHTOwY3WzvUy2MmeFe8nI+z1TIvWfs +pA9MRf/TuTAjB0yPEL+GltmZWrSZVxykzLsViVO6LAUP5MSeGbEYNNVMnbrt9x+v +JJUEeKgDu+6B5dpffItKoZB0JaezPkvILFa9x8jvOOJckvB595yEunQtYQEgfn7R +8k8HWV+LLUNS60YMlOH1Zkd5d9VUWx+tJDfLRVpOoERIyNiwmcUVhAn21klJwGW4 +5hpxbqCo8YLoRT5s1gLXCmeDBVrJpBA= +-----END CERTIFICATE----- + +# Issuer: CN=OISTE WISeKey Global Root GC CA O=WISeKey OU=OISTE Foundation Endorsed +# Subject: CN=OISTE WISeKey Global Root GC CA O=WISeKey OU=OISTE Foundation Endorsed +# Label: "OISTE WISeKey Global Root GC CA" +# Serial: 44084345621038548146064804565436152554 +# MD5 Fingerprint: a9:d6:b9:2d:2f:93:64:f8:a5:69:ca:91:e9:68:07:23 +# SHA1 Fingerprint: e0:11:84:5e:34:de:be:88:81:b9:9c:f6:16:26:d1:96:1f:c3:b9:31 +# SHA256 Fingerprint: 85:60:f9:1c:36:24:da:ba:95:70:b5:fe:a0:db:e3:6f:f1:1a:83:23:be:94:86:85:4f:b3:f3:4a:55:71:19:8d +-----BEGIN CERTIFICATE----- +MIICaTCCAe+gAwIBAgIQISpWDK7aDKtARb8roi066jAKBggqhkjOPQQDAzBtMQsw +CQYDVQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEiMCAGA1UECxMZT0lTVEUgRm91 +bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9iYWwg +Um9vdCBHQyBDQTAeFw0xNzA1MDkwOTQ4MzRaFw00MjA1MDkwOTU4MzNaMG0xCzAJ +BgNVBAYTAkNIMRAwDgYDVQQKEwdXSVNlS2V5MSIwIAYDVQQLExlPSVNURSBGb3Vu +ZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5IEdsb2JhbCBS +b290IEdDIENBMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAETOlQwMYPchi82PG6s4ni +eUqjFqdrVCTbUf/q9Akkwwsin8tqJ4KBDdLArzHkdIJuyiXZjHWd8dvQmqJLIX4W +p2OQ0jnUsYd4XxiWD1AbNTcPasbc2RNNpI6QN+a9WzGRo1QwUjAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUSIcUrOPDnpBgOtfKie7T +rYy0UGYwEAYJKwYBBAGCNxUBBAMCAQAwCgYIKoZIzj0EAwMDaAAwZQIwJsdpW9zV +57LnyAyMjMPdeYwbY9XJUpROTYJKcx6ygISpJcBMWm1JKWB4E+J+SOtkAjEA2zQg +Mgj/mkkCtojeFK9dbJlxjRo/i9fgojaGHAeCOnZT/cKi7e97sIBPWA9LUzm9 +-----END CERTIFICATE----- + +# 
Issuer: CN=GTS Root R1 O=Google Trust Services LLC +# Subject: CN=GTS Root R1 O=Google Trust Services LLC +# Label: "GTS Root R1" +# Serial: 146587175971765017618439757810265552097 +# MD5 Fingerprint: 82:1a:ef:d4:d2:4a:f2:9f:e2:3d:97:06:14:70:72:85 +# SHA1 Fingerprint: e1:c9:50:e6:ef:22:f8:4c:56:45:72:8b:92:20:60:d7:d5:a7:a3:e8 +# SHA256 Fingerprint: 2a:57:54:71:e3:13:40:bc:21:58:1c:bd:2c:f1:3e:15:84:63:20:3e:ce:94:bc:f9:d3:cc:19:6b:f0:9a:54:72 +-----BEGIN CERTIFICATE----- +MIIFWjCCA0KgAwIBAgIQbkepxUtHDA3sM9CJuRz04TANBgkqhkiG9w0BAQwFADBH +MQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExM +QzEUMBIGA1UEAxMLR1RTIFJvb3QgUjEwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIy +MDAwMDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNl +cnZpY2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjEwggIiMA0GCSqGSIb3DQEB +AQUAA4ICDwAwggIKAoICAQC2EQKLHuOhd5s73L+UPreVp0A8of2C+X0yBoJx9vaM +f/vo27xqLpeXo4xL+Sv2sfnOhB2x+cWX3u+58qPpvBKJXqeqUqv4IyfLpLGcY9vX +mX7wCl7raKb0xlpHDU0QM+NOsROjyBhsS+z8CZDfnWQpJSMHobTSPS5g4M/SCYe7 +zUjwTcLCeoiKu7rPWRnWr4+wB7CeMfGCwcDfLqZtbBkOtdh+JhpFAz2weaSUKK0P +fyblqAj+lug8aJRT7oM6iCsVlgmy4HqMLnXWnOunVmSPlk9orj2XwoSPwLxAwAtc +vfaHszVsrBhQf4TgTM2S0yDpM7xSma8ytSmzJSq0SPly4cpk9+aCEI3oncKKiPo4 +Zor8Y/kB+Xj9e1x3+naH+uzfsQ55lVe0vSbv1gHR6xYKu44LtcXFilWr06zqkUsp +zBmkMiVOKvFlRNACzqrOSbTqn3yDsEB750Orp2yjj32JgfpMpf/VjsPOS+C12LOO +Rc92wO1AK/1TD7Cn1TsNsYqiA94xrcx36m97PtbfkSIS5r762DL8EGMUUXLeXdYW +k70paDPvOmbsB4om3xPXV2V4J95eSRQAogB/mqghtqmxlbCluQ0WEdrHbEg8QOB+ +DVrNVjzRlwW5y0vtOUucxD/SVRNuJLDWcfr0wbrM7Rv1/oFB2ACYPTrIrnqYNxgF +lQIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV +HQ4EFgQU5K8rJnEaK0gnhS9SZizv8IkTcT4wDQYJKoZIhvcNAQEMBQADggIBADiW +Cu49tJYeX++dnAsznyvgyv3SjgofQXSlfKqE1OXyHuY3UjKcC9FhHb8owbZEKTV1 +d5iyfNm9dKyKaOOpMQkpAWBz40d8U6iQSifvS9efk+eCNs6aaAyC58/UEBZvXw6Z +XPYfcX3v73svfuo21pdwCxXu11xWajOl40k4DLh9+42FpLFZXvRq4d2h9mREruZR +gyFmxhE+885H7pwoHyXa/6xmld01D1zvICxi/ZG6qcz8WpyTgYMpl0p8WnK0OdC3 +d8t5/Wk6kjftbjhlRn7pYL15iJdfOBL07q9bgsiG1eGZbYwE8na6SfZu6W0eX6Dv +J4J2QPim01hcDyxC2kLGe4g0x8HYRZvBPsVhHdljUEn2NIVq4BjFbkerQUIpm/Zg +DdIx02OYI5NaAIFItO/Nis3Jz5nu2Z6qNuFoS3FJFDYoOj0dzpqPJeaAcWErtXvM ++SUWgeExX6GjfhaknBZqlxi9dnKlC54dNuYvoS++cJEPqOba+MSSQGwlfnuzCdyy +F62ARPBopY+Udf90WuioAnwMCeKpSwughQtiue+hMZL77/ZRBIls6Kl0obsXs7X9 +SQ98POyDGCBDTtWTurQ0sR8WNh8M5mQ5Fkzc4P4dyKliPUDqysU0ArSuiYgzNdws +E3PYJ/HQcu51OyLemGhmW/HGY0dVHLqlCFF1pkgl +-----END CERTIFICATE----- + +# Issuer: CN=GTS Root R2 O=Google Trust Services LLC +# Subject: CN=GTS Root R2 O=Google Trust Services LLC +# Label: "GTS Root R2" +# Serial: 146587176055767053814479386953112547951 +# MD5 Fingerprint: 44:ed:9a:0e:a4:09:3b:00:f2:ae:4c:a3:c6:61:b0:8b +# SHA1 Fingerprint: d2:73:96:2a:2a:5e:39:9f:73:3f:e1:c7:1e:64:3f:03:38:34:fc:4d +# SHA256 Fingerprint: c4:5d:7b:b0:8e:6d:67:e6:2e:42:35:11:0b:56:4e:5f:78:fd:92:ef:05:8c:84:0a:ea:4e:64:55:d7:58:5c:60 +-----BEGIN CERTIFICATE----- +MIIFWjCCA0KgAwIBAgIQbkepxlqz5yDFMJo/aFLybzANBgkqhkiG9w0BAQwFADBH +MQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExM +QzEUMBIGA1UEAxMLR1RTIFJvb3QgUjIwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIy +MDAwMDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNl +cnZpY2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjIwggIiMA0GCSqGSIb3DQEB +AQUAA4ICDwAwggIKAoICAQDO3v2m++zsFDQ8BwZabFn3GTXd98GdVarTzTukk3Lv +CvptnfbwhYBboUhSnznFt+4orO/LdmgUud+tAWyZH8QiHZ/+cnfgLFuv5AS/T3Kg +GjSY6Dlo7JUle3ah5mm5hRm9iYz+re026nO8/4Piy33B0s5Ks40FnotJk9/BW9Bu +XvAuMC6C/Pq8tBcKSOWIm8Wba96wyrQD8Nr0kLhlZPdcTK3ofmZemde4wj7I0BOd +re7kRXuJVfeKH2JShBKzwkCX44ofR5GmdFrS+LFjKBC4swm4VndAoiaYecb+3yXu 
+PuWgf9RhD1FLPD+M2uFwdNjCaKH5wQzpoeJ/u1U8dgbuak7MkogwTZq9TwtImoS1 +mKPV+3PBV2HdKFZ1E66HjucMUQkQdYhMvI35ezzUIkgfKtzra7tEscszcTJGr61K +8YzodDqs5xoic4DSMPclQsciOzsSrZYuxsN2B6ogtzVJV+mSSeh2FnIxZyuWfoqj +x5RWIr9qS34BIbIjMt/kmkRtWVtd9QCgHJvGeJeNkP+byKq0rxFROV7Z+2et1VsR +nTKaG73VululycslaVNVJ1zgyjbLiGH7HrfQy+4W+9OmTN6SpdTi3/UGVN4unUu0 +kzCqgc7dGtxRcw1PcOnlthYhGXmy5okLdWTK1au8CcEYof/UVKGFPP0UJAOyh9Ok +twIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV +HQ4EFgQUu//KjiOfT5nK2+JopqUVJxce2Q4wDQYJKoZIhvcNAQEMBQADggIBALZp +8KZ3/p7uC4Gt4cCpx/k1HUCCq+YEtN/L9x0Pg/B+E02NjO7jMyLDOfxA325BS0JT +vhaI8dI4XsRomRyYUpOM52jtG2pzegVATX9lO9ZY8c6DR2Dj/5epnGB3GFW1fgiT +z9D2PGcDFWEJ+YF59exTpJ/JjwGLc8R3dtyDovUMSRqodt6Sm2T4syzFJ9MHwAiA +pJiS4wGWAqoC7o87xdFtCjMwc3i5T1QWvwsHoaRc5svJXISPD+AVdyx+Jn7axEvb +pxZ3B7DNdehyQtaVhJ2Gg/LkkM0JR9SLA3DaWsYDQvTtN6LwG1BUSw7YhN4ZKJmB +R64JGz9I0cNv4rBgF/XuIwKl2gBbbZCr7qLpGzvpx0QnRY5rn/WkhLx3+WuXrD5R +RaIRpsyF7gpo8j5QOHokYh4XIDdtak23CZvJ/KRY9bb7nE4Yu5UC56GtmwfuNmsk +0jmGwZODUNKBRqhfYlcsu2xkiAhu7xNUX90txGdj08+JN7+dIPT7eoOboB6BAFDC +5AwiWVIQ7UNWhwD4FFKnHYuTjKJNRn8nxnGbJN7k2oaLDX5rIMHAnuFl2GqjpuiF +izoHCBy69Y9Vmhh1fuXsgWbRIXOhNUQLgD1bnF5vKheW0YMjiGZt5obicDIvUiLn +yOd/xCxgXS/Dr55FBcOEArf9LAhST4Ldo/DUhgkC +-----END CERTIFICATE----- + +# Issuer: CN=GTS Root R3 O=Google Trust Services LLC +# Subject: CN=GTS Root R3 O=Google Trust Services LLC +# Label: "GTS Root R3" +# Serial: 146587176140553309517047991083707763997 +# MD5 Fingerprint: 1a:79:5b:6b:04:52:9c:5d:c7:74:33:1b:25:9a:f9:25 +# SHA1 Fingerprint: 30:d4:24:6f:07:ff:db:91:89:8a:0b:e9:49:66:11:eb:8c:5e:46:e5 +# SHA256 Fingerprint: 15:d5:b8:77:46:19:ea:7d:54:ce:1c:a6:d0:b0:c4:03:e0:37:a9:17:f1:31:e8:a0:4e:1e:6b:7a:71:ba:bc:e5 +-----BEGIN CERTIFICATE----- +MIICDDCCAZGgAwIBAgIQbkepx2ypcyRAiQ8DVd2NHTAKBggqhkjOPQQDAzBHMQsw +CQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEU +MBIGA1UEAxMLR1RTIFJvb3QgUjMwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAw +MDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZp +Y2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjMwdjAQBgcqhkjOPQIBBgUrgQQA +IgNiAAQfTzOHMymKoYTey8chWEGJ6ladK0uFxh1MJ7x/JlFyb+Kf1qPKzEUURout +736GjOyxfi//qXGdGIRFBEFVbivqJn+7kAHjSxm65FSWRQmx1WyRRK2EE46ajA2A +DDL24CejQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1Ud +DgQWBBTB8Sa6oC2uhYHP0/EqEr24Cmf9vDAKBggqhkjOPQQDAwNpADBmAjEAgFuk +fCPAlaUs3L6JbyO5o91lAFJekazInXJ0glMLfalAvWhgxeG4VDvBNhcl2MG9AjEA +njWSdIUlUfUk7GRSJFClH9voy8l27OyCbvWFGFPouOOaKaqW04MjyaR7YbPMAuhd +-----END CERTIFICATE----- + +# Issuer: CN=GTS Root R4 O=Google Trust Services LLC +# Subject: CN=GTS Root R4 O=Google Trust Services LLC +# Label: "GTS Root R4" +# Serial: 146587176229350439916519468929765261721 +# MD5 Fingerprint: 5d:b6:6a:c4:60:17:24:6a:1a:99:a8:4b:ee:5e:b4:26 +# SHA1 Fingerprint: 2a:1d:60:27:d9:4a:b1:0a:1c:4d:91:5c:cd:33:a0:cb:3e:2d:54:cb +# SHA256 Fingerprint: 71:cc:a5:39:1f:9e:79:4b:04:80:25:30:b3:63:e1:21:da:8a:30:43:bb:26:66:2f:ea:4d:ca:7f:c9:51:a4:bd +-----BEGIN CERTIFICATE----- +MIICCjCCAZGgAwIBAgIQbkepyIuUtui7OyrYorLBmTAKBggqhkjOPQQDAzBHMQsw +CQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEU +MBIGA1UEAxMLR1RTIFJvb3QgUjQwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAw +MDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZp +Y2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjQwdjAQBgcqhkjOPQIBBgUrgQQA +IgNiAATzdHOnaItgrkO4NcWBMHtLSZ37wWHO5t5GvWvVYRg1rkDdc/eJkTBa6zzu +hXyiQHY7qca4R9gq55KRanPpsXI5nymfopjTX15YhmUPoYRlBtHci8nHc8iMai/l +xKvRHYqjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1Ud 
+DgQWBBSATNbrdP9JNqPV2Py1PsVq8JQdjDAKBggqhkjOPQQDAwNnADBkAjBqUFJ0 +CMRw3J5QdCHojXohw0+WbhXRIjVhLfoIN+4Zba3bssx9BzT1YBkstTTZbyACMANx +sbqjYAuG7ZoIapVon+Kz4ZNkfF6Tpt95LY2F45TPI11xzPKwTdb+mciUqXWi4w== +-----END CERTIFICATE----- + +# Issuer: CN=UCA Global G2 Root O=UniTrust +# Subject: CN=UCA Global G2 Root O=UniTrust +# Label: "UCA Global G2 Root" +# Serial: 124779693093741543919145257850076631279 +# MD5 Fingerprint: 80:fe:f0:c4:4a:f0:5c:62:32:9f:1c:ba:78:a9:50:f8 +# SHA1 Fingerprint: 28:f9:78:16:19:7a:ff:18:25:18:aa:44:fe:c1:a0:ce:5c:b6:4c:8a +# SHA256 Fingerprint: 9b:ea:11:c9:76:fe:01:47:64:c1:be:56:a6:f9:14:b5:a5:60:31:7a:bd:99:88:39:33:82:e5:16:1a:a0:49:3c +-----BEGIN CERTIFICATE----- +MIIFRjCCAy6gAwIBAgIQXd+x2lqj7V2+WmUgZQOQ7zANBgkqhkiG9w0BAQsFADA9 +MQswCQYDVQQGEwJDTjERMA8GA1UECgwIVW5pVHJ1c3QxGzAZBgNVBAMMElVDQSBH +bG9iYWwgRzIgUm9vdDAeFw0xNjAzMTEwMDAwMDBaFw00MDEyMzEwMDAwMDBaMD0x +CzAJBgNVBAYTAkNOMREwDwYDVQQKDAhVbmlUcnVzdDEbMBkGA1UEAwwSVUNBIEds +b2JhbCBHMiBSb290MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxeYr +b3zvJgUno4Ek2m/LAfmZmqkywiKHYUGRO8vDaBsGxUypK8FnFyIdK+35KYmToni9 +kmugow2ifsqTs6bRjDXVdfkX9s9FxeV67HeToI8jrg4aA3++1NDtLnurRiNb/yzm +VHqUwCoV8MmNsHo7JOHXaOIxPAYzRrZUEaalLyJUKlgNAQLx+hVRZ2zA+te2G3/R +VogvGjqNO7uCEeBHANBSh6v7hn4PJGtAnTRnvI3HLYZveT6OqTwXS3+wmeOwcWDc +C/Vkw85DvG1xudLeJ1uK6NjGruFZfc8oLTW4lVYa8bJYS7cSN8h8s+1LgOGN+jIj +tm+3SJUIsUROhYw6AlQgL9+/V087OpAh18EmNVQg7Mc/R+zvWr9LesGtOxdQXGLY +D0tK3Cv6brxzks3sx1DoQZbXqX5t2Okdj4q1uViSukqSKwxW/YDrCPBeKW4bHAyv +j5OJrdu9o54hyokZ7N+1wxrrFv54NkzWbtA+FxyQF2smuvt6L78RHBgOLXMDj6Dl +NaBa4kx1HXHhOThTeEDMg5PXCp6dW4+K5OXgSORIskfNTip1KnvyIvbJvgmRlld6 +iIis7nCs+dwp4wwcOxJORNanTrAmyPPZGpeRaOrvjUYG0lZFWJo8DA+DuAUlwznP +O6Q0ibd5Ei9Hxeepl2n8pndntd978XplFeRhVmUCAwEAAaNCMEAwDgYDVR0PAQH/ +BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFIHEjMz15DD/pQwIX4wV +ZyF0Ad/fMA0GCSqGSIb3DQEBCwUAA4ICAQATZSL1jiutROTL/7lo5sOASD0Ee/oj +L3rtNtqyzm325p7lX1iPyzcyochltq44PTUbPrw7tgTQvPlJ9Zv3hcU2tsu8+Mg5 +1eRfB70VVJd0ysrtT7q6ZHafgbiERUlMjW+i67HM0cOU2kTC5uLqGOiiHycFutfl +1qnN3e92mI0ADs0b+gO3joBYDic/UvuUospeZcnWhNq5NXHzJsBPd+aBJ9J3O5oU +b3n09tDh05S60FdRvScFDcH9yBIw7m+NESsIndTUv4BFFJqIRNow6rSn4+7vW4LV +PtateJLbXDzz2K36uGt/xDYotgIVilQsnLAXc47QN6MUPJiVAAwpBVueSUmxX8fj +y88nZY41F7dXyDDZQVu5FLbowg+UMaeUmMxq67XhJ/UQqAHojhJi6IjMtX9Gl8Cb +EGY4GjZGXyJoPd/JxhMnq1MGrKI8hgZlb7F+sSlEmqO6SWkoaY/X5V+tBIZkbxqg +DMUIYs6Ao9Dz7GjevjPHF1t/gMRMTLGmhIrDO7gJzRSBuhjjVFc2/tsvfEehOjPI ++Vg7RE+xygKJBJYoaMVLuCaJu9YzL1DV/pqJuhgyklTGW+Cd+V7lDSKb9triyCGy +YiGqhkCyLmTTX8jjfhFnRR8F/uOi77Oos/N9j/gMHyIfLXC0uAE0djAA5SN4p1bX +UB+K+wb1whnw0A== +-----END CERTIFICATE----- + +# Issuer: CN=UCA Extended Validation Root O=UniTrust +# Subject: CN=UCA Extended Validation Root O=UniTrust +# Label: "UCA Extended Validation Root" +# Serial: 106100277556486529736699587978573607008 +# MD5 Fingerprint: a1:f3:5f:43:c6:34:9b:da:bf:8c:7e:05:53:ad:96:e2 +# SHA1 Fingerprint: a3:a1:b0:6f:24:61:23:4a:e3:36:a5:c2:37:fc:a6:ff:dd:f0:d7:3a +# SHA256 Fingerprint: d4:3a:f9:b3:54:73:75:5c:96:84:fc:06:d7:d8:cb:70:ee:5c:28:e7:73:fb:29:4e:b4:1e:e7:17:22:92:4d:24 +-----BEGIN CERTIFICATE----- +MIIFWjCCA0KgAwIBAgIQT9Irj/VkyDOeTzRYZiNwYDANBgkqhkiG9w0BAQsFADBH +MQswCQYDVQQGEwJDTjERMA8GA1UECgwIVW5pVHJ1c3QxJTAjBgNVBAMMHFVDQSBF +eHRlbmRlZCBWYWxpZGF0aW9uIFJvb3QwHhcNMTUwMzEzMDAwMDAwWhcNMzgxMjMx +MDAwMDAwWjBHMQswCQYDVQQGEwJDTjERMA8GA1UECgwIVW5pVHJ1c3QxJTAjBgNV +BAMMHFVDQSBFeHRlbmRlZCBWYWxpZGF0aW9uIFJvb3QwggIiMA0GCSqGSIb3DQEB +AQUAA4ICDwAwggIKAoICAQCpCQcoEwKwmeBkqh5DFnpzsZGgdT6o+uM4AHrsiWog +D4vFsJszA1qGxliG1cGFu0/GnEBNyr7uaZa4rYEwmnySBesFK5pI0Lh2PpbIILvS 
+sPGP2KxFRv+qZ2C0d35qHzwaUnoEPQc8hQ2E0B92CvdqFN9y4zR8V05WAT558aop +O2z6+I9tTcg1367r3CTueUWnhbYFiN6IXSV8l2RnCdm/WhUFhvMJHuxYMjMR83dk +sHYf5BA1FxvyDrFspCqjc/wJHx4yGVMR59mzLC52LqGj3n5qiAno8geK+LLNEOfi +c0CTuwjRP+H8C5SzJe98ptfRr5//lpr1kXuYC3fUfugH0mK1lTnj8/FtDw5lhIpj +VMWAtuCeS31HJqcBCF3RiJ7XwzJE+oJKCmhUfzhTA8ykADNkUVkLo4KRel7sFsLz +KuZi2irbWWIQJUoqgQtHB0MGcIfS+pMRKXpITeuUx3BNr2fVUbGAIAEBtHoIppB/ +TuDvB0GHr2qlXov7z1CymlSvw4m6WC31MJixNnI5fkkE/SmnTHnkBVfblLkWU41G +sx2VYVdWf6/wFlthWG82UBEL2KwrlRYaDh8IzTY0ZRBiZtWAXxQgXy0MoHgKaNYs +1+lvK9JKBZP8nm9rZ/+I8U6laUpSNwXqxhaN0sSZ0YIrO7o1dfdRUVjzyAfd5LQD +fwIDAQABo0IwQDAdBgNVHQ4EFgQU2XQ65DA9DfcS3H5aBZ8eNJr34RQwDwYDVR0T +AQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQELBQADggIBADaN +l8xCFWQpN5smLNb7rhVpLGsaGvdftvkHTFnq88nIua7Mui563MD1sC3AO6+fcAUR +ap8lTwEpcOPlDOHqWnzcSbvBHiqB9RZLcpHIojG5qtr8nR/zXUACE/xOHAbKsxSQ +VBcZEhrxH9cMaVr2cXj0lH2RC47skFSOvG+hTKv8dGT9cZr4QQehzZHkPJrgmzI5 +c6sq1WnIeJEmMX3ixzDx/BR4dxIOE/TdFpS/S2d7cFOFyrC78zhNLJA5wA3CXWvp +4uXViI3WLL+rG761KIcSF3Ru/H38j9CHJrAb+7lsq+KePRXBOy5nAliRn+/4Qh8s +t2j1da3Ptfb/EX3C8CSlrdP6oDyp+l3cpaDvRKS+1ujl5BOWF3sGPjLtx7dCvHaj +2GU4Kzg1USEODm8uNBNA4StnDG1KQTAYI1oyVZnJF+A83vbsea0rWBmirSwiGpWO +vpaQXUJXxPkUAzUrHC1RVwinOt4/5Mi0A3PCwSaAuwtCH60NryZy2sy+s6ODWA2C +xR9GUeOcGMyNm43sSet1UNWMKFnKdDTajAshqx7qG+XH/RU+wBeq+yNuJkbL+vmx +cmtpzyKEC2IPrNkZAJSidjzULZrtBJ4tBmIQN1IchXIbJ+XMxjHsN+xjWZsLHXbM +fjKaiJUINlK73nZfdklJrX+9ZSCyycErdhh2n1ax +-----END CERTIFICATE----- + +# Issuer: CN=Certigna Root CA O=Dhimyotis OU=0002 48146308100036 +# Subject: CN=Certigna Root CA O=Dhimyotis OU=0002 48146308100036 +# Label: "Certigna Root CA" +# Serial: 269714418870597844693661054334862075617 +# MD5 Fingerprint: 0e:5c:30:62:27:eb:5b:bc:d7:ae:62:ba:e9:d5:df:77 +# SHA1 Fingerprint: 2d:0d:52:14:ff:9e:ad:99:24:01:74:20:47:6e:6c:85:27:27:f5:43 +# SHA256 Fingerprint: d4:8d:3d:23:ee:db:50:a4:59:e5:51:97:60:1c:27:77:4b:9d:7b:18:c9:4d:5a:05:95:11:a1:02:50:b9:31:68 +-----BEGIN CERTIFICATE----- +MIIGWzCCBEOgAwIBAgIRAMrpG4nxVQMNo+ZBbcTjpuEwDQYJKoZIhvcNAQELBQAw +WjELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCURoaW15b3RpczEcMBoGA1UECwwTMDAw +MiA0ODE0NjMwODEwMDAzNjEZMBcGA1UEAwwQQ2VydGlnbmEgUm9vdCBDQTAeFw0x +MzEwMDEwODMyMjdaFw0zMzEwMDEwODMyMjdaMFoxCzAJBgNVBAYTAkZSMRIwEAYD +VQQKDAlEaGlteW90aXMxHDAaBgNVBAsMEzAwMDIgNDgxNDYzMDgxMDAwMzYxGTAX +BgNVBAMMEENlcnRpZ25hIFJvb3QgQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw +ggIKAoICAQDNGDllGlmx6mQWDoyUJJV8g9PFOSbcDO8WV43X2KyjQn+Cyu3NW9sO +ty3tRQgXstmzy9YXUnIo245Onoq2C/mehJpNdt4iKVzSs9IGPjA5qXSjklYcoW9M +CiBtnyN6tMbaLOQdLNyzKNAT8kxOAkmhVECe5uUFoC2EyP+YbNDrihqECB63aCPu +I9Vwzm1RaRDuoXrC0SIxwoKF0vJVdlB8JXrJhFwLrN1CTivngqIkicuQstDuI7pm +TLtipPlTWmR7fJj6o0ieD5Wupxj0auwuA0Wv8HT4Ks16XdG+RCYyKfHx9WzMfgIh +C59vpD++nVPiz32pLHxYGpfhPTc3GGYo0kDFUYqMwy3OU4gkWGQwFsWq4NYKpkDf +ePb1BHxpE4S80dGnBs8B92jAqFe7OmGtBIyT46388NtEbVncSVmurJqZNjBBe3Yz +IoejwpKGbvlw7q6Hh5UbxHq9MfPU0uWZ/75I7HX1eBYdpnDBfzwboZL7z8g81sWT +Co/1VTp2lc5ZmIoJlXcymoO6LAQ6l73UL77XbJuiyn1tJslV1c/DeVIICZkHJC1k +JWumIWmbat10TWuXekG9qxf5kBdIjzb5LdXF2+6qhUVB+s06RbFo5jZMm5BX7CO5 +hwjCxAnxl4YqKE3idMDaxIzb3+KhF1nOJFl0Mdp//TBt2dzhauH8XwIDAQABo4IB +GjCCARYwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYE +FBiHVuBud+4kNTxOc5of1uHieX4rMB8GA1UdIwQYMBaAFBiHVuBud+4kNTxOc5of +1uHieX4rMEQGA1UdIAQ9MDswOQYEVR0gADAxMC8GCCsGAQUFBwIBFiNodHRwczov +L3d3d3cuY2VydGlnbmEuZnIvYXV0b3JpdGVzLzBtBgNVHR8EZjBkMC+gLaArhilo +dHRwOi8vY3JsLmNlcnRpZ25hLmZyL2NlcnRpZ25hcm9vdGNhLmNybDAxoC+gLYYr +aHR0cDovL2NybC5kaGlteW90aXMuY29tL2NlcnRpZ25hcm9vdGNhLmNybDANBgkq +hkiG9w0BAQsFAAOCAgEAlLieT/DjlQgi581oQfccVdV8AOItOoldaDgvUSILSo3L 
+6btdPrtcPbEo/uRTVRPPoZAbAh1fZkYJMyjhDSSXcNMQH+pkV5a7XdrnxIxPTGRG +HVyH41neQtGbqH6mid2PHMkwgu07nM3A6RngatgCdTer9zQoKJHyBApPNeNgJgH6 +0BGM+RFq7q89w1DTj18zeTyGqHNFkIwgtnJzFyO+B2XleJINugHA64wcZr+shncB +lA2c5uk5jR+mUYyZDDl34bSb+hxnV29qao6pK0xXeXpXIs/NX2NGjVxZOob4Mkdi +o2cNGJHc+6Zr9UhhcyNZjgKnvETq9Emd8VRY+WCv2hikLyhF3HqgiIZd8zvn/yk1 +gPxkQ5Tm4xxvvq0OKmOZK8l+hfZx6AYDlf7ej0gcWtSS6Cvu5zHbugRqh5jnxV/v +faci9wHYTfmJ0A6aBVmknpjZbyvKcL5kwlWj9Omvw5Ip3IgWJJk8jSaYtlu3zM63 +Nwf9JtmYhST/WSMDmu2dnajkXjjO11INb9I/bbEFa0nOipFGc/T2L/Coc3cOZayh +jWZSaX5LaAzHHjcng6WMxwLkFM1JAbBzs/3GkDpv0mztO+7skb6iQ12LAEpmJURw +3kAP+HwV96LOPNdeE4yBFxgX0b3xdxA61GU5wSesVywlVP+i2k+KYTlerj1KjL0= +-----END CERTIFICATE----- + +# Issuer: CN=emSign Root CA - G1 O=eMudhra Technologies Limited OU=emSign PKI +# Subject: CN=emSign Root CA - G1 O=eMudhra Technologies Limited OU=emSign PKI +# Label: "emSign Root CA - G1" +# Serial: 235931866688319308814040 +# MD5 Fingerprint: 9c:42:84:57:dd:cb:0b:a7:2e:95:ad:b6:f3:da:bc:ac +# SHA1 Fingerprint: 8a:c7:ad:8f:73:ac:4e:c1:b5:75:4d:a5:40:f4:fc:cf:7c:b5:8e:8c +# SHA256 Fingerprint: 40:f6:af:03:46:a9:9a:a1:cd:1d:55:5a:4e:9c:ce:62:c7:f9:63:46:03:ee:40:66:15:83:3d:c8:c8:d0:03:67 +-----BEGIN CERTIFICATE----- +MIIDlDCCAnygAwIBAgIKMfXkYgxsWO3W2DANBgkqhkiG9w0BAQsFADBnMQswCQYD +VQQGEwJJTjETMBEGA1UECxMKZW1TaWduIFBLSTElMCMGA1UEChMcZU11ZGhyYSBU +ZWNobm9sb2dpZXMgTGltaXRlZDEcMBoGA1UEAxMTZW1TaWduIFJvb3QgQ0EgLSBH +MTAeFw0xODAyMTgxODMwMDBaFw00MzAyMTgxODMwMDBaMGcxCzAJBgNVBAYTAklO +MRMwEQYDVQQLEwplbVNpZ24gUEtJMSUwIwYDVQQKExxlTXVkaHJhIFRlY2hub2xv +Z2llcyBMaW1pdGVkMRwwGgYDVQQDExNlbVNpZ24gUm9vdCBDQSAtIEcxMIIBIjAN +BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAk0u76WaK7p1b1TST0Bsew+eeuGQz +f2N4aLTNLnF115sgxk0pvLZoYIr3IZpWNVrzdr3YzZr/k1ZLpVkGoZM0Kd0WNHVO +8oG0x5ZOrRkVUkr+PHB1cM2vK6sVmjM8qrOLqs1D/fXqcP/tzxE7lM5OMhbTI0Aq +d7OvPAEsbO2ZLIvZTmmYsvePQbAyeGHWDV/D+qJAkh1cF+ZwPjXnorfCYuKrpDhM +tTk1b+oDafo6VGiFbdbyL0NVHpENDtjVaqSW0RM8LHhQ6DqS0hdW5TUaQBw+jSzt +Od9C4INBdN+jzcKGYEho42kLVACL5HZpIQ15TjQIXhTCzLG3rdd8cIrHhQIDAQAB +o0IwQDAdBgNVHQ4EFgQU++8Nhp6w492pufEhF38+/PB3KxowDgYDVR0PAQH/BAQD +AgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAFn/8oz1h31x +PaOfG1vR2vjTnGs2vZupYeveFix0PZ7mddrXuqe8QhfnPZHr5X3dPpzxz5KsbEjM +wiI/aTvFthUvozXGaCocV685743QNcMYDHsAVhzNixl03r4PEuDQqqE/AjSxcM6d +GNYIAwlG7mDgfrbESQRRfXBgvKqy/3lyeqYdPV8q+Mri/Tm3R7nrft8EI6/6nAYH +6ftjk4BAtcZsCjEozgyfz7MjNYBBjWzEN3uBL4ChQEKF6dk4jeihU80Bv2noWgby +RQuQ+q7hv53yrlc8pa6yVvSLZUDp/TGBLPQ5Cdjua6e0ph0VpZj3AYHYhX3zUVxx +iN66zB+Afko= +-----END CERTIFICATE----- + +# Issuer: CN=emSign ECC Root CA - G3 O=eMudhra Technologies Limited OU=emSign PKI +# Subject: CN=emSign ECC Root CA - G3 O=eMudhra Technologies Limited OU=emSign PKI +# Label: "emSign ECC Root CA - G3" +# Serial: 287880440101571086945156 +# MD5 Fingerprint: ce:0b:72:d1:9f:88:8e:d0:50:03:e8:e3:b8:8b:67:40 +# SHA1 Fingerprint: 30:43:fa:4f:f2:57:dc:a0:c3:80:ee:2e:58:ea:78:b2:3f:e6:bb:c1 +# SHA256 Fingerprint: 86:a1:ec:ba:08:9c:4a:8d:3b:be:27:34:c6:12:ba:34:1d:81:3e:04:3c:f9:e8:a8:62:cd:5c:57:a3:6b:be:6b +-----BEGIN CERTIFICATE----- +MIICTjCCAdOgAwIBAgIKPPYHqWhwDtqLhDAKBggqhkjOPQQDAzBrMQswCQYDVQQG +EwJJTjETMBEGA1UECxMKZW1TaWduIFBLSTElMCMGA1UEChMcZU11ZGhyYSBUZWNo +bm9sb2dpZXMgTGltaXRlZDEgMB4GA1UEAxMXZW1TaWduIEVDQyBSb290IENBIC0g +RzMwHhcNMTgwMjE4MTgzMDAwWhcNNDMwMjE4MTgzMDAwWjBrMQswCQYDVQQGEwJJ +TjETMBEGA1UECxMKZW1TaWduIFBLSTElMCMGA1UEChMcZU11ZGhyYSBUZWNobm9s +b2dpZXMgTGltaXRlZDEgMB4GA1UEAxMXZW1TaWduIEVDQyBSb290IENBIC0gRzMw +djAQBgcqhkjOPQIBBgUrgQQAIgNiAAQjpQy4LRL1KPOxst3iAhKAnjlfSU2fySU0 
+WXTsuwYc58Byr+iuL+FBVIcUqEqy6HyC5ltqtdyzdc6LBtCGI79G1Y4PPwT01xyS +fvalY8L1X44uT6EYGQIrMgqCZH0Wk9GjQjBAMB0GA1UdDgQWBBR8XQKEE9TMipuB +zhccLikenEhjQjAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAKBggq +hkjOPQQDAwNpADBmAjEAvvNhzwIQHWSVB7gYboiFBS+DCBeQyh+KTOgNG3qxrdWB +CUfvO6wIBHxcmbHtRwfSAjEAnbpV/KlK6O3t5nYBQnvI+GDZjVGLVTv7jHvrZQnD ++JbNR6iC8hZVdyR+EhCVBCyj +-----END CERTIFICATE----- + +# Issuer: CN=emSign Root CA - C1 O=eMudhra Inc OU=emSign PKI +# Subject: CN=emSign Root CA - C1 O=eMudhra Inc OU=emSign PKI +# Label: "emSign Root CA - C1" +# Serial: 825510296613316004955058 +# MD5 Fingerprint: d8:e3:5d:01:21:fa:78:5a:b0:df:ba:d2:ee:2a:5f:68 +# SHA1 Fingerprint: e7:2e:f1:df:fc:b2:09:28:cf:5d:d4:d5:67:37:b1:51:cb:86:4f:01 +# SHA256 Fingerprint: 12:56:09:aa:30:1d:a0:a2:49:b9:7a:82:39:cb:6a:34:21:6f:44:dc:ac:9f:39:54:b1:42:92:f2:e8:c8:60:8f +-----BEGIN CERTIFICATE----- +MIIDczCCAlugAwIBAgILAK7PALrEzzL4Q7IwDQYJKoZIhvcNAQELBQAwVjELMAkG +A1UEBhMCVVMxEzARBgNVBAsTCmVtU2lnbiBQS0kxFDASBgNVBAoTC2VNdWRocmEg +SW5jMRwwGgYDVQQDExNlbVNpZ24gUm9vdCBDQSAtIEMxMB4XDTE4MDIxODE4MzAw +MFoXDTQzMDIxODE4MzAwMFowVjELMAkGA1UEBhMCVVMxEzARBgNVBAsTCmVtU2ln +biBQS0kxFDASBgNVBAoTC2VNdWRocmEgSW5jMRwwGgYDVQQDExNlbVNpZ24gUm9v +dCBDQSAtIEMxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAz+upufGZ +BczYKCFK83M0UYRWEPWgTywS4/oTmifQz/l5GnRfHXk5/Fv4cI7gklL35CX5VIPZ +HdPIWoU/Xse2B+4+wM6ar6xWQio5JXDWv7V7Nq2s9nPczdcdioOl+yuQFTdrHCZH +3DspVpNqs8FqOp099cGXOFgFixwR4+S0uF2FHYP+eF8LRWgYSKVGczQ7/g/IdrvH +GPMF0Ybzhe3nudkyrVWIzqa2kbBPrH4VI5b2P/AgNBbeCsbEBEV5f6f9vtKppa+c +xSMq9zwhbL2vj07FOrLzNBL834AaSaTUqZX3noleoomslMuoaJuvimUnzYnu3Yy1 +aylwQ6BpC+S5DwIDAQABo0IwQDAdBgNVHQ4EFgQU/qHgcB4qAzlSWkK+XJGFehiq +TbUwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEL +BQADggEBAMJKVvoVIXsoounlHfv4LcQ5lkFMOycsxGwYFYDGrK9HWS8mC+M2sO87 +/kOXSTKZEhVb3xEp/6tT+LvBeA+snFOvV71ojD1pM/CjoCNjO2RnIkSt1XHLVip4 +kqNPEjE2NuLe/gDEo2APJ62gsIq1NnpSob0n9CAnYuhNlCQT5AoE6TyrLshDCUrG +YQTlSTR+08TI9Q/Aqum6VF7zYytPT1DU/rl7mYw9wC68AivTxEDkigcxHpvOJpkT ++xHqmiIMERnHXhuBUDDIlhJu58tBf5E7oke3VIAb3ADMmpDqw8NQBmIMMMAVSKeo +WXzhriKi4gp6D/piq1JM4fHfyr6DDUI= +-----END CERTIFICATE----- + +# Issuer: CN=emSign ECC Root CA - C3 O=eMudhra Inc OU=emSign PKI +# Subject: CN=emSign ECC Root CA - C3 O=eMudhra Inc OU=emSign PKI +# Label: "emSign ECC Root CA - C3" +# Serial: 582948710642506000014504 +# MD5 Fingerprint: 3e:53:b3:a3:81:ee:d7:10:f8:d3:b0:1d:17:92:f5:d5 +# SHA1 Fingerprint: b6:af:43:c2:9b:81:53:7d:f6:ef:6b:c3:1f:1f:60:15:0c:ee:48:66 +# SHA256 Fingerprint: bc:4d:80:9b:15:18:9d:78:db:3e:1d:8c:f4:f9:72:6a:79:5d:a1:64:3c:a5:f1:35:8e:1d:db:0e:dc:0d:7e:b3 +-----BEGIN CERTIFICATE----- +MIICKzCCAbGgAwIBAgIKe3G2gla4EnycqDAKBggqhkjOPQQDAzBaMQswCQYDVQQG +EwJVUzETMBEGA1UECxMKZW1TaWduIFBLSTEUMBIGA1UEChMLZU11ZGhyYSBJbmMx +IDAeBgNVBAMTF2VtU2lnbiBFQ0MgUm9vdCBDQSAtIEMzMB4XDTE4MDIxODE4MzAw +MFoXDTQzMDIxODE4MzAwMFowWjELMAkGA1UEBhMCVVMxEzARBgNVBAsTCmVtU2ln +biBQS0kxFDASBgNVBAoTC2VNdWRocmEgSW5jMSAwHgYDVQQDExdlbVNpZ24gRUND +IFJvb3QgQ0EgLSBDMzB2MBAGByqGSM49AgEGBSuBBAAiA2IABP2lYa57JhAd6bci +MK4G9IGzsUJxlTm801Ljr6/58pc1kjZGDoeVjbk5Wum739D+yAdBPLtVb4Ojavti +sIGJAnB9SMVK4+kiVCJNk7tCDK93nCOmfddhEc5lx/h//vXyqaNCMEAwHQYDVR0O +BBYEFPtaSNCAIEDyqOkAB2kZd6fmw/TPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB +Af8EBTADAQH/MAoGCCqGSM49BAMDA2gAMGUCMQC02C8Cif22TGK6Q04ThHK1rt0c +3ta13FaPWEBaLd4gTCKDypOofu4SQMfWh0/434UCMBwUZOR8loMRnLDRWmFLpg9J +0wD8ofzkpf9/rdcw0Md3f76BB1UwUCAU9Vc4CqgxUQ== +-----END CERTIFICATE----- + +# Issuer: CN=Hongkong Post Root CA 3 O=Hongkong Post +# Subject: CN=Hongkong Post Root CA 3 O=Hongkong Post +# Label: 
"Hongkong Post Root CA 3" +# Serial: 46170865288971385588281144162979347873371282084 +# MD5 Fingerprint: 11:fc:9f:bd:73:30:02:8a:fd:3f:f3:58:b9:cb:20:f0 +# SHA1 Fingerprint: 58:a2:d0:ec:20:52:81:5b:c1:f3:f8:64:02:24:4e:c2:8e:02:4b:02 +# SHA256 Fingerprint: 5a:2f:c0:3f:0c:83:b0:90:bb:fa:40:60:4b:09:88:44:6c:76:36:18:3d:f9:84:6e:17:10:1a:44:7f:b8:ef:d6 +-----BEGIN CERTIFICATE----- +MIIFzzCCA7egAwIBAgIUCBZfikyl7ADJk0DfxMauI7gcWqQwDQYJKoZIhvcNAQEL +BQAwbzELMAkGA1UEBhMCSEsxEjAQBgNVBAgTCUhvbmcgS29uZzESMBAGA1UEBxMJ +SG9uZyBLb25nMRYwFAYDVQQKEw1Ib25na29uZyBQb3N0MSAwHgYDVQQDExdIb25n +a29uZyBQb3N0IFJvb3QgQ0EgMzAeFw0xNzA2MDMwMjI5NDZaFw00MjA2MDMwMjI5 +NDZaMG8xCzAJBgNVBAYTAkhLMRIwEAYDVQQIEwlIb25nIEtvbmcxEjAQBgNVBAcT +CUhvbmcgS29uZzEWMBQGA1UEChMNSG9uZ2tvbmcgUG9zdDEgMB4GA1UEAxMXSG9u +Z2tvbmcgUG9zdCBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQCziNfqzg8gTr7m1gNt7ln8wlffKWihgw4+aMdoWJwcYEuJQwy51BWy7sFO +dem1p+/l6TWZ5Mwc50tfjTMwIDNT2aa71T4Tjukfh0mtUC1Qyhi+AViiE3CWu4mI +VoBc+L0sPOFMV4i707mV78vH9toxdCim5lSJ9UExyuUmGs2C4HDaOym71QP1mbpV +9WTRYA6ziUm4ii8F0oRFKHyPaFASePwLtVPLwpgchKOesL4jpNrcyCse2m5FHomY +2vkALgbpDDtw1VAliJnLzXNg99X/NWfFobxeq81KuEXryGgeDQ0URhLj0mRiikKY +vLTGCAj4/ahMZJx2Ab0vqWwzD9g/KLg8aQFChn5pwckGyuV6RmXpwtZQQS4/t+Tt +bNe/JgERohYpSms0BpDsE9K2+2p20jzt8NYt3eEV7KObLyzJPivkaTv/ciWxNoZb +x39ri1UbSsUgYT2uy1DhCDq+sI9jQVMwCFk8mB13umOResoQUGC/8Ne8lYePl8X+ +l2oBlKN8W4UdKjk60FSh0Tlxnf0h+bV78OLgAo9uliQlLKAeLKjEiafv7ZkGL7YK +TE/bosw3Gq9HhS2KX8Q0NEwA/RiTZxPRN+ZItIsGxVd7GYYKecsAyVKvQv83j+Gj +Hno9UKtjBucVtT+2RTeUN7F+8kjDf8V1/peNRY8apxpyKBpADwIDAQABo2MwYTAP +BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBQXnc0e +i9Y5K3DTXNSguB+wAPzFYTAdBgNVHQ4EFgQUF53NHovWOStw01zUoLgfsAD8xWEw +DQYJKoZIhvcNAQELBQADggIBAFbVe27mIgHSQpsY1Q7XZiNc4/6gx5LS6ZStS6LG +7BJ8dNVI0lkUmcDrudHr9EgwW62nV3OZqdPlt9EuWSRY3GguLmLYauRwCy0gUCCk +MpXRAJi70/33MvJJrsZ64Ee+bs7Lo3I6LWldy8joRTnU+kLBEUx3XZL7av9YROXr +gZ6voJmtvqkBZss4HTzfQx/0TW60uhdG/H39h4F5ag0zD/ov+BS5gLNdTaqX4fnk +GMX41TiMJjz98iji7lpJiCzfeT2OnpA8vUFKOt1b9pq0zj8lMH8yfaIDlNDceqFS +3m6TjRgm/VWsvY+b0s+v54Ysyx8Jb6NvqYTUc79NoXQbTiNg8swOqn+knEwlqLJm +Ozj/2ZQw9nKEvmhVEA/GcywWaZMH/rFF7buiVWqw2rVKAiUnhde3t4ZEFolsgCs+ +l6mc1X5VTMbeRRAc6uk7nwNT7u56AQIWeNTowr5GdogTPyK7SBIdUgC0An4hGh6c +JfTzPV4e0hz5sy229zdcxsshTrD3mUcYhcErulWuBurQB7Lcq9CClnXO0lD+mefP +L5/ndtFhKvshuzHQqp9HpLIiyhY6UFfEW0NnxWViA0kB60PZ2Pierc+xYw5F9KBa +LJstxabArahH9CdMOA0uG0k7UvToiIMrVCjU8jVStDKDYmlkDJGcn5fqdBb9HxEG +mpv0 +-----END CERTIFICATE----- + +` + +// CACerts builds an X.509 certificate pool containing the Mozilla CA +// Certificate bundle. Returns nil on error along with an appropriate error +// code. 
+func CACerts() (*x509.CertPool, error) { + pool := x509.NewCertPool() + pool.AppendCertsFromPEM([]byte(pemcerts)) + return pool, nil +} diff --git a/vendor/github.com/cespare/xxhash/v2/LICENSE.txt b/vendor/github.com/cespare/xxhash/v2/LICENSE.txt new file mode 100644 index 00000000000..24b53065f40 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/LICENSE.txt @@ -0,0 +1,22 @@ +Copyright (c) 2016 Caleb Spare + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md new file mode 100644 index 00000000000..792b4a60b34 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/README.md @@ -0,0 +1,69 @@ +# xxhash + +[![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2) +[![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml) + +xxhash is a Go implementation of the 64-bit +[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a +high-quality hashing algorithm that is much faster than anything in the Go +standard library. + +This package provides a straightforward API: + +``` +func Sum64(b []byte) uint64 +func Sum64String(s string) uint64 +type Digest struct{ ... } + func New() *Digest +``` + +The `Digest` type implements hash.Hash64. Its key methods are: + +``` +func (*Digest) Write([]byte) (int, error) +func (*Digest) WriteString(string) (int, error) +func (*Digest) Sum64() uint64 +``` + +This implementation provides a fast pure-Go implementation and an even faster +assembly implementation for amd64. + +## Compatibility + +This package is in a module and the latest code is in version 2 of the module. +You need a version of Go with at least "minimal module compatibility" to use +github.com/cespare/xxhash/v2: + +* 1.9.7+ for Go 1.9 +* 1.10.3+ for Go 1.10 +* Go 1.11 or later + +I recommend using the latest release of Go. + +## Benchmarks + +Here are some quick benchmarks comparing the pure-Go and assembly +implementations of Sum64. 
+ +| input size | purego | asm | +| --- | --- | --- | +| 5 B | 979.66 MB/s | 1291.17 MB/s | +| 100 B | 7475.26 MB/s | 7973.40 MB/s | +| 4 KB | 17573.46 MB/s | 17602.65 MB/s | +| 10 MB | 17131.46 MB/s | 17142.16 MB/s | + +These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using +the following commands under Go 1.11.2: + +``` +$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes' +$ go test -benchtime 10s -bench '/xxhash,direct,bytes' +``` + +## Projects using this package + +- [InfluxDB](https://github.com/influxdata/influxdb) +- [Prometheus](https://github.com/prometheus/prometheus) +- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) +- [FreeCache](https://github.com/coocood/freecache) +- [FastCache](https://github.com/VictoriaMetrics/fastcache) diff --git a/vendor/github.com/cespare/xxhash/v2/go.mod b/vendor/github.com/cespare/xxhash/v2/go.mod new file mode 100644 index 00000000000..49f67608bf6 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/go.mod @@ -0,0 +1,3 @@ +module github.com/cespare/xxhash/v2 + +go 1.11 diff --git a/vendor/github.com/cespare/xxhash/v2/go.sum b/vendor/github.com/cespare/xxhash/v2/go.sum new file mode 100644 index 00000000000..e69de29bb2d diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go new file mode 100644 index 00000000000..15c835d5417 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go @@ -0,0 +1,235 @@ +// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described +// at http://cyan4973.github.io/xxHash/. +package xxhash + +import ( + "encoding/binary" + "errors" + "math/bits" +) + +const ( + prime1 uint64 = 11400714785074694791 + prime2 uint64 = 14029467366897019727 + prime3 uint64 = 1609587929392839161 + prime4 uint64 = 9650029242287828579 + prime5 uint64 = 2870177450012600261 +) + +// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where +// possible in the Go code is worth a small (but measurable) performance boost +// by avoiding some MOVQs. Vars are needed for the asm and also are useful for +// convenience in the Go code in a few places where we need to intentionally +// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the +// result overflows a uint64). +var ( + prime1v = prime1 + prime2v = prime2 + prime3v = prime3 + prime4v = prime4 + prime5v = prime5 +) + +// Digest implements hash.Hash64. +type Digest struct { + v1 uint64 + v2 uint64 + v3 uint64 + v4 uint64 + total uint64 + mem [32]byte + n int // how much of mem is used +} + +// New creates a new Digest that computes the 64-bit xxHash algorithm. +func New() *Digest { + var d Digest + d.Reset() + return &d +} + +// Reset clears the Digest's state so that it can be reused. +func (d *Digest) Reset() { + d.v1 = prime1v + prime2 + d.v2 = prime2 + d.v3 = 0 + d.v4 = -prime1v + d.total = 0 + d.n = 0 +} + +// Size always returns 8 bytes. +func (d *Digest) Size() int { return 8 } + +// BlockSize always returns 32 bytes. +func (d *Digest) BlockSize() int { return 32 } + +// Write adds more data to d. It always returns len(b), nil. +func (d *Digest) Write(b []byte) (n int, err error) { + n = len(b) + d.total += uint64(n) + + if d.n+n < 32 { + // This new data doesn't even fill the current block. + copy(d.mem[d.n:], b) + d.n += n + return + } + + if d.n > 0 { + // Finish off the partial block. 
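+		// (Fill d.mem to exactly 32 bytes, consume it as four 8-byte
+		// lanes - one round per lane - then skip the bytes of b that
+		// were used and mark the internal buffer empty.)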
+ copy(d.mem[d.n:], b) + d.v1 = round(d.v1, u64(d.mem[0:8])) + d.v2 = round(d.v2, u64(d.mem[8:16])) + d.v3 = round(d.v3, u64(d.mem[16:24])) + d.v4 = round(d.v4, u64(d.mem[24:32])) + b = b[32-d.n:] + d.n = 0 + } + + if len(b) >= 32 { + // One or more full blocks left. + nw := writeBlocks(d, b) + b = b[nw:] + } + + // Store any remaining partial block. + copy(d.mem[:], b) + d.n = len(b) + + return +} + +// Sum appends the current hash to b and returns the resulting slice. +func (d *Digest) Sum(b []byte) []byte { + s := d.Sum64() + return append( + b, + byte(s>>56), + byte(s>>48), + byte(s>>40), + byte(s>>32), + byte(s>>24), + byte(s>>16), + byte(s>>8), + byte(s), + ) +} + +// Sum64 returns the current hash. +func (d *Digest) Sum64() uint64 { + var h uint64 + + if d.total >= 32 { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = d.v3 + prime5 + } + + h += d.total + + i, end := 0, d.n + for ; i+8 <= end; i += 8 { + k1 := round(0, u64(d.mem[i:i+8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if i+4 <= end { + h ^= uint64(u32(d.mem[i:i+4])) * prime1 + h = rol23(h)*prime2 + prime3 + i += 4 + } + for i < end { + h ^= uint64(d.mem[i]) * prime5 + h = rol11(h) * prime1 + i++ + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +const ( + magic = "xxh\x06" + marshaledSize = len(magic) + 8*5 + 32 +) + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (d *Digest) MarshalBinary() ([]byte, error) { + b := make([]byte, 0, marshaledSize) + b = append(b, magic...) + b = appendUint64(b, d.v1) + b = appendUint64(b, d.v2) + b = appendUint64(b, d.v3) + b = appendUint64(b, d.v4) + b = appendUint64(b, d.total) + b = append(b, d.mem[:d.n]...) + b = b[:len(b)+len(d.mem)-d.n] + return b, nil +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. +func (d *Digest) UnmarshalBinary(b []byte) error { + if len(b) < len(magic) || string(b[:len(magic)]) != magic { + return errors.New("xxhash: invalid hash state identifier") + } + if len(b) != marshaledSize { + return errors.New("xxhash: invalid hash state size") + } + b = b[len(magic):] + b, d.v1 = consumeUint64(b) + b, d.v2 = consumeUint64(b) + b, d.v3 = consumeUint64(b) + b, d.v4 = consumeUint64(b) + b, d.total = consumeUint64(b) + copy(d.mem[:], b) + d.n = int(d.total % uint64(len(d.mem))) + return nil +} + +func appendUint64(b []byte, x uint64) []byte { + var a [8]byte + binary.LittleEndian.PutUint64(a[:], x) + return append(b, a[:]...) 
+} + +func consumeUint64(b []byte) ([]byte, uint64) { + x := u64(b) + return b[8:], x +} + +func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } +func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } + +func round(acc, input uint64) uint64 { + acc += input * prime2 + acc = rol31(acc) + acc *= prime1 + return acc +} + +func mergeRound(acc, val uint64) uint64 { + val = round(0, val) + acc ^= val + acc = acc*prime1 + prime4 + return acc +} + +func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } +func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } +func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } +func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } +func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } +func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } +func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } +func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go new file mode 100644 index 00000000000..ad14b807f4d --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go @@ -0,0 +1,13 @@ +// +build !appengine +// +build gc +// +build !purego + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. +// +//go:noescape +func Sum64(b []byte) uint64 + +//go:noescape +func writeBlocks(d *Digest, b []byte) int diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s new file mode 100644 index 00000000000..be8db5bf796 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s @@ -0,0 +1,215 @@ +// +build !appengine +// +build gc +// +build !purego + +#include "textflag.h" + +// Register allocation: +// AX h +// SI pointer to advance through b +// DX n +// BX loop end +// R8 v1, k1 +// R9 v2 +// R10 v3 +// R11 v4 +// R12 tmp +// R13 prime1v +// R14 prime2v +// DI prime4v + +// round reads from and advances the buffer pointer in SI. +// It assumes that R13 has prime1v and R14 has prime2v. +#define round(r) \ + MOVQ (SI), R12 \ + ADDQ $8, SI \ + IMULQ R14, R12 \ + ADDQ R12, r \ + ROLQ $31, r \ + IMULQ R13, r + +// mergeRound applies a merge round on the two registers acc and val. +// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v. +#define mergeRound(acc, val) \ + IMULQ R14, val \ + ROLQ $31, val \ + IMULQ R13, val \ + XORQ val, acc \ + IMULQ R13, acc \ + ADDQ DI, acc + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT, $0-32 + // Load fixed primes. + MOVQ ·prime1v(SB), R13 + MOVQ ·prime2v(SB), R14 + MOVQ ·prime4v(SB), DI + + // Load slice. + MOVQ b_base+0(FP), SI + MOVQ b_len+8(FP), DX + LEAQ (SI)(DX*1), BX + + // The first loop limit will be len(b)-32. + SUBQ $32, BX + + // Check whether we have at least one block. + CMPQ DX, $32 + JLT noBlocks + + // Set up initial state (v1, v2, v3, v4). + MOVQ R13, R8 + ADDQ R14, R8 + MOVQ R14, R9 + XORQ R10, R10 + XORQ R11, R11 + SUBQ R13, R11 + + // Loop until SI > BX. 
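+	// (Each iteration consumes 32 bytes of input: one round per
+	// accumulator v1..v4, with round advancing SI by 8 bytes.)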
+blockLoop: + round(R8) + round(R9) + round(R10) + round(R11) + + CMPQ SI, BX + JLE blockLoop + + MOVQ R8, AX + ROLQ $1, AX + MOVQ R9, R12 + ROLQ $7, R12 + ADDQ R12, AX + MOVQ R10, R12 + ROLQ $12, R12 + ADDQ R12, AX + MOVQ R11, R12 + ROLQ $18, R12 + ADDQ R12, AX + + mergeRound(AX, R8) + mergeRound(AX, R9) + mergeRound(AX, R10) + mergeRound(AX, R11) + + JMP afterBlocks + +noBlocks: + MOVQ ·prime5v(SB), AX + +afterBlocks: + ADDQ DX, AX + + // Right now BX has len(b)-32, and we want to loop until SI > len(b)-8. + ADDQ $24, BX + + CMPQ SI, BX + JG fourByte + +wordLoop: + // Calculate k1. + MOVQ (SI), R8 + ADDQ $8, SI + IMULQ R14, R8 + ROLQ $31, R8 + IMULQ R13, R8 + + XORQ R8, AX + ROLQ $27, AX + IMULQ R13, AX + ADDQ DI, AX + + CMPQ SI, BX + JLE wordLoop + +fourByte: + ADDQ $4, BX + CMPQ SI, BX + JG singles + + MOVL (SI), R8 + ADDQ $4, SI + IMULQ R13, R8 + XORQ R8, AX + + ROLQ $23, AX + IMULQ R14, AX + ADDQ ·prime3v(SB), AX + +singles: + ADDQ $4, BX + CMPQ SI, BX + JGE finalize + +singlesLoop: + MOVBQZX (SI), R12 + ADDQ $1, SI + IMULQ ·prime5v(SB), R12 + XORQ R12, AX + + ROLQ $11, AX + IMULQ R13, AX + + CMPQ SI, BX + JL singlesLoop + +finalize: + MOVQ AX, R12 + SHRQ $33, R12 + XORQ R12, AX + IMULQ R14, AX + MOVQ AX, R12 + SHRQ $29, R12 + XORQ R12, AX + IMULQ ·prime3v(SB), AX + MOVQ AX, R12 + SHRQ $32, R12 + XORQ R12, AX + + MOVQ AX, ret+24(FP) + RET + +// writeBlocks uses the same registers as above except that it uses AX to store +// the d pointer. + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT, $0-40 + // Load fixed primes needed for round. + MOVQ ·prime1v(SB), R13 + MOVQ ·prime2v(SB), R14 + + // Load slice. + MOVQ b_base+8(FP), SI + MOVQ b_len+16(FP), DX + LEAQ (SI)(DX*1), BX + SUBQ $32, BX + + // Load vN from d. + MOVQ d+0(FP), AX + MOVQ 0(AX), R8 // v1 + MOVQ 8(AX), R9 // v2 + MOVQ 16(AX), R10 // v3 + MOVQ 24(AX), R11 // v4 + + // We don't need to check the loop condition here; this function is + // always called with at least one block of data to process. +blockLoop: + round(R8) + round(R9) + round(R10) + round(R11) + + CMPQ SI, BX + JLE blockLoop + + // Copy vN back to d. + MOVQ R8, 0(AX) + MOVQ R9, 8(AX) + MOVQ R10, 16(AX) + MOVQ R11, 24(AX) + + // The number of bytes written is SI minus the old base pointer. + SUBQ b_base+8(FP), SI + MOVQ SI, ret+32(FP) + + RET diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go new file mode 100644 index 00000000000..4a5a821603e --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go @@ -0,0 +1,76 @@ +// +build !amd64 appengine !gc purego + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. +func Sum64(b []byte) uint64 { + // A simpler version would be + // d := New() + // d.Write(b) + // return d.Sum64() + // but this is faster, particularly for small inputs. 
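+	// (The speedup comes from skipping the Digest state and its 32-byte
+	// buffer entirely; for small inputs the setup cost of New/Write/Sum64
+	// would dominate the hashing itself.)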
+ + n := len(b) + var h uint64 + + if n >= 32 { + v1 := prime1v + prime2 + v2 := prime2 + v3 := uint64(0) + v4 := -prime1v + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = prime5 + } + + h += uint64(n) + + i, end := 0, len(b) + for ; i+8 <= end; i += 8 { + k1 := round(0, u64(b[i:i+8:len(b)])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if i+4 <= end { + h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 + h = rol23(h)*prime2 + prime3 + i += 4 + } + for ; i < end; i++ { + h ^= uint64(b[i]) * prime5 + h = rol11(h) * prime1 + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +func writeBlocks(d *Digest, b []byte) int { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + n := len(b) + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 + return n - len(b) +} diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go new file mode 100644 index 00000000000..fc9bea7a31f --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go @@ -0,0 +1,15 @@ +// +build appengine + +// This file contains the safe implementations of otherwise unsafe-using code. + +package xxhash + +// Sum64String computes the 64-bit xxHash digest of s. +func Sum64String(s string) uint64 { + return Sum64([]byte(s)) +} + +// WriteString adds more data to d. It always returns len(s), nil. +func (d *Digest) WriteString(s string) (n int, err error) { + return d.Write([]byte(s)) +} diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go new file mode 100644 index 00000000000..376e0ca2e49 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go @@ -0,0 +1,57 @@ +// +build !appengine + +// This file encapsulates usage of unsafe. +// xxhash_safe.go contains the safe implementations. + +package xxhash + +import ( + "unsafe" +) + +// In the future it's possible that compiler optimizations will make these +// XxxString functions unnecessary by realizing that calls such as +// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205. +// If that happens, even if we keep these functions they can be replaced with +// the trivial safe code. + +// NOTE: The usual way of doing an unsafe string-to-[]byte conversion is: +// +// var b []byte +// bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) +// bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data +// bh.Len = len(s) +// bh.Cap = len(s) +// +// Unfortunately, as of Go 1.15.3 the inliner's cost model assigns a high enough +// weight to this sequence of expressions that any function that uses it will +// not be inlined. Instead, the functions below use a different unsafe +// conversion designed to minimize the inliner weight and allow both to be +// inlined. There is also a test (TestInlining) which verifies that these are +// inlined. +// +// See https://github.com/golang/go/issues/42739 for discussion. + +// Sum64String computes the 64-bit xxHash digest of s. 
+// It may be faster than Sum64([]byte(s)) by avoiding a copy. +func Sum64String(s string) uint64 { + b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})) + return Sum64(b) +} + +// WriteString adds more data to d. It always returns len(s), nil. +// It may be faster than Write([]byte(s)) by avoiding a copy. +func (d *Digest) WriteString(s string) (n int, err error) { + d.Write(*(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))) + // d.Write always returns len(s), nil. + // Ignoring the return output and returning these fixed values buys a + // savings of 6 in the inliner's cost model. + return len(s), nil +} + +// sliceHeader is similar to reflect.SliceHeader, but it assumes that the layout +// of the first two words is the same as the layout of a string. +type sliceHeader struct { + s string + cap int +} diff --git a/vendor/github.com/clbanning/mxj/LICENSE b/vendor/github.com/clbanning/mxj/LICENSE new file mode 100644 index 00000000000..f27bccdf06e --- /dev/null +++ b/vendor/github.com/clbanning/mxj/LICENSE @@ -0,0 +1,55 @@ +Copyright (c) 2012-2016 Charles Banning . All rights reserved. + +The MIT License (MIT) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +=============================================================================== + +Go Language Copyright & License - + +Copyright 2009 The Go Authors. All rights reserved. +Use of this source code is governed by a BSD-style +license that can be found in the LICENSE file. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/clbanning/mxj/anyxml.go b/vendor/github.com/clbanning/mxj/anyxml.go
new file mode 100644
index 00000000000..ec2f3dfddac
--- /dev/null
+++ b/vendor/github.com/clbanning/mxj/anyxml.go
@@ -0,0 +1,189 @@
+package mxj
+
+import (
+	"encoding/xml"
+	"reflect"
+)
+
+const (
+	DefaultElementTag = "element"
+)
+
+// Encode arbitrary value as XML.
+//
+// Note: unmarshaling the resultant
+// XML may not return the original value, since tag labels may have been injected
+// to create the XML representation of the value.
+/*
+ Encode an arbitrary JSON object.
+	package main
+
+	import (
+		"encoding/json"
+		"fmt"
+		"github.com/clbanning/mxj"
+	)
+
+	func main() {
+		jsondata := []byte(`[
+			{ "somekey":"somevalue" },
+			"string",
+			3.14159265,
+			true
+		]`)
+		var i interface{}
+		err := json.Unmarshal(jsondata, &i)
+		if err != nil {
+			// do something
+		}
+		x, err := mxj.AnyXmlIndent(i, "", "  ", "mydoc")
+		if err != nil {
+			// do something else
+		}
+		fmt.Println(string(x))
+	}
+
+	output:
+		<mydoc>
+			<somekey>somevalue</somekey>
+			<element>string</element>
+			<element>3.14159265</element>
+			<element>true</element>
+		</mydoc>
+*/
+// Alternative values for DefaultRootTag and DefaultElementTag can be set as:
+//	AnyXml(v, myRootTag, myElementTag).
+func AnyXml(v interface{}, tags ...string) ([]byte, error) {
+	var rt, et string
+	if len(tags) == 1 || len(tags) == 2 {
+		rt = tags[0]
+	} else {
+		rt = DefaultRootTag
+	}
+	if len(tags) == 2 {
+		et = tags[1]
+	} else {
+		et = DefaultElementTag
+	}
+
+	if v == nil {
+		if useGoXmlEmptyElemSyntax {
+			return []byte("<" + rt + "></" + rt + ">"), nil
+		}
+		return []byte("<" + rt + "/>"), nil
+	}
+	if reflect.TypeOf(v).Kind() == reflect.Struct {
+		return xml.Marshal(v)
+	}
+
+	var err error
+	s := new(string)
+	p := new(pretty)
+
+	var ss string
+	var b []byte
+	switch v.(type) {
+	case []interface{}:
+		ss = "<" + rt + ">"
+		for _, vv := range v.([]interface{}) {
+			switch vv.(type) {
+			case map[string]interface{}:
+				m := vv.(map[string]interface{})
+				if len(m) == 1 {
+					for tag, val := range m {
+						err = mapToXmlIndent(false, s, tag, val, p)
+					}
+				} else {
+					err = mapToXmlIndent(false, s, et, vv, p)
+				}
+			default:
+				err = mapToXmlIndent(false, s, et, vv, p)
+			}
+			if err != nil {
+				break
+			}
+		}
+		ss += *s + "</" + rt + ">"
+		b = []byte(ss)
+	case map[string]interface{}:
+		m := Map(v.(map[string]interface{}))
+		b, err = m.Xml(rt)
+	default:
+		err = mapToXmlIndent(false, s, rt, v, p)
+		b = []byte(*s)
+	}
+
+	return b, err
+}
+
+// Encode an arbitrary value as a pretty XML string.
+// Alternative values for DefaultRootTag and DefaultElementTag can be set as:
+//	AnyXmlIndent(v, "", "  ", myRootTag, myElementTag).
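+//
+// A hedged sketch of a call (hypothetical map value; the output shape is
+// inferred from the AnyXml example above):
+//
+//	b, _ := AnyXmlIndent(map[string]interface{}{"name": "clbanning"}, "", "  ", "doc")
+//	// b is roughly:
+//	// <doc>
+//	//   <name>clbanning</name>
+//	// </doc>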
+func AnyXmlIndent(v interface{}, prefix, indent string, tags ...string) ([]byte, error) {
+	var rt, et string
+	if len(tags) == 1 || len(tags) == 2 {
+		rt = tags[0]
+	} else {
+		rt = DefaultRootTag
+	}
+	if len(tags) == 2 {
+		et = tags[1]
+	} else {
+		et = DefaultElementTag
+	}
+
+	if v == nil {
+		if useGoXmlEmptyElemSyntax {
+			return []byte(prefix + "<" + rt + "></" + rt + ">"), nil
+		}
+		return []byte(prefix + "<" + rt + "/>"), nil
+	}
+	if reflect.TypeOf(v).Kind() == reflect.Struct {
+		return xml.MarshalIndent(v, prefix, indent)
+	}
+
+	var err error
+	s := new(string)
+	p := new(pretty)
+	p.indent = indent
+	p.padding = prefix
+
+	var ss string
+	var b []byte
+	switch v.(type) {
+	case []interface{}:
+		ss = "<" + rt + ">\n"
+		p.Indent()
+		for _, vv := range v.([]interface{}) {
+			switch vv.(type) {
+			case map[string]interface{}:
+				m := vv.(map[string]interface{})
+				if len(m) == 1 {
+					for tag, val := range m {
+						err = mapToXmlIndent(true, s, tag, val, p)
+					}
+				} else {
+					p.start = 1 // we're 1 tag in
+					err = mapToXmlIndent(true, s, et, vv, p)
+					*s += "\n"
+				}
+			default:
+				p.start = 0 // in case trailing p.start = 1
+				err = mapToXmlIndent(true, s, et, vv, p)
+			}
+			if err != nil {
+				break
+			}
+		}
+		ss += *s + "</" + rt + ">"
+		b = []byte(ss)
+	case map[string]interface{}:
+		m := Map(v.(map[string]interface{}))
+		b, err = m.XmlIndent(prefix, indent, rt)
+	default:
+		err = mapToXmlIndent(true, s, rt, v, p)
+		b = []byte(*s)
+	}
+
+	return b, err
+}
diff --git a/vendor/github.com/clbanning/mxj/atomFeedString.xml b/vendor/github.com/clbanning/mxj/atomFeedString.xml
new file mode 100644
index 00000000000..474575a41ca
--- /dev/null
+++ b/vendor/github.com/clbanning/mxj/atomFeedString.xml
@@ -0,0 +1,54 @@
+
+Code Review - My issueshttp://codereview.appspot.com/rietveld<>rietveld: an attempt at pubsubhubbub
+2009-10-04T01:35:58+00:00email-address-removedurn:md5:134d9179c41f806be79b3a5f7877d19a
+ An attempt at adding pubsubhubbub support to Rietveld.
+http://code.google.com/p/pubsubhubbub
+http://code.google.com/p/rietveld/issues/detail?id=155
+
+The server side of the protocol is trivial:
+ 1. add a &lt;link rel=&quot;hub&quot; href=&quot;hub-server&quot;&gt; tag to all
+ feeds that will be pubsubhubbubbed.
+ 2. every time one of those feeds changes, tell the hub
+ with a simple POST request.
+
+I have tested this by adding debug prints to a local hub
+server and checking that the server got the right publish
+requests.
+
+I can&#39;t quite get the server to work, but I think the bug
+is not in my code. I think that the server expects to be
+able to grab the feed and see the feed&#39;s actual URL in
+the link rel=&quot;self&quot;, but the default value for that drops
+the :port from the URL, and I cannot for the life of me
+figure out how to get the Atom generator deep inside
+django not to do that, or even where it is doing that,
+or even what code is running to generate the Atom feed.
+(I thought I knew but I added some assert False statements
+and it kept running!)
+
+Ignoring that particular problem, I would appreciate
+feedback on the right way to get the two values at
+the top of feeds.py marked NOTE(rsc).
+
+
+rietveld: correct tab handling
+2009-10-03T23:02:17+00:00email-address-removedurn:md5:0a2a4f19bb815101f0ba2904aed7c35a
+ This fixes the buggy tab rendering that can be seen at
+http://codereview.appspot.com/116075/diff/1/2
+
+The fundamental problem was that the tab code was
+not being told what column the text began in, so it
+didn&#39;t know where to put the tab stops. 
Another problem +was that some of the code assumed that string byte +offsets were the same as column offsets, which is only +true if there are no tabs. + +In the process of fixing this, I cleaned up the arguments +to Fold and ExpandTabs and renamed them Break and +_ExpandTabs so that I could be sure that I found all the +call sites. I also wanted to verify that ExpandTabs was +not being used from outside intra_region_diff.py. + + + ` + diff --git a/vendor/github.com/clbanning/mxj/doc.go b/vendor/github.com/clbanning/mxj/doc.go new file mode 100644 index 00000000000..8ed79a5a77a --- /dev/null +++ b/vendor/github.com/clbanning/mxj/doc.go @@ -0,0 +1,134 @@ +// mxj - A collection of map[string]interface{} and associated XML and JSON utilities. +// Copyright 2012-2015, 2018 Charles Banning. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file + +/* +Marshal/Unmarshal XML to/from map[string]interface{} values (and JSON); extract/modify values from maps by key or key-path, including wildcards. + +mxj supplants the legacy x2j and j2x packages. The subpackage x2j-wrapper is provided to facilitate migrating from the x2j package. The x2j and j2x subpackages provide similar functionality of the old packages but are not function-name compatible with them. + +Note: this library was designed for processing ad hoc anonymous messages. Bulk processing large data sets may be much more efficiently performed using the encoding/xml or encoding/json packages from Go's standard library directly. + +Related Packages: + checkxml: github.com/clbanning/checkxml provides functions for validating XML data. + +Notes: + 2018.04.18: mv.Xml/mv.XmlIndent encodes non-map[string]interface{} map values - map[string]string, map[int]uint, etc. + 2018.03.29: mv.Gob/NewMapGob support gob encoding/decoding of Maps. + 2018.03.26: Added mxj/x2j-wrapper sub-package for migrating from legacy x2j package. + 2017.02.22: LeafNode paths can use ".N" syntax rather than "[N]" for list member indexing. + 2017.02.21: github.com/clbanning/checkxml provides functions for validating XML data. + 2017.02.10: SetFieldSeparator changes field separator for args in UpdateValuesForPath, ValuesFor... methods. + 2017.02.06: Support XMPP stream processing - HandleXMPPStreamTag(). + 2016.11.07: Preserve name space prefix syntax in XmlSeq parser - NewMapXmlSeq(), etc. + 2016.06.25: Support overriding default XML attribute prefix, "-", in Map keys - SetAttrPrefix(). + 2016.05.26: Support customization of xml.Decoder by exposing CustomDecoder variable. + 2016.03.19: Escape invalid chars when encoding XML attribute and element values - XMLEscapeChars(). + 2016.03.02: By default decoding XML with float64 and bool value casting will not cast "NaN", "Inf", and "-Inf". + To cast them to float64, first set flag with CastNanInf(true). + 2016.02.22: New mv.Root(), mv.Elements(), mv.Attributes methods let you examine XML document structure. + 2016.02.16: Add CoerceKeysToLower() option to handle tags with mixed capitalization. + 2016.02.12: Seek for first xml.StartElement token; only return error if io.EOF is reached first (handles BOM). + 2015-12-02: NewMapXmlSeq() with mv.XmlSeq() & co. will try to preserve structure of XML doc when re-encoding. + 2014-08-02: AnyXml() and AnyXmlIndent() will try to marshal arbitrary values to XML. 
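+
+   A quick taste (hypothetical values; the functions used here are summarized below):
+      mv, _ := NewMapXml([]byte(`<doc><name>x</name></doc>`)) // Map{"doc": map[string]interface{}{"name": "x"}}
+      j, _ := mv.Json()                                       // j is []byte(`{"doc":{"name":"x"}}`)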
+
+SUMMARY
+
+   type Map map[string]interface{}
+
+   Create a Map value, 'mv', from any map[string]interface{} value, 'v':
+      mv := Map(v)
+
+   Unmarshal / marshal XML as a Map value, 'mv':
+      mv, err := NewMapXml(xmlValue) // unmarshal
+      xmlValue, err := mv.Xml()      // marshal
+
+   Unmarshal XML from an io.Reader as a Map value, 'mv':
+      mv, err := NewMapXmlReader(xmlReader)         // repeated calls, as with an os.File Reader, will process stream
+      mv, raw, err := NewMapXmlReaderRaw(xmlReader) // 'raw' is the raw XML that was decoded
+
+   Marshal Map value, 'mv', to an XML Writer (io.Writer):
+      err := mv.XmlWriter(xmlWriter)
+      raw, err := mv.XmlWriterRaw(xmlWriter) // 'raw' is the raw XML that was written on xmlWriter
+
+   Also, for prettified output:
+      xmlValue, err := mv.XmlIndent(prefix, indent, ...)
+      err := mv.XmlIndentWriter(xmlWriter, prefix, indent, ...)
+      raw, err := mv.XmlIndentWriterRaw(xmlWriter, prefix, indent, ...)
+
+   Bulk process XML with error handling (note: handlers must return a boolean value):
+      err := HandleXmlReader(xmlReader, mapHandler(Map), errHandler(error))
+      err := HandleXmlReaderRaw(xmlReader, mapHandler(Map, []byte), errHandler(error, []byte))
+
+   Converting XML to JSON: see Examples for NewMapXml and HandleXmlReader.
+
+   There are comparable functions and methods for JSON processing.
+
+   Arbitrary structure values can be decoded to / encoded from Map values:
+      mv, err := NewMapStruct(structVal)
+      err := mv.Struct(structPointer)
+
+   To work with XML tag values, JSON or Map key values or structure field values, decode the XML, JSON
+   or structure to a Map value, 'mv', or cast a map[string]interface{} value to a Map value, 'mv', then:
+      paths := mv.PathsForKey(key)
+      path := mv.PathForKeyShortest(key)
+      values, err := mv.ValuesForKey(key, subkeys)
+      values, err := mv.ValuesForPath(path, subkeys) // 'path' can be dot-notation with wildcards and indexed arrays.
+      count, err := mv.UpdateValuesForPath(newVal, path, subkeys)
+
+   Get everything at once, irrespective of path depth:
+      leafnodes := mv.LeafNodes()
+      leafvalues := mv.LeafValues()
+
+   A new Map with whatever keys are desired can be created from the current Map and then encoded in XML
+   or JSON. (Note: keys can use dot-notation. 'oldKey' can also use wildcards and indexed arrays.)
+      newMap, err := mv.NewMap("oldKey_1:newKey_1", "oldKey_2:newKey_2", ..., "oldKey_N:newKey_N")
+      newMap, err := mv.NewMap("oldKey1", "oldKey3", "oldKey5") // a subset of 'mv'; see "examples/partial.go"
+      newXml, err := newMap.Xml()   // for example
+      newJson, err := newMap.Json() // ditto
+
+XML PARSING CONVENTIONS
+
+   Using NewMapXml()
+
+   - Attributes are parsed to `map[string]interface{}` values by prefixing a hyphen, `-`,
+     to the attribute label. (Unless overridden by `PrependAttrWithHyphen(false)` or
+     `SetAttrPrefix()`.)
+   - If the element is a simple element and has attributes, the element value
+     is given the key `#text` for its `map[string]interface{}` representation. (See
+     the 'atomFeedString.xml' test data, below.)
+   - XML comments, directives, and process instructions are ignored.
+   - If CoerceKeysToLower() has been called, then the resultant keys will be lower case.
+
+   Using NewMapXmlSeq()
+
+   - Attributes are parsed to `map["#attr"]map[<attr_label>]map[string]interface{}` values
+     where the `<attr_label>` value has "#text" and "#seq" keys - the "#text" key holds the
+     value for `<attr_label>`.
+   - All elements, except for the root, have a "#seq" key.
+   - Comments, directives, and process instructions are unmarshalled into the Map using the
+     keys "#comment", "#directive", and "#procinst", respectively. (See documentation for more
+     specifics.)
+   - Name space syntax is preserved:
+        - <ns:key>something</ns:key> parses to map["ns:key"]interface{}{"something"}
+        - xmlns:ns="http://myns.com/ns" parses to map["xmlns:ns"]interface{}{"http://myns.com/ns"}
+
+   Both
+
+   - By default, "NaN", "Inf", and "-Inf" values are not cast to float64. If you want them
+     to be cast, set a flag to cast them using CastNanInf(true).
+
+XML ENCODING CONVENTIONS
+
+   - 'nil' Map values, which may represent 'null' JSON values, are encoded as "<tag/>".
+     NOTE: the operation is not symmetric as "<tag/>" elements are decoded as 'tag:""' Map values,
+     which, then, encode in JSON as '"tag":""' values.
+   - ALSO: there is no guarantee that the encoded XML doc will be the same as the decoded one. (Go
+     randomizes the walk through map[string]interface{} values.) If you plan to re-encode the
+     Map value to XML and want the same sequencing of elements look at NewMapXmlSeq() and
+     mv.XmlSeq() - these try to preserve the element sequencing but with added complexity when
+     working with the Map representation.
+
+*/
+package mxj
diff --git a/vendor/github.com/clbanning/mxj/escapechars.go b/vendor/github.com/clbanning/mxj/escapechars.go
new file mode 100644
index 00000000000..bee0442c9c1
--- /dev/null
+++ b/vendor/github.com/clbanning/mxj/escapechars.go
@@ -0,0 +1,54 @@
+// Copyright 2016 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+package mxj
+
+import (
+	"bytes"
+)
+
+var xmlEscapeChars bool
+
+// XMLEscapeChars(true) forces escaping invalid characters in attribute and element values.
+// NOTE: this is brute force with NO interrogation of '&' being escaped already; if it is
+// then '&' will be re-escaped as '&amp;'.
+//
+/*
+	The values are:
+	"	&quot;
+	'	&apos;
+	<	&lt;
+	>	&gt;
+	&	&amp;
+*/
+func XMLEscapeChars(b bool) {
+	xmlEscapeChars = b
+}
+
+// Scan for '&' first, since 's' may contain "&" that is parsed to "&amp;"
+// - or "<" that is parsed to "&lt;".
+var escapechars = [][2][]byte{
+	{[]byte(`&`), []byte(`&amp;`)},
+	{[]byte(`<`), []byte(`&lt;`)},
+	{[]byte(`>`), []byte(`&gt;`)},
+	{[]byte(`"`), []byte(`&quot;`)},
+	{[]byte(`'`), []byte(`&apos;`)},
+}
+
+func escapeChars(s string) string {
+	if len(s) == 0 {
+		return s
+	}
+
+	b := []byte(s)
+	for _, v := range escapechars {
+		n := bytes.Count(b, v[0])
+		if n == 0 {
+			continue
+		}
+		b = bytes.Replace(b, v[0], v[1], n)
+	}
+	return string(b)
+}
diff --git a/vendor/github.com/clbanning/mxj/exists.go b/vendor/github.com/clbanning/mxj/exists.go
new file mode 100644
index 00000000000..2fb3084b599
--- /dev/null
+++ b/vendor/github.com/clbanning/mxj/exists.go
@@ -0,0 +1,7 @@
+package mxj
+
+// Exists checks whether the path exists in the Map.
+func (mv Map) Exists(path string, subkeys ...string) bool {
+	v, err := mv.ValuesForPath(path, subkeys...)
+	return err == nil && len(v) > 0
+}
diff --git a/vendor/github.com/clbanning/mxj/files.go b/vendor/github.com/clbanning/mxj/files.go
new file mode 100644
index 00000000000..27e06e1e801
--- /dev/null
+++ b/vendor/github.com/clbanning/mxj/files.go
@@ -0,0 +1,287 @@
+package mxj
+
+import (
+	"fmt"
+	"io"
+	"os"
+)
+
+type Maps []Map
+
+func NewMaps() Maps {
+	return make(Maps, 0)
+}
+
+type MapRaw struct {
+	M Map
+	R []byte
+}
+
+// NewMapsFromJsonFile - creates an array of Map values from a file of JSON values.
+func NewMapsFromJsonFile(name string) (Maps, error) { + fi, err := os.Stat(name) + if err != nil { + return nil, err + } + if !fi.Mode().IsRegular() { + return nil, fmt.Errorf("file %s is not a regular file", name) + } + + fh, err := os.Open(name) + if err != nil { + return nil, err + } + defer fh.Close() + + am := make([]Map, 0) + for { + m, raw, err := NewMapJsonReaderRaw(fh) + if err != nil && err != io.EOF { + return am, fmt.Errorf("error: %s - reading: %s", err.Error(), string(raw)) + } + if len(m) > 0 { + am = append(am, m) + } + if err == io.EOF { + break + } + } + return am, nil +} + +// ReadMapsFromJsonFileRaw - creates an array of MapRaw from a file of JSON values. +func NewMapsFromJsonFileRaw(name string) ([]MapRaw, error) { + fi, err := os.Stat(name) + if err != nil { + return nil, err + } + if !fi.Mode().IsRegular() { + return nil, fmt.Errorf("file %s is not a regular file", name) + } + + fh, err := os.Open(name) + if err != nil { + return nil, err + } + defer fh.Close() + + am := make([]MapRaw, 0) + for { + mr := new(MapRaw) + mr.M, mr.R, err = NewMapJsonReaderRaw(fh) + if err != nil && err != io.EOF { + return am, fmt.Errorf("error: %s - reading: %s", err.Error(), string(mr.R)) + } + if len(mr.M) > 0 { + am = append(am, *mr) + } + if err == io.EOF { + break + } + } + return am, nil +} + +// NewMapsFromXmlFile - creates an array from a file of XML values. +func NewMapsFromXmlFile(name string) (Maps, error) { + fi, err := os.Stat(name) + if err != nil { + return nil, err + } + if !fi.Mode().IsRegular() { + return nil, fmt.Errorf("file %s is not a regular file", name) + } + + fh, err := os.Open(name) + if err != nil { + return nil, err + } + defer fh.Close() + + am := make([]Map, 0) + for { + m, raw, err := NewMapXmlReaderRaw(fh) + if err != nil && err != io.EOF { + return am, fmt.Errorf("error: %s - reading: %s", err.Error(), string(raw)) + } + if len(m) > 0 { + am = append(am, m) + } + if err == io.EOF { + break + } + } + return am, nil +} + +// NewMapsFromXmlFileRaw - creates an array of MapRaw from a file of XML values. +// NOTE: the slice with the raw XML is clean with no extra capacity - unlike NewMapXmlReaderRaw(). +// It is slow at parsing a file from disk and is intended for relatively small utility files. +func NewMapsFromXmlFileRaw(name string) ([]MapRaw, error) { + fi, err := os.Stat(name) + if err != nil { + return nil, err + } + if !fi.Mode().IsRegular() { + return nil, fmt.Errorf("file %s is not a regular file", name) + } + + fh, err := os.Open(name) + if err != nil { + return nil, err + } + defer fh.Close() + + am := make([]MapRaw, 0) + for { + mr := new(MapRaw) + mr.M, mr.R, err = NewMapXmlReaderRaw(fh) + if err != nil && err != io.EOF { + return am, fmt.Errorf("error: %s - reading: %s", err.Error(), string(mr.R)) + } + if len(mr.M) > 0 { + am = append(am, *mr) + } + if err == io.EOF { + break + } + } + return am, nil +} + +// ------------------------ Maps writing ------------------------- +// These are handy-dandy methods for dumping configuration data, etc. 
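+//
+// A minimal sketch (m1 and m2 are hypothetical Map values):
+//
+//	mvs := Maps{m1, m2}
+//	if err := mvs.JsonFile("dump.json"); err != nil {
+//		// handle error
+//	}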
+ +// JsonString - analogous to mv.Json() +func (mvs Maps) JsonString(safeEncoding ...bool) (string, error) { + var s string + for _, v := range mvs { + j, err := v.Json() + if err != nil { + return s, err + } + s += string(j) + } + return s, nil +} + +// JsonStringIndent - analogous to mv.JsonIndent() +func (mvs Maps) JsonStringIndent(prefix, indent string, safeEncoding ...bool) (string, error) { + var s string + var haveFirst bool + for _, v := range mvs { + j, err := v.JsonIndent(prefix, indent) + if err != nil { + return s, err + } + if haveFirst { + s += "\n" + } else { + haveFirst = true + } + s += string(j) + } + return s, nil +} + +// XmlString - analogous to mv.Xml() +func (mvs Maps) XmlString() (string, error) { + var s string + for _, v := range mvs { + x, err := v.Xml() + if err != nil { + return s, err + } + s += string(x) + } + return s, nil +} + +// XmlStringIndent - analogous to mv.XmlIndent() +func (mvs Maps) XmlStringIndent(prefix, indent string) (string, error) { + var s string + for _, v := range mvs { + x, err := v.XmlIndent(prefix, indent) + if err != nil { + return s, err + } + s += string(x) + } + return s, nil +} + +// JsonFile - write Maps to named file as JSON +// Note: the file will be created, if necessary; if it exists it will be truncated. +// If you need to append to a file, open it and use JsonWriter method. +func (mvs Maps) JsonFile(file string, safeEncoding ...bool) error { + var encoding bool + if len(safeEncoding) == 1 { + encoding = safeEncoding[0] + } + s, err := mvs.JsonString(encoding) + if err != nil { + return err + } + fh, err := os.Create(file) + if err != nil { + return err + } + defer fh.Close() + fh.WriteString(s) + return nil +} + +// JsonFileIndent - write Maps to named file as pretty JSON +// Note: the file will be created, if necessary; if it exists it will be truncated. +// If you need to append to a file, open it and use JsonIndentWriter method. +func (mvs Maps) JsonFileIndent(file, prefix, indent string, safeEncoding ...bool) error { + var encoding bool + if len(safeEncoding) == 1 { + encoding = safeEncoding[0] + } + s, err := mvs.JsonStringIndent(prefix, indent, encoding) + if err != nil { + return err + } + fh, err := os.Create(file) + if err != nil { + return err + } + defer fh.Close() + fh.WriteString(s) + return nil +} + +// XmlFile - write Maps to named file as XML +// Note: the file will be created, if necessary; if it exists it will be truncated. +// If you need to append to a file, open it and use XmlWriter method. +func (mvs Maps) XmlFile(file string) error { + s, err := mvs.XmlString() + if err != nil { + return err + } + fh, err := os.Create(file) + if err != nil { + return err + } + defer fh.Close() + fh.WriteString(s) + return nil +} + +// XmlFileIndent - write Maps to named file as pretty XML +// Note: the file will be created,if necessary; if it exists it will be truncated. +// If you need to append to a file, open it and use XmlIndentWriter method. 
+func (mvs Maps) XmlFileIndent(file, prefix, indent string) error { + s, err := mvs.XmlStringIndent(prefix, indent) + if err != nil { + return err + } + fh, err := os.Create(file) + if err != nil { + return err + } + defer fh.Close() + fh.WriteString(s) + return nil +} diff --git a/vendor/github.com/clbanning/mxj/files_test.badjson b/vendor/github.com/clbanning/mxj/files_test.badjson new file mode 100644 index 00000000000..d18720044ac --- /dev/null +++ b/vendor/github.com/clbanning/mxj/files_test.badjson @@ -0,0 +1,2 @@ +{ "this":"is", "a":"test", "file":"for", "files_test.go":"case" } +{ "with":"some", "bad":JSON, "in":"it" } diff --git a/vendor/github.com/clbanning/mxj/files_test.badxml b/vendor/github.com/clbanning/mxj/files_test.badxml new file mode 100644 index 00000000000..4736ef973dd --- /dev/null +++ b/vendor/github.com/clbanning/mxj/files_test.badxml @@ -0,0 +1,9 @@ + + test + for files.go + + + some + doc + test case + diff --git a/vendor/github.com/clbanning/mxj/files_test.json b/vendor/github.com/clbanning/mxj/files_test.json new file mode 100644 index 00000000000..e9a3ddf40ec --- /dev/null +++ b/vendor/github.com/clbanning/mxj/files_test.json @@ -0,0 +1,2 @@ +{ "this":"is", "a":"test", "file":"for", "files_test.go":"case" } +{ "with":"just", "two":2, "JSON":"values", "true":true } diff --git a/vendor/github.com/clbanning/mxj/files_test.xml b/vendor/github.com/clbanning/mxj/files_test.xml new file mode 100644 index 00000000000..65cf021fb70 --- /dev/null +++ b/vendor/github.com/clbanning/mxj/files_test.xml @@ -0,0 +1,9 @@ + + test + for files.go + + + some + doc + test case + diff --git a/vendor/github.com/clbanning/mxj/files_test_dup.json b/vendor/github.com/clbanning/mxj/files_test_dup.json new file mode 100644 index 00000000000..2becb6a4512 --- /dev/null +++ b/vendor/github.com/clbanning/mxj/files_test_dup.json @@ -0,0 +1 @@ +{"a":"test","file":"for","files_test.go":"case","this":"is"}{"JSON":"values","true":true,"two":2,"with":"just"} \ No newline at end of file diff --git a/vendor/github.com/clbanning/mxj/files_test_dup.xml b/vendor/github.com/clbanning/mxj/files_test_dup.xml new file mode 100644 index 00000000000..f68d22e28ea --- /dev/null +++ b/vendor/github.com/clbanning/mxj/files_test_dup.xml @@ -0,0 +1 @@ +for files.gotestdoctest casesome \ No newline at end of file diff --git a/vendor/github.com/clbanning/mxj/files_test_indent.json b/vendor/github.com/clbanning/mxj/files_test_indent.json new file mode 100644 index 00000000000..6fde15634df --- /dev/null +++ b/vendor/github.com/clbanning/mxj/files_test_indent.json @@ -0,0 +1,12 @@ +{ + "a": "test", + "file": "for", + "files_test.go": "case", + "this": "is" +} +{ + "JSON": "values", + "true": true, + "two": 2, + "with": "just" +} \ No newline at end of file diff --git a/vendor/github.com/clbanning/mxj/files_test_indent.xml b/vendor/github.com/clbanning/mxj/files_test_indent.xml new file mode 100644 index 00000000000..8c91a1dc20a --- /dev/null +++ b/vendor/github.com/clbanning/mxj/files_test_indent.xml @@ -0,0 +1,8 @@ + + for files.go + test + + doc + test case + some + \ No newline at end of file diff --git a/vendor/github.com/clbanning/mxj/gob.go b/vendor/github.com/clbanning/mxj/gob.go new file mode 100644 index 00000000000..d56c2fd6fe8 --- /dev/null +++ b/vendor/github.com/clbanning/mxj/gob.go @@ -0,0 +1,35 @@ +// gob.go - Encode/Decode a Map into a gob object. 
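+//
+// A round-trip sketch (mv is a hypothetical Map value):
+//
+//	b, err := mv.Gob()       // encode the Map as gob bytes
+//	m2, err := NewMapGob(b)  // decode those bytes back to a Map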
+ +package mxj + +import ( + "bytes" + "encoding/gob" +) + +// NewMapGob returns a Map value for a gob object that has been +// encoded from a map[string]interface{} (or compatible type) value. +// It is intended to provide symmetric handling of Maps that have +// been encoded using mv.Gob. +func NewMapGob(gobj []byte) (Map, error) { + m := make(map[string]interface{}, 0) + if len(gobj) == 0 { + return m, nil + } + r := bytes.NewReader(gobj) + dec := gob.NewDecoder(r) + if err := dec.Decode(&m); err != nil { + return m, err + } + return m, nil +} + +// Gob returns a gob-encoded value for the Map 'mv'. +func (mv Map) Gob() ([]byte, error) { + var buf bytes.Buffer + enc := gob.NewEncoder(&buf) + if err := enc.Encode(map[string]interface{}(mv)); err != nil { + return nil, err + } + return buf.Bytes(), nil +} diff --git a/vendor/github.com/clbanning/mxj/json.go b/vendor/github.com/clbanning/mxj/json.go new file mode 100644 index 00000000000..eb2c05a1869 --- /dev/null +++ b/vendor/github.com/clbanning/mxj/json.go @@ -0,0 +1,323 @@ +// Copyright 2012-2014 Charles Banning. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file + +package mxj + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "time" +) + +// ------------------------------ write JSON ----------------------- + +// Just a wrapper on json.Marshal. +// If option safeEncoding is'true' then safe encoding of '<', '>' and '&' +// is preserved. (see encoding/json#Marshal, encoding/json#Encode) +func (mv Map) Json(safeEncoding ...bool) ([]byte, error) { + var s bool + if len(safeEncoding) == 1 { + s = safeEncoding[0] + } + + b, err := json.Marshal(mv) + + if !s { + b = bytes.Replace(b, []byte("\\u003c"), []byte("<"), -1) + b = bytes.Replace(b, []byte("\\u003e"), []byte(">"), -1) + b = bytes.Replace(b, []byte("\\u0026"), []byte("&"), -1) + } + return b, err +} + +// Just a wrapper on json.MarshalIndent. +// If option safeEncoding is'true' then safe encoding of '<' , '>' and '&' +// is preserved. (see encoding/json#Marshal, encoding/json#Encode) +func (mv Map) JsonIndent(prefix, indent string, safeEncoding ...bool) ([]byte, error) { + var s bool + if len(safeEncoding) == 1 { + s = safeEncoding[0] + } + + b, err := json.MarshalIndent(mv, prefix, indent) + if !s { + b = bytes.Replace(b, []byte("\\u003c"), []byte("<"), -1) + b = bytes.Replace(b, []byte("\\u003e"), []byte(">"), -1) + b = bytes.Replace(b, []byte("\\u0026"), []byte("&"), -1) + } + return b, err +} + +// The following implementation is provided for symmetry with NewMapJsonReader[Raw] +// The names will also provide a key for the number of return arguments. + +// Writes the Map as JSON on the Writer. +// If 'safeEncoding' is 'true', then "safe" encoding of '<', '>' and '&' is preserved. +func (mv Map) JsonWriter(jsonWriter io.Writer, safeEncoding ...bool) error { + b, err := mv.Json(safeEncoding...) + if err != nil { + return err + } + + _, err = jsonWriter.Write(b) + return err +} + +// Writes the Map as JSON on the Writer. []byte is the raw JSON that was written. +// If 'safeEncoding' is 'true', then "safe" encoding of '<', '>' and '&' is preserved. +func (mv Map) JsonWriterRaw(jsonWriter io.Writer, safeEncoding ...bool) ([]byte, error) { + b, err := mv.Json(safeEncoding...) + if err != nil { + return b, err + } + + _, err = jsonWriter.Write(b) + return b, err +} + +// Writes the Map as pretty JSON on the Writer. +// If 'safeEncoding' is 'true', then "safe" encoding of '<', '>' and '&' is preserved. 
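+//
+// For example (hypothetical call, writing to standard output):
+//
+//	err := mv.JsonIndentWriter(os.Stdout, "", "  ")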
+func (mv Map) JsonIndentWriter(jsonWriter io.Writer, prefix, indent string, safeEncoding ...bool) error {
+	b, err := mv.JsonIndent(prefix, indent, safeEncoding...)
+	if err != nil {
+		return err
+	}
+
+	_, err = jsonWriter.Write(b)
+	return err
+}
+
+// JsonIndentWriterRaw writes the Map as pretty JSON on the Writer. []byte is the raw JSON that was written.
+// If 'safeEncoding' is 'true', then "safe" encoding of '<', '>' and '&' is preserved.
+func (mv Map) JsonIndentWriterRaw(jsonWriter io.Writer, prefix, indent string, safeEncoding ...bool) ([]byte, error) {
+	b, err := mv.JsonIndent(prefix, indent, safeEncoding...)
+	if err != nil {
+		return b, err
+	}
+
+	_, err = jsonWriter.Write(b)
+	return b, err
+}
+
+// --------------------------- read JSON -----------------------------
+
+// JsonUseNumber: decode numeric values as json.Number type Map values - see encoding/json#Number.
+// NOTE: this is for decoding JSON into a Map with NewMapJson(), NewMapJsonReader(),
+// etc.; it does not affect NewMapXml(), etc. The XML encoders mv.Xml() and mv.XmlIndent()
+// do recognize json.Number types; a JSON object can be decoded to a Map with json.Number
+// value types and the resulting Map can be correctly encoded into an XML object.
+var JsonUseNumber bool
+
+// NewMapJson is just a wrapper on json.Unmarshal.
+// Converting JSON to XML is as simple as:
+//	...
+//	mapVal, merr := mxj.NewMapJson(jsonVal)
+//	if merr != nil {
+//		// handle error
+//	}
+//	xmlVal, xerr := mapVal.Xml()
+//	if xerr != nil {
+//		// handle error
+//	}
+// NOTE: as a special case, passing a list, e.g., [{"some-null-value":"", "a-non-null-value":"bar"}],
+// will be interpreted as having the root key 'object' prepended - {"object":[ ... ]} - to unmarshal to a Map.
+// See mxj/j2x/j2x_test.go.
+func NewMapJson(jsonVal []byte) (Map, error) {
+	// empty or nil begets empty
+	if len(jsonVal) == 0 {
+		m := make(map[string]interface{}, 0)
+		return m, nil
+	}
+	// handle a goofy case ...
+	if jsonVal[0] == '[' {
+		jsonVal = []byte(`{"object":` + string(jsonVal) + `}`)
+	}
+	m := make(map[string]interface{})
+	// err := json.Unmarshal(jsonVal, &m)
+	buf := bytes.NewReader(jsonVal)
+	dec := json.NewDecoder(buf)
+	if JsonUseNumber {
+		dec.UseNumber()
+	}
+	err := dec.Decode(&m)
+	return m, err
+}
+
+// NewMapJsonReader retrieves a Map value from an io.Reader.
+// NOTE: The raw JSON off the reader is buffered to []byte using a ByteReader. If the io.Reader is an
+// os.File, there may be significant performance impact. If the io.Reader is wrapping a []byte
+// value in-memory, however, such as http.Request.Body, you CAN use it to efficiently unmarshal
+// a JSON object.
+func NewMapJsonReader(jsonReader io.Reader) (Map, error) {
+	jb, err := getJson(jsonReader)
+	if err != nil || len(*jb) == 0 {
+		return nil, err
+	}
+
+	// Unmarshal the 'presumed' JSON string
+	return NewMapJson(*jb)
+}
+
+// NewMapJsonReaderRaw retrieves a Map value and raw JSON - []byte - from an io.Reader.
+// NOTE: The raw JSON off the reader is buffered to []byte using a ByteReader. If the io.Reader is an
+// os.File, there may be significant performance impact. If the io.Reader is wrapping a []byte
+// value in-memory, however, such as http.Request.Body, you CAN use it to efficiently unmarshal
+// a JSON object and retrieve the raw JSON in a single call.
+func NewMapJsonReaderRaw(jsonReader io.Reader) (Map, []byte, error) {
+	jb, err := getJson(jsonReader)
+	if err != nil || len(*jb) == 0 {
+		return nil, *jb, err
+	}
+
+	// Unmarshal the 'presumed' JSON string
+	m, merr := NewMapJson(*jb)
+	return m, *jb, merr
+}
+
+// getJson pulls the next JSON string off the stream: it just reads from the first '{' to its closing '}'.
+// Returning a pointer to the slice saves 16 bytes - maybe unnecessary, but internal to package.
+func getJson(rdr io.Reader) (*[]byte, error) {
+	bval := make([]byte, 1)
+	jb := make([]byte, 0)
+	var inQuote, inJson bool
+	var parenCnt int
+	var previous byte
+
+	// scan the input for a matched set of {...}
+	// json.Unmarshal will handle syntax checking.
+	for {
+		_, err := rdr.Read(bval)
+		if err != nil {
+			if err == io.EOF && inJson && parenCnt > 0 {
+				return &jb, fmt.Errorf("no closing } for JSON string: %s", string(jb))
+			}
+			return &jb, err
+		}
+		switch bval[0] {
+		case '{':
+			if !inQuote {
+				parenCnt++
+				inJson = true
+			}
+		case '}':
+			if !inQuote {
+				parenCnt--
+			}
+			if parenCnt < 0 {
+				return nil, fmt.Errorf("closing } without opening {: %s", string(jb))
+			}
+		case '"':
+			if inQuote {
+				if previous == '\\' {
+					break
+				}
+				inQuote = false
+			} else {
+				inQuote = true
+			}
+		case '\n', '\r', '\t', ' ':
+			if !inQuote {
+				continue
+			}
+		}
+		if inJson {
+			jb = append(jb, bval[0])
+			if parenCnt == 0 {
+				break
+			}
+		}
+		previous = bval[0]
+	}
+
+	return &jb, nil
+}
+
+// ------------------------------- JSON Reader handler via Map values -----------------------
+
+// Default poll delay to keep Handler from spinning on an open stream
+// like sitting on os.Stdin waiting for input.
+var jhandlerPollInterval = time.Duration(1e6)
+
+// While unnecessary, we make HandleJsonReader() have the same signature as HandleXmlReader().
+// This avoids treating one or the other as a special case and discussing the underlying stdlib logic.
+
+// HandleJsonReader bulk processes JSON using handlers that process a Map value.
+// 'rdr' is an io.Reader for the JSON (stream).
+// 'mapHandler' is the Map processing handler. Return of 'false' stops io.Reader processing.
+// 'errHandler' is the error processor. Return of 'false' stops io.Reader processing and returns the error.
+// Note: mapHandler() and errHandler() calls are blocking, so reading and processing of messages is serialized.
+// This means that you can stop reading the file on error or after processing a particular message.
+// To have reading and handling run concurrently, pass the argument to a go routine in the handler and return 'true'.
+func HandleJsonReader(jsonReader io.Reader, mapHandler func(Map) bool, errHandler func(error) bool) error {
+	var n int
+	for {
+		m, merr := NewMapJsonReader(jsonReader)
+		n++
+
+		// handle error condition with errhandler
+		if merr != nil && merr != io.EOF {
+			merr = fmt.Errorf("[jsonReader: %d] %s", n, merr.Error())
+			if ok := errHandler(merr); !ok {
+				// caused reader termination
+				return merr
+			}
+			continue
+		}
+
+		// pass to maphandler
+		if len(m) != 0 {
+			if ok := mapHandler(m); !ok {
+				break
+			}
+		} else if merr != io.EOF {
+			<-time.After(jhandlerPollInterval)
+		}
+
+		if merr == io.EOF {
+			break
+		}
+	}
+	return nil
+}
+
+// HandleJsonReaderRaw bulk processes JSON using handlers that process a Map value and the raw JSON.
+// 'rdr' is an io.Reader for the JSON (stream).
+// 'mapHandler' is the Map and raw JSON - []byte - processor. Return of 'false' stops io.Reader processing.
+// 'errHandler' is the error and raw JSON processor. Return of 'false' stops io.Reader processing and returns the error.
+// Note: mapHandler() and errHandler() calls are blocking, so reading and processing of messages is serialized.
+// This means that you can stop reading the file on error or after processing a particular message.
+// To have reading and handling run concurrently, pass the argument(s) to a go routine in the handler and return 'true'.
+func HandleJsonReaderRaw(jsonReader io.Reader, mapHandler func(Map, []byte) bool, errHandler func(error, []byte) bool) error {
+	var n int
+	for {
+		m, raw, merr := NewMapJsonReaderRaw(jsonReader)
+		n++
+
+		// handle error condition with errhandler
+		if merr != nil && merr != io.EOF {
+			merr = fmt.Errorf("[jsonReader: %d] %s", n, merr.Error())
+			if ok := errHandler(merr, raw); !ok {
+				// caused reader termination
+				return merr
+			}
+			continue
+		}
+
+		// pass to maphandler
+		if len(m) != 0 {
+			if ok := mapHandler(m, raw); !ok {
+				break
+			}
+		} else if merr != io.EOF {
+			<-time.After(jhandlerPollInterval)
+		}
+
+		if merr == io.EOF {
+			break
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/clbanning/mxj/keyvalues.go b/vendor/github.com/clbanning/mxj/keyvalues.go
new file mode 100644
index 00000000000..0b244c879ce
--- /dev/null
+++ b/vendor/github.com/clbanning/mxj/keyvalues.go
@@ -0,0 +1,671 @@
+// Copyright 2012-2014 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+// keyvalues.go: Extract values from an arbitrary XML doc. Tag path can include wildcard characters.
+
+package mxj
+
+import (
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// ----------------------------- get everything FOR a single key -------------------------
+
+const (
+	minArraySize = 32
+)
+
+var defaultArraySize int = minArraySize
+
+// SetArraySize adjusts the buffers for the expected number of values to return from ValuesForKey() and ValuesForPath().
+// This can have the effect of significantly reducing memory allocation-copy functions for large data sets.
+// Returns the initial buffer size.
+func SetArraySize(size int) int {
+	if size > minArraySize {
+		defaultArraySize = size
+	} else {
+		defaultArraySize = minArraySize
+	}
+	return defaultArraySize
+}
+
+// ValuesForKey returns all values in Map, 'mv', associated with a 'key'. If len(returned_values) == 0, then no match.
+// On error, the returned slice is 'nil'. NOTE: 'key' can be the wildcard, "*".
+// 'subkeys' (optional) are "key:val[:type]" strings representing attributes or elements in a list.
+//   - By default 'val' is of type string. Use "key:val:bool" and "key:val:float" to coerce them.
+//   - For attributes prefix the label with a hyphen, '-', e.g., "-seq:3".
+//   - If the 'key' refers to a list, then "key:value" could select a list member of the list.
+//   - The subkey can be wildcarded - "key:*" - to require that it's there with some value.
+//   - If a subkey is preceded with the '!' character, the key:value[:type] entry is treated as an
+//     exclusion criterion - e.g., "!author:William T. Gaddis".
+//   - If val contains a ":" symbol, use SetFieldSeparator to set an unused symbol, perhaps "|".
+func (mv Map) ValuesForKey(key string, subkeys ...string) ([]interface{}, error) {
+	m := map[string]interface{}(mv)
+	var subKeyMap map[string]interface{}
+	if len(subkeys) > 0 {
+		var err error
+		subKeyMap, err = getSubKeyMap(subkeys...)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	ret := make([]interface{}, 0, defaultArraySize)
+	var cnt int
+	hasKey(m, key, &ret, &cnt, subKeyMap)
+	return ret[:cnt], nil
+}
+
+var KeyNotExistError = errors.New("Key does not exist")
+
+// ValueForKey is a wrapper on ValuesForKey. It returns the first member of []interface{}, if any.
+// If there is no value, "nil, nil" is returned.
+func (mv Map) ValueForKey(key string, subkeys ...string) (interface{}, error) {
+	vals, err := mv.ValuesForKey(key, subkeys...)
+	if err != nil {
+		return nil, err
+	}
+	if len(vals) == 0 {
+		return nil, KeyNotExistError
+	}
+	return vals[0], nil
+}
+
+// hasKey - if the map 'key' exists, append it to the array;
+// if it doesn't, do nothing except scan array and map values.
+func hasKey(iv interface{}, key string, ret *[]interface{}, cnt *int, subkeys map[string]interface{}) {
+	// func hasKey(iv interface{}, key string, ret *[]interface{}, subkeys map[string]interface{}) {
+	switch iv.(type) {
+	case map[string]interface{}:
+		vv := iv.(map[string]interface{})
+		// see if the current value is of interest
+		if v, ok := vv[key]; ok {
+			switch v.(type) {
+			case map[string]interface{}:
+				if hasSubKeys(v, subkeys) {
+					*ret = append(*ret, v)
+					*cnt++
+				}
+			case []interface{}:
+				for _, av := range v.([]interface{}) {
+					if hasSubKeys(av, subkeys) {
+						*ret = append(*ret, av)
+						*cnt++
+					}
+				}
+			default:
+				if len(subkeys) == 0 {
+					*ret = append(*ret, v)
+					*cnt++
+				}
+			}
+		}
+
+		// wildcard case
+		if key == "*" {
+			for _, v := range vv {
+				switch v.(type) {
+				case map[string]interface{}:
+					if hasSubKeys(v, subkeys) {
+						*ret = append(*ret, v)
+						*cnt++
+					}
+				case []interface{}:
+					for _, av := range v.([]interface{}) {
+						if hasSubKeys(av, subkeys) {
+							*ret = append(*ret, av)
+							*cnt++
+						}
+					}
+				default:
+					if len(subkeys) == 0 {
+						*ret = append(*ret, v)
+						*cnt++
+					}
+				}
+			}
+		}
+
+		// scan the rest
+		for _, v := range vv {
+			hasKey(v, key, ret, cnt, subkeys)
+		}
+	case []interface{}:
+		for _, v := range iv.([]interface{}) {
+			hasKey(v, key, ret, cnt, subkeys)
+		}
+	}
+}
+
+// ----------------------- get everything for a node in the Map ---------------------------
+
+// Allow indexed arrays in "path" specification. (Request from Abhijit Kadam - abhijitk100@gmail.com.)
+// 2014.04.28 - implementation note.
+// Implemented as a wrapper of (old)ValuesForPath() because we need look-ahead logic to handle expansion
+// of wildcards and unindexed arrays. Embedding such logic into valuesForKeyPath() would have made the
+// code much more complicated; this wrapper is straightforward, easy to debug, and doesn't add significant overhead.
+
+// ValuesForPath retrieves all values for a path from the Map. If len(returned_values) == 0, then no match.
+// On error, the returned array is 'nil'.
+// 'path' is a dot-separated path of key values.
+//   - If a node in the path is '*', then everything beyond is walked.
+//   - 'path' can contain indexed array references, such as "*.data[1]" and "msgs[2].data[0].field" -
+//     even "*[2].*[0].field".
+// 'subkeys' (optional) are "key:val[:type]" strings representing attributes or elements in a list.
+//   - By default 'val' is of type string. Use "key:val:bool" and "key:val:float" to coerce them.
+//   - For attributes prefix the label with a hyphen, '-', e.g., "-seq:3".
+//   - If the 'path' refers to a list, then "tag:value" would return a member of the list.
+//   - The subkey can be wildcarded - "key:*" - to require that it's there with some value.
+//   - If a subkey is preceded with the '!' character, the key:value[:type] entry is treated as an
+//     exclusion criterion - e.g., "!author:William T. Gaddis".
+//   - If val contains a ":" symbol, use SetFieldSeparator to set an unused symbol, perhaps "|".
+func (mv Map) ValuesForPath(path string, subkeys ...string) ([]interface{}, error) {
+	// If there are no array indexes in path, use legacy ValuesForPath() logic.
+	if strings.Index(path, "[") < 0 {
+		return mv.oldValuesForPath(path, subkeys...)
+	}
+
+	var subKeyMap map[string]interface{}
+	if len(subkeys) > 0 {
+		var err error
+		subKeyMap, err = getSubKeyMap(subkeys...)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	keys, kerr := parsePath(path)
+	if kerr != nil {
+		return nil, kerr
+	}
+
+	vals, verr := valuesForArray(keys, mv)
+	if verr != nil {
+		return nil, verr // vals may be nil, but return empty array.
+	}
+
+	// Need to handle subkeys ... only return members of vals that satisfy conditions.
+	retvals := make([]interface{}, 0)
+	for _, v := range vals {
+		if hasSubKeys(v, subKeyMap) {
+			retvals = append(retvals, v)
+		}
+	}
+	return retvals, nil
+}
+
+func valuesForArray(keys []*key, m Map) ([]interface{}, error) {
+	var tmppath string
+	var haveFirst bool
+	var vals []interface{}
+	var verr error
+
+	lastkey := len(keys) - 1
+	for i := 0; i <= lastkey; i++ {
+		if !haveFirst {
+			tmppath = keys[i].name
+			haveFirst = true
+		} else {
+			tmppath += "." + keys[i].name
+		}
+
+		// Look-ahead: explode wildcards and unindexed arrays.
+		// Need to handle un-indexed list recursively:
+		// e.g., path is "stuff.data[0]" rather than "stuff[0].data[0]".
+		// Need to treat it as "stuff[0].data[0]", "stuff[1].data[0]", ...
+		if !keys[i].isArray && i < lastkey && keys[i+1].isArray {
+			// Can't pass subkeys because we may not be at literal end of path.
+			vv, vverr := m.oldValuesForPath(tmppath)
+			if vverr != nil {
+				return nil, vverr
+			}
+			for _, v := range vv {
+				// See if we can walk the value.
+				am, ok := v.(map[string]interface{})
+				if !ok {
+					continue
+				}
+				// Work the backend.
+				nvals, nvalserr := valuesForArray(keys[i+1:], Map(am))
+				if nvalserr != nil {
+					return nil, nvalserr
+				}
+				vals = append(vals, nvals...)
+			}
+			break // have recursed the whole path - return
+		}
+
+		if keys[i].isArray || i == lastkey {
+			// Don't pass subkeys because we may not be at literal end of path.
+			vals, verr = m.oldValuesForPath(tmppath)
+		} else {
+			continue
+		}
+		if verr != nil {
+			return nil, verr
+		}
+
+		if i == lastkey && !keys[i].isArray {
+			break
+		}
+
+		// Now we're looking at an array - supposedly.
+		// Is index in range of vals?
+		if len(vals) <= keys[i].position {
+			vals = nil
+			break
+		}
+
+		// Return the array member of interest, if at end of path.
+		if i == lastkey {
+			vals = vals[keys[i].position:(keys[i].position + 1)]
+			break
+		}
+
+		// Extract the array member of interest.
+		am := vals[keys[i].position:(keys[i].position + 1)]
+
+		// must be a map[string]interface{} value so we can keep walking the path
+		amm, ok := am[0].(map[string]interface{})
+		if !ok {
+			vals = nil
+			break
+		}
+
+		m = Map(amm)
+		haveFirst = false
+	}
+
+	return vals, nil
+}
+
+type key struct {
+	name     string
+	isArray  bool
+	position int
+}
+
+func parsePath(s string) ([]*key, error) {
+	keys := strings.Split(s, ".")
+
+	ret := make([]*key, 0)
+
+	for i := 0; i < len(keys); i++ {
+		if keys[i] == "" {
+			continue
+		}
+
+		newkey := new(key)
+		if strings.Index(keys[i], "[") < 0 {
+			newkey.name = keys[i]
+			ret = append(ret, newkey)
+			continue
+		}
+
+		p := strings.Split(keys[i], "[")
+		newkey.name = p[0]
+		p = strings.Split(p[1], "]")
+		if p[0] == "" { // no right bracket
+			return nil, fmt.Errorf("no right bracket on key index: %s", keys[i])
+		}
+		// convert p[0] to an int value
+		pos, nerr := strconv.ParseInt(p[0], 10, 32)
+		if nerr != nil {
+			return nil, fmt.Errorf("cannot convert index to int value: %s", p[0])
+		}
+		newkey.position = int(pos)
+		newkey.isArray = true
+		ret = append(ret, newkey)
+	}
+
+	return ret, nil
+}
+
+// oldValuesForPath is the legacy ValuesForPath() - now wrapped to handle the special case of indexed arrays in 'path'.
+func (mv Map) oldValuesForPath(path string, subkeys ...string) ([]interface{}, error) {
+	m := map[string]interface{}(mv)
+	var subKeyMap map[string]interface{}
+	if len(subkeys) > 0 {
+		var err error
+		subKeyMap, err = getSubKeyMap(subkeys...)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	keys := strings.Split(path, ".")
+	if keys[len(keys)-1] == "" {
+		keys = keys[:len(keys)-1]
+	}
+	ivals := make([]interface{}, 0, defaultArraySize)
+	var cnt int
+	valuesForKeyPath(&ivals, &cnt, m, keys, subKeyMap)
+	return ivals[:cnt], nil
+}
+
+func valuesForKeyPath(ret *[]interface{}, cnt *int, m interface{}, keys []string, subkeys map[string]interface{}) {
+	lenKeys := len(keys)
+
+	// load 'm' values into 'ret'
+	// expand any lists
+	if lenKeys == 0 {
+		switch m.(type) {
+		case map[string]interface{}:
+			if subkeys != nil {
+				if ok := hasSubKeys(m, subkeys); !ok {
+					return
+				}
+			}
+			*ret = append(*ret, m)
+			*cnt++
+		case []interface{}:
+			for i, v := range m.([]interface{}) {
+				if subkeys != nil {
+					if ok := hasSubKeys(v, subkeys); !ok {
+						continue // only load list members with subkeys
+					}
+				}
+				*ret = append(*ret, (m.([]interface{}))[i])
+				*cnt++
+			}
+		default:
+			if subkeys != nil {
+				return // must be map[string]interface{} if there are subkeys
+			}
+			*ret = append(*ret, m)
+			*cnt++
+		}
+		return
+	}
+
+	// key of interest
+	key := keys[0]
+	switch key {
+	case "*": // wildcard - scan all values
+		switch m.(type) {
+		case map[string]interface{}:
+			for _, v := range m.(map[string]interface{}) {
+				// valuesForKeyPath(ret, v, keys[1:], subkeys)
+				valuesForKeyPath(ret, cnt, v, keys[1:], subkeys)
+			}
+		case []interface{}:
+			for _, v := range m.([]interface{}) {
+				switch v.(type) {
+				// flatten out a list of maps - keys are processed
+				case map[string]interface{}:
+					for _, vv := range v.(map[string]interface{}) {
+						// valuesForKeyPath(ret, vv, keys[1:], subkeys)
+						valuesForKeyPath(ret, cnt, vv, keys[1:], subkeys)
+					}
+				default:
+					// valuesForKeyPath(ret, v, keys[1:], subkeys)
+					valuesForKeyPath(ret, cnt, v, keys[1:], subkeys)
+				}
+			}
+		}
+	default: // key - must be map[string]interface{}
+		switch m.(type) {
+		case map[string]interface{}:
+			if v, ok := m.(map[string]interface{})[key]; ok {
+				// valuesForKeyPath(ret, v, keys[1:], subkeys)
+				valuesForKeyPath(ret, cnt, v, keys[1:], subkeys)
+			}
+		case []interface{}: // may be buried in list
+			for _, v := range m.([]interface{}) {
+				switch v.(type) {
+				case map[string]interface{}:
+					if vv, ok := v.(map[string]interface{})[key]; ok {
+						// valuesForKeyPath(ret, vv, keys[1:], subkeys)
+						valuesForKeyPath(ret, cnt, vv, keys[1:], subkeys)
+					}
+				}
+			}
+		}
+	}
+}
+
+// hasSubKeys() - interface{} equality works for string, float64, bool
+// 'v' must be a map[string]interface{} value to have subkeys
+// 'a' can have k:v pairs with v.(string) == "*", which is treated like a wildcard.
+func hasSubKeys(v interface{}, subkeys map[string]interface{}) bool {
+	if len(subkeys) == 0 {
+		return true
+	}
+
+	switch v.(type) {
+	case map[string]interface{}:
+		// do all subKey name:value pairs match?
+		mv := v.(map[string]interface{})
+		for skey, sval := range subkeys {
+			isNotKey := false
+			if skey[:1] == "!" { // a NOT-key
+				skey = skey[1:]
+				isNotKey = true
+			}
+			vv, ok := mv[skey]
+			if !ok { // key doesn't exist
+				if isNotKey { // key not there, but that's what we want
+					if kv, ok := sval.(string); ok && kv == "*" {
+						continue
+					}
+				}
+				return false
+			}
+			// wildcard check
+			if kv, ok := sval.(string); ok && kv == "*" {
+				if isNotKey { // key is there, and we don't want it
+					return false
+				}
+				continue
+			}
+			switch sval.(type) {
+			case string:
+				if s, ok := vv.(string); ok && s == sval.(string) {
+					if isNotKey {
+						return false
+					}
+					continue
+				}
+			case bool:
+				if b, ok := vv.(bool); ok && b == sval.(bool) {
+					if isNotKey {
+						return false
+					}
+					continue
+				}
+			case float64:
+				if f, ok := vv.(float64); ok && f == sval.(float64) {
+					if isNotKey {
+						return false
+					}
+					continue
+				}
+			}
+			// key there but didn't match subkey value
+			if isNotKey { // that's what we want
+				continue
+			}
+			return false
+		}
+		// all subkeys matched
+		return true
+	}
+
+	// not a map[string]interface{} value, can't have subkeys
+	return false
+}
+
+// getSubKeyMap generates a map of key:value entries as map[string]interface{}.
+// 'kv' arguments are "name:value" pairs: attribute keys are designated with a prepended hyphen, '-'.
+// If len(kv) == 0, the return is (nil, nil).
+func getSubKeyMap(kv ...string) (map[string]interface{}, error) {
+	if len(kv) == 0 {
+		return nil, nil
+	}
+	m := make(map[string]interface{}, 0)
+	for _, v := range kv {
+		vv := strings.Split(v, fieldSep)
+		switch len(vv) {
+		case 2:
+			m[vv[0]] = interface{}(vv[1])
+		case 3:
+			switch vv[2] {
+			case "string", "char", "text":
+				m[vv[0]] = interface{}(vv[1])
+			case "bool", "boolean":
+				// ParseBool treats "1"==true & "0"==false
+				b, err := strconv.ParseBool(vv[1])
+				if err != nil {
+					return nil, fmt.Errorf("can't convert subkey value to bool: %s", vv[1])
+				}
+				m[vv[0]] = interface{}(b)
+			case "float", "float64", "num", "number", "numeric":
+				f, err := strconv.ParseFloat(vv[1], 64)
+				if err != nil {
+					return nil, fmt.Errorf("can't convert subkey value to float: %s", vv[1])
+				}
+				m[vv[0]] = interface{}(f)
+			default:
+				return nil, fmt.Errorf("unknown subkey conversion spec: %s", v)
+			}
+		default:
+			return nil, fmt.Errorf("unknown subkey spec: %s", v)
+		}
+	}
+	return m, nil
+}
+
+// ------------------------------- END of valuesFor ... ----------------------------
+
+// ----------------------- locate where a key value is in the tree -------------------
+
+//----------------------------- find all paths to a key --------------------------------
+
+// PathsForKey gets all paths through Map, 'mv', (in dot-notation) that terminate with the specified key.
+// Results can be used with ValuesForPath.
+func (mv Map) PathsForKey(key string) []string {
+	m := map[string]interface{}(mv)
+	breadbasket := make(map[string]bool, 0)
+	breadcrumbs := ""
+
+	hasKeyPath(breadcrumbs, m, key, breadbasket)
+	if len(breadbasket) == 0 {
+		return nil
+	}
+
+	// unpack map keys to return
+	res := make([]string, len(breadbasket))
+	var i int
+	for k := range breadbasket {
+		res[i] = k
+		i++
+	}
+
+	return res
+}
+
+// PathForKeyShortest extracts the shortest path from all possible paths - from PathsForKey() - in Map, 'mv'.
+// Paths are strings using dot-notation.
+func (mv Map) PathForKeyShortest(key string) string {
+	paths := mv.PathsForKey(key)
+
+	lp := len(paths)
+	if lp == 0 {
+		return ""
+	}
+	if lp == 1 {
+		return paths[0]
+	}
+
+	shortest := paths[0]
+	shortestLen := len(strings.Split(shortest, "."))
+
+	for i := 1; i < len(paths); i++ {
+		vlen := len(strings.Split(paths[i], "."))
+		if vlen < shortestLen {
+			shortest = paths[i]
+			shortestLen = vlen
+		}
+	}
+
+	return shortest
+}
+
+// hasKeyPath - if the map 'key' exists, append it to KeyPath.path and increment KeyPath.depth.
+// This is really just a breadcrumber that saves all trails that hit the prescribed 'key'.
+func hasKeyPath(crumbs string, iv interface{}, key string, basket map[string]bool) {
+	switch iv.(type) {
+	case map[string]interface{}:
+		vv := iv.(map[string]interface{})
+		if _, ok := vv[key]; ok {
+			// create a new breadcrumb, initialized with the one we have
+			var nbc string
+			if crumbs == "" {
+				nbc = key
+			} else {
+				nbc = crumbs + "." + key
+			}
+			basket[nbc] = true
+		}
+		// walk on down the path, key could occur again at deeper node
+		for k, v := range vv {
+			// create a new breadcrumb, initialized with the one we have
+			var nbc string
+			if crumbs == "" {
+				nbc = k
+			} else {
+				nbc = crumbs + "." + k
+			}
+			hasKeyPath(nbc, v, key, basket)
+		}
+	case []interface{}:
+		// crumb-trail doesn't change, pass it on
+		for _, v := range iv.([]interface{}) {
+			hasKeyPath(crumbs, v, key, basket)
+		}
+	}
+}
+
+var PathNotExistError = errors.New("Path does not exist")
+
+// ValueForPath wraps ValuesForPath and returns the first value returned.
+// If no value is found it returns 'nil' and PathNotExistError.
+func (mv Map) ValueForPath(path string) (interface{}, error) {
+	vals, err := mv.ValuesForPath(path)
+	if err != nil {
+		return nil, err
+	}
+	if len(vals) == 0 {
+		return nil, PathNotExistError
+	}
+	return vals[0], nil
+}
+
+// ValueForPathString returns the first found value for the path as a string.
+func (mv Map) ValueForPathString(path string) (string, error) {
+	vals, err := mv.ValuesForPath(path)
+	if err != nil {
+		return "", err
+	}
+	if len(vals) == 0 {
+		return "", errors.New("ValueForPath: path not found")
+	}
+	val := vals[0]
+	switch str := val.(type) {
+	case string:
+		return str, nil
+	default:
+		return "", fmt.Errorf("ValueForPath: unsupported type: %T", str)
+	}
+}
+
+// ValueOrEmptyForPathString returns the first found value for the path as a string.
+// If the path is not found then it returns an empty string.
+func (mv Map) ValueOrEmptyForPathString(path string) string {
+	str, _ := mv.ValueForPathString(path)
+	return str
+}
diff --git a/vendor/github.com/clbanning/mxj/leafnode.go b/vendor/github.com/clbanning/mxj/leafnode.go
new file mode 100644
index 00000000000..cf413ebdd4f
--- /dev/null
+++ b/vendor/github.com/clbanning/mxj/leafnode.go
@@ -0,0 +1,112 @@
+package mxj
+
+// leafnode.go - return leaf nodes with paths and values for the Map
+// inspired by: https://groups.google.com/forum/#!topic/golang-nuts/3JhuVKRuBbw
+
+import (
+	"strconv"
+	"strings"
+)
+
+const (
+	NoAttributes = true // suppress LeafNode values that are attributes
+)
+
+// LeafNode - a terminal path value in a Map.
+// For XML Map values it represents an attribute or simple element value - of type
+// string unless the Map was created using the Cast flag. For JSON Map values it represents
+// a string, numeric, boolean, or null value.
+type LeafNode struct {
+	Path  string      // a dot-notation representation of the path with array subscripting
+	Value interface{} // the value at the path termination
+}
+
+// LeafNodes - returns an array of all LeafNode values for the Map.
+// The optional no_attr argument suppresses attribute values (keys with prepended hyphen, '-')
+// as well as the "#text" key for the associated simple element value.
+//
+// PrependAttrWithHyphen(false) will result in attributes having .attr-name as the
+// terminal node in 'path' while the path for the element value, itself, will be
+// the base path w/o "#text".
+//
+// LeafUseDotNotation(true) causes list members to be identified using ".N" syntax
+// rather than "[N]" syntax.
+func (mv Map) LeafNodes(no_attr ...bool) []LeafNode {
+	var a bool
+	if len(no_attr) == 1 {
+		a = no_attr[0]
+	}
+
+	l := make([]LeafNode, 0)
+	getLeafNodes("", "", map[string]interface{}(mv), &l, a)
+	return l
+}
+
+func getLeafNodes(path, node string, mv interface{}, l *[]LeafNode, noattr bool) {
+	// if stripping attributes, then also strip "#text" key
+	if !noattr || node != "#text" {
+		if path != "" && node[:1] != "[" {
+			path += "."
+		}
+		path += node
+	}
+	switch mv.(type) {
+	case map[string]interface{}:
+		for k, v := range mv.(map[string]interface{}) {
+			// if noattr && k[:1] == "-" {
+			if noattr && len(attrPrefix) > 0 && strings.Index(k, attrPrefix) == 0 {
+				continue
+			}
+			getLeafNodes(path, k, v, l, noattr)
+		}
+	case []interface{}:
+		for i, v := range mv.([]interface{}) {
+			if useDotNotation {
+				getLeafNodes(path, strconv.Itoa(i), v, l, noattr)
+			} else {
+				getLeafNodes(path, "["+strconv.Itoa(i)+"]", v, l, noattr)
+			}
+		}
+	default:
+		// can't walk any further, so create leaf
+		n := LeafNode{path, mv}
+		*l = append(*l, n)
+	}
+}
+
+// LeafPaths - all paths that terminate in LeafNode values.
+func (mv Map) LeafPaths(no_attr ...bool) []string {
+	ln := mv.LeafNodes(no_attr...) // pass the attribute-suppression option through
+	ss := make([]string, len(ln))
+	for i := 0; i < len(ln); i++ {
+		ss[i] = ln[i].Path
+	}
+	return ss
+}
+
+// LeafValues - all terminal values in the Map.
+func (mv Map) LeafValues(no_attr ...bool) []interface{} {
+	ln := mv.LeafNodes(no_attr...) // pass the attribute-suppression option through
+	vv := make([]interface{}, len(ln))
+	for i := 0; i < len(ln); i++ {
+		vv[i] = ln[i].Value
+	}
+	return vv
+}
+
+// ====================== utilities ======================
+
+// https://groups.google.com/forum/#!topic/golang-nuts/pj0C5IrZk4I
+var useDotNotation bool
+
+// LeafUseDotNotation sets a flag that list members in LeafNode paths
+// should be identified using ".N" syntax rather than the default "[N]"
+// syntax.
+// Calling LeafUseDotNotation with no arguments toggles the
+// flag on/off; otherwise, the argument sets the flag value 'true'/'false'.
+func LeafUseDotNotation(b ...bool) {
+	if len(b) == 0 {
+		useDotNotation = !useDotNotation
+		return
+	}
+	useDotNotation = b[0]
+}
diff --git a/vendor/github.com/clbanning/mxj/misc.go b/vendor/github.com/clbanning/mxj/misc.go
new file mode 100644
index 00000000000..5b4fab2165d
--- /dev/null
+++ b/vendor/github.com/clbanning/mxj/misc.go
@@ -0,0 +1,86 @@
+// Copyright 2016 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+// misc.go - mimic functions (+others) called out in:
+// https://groups.google.com/forum/#!topic/golang-nuts/jm_aGsJNbdQ
+// Primarily these methods let you retrieve XML structure information.
+
+package mxj
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+)
+
+// Root returns the root element of the Map. If there is not a single key in the Map,
+// then an error is returned.
+func (mv Map) Root() (string, error) {
+	mm := map[string]interface{}(mv)
+	if len(mm) != 1 {
+		return "", fmt.Errorf("Map does not have singleton root. Len: %d.", len(mm))
+	}
+	for k := range mm {
+		return k, nil
+	}
+	return "", nil
+}
+
+// Elements: if the path is an element with sub-elements, return a list of the sub-element
+// keys. (The list is alphabetically sorted.) NOTE: Map keys that are prefixed with
+// '-', a hyphen, are considered attributes; see m.Attributes(path).
+func (mv Map) Elements(path string) ([]string, error) {
+	e, err := mv.ValueForPath(path)
+	if err != nil {
+		return nil, err
+	}
+	switch e.(type) {
+	case map[string]interface{}:
+		ee := e.(map[string]interface{})
+		elems := make([]string, len(ee))
+		var i int
+		for k := range ee {
+			if len(attrPrefix) > 0 && strings.Index(k, attrPrefix) == 0 {
+				continue // skip attributes
+			}
+			elems[i] = k
+			i++
+		}
+		elems = elems[:i]
+		// alphabetic sort keeps things tidy
+		sort.Strings(elems)
+		return elems, nil
+	}
+	return nil, fmt.Errorf("no elements for path: %s", path)
+}
+
+// Attributes: if the path is an element with attributes, return a list of the attribute
+// keys. (The list is alphabetically sorted.) NOTE: Map keys that are not prefixed with
+// '-', a hyphen, are not treated as attributes; see m.Elements(path). Also, if the
+// attribute prefix is "" - SetAttrPrefix("") or PrependAttrWithHyphen(false) - then
+// there are no identifiable attributes.
+func (mv Map) Attributes(path string) ([]string, error) {
+	a, err := mv.ValueForPath(path)
+	if err != nil {
+		return nil, err
+	}
+	switch a.(type) {
+	case map[string]interface{}:
+		aa := a.(map[string]interface{})
+		attrs := make([]string, len(aa))
+		var i int
+		for k := range aa {
+			if len(attrPrefix) == 0 || strings.Index(k, attrPrefix) != 0 {
+				continue // skip non-attributes
+			}
+			attrs[i] = k[len(attrPrefix):]
+			i++
+		}
+		attrs = attrs[:i]
+		// alphabetic sort keeps things tidy
+		sort.Strings(attrs)
+		return attrs, nil
+	}
+	return nil, fmt.Errorf("no attributes for path: %s", path)
+}
diff --git a/vendor/github.com/clbanning/mxj/mxj.go b/vendor/github.com/clbanning/mxj/mxj.go
new file mode 100644
index 00000000000..f0592f06c8e
--- /dev/null
+++ b/vendor/github.com/clbanning/mxj/mxj.go
@@ -0,0 +1,128 @@
+// mxj - A collection of map[string]interface{} and associated XML and JSON utilities.
+// Copyright 2012-2014 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file + +package mxj + +import ( + "fmt" + "sort" +) + +const ( + Cast = true // for clarity - e.g., mxj.NewMapXml(doc, mxj.Cast) + SafeEncoding = true // ditto - e.g., mv.Json(mxj.SafeEncoding) +) + +type Map map[string]interface{} + +// Allocate a Map. +func New() Map { + m := make(map[string]interface{}, 0) + return m +} + +// Cast a Map to map[string]interface{} +func (mv Map) Old() map[string]interface{} { + return mv +} + +// Return a copy of mv as a newly allocated Map. If the Map only contains string, +// numeric, map[string]interface{}, and []interface{} values, then it can be thought +// of as a "deep copy." Copying a structure (or structure reference) value is subject +// to the noted restrictions. +// NOTE: If 'mv' includes structure values with, possibly, JSON encoding tags +// then only public fields of the structure are in the new Map - and with +// keys that conform to any encoding tag instructions. The structure itself will +// be represented as a map[string]interface{} value. +func (mv Map) Copy() (Map, error) { + // this is the poor-man's deep copy + // not efficient, but it works + j, jerr := mv.Json() + // must handle, we don't know how mv got built + if jerr != nil { + return nil, jerr + } + return NewMapJson(j) +} + +// --------------- StringIndent ... from x2j.WriteMap ------------- + +// Pretty print a Map. +func (mv Map) StringIndent(offset ...int) string { + return writeMap(map[string]interface{}(mv), true, true, offset...) +} + +// Pretty print a Map without the value type information - just key:value entries. +func (mv Map) StringIndentNoTypeInfo(offset ...int) string { + return writeMap(map[string]interface{}(mv), false, true, offset...) +} + +// writeMap - dumps the map[string]interface{} for examination. +// 'typeInfo' causes value type to be printed. +// 'offset' is initial indentation count; typically: Write(m). +func writeMap(m interface{}, typeInfo, root bool, offset ...int) string { + var indent int + if len(offset) == 1 { + indent = offset[0] + } + + var s string + switch m.(type) { + case []interface{}: + if typeInfo { + s += "[[]interface{}]" + } + for _, v := range m.([]interface{}) { + s += "\n" + for i := 0; i < indent; i++ { + s += " " + } + s += writeMap(v, typeInfo, false, indent+1) + } + case map[string]interface{}: + list := make([][2]string, len(m.(map[string]interface{}))) + var n int + for k, v := range m.(map[string]interface{}) { + list[n][0] = k + list[n][1] = writeMap(v, typeInfo, false, indent+1) + n++ + } + sort.Sort(mapList(list)) + for _, v := range list { + if root { + root = false + } else { + s += "\n" + } + for i := 0; i < indent; i++ { + s += " " + } + s += v[0] + " : " + v[1] + } + default: + if typeInfo { + s += fmt.Sprintf("[%T] %+v", m, m) + } else { + s += fmt.Sprintf("%+v", m) + } + } + return s +} + +// ======================== utility =============== + +type mapList [][2]string + +func (ml mapList) Len() int { + return len(ml) +} + +func (ml mapList) Swap(i, j int) { + ml[i], ml[j] = ml[j], ml[i] +} + +func (ml mapList) Less(i, j int) bool { + return ml[i][0] <= ml[j][0] +} diff --git a/vendor/github.com/clbanning/mxj/newmap.go b/vendor/github.com/clbanning/mxj/newmap.go new file mode 100644 index 00000000000..b293949056d --- /dev/null +++ b/vendor/github.com/clbanning/mxj/newmap.go @@ -0,0 +1,184 @@ +// mxj - A collection of map[string]interface{} and associated XML and JSON utilities. 
+// Copyright 2012-2014, 2018 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+// remap.go - build a new Map from the current Map based on keyOld:keyNew mappings
+// keys can use dot-notation, keyOld can use wildcard, '*'
+//
+// Computational strategy -
+// Using the key path - []string - traverse a new map[string]interface{} and
+// insert the oldVal as the newVal when we arrive at the end of the path.
+// If the type at the end is nil, then that is newVal
+// If the type at the end is a singleton (string, float64, bool) an array is created.
+// If the type at the end is an array, newVal is just appended.
+// If the type at the end is a map, it is inserted if possible or the map value
+// is converted into an array if necessary.
+
+package mxj
+
+import (
+	"errors"
+	"strings"
+)
+
+// (Map)NewMap - create a new Map from data in the current Map.
+// 'keypairs' are key mappings "oldKey:newKey" and specify that the current value of 'oldKey'
+// should be the value for 'newKey' in the returned Map.
+//   - 'oldKey' supports dot-notation as described for (Map)ValuesForPath()
+//   - 'newKey' supports dot-notation but with no wildcards, '*', or indexed arrays
+//   - "oldKey" is shorthand for the keypair value "oldKey:oldKey"
+//   - "oldKey:" and ":newKey" are invalid keypair values
+//   - if 'oldKey' does not exist in the current Map, it is not written to the new Map.
+//     "null" is not supported unless it is the current Map.
+//   - see newmap_test.go for several syntax examples
+//   - mv.NewMap() == mxj.New()
+//
+// NOTE: "examples/partial.go" shows how to create arbitrary sub-docs of an XML doc.
+func (mv Map) NewMap(keypairs ...string) (Map, error) {
+	n := make(map[string]interface{}, 0)
+	if len(keypairs) == 0 {
+		return n, nil
+	}
+
+	// loop through the pairs
+	var oldKey, newKey string
+	var path []string
+	for _, v := range keypairs {
+		if len(v) == 0 {
+			continue // just skip over empty keypair arguments
+		}
+
+		// initialize oldKey, newKey and check
+		vv := strings.Split(v, ":")
+		if len(vv) > 2 {
+			return n, errors.New("oldKey:newKey keypair value not valid - " + v)
+		}
+		if len(vv) == 1 {
+			oldKey, newKey = vv[0], vv[0]
+		} else {
+			oldKey, newKey = vv[0], vv[1]
+		}
+		// assign the trimmed values; previously the TrimSpace results were discarded
+		oldKey = strings.TrimSpace(oldKey)
+		newKey = strings.TrimSpace(newKey)
+		if i := strings.Index(newKey, "*"); i > -1 {
+			return n, errors.New("newKey value cannot contain wildcard character - " + v)
+		}
+		if i := strings.Index(newKey, "["); i > -1 {
+			return n, errors.New("newKey value cannot contain indexed arrays - " + v)
+		}
+		if oldKey == "" || newKey == "" {
+			return n, errors.New("oldKey or newKey is not specified - " + v)
+		}
+
+		// get oldKey value
+		oldVal, err := mv.ValuesForPath(oldKey)
+		if err != nil {
+			return n, err
+		}
+		if len(oldVal) == 0 {
+			continue // oldKey has no value, may not exist in mv
+		}
+
+		// break down path
+		path = strings.Split(newKey, ".")
+		if path[len(path)-1] == "" { // ignore a trailing dot in newKey spec
+			path = path[:len(path)-1]
+		}
+
+		addNewVal(&n, path, oldVal)
+	}
+
+	return n, nil
+}
+
+// addNewVal navigates 'n' to the end of path and adds val.
+func addNewVal(n *map[string]interface{}, path []string, val []interface{}) {
+	// newVal - either singleton or array
+	var newVal interface{}
+	if len(val) == 1 {
+		newVal = val[0] // is type interface{}
+	} else {
+		newVal = interface{}(val)
+	}
+
+	// walk to the position of interest, create it if necessary
+	m := (*n) // initialize map walker
+	var k string // key for
m + lp := len(path) - 1 // when to stop looking + for i := 0; i < len(path); i++ { + k = path[i] + if i == lp { + break + } + var nm map[string]interface{} // holds position of next-map + switch m[k].(type) { + case nil: // need a map for next node in path, so go there + nm = make(map[string]interface{}, 0) + m[k] = interface{}(nm) + m = m[k].(map[string]interface{}) + case map[string]interface{}: + // OK - got somewhere to walk to, go there + m = m[k].(map[string]interface{}) + case []interface{}: + // add a map and nm points to new map unless there's already + // a map in the array, then nm points there + // The placement of the next value in the array is dependent + // on the sequence of members - could land on a map or a nil + // value first. TODO: how to test this. + a := make([]interface{}, 0) + var foundmap bool + for _, vv := range m[k].([]interface{}) { + switch vv.(type) { + case nil: // doesn't appear that this occurs, need a test case + if foundmap { // use the first one in array + a = append(a, vv) + continue + } + nm = make(map[string]interface{}, 0) + a = append(a, interface{}(nm)) + foundmap = true + case map[string]interface{}: + if foundmap { // use the first one in array + a = append(a, vv) + continue + } + nm = vv.(map[string]interface{}) + a = append(a, vv) + foundmap = true + default: + a = append(a, vv) + } + } + // no map found in array + if !foundmap { + nm = make(map[string]interface{}, 0) + a = append(a, interface{}(nm)) + } + m[k] = interface{}(a) // must insert in map + m = nm + default: // it's a string, float, bool, etc. + aa := make([]interface{}, 0) + nm = make(map[string]interface{}, 0) + aa = append(aa, m[k], nm) + m[k] = interface{}(aa) + m = nm + } + } + + // value is nil, array or a singleton of some kind + // initially m.(type) == map[string]interface{} + v := m[k] + switch v.(type) { + case nil: // initialized + m[k] = newVal + case []interface{}: + a := m[k].([]interface{}) + a = append(a, newVal) + m[k] = interface{}(a) + default: // v exists:string, float64, bool, map[string]interface, etc. + a := make([]interface{}, 0) + a = append(a, v, newVal) + m[k] = interface{}(a) + } +} diff --git a/vendor/github.com/clbanning/mxj/readme.md b/vendor/github.com/clbanning/mxj/readme.md new file mode 100644 index 00000000000..6bb21dca82a --- /dev/null +++ b/vendor/github.com/clbanning/mxj/readme.md @@ -0,0 +1,179 @@ +

+# mxj - to/from maps, XML and JSON
+
+Decode/encode XML to/from map[string]interface{} (or JSON) values, and extract/modify values
+from maps by key or key-path, including wildcards.
+
+mxj supplants the legacy x2j and j2x packages. If you want the old syntax, use the mxj/x2j and
+mxj/j2x packages.

+## Related Packages
+
+https://github.com/clbanning/checkxml provides functions for validating XML data.

+## Refactor Decoder - 2015.11.15
+
+For over a year I've wanted to refactor the XML-to-map[string]interface{} decoder to make it
+more performant. I recently took the time to do that, since we were using github.com/clbanning/mxj
+in a production system that could be deployed on a Raspberry Pi. Now the decoder is comparable
+to the stdlib JSON-to-map[string]interface{} decoder in terms of its additional processing overhead
+relative to decoding to a structure value. As shown by:
+
+    BenchmarkNewMapXml-4             100000     18043 ns/op
+    BenchmarkNewStructXml-4          100000     14892 ns/op
+    BenchmarkNewMapJson-4            300000      4633 ns/op
+    BenchmarkNewStructJson-4         300000      3427 ns/op
+    BenchmarkNewMapXmlBooks-4         20000     82850 ns/op
+    BenchmarkNewStructXmlBooks-4      20000     67822 ns/op
+    BenchmarkNewMapJsonBooks-4       100000     17222 ns/op
+    BenchmarkNewStructJsonBooks-4    100000     15309 ns/op

+## Notices
+
+    2018.04.18: mv.Xml/mv.XmlIndent encodes non-map[string]interface{} map values - map[string]string, map[int]uint, etc.
+    2018.03.29: mv.Gob/NewMapGob support gob encoding/decoding of Maps.
+    2018.03.26: Added mxj/x2j-wrapper sub-package for migrating from legacy x2j package.
+    2017.02.22: LeafNode paths can use ".N" syntax rather than "[N]" for list member indexing.
+    2017.02.10: SetFieldSeparator changes field separator for args in UpdateValuesForPath, ValuesFor... methods.
+    2017.02.06: Support XMPP stream processing - HandleXMPPStreamTag().
+    2016.11.07: Preserve name space prefix syntax in XmlSeq parser - NewMapXmlSeq(), etc.
+    2016.06.25: Support overriding default XML attribute prefix, "-", in Map keys - SetAttrPrefix().
+    2016.05.26: Support customization of xml.Decoder by exposing CustomDecoder variable.
+    2016.03.19: Escape invalid chars when encoding XML attribute and element values - XMLEscapeChars().
+    2016.03.02: By default decoding XML with float64 and bool value casting will not cast "NaN", "Inf", and "-Inf".
+                To cast them to float64, first set flag with CastNanInf(true).
+    2016.02.22: New mv.Root(), mv.Elements(), mv.Attributes() methods let you examine XML document structure.
+    2016.02.16: Add CoerceKeysToLower() option to handle tags with mixed capitalization.
+    2016.02.12: Seek for first xml.StartElement token; only return error if io.EOF is reached first (handles BOM).
+    2015.12.02: XML decoding/encoding that preserves original structure of document. See NewMapXmlSeq()
+                and mv.XmlSeq() / mv.XmlSeqIndent().
+    2015-05-20: New: mv.StringIndentNoTypeInfo().
+                Also, alphabetically sort map[string]interface{} values by key to prettify output for mv.Xml(),
+                mv.XmlIndent(), mv.StringIndent(), mv.StringIndentNoTypeInfo().
+    2014-11-09: IncludeTagSeqNum() adds "_seq" key with XML doc positional information.
+                (NOTE: PreserveXmlList() is similar and will be here soon.)
+    2014-09-18: inspired by NYTimes fork, added PrependAttrWithHyphen() to allow stripping hyphen from attribute tag.
+    2014-08-02: AnyXml() and AnyXmlIndent() will try to marshal arbitrary values to XML.
+    2014-04-28: ValuesForPath() and NewMap() now accept path with indexed array references.

+## Basic Unmarshal XML to map[string]interface{}
+
+    type Map map[string]interface{}
+
+Create a `Map` value, 'mv', from any `map[string]interface{}` value, 'v':
+
+    mv := Map(v)
+
+Unmarshal / marshal XML as a `Map` value, 'mv':
+
+    mv, err := NewMapXml(xmlValue) // unmarshal
+    xmlValue, err := mv.Xml()      // marshal
+
+Unmarshal XML from an `io.Reader` as a `Map` value, 'mv':
+
+    mv, err := NewMapXmlReader(xmlReader)         // repeated calls, as with an os.File Reader, will process stream
+    mv, raw, err := NewMapXmlReaderRaw(xmlReader) // 'raw' is the raw XML that was decoded
+
+Marshal `Map` value, 'mv', to an XML Writer (`io.Writer`):
+
+    err := mv.XmlWriter(xmlWriter)
+    raw, err := mv.XmlWriterRaw(xmlWriter) // 'raw' is the raw XML that was written on xmlWriter
+
+Also, for prettified output:
+
+    xmlValue, err := mv.XmlIndent(prefix, indent, ...)
+    err := mv.XmlIndentWriter(xmlWriter, prefix, indent, ...)
+    raw, err := mv.XmlIndentWriterRaw(xmlWriter, prefix, indent, ...)
+
+Bulk process XML with error handling (note: handlers must return a boolean value):
+
+    err := HandleXmlReader(xmlReader, mapHandler(Map), errHandler(error))
+    err := HandleXmlReaderRaw(xmlReader, mapHandler(Map, []byte), errHandler(error, []byte))
+
+Converting XML to JSON: see Examples for `NewMapXml` and `HandleXmlReader`.
+
+There are comparable functions and methods for JSON processing.
+
+Arbitrary structure values can be decoded to / encoded from `Map` values:
+
+    mv, err := NewMapStruct(structVal)
+    err := mv.Struct(structPointer)
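+
+For example, a minimal sketch of the XML-to-JSON round trip described above (the sample
+document and the printed output are illustrative only):
+
+    package main
+
+    import (
+        "fmt"
+
+        "github.com/clbanning/mxj"
+    )
+
+    func main() {
+        xmlDoc := []byte(`<doc><name first="Charles">Banning</name></doc>`)
+
+        mv, err := mxj.NewMapXml(xmlDoc) // decode the XML into a Map
+        if err != nil {
+            // handle error
+        }
+
+        jsonDoc, err := mv.Json() // re-encode the Map as JSON
+        if err != nil {
+            // handle error
+        }
+
+        fmt.Println(string(jsonDoc)) // {"doc":{"name":{"#text":"Banning","-first":"Charles"}}}
+    }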

+## Extract / modify Map values
+
+To work with XML tag values, JSON or Map key values or structure field values, decode the XML, JSON
+or structure to a `Map` value, 'mv', or cast a `map[string]interface{}` value to a `Map` value, 'mv', then:
+
+    paths := mv.PathsForKey(key)
+    path := mv.PathForKeyShortest(key)
+    values, err := mv.ValuesForKey(key, subkeys)
+    values, err := mv.ValuesForPath(path, subkeys)
+    count, err := mv.UpdateValuesForPath(newVal, path, subkeys)
+
+Get everything at once, irrespective of path depth:
+
+    leafnodes := mv.LeafNodes()
+    leafvalues := mv.LeafValues()
+
+A new `Map` with whatever keys are desired can be created from the current `Map` and then encoded in XML
+or JSON. (Note: keys can use dot-notation.)
+
+    newMap, err := mv.NewMap("oldKey_1:newKey_1", "oldKey_2:newKey_2", ..., "oldKey_N:newKey_N")
+    newMap, err := mv.NewMap("oldKey1", "oldKey3", "oldKey5") // a subset of 'mv'; see "examples/partial.go"
+    newXml, err := newMap.Xml()   // for example
+    newJson, err := newMap.Json() // ditto
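+
+As an illustrative sketch of key-path extraction (the document, paths, and printed output are
+made up for this example; error handling is elided):
+
+    doc := []byte(`<library><book seq="1"><title>T1</title></book><book seq="2"><title>T2</title></book></library>`)
+    mv, _ := mxj.NewMapXml(doc)
+
+    titles, _ := mv.ValuesForPath("library.book.title")
+    fmt.Println(titles) // [T1 T2]
+
+    fmt.Println(mv.PathForKeyShortest("title")) // library.book.title
+
+    // subkey filter: only the <book> element with attribute seq="2"
+    books, _ := mv.ValuesForPath("library.book", "-seq:2")
+    fmt.Println(books) // [map[-seq:2 title:T2]]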

+## Usage
+
+The package is fairly well [self-documented with examples](http://godoc.org/github.com/clbanning/mxj).
+
+Also, the subdirectory "examples" contains a wide range of examples, several taken from golang-nuts discussions.

+## XML parsing conventions
+
+Using NewMapXml()
+
+   - Attributes are parsed to `map[string]interface{}` values by prefixing a hyphen, `-`,
+     to the attribute label. (Unless overridden by `PrependAttrWithHyphen(false)` or
+     `SetAttrPrefix()`.)
+   - If the element is a simple element and has attributes, the element value
+     is given the key `#text` for its `map[string]interface{}` representation. (See
+     the 'atomFeedString.xml' test data, below.)
+   - XML comments, directives, and process instructions are ignored.
+   - If CoerceKeysToLower() has been called, then the resultant keys will be lower case.
+
+Using NewMapXmlSeq()
+
+   - Attributes are parsed to `map["#attr"]map[<attr_label>]map[string]interface{}` values
+     where the `<attr_label>` value has "#text" and "#seq" keys - the "#text" key holds the
+     value for `<attr_label>`.
+   - All elements, except for the root, have a "#seq" key.
+   - Comments, directives, and process instructions are unmarshalled into the Map using the
+     keys "#comment", "#directive", and "#procinst", respectively. (See documentation for more
+     specifics.)
+   - Name space syntax is preserved:
+      - `<ns:key>something</ns:key>` parses to `map["ns:key"]interface{}{"something"}`
+      - `xmlns:ns="http://myns.com/ns"` parses to `map["xmlns:ns"]interface{}{"http://myns.com/ns"}`
+
+Both
+
+   - By default, "Nan", "Inf", and "-Inf" values are not cast to float64. If you want them
+     to be cast, set a flag to cast them using CastNanInf(true).
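+
+For instance, a small sketch of overriding the default attribute prefix (the sample document
+and printed map are illustrative only):
+
+    mxj.SetAttrPrefix("@") // use "@attr" keys instead of the default "-attr"
+    mv, _ := mxj.NewMapXml([]byte(`<tag attr="val">some text</tag>`))
+    fmt.Printf("%v\n", mv) // map[tag:map[#text:some text @attr:val]]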

+## XML encoding conventions
+
+   - 'nil' `Map` values, which may represent 'null' JSON values, are encoded as `<tag/>`.
+     NOTE: the operation is not symmetric as `<tag/>` elements are decoded as `tag:""` `Map` values,
+     which, then, encode in JSON as `"tag":""` values.
+   - ALSO: there is no guarantee that the encoded XML doc will be the same as the decoded one. (Go
+     randomizes the walk through map[string]interface{} values.) If you plan to re-encode the
+     Map value to XML and want the same sequencing of elements, look at NewMapXmlSeq() and
+     mv.XmlSeq() - these try to preserve the element sequencing but with added complexity when
+     working with the Map representation.
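+
+A quick sketch of the asymmetry noted in the first bullet (the outputs in the comments are
+what the conventions above imply):
+
+    mv := mxj.Map{"tag": nil}
+    x, _ := mv.Xml() // <tag/>
+
+    mv2, _ := mxj.NewMapXml(x) // decodes back as map[tag:""]
+    j, _ := mv2.Json()         // {"tag":""}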

Running "go test"

+ +Because there are no guarantees on the sequence map elements are retrieved, the tests have been +written for visual verification in most cases. One advantage is that you can easily use the +output from running "go test" as examples of calling the various functions and methods. + +

+## Motivation

+I make extensive use of JSON for messaging and typically unmarshal the messages into
+`map[string]interface{}` values. This is easily done using `json.Unmarshal` from the
+standard Go libraries. Unfortunately, many legacy solutions use structured
+XML messages; in those environments the applications would have to be refactored to
+interoperate with my components.
+
+The better solution is to just provide an alternative HTTP handler that receives
+XML messages and parses them into a `map[string]interface{}` value and then reuse
+all the JSON-based code. The Go `xml.Unmarshal()` function does not provide the same
+option of unmarshaling XML messages into `map[string]interface{}` values. So I wrote
+a couple of small functions to fill this gap and released them as the x2j package.
+
+Over the next year and a half additional features were added, and the companion j2x
+package was released to address XML encoding of arbitrary JSON and `map[string]interface{}`
+values. As part of a refactoring of our production system and looking at how we had been
+using the x2j and j2x packages, we found that we rarely performed direct XML-to-JSON or
+JSON-to-XML conversion and that working with the XML or JSON as `map[string]interface{}`
+values was the primary value. Thus, everything was refactored into the mxj package.
diff --git a/vendor/github.com/clbanning/mxj/remove.go b/vendor/github.com/clbanning/mxj/remove.go
new file mode 100644
index 00000000000..8362ab17fa4
--- /dev/null
+++ b/vendor/github.com/clbanning/mxj/remove.go
@@ -0,0 +1,37 @@
+package mxj
+
+import "strings"
+
+// Remove removes the path from the Map.
+func (mv Map) Remove(path string) error {
+	m := map[string]interface{}(mv)
+	return remove(m, path)
+}
+
+func remove(m interface{}, path string) error {
+	val, err := prevValueByPath(m, path)
+	if err != nil {
+		return err
+	}
+
+	lastKey := lastKey(path)
+	delete(val, lastKey)
+
+	return nil
+}
+
+// lastKey returns the last key of the path;
+// lastKey("a.b.c") would return "c".
+func lastKey(path string) string {
+	keys := strings.Split(path, ".")
+	key := keys[len(keys)-1]
+	return key
+}
+
+// parentPath returns the path without the last key;
+// parentPath("a.b.c") would return "a.b".
+func parentPath(path string) string {
+	keys := strings.Split(path, ".")
+	parentPath := strings.Join(keys[0:len(keys)-1], ".")
+	return parentPath
+}
diff --git a/vendor/github.com/clbanning/mxj/rename.go b/vendor/github.com/clbanning/mxj/rename.go
new file mode 100644
index 00000000000..e95a9639af7
--- /dev/null
+++ b/vendor/github.com/clbanning/mxj/rename.go
@@ -0,0 +1,54 @@
+package mxj
+
+import (
+	"errors"
+	"strings"
+)
+
+// RenameKey renames a key in a Map.
+// It works only for nested maps; it doesn't work when the key is buried in a list.
+func (mv Map) RenameKey(path string, newName string) error {
+	if !mv.Exists(path) {
+		return errors.New("RenameKey: path not found: " + path)
+	}
+	if mv.Exists(parentPath(path) + "." + newName) {
+		return errors.New("RenameKey: key already exists: " + newName)
+	}
+
+	m := map[string]interface{}(mv)
+	return renameKey(m, path, newName)
+}
+
+func renameKey(m interface{}, path string, newName string) error {
+	val, err := prevValueByPath(m, path)
+	if err != nil {
+		return err
+	}
+
+	oldName := lastKey(path)
+	val[newName] = val[oldName]
+	delete(val, oldName)
+
+	return nil
+}
+
+// prevValueByPath returns the value which contains the last key in the path.
+// For example: prevValueByPath("a.b.c", {a{b{c: 3}}}) returns {c: 3}
+func prevValueByPath(m interface{}, path string) (map[string]interface{}, error) {
+	keys := strings.Split(path, ".")
+
+	switch mValue := m.(type) {
+	case map[string]interface{}:
+		for key, value := range mValue {
+			if key == keys[0] {
+				if len(keys) == 1 {
+					return mValue, nil
+				} else {
+					// keep looking for the full path to the key
+					return prevValueByPath(value, strings.Join(keys[1:], "."))
+				}
+			}
+		}
+	}
+	return nil, errors.New("prevValueByPath: didn't find path – " + path)
+}
diff --git a/vendor/github.com/clbanning/mxj/set.go b/vendor/github.com/clbanning/mxj/set.go
new file mode 100644
index 00000000000..a297fc38887
--- /dev/null
+++ b/vendor/github.com/clbanning/mxj/set.go
@@ -0,0 +1,26 @@
+package mxj
+
+import (
+	"strings"
+)
+
+// SetValueForPath sets the value for the path.
+func (mv Map) SetValueForPath(value interface{}, path string) error {
+	pathAry := strings.Split(path, ".")
+	parentPathAry := pathAry[0 : len(pathAry)-1]
+	parentPath := strings.Join(parentPathAry, ".")
+
+	val, err := mv.ValueForPath(parentPath)
+	if err != nil {
+		return err
+	}
+	if val == nil {
+		return nil // we just ignore the request if there's no val
+	}
+
+	key := pathAry[len(pathAry)-1]
+	cVal := val.(map[string]interface{})
+	cVal[key] = value
+
+	return nil
+}
diff --git a/vendor/github.com/clbanning/mxj/setfieldsep.go b/vendor/github.com/clbanning/mxj/setfieldsep.go
new file mode 100644
index 00000000000..b70715ebc65
--- /dev/null
+++ b/vendor/github.com/clbanning/mxj/setfieldsep.go
@@ -0,0 +1,20 @@
+package mxj
+
+// Per: https://github.com/clbanning/mxj/issues/37#issuecomment-278651862
+var fieldSep string = ":"
+
+// SetFieldSeparator changes the default field separator, ":", for the
+// newVal argument in mv.UpdateValuesForPath and the optional 'subkey' arguments
+// in mv.ValuesForKey and mv.ValuesForPath.
+//
+// E.g., if the newVal value is "http://blah/blah", setting the field separator
+// to "|" will allow the newVal specification, "|http://blah/blah" to parse
+// properly. If called with no argument or an empty string value, the field
+// separator is set to the default, ":".
+func SetFieldSeparator(s ...string) {
+	if len(s) == 0 || s[0] == "" {
+		fieldSep = ":" // the default
+		return
+	}
+	fieldSep = s[0]
+}
diff --git a/vendor/github.com/clbanning/mxj/songtext.xml b/vendor/github.com/clbanning/mxj/songtext.xml
new file mode 100644
index 00000000000..8c0f2becb12
--- /dev/null
+++ b/vendor/github.com/clbanning/mxj/songtext.xml
@@ -0,0 +1,29 @@
+
+ help me!
+
+
+
+		Henry was a renegade
+		Didn't like to play it safe
+		One component at a time
+		There's got to be a better way
+		Oh, people came from miles around
+		Searching for a steady job
+		Welcome to the Motor Town
+		Booming like an atom bomb
+
+
+		Oh, Henry was the end of the story
+		Then everything went wrong
+		And we'll return it to its former glory
+		But it just takes so long
+
+
+
+		It's going to take a long time
+		It's going to take it, but we'll make it one day
+		It's going to take a long time
+		It's going to take it, but we'll make it one day
+
+
diff --git a/vendor/github.com/clbanning/mxj/strict.go b/vendor/github.com/clbanning/mxj/strict.go
new file mode 100644
index 00000000000..1e769560ba0
--- /dev/null
+++ b/vendor/github.com/clbanning/mxj/strict.go
@@ -0,0 +1,30 @@
+// Copyright 2016 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+// strict.go actually addresses setting xml.Decoder attribute
+// values. This'll let you parse non-standard XML.
+
+package mxj
+
+import (
+	"encoding/xml"
+)
+
+// CustomDecoder can be used to specify xml.Decoder attribute
+// values, e.g., Strict:false, to be used. By default CustomDecoder
+// is nil. If CustomDecoder != nil, then the mxj.XmlCharsetReader variable is
+// ignored and must be set as part of the CustomDecoder value, if needed.
+//	Usage:
+//		mxj.CustomDecoder = &xml.Decoder{Strict:false}
+var CustomDecoder *xml.Decoder
+
+// useCustomDecoder copies over the public attributes from CustomDecoder
+func useCustomDecoder(d *xml.Decoder) {
+	d.Strict = CustomDecoder.Strict
+	d.AutoClose = CustomDecoder.AutoClose
+	d.Entity = CustomDecoder.Entity
+	d.CharsetReader = CustomDecoder.CharsetReader
+	d.DefaultSpace = CustomDecoder.DefaultSpace
+}
+
diff --git a/vendor/github.com/clbanning/mxj/struct.go b/vendor/github.com/clbanning/mxj/struct.go
new file mode 100644
index 00000000000..9be636cdcab
--- /dev/null
+++ b/vendor/github.com/clbanning/mxj/struct.go
@@ -0,0 +1,54 @@
+// Copyright 2012-2017 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+package mxj
+
+import (
+	"encoding/json"
+	"errors"
+	"reflect"
+
+	// "github.com/fatih/structs"
+)
+
+// Create a new Map value from a structure. Error returned if argument is not a structure.
+// Only public structure fields are decoded in the Map value. See github.com/fatih/structs#Map
+// for handling of "structs" tags.
+
+// DEPRECATED - import github.com/fatih/structs and cast the result of structs.Map to mxj.Map.
+//	import "github.com/fatih/structs"
+//	...
+//	   sm, err := structs.Map(<some_struct>)
+//	   if err != nil {
+//	      // handle error
+//	   }
+//	   m := mxj.Map(sm)
+// Alternatively, uncomment the old source and import in struct.go.
+func NewMapStruct(structVal interface{}) (Map, error) {
+	return nil, errors.New("deprecated - see package documentation")
+	/*
+		if !structs.IsStruct(structVal) {
+			return nil, errors.New("NewMapStruct() error: argument is not type Struct")
+		}
+		return structs.Map(structVal), nil
+	*/
+}
+
+// Marshal a map[string]interface{} into a structure referenced by 'structPtr'. Error returned
+// if argument is not a pointer or if json.Unmarshal returns an error.
+// json.Unmarshal structure encoding rules are followed to encode public structure fields.
+func (mv Map) Struct(structPtr interface{}) error {
+	// should check that we're getting a pointer.
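+	// Illustrative usage sketch (assumed types, not part of the vendored source):
+	//	type Person struct {
+	//		Name string `json:"name"`
+	//	}
+	//	var p Person
+	//	err := mxj.Map{"name": "Ada"}.Struct(&p) // p.Name == "Ada", via the JSON round-trip below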
+	if reflect.ValueOf(structPtr).Kind() != reflect.Ptr {
+		return errors.New("mv.Struct() error: argument is not type Ptr")
+	}
+
+	m := map[string]interface{}(mv)
+	j, err := json.Marshal(m)
+	if err != nil {
+		return err
+	}
+
+	return json.Unmarshal(j, structPtr)
+}
diff --git a/vendor/github.com/clbanning/mxj/updatevalues.go b/vendor/github.com/clbanning/mxj/updatevalues.go
new file mode 100644
index 00000000000..46779f4f063
--- /dev/null
+++ b/vendor/github.com/clbanning/mxj/updatevalues.go
@@ -0,0 +1,256 @@
+// Copyright 2012-2014, 2017 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+// updatevalues.go - modify a value based on path and possibly sub-keys
+// TODO(clb): handle simple elements with attributes and NewMapXmlSeq Map values.
+
+package mxj
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// Update value based on path and possible sub-key values.
+// A count of the number of values changed and any error are returned.
+// If the count == 0, then no path (and subkeys) matched.
+//	'newVal' can be a Map or map[string]interface{} value with a single 'key' that is the key to be modified,
+//	         or a string value "key:value[:type]" where type is "bool" or "num" to cast the value.
+//	'path' is a dot-notation list of keys to traverse; the last key in path can be the newVal key.
+//	       NOTE: 'path' spec does not currently support indexed array references.
+//	'subkeys' are "key:value[:type]" entries that must match for the path node.
+//	          The subkey can be wildcarded - "key:*" - to require that it's there with some value.
+//	          If a subkey is preceded with the '!' character, the key:value[:type] entry is treated as an
+//	          exclusion criteria - e.g., "!author:William T. Gaddis".
+//
+// NOTES:
+//	1. Simple elements with attributes need a path terminated as ".#text" to modify the actual value.
+//	2. Values in Maps created using NewMapXmlSeq are map[string]interface{} values with a "#text" key.
+//	3. If values in 'newVal' or 'subkeys' args contain ":", use SetFieldSeparator to set an unused symbol,
+//	   perhaps "|".
+func (mv Map) UpdateValuesForPath(newVal interface{}, path string, subkeys ...string) (int, error) {
+	m := map[string]interface{}(mv)
+
+	// extract the subkeys
+	var subKeyMap map[string]interface{}
+	if len(subkeys) > 0 {
+		var err error
+		subKeyMap, err = getSubKeyMap(subkeys...)
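+		// e.g. (illustrative): subkeys such as "title:Dune", the wildcard
+		// "title:*", or the exclusion "!author:anon" gate which path nodes
+		// are actually updated.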
+ if err != nil { + return 0, err + } + } + + // extract key and value from newVal + var key string + var val interface{} + switch newVal.(type) { + case map[string]interface{}, Map: + switch newVal.(type) { // "fallthrough is not permitted in type switch" (Spec) + case Map: + newVal = newVal.(Map).Old() + } + if len(newVal.(map[string]interface{})) != 1 { + return 0, fmt.Errorf("newVal map can only have len == 1 - %+v", newVal) + } + for key, val = range newVal.(map[string]interface{}) { + } + case string: // split it as a key:value pair + ss := strings.Split(newVal.(string), fieldSep) + n := len(ss) + if n < 2 || n > 3 { + return 0, fmt.Errorf("unknown newVal spec - %+v", newVal) + } + key = ss[0] + if n == 2 { + val = interface{}(ss[1]) + } else if n == 3 { + switch ss[2] { + case "bool", "boolean": + nv, err := strconv.ParseBool(ss[1]) + if err != nil { + return 0, fmt.Errorf("can't convert newVal to bool - %+v", newVal) + } + val = interface{}(nv) + case "num", "numeric", "float", "int": + nv, err := strconv.ParseFloat(ss[1], 64) + if err != nil { + return 0, fmt.Errorf("can't convert newVal to float64 - %+v", newVal) + } + val = interface{}(nv) + default: + return 0, fmt.Errorf("unknown type for newVal value - %+v", newVal) + } + } + default: + return 0, fmt.Errorf("invalid newVal type - %+v", newVal) + } + + // parse path + keys := strings.Split(path, ".") + + var count int + updateValuesForKeyPath(key, val, m, keys, subKeyMap, &count) + + return count, nil +} + +// navigate the path +func updateValuesForKeyPath(key string, value interface{}, m interface{}, keys []string, subkeys map[string]interface{}, cnt *int) { + // ----- at end node: looking at possible node to get 'key' ---- + if len(keys) == 1 { + updateValue(key, value, m, keys[0], subkeys, cnt) + return + } + + // ----- here we are navigating the path thru the penultimate node -------- + // key of interest is keys[0] - the next in the path + switch keys[0] { + case "*": // wildcard - scan all values + switch m.(type) { + case map[string]interface{}: + for _, v := range m.(map[string]interface{}) { + updateValuesForKeyPath(key, value, v, keys[1:], subkeys, cnt) + } + case []interface{}: + for _, v := range m.([]interface{}) { + switch v.(type) { + // flatten out a list of maps - keys are processed + case map[string]interface{}: + for _, vv := range v.(map[string]interface{}) { + updateValuesForKeyPath(key, value, vv, keys[1:], subkeys, cnt) + } + default: + updateValuesForKeyPath(key, value, v, keys[1:], subkeys, cnt) + } + } + } + default: // key - must be map[string]interface{} + switch m.(type) { + case map[string]interface{}: + if v, ok := m.(map[string]interface{})[keys[0]]; ok { + updateValuesForKeyPath(key, value, v, keys[1:], subkeys, cnt) + } + case []interface{}: // may be buried in list + for _, v := range m.([]interface{}) { + switch v.(type) { + case map[string]interface{}: + if vv, ok := v.(map[string]interface{})[keys[0]]; ok { + updateValuesForKeyPath(key, value, vv, keys[1:], subkeys, cnt) + } + } + } + } + } +} + +// change value if key and subkeys are present +func updateValue(key string, value interface{}, m interface{}, keys0 string, subkeys map[string]interface{}, cnt *int) { + // there are two possible options for the value of 'keys0': map[string]interface, []interface{} + // and 'key' is a key in the map or is a key in a map in a list. 
+ switch m.(type) { + case map[string]interface{}: // gotta have the last key + if keys0 == "*" { + for k := range m.(map[string]interface{}) { + updateValue(key, value, m, k, subkeys, cnt) + } + return + } + endVal, _ := m.(map[string]interface{})[keys0] + + // if newV key is the end of path, replace the value for path-end + // may be []interface{} - means replace just an entry w/ subkeys + // otherwise replace the keys0 value if subkeys are there + // NOTE: this will replace the subkeys, also + if key == keys0 { + switch endVal.(type) { + case map[string]interface{}: + if hasSubKeys(m, subkeys) { + (m.(map[string]interface{}))[keys0] = value + (*cnt)++ + } + case []interface{}: + // without subkeys can't select list member to modify + // so key:value spec is it ... + if hasSubKeys(m, subkeys) { + (m.(map[string]interface{}))[keys0] = value + (*cnt)++ + break + } + nv := make([]interface{}, 0) + var valmodified bool + for _, v := range endVal.([]interface{}) { + // check entry subkeys + if hasSubKeys(v, subkeys) { + // replace v with value + nv = append(nv, value) + valmodified = true + (*cnt)++ + continue + } + nv = append(nv, v) + } + if valmodified { + (m.(map[string]interface{}))[keys0] = interface{}(nv) + } + default: // anything else is a strict replacement + if hasSubKeys(m, subkeys) { + (m.(map[string]interface{}))[keys0] = value + (*cnt)++ + } + } + return + } + + // so value is for an element of endVal + // if endVal is a map then 'key' must be there w/ subkeys + // if endVal is a list then 'key' must be in a list member w/ subkeys + switch endVal.(type) { + case map[string]interface{}: + if !hasSubKeys(endVal, subkeys) { + return + } + if _, ok := (endVal.(map[string]interface{}))[key]; ok { + (endVal.(map[string]interface{}))[key] = value + (*cnt)++ + } + case []interface{}: // keys0 points to a list, check subkeys + for _, v := range endVal.([]interface{}) { + // got to be a map so we can replace value for 'key' + vv, vok := v.(map[string]interface{}) + if !vok { + continue + } + if _, ok := vv[key]; !ok { + continue + } + if !hasSubKeys(vv, subkeys) { + continue + } + vv[key] = value + (*cnt)++ + } + } + case []interface{}: // key may be in a list member + // don't need to handle keys0 == "*"; we're looking at everything, anyway. + for _, v := range m.([]interface{}) { + // only map values - we're looking for 'key' + mm, ok := v.(map[string]interface{}) + if !ok { + continue + } + if _, ok := mm[key]; !ok { + continue + } + if !hasSubKeys(mm, subkeys) { + continue + } + mm[key] = value + (*cnt)++ + } + } + + // return +} diff --git a/vendor/github.com/clbanning/mxj/xml.go b/vendor/github.com/clbanning/mxj/xml.go new file mode 100644 index 00000000000..fac0f1d3bb5 --- /dev/null +++ b/vendor/github.com/clbanning/mxj/xml.go @@ -0,0 +1,1139 @@ +// Copyright 2012-2016 Charles Banning. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file + +// xml.go - basically the core of X2j for map[string]interface{} values. +// NewMapXml, NewMapXmlReader, mv.Xml, mv.XmlWriter +// see x2j and j2x for wrappers to provide end-to-end transformation of XML and JSON messages. + +package mxj + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "errors" + "fmt" + "io" + "reflect" + "sort" + "strconv" + "strings" + "time" +) + +// ------------------- NewMapXml & NewMapXmlReader ... ------------------------- + +// If XmlCharsetReader != nil, it will be used to decode the XML, if required. 
+// Note: if CustomDecoder != nil, then XmlCharsetReader is ignored;
+// set the CustomDecoder attribute instead.
+//	import (
+//	   charset "code.google.com/p/go-charset/charset"
+//	   "github.com/clbanning/mxj"
+//	)
+//	...
+//	mxj.XmlCharsetReader = charset.NewReader
+//	m, merr := mxj.NewMapXml(xmlValue)
var XmlCharsetReader func(charset string, input io.Reader) (io.Reader, error)
+
+// NewMapXml - convert an XML doc into a Map.
+// (This is analogous to unmarshalling a JSON string to map[string]interface{} using json.Unmarshal().)
+//	If the optional argument 'cast' is 'true', then values will be converted to boolean or float64 if possible.
+//
+//	Converting XML to JSON is as simple as:
+//		...
+//		mapVal, merr := mxj.NewMapXml(xmlVal)
+//		if merr != nil {
+//			// handle error
+//		}
+//		jsonVal, jerr := mapVal.Json()
+//		if jerr != nil {
+//			// handle error
+//		}
+//
+//	NOTES:
+//	   1. The 'xmlVal' will be parsed looking for an xml.StartElement, so BOM and other
+//	      extraneous xml.CharData will be ignored unless io.EOF is reached first.
+//	   2. If CoerceKeysToLower() has been called, then all key values will be lower case.
+//	   3. If CoerceKeysToSnakeCase() has been called, then all key values will be converted to snake case.
+func NewMapXml(xmlVal []byte, cast ...bool) (Map, error) {
+	var r bool
+	if len(cast) == 1 {
+		r = cast[0]
+	}
+	return xmlToMap(xmlVal, r)
+}
+
+// Get next XML doc from an io.Reader as a Map value. Returns Map value.
+//	NOTES:
+//	   1. The 'xmlReader' will be parsed looking for an xml.StartElement, so BOM and other
+//	      extraneous xml.CharData will be ignored unless io.EOF is reached first.
+//	   2. If CoerceKeysToLower() has been called, then all key values will be lower case.
+//	   3. If CoerceKeysToSnakeCase() has been called, then all key values will be converted to snake case.
+func NewMapXmlReader(xmlReader io.Reader, cast ...bool) (Map, error) {
+	var r bool
+	if len(cast) == 1 {
+		r = cast[0]
+	}
+
+	// We need to put an *os.File reader in a ByteReader or the xml.NewDecoder
+	// will wrap it in a bufio.Reader and seek on the file beyond where the
+	// xml.Decoder parses!
+	if _, ok := xmlReader.(io.ByteReader); !ok {
+		xmlReader = myByteReader(xmlReader) // see code at EOF
+	}
+
+	// build the map
+	return xmlReaderToMap(xmlReader, r)
+}
+
+// Get next XML doc from an io.Reader as a Map value. Returns Map value and slice with the raw XML.
+//	NOTES:
+//	   1. Due to the implementation of xml.Decoder, the raw XML off the reader is buffered to []byte
+//	      using a ByteReader. If the io.Reader is an os.File, there may be significant performance impact.
+//	      See the examples - getmetrics1.go through getmetrics4.go - for comparative use cases on a large
+//	      data set. If the io.Reader is wrapping a []byte value in-memory, however, such as http.Request.Body,
+//	      you CAN use it to efficiently unmarshal an XML doc and retrieve the raw XML in a single call.
+//	   2. The 'raw' return value may be larger than the XML text value.
+//	   3. The 'xmlReader' will be parsed looking for an xml.StartElement, so BOM and other
+//	      extraneous xml.CharData will be ignored unless io.EOF is reached first.
+//	   4. If CoerceKeysToLower() has been called, then all key values will be lower case.
+//	   5. If CoerceKeysToSnakeCase() has been called, then all key values will be converted to snake case.
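+// e.g. (illustrative sketch):
+//	m, raw, err := mxj.NewMapXmlReaderRaw(req.Body)
+//	// 'm' is the decoded Map; 'raw' holds the XML bytes consumed from the reader.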
+func NewMapXmlReaderRaw(xmlReader io.Reader, cast ...bool) (Map, []byte, error) {
+	var r bool
+	if len(cast) == 1 {
+		r = cast[0]
+	}
+	// create TeeReader so we can retrieve raw XML
+	buf := make([]byte, 0)
+	wb := bytes.NewBuffer(buf)
+	trdr := myTeeReader(xmlReader, wb) // see code at EOF
+
+	m, err := xmlReaderToMap(trdr, r)
+
+	// retrieve the raw XML that was decoded
+	b := wb.Bytes()
+
+	if err != nil {
+		return nil, b, err
+	}
+
+	return m, b, nil
+}
+
+// xmlReaderToMap() - parse an XML io.Reader to a map[string]interface{} value
+func xmlReaderToMap(rdr io.Reader, r bool) (map[string]interface{}, error) {
+	// parse the Reader
+	p := xml.NewDecoder(rdr)
+	if CustomDecoder != nil {
+		useCustomDecoder(p)
+	} else {
+		p.CharsetReader = XmlCharsetReader
+	}
+	return xmlToMapParser("", nil, p, r)
+}
+
+// xmlToMap - convert an XML doc into a map[string]interface{} value
+func xmlToMap(doc []byte, r bool) (map[string]interface{}, error) {
+	b := bytes.NewReader(doc)
+	p := xml.NewDecoder(b)
+	if CustomDecoder != nil {
+		useCustomDecoder(p)
+	} else {
+		p.CharsetReader = XmlCharsetReader
+	}
+	return xmlToMapParser("", nil, p, r)
+}
+
+// ===================================== where the work happens =============================
+
+// PrependAttrWithHyphen. Prepend attribute tags with a hyphen.
+// Default is 'true'. (Not applicable to NewMapXmlSeq(), mv.XmlSeq(), etc.)
+//	Note:
+//	If 'false', unmarshaling and marshaling is not symmetric. Attributes will be
+//	marshal'd as <attr_label>attr</attr_label> and may be part of a list.
+func PrependAttrWithHyphen(v bool) {
+	if v {
+		attrPrefix = "-"
+		lenAttrPrefix = len(attrPrefix)
+		return
+	}
+	attrPrefix = ""
+	lenAttrPrefix = len(attrPrefix)
+}
+
+// Include sequence id with inner tags. - per Sean Murphy, murphysean84@gmail.com.
+var includeTagSeqNum bool
+
+// IncludeTagSeqNum - include a "_seq":N key:value pair with each inner tag, denoting
+// its position when parsed. This is of limited usefulness, since list values cannot
+// be tagged with "_seq" without changing their depth in the Map.
+// So THIS SHOULD BE USED WITH CAUTION - see the test cases. Here's a sample of what
+// you get.
+/*
+	<Obj c="la" h="da" x="dee">
+		<IntObj id="3"/>
+		<IntObj1 id="1"/>
+		<IntObj id="2"/>
+		<StrObj>hello</StrObj>
+	</Obj>
+
+	parses as:
+
+	{
+	Obj:{
+		"-c":"la",
+		"-h":"da",
+		"-x":"dee",
+		"intObj":[
+			{
+				"-id"="3",
+				"_seq":"0" // if mxj.Cast is passed, then: "_seq":0
+			},
+			{
+				"-id"="2",
+				"_seq":"2"
+			}],
+		"intObj1":{
+			"-id":"1",
+			"_seq":"1"
+		},
+		"StrObj":{
+			"#text":"hello", // simple element value gets "#text" tag
+			"_seq":"3"
+		}
+	}
+	}
+*/
+func IncludeTagSeqNum(b bool) {
+	includeTagSeqNum = b
+}
+
+// all keys will be "lower case"
+var lowerCase bool
+
+// Coerce all tag values to keys in lower case. This is useful if you've got sources with variable
+// tag capitalization, and you want to use m.ValuesForKeys(), etc., with the key or path spec
+// in lower case.
+//	CoerceKeysToLower() will toggle the coercion flag true|false - on|off
+//	CoerceKeysToLower(true|false) will set the coercion flag on|off
+//
+//	NOTE: only recognized by NewMapXml, NewMapXmlReader, and NewMapXmlReaderRaw functions as well as
+//	the associated HandleXmlReader and HandleXmlReaderRaw.
+func CoerceKeysToLower(b ...bool) {
+	if len(b) == 0 {
+		lowerCase = !lowerCase
+	} else if len(b) == 1 {
+		lowerCase = b[0]
+	}
+}
+
+// 25jun16: Allow user to specify the "prefix" character for XML attribute key labels.
+// We do this by replacing '`' constant with attrPrefix var, replacing useHyphen with attrPrefix = "",
+// and adding a SetAttrPrefix(s string) function.
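+// e.g. (illustrative sketch): after mxj.SetAttrPrefix("@"), <doc budget="100"/>
+// decodes as map["doc"]map["@budget"]"100" instead of map["doc"]map["-budget"]"100".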
+var attrPrefix string = `-` // the default
+var lenAttrPrefix int = 1   // the default
+
+// SetAttrPrefix changes the default, "-", to the specified value, s.
+// SetAttrPrefix("") is the same as PrependAttrWithHyphen(false).
+// (Not applicable for NewMapXmlSeq(), mv.XmlSeq(), etc.)
+func SetAttrPrefix(s string) {
+	attrPrefix = s
+	lenAttrPrefix = len(attrPrefix)
+}
+
+// 18jan17: Allows user to specify if the map keys should be in snake case instead
+// of the default hyphenated notation.
+var snakeCaseKeys bool
+
+// CoerceKeysToSnakeCase changes the default, false, to the specified value, b.
+// Note: the attribute prefix will be a hyphen, '-', or whatever string value has
+// been specified using SetAttrPrefix.
+func CoerceKeysToSnakeCase(b ...bool) {
+	if len(b) == 0 {
+		snakeCaseKeys = !snakeCaseKeys
+	} else if len(b) == 1 {
+		snakeCaseKeys = b[0]
+	}
+}
+
+// 05feb17: support processing XMPP streams (issue #36)
+var handleXMPPStreamTag bool
+
+// HandleXMPPStreamTag causes decoder to parse XMPP <stream:stream> elements.
+// If called with no argument, XMPP stream element handling is toggled on/off.
+// (See xmppStream_test.go for example.)
+//	If called with NewMapXml, NewMapXmlReader, NewMapXmlReaderRaw the "stream"
+//	element will be returned as:
+//		map["stream"]interface{}{map[-<attr_name>]interface{}}.
+//	If called with NewMapSeq, NewMapSeqReader, NewMapSeqReaderRaw the "stream"
+//	element will be returned as:
+//		map["stream:stream"]interface{}{map["#attr"]interface{}{map[string]interface{}}}
+//	where the "#attr" values have "#text" and "#seq" keys. (See NewMapXmlSeq.)
+func HandleXMPPStreamTag(b ...bool) {
+	if len(b) == 0 {
+		handleXMPPStreamTag = !handleXMPPStreamTag
+	} else if len(b) == 1 {
+		handleXMPPStreamTag = b[0]
+	}
+}
+
+// 21jan18 - decode all values as map["#text":value] (issue #56)
+var decodeSimpleValuesAsMap bool
+
+// DecodeSimpleValuesAsMap forces all values to be decoded as map["#text":<value>].
+// If called with no argument, the decoding is toggled on/off.
+//
+// By default the NewMapXml functions decode simple values without attributes as
+// map[<tag>:<value>]. This function causes simple values without attributes to be
+// decoded the same as simple values with attributes - map[<tag>:map["#text":<value>]].
+func DecodeSimpleValuesAsMap(b ...bool) {
+	if len(b) == 0 {
+		decodeSimpleValuesAsMap = !decodeSimpleValuesAsMap
+	} else if len(b) == 1 {
+		decodeSimpleValuesAsMap = b[0]
+	}
+}
+
+// xmlToMapParser (2015.11.12) - load a 'clean' XML doc into a map[string]interface{} directly.
+// A refactoring of xmlToTreeParser(), markDuplicate() and treeToMap() - here, all-in-one.
+// We've removed the intermediate *node tree with the allocation and subsequent rescanning.
+func xmlToMapParser(skey string, a []xml.Attr, p *xml.Decoder, r bool) (map[string]interface{}, error) {
+	if lowerCase {
+		skey = strings.ToLower(skey)
+	}
+	if snakeCaseKeys {
+		skey = strings.Replace(skey, "-", "_", -1)
+	}
+
+	// NOTE: all attributes and sub-elements parsed into 'na', 'na' is returned as value for 'skey' in 'n'.
+	// Unless 'skey' is a simple element w/o attributes, in which case the xml.CharData value is the value.
+	var n, na map[string]interface{}
+	var seq int // for includeTagSeqNum
+
+	// Allocate maps and load attributes, if any.
+	// NOTE: on entry from NewMapXml(), etc., skey=="", and we fall through
+	// to get StartElement then recurse with skey==xml.StartElement.Name.Local
+	// where we begin allocating map[string]interface{} values 'n' and 'na'.
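+	// e.g. (illustrative): <tag attr="val">text</tag> parses with
+	// na = map[string]interface{}{"-attr": "val", "#text": "text"} and
+	// n = map[string]interface{}{"tag": na}, assuming the default attrPrefix "-".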
+ if skey != "" { + n = make(map[string]interface{}) // old n + na = make(map[string]interface{}) // old n.nodes + if len(a) > 0 { + for _, v := range a { + if snakeCaseKeys { + v.Name.Local = strings.Replace(v.Name.Local, "-", "_", -1) + } + var key string + key = attrPrefix + v.Name.Local + if lowerCase { + key = strings.ToLower(key) + } + na[key] = cast(v.Value, r) + } + } + } + // Return XMPP message. + if handleXMPPStreamTag && skey == "stream" { + n[skey] = na + return n, nil + } + + for { + t, err := p.Token() + if err != nil { + if err != io.EOF { + return nil, errors.New("xml.Decoder.Token() - " + err.Error()) + } + return nil, err + } + switch t.(type) { + case xml.StartElement: + tt := t.(xml.StartElement) + + // First call to xmlToMapParser() doesn't pass xml.StartElement - the map key. + // So when the loop is first entered, the first token is the root tag along + // with any attributes, which we process here. + // + // Subsequent calls to xmlToMapParser() will pass in tag+attributes for + // processing before getting the next token which is the element value, + // which is done above. + if skey == "" { + return xmlToMapParser(tt.Name.Local, tt.Attr, p, r) + } + + // If not initializing the map, parse the element. + // len(nn) == 1, necessarily - it is just an 'n'. + nn, err := xmlToMapParser(tt.Name.Local, tt.Attr, p, r) + if err != nil { + return nil, err + } + + // The nn map[string]interface{} value is a na[nn_key] value. + // We need to see if nn_key already exists - means we're parsing a list. + // This may require converting na[nn_key] value into []interface{} type. + // First, extract the key:val for the map - it's a singleton. + // Note: + // * if CoerceKeysToLower() called, then key will be lower case. + // * if CoerceKeysToSnakeCase() called, then key will be converted to snake case. + var key string + var val interface{} + for key, val = range nn { + break + } + + // IncludeTagSeqNum requests that the element be augmented with a "_seq" sub-element. + // In theory, we don't need this if len(na) == 1. But, we don't know what might + // come next - we're only parsing forward. So if you ask for 'includeTagSeqNum' you + // get it on every element. (Personally, I never liked this, but I added it on request + // and did get a $50 Amazon gift card in return - now we support it for backwards compatibility!) + if includeTagSeqNum { + switch val.(type) { + case []interface{}: + // noop - There's no clean way to handle this w/o changing message structure. + case map[string]interface{}: + val.(map[string]interface{})["_seq"] = seq // will overwrite an "_seq" XML tag + seq++ + case interface{}: // a non-nil simple element: string, float64, bool + v := map[string]interface{}{"#text": val} + v["_seq"] = seq + seq++ + val = v + } + } + + // 'na' holding sub-elements of n. + // See if 'key' already exists. + // If 'key' exists, then this is a list, if not just add key:val to na. + if v, ok := na[key]; ok { + var a []interface{} + switch v.(type) { + case []interface{}: + a = v.([]interface{}) + default: // anything else - note: v.(type) != nil + a = []interface{}{v} + } + a = append(a, val) + na[key] = a + } else { + na[key] = val // save it as a singleton + } + case xml.EndElement: + // len(n) > 0 if this is a simple element w/o xml.Attrs - see xml.CharData case. + if len(n) == 0 { + // If len(na)==0 we have an empty element == ""; + // it has no xml.Attr nor xml.CharData. 
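+			// e.g. (illustrative): an empty <etag></etag> with no attributes
+			// decodes as map["etag"]"", per the assignment below.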
+			// Note: in original node-tree parser, val defaulted to "";
+			// so we always had the default if len(node.nodes) == 0.
+			if len(na) > 0 {
+				n[skey] = na
+			} else {
+				n[skey] = "" // empty element
+			}
+		}
+		return n, nil
+	case xml.CharData:
+		// clean up possible noise
+		tt := strings.Trim(string(t.(xml.CharData)), "\t\r\b\n ")
+		if len(tt) > 0 {
+			if len(na) > 0 || decodeSimpleValuesAsMap {
+				na["#text"] = cast(tt, r)
+			} else if skey != "" {
+				n[skey] = cast(tt, r)
+			} else {
+				// per Adrian (http://www.adrianlungu.com/) catch stray text
+				// in decoder stream -
+				// https://github.com/clbanning/mxj/pull/14#issuecomment-182816374
+				// NOTE: CharSetReader must be set to non-UTF-8 CharSet or you'll get
+				// a p.Token() decoding error when the BOM is UTF-16 or UTF-32.
+				continue
+			}
+		}
+	default:
+		// noop
+	}
+	}
+}
+
+var castNanInf bool
+
+// Cast "Nan", "Inf", "-Inf" XML values to 'float64'.
+// By default, these values will be decoded as 'string'.
+func CastNanInf(b bool) {
+	castNanInf = b
+}
+
+// cast - try to cast string values to bool or float64
+func cast(s string, r bool) interface{} {
+	if r {
+		// handle nan and inf
+		if !castNanInf {
+			switch strings.ToLower(s) {
+			case "nan", "inf", "-inf":
+				return s
+			}
+		}
+
+		// handle numeric strings ahead of boolean
+		if f, err := strconv.ParseFloat(s, 64); err == nil {
+			return f
+		}
+		// ParseBool treats "1"==true & "0"==false, we've already scanned those
+		// values as float64. See if value has 't' or 'f' as initial screen to
+		// minimize calls to ParseBool; also, see if len(s) < 6.
+		if len(s) > 0 && len(s) < 6 {
+			switch s[:1] {
+			case "t", "T", "f", "F":
+				if b, err := strconv.ParseBool(s); err == nil {
+					return b
+				}
+			}
+		}
+	}
+	return s
+}
+
+// ------------------ END: NewMapXml & NewMapXmlReader -------------------------
+
+// ------------------ mv.Xml & mv.XmlWriter - from j2x ------------------------
+
+const (
+	DefaultRootTag = "doc"
+)
+
+var useGoXmlEmptyElemSyntax bool
+
+// XmlGoEmptyElemSyntax() - <tag></tag> rather than <tag/>.
+//	Go's encoding/xml package marshals empty XML elements as <tag></tag>. By default this package
+//	encodes empty elements as <tag/>. If you're marshaling Map values that include structures
+//	(which are passed to xml.Marshal for encoding), this will let you conform to the standard package.
func XmlGoEmptyElemSyntax() {
+	useGoXmlEmptyElemSyntax = true
+}
+
+// XmlDefaultEmptyElemSyntax() - <tag/> rather than <tag></tag>.
+// Return XML encoding for empty elements to the default package setting.
+// Reverses effect of XmlGoEmptyElemSyntax().
+func XmlDefaultEmptyElemSyntax() {
+	useGoXmlEmptyElemSyntax = false
+}
+
+// Encode a Map as XML. The companion of NewMapXml().
+// The following rules apply.
+//	- The key label "#text" is treated as the value for a simple element with attributes.
+//	- Map keys that begin with a hyphen, '-', are interpreted as attributes.
+//	  It is an error if the attribute doesn't have a []byte, string, number, or boolean value.
+//	- Map value type encoding:
+//	      > string, bool, float64, int, int32, int64, float32: per "%v" formatting
+//	      > []bool, []uint8: by casting to string
+//	      > structures, etc.: handed to xml.Marshal() - if there is an error, the element
+//	        value is "UNKNOWN"
+//	- Elements with only attribute values or are null are terminated using "/>".
+//	- If len(mv) == 1 and no rootTag is provided, then the map key is used as the root tag, if possible.
+//	  Thus, `{ "key":"value" }` encodes as "<key>value</key>".
+//	- To encode empty elements in a syntax consistent with encoding/xml call XmlGoEmptyElemSyntax().
+//	  The attributes tag=value pairs are alphabetized by "tag". Also, when encoding map[string]interface{} values -
+//	  complex elements, etc. - the key:value pairs are alphabetized by key so the resulting tags will appear sorted.
+func (mv Map) Xml(rootTag ...string) ([]byte, error) {
+	m := map[string]interface{}(mv)
+	var err error
+	s := new(string)
+	p := new(pretty) // just a stub
+
+	if len(m) == 1 && len(rootTag) == 0 {
+		for key, value := range m {
+			// if it's an array, see if all values are map[string]interface{}
+			// we force a new root tag if we'll end up with no key:value in the list
+			// so: key:[string_val, bool:true] --> <doc><key>string_val</key><key>true</key></doc>
+			switch value.(type) {
+			case []interface{}:
+				for _, v := range value.([]interface{}) {
+					switch v.(type) {
+					case map[string]interface{}: // noop
+					default: // anything else
+						err = mapToXmlIndent(false, s, DefaultRootTag, m, p)
+						goto done
+					}
+				}
+			}
+			err = mapToXmlIndent(false, s, key, value, p)
+		}
+	} else if len(rootTag) == 1 {
+		err = mapToXmlIndent(false, s, rootTag[0], m, p)
+	} else {
+		err = mapToXmlIndent(false, s, DefaultRootTag, m, p)
+	}
+done:
+	return []byte(*s), err
+}
+
+// The following implementation is provided only for symmetry with NewMapXmlReader[Raw].
+// The names will also provide a key for the number of return arguments.
+
+// Writes the Map as XML on the Writer.
+// See Xml() for encoding rules.
+func (mv Map) XmlWriter(xmlWriter io.Writer, rootTag ...string) error {
+	x, err := mv.Xml(rootTag...)
+	if err != nil {
+		return err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return err
+}
+
+// Writes the Map as XML on the Writer. []byte is the raw XML that was written.
+// See Xml() for encoding rules.
+func (mv Map) XmlWriterRaw(xmlWriter io.Writer, rootTag ...string) ([]byte, error) {
+	x, err := mv.Xml(rootTag...)
+	if err != nil {
+		return x, err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return x, err
+}
+
+// Writes the Map as pretty XML on the Writer.
+// See Xml() for encoding rules.
+func (mv Map) XmlIndentWriter(xmlWriter io.Writer, prefix, indent string, rootTag ...string) error {
+	x, err := mv.XmlIndent(prefix, indent, rootTag...)
+	if err != nil {
+		return err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return err
+}
+
+// Writes the Map as pretty XML on the Writer. []byte is the raw XML that was written.
+// See Xml() for encoding rules.
+func (mv Map) XmlIndentWriterRaw(xmlWriter io.Writer, prefix, indent string, rootTag ...string) ([]byte, error) {
+	x, err := mv.XmlIndent(prefix, indent, rootTag...)
+	if err != nil {
+		return x, err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return x, err
+}
+
+// -------------------- END: mv.Xml & mv.XmlWriter -------------------------------
+
+// -------------- Handle XML stream by processing Map value --------------------
+
+// Default poll delay to keep Handler from spinning on an open stream
+// like sitting on os.Stdin waiting for input.
+var xhandlerPollInterval = time.Millisecond
+
+// Bulk process XML using handlers that process a Map value.
+//	'xmlReader' is an io.Reader for XML (stream)
+//	'mapHandler' is the Map processor. Return of 'false' stops io.Reader processing.
+//	'errHandler' is the error processor. Return of 'false' stops io.Reader processing and returns the error.
+//	Note: mapHandler() and errHandler() calls are blocking, so reading and processing of messages is serialized.
+// This means that you can stop reading the file on error or after processing a particular message. +// To have reading and handling run concurrently, pass argument to a go routine in handler and return 'true'. +func HandleXmlReader(xmlReader io.Reader, mapHandler func(Map) bool, errHandler func(error) bool) error { + var n int + for { + m, merr := NewMapXmlReader(xmlReader) + n++ + + // handle error condition with errhandler + if merr != nil && merr != io.EOF { + merr = fmt.Errorf("[xmlReader: %d] %s", n, merr.Error()) + if ok := errHandler(merr); !ok { + // caused reader termination + return merr + } + continue + } + + // pass to maphandler + if len(m) != 0 { + if ok := mapHandler(m); !ok { + break + } + } else if merr != io.EOF { + time.Sleep(xhandlerPollInterval) + } + + if merr == io.EOF { + break + } + } + return nil +} + +// Bulk process XML using handlers that process a Map value and the raw XML. +// 'rdr' is an io.Reader for XML (stream) +// 'mapHandler' is the Map and raw XML - []byte - processor. Return of 'false' stops io.Reader processing. +// 'errHandler' is the error and raw XML processor. Return of 'false' stops io.Reader processing and returns the error. +// Note: mapHandler() and errHandler() calls are blocking, so reading and processing of messages is serialized. +// This means that you can stop reading the file on error or after processing a particular message. +// To have reading and handling run concurrently, pass argument(s) to a go routine in handler and return 'true'. +// See NewMapXmlReaderRaw for comment on performance associated with retrieving raw XML from a Reader. +func HandleXmlReaderRaw(xmlReader io.Reader, mapHandler func(Map, []byte) bool, errHandler func(error, []byte) bool) error { + var n int + for { + m, raw, merr := NewMapXmlReaderRaw(xmlReader) + n++ + + // handle error condition with errhandler + if merr != nil && merr != io.EOF { + merr = fmt.Errorf("[xmlReader: %d] %s", n, merr.Error()) + if ok := errHandler(merr, raw); !ok { + // caused reader termination + return merr + } + continue + } + + // pass to maphandler + if len(m) != 0 { + if ok := mapHandler(m, raw); !ok { + break + } + } else if merr != io.EOF { + time.Sleep(xhandlerPollInterval) + } + + if merr == io.EOF { + break + } + } + return nil +} + +// ----------------- END: Handle XML stream by processing Map value -------------- + +// -------- a hack of io.TeeReader ... need one that's an io.ByteReader for xml.NewDecoder() ---------- + +// This is a clone of io.TeeReader with the additional method t.ReadByte(). +// Thus, this TeeReader is also an io.ByteReader. +// This is necessary because xml.NewDecoder uses a ByteReader not a Reader. It appears to have been written +// with bufio.Reader or bytes.Reader in mind ... not a generic io.Reader, which doesn't have to have ReadByte().. +// If NewDecoder is passed a Reader that does not satisfy ByteReader() it wraps the Reader with +// bufio.NewReader and uses ReadByte rather than Read that runs the TeeReader pipe logic. + +type teeReader struct { + r io.Reader + w io.Writer + b []byte +} + +func myTeeReader(r io.Reader, w io.Writer) io.Reader { + b := make([]byte, 1) + return &teeReader{r, w, b} +} + +// need for io.Reader - but we don't use it ... 
+func (t *teeReader) Read(p []byte) (int, error) { + return 0, nil +} + +func (t *teeReader) ReadByte() (byte, error) { + n, err := t.r.Read(t.b) + if n > 0 { + if _, err := t.w.Write(t.b[:1]); err != nil { + return t.b[0], err + } + } + return t.b[0], err +} + +// For use with NewMapXmlReader & NewMapXmlSeqReader. +type byteReader struct { + r io.Reader + b []byte +} + +func myByteReader(r io.Reader) io.Reader { + b := make([]byte, 1) + return &byteReader{r, b} +} + +// Need for io.Reader interface ... +// Needed if reading a malformed http.Request.Body - issue #38. +func (b *byteReader) Read(p []byte) (int, error) { + return b.r.Read(p) +} + +func (b *byteReader) ReadByte() (byte, error) { + _, err := b.r.Read(b.b) + if len(b.b) > 0 { + return b.b[0], err + } + var c byte + return c, err +} + +// ----------------------- END: io.TeeReader hack ----------------------------------- + +// ---------------------- XmlIndent - from j2x package ---------------------------- + +// Encode a map[string]interface{} as a pretty XML string. +// See Xml for encoding rules. +func (mv Map) XmlIndent(prefix, indent string, rootTag ...string) ([]byte, error) { + m := map[string]interface{}(mv) + + var err error + s := new(string) + p := new(pretty) + p.indent = indent + p.padding = prefix + + if len(m) == 1 && len(rootTag) == 0 { + // this can extract the key for the single map element + // use it if it isn't a key for a list + for key, value := range m { + if _, ok := value.([]interface{}); ok { + err = mapToXmlIndent(true, s, DefaultRootTag, m, p) + } else { + err = mapToXmlIndent(true, s, key, value, p) + } + } + } else if len(rootTag) == 1 { + err = mapToXmlIndent(true, s, rootTag[0], m, p) + } else { + err = mapToXmlIndent(true, s, DefaultRootTag, m, p) + } + return []byte(*s), err +} + +type pretty struct { + indent string + cnt int + padding string + mapDepth int + start int +} + +func (p *pretty) Indent() { + p.padding += p.indent + p.cnt++ +} + +func (p *pretty) Outdent() { + if p.cnt > 0 { + p.padding = p.padding[:len(p.padding)-len(p.indent)] + p.cnt-- + } +} + +// where the work actually happens +// returns an error if an attribute is not atomic +func mapToXmlIndent(doIndent bool, s *string, key string, value interface{}, pp *pretty) error { + var endTag bool + var isSimple bool + var elen int + p := &pretty{pp.indent, pp.cnt, pp.padding, pp.mapDepth, pp.start} + + // per issue #48, 18apr18 - try and coerce maps to map[string]interface{} + // Don't need for mapToXmlSeqIndent, since maps there are decoded by NewMapXmlSeq(). 
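+	// e.g. (illustrative): a map[int]string value like map[int]string{1: "a"}
+	// is coerced below to map[string]interface{}{"1": "a"} before encoding.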
+	if reflect.ValueOf(value).Kind() == reflect.Map {
+		switch value.(type) {
+		case map[string]interface{}:
+		default:
+			val := make(map[string]interface{})
+			vv := reflect.ValueOf(value)
+			keys := vv.MapKeys()
+			for _, k := range keys {
+				val[fmt.Sprint(k)] = vv.MapIndex(k).Interface()
+			}
+			value = val
+		}
+	}
+
+	switch value.(type) {
+	// special handling of []interface{} values when len(value) == 0
+	case map[string]interface{}, []byte, string, float64, bool, int, int32, int64, float32, json.Number:
+		if doIndent {
+			*s += p.padding
+		}
+		*s += `<` + key
+	}
+	switch value.(type) {
+	case map[string]interface{}:
+		vv := value.(map[string]interface{})
+		lenvv := len(vv)
+		// scan out attributes - attribute keys have prepended attrPrefix
+		attrlist := make([][2]string, len(vv))
+		var n int
+		var ss string
+		for k, v := range vv {
+			if lenAttrPrefix > 0 && lenAttrPrefix < len(k) && k[:lenAttrPrefix] == attrPrefix {
+				switch v.(type) {
+				case string:
+					if xmlEscapeChars {
+						ss = escapeChars(v.(string))
+					} else {
+						ss = v.(string)
+					}
+					attrlist[n][0] = k[lenAttrPrefix:]
+					attrlist[n][1] = ss
+				case float64, bool, int, int32, int64, float32, json.Number:
+					attrlist[n][0] = k[lenAttrPrefix:]
+					attrlist[n][1] = fmt.Sprintf("%v", v)
+				case []byte:
+					if xmlEscapeChars {
+						ss = escapeChars(string(v.([]byte)))
+					} else {
+						ss = string(v.([]byte))
+					}
+					attrlist[n][0] = k[lenAttrPrefix:]
+					attrlist[n][1] = ss
+				default:
+					return fmt.Errorf("invalid attribute value for: %s:<%T>", k, v)
+				}
+				n++
+			}
+		}
+		if n > 0 {
+			attrlist = attrlist[:n]
+			sort.Sort(attrList(attrlist))
+			for _, v := range attrlist {
+				*s += ` ` + v[0] + `="` + v[1] + `"`
+			}
+		}
+		// only attributes?
+		if n == lenvv {
+			if useGoXmlEmptyElemSyntax {
+				*s += `></` + key + `>`
+			} else {
+				*s += `/>`
+			}
+			break
+		}
+
+		// simple element? Note: "#text" is an invalid XML tag.
+		if v, ok := vv["#text"]; ok && n+1 == lenvv {
+			switch v.(type) {
+			case string:
+				if xmlEscapeChars {
+					v = escapeChars(v.(string))
+				} else {
+					v = v.(string)
+				}
+			case []byte:
+				if xmlEscapeChars {
+					v = escapeChars(string(v.([]byte)))
+				}
+			}
+			*s += ">" + fmt.Sprintf("%v", v)
+			endTag = true
+			elen = 1
+			isSimple = true
+			break
+		} else if ok {
+			// Handle edge case where simple element with attributes
+			// is unmarshal'd using NewMapXml() where attribute prefix
+			// has been set to "".
+			// TODO(clb): should probably scan all keys for invalid chars.
+			return fmt.Errorf("invalid attribute key label: #text - due to attributes not being prefixed")
+		}
+
+		// close tag with possible attributes
+		*s += ">"
+		if doIndent {
+			*s += "\n"
+		}
+		// something more complex
+		p.mapDepth++
+		// extract the map k:v pairs and sort on key
+		elemlist := make([][2]interface{}, len(vv))
+		n = 0
+		for k, v := range vv {
+			if lenAttrPrefix > 0 && lenAttrPrefix < len(k) && k[:lenAttrPrefix] == attrPrefix {
+				continue
+			}
+			elemlist[n][0] = k
+			elemlist[n][1] = v
+			n++
+		}
+		elemlist = elemlist[:n]
+		sort.Sort(elemList(elemlist))
+		var i int
+		for _, v := range elemlist {
+			switch v[1].(type) {
+			case []interface{}:
+			default:
+				if i == 0 && doIndent {
+					p.Indent()
+				}
+			}
+			i++
+			if err := mapToXmlIndent(doIndent, s, v[0].(string), v[1], p); err != nil {
+				return err
+			}
+			switch v[1].(type) {
+			case []interface{}: // handled in []interface{} case
+			default:
+				if doIndent {
+					p.Outdent()
+				}
+			}
+			i--
+		}
+		p.mapDepth--
+		endTag = true
+		elen = 1 // we do have some content ...
+	case []interface{}:
+		// special case - found during implementing Issue #23
+		if len(value.([]interface{})) == 0 {
+			if doIndent {
+				*s += p.padding + p.indent
+			}
+			*s += "<" + key
+			elen = 0
+			endTag = true
+			break
+		}
+		for _, v := range value.([]interface{}) {
+			if doIndent {
+				p.Indent()
+			}
+			if err := mapToXmlIndent(doIndent, s, key, v, p); err != nil {
+				return err
+			}
+			if doIndent {
+				p.Outdent()
+			}
+		}
+		return nil
+	case []string:
+		// This was added by https://github.com/slotix ... not a type that
+		// would be encountered if mv generated from NewMapXml, NewMapJson.
+		// Could be encountered in AnyXml(), so we'll let it stay, though
+		// it should be merged with case []interface{}, above.
+		// quick fix for []string type:
+		// []string should be treated exactly as []interface{}
+		if len(value.([]string)) == 0 {
+			if doIndent {
+				*s += p.padding + p.indent
+			}
+			*s += "<" + key
+			elen = 0
+			endTag = true
+			break
+		}
+		for _, v := range value.([]string) {
+			if doIndent {
+				p.Indent()
+			}
+			if err := mapToXmlIndent(doIndent, s, key, v, p); err != nil {
+				return err
+			}
+			if doIndent {
+				p.Outdent()
+			}
+		}
+		return nil
+	case nil:
+		// terminate the tag
+		if doIndent {
+			*s += p.padding
+		}
+		*s += "<" + key
+		endTag, isSimple = true, true
+		break
+	default: // handle anything - even goofy stuff
+		elen = 0
+		switch value.(type) {
+		case string:
+			v := value.(string)
+			if xmlEscapeChars {
+				v = escapeChars(v)
+			}
+			elen = len(v)
+			if elen > 0 {
+				*s += ">" + v
+			}
+		case float64, bool, int, int32, int64, float32, json.Number:
+			v := fmt.Sprintf("%v", value)
+			elen = len(v) // always > 0
+			*s += ">" + v
+		case []byte: // NOTE: byte is just an alias for uint8
+			// similar to how xml.Marshal handles []byte structure members
+			v := string(value.([]byte))
+			if xmlEscapeChars {
+				v = escapeChars(v)
+			}
+			elen = len(v)
+			if elen > 0 {
+				*s += ">" + v
+			}
+		default:
+			var v []byte
+			var err error
+			if doIndent {
+				v, err = xml.MarshalIndent(value, p.padding, p.indent)
+			} else {
+				v, err = xml.Marshal(value)
+			}
+			if err != nil {
+				*s += ">UNKNOWN"
+			} else {
+				elen = len(v)
+				if elen > 0 {
+					*s += string(v)
+				}
+			}
+		}
+		isSimple = true
+		endTag = true
+	}
+	if endTag {
+		if doIndent {
+			if !isSimple {
+				*s += p.padding
+			}
+		}
+		if elen > 0 || useGoXmlEmptyElemSyntax {
+			if elen == 0 {
+				*s += ">"
+			}
+			*s += `</` + key + `>`
+		} else {
+			*s += `/>`
+		}
+	}
+	if doIndent {
+		if p.cnt > p.start {
+			*s += "\n"
+		}
+		p.Outdent()
+	}
+
+	return nil
+}
+
+// ============================ sort interface implementation =================
+
+type attrList [][2]string
+
+func (a attrList) Len() int {
+	return len(a)
+}
+
+func (a attrList) Swap(i, j int) {
+	a[i], a[j] = a[j], a[i]
+}
+
+func (a attrList) Less(i, j int) bool {
+	return a[i][0] <= a[j][0]
+}
+
+type elemList [][2]interface{}
+
+func (e elemList) Len() int {
+	return len(e)
+}
+
+func (e elemList) Swap(i, j int) {
+	e[i], e[j] = e[j], e[i]
+}
+
+func (e elemList) Less(i, j int) bool {
+	return e[i][0].(string) <= e[j][0].(string)
+}
diff --git a/vendor/github.com/clbanning/mxj/xmlseq.go b/vendor/github.com/clbanning/mxj/xmlseq.go
new file mode 100644
index 00000000000..6be73ae60d9
--- /dev/null
+++ b/vendor/github.com/clbanning/mxj/xmlseq.go
@@ -0,0 +1,828 @@
+// Copyright 2012-2016 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+// xmlseq.go - version of xml.go with sequence # injection on Decoding and sorting on Encoding.
+// Also, handles comments, directives and process instructions.
+
+package mxj
+
+import (
+	"bytes"
+	"encoding/xml"
+	"errors"
+	"fmt"
+	"io"
+	"sort"
+	"strings"
+)
+
+var NoRoot = errors.New("no root key")
+var NO_ROOT = NoRoot // maintain backwards compatibility
+
+// ------------------- NewMapXmlSeq & NewMapXmlSeqReader ... -------------------------
+
+// This is only useful if you want to re-encode the Map as XML using mv.XmlSeq(), etc., to preserve the original structure.
+// The xml.Decoder.RawToken method is used to parse the XML, so there is no checking for appropriate xml.EndElement values;
+// thus, it is assumed that the XML is valid.
+//
+// NewMapXmlSeq - convert an XML doc into a Map with elements id'd with decoding sequence int - #seq.
+// If the optional argument 'cast' is 'true', then values will be converted to boolean or float64 if possible.
+// NOTE: "#seq" key/value pairs are removed on encoding with mv.XmlSeq() / mv.XmlSeqIndent().
+//	• attributes are a map - map["#attr"]map["attr_key"]map[string]interface{}{"#text":<val>, "#seq":<num>}
+//	• all simple elements are decoded as map["#text"]interface{} with a "#seq" k:v pair, as well.
+//	• lists always decode as map["list_tag"][]map[string]interface{} where the array elements are maps that
+//	  include a "#seq" k:v pair based on sequence they are decoded. Thus, XML like:
+//	      <doc>
+//	         <ltag>value 1</ltag>
+//	         <newtag>value 2</newtag>
+//	         <ltag>value 3</ltag>
+//	      </doc>
+//	  is decoded as:
+//	    doc :
+//	      ltag :[[]interface{}]
+//	        [item: 0]
+//	          #seq :[int] 0
+//	          #text :[string] value 1
+//	        [item: 1]
+//	          #seq :[int] 2
+//	          #text :[string] value 3
+//	      newtag :
+//	        #seq :[int] 1
+//	        #text :[string] value 2
+//	  It will encode in proper sequence even though the Map representation merges all "ltag" elements in an array.
+//	• comments - "<!--comment-->" - are decoded as map["#comment"]map["#text"]"cmnt_text" with a "#seq" k:v pair.
+//	• directives - "<!directive>" - are decoded as map["#directive"]map["#text"]"directive_text" with a "#seq" k:v pair.
+//	• process instructions - "<?target inst?>" - are decoded as map["#procinst"]interface{} where the #procinst value
+//	  is of map[string]interface{} type with the following keys: #target, #inst, and #seq.
+//	• comments, directives, and procinsts that are NOT part of a document with a root key will be returned as
+//	  map[string]interface{} and the error value 'NoRoot'.
+//	• note:
+//	   1. Keys in the Map value that are parsed from a <name space prefix>:<local name> tag preserve the
+//	      ":" notation rather than stripping it as with NewMapXml().
+//	   2. Attribute keys for name space prefix declarations preserve "xmlns:<prefix>" notation.
+func NewMapXmlSeq(xmlVal []byte, cast ...bool) (Map, error) {
+	var r bool
+	if len(cast) == 1 {
+		r = cast[0]
+	}
+	return xmlSeqToMap(xmlVal, r)
+}
+
+// This is only useful if you want to re-encode the Map as XML using mv.XmlSeq(), etc., to preserve the original structure.
+//
+// Get next XML doc from an io.Reader as a Map value. Returns Map value.
+//	NOTES:
+//	   1. The 'xmlReader' will be parsed looking for an xml.StartElement, xml.Comment, etc., so BOM and other
+//	      extraneous xml.CharData will be ignored unless io.EOF is reached first.
+//	   2. CoerceKeysToLower() is NOT recognized, since the intent here is to eventually call m.XmlSeq() to
+//	      re-encode the message in its original structure.
+//	   3. If CoerceKeysToSnakeCase() has been called, then all key values will be converted to snake case.
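+// e.g. (illustrative sketch):
+//	m, err := mxj.NewMapXmlSeqReader(conn) // decode next doc, injecting "#seq" keys
+//	...
+//	b, err := m.XmlSeq()                   // re-encode with the original element order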
+func NewMapXmlSeqReader(xmlReader io.Reader, cast ...bool) (Map, error) { + var r bool + if len(cast) == 1 { + r = cast[0] + } + + // We need to put an *os.File reader in a ByteReader or the xml.NewDecoder + // will wrap it in a bufio.Reader and seek on the file beyond where the + // xml.Decoder parses! + if _, ok := xmlReader.(io.ByteReader); !ok { + xmlReader = myByteReader(xmlReader) // see code at EOF + } + + // build the map + return xmlSeqReaderToMap(xmlReader, r) +} + +// This is only useful if you want to re-encode the Map as XML using mv.XmlSeq(), etc., to preserve the original structure. +// +// Get next XML doc from an io.Reader as a Map value. Returns Map value and slice with the raw XML. +// NOTES: +// 1. Due to the implementation of xml.Decoder, the raw XML off the reader is buffered to []byte +// using a ByteReader. If the io.Reader is an os.File, there may be significant performance impact. +// See the examples - getmetrics1.go through getmetrics4.go - for comparative use cases on a large +// data set. If the io.Reader is wrapping a []byte value in-memory, however, such as http.Request.Body +// you CAN use it to efficiently unmarshal a XML doc and retrieve the raw XML in a single call. +// 2. The 'raw' return value may be larger than the XML text value. +// 3. The 'xmlReader' will be parsed looking for an xml.StartElement, xml.Comment, etc., so BOM and other +// extraneous xml.CharData will be ignored unless io.EOF is reached first. +// 4. CoerceKeysToLower() is NOT recognized, since the intent here is to eventually call m.XmlSeq() to +// re-encode the message in its original structure. +// 5. If CoerceKeysToSnakeCase() has been called, then all key values will be converted to snake case. +func NewMapXmlSeqReaderRaw(xmlReader io.Reader, cast ...bool) (Map, []byte, error) { + var r bool + if len(cast) == 1 { + r = cast[0] + } + // create TeeReader so we can retrieve raw XML + buf := make([]byte, 0) + wb := bytes.NewBuffer(buf) + trdr := myTeeReader(xmlReader, wb) + + m, err := xmlSeqReaderToMap(trdr, r) + + // retrieve the raw XML that was decoded + b := wb.Bytes() + + // err may be NoRoot + return m, b, err +} + +// xmlSeqReaderToMap() - parse a XML io.Reader to a map[string]interface{} value +func xmlSeqReaderToMap(rdr io.Reader, r bool) (map[string]interface{}, error) { + // parse the Reader + p := xml.NewDecoder(rdr) + if CustomDecoder != nil { + useCustomDecoder(p) + } else { + p.CharsetReader = XmlCharsetReader + } + return xmlSeqToMapParser("", nil, p, r) +} + +// xmlSeqToMap - convert a XML doc into map[string]interface{} value +func xmlSeqToMap(doc []byte, r bool) (map[string]interface{}, error) { + b := bytes.NewReader(doc) + p := xml.NewDecoder(b) + if CustomDecoder != nil { + useCustomDecoder(p) + } else { + p.CharsetReader = XmlCharsetReader + } + return xmlSeqToMapParser("", nil, p, r) +} + +// ===================================== where the work happens ============================= + +// xmlSeqToMapParser - load a 'clean' XML doc into a map[string]interface{} directly. +// Add #seq tag value for each element decoded - to be used for Encoding later. +func xmlSeqToMapParser(skey string, a []xml.Attr, p *xml.Decoder, r bool) (map[string]interface{}, error) { + if snakeCaseKeys { + skey = strings.Replace(skey, "-", "_", -1) + } + + // NOTE: all attributes and sub-elements parsed into 'na', 'na' is returned as value for 'skey' in 'n'. 
+	var n, na map[string]interface{}
+	var seq int // for including seq num when decoding
+
+	// Allocate maps and load attributes, if any.
+	// NOTE: on entry from NewMapXml(), etc., skey=="", and we fall through
+	// to get StartElement then recurse with skey==xml.StartElement.Name.Local
+	// where we begin allocating map[string]interface{} values 'n' and 'na'.
+	if skey != "" {
+		// 'n' only needs one slot - save call to runtime•hashGrow()
+		// 'na' we don't know
+		n = make(map[string]interface{}, 1)
+		na = make(map[string]interface{})
+		if len(a) > 0 {
+			// xml.Attr is decoded into: map["#attr"]map[<attr_label>]interface{}
+			// where interface{} is map[string]interface{}{"#text":<attr_val>, "#seq":<attr_seq>}
+			aa := make(map[string]interface{}, len(a))
+			for i, v := range a {
+				if snakeCaseKeys {
+					v.Name.Local = strings.Replace(v.Name.Local, "-", "_", -1)
+				}
+				if len(v.Name.Space) > 0 {
+					aa[v.Name.Space+`:`+v.Name.Local] = map[string]interface{}{"#text": cast(v.Value, r), "#seq": i}
+				} else {
+					aa[v.Name.Local] = map[string]interface{}{"#text": cast(v.Value, r), "#seq": i}
+				}
+			}
+			na["#attr"] = aa
+		}
+	}
+
+	// Return XMPP message.
+	if handleXMPPStreamTag && skey == "stream:stream" {
+		n[skey] = na
+		return n, nil
+	}
+
+	for {
+		t, err := p.RawToken()
+		if err != nil {
+			if err != io.EOF {
+				return nil, errors.New("xml.Decoder.Token() - " + err.Error())
+			}
+			return nil, err
+		}
+		switch t.(type) {
+		case xml.StartElement:
+			tt := t.(xml.StartElement)
+
+			// First call to xmlSeqToMapParser() doesn't pass xml.StartElement - the map key.
+			// So when the loop is first entered, the first token is the root tag along
+			// with any attributes, which we process here.
+			//
+			// Subsequent calls to xmlSeqToMapParser() will pass in tag+attributes for
+			// processing before getting the next token which is the element value,
+			// which is done above.
+			if skey == "" {
+				if len(tt.Name.Space) > 0 {
+					return xmlSeqToMapParser(tt.Name.Space+`:`+tt.Name.Local, tt.Attr, p, r)
+				} else {
+					return xmlSeqToMapParser(tt.Name.Local, tt.Attr, p, r)
+				}
+			}
+
+			// If not initializing the map, parse the element.
+			// len(nn) == 1, necessarily - it is just an 'n'.
+			var nn map[string]interface{}
+			if len(tt.Name.Space) > 0 {
+				nn, err = xmlSeqToMapParser(tt.Name.Space+`:`+tt.Name.Local, tt.Attr, p, r)
+			} else {
+				nn, err = xmlSeqToMapParser(tt.Name.Local, tt.Attr, p, r)
+			}
+			if err != nil {
+				return nil, err
+			}
+
+			// The nn map[string]interface{} value is a na[nn_key] value.
+			// We need to see if nn_key already exists - means we're parsing a list.
+			// This may require converting na[nn_key] value into []interface{} type.
+			// First, extract the key:val for the map - it's a singleton.
+			var key string
+			var val interface{}
+			for key, val = range nn {
+				break
+			}
+
+			// add "#seq" k:v pair -
+			// Sequence number included even in list elements - this should allow us
+			// to properly resequence even something goofy like:
+			//	<ltag>item 1</ltag>
+			//	<newtag>item 2</newtag>
+			//	<ltag>item 3</ltag>
+			// where all the "list" subelements are decoded into an array.
+			switch val.(type) {
+			case map[string]interface{}:
+				val.(map[string]interface{})["#seq"] = seq
+				seq++
+			case interface{}: // a non-nil simple element: string, float64, bool
+				v := map[string]interface{}{"#text": val, "#seq": seq}
+				seq++
+				val = v
+			}
+
+			// 'na' holding sub-elements of n.
+			// See if 'key' already exists.
+			// If 'key' exists, then this is a list, if not just add key:val to na.
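+			// e.g. (illustrative): a second <item> element converts na["item"]
+			// from a single map value into a []interface{} of map values.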
+			if v, ok := na[key]; ok {
+				var a []interface{}
+				switch v.(type) {
+				case []interface{}:
+					a = v.([]interface{})
+				default: // anything else - note: v.(type) != nil
+					a = []interface{}{v}
+				}
+				a = append(a, val)
+				na[key] = a
+			} else {
+				na[key] = val // save it as a singleton
+			}
+		case xml.EndElement:
+			if skey != "" {
+				tt := t.(xml.EndElement)
+				if snakeCaseKeys {
+					tt.Name.Local = strings.Replace(tt.Name.Local, "-", "_", -1)
+				}
+				var name string
+				if len(tt.Name.Space) > 0 {
+					name = tt.Name.Space + `:` + tt.Name.Local
+				} else {
+					name = tt.Name.Local
+				}
+				if skey != name {
+					return nil, fmt.Errorf("element %s not properly terminated, got %s at #%d",
+						skey, name, p.InputOffset())
+				}
+			}
+			// len(n) > 0 if this is a simple element w/o xml.Attrs - see xml.CharData case.
+			if len(n) == 0 {
+				// If len(na)==0 we have an empty element == "";
+				// it has no xml.Attr nor xml.CharData.
+				// Empty element content will be map["etag"]map["#text"]""
+				// after #seq injection - map["etag"]map["#seq"]seq - after return.
+				if len(na) > 0 {
+					n[skey] = na
+				} else {
+					n[skey] = "" // empty element
+				}
+			}
+			return n, nil
+		case xml.CharData:
+			// clean up possible noise
+			tt := strings.Trim(string(t.(xml.CharData)), "\t\r\b\n ")
+			if skey == "" {
+				// per Adrian (http://www.adrianlungu.com/) catch stray text
+				// in decoder stream -
+				// https://github.com/clbanning/mxj/pull/14#issuecomment-182816374
+				// NOTE: CharSetReader must be set to non-UTF-8 CharSet or you'll get
+				// a p.Token() decoding error when the BOM is UTF-16 or UTF-32.
+				continue
+			}
+			if len(tt) > 0 {
+				// every simple element is a #text and has #seq associated with it
+				na["#text"] = cast(tt, r)
+				na["#seq"] = seq
+				seq++
+			}
+		case xml.Comment:
+			if n == nil { // no root 'key'
+				n = map[string]interface{}{"#comment": string(t.(xml.Comment))}
+				return n, NoRoot
+			}
+			cm := make(map[string]interface{}, 2)
+			cm["#text"] = string(t.(xml.Comment))
+			cm["#seq"] = seq
+			seq++
+			na["#comment"] = cm
+		case xml.Directive:
+			if n == nil { // no root 'key'
+				n = map[string]interface{}{"#directive": string(t.(xml.Directive))}
+				return n, NoRoot
+			}
+			dm := make(map[string]interface{}, 2)
+			dm["#text"] = string(t.(xml.Directive))
+			dm["#seq"] = seq
+			seq++
+			na["#directive"] = dm
+		case xml.ProcInst:
+			if n == nil {
+				na = map[string]interface{}{"#target": t.(xml.ProcInst).Target, "#inst": string(t.(xml.ProcInst).Inst)}
+				n = map[string]interface{}{"#procinst": na}
+				return n, NoRoot
+			}
+			pm := make(map[string]interface{}, 3)
+			pm["#target"] = t.(xml.ProcInst).Target
+			pm["#inst"] = string(t.(xml.ProcInst).Inst)
+			pm["#seq"] = seq
+			seq++
+			na["#procinst"] = pm
+		default:
+			// noop - shouldn't ever get here, now, since we handle all token types
+		}
+	}
+}
+
+// ------------------ END: NewMapXml & NewMapXmlReader -------------------------
+
+// --------------------- mv.XmlSeq & mv.XmlSeqWriter -------------------------
+
+// This should ONLY be used on Map values that were decoded using NewMapXmlSeq() & co.
+//
+// Encode a Map as XML with elements sorted on #seq. The companion of NewMapXmlSeq().
+// The following rules apply.
+//	- The key label "#text" is treated as the value for a simple element with attributes.
+//	- The "#seq" key is used to sequence the subelements or attributes but is ignored for writing.
+//	- The "#attr" map key identifies the map of attribute map[string]interface{} values with "#text" key.
+//	- The "#comment" map key identifies a comment in the value "#text" map entry - <!--comment-->.
+// - The "#directive" map key identifies a directive in the value "#text" map entry - <!directive>.
+// - The "#procinst" map key identifies a process instruction in the value "#target" and "#inst"
+//   map entries - <?target inst?>.
+// - Value type encoding:
+//     > string, bool, float64, int, int32, int64, float32: per "%v" formatting
+//     > []bool, []uint8: by casting to string
+//     > structures, etc.: handed to xml.Marshal() - if there is an error, the element
+//       value is "UNKNOWN"
+// - Elements with only attribute values, or that are null, are terminated using "/>" unless XmlGoEmptyElemSyntax() has been called.
+// - If len(mv) == 1 and no rootTag is provided, then the map key is used as the root tag, if possible.
+//   Thus, `{ "key":"value" }` encodes as "<key>value</key>".
+func (mv Map) XmlSeq(rootTag ...string) ([]byte, error) {
+	m := map[string]interface{}(mv)
+	var err error
+	s := new(string)
+	p := new(pretty) // just a stub
+
+	if len(m) == 1 && len(rootTag) == 0 {
+		for key, value := range m {
+			// if it's an array, see if all values are map[string]interface{}
+			// we force a new root tag if we'll end up with no key:value in the list
+			// so: key:[string_val, bool:true] --> <doc><key>string_val</key><key>true</key></doc>
+			switch value.(type) {
+			case []interface{}:
+				for _, v := range value.([]interface{}) {
+					switch v.(type) {
+					case map[string]interface{}: // noop
+					default: // anything else
+						err = mapToXmlSeqIndent(false, s, DefaultRootTag, m, p)
+						goto done
+					}
+				}
+			}
+			err = mapToXmlSeqIndent(false, s, key, value, p)
+		}
+	} else if len(rootTag) == 1 {
+		err = mapToXmlSeqIndent(false, s, rootTag[0], m, p)
+	} else {
+		err = mapToXmlSeqIndent(false, s, DefaultRootTag, m, p)
+	}
+done:
+	return []byte(*s), err
+}
+
+// The following implementation is provided only for symmetry with NewMapXmlReader[Raw]
+// The names will also provide a key for the number of return arguments.
+
+// This should ONLY be used on Map values that were decoded using NewMapXmlSeq() & co.
+//
+// Writes the Map as XML on the Writer.
+// See XmlSeq() for encoding rules.
+func (mv Map) XmlSeqWriter(xmlWriter io.Writer, rootTag ...string) error {
+	x, err := mv.XmlSeq(rootTag...)
+	if err != nil {
+		return err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return err
+}
+
+// This should ONLY be used on Map values that were decoded using NewMapXmlSeq() & co.
+//
+// Writes the Map as XML on the Writer. []byte is the raw XML that was written.
+// See XmlSeq() for encoding rules.
+func (mv Map) XmlSeqWriterRaw(xmlWriter io.Writer, rootTag ...string) ([]byte, error) {
+	x, err := mv.XmlSeq(rootTag...)
+	if err != nil {
+		return x, err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return x, err
+}
+
+// This should ONLY be used on Map values that were decoded using NewMapXmlSeq() & co.
+//
+// Writes the Map as pretty XML on the Writer.
+// See XmlSeq() for encoding rules.
+func (mv Map) XmlSeqIndentWriter(xmlWriter io.Writer, prefix, indent string, rootTag ...string) error {
+	x, err := mv.XmlSeqIndent(prefix, indent, rootTag...)
+	if err != nil {
+		return err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return err
+}
+
+// This should ONLY be used on Map values that were decoded using NewMapXmlSeq() & co.
+//
+// Writes the Map as pretty XML on the Writer. []byte is the raw XML that was written.
+// See XmlSeq() for encoding rules.
+func (mv Map) XmlSeqIndentWriterRaw(xmlWriter io.Writer, prefix, indent string, rootTag ...string) ([]byte, error) {
+	x, err := mv.XmlSeqIndent(prefix, indent, rootTag...)
+	if err != nil {
+		return x, err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return x, err
+}
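+
+// Editor's illustrative sketch, not part of upstream mxj: the Writer variants
+// above simply encode and write in one step, e.g.
+//
+//	f, err := os.Create("out.xml") // hypothetical destination
+//	if err != nil { /* handle */ }
+//	defer f.Close()
+//	err = m.XmlSeqWriter(f, "root") // m is a Map from NewMapXmlSeq()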
+
+// -------------------- END: mv.Xml & mv.XmlWriter -------------------------------
+
+// ---------------------- XmlSeqIndent ----------------------------
+
+// This should ONLY be used on Map values that were decoded using NewMapXmlSeq() & co.
+//
+// Encode a map[string]interface{} as a pretty XML string.
+// See mv.XmlSeq() for encoding rules.
+func (mv Map) XmlSeqIndent(prefix, indent string, rootTag ...string) ([]byte, error) {
+	m := map[string]interface{}(mv)
+
+	var err error
+	s := new(string)
+	p := new(pretty)
+	p.indent = indent
+	p.padding = prefix
+
+	if len(m) == 1 && len(rootTag) == 0 {
+		// this can extract the key for the single map element
+		// use it if it isn't a key for a list
+		for key, value := range m {
+			if _, ok := value.([]interface{}); ok {
+				err = mapToXmlSeqIndent(true, s, DefaultRootTag, m, p)
+			} else {
+				err = mapToXmlSeqIndent(true, s, key, value, p)
+			}
+		}
+	} else if len(rootTag) == 1 {
+		err = mapToXmlSeqIndent(true, s, rootTag[0], m, p)
+	} else {
+		err = mapToXmlSeqIndent(true, s, DefaultRootTag, m, p)
+	}
+	return []byte(*s), err
+}
+
+// where the work actually happens
+// returns an error if an attribute is not atomic
+func mapToXmlSeqIndent(doIndent bool, s *string, key string, value interface{}, pp *pretty) error {
+	var endTag bool
+	var isSimple bool
+	var noEndTag bool
+	var elen int
+	var ss string
+	p := &pretty{pp.indent, pp.cnt, pp.padding, pp.mapDepth, pp.start}
+
+	switch value.(type) {
+	case map[string]interface{}, []byte, string, float64, bool, int, int32, int64, float32:
+		if doIndent {
+			*s += p.padding
+		}
+		if key != "#comment" && key != "#directive" && key != "#procinst" {
+			*s += `<` + key
+		}
+	}
+	switch value.(type) {
+	case map[string]interface{}:
+		val := value.(map[string]interface{})
+
+		if key == "#comment" {
+			*s += `<!--` + val["#text"].(string) + `-->`
+			noEndTag = true
+			break
+		}
+
+		if key == "#directive" {
+			*s += `<!` + val["#text"].(string) + `>`
+			noEndTag = true
+			break
+		}
+
+		if key == "#procinst" {
+			*s += `<?` + val["#target"].(string) + ` ` + val["#inst"].(string) + `?>`
+			noEndTag = true
+			break
+		}
+
+		haveAttrs := false
+		// process attributes first
+		if v, ok := val["#attr"].(map[string]interface{}); ok {
+			// First, unroll the map[string]interface{} into a []keyval array.
+			// Then sequence it.
+			kv := make([]keyval, len(v))
+			n := 0
+			for ak, av := range v {
+				kv[n] = keyval{ak, av}
+				n++
+			}
+			sort.Sort(elemListSeq(kv))
+			// Now encode the attributes in original decoding sequence, using keyval array.
+			for _, a := range kv {
+				vv := a.v.(map[string]interface{})
+				switch vv["#text"].(type) {
+				case string:
+					if xmlEscapeChars {
+						ss = escapeChars(vv["#text"].(string))
+					} else {
+						ss = vv["#text"].(string)
+					}
+					*s += ` ` + a.k + `="` + ss + `"`
+				case float64, bool, int, int32, int64, float32:
+					*s += ` ` + a.k + `="` + fmt.Sprintf("%v", vv["#text"]) + `"`
+				case []byte:
+					if xmlEscapeChars {
+						ss = escapeChars(string(vv["#text"].([]byte)))
+					} else {
+						ss = string(vv["#text"].([]byte))
+					}
+					*s += ` ` + a.k + `="` + ss + `"`
+				default:
+					return fmt.Errorf("invalid attribute value for: %s", a.k)
+				}
+			}
+			haveAttrs = true
+		}
+
+		// simple element?
+		// every map value has, at least, "#seq" and, perhaps, "#text" and/or "#attr"
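+		// For example (editor's note, not upstream): a simple element decoded by
+		// NewMapXmlSeq looks like {"#seq": 0, "#text": "v"} (len 2, no attrs) or
+		// {"#seq": 0, "#attr": {...}, "#text": "v"} (len 3, with attrs), which is
+		// exactly what the len(val) tests below check for.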
+		_, seqOK := val["#seq"] // have key
+		if v, ok := val["#text"]; ok && ((len(val) == 3 && haveAttrs) || (len(val) == 2 && !haveAttrs)) && seqOK {
+			if stmp, ok := v.(string); ok && stmp != "" {
+				if xmlEscapeChars {
+					stmp = escapeChars(stmp)
+				}
+				*s += ">" + stmp
+				endTag = true
+				elen = 1
+			}
+			isSimple = true
+			break
+		} else if !ok && ((len(val) == 2 && haveAttrs) || (len(val) == 1 && !haveAttrs)) && seqOK {
+			// here no #text but have #seq or #seq+#attr
+			endTag = false
+			break
+		}
+
+		// we now need to sequence everything except attributes
+		// 'kv' will hold everything that needs to be written
+		kv := make([]keyval, 0)
+		for k, v := range val {
+			if k == "#attr" { // already processed
+				continue
+			}
+			if k == "#seq" { // ignore - just for sorting
+				continue
+			}
+			switch v.(type) {
+			case []interface{}:
+				// unwind the array as separate entries
+				for _, vv := range v.([]interface{}) {
+					kv = append(kv, keyval{k, vv})
+				}
+			default:
+				kv = append(kv, keyval{k, v})
+			}
+		}
+
+		// close tag with possible attributes
+		*s += ">"
+		if doIndent {
+			*s += "\n"
+		}
+		// something more complex
+		p.mapDepth++
+		sort.Sort(elemListSeq(kv))
+		i := 0
+		for _, v := range kv {
+			switch v.v.(type) {
+			case []interface{}:
+			default:
+				if i == 0 && doIndent {
+					p.Indent()
+				}
+			}
+			i++
+			if err := mapToXmlSeqIndent(doIndent, s, v.k, v.v, p); err != nil {
+				return err
+			}
+			switch v.v.(type) {
+			case []interface{}: // handled in []interface{} case
+			default:
+				if doIndent {
+					p.Outdent()
+				}
+			}
+			i--
+		}
+		p.mapDepth--
+		endTag = true
+		elen = 1 // we do have some content other than attrs
+	case []interface{}:
+		for _, v := range value.([]interface{}) {
+			if doIndent {
+				p.Indent()
+			}
+			if err := mapToXmlSeqIndent(doIndent, s, key, v, p); err != nil {
+				return err
+			}
+			if doIndent {
+				p.Outdent()
+			}
+		}
+		return nil
+	case nil:
+		// terminate the tag
+		if doIndent {
+			*s += p.padding
+		}
+		*s += "<" + key
+		endTag, isSimple = true, true
+		break
+	default: // handle anything - even goofy stuff
+		elen = 0
+		switch value.(type) {
+		case string:
+			if xmlEscapeChars {
+				ss = escapeChars(value.(string))
+			} else {
+				ss = value.(string)
+			}
+			elen = len(ss)
+			if elen > 0 {
+				*s += ">" + ss
+			}
+		case float64, bool, int, int32, int64, float32:
+			v := fmt.Sprintf("%v", value)
+			elen = len(v)
+			if elen > 0 {
+				*s += ">" + v
+			}
+		case []byte: // NOTE: byte is just an alias for uint8
+			// similar to how xml.Marshal handles []byte structure members
+			if xmlEscapeChars {
+				ss = escapeChars(string(value.([]byte)))
+			} else {
+				ss = string(value.([]byte))
+			}
+			elen = len(ss)
+			if elen > 0 {
+				*s += ">" + ss
+			}
+		default:
+			var v []byte
+			var err error
+			if doIndent {
+				v, err = xml.MarshalIndent(value, p.padding, p.indent)
+			} else {
+				v, err = xml.Marshal(value)
+			}
+			if err != nil {
+				*s += ">UNKNOWN"
+			} else {
+				elen = len(v)
+				if elen > 0 {
+					*s += string(v)
+				}
+			}
+		}
+		isSimple = true
+		endTag = true
+	}
+	if endTag && !noEndTag {
+		if doIndent {
+			if !isSimple {
+				*s += p.padding
+			}
+		}
+		switch value.(type) {
+		case map[string]interface{}, []byte, string, float64, bool, int, int32, int64, float32:
+			if elen > 0 || useGoXmlEmptyElemSyntax {
+				if elen == 0 {
+					*s += ">"
+				}
+				*s += `</` + key + ">"
+			} else {
+				*s += `/>`
+			}
+		}
+	} else if !noEndTag {
+		if useGoXmlEmptyElemSyntax {
+			*s += `</` + key + ">"
+			// *s += "></" + key + ">"
+		} else {
+			*s += "/>"
+		}
+	}
+	if doIndent {
+		if p.cnt > p.start {
+			*s += "\n"
+		}
+		p.Outdent()
+	}
+
+	return nil
+}
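+
+// Editor's illustrative sketch, not part of upstream mxj: a round trip through
+// NewMapXmlSeq and XmlSeqIndent re-encodes children in document order, which is
+// what the "#seq" bookkeeping above provides. The literal document is only an
+// example.
+func exampleXmlSeqRoundTrip() ([]byte, error) {
+	m, err := NewMapXmlSeq([]byte(`<doc><b>2</b><a>1</a><c>3</c></doc>`))
+	if err != nil {
+		return nil, err
+	}
+	// <b>, <a>, <c> are re-encoded in their original order, not sorted by key,
+	// because each decoded element carries a "#seq" value.
+	return m.XmlSeqIndent("", "  ")
+}
+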
+// the element sort implementation
+
+type keyval struct {
+	k string
+	v interface{}
+}
+type elemListSeq []keyval
+
+func (e elemListSeq) Len() int {
+	return len(e)
+}
+
+func (e elemListSeq) Swap(i, j int) {
+	e[i], e[j] = e[j], e[i]
+}
+
+func (e elemListSeq) Less(i, j int) bool {
+	var iseq, jseq int
+	var ok bool
+	if iseq, ok = e[i].v.(map[string]interface{})["#seq"].(int); !ok {
+		iseq = 9999999
+	}
+
+	if jseq, ok = e[j].v.(map[string]interface{})["#seq"].(int); !ok {
+		jseq = 9999999
+	}
+
+	// strict '<': sort.Interface requires a strict weak ordering, and the
+	// non-strict '<=' used upstream violates the Less contract for equal
+	// "#seq" values.
+	return iseq < jseq
+}
+
+// =============== https://groups.google.com/forum/#!topic/golang-nuts/lHPOHD-8qio
+
+// BeautifyXml (re)formats an XML doc similar to Map.XmlIndent().
+func BeautifyXml(b []byte, prefix, indent string) ([]byte, error) {
+	x, err := NewMapXmlSeq(b)
+	if err != nil {
+		return nil, err
+	}
+	return x.XmlSeqIndent(prefix, indent)
+}
diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE
new file mode 100644
index 00000000000..bc52e96f2b0
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/LICENSE
@@ -0,0 +1,15 @@
+ISC License
+
+Copyright (c) 2012-2016 Dave Collins
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go
new file mode 100644
index 00000000000..792994785e3
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go
@@ -0,0 +1,145 @@
+// Copyright (c) 2015-2016 Dave Collins
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is not running on Google App Engine, compiled by GopherJS, and
+// "-tags safe" is not added to the go build command line. The "disableunsafe"
+// tag is deprecated and thus should not be used.
+// Go versions prior to 1.4 are disabled because they use a different layout
+// for interfaces which make the implementation of unsafeReflectValue more complex.
+// +build !js,!appengine,!safe,!disableunsafe,go1.4 + +package spew + +import ( + "reflect" + "unsafe" +) + +const ( + // UnsafeDisabled is a build-time constant which specifies whether or + // not access to the unsafe package is available. + UnsafeDisabled = false + + // ptrSize is the size of a pointer on the current arch. + ptrSize = unsafe.Sizeof((*byte)(nil)) +) + +type flag uintptr + +var ( + // flagRO indicates whether the value field of a reflect.Value + // is read-only. + flagRO flag + + // flagAddr indicates whether the address of the reflect.Value's + // value may be taken. + flagAddr flag +) + +// flagKindMask holds the bits that make up the kind +// part of the flags field. In all the supported versions, +// it is in the lower 5 bits. +const flagKindMask = flag(0x1f) + +// Different versions of Go have used different +// bit layouts for the flags type. This table +// records the known combinations. +var okFlags = []struct { + ro, addr flag +}{{ + // From Go 1.4 to 1.5 + ro: 1 << 5, + addr: 1 << 7, +}, { + // Up to Go tip. + ro: 1<<5 | 1<<6, + addr: 1 << 8, +}} + +var flagValOffset = func() uintptr { + field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") + if !ok { + panic("reflect.Value has no flag field") + } + return field.Offset +}() + +// flagField returns a pointer to the flag field of a reflect.Value. +func flagField(v *reflect.Value) *flag { + return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset)) +} + +// unsafeReflectValue converts the passed reflect.Value into a one that bypasses +// the typical safety restrictions preventing access to unaddressable and +// unexported data. It works by digging the raw pointer to the underlying +// value out of the protected value and generating a new unprotected (unsafe) +// reflect.Value to it. +// +// This allows us to check for implementations of the Stringer and error +// interfaces to be used for pretty printing ordinarily unaddressable and +// inaccessible values such as unexported struct fields. +func unsafeReflectValue(v reflect.Value) reflect.Value { + if !v.IsValid() || (v.CanInterface() && v.CanAddr()) { + return v + } + flagFieldPtr := flagField(&v) + *flagFieldPtr &^= flagRO + *flagFieldPtr |= flagAddr + return v +} + +// Sanity checks against future reflect package changes +// to the type or semantics of the Value.flag field. +func init() { + field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") + if !ok { + panic("reflect.Value has no flag field") + } + if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() { + panic("reflect.Value flag field has changed kind") + } + type t0 int + var t struct { + A t0 + // t0 will have flagEmbedRO set. + t0 + // a will have flagStickyRO set + a t0 + } + vA := reflect.ValueOf(t).FieldByName("A") + va := reflect.ValueOf(t).FieldByName("a") + vt0 := reflect.ValueOf(t).FieldByName("t0") + + // Infer flagRO from the difference between the flags + // for the (otherwise identical) fields in t. + flagPublic := *flagField(&vA) + flagWithRO := *flagField(&va) | *flagField(&vt0) + flagRO = flagPublic ^ flagWithRO + + // Infer flagAddr from the difference between a value + // taken from a pointer and not. + vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A") + flagNoPtr := *flagField(&vA) + flagPtr := *flagField(&vPtrA) + flagAddr = flagNoPtr ^ flagPtr + + // Check that the inferred flags tally with one of the known versions. 
+ for _, f := range okFlags { + if flagRO == f.ro && flagAddr == f.addr { + return + } + } + panic("reflect.Value read-only flag has changed semantics") +} diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go new file mode 100644 index 00000000000..205c28d68c4 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go @@ -0,0 +1,38 @@ +// Copyright (c) 2015-2016 Dave Collins +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when the code is running on Google App Engine, compiled by GopherJS, or +// "-tags safe" is added to the go build command line. The "disableunsafe" +// tag is deprecated and thus should not be used. +// +build js appengine safe disableunsafe !go1.4 + +package spew + +import "reflect" + +const ( + // UnsafeDisabled is a build-time constant which specifies whether or + // not access to the unsafe package is available. + UnsafeDisabled = true +) + +// unsafeReflectValue typically converts the passed reflect.Value into a one +// that bypasses the typical safety restrictions preventing access to +// unaddressable and unexported data. However, doing this relies on access to +// the unsafe package. This is a stub version which simply returns the passed +// reflect.Value when the unsafe package is not available. +func unsafeReflectValue(v reflect.Value) reflect.Value { + return v +} diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go new file mode 100644 index 00000000000..1be8ce94576 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/common.go @@ -0,0 +1,341 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "io" + "reflect" + "sort" + "strconv" +) + +// Some constants in the form of bytes to avoid string overhead. This mirrors +// the technique used in the fmt package. 
+var (
+	panicBytes            = []byte("(PANIC=")
+	plusBytes             = []byte("+")
+	iBytes                = []byte("i")
+	trueBytes             = []byte("true")
+	falseBytes            = []byte("false")
+	interfaceBytes        = []byte("(interface {})")
+	commaNewlineBytes     = []byte(",\n")
+	newlineBytes          = []byte("\n")
+	openBraceBytes        = []byte("{")
+	openBraceNewlineBytes = []byte("{\n")
+	closeBraceBytes       = []byte("}")
+	asteriskBytes         = []byte("*")
+	colonBytes            = []byte(":")
+	colonSpaceBytes       = []byte(": ")
+	openParenBytes        = []byte("(")
+	closeParenBytes       = []byte(")")
+	spaceBytes            = []byte(" ")
+	pointerChainBytes     = []byte("->")
+	nilAngleBytes         = []byte("<nil>")
+	maxNewlineBytes       = []byte("<max depth reached>\n")
+	maxShortBytes         = []byte("<max>")
+	circularBytes         = []byte("<already shown>")
+	circularShortBytes    = []byte("<shown>")
+	invalidAngleBytes     = []byte("<invalid>")
+	openBracketBytes      = []byte("[")
+	closeBracketBytes     = []byte("]")
+	percentBytes          = []byte("%")
+	precisionBytes        = []byte(".")
+	openAngleBytes        = []byte("<")
+	closeAngleBytes       = []byte(">")
+	openMapBytes          = []byte("map[")
+	closeMapBytes         = []byte("]")
+	lenEqualsBytes        = []byte("len=")
+	capEqualsBytes        = []byte("cap=")
+)
+
+// hexDigits is used to map a decimal value to a hex digit.
+var hexDigits = "0123456789abcdef"
+
+// catchPanic handles any panics that might occur during the handleMethods
+// calls.
+func catchPanic(w io.Writer, v reflect.Value) {
+	if err := recover(); err != nil {
+		w.Write(panicBytes)
+		fmt.Fprintf(w, "%v", err)
+		w.Write(closeParenBytes)
+	}
+}
+
+// handleMethods attempts to call the Error and String methods on the underlying
+// type the passed reflect.Value represents and outputs the result to Writer w.
+//
+// It handles panics in any called methods by catching and displaying the error
+// as the formatted value.
+func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
+	// We need an interface to check if the type implements the error or
+	// Stringer interface. However, the reflect package won't give us an
+	// interface on certain things like unexported struct fields in order
+	// to enforce visibility rules. We use unsafe, when it's available,
+	// to bypass these restrictions since this package does not mutate the
+	// values.
+	if !v.CanInterface() {
+		if UnsafeDisabled {
+			return false
+		}
+
+		v = unsafeReflectValue(v)
+	}
+
+	// Choose whether or not to do error and Stringer interface lookups against
+	// the base type or a pointer to the base type depending on settings.
+	// Technically calling one of these methods with a pointer receiver can
+	// mutate the value, however, types which choose to satisfy an error or
+	// Stringer interface with a pointer receiver should not be mutating their
+	// state inside these interface methods.
+	if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
+		v = unsafeReflectValue(v)
+	}
+	if v.CanAddr() {
+		v = v.Addr()
+	}
+
+	// Is it an error or Stringer?
+	switch iface := v.Interface().(type) {
+	case error:
+		defer catchPanic(w, v)
+		if cs.ContinueOnMethod {
+			w.Write(openParenBytes)
+			w.Write([]byte(iface.Error()))
+			w.Write(closeParenBytes)
+			w.Write(spaceBytes)
+			return false
+		}
+
+		w.Write([]byte(iface.Error()))
+		return true
+
+	case fmt.Stringer:
+		defer catchPanic(w, v)
+		if cs.ContinueOnMethod {
+			w.Write(openParenBytes)
+			w.Write([]byte(iface.String()))
+			w.Write(closeParenBytes)
+			w.Write(spaceBytes)
+			return false
+		}
+		w.Write([]byte(iface.String()))
+		return true
+	}
+	return false
+}
+
+// printBool outputs a boolean value as true or false to Writer w.
+func printBool(w io.Writer, val bool) { + if val { + w.Write(trueBytes) + } else { + w.Write(falseBytes) + } +} + +// printInt outputs a signed integer value to Writer w. +func printInt(w io.Writer, val int64, base int) { + w.Write([]byte(strconv.FormatInt(val, base))) +} + +// printUint outputs an unsigned integer value to Writer w. +func printUint(w io.Writer, val uint64, base int) { + w.Write([]byte(strconv.FormatUint(val, base))) +} + +// printFloat outputs a floating point value using the specified precision, +// which is expected to be 32 or 64bit, to Writer w. +func printFloat(w io.Writer, val float64, precision int) { + w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision))) +} + +// printComplex outputs a complex value using the specified float precision +// for the real and imaginary parts to Writer w. +func printComplex(w io.Writer, c complex128, floatPrecision int) { + r := real(c) + w.Write(openParenBytes) + w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision))) + i := imag(c) + if i >= 0 { + w.Write(plusBytes) + } + w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision))) + w.Write(iBytes) + w.Write(closeParenBytes) +} + +// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x' +// prefix to Writer w. +func printHexPtr(w io.Writer, p uintptr) { + // Null pointer. + num := uint64(p) + if num == 0 { + w.Write(nilAngleBytes) + return + } + + // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix + buf := make([]byte, 18) + + // It's simpler to construct the hex string right to left. + base := uint64(16) + i := len(buf) - 1 + for num >= base { + buf[i] = hexDigits[num%base] + num /= base + i-- + } + buf[i] = hexDigits[num] + + // Add '0x' prefix. + i-- + buf[i] = 'x' + i-- + buf[i] = '0' + + // Strip unused leading bytes. + buf = buf[i:] + w.Write(buf) +} + +// valuesSorter implements sort.Interface to allow a slice of reflect.Value +// elements to be sorted. +type valuesSorter struct { + values []reflect.Value + strings []string // either nil or same len and values + cs *ConfigState +} + +// newValuesSorter initializes a valuesSorter instance, which holds a set of +// surrogate keys on which the data should be sorted. It uses flags in +// ConfigState to decide if and how to populate those surrogate keys. +func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface { + vs := &valuesSorter{values: values, cs: cs} + if canSortSimply(vs.values[0].Kind()) { + return vs + } + if !cs.DisableMethods { + vs.strings = make([]string, len(values)) + for i := range vs.values { + b := bytes.Buffer{} + if !handleMethods(cs, &b, vs.values[i]) { + vs.strings = nil + break + } + vs.strings[i] = b.String() + } + } + if vs.strings == nil && cs.SpewKeys { + vs.strings = make([]string, len(values)) + for i := range vs.values { + vs.strings[i] = Sprintf("%#v", vs.values[i].Interface()) + } + } + return vs +} + +// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted +// directly, or whether it should be considered for sorting by surrogate keys +// (if the ConfigState allows it). +func canSortSimply(kind reflect.Kind) bool { + // This switch parallels valueSortLess, except for the default case. 
+ switch kind { + case reflect.Bool: + return true + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return true + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + return true + case reflect.Float32, reflect.Float64: + return true + case reflect.String: + return true + case reflect.Uintptr: + return true + case reflect.Array: + return true + } + return false +} + +// Len returns the number of values in the slice. It is part of the +// sort.Interface implementation. +func (s *valuesSorter) Len() int { + return len(s.values) +} + +// Swap swaps the values at the passed indices. It is part of the +// sort.Interface implementation. +func (s *valuesSorter) Swap(i, j int) { + s.values[i], s.values[j] = s.values[j], s.values[i] + if s.strings != nil { + s.strings[i], s.strings[j] = s.strings[j], s.strings[i] + } +} + +// valueSortLess returns whether the first value should sort before the second +// value. It is used by valueSorter.Less as part of the sort.Interface +// implementation. +func valueSortLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Bool: + return !a.Bool() && b.Bool() + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return a.Int() < b.Int() + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + return a.Uint() < b.Uint() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.String: + return a.String() < b.String() + case reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Array: + // Compare the contents of both arrays. + l := a.Len() + for i := 0; i < l; i++ { + av := a.Index(i) + bv := b.Index(i) + if av.Interface() == bv.Interface() { + continue + } + return valueSortLess(av, bv) + } + } + return a.String() < b.String() +} + +// Less returns whether the value at index i should sort before the +// value at index j. It is part of the sort.Interface implementation. +func (s *valuesSorter) Less(i, j int) bool { + if s.strings == nil { + return valueSortLess(s.values[i], s.values[j]) + } + return s.strings[i] < s.strings[j] +} + +// sortValues is a sort function that handles both native types and any type that +// can be converted to error or Stringer. Other inputs are sorted according to +// their Value.String() value to ensure display stability. +func sortValues(values []reflect.Value, cs *ConfigState) { + if len(values) == 0 { + return + } + sort.Sort(newValuesSorter(values, cs)) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go new file mode 100644 index 00000000000..2e3d22f3120 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/config.go @@ -0,0 +1,306 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+)
+
+// ConfigState houses the configuration options used by spew to format and
+// display values. There is a global instance, Config, that is used to control
+// all top-level Formatter and Dump functionality. Each ConfigState instance
+// provides methods equivalent to the top-level functions.
+//
+// The zero value for ConfigState provides no indentation. You would typically
+// want to set it to a space or a tab.
+//
+// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
+// with default settings. See the documentation of NewDefaultConfig for default
+// values.
+type ConfigState struct {
+	// Indent specifies the string to use for each indentation level. The
+	// global config instance that all top-level functions use set this to a
+	// single space by default. If you would like more indentation, you might
+	// set this to a tab with "\t" or perhaps two spaces with "  ".
+	Indent string
+
+	// MaxDepth controls the maximum number of levels to descend into nested
+	// data structures. The default, 0, means there is no limit.
+	//
+	// NOTE: Circular data structures are properly detected, so it is not
+	// necessary to set this value unless you specifically want to limit deeply
+	// nested data structures.
+	MaxDepth int
+
+	// DisableMethods specifies whether or not error and Stringer interfaces are
+	// invoked for types that implement them.
+	DisableMethods bool
+
+	// DisablePointerMethods specifies whether or not to check for and invoke
+	// error and Stringer interfaces on types which only accept a pointer
+	// receiver when the current type is not a pointer.
+	//
+	// NOTE: This might be an unsafe action since calling one of these methods
+	// with a pointer receiver could technically mutate the value, however,
+	// in practice, types which choose to satisfy an error or Stringer
+	// interface with a pointer receiver should not be mutating their state
+	// inside these interface methods. As a result, this option relies on
+	// access to the unsafe package, so it will not have any effect when
+	// running in environments without access to the unsafe package such as
+	// Google App Engine or with the "safe" build tag specified.
+	DisablePointerMethods bool
+
+	// DisablePointerAddresses specifies whether to disable the printing of
+	// pointer addresses. This is useful when diffing data structures in tests.
+	DisablePointerAddresses bool
+
+	// DisableCapacities specifies whether to disable the printing of capacities
+	// for arrays, slices, maps and channels. This is useful when diffing
+	// data structures in tests.
+	DisableCapacities bool
+
+	// ContinueOnMethod specifies whether or not recursion should continue once
+	// a custom error or Stringer interface is invoked. The default, false,
+	// means it will print the results of invoking the custom error or Stringer
+	// interface and return immediately instead of continuing to recurse into
+	// the internals of the data type.
+	//
+	// NOTE: This flag does not have any effect if method invocation is disabled
+	// via the DisableMethods or DisablePointerMethods options.
+	ContinueOnMethod bool
+
+	// SortKeys specifies map keys should be sorted before being printed. Use
+	// this to have a more deterministic, diffable output. Note that only
+	// native types (bool, int, uint, floats, uintptr and string) and types
+	// that support the error or Stringer interfaces (if methods are
+	// enabled) are supported, with other types sorted according to the
+	// reflect.Value.String() output which guarantees display stability.
+	SortKeys bool
+
+	// SpewKeys specifies that, as a last resort attempt, map keys should
+	// be spewed to strings and sorted by those strings. This is only
+	// considered if SortKeys is true.
+	SpewKeys bool
+}
+
+// Config is the active configuration of the top-level functions.
+// The configuration can be changed by modifying the contents of spew.Config.
+var Config = ConfigState{Indent: " "}
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the formatted string as a value that satisfies error. See NewFormatter
+// for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
+	return fmt.Errorf(format, c.convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+	return fmt.Fprint(w, c.convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+	return fmt.Fprintf(w, format, c.convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// were passed with a Formatter interface returned by c.NewFormatter. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+	return fmt.Fprintln(w, c.convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
+	return fmt.Print(c.convertArgs(a)...)
+}
+
+// Printf is a wrapper for fmt.Printf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. 
It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
+	return fmt.Printf(format, c.convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
+	return fmt.Println(c.convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprint(a ...interface{}) string {
+	return fmt.Sprint(c.convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
+	return fmt.Sprintf(format, c.convertArgs(a)...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a Formatter interface returned by c.NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintln(a ...interface{}) string {
+	return fmt.Sprintln(c.convertArgs(a)...)
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface. As a result, it integrates cleanly with standard fmt package
+printing functions. The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly. It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+c.Printf, c.Println, or c.Fprintf.
+*/
+func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
+	return newFormatter(c, v)
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w. It formats
+// exactly the same as Dump.
+func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
+	fdump(c, w, a...)
+} + +/* +Dump displays the passed parameters to standard out with newlines, customizable +indentation, and additional debug information such as complete types and all +pointer addresses used to indirect to the final value. It provides the +following features over the built-in printing facilities provided by the fmt +package: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output + +The configuration options are controlled by modifying the public members +of c. See ConfigState for options documentation. + +See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to +get the formatted result as a string. +*/ +func (c *ConfigState) Dump(a ...interface{}) { + fdump(c, os.Stdout, a...) +} + +// Sdump returns a string with the passed arguments formatted exactly the same +// as Dump. +func (c *ConfigState) Sdump(a ...interface{}) string { + var buf bytes.Buffer + fdump(c, &buf, a...) + return buf.String() +} + +// convertArgs accepts a slice of arguments and returns a slice of the same +// length with each argument converted to a spew Formatter interface using +// the ConfigState associated with s. +func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) { + formatters = make([]interface{}, len(args)) + for index, arg := range args { + formatters[index] = newFormatter(c, arg) + } + return formatters +} + +// NewDefaultConfig returns a ConfigState with the following default settings. +// +// Indent: " " +// MaxDepth: 0 +// DisableMethods: false +// DisablePointerMethods: false +// ContinueOnMethod: false +// SortKeys: false +func NewDefaultConfig() *ConfigState { + return &ConfigState{Indent: " "} +} diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go new file mode 100644 index 00000000000..aacaac6f1e1 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/doc.go @@ -0,0 +1,211 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* +Package spew implements a deep pretty printer for Go data structures to aid in +debugging. 
+
+A quick overview of the additional features spew provides over the built-in
+printing facilities for Go data types is as follows:
+
+	* Pointers are dereferenced and followed
+	* Circular data structures are detected and handled properly
+	* Custom Stringer/error interfaces are optionally invoked, including
+	  on unexported types
+	* Custom types which only implement the Stringer/error interfaces via
+	  a pointer receiver are optionally invoked when passing non-pointer
+	  variables
+	* Byte arrays and slices are dumped like the hexdump -C command which
+	  includes offsets, byte values in hex, and ASCII output (only when using
+	  Dump style)
+
+There are two different approaches spew allows for dumping Go data structures:
+
+	* Dump style which prints with newlines, customizable indentation,
+	  and additional debug information such as types and all pointer addresses
+	  used to indirect to the final value
+	* A custom Formatter interface that integrates cleanly with the standard fmt
+	  package and replaces %v, %+v, %#v, and %#+v to provide inline printing
+	  similar to the default %v while providing the additional functionality
+	  outlined above and passing unsupported format verbs such as %x and %q
+	  along to fmt
+
+Quick Start
+
+This section demonstrates how to quickly get started with spew. See the
+sections below for further details on formatting and configuration options.
+
+To dump a variable with full newlines, indentation, type, and pointer
+information use Dump, Fdump, or Sdump:
+	spew.Dump(myVar1, myVar2, ...)
+	spew.Fdump(someWriter, myVar1, myVar2, ...)
+	str := spew.Sdump(myVar1, myVar2, ...)
+
+Alternatively, if you would prefer to use format strings with a compacted inline
+printing style, use the convenience wrappers Printf, Fprintf, etc. with
+%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
+%#+v (adds types and pointer addresses):
+	spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+	spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+Configuration Options
+
+Configuration of spew is handled by fields in the ConfigState type. For
+convenience, all of the top-level functions use a global state available
+via the spew.Config global.
+
+It is also possible to create a ConfigState instance that provides methods
+equivalent to the top-level functions. This allows concurrent configuration
+options. See the ConfigState documentation for more details.
+
+The following configuration options are available:
+	* Indent
+		String to use for each indentation level for Dump functions.
+		It is a single space by default. A popular alternative is "\t".
+
+	* MaxDepth
+		Maximum number of levels to descend into nested data structures.
+		There is no limit by default.
+
+	* DisableMethods
+		Disables invocation of error and Stringer interface methods.
+		Method invocation is enabled by default.
+
+	* DisablePointerMethods
+		Disables invocation of error and Stringer interface methods on types
+		which only accept pointer receivers from non-pointer variables.
+		Pointer method invocation is enabled by default.
+
+	* DisablePointerAddresses
+		DisablePointerAddresses specifies whether to disable the printing of
+		pointer addresses. This is useful when diffing data structures in tests.
+
+	* DisableCapacities
+		DisableCapacities specifies whether to disable the printing of
+		capacities for arrays, slices, maps and channels. This is useful when
+		diffing data structures in tests.
+
+	* ContinueOnMethod
+		Enables recursion into types after invoking error and Stringer interface
+		methods. Recursion after method invocation is disabled by default.
+
+	* SortKeys
+		Specifies map keys should be sorted before being printed. Use
+		this to have a more deterministic, diffable output. Note that
+		only native types (bool, int, uint, floats, uintptr and string)
+		and types which implement error or Stringer interfaces are
+		supported with other types sorted according to the
+		reflect.Value.String() output which guarantees display
+		stability. Natural map order is used by default.
+
+	* SpewKeys
+		Specifies that, as a last resort attempt, map keys should be
+		spewed to strings and sorted by those strings. This is only
+		considered if SortKeys is true.
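+
+As an editor's sketch (not upstream text), a local ConfigState applies these
+options without touching the global spew.Config; the option values here are
+arbitrary examples:
+
+	scs := spew.ConfigState{Indent: "\t", SortKeys: true, MaxDepth: 2}
+	scs.Dump(myVar1)
+	str := scs.Sdump(myVar1)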
+
+Dump Usage
+
+Simply call spew.Dump with a list of variables you want to dump:
+
+	spew.Dump(myVar1, myVar2, ...)
+
+You may also call spew.Fdump if you would prefer to output to an arbitrary
+io.Writer. For example, to dump to standard error:
+
+	spew.Fdump(os.Stderr, myVar1, myVar2, ...)
+
+A third option is to call spew.Sdump to get the formatted output as a string:
+
+	str := spew.Sdump(myVar1, myVar2, ...)
+
+Sample Dump Output
+
+See the Dump example for details on the setup of the types and variables being
+shown here.
+
+	(main.Foo) {
+	 unexportedField: (*main.Bar)(0xf84002e210)({
+	  flag: (main.Flag) flagTwo,
+	  data: (uintptr) <nil>
+	 }),
+	 ExportedField: (map[interface {}]interface {}) (len=1) {
+	  (string) (len=3) "one": (bool) true
+	 }
+	}
+
+Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
+command as shown.
+	([]uint8) (len=32 cap=32) {
+	 00000000  11 12 13 14 15 16 17 18  19 1a 1b 1c 1d 1e 1f 20  |............... |
+	 00000010  21 22 23 24 25 26 27 28  29 2a 2b 2c 2d 2e 2f 30  |!"#$%&'()*+,-./0|
+	 00000020  31 32                                             |12|
+	}
+
+Custom Formatter
+
+Spew provides a custom formatter that implements the fmt.Formatter interface
+so that it integrates cleanly with standard fmt package printing functions. The
+formatter is useful for inline printing of smaller data types similar to the
+standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Custom Formatter Usage
+
+The simplest way to make use of the spew custom formatter is to call one of the
+convenience functions such as spew.Printf, spew.Println, or spew.Fprintf. The
+functions have syntax you are most likely already familiar with:
+
+	spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+	spew.Println(myVar, myVar2)
+	spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+See the Index for the full list of convenience functions.
+ +Sample Formatter Output + +Double pointer to a uint8: + %v: <**>5 + %+v: <**>(0xf8400420d0->0xf8400420c8)5 + %#v: (**uint8)5 + %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5 + +Pointer to circular struct with a uint8 field and a pointer to itself: + %v: <*>{1 <*>} + %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)} + %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)} + %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)} + +See the Printf example for details on the setup of variables being shown +here. + +Errors + +Since it is possible for custom Stringer/error interfaces to panic, spew +detects them and handles them internally by printing the panic information +inline with the output. Since spew is intended to provide deep pretty printing +capabilities on structures, it intentionally does not return any errors. +*/ +package spew diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go new file mode 100644 index 00000000000..f78d89fc1f6 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/dump.go @@ -0,0 +1,509 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "encoding/hex" + "fmt" + "io" + "os" + "reflect" + "regexp" + "strconv" + "strings" +) + +var ( + // uint8Type is a reflect.Type representing a uint8. It is used to + // convert cgo types to uint8 slices for hexdumping. + uint8Type = reflect.TypeOf(uint8(0)) + + // cCharRE is a regular expression that matches a cgo char. + // It is used to detect character arrays to hexdump them. + cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`) + + // cUnsignedCharRE is a regular expression that matches a cgo unsigned + // char. It is used to detect unsigned character arrays to hexdump + // them. + cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`) + + // cUint8tCharRE is a regular expression that matches a cgo uint8_t. + // It is used to detect uint8_t arrays to hexdump them. + cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`) +) + +// dumpState contains information about the state of a dump operation. +type dumpState struct { + w io.Writer + depth int + pointers map[uintptr]int + ignoreNextType bool + ignoreNextIndent bool + cs *ConfigState +} + +// indent performs indentation according to the depth level and cs.Indent +// option. +func (d *dumpState) indent() { + if d.ignoreNextIndent { + d.ignoreNextIndent = false + return + } + d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth)) +} + +// unpackValue returns values inside of non-nil interfaces when possible. +// This is useful for data types like structs, arrays, slices, and maps which +// can contain varying types packed inside an interface. 
+func (d *dumpState) unpackValue(v reflect.Value) reflect.Value { + if v.Kind() == reflect.Interface && !v.IsNil() { + v = v.Elem() + } + return v +} + +// dumpPtr handles formatting of pointers by indirecting them as necessary. +func (d *dumpState) dumpPtr(v reflect.Value) { + // Remove pointers at or below the current depth from map used to detect + // circular refs. + for k, depth := range d.pointers { + if depth >= d.depth { + delete(d.pointers, k) + } + } + + // Keep list of all dereferenced pointers to show later. + pointerChain := make([]uintptr, 0) + + // Figure out how many levels of indirection there are by dereferencing + // pointers and unpacking interfaces down the chain while detecting circular + // references. + nilFound := false + cycleFound := false + indirects := 0 + ve := v + for ve.Kind() == reflect.Ptr { + if ve.IsNil() { + nilFound = true + break + } + indirects++ + addr := ve.Pointer() + pointerChain = append(pointerChain, addr) + if pd, ok := d.pointers[addr]; ok && pd < d.depth { + cycleFound = true + indirects-- + break + } + d.pointers[addr] = d.depth + + ve = ve.Elem() + if ve.Kind() == reflect.Interface { + if ve.IsNil() { + nilFound = true + break + } + ve = ve.Elem() + } + } + + // Display type information. + d.w.Write(openParenBytes) + d.w.Write(bytes.Repeat(asteriskBytes, indirects)) + d.w.Write([]byte(ve.Type().String())) + d.w.Write(closeParenBytes) + + // Display pointer information. + if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 { + d.w.Write(openParenBytes) + for i, addr := range pointerChain { + if i > 0 { + d.w.Write(pointerChainBytes) + } + printHexPtr(d.w, addr) + } + d.w.Write(closeParenBytes) + } + + // Display dereferenced value. + d.w.Write(openParenBytes) + switch { + case nilFound: + d.w.Write(nilAngleBytes) + + case cycleFound: + d.w.Write(circularBytes) + + default: + d.ignoreNextType = true + d.dump(ve) + } + d.w.Write(closeParenBytes) +} + +// dumpSlice handles formatting of arrays and slices. Byte (uint8 under +// reflection) arrays and slices are dumped in hexdump -C fashion. +func (d *dumpState) dumpSlice(v reflect.Value) { + // Determine whether this type should be hex dumped or not. Also, + // for types which should be hexdumped, try to use the underlying data + // first, then fall back to trying to convert them to a uint8 slice. + var buf []uint8 + doConvert := false + doHexDump := false + numEntries := v.Len() + if numEntries > 0 { + vt := v.Index(0).Type() + vts := vt.String() + switch { + // C types that need to be converted. + case cCharRE.MatchString(vts): + fallthrough + case cUnsignedCharRE.MatchString(vts): + fallthrough + case cUint8tCharRE.MatchString(vts): + doConvert = true + + // Try to use existing uint8 slices and fall back to converting + // and copying if that fails. + case vt.Kind() == reflect.Uint8: + // We need an addressable interface to convert the type + // to a byte slice. However, the reflect package won't + // give us an interface on certain things like + // unexported struct fields in order to enforce + // visibility rules. We use unsafe, when available, to + // bypass these restrictions since this package does not + // mutate the values. + vs := v + if !vs.CanInterface() || !vs.CanAddr() { + vs = unsafeReflectValue(vs) + } + if !UnsafeDisabled { + vs = vs.Slice(0, numEntries) + + // Use the existing uint8 slice if it can be + // type asserted. 
+ iface := vs.Interface() + if slice, ok := iface.([]uint8); ok { + buf = slice + doHexDump = true + break + } + } + + // The underlying data needs to be converted if it can't + // be type asserted to a uint8 slice. + doConvert = true + } + + // Copy and convert the underlying type if needed. + if doConvert && vt.ConvertibleTo(uint8Type) { + // Convert and copy each element into a uint8 byte + // slice. + buf = make([]uint8, numEntries) + for i := 0; i < numEntries; i++ { + vv := v.Index(i) + buf[i] = uint8(vv.Convert(uint8Type).Uint()) + } + doHexDump = true + } + } + + // Hexdump the entire slice as needed. + if doHexDump { + indent := strings.Repeat(d.cs.Indent, d.depth) + str := indent + hex.Dump(buf) + str = strings.Replace(str, "\n", "\n"+indent, -1) + str = strings.TrimRight(str, d.cs.Indent) + d.w.Write([]byte(str)) + return + } + + // Recursively call dump for each item. + for i := 0; i < numEntries; i++ { + d.dump(d.unpackValue(v.Index(i))) + if i < (numEntries - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } +} + +// dump is the main workhorse for dumping a value. It uses the passed reflect +// value to figure out what kind of object we are dealing with and formats it +// appropriately. It is a recursive function, however circular data structures +// are detected and handled properly. +func (d *dumpState) dump(v reflect.Value) { + // Handle invalid reflect values immediately. + kind := v.Kind() + if kind == reflect.Invalid { + d.w.Write(invalidAngleBytes) + return + } + + // Handle pointers specially. + if kind == reflect.Ptr { + d.indent() + d.dumpPtr(v) + return + } + + // Print type information unless already handled elsewhere. + if !d.ignoreNextType { + d.indent() + d.w.Write(openParenBytes) + d.w.Write([]byte(v.Type().String())) + d.w.Write(closeParenBytes) + d.w.Write(spaceBytes) + } + d.ignoreNextType = false + + // Display length and capacity if the built-in len and cap functions + // work with the value's kind and the len/cap itself is non-zero. + valueLen, valueCap := 0, 0 + switch v.Kind() { + case reflect.Array, reflect.Slice, reflect.Chan: + valueLen, valueCap = v.Len(), v.Cap() + case reflect.Map, reflect.String: + valueLen = v.Len() + } + if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 { + d.w.Write(openParenBytes) + if valueLen != 0 { + d.w.Write(lenEqualsBytes) + printInt(d.w, int64(valueLen), 10) + } + if !d.cs.DisableCapacities && valueCap != 0 { + if valueLen != 0 { + d.w.Write(spaceBytes) + } + d.w.Write(capEqualsBytes) + printInt(d.w, int64(valueCap), 10) + } + d.w.Write(closeParenBytes) + d.w.Write(spaceBytes) + } + + // Call Stringer/error interfaces if they exist and the handle methods flag + // is enabled + if !d.cs.DisableMethods { + if (kind != reflect.Invalid) && (kind != reflect.Interface) { + if handled := handleMethods(d.cs, d.w, v); handled { + return + } + } + } + + switch kind { + case reflect.Invalid: + // Do nothing. We should never get here since invalid has already + // been handled above. 
+ + case reflect.Bool: + printBool(d.w, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(d.w, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(d.w, v.Uint(), 10) + + case reflect.Float32: + printFloat(d.w, v.Float(), 32) + + case reflect.Float64: + printFloat(d.w, v.Float(), 64) + + case reflect.Complex64: + printComplex(d.w, v.Complex(), 32) + + case reflect.Complex128: + printComplex(d.w, v.Complex(), 64) + + case reflect.Slice: + if v.IsNil() { + d.w.Write(nilAngleBytes) + break + } + fallthrough + + case reflect.Array: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + d.dumpSlice(v) + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.String: + d.w.Write([]byte(strconv.Quote(v.String()))) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. + if v.IsNil() { + d.w.Write(nilAngleBytes) + } + + case reflect.Ptr: + // Do nothing. We should never get here since pointers have already + // been handled above. + + case reflect.Map: + // nil maps should be indicated as different than empty maps + if v.IsNil() { + d.w.Write(nilAngleBytes) + break + } + + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + numEntries := v.Len() + keys := v.MapKeys() + if d.cs.SortKeys { + sortValues(keys, d.cs) + } + for i, key := range keys { + d.dump(d.unpackValue(key)) + d.w.Write(colonSpaceBytes) + d.ignoreNextIndent = true + d.dump(d.unpackValue(v.MapIndex(key))) + if i < (numEntries - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.Struct: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + vt := v.Type() + numFields := v.NumField() + for i := 0; i < numFields; i++ { + d.indent() + vtf := vt.Field(i) + d.w.Write([]byte(vtf.Name)) + d.w.Write(colonSpaceBytes) + d.ignoreNextIndent = true + d.dump(d.unpackValue(v.Field(i))) + if i < (numFields - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.Uintptr: + printHexPtr(d.w, uintptr(v.Uint())) + + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + printHexPtr(d.w, v.Pointer()) + + // There were not any other types at the time this code was written, but + // fall back to letting the default fmt package handle it in case any new + // types are added. + default: + if v.CanInterface() { + fmt.Fprintf(d.w, "%v", v.Interface()) + } else { + fmt.Fprintf(d.w, "%v", v.String()) + } + } +} + +// fdump is a helper function to consolidate the logic from the various public +// methods which take varying writers and config states. +func fdump(cs *ConfigState, w io.Writer, a ...interface{}) { + for _, arg := range a { + if arg == nil { + w.Write(interfaceBytes) + w.Write(spaceBytes) + w.Write(nilAngleBytes) + w.Write(newlineBytes) + continue + } + + d := dumpState{w: w, cs: cs} + d.pointers = make(map[uintptr]int) + d.dump(reflect.ValueOf(arg)) + d.w.Write(newlineBytes) + } +} + +// Fdump formats and displays the passed arguments to io.Writer w. 
It formats +// exactly the same as Dump. +func Fdump(w io.Writer, a ...interface{}) { + fdump(&Config, w, a...) +} + +// Sdump returns a string with the passed arguments formatted exactly the same +// as Dump. +func Sdump(a ...interface{}) string { + var buf bytes.Buffer + fdump(&Config, &buf, a...) + return buf.String() +} + +/* +Dump displays the passed parameters to standard out with newlines, customizable +indentation, and additional debug information such as complete types and all +pointer addresses used to indirect to the final value. It provides the +following features over the built-in printing facilities provided by the fmt +package: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output + +The configuration options are controlled by an exported package global, +spew.Config. See ConfigState for options documentation. + +See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to +get the formatted result as a string. +*/ +func Dump(a ...interface{}) { + fdump(&Config, os.Stdout, a...) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go new file mode 100644 index 00000000000..b04edb7d7ac --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/format.go @@ -0,0 +1,419 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "reflect" + "strconv" + "strings" +) + +// supportedFlags is a list of all the character flags supported by fmt package. +const supportedFlags = "0-+# " + +// formatState implements the fmt.Formatter interface and contains information +// about the state of a formatting operation. The NewFormatter function can +// be used to get a new Formatter which can be used directly as arguments +// in standard fmt package printing calls. +type formatState struct { + value interface{} + fs fmt.State + depth int + pointers map[uintptr]int + ignoreNextType bool + cs *ConfigState +} + +// buildDefaultFormat recreates the original format string without precision +// and width information to pass in to fmt.Sprintf in the case of an +// unrecognized type. Unless new types are added to the language, this +// function won't ever be called. 
+func (f *formatState) buildDefaultFormat() (format string) {
+	buf := bytes.NewBuffer(percentBytes)
+
+	for _, flag := range supportedFlags {
+		if f.fs.Flag(int(flag)) {
+			buf.WriteRune(flag)
+		}
+	}
+
+	buf.WriteRune('v')
+
+	format = buf.String()
+	return format
+}
+
+// constructOrigFormat recreates the original format string including precision
+// and width information to pass along to the standard fmt package. This allows
+// automatic deferral of all format strings this package doesn't support.
+func (f *formatState) constructOrigFormat(verb rune) (format string) {
+	buf := bytes.NewBuffer(percentBytes)
+
+	for _, flag := range supportedFlags {
+		if f.fs.Flag(int(flag)) {
+			buf.WriteRune(flag)
+		}
+	}
+
+	if width, ok := f.fs.Width(); ok {
+		buf.WriteString(strconv.Itoa(width))
+	}
+
+	if precision, ok := f.fs.Precision(); ok {
+		buf.Write(precisionBytes)
+		buf.WriteString(strconv.Itoa(precision))
+	}
+
+	buf.WriteRune(verb)
+
+	format = buf.String()
+	return format
+}
+
+// unpackValue returns values inside of non-nil interfaces when possible and
+// ensures that types for values which have been unpacked from an interface
+// are displayed when the show types flag is also set.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
+	if v.Kind() == reflect.Interface {
+		f.ignoreNextType = false
+		if !v.IsNil() {
+			v = v.Elem()
+		}
+	}
+	return v
+}
+
+// formatPtr handles formatting of pointers by indirecting them as necessary.
+func (f *formatState) formatPtr(v reflect.Value) {
+	// Display nil if top level pointer is nil.
+	showTypes := f.fs.Flag('#')
+	if v.IsNil() && (!showTypes || f.ignoreNextType) {
+		f.fs.Write(nilAngleBytes)
+		return
+	}
+
+	// Remove pointers at or below the current depth from map used to detect
+	// circular refs.
+	for k, depth := range f.pointers {
+		if depth >= f.depth {
+			delete(f.pointers, k)
+		}
+	}
+
+	// Keep list of all dereferenced pointers to possibly show later.
+	pointerChain := make([]uintptr, 0)
+
+	// Figure out how many levels of indirection there are by dereferencing
+	// pointers and unpacking interfaces down the chain while detecting circular
+	// references.
+	nilFound := false
+	cycleFound := false
+	indirects := 0
+	ve := v
+	for ve.Kind() == reflect.Ptr {
+		if ve.IsNil() {
+			nilFound = true
+			break
+		}
+		indirects++
+		addr := ve.Pointer()
+		pointerChain = append(pointerChain, addr)
+		if pd, ok := f.pointers[addr]; ok && pd < f.depth {
+			cycleFound = true
+			indirects--
+			break
+		}
+		f.pointers[addr] = f.depth
+
+		ve = ve.Elem()
+		if ve.Kind() == reflect.Interface {
+			if ve.IsNil() {
+				nilFound = true
+				break
+			}
+			ve = ve.Elem()
+		}
+	}
+
+	// Display type or indirection level depending on flags.
+	if showTypes && !f.ignoreNextType {
+		f.fs.Write(openParenBytes)
+		f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
+		f.fs.Write([]byte(ve.Type().String()))
+		f.fs.Write(closeParenBytes)
+	} else {
+		if nilFound || cycleFound {
+			indirects += strings.Count(ve.Type().String(), "*")
+		}
+		f.fs.Write(openAngleBytes)
+		f.fs.Write([]byte(strings.Repeat("*", indirects)))
+		f.fs.Write(closeAngleBytes)
+	}
+
+	// Display pointer information depending on flags.
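+	// For example, with the + flag a two-level pointer chain renders as
+	// <**>(0xaddr1->0xaddr2)value (addresses illustrative; cf. the sample
+	// Formatter output in doc.go).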
+ if f.fs.Flag('+') && (len(pointerChain) > 0) { + f.fs.Write(openParenBytes) + for i, addr := range pointerChain { + if i > 0 { + f.fs.Write(pointerChainBytes) + } + printHexPtr(f.fs, addr) + } + f.fs.Write(closeParenBytes) + } + + // Display dereferenced value. + switch { + case nilFound: + f.fs.Write(nilAngleBytes) + + case cycleFound: + f.fs.Write(circularShortBytes) + + default: + f.ignoreNextType = true + f.format(ve) + } +} + +// format is the main workhorse for providing the Formatter interface. It +// uses the passed reflect value to figure out what kind of object we are +// dealing with and formats it appropriately. It is a recursive function, +// however circular data structures are detected and handled properly. +func (f *formatState) format(v reflect.Value) { + // Handle invalid reflect values immediately. + kind := v.Kind() + if kind == reflect.Invalid { + f.fs.Write(invalidAngleBytes) + return + } + + // Handle pointers specially. + if kind == reflect.Ptr { + f.formatPtr(v) + return + } + + // Print type information unless already handled elsewhere. + if !f.ignoreNextType && f.fs.Flag('#') { + f.fs.Write(openParenBytes) + f.fs.Write([]byte(v.Type().String())) + f.fs.Write(closeParenBytes) + } + f.ignoreNextType = false + + // Call Stringer/error interfaces if they exist and the handle methods + // flag is enabled. + if !f.cs.DisableMethods { + if (kind != reflect.Invalid) && (kind != reflect.Interface) { + if handled := handleMethods(f.cs, f.fs, v); handled { + return + } + } + } + + switch kind { + case reflect.Invalid: + // Do nothing. We should never get here since invalid has already + // been handled above. + + case reflect.Bool: + printBool(f.fs, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(f.fs, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(f.fs, v.Uint(), 10) + + case reflect.Float32: + printFloat(f.fs, v.Float(), 32) + + case reflect.Float64: + printFloat(f.fs, v.Float(), 64) + + case reflect.Complex64: + printComplex(f.fs, v.Complex(), 32) + + case reflect.Complex128: + printComplex(f.fs, v.Complex(), 64) + + case reflect.Slice: + if v.IsNil() { + f.fs.Write(nilAngleBytes) + break + } + fallthrough + + case reflect.Array: + f.fs.Write(openBracketBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + numEntries := v.Len() + for i := 0; i < numEntries; i++ { + if i > 0 { + f.fs.Write(spaceBytes) + } + f.ignoreNextType = true + f.format(f.unpackValue(v.Index(i))) + } + } + f.depth-- + f.fs.Write(closeBracketBytes) + + case reflect.String: + f.fs.Write([]byte(v.String())) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. + if v.IsNil() { + f.fs.Write(nilAngleBytes) + } + + case reflect.Ptr: + // Do nothing. We should never get here since pointers have already + // been handled above. 
+
+	case reflect.Map:
+		// nil maps should be indicated as different than empty maps
+		if v.IsNil() {
+			f.fs.Write(nilAngleBytes)
+			break
+		}
+
+		f.fs.Write(openMapBytes)
+		f.depth++
+		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+			f.fs.Write(maxShortBytes)
+		} else {
+			keys := v.MapKeys()
+			if f.cs.SortKeys {
+				sortValues(keys, f.cs)
+			}
+			for i, key := range keys {
+				if i > 0 {
+					f.fs.Write(spaceBytes)
+				}
+				f.ignoreNextType = true
+				f.format(f.unpackValue(key))
+				f.fs.Write(colonBytes)
+				f.ignoreNextType = true
+				f.format(f.unpackValue(v.MapIndex(key)))
+			}
+		}
+		f.depth--
+		f.fs.Write(closeMapBytes)
+
+	case reflect.Struct:
+		numFields := v.NumField()
+		f.fs.Write(openBraceBytes)
+		f.depth++
+		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+			f.fs.Write(maxShortBytes)
+		} else {
+			vt := v.Type()
+			for i := 0; i < numFields; i++ {
+				if i > 0 {
+					f.fs.Write(spaceBytes)
+				}
+				vtf := vt.Field(i)
+				if f.fs.Flag('+') || f.fs.Flag('#') {
+					f.fs.Write([]byte(vtf.Name))
+					f.fs.Write(colonBytes)
+				}
+				f.format(f.unpackValue(v.Field(i)))
+			}
+		}
+		f.depth--
+		f.fs.Write(closeBraceBytes)
+
+	case reflect.Uintptr:
+		printHexPtr(f.fs, uintptr(v.Uint()))
+
+	case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+		printHexPtr(f.fs, v.Pointer())
+
+	// There were not any other types at the time this code was written, but
+	// fall back to letting the default fmt package handle it if any get added.
+	default:
+		format := f.buildDefaultFormat()
+		if v.CanInterface() {
+			fmt.Fprintf(f.fs, format, v.Interface())
+		} else {
+			fmt.Fprintf(f.fs, format, v.String())
+		}
+	}
+}
+
+// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
+// details.
+func (f *formatState) Format(fs fmt.State, verb rune) {
+	f.fs = fs
+
+	// Use standard formatting for verbs that are not v.
+	if verb != 'v' {
+		format := f.constructOrigFormat(verb)
+		fmt.Fprintf(fs, format, f.value)
+		return
+	}
+
+	if f.value == nil {
+		if fs.Flag('#') {
+			fs.Write(interfaceBytes)
+		}
+		fs.Write(nilAngleBytes)
+		return
+	}
+
+	f.format(reflect.ValueOf(f.value))
+}
+
+// newFormatter is a helper function to consolidate the logic from the various
+// public methods which take varying config states.
+func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
+	fs := &formatState{value: v, cs: cs}
+	fs.pointers = make(map[uintptr]int)
+	return fs
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface. As a result, it integrates cleanly with standard fmt package
+printing functions. The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly. It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+Printf, Println, or Fprintf.
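+
+As an illustrative sketch (myVar is a placeholder, not part of this package),
+the convenience wrappers make these two calls equivalent:
+
+	spew.Printf("myVar: %+v\n", myVar)
+	fmt.Printf("myVar: %+v\n", spew.NewFormatter(myVar))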
+*/
+func NewFormatter(v interface{}) fmt.Formatter {
+	return newFormatter(&Config, v)
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go
new file mode 100644
index 00000000000..32c0e338825
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/spew.go
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"fmt"
+	"io"
+)
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the formatted string as a value that satisfies error. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Errorf(format string, a ...interface{}) (err error) {
+	return fmt.Errorf(format, convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+	return fmt.Fprint(w, convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+	return fmt.Fprintf(w, format, convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// were passed with a default Formatter interface returned by NewFormatter.
+// See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+	return fmt.Fprintln(w, convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+// +// This function is shorthand for the following syntax: +// +// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b)) +func Print(a ...interface{}) (n int, err error) { + return fmt.Print(convertArgs(a)...) +} + +// Printf is a wrapper for fmt.Printf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Printf(format string, a ...interface{}) (n int, err error) { + return fmt.Printf(format, convertArgs(a)...) +} + +// Println is a wrapper for fmt.Println that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b)) +func Println(a ...interface{}) (n int, err error) { + return fmt.Println(convertArgs(a)...) +} + +// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprint(a ...interface{}) string { + return fmt.Sprint(convertArgs(a)...) +} + +// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, convertArgs(a)...) +} + +// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it +// were passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprintln(a ...interface{}) string { + return fmt.Sprintln(convertArgs(a)...) +} + +// convertArgs accepts a slice of arguments and returns a slice of the same +// length with each argument converted to a default spew Formatter interface. 
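+// For example (a sketch), the Print wrapper above reduces to
+// fmt.Print(convertArgs(a)...).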
+func convertArgs(args []interface{}) (formatters []interface{}) {
+	formatters = make([]interface{}, len(args))
+	for index, arg := range args {
+		formatters[index] = NewFormatter(arg)
+	}
+	return formatters
+}
diff --git a/vendor/github.com/dgryski/go-rendezvous/LICENSE b/vendor/github.com/dgryski/go-rendezvous/LICENSE
new file mode 100644
index 00000000000..22080f736a4
--- /dev/null
+++ b/vendor/github.com/dgryski/go-rendezvous/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2017-2020 Damian Gryski
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/dgryski/go-rendezvous/rdv.go b/vendor/github.com/dgryski/go-rendezvous/rdv.go
new file mode 100644
index 00000000000..7a6f8203c67
--- /dev/null
+++ b/vendor/github.com/dgryski/go-rendezvous/rdv.go
@@ -0,0 +1,79 @@
+package rendezvous
+
+type Rendezvous struct {
+	nodes map[string]int
+	nstr  []string
+	nhash []uint64
+	hash  Hasher
+}
+
+type Hasher func(s string) uint64
+
+func New(nodes []string, hash Hasher) *Rendezvous {
+	r := &Rendezvous{
+		nodes: make(map[string]int, len(nodes)),
+		nstr:  make([]string, len(nodes)),
+		nhash: make([]uint64, len(nodes)),
+		hash:  hash,
+	}
+
+	for i, n := range nodes {
+		r.nodes[n] = i
+		r.nstr[i] = n
+		r.nhash[i] = hash(n)
+	}
+
+	return r
+}
+
+func (r *Rendezvous) Lookup(k string) string {
+	// short-circuit if we're empty
+	if len(r.nodes) == 0 {
+		return ""
+	}
+
+	khash := r.hash(k)
+
+	var midx int
+	var mhash = xorshiftMult64(khash ^ r.nhash[0])
+
+	for i, nhash := range r.nhash[1:] {
+		if h := xorshiftMult64(khash ^ nhash); h > mhash {
+			midx = i + 1
+			mhash = h
+		}
+	}
+
+	return r.nstr[midx]
+}
+
+func (r *Rendezvous) Add(node string) {
+	r.nodes[node] = len(r.nstr)
+	r.nstr = append(r.nstr, node)
+	r.nhash = append(r.nhash, r.hash(node))
+}
+
+func (r *Rendezvous) Remove(node string) {
+	// find index of node to remove
+	nidx := r.nodes[node]
+
+	// remove from the slices by swapping the last element into its place
+	l := len(r.nstr) - 1
+	r.nstr[nidx] = r.nstr[l]
+	r.nstr = r.nstr[:l]
+
+	r.nhash[nidx] = r.nhash[l]
+	r.nhash = r.nhash[:l]
+
+	// update the map; reindex only if an element was actually moved
+	delete(r.nodes, node)
+	if nidx < l {
+		moved := r.nstr[nidx]
+		r.nodes[moved] = nidx
+	}
+}
+
+func xorshiftMult64(x uint64) uint64 {
+	x ^= x >> 12 // a
+	x ^= x << 25 // b
+	x ^= x >> 27 // c
+	return x * 2685821657736338717
+}
diff --git a/vendor/github.com/eapache/go-resiliency/LICENSE b/vendor/github.com/eapache/go-resiliency/LICENSE
new file mode 100644
index 00000000000..698a3f51397
--- /dev/null
+++ b/vendor/github.com/eapache/go-resiliency/LICENSE
@@ -0,0 +1,22 @@
+The MIT
License (MIT) + +Copyright (c) 2014 Evan Huus + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/eapache/go-resiliency/breaker/README.md b/vendor/github.com/eapache/go-resiliency/breaker/README.md new file mode 100644 index 00000000000..2d1b3d93225 --- /dev/null +++ b/vendor/github.com/eapache/go-resiliency/breaker/README.md @@ -0,0 +1,34 @@ +circuit-breaker +=============== + +[![Build Status](https://travis-ci.org/eapache/go-resiliency.svg?branch=master)](https://travis-ci.org/eapache/go-resiliency) +[![GoDoc](https://godoc.org/github.com/eapache/go-resiliency/breaker?status.svg)](https://godoc.org/github.com/eapache/go-resiliency/breaker) +[![Code of Conduct](https://img.shields.io/badge/code%20of%20conduct-active-blue.svg)](https://eapache.github.io/conduct.html) + +The circuit-breaker resiliency pattern for golang. + +Creating a breaker takes three parameters: +- error threshold (for opening the breaker) +- success threshold (for closing the breaker) +- timeout (how long to keep the breaker open) + +```go +b := breaker.New(3, 1, 5*time.Second) + +for { + result := b.Run(func() error { + // communicate with some external service and + // return an error if the communication failed + return nil + }) + + switch result { + case nil: + // success! + case breaker.ErrBreakerOpen: + // our function wasn't run because the breaker was open + default: + // some other error + } +} +``` diff --git a/vendor/github.com/eapache/go-resiliency/breaker/breaker.go b/vendor/github.com/eapache/go-resiliency/breaker/breaker.go new file mode 100644 index 00000000000..f88ca7248b0 --- /dev/null +++ b/vendor/github.com/eapache/go-resiliency/breaker/breaker.go @@ -0,0 +1,161 @@ +// Package breaker implements the circuit-breaker resiliency pattern for Go. +package breaker + +import ( + "errors" + "sync" + "sync/atomic" + "time" +) + +// ErrBreakerOpen is the error returned from Run() when the function is not executed +// because the breaker is currently open. +var ErrBreakerOpen = errors.New("circuit breaker is open") + +const ( + closed uint32 = iota + open + halfOpen +) + +// Breaker implements the circuit-breaker resiliency pattern +type Breaker struct { + errorThreshold, successThreshold int + timeout time.Duration + + lock sync.Mutex + state uint32 + errors, successes int + lastError time.Time +} + +// New constructs a new circuit-breaker that starts closed. +// From closed, the breaker opens if "errorThreshold" errors are seen +// without an error-free period of at least "timeout". 
From open, the
+// breaker half-closes after "timeout". From half-open, the breaker closes
+// after "successThreshold" consecutive successes, or opens on a single error.
+func New(errorThreshold, successThreshold int, timeout time.Duration) *Breaker {
+	return &Breaker{
+		errorThreshold:   errorThreshold,
+		successThreshold: successThreshold,
+		timeout:          timeout,
+	}
+}
+
+// Run will either return ErrBreakerOpen immediately if the circuit-breaker is
+// already open, or it will run the given function and pass along its return
+// value. It is safe to call Run concurrently on the same Breaker.
+func (b *Breaker) Run(work func() error) error {
+	state := atomic.LoadUint32(&b.state)
+
+	if state == open {
+		return ErrBreakerOpen
+	}
+
+	return b.doWork(state, work)
+}
+
+// Go will either return ErrBreakerOpen immediately if the circuit-breaker is
+// already open, or it will run the given function in a separate goroutine.
+// If the function is run, Go will return nil immediately, and will *not* return
+// the return value of the function. It is safe to call Go concurrently on the
+// same Breaker.
+func (b *Breaker) Go(work func() error) error {
+	state := atomic.LoadUint32(&b.state)
+
+	if state == open {
+		return ErrBreakerOpen
+	}
+
+	// errcheck complains about ignoring the error return value, but
+	// that's on purpose; if you want an error from a goroutine you have to
+	// get it over a channel or something
+	go b.doWork(state, work)
+
+	return nil
+}
+
+func (b *Breaker) doWork(state uint32, work func() error) error {
+	var panicValue interface{}
+
+	result := func() error {
+		defer func() {
+			panicValue = recover()
+		}()
+		return work()
+	}()
+
+	if result == nil && panicValue == nil && state == closed {
+		// short-circuit the normal, success path without contending
+		// on the lock
+		return nil
+	}
+
+	// oh well, I guess we have to contend on the lock
+	b.processResult(result, panicValue)
+
+	if panicValue != nil {
+		// as close as Go lets us come to a "rethrow" although unfortunately
+		// we lose the original panicking location
+		panic(panicValue)
+	}
+
+	return result
+}
+
+func (b *Breaker) processResult(result error, panicValue interface{}) {
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	if result == nil && panicValue == nil {
+		if b.state == halfOpen {
+			b.successes++
+			if b.successes == b.successThreshold {
+				b.closeBreaker()
+			}
+		}
+	} else {
+		if b.errors > 0 {
+			expiry := b.lastError.Add(b.timeout)
+			if time.Now().After(expiry) {
+				b.errors = 0
+			}
+		}
+
+		switch b.state {
+		case closed:
+			b.errors++
+			if b.errors == b.errorThreshold {
+				b.openBreaker()
+			} else {
+				b.lastError = time.Now()
+			}
+		case halfOpen:
+			b.openBreaker()
+		}
+	}
+}
+
+func (b *Breaker) openBreaker() {
+	b.changeState(open)
+	go b.timer()
+}
+
+func (b *Breaker) closeBreaker() {
+	b.changeState(closed)
+}
+
+func (b *Breaker) timer() {
+	time.Sleep(b.timeout)
+
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	b.changeState(halfOpen)
+}
+
+func (b *Breaker) changeState(newState uint32) {
+	b.errors = 0
+	b.successes = 0
+	atomic.StoreUint32(&b.state, newState)
+}
diff --git a/vendor/github.com/eapache/go-xerial-snappy/.gitignore b/vendor/github.com/eapache/go-xerial-snappy/.gitignore
new file mode 100644
index 00000000000..daf913b1b34
--- /dev/null
+++ b/vendor/github.com/eapache/go-xerial-snappy/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/eapache/go-xerial-snappy/.travis.yml b/vendor/github.com/eapache/go-xerial-snappy/.travis.yml new file mode 100644 index 00000000000..d6cf4f1fa1b --- /dev/null +++ b/vendor/github.com/eapache/go-xerial-snappy/.travis.yml @@ -0,0 +1,7 @@ +language: go + +go: +- 1.5.4 +- 1.6.1 + +sudo: false diff --git a/vendor/github.com/eapache/go-xerial-snappy/LICENSE b/vendor/github.com/eapache/go-xerial-snappy/LICENSE new file mode 100644 index 00000000000..5bf3688d9e4 --- /dev/null +++ b/vendor/github.com/eapache/go-xerial-snappy/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Evan Huus + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/eapache/go-xerial-snappy/README.md b/vendor/github.com/eapache/go-xerial-snappy/README.md new file mode 100644 index 00000000000..3f2695c7282 --- /dev/null +++ b/vendor/github.com/eapache/go-xerial-snappy/README.md @@ -0,0 +1,13 @@ +# go-xerial-snappy + +[![Build Status](https://travis-ci.org/eapache/go-xerial-snappy.svg?branch=master)](https://travis-ci.org/eapache/go-xerial-snappy) + +Xerial-compatible Snappy framing support for golang. + +Packages using Xerial for snappy encoding use a framing format incompatible with +basically everything else in existence. This package wraps Go's built-in snappy +package to support it. + +Apps that use this format include Apache Kafka (see +https://github.com/dpkp/kafka-python/issues/126#issuecomment-35478921 for +details). 
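+
+A minimal round-trip sketch (our example, not from the upstream docs; error
+handling kept terse):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	snappy "github.com/eapache/go-xerial-snappy" // package name differs from path
+)
+
+func main() {
+	src := []byte("hello, xerial framing")
+
+	// EncodeStream appends xerial-framed snappy blocks to its first argument.
+	framed := snappy.EncodeStream(nil, src)
+
+	// Decode handles both xerial-framed and raw snappy input.
+	out, err := snappy.Decode(framed)
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Println(string(out)) // hello, xerial framing
+}
+```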
diff --git a/vendor/github.com/eapache/go-xerial-snappy/fuzz.go b/vendor/github.com/eapache/go-xerial-snappy/fuzz.go
new file mode 100644
index 00000000000..6a46f4784e1
--- /dev/null
+++ b/vendor/github.com/eapache/go-xerial-snappy/fuzz.go
@@ -0,0 +1,16 @@
+// +build gofuzz
+
+package snappy
+
+func Fuzz(data []byte) int {
+	decode, err := Decode(data)
+	if decode == nil && err == nil {
+		panic("nil error with nil result")
+	}
+
+	if err != nil {
+		return 0
+	}
+
+	return 1
+}
diff --git a/vendor/github.com/eapache/go-xerial-snappy/snappy.go b/vendor/github.com/eapache/go-xerial-snappy/snappy.go
new file mode 100644
index 00000000000..ea8f7afeb33
--- /dev/null
+++ b/vendor/github.com/eapache/go-xerial-snappy/snappy.go
@@ -0,0 +1,131 @@
+package snappy
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+
+	master "github.com/golang/snappy"
+)
+
+const (
+	sizeOffset = 16
+	sizeBytes  = 4
+)
+
+var (
+	xerialHeader = []byte{130, 83, 78, 65, 80, 80, 89, 0}
+
+	// This is xerial version 1 and minimally compatible with version 1
+	xerialVersionInfo = []byte{0, 0, 0, 1, 0, 0, 0, 1}
+
+	// ErrMalformed is returned by the decoder when the xerial framing
+	// is malformed
+	ErrMalformed = errors.New("malformed xerial framing")
+)
+
+func min(x, y int) int {
+	if x < y {
+		return x
+	}
+	return y
+}
+
+// Encode encodes data as snappy with no framing header.
+func Encode(src []byte) []byte {
+	return master.Encode(nil, src)
+}
+
+// EncodeStream *appends* to the specified 'dst' the compressed
+// 'src' in xerial framing format. If 'dst' does not have enough
+// capacity, then a new slice will be allocated. If 'dst' has
+// non-zero length, then it *must* have been built using this function.
+func EncodeStream(dst, src []byte) []byte {
+	if len(dst) == 0 {
+		dst = append(dst, xerialHeader...)
+		dst = append(dst, xerialVersionInfo...)
+	}
+
+	// Snappy encode in blocks of maximum 32KB
+	var (
+		max       = len(src)
+		blockSize = 32 * 1024
+		pos       = 0
+		chunk     []byte
+	)
+
+	for pos < max {
+		newPos := min(pos + blockSize, max)
+		chunk = master.Encode(chunk[:cap(chunk)], src[pos:newPos])
+
+		// First encode the compressed size (big-endian)
+		// Put* panics if the buffer is too small, so pad 4 bytes first
+		origLen := len(dst)
+		dst = append(dst, dst[0:4]...)
+		binary.BigEndian.PutUint32(dst[origLen:], uint32(len(chunk)))
+
+		// And now the compressed data
+		dst = append(dst, chunk...)
+		pos = newPos
+	}
+	return dst
+}
+
+// Decode decodes snappy data whether it is traditional unframed
+// or includes the xerial framing format.
+func Decode(src []byte) ([]byte, error) {
+	return DecodeInto(nil, src)
+}
+
+// DecodeInto decodes snappy data whether it is traditional unframed
+// or includes the xerial framing format into the specified `dst`.
+// It is assumed that the entirety of `dst` including all capacity is available
+// for use by this function. If `dst` is nil *or* insufficiently large to hold
+// the decoded `src`, new space will be allocated.
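+//
+// A usage sketch (names are ours, not part of the API): reusing one buffer
+// across calls avoids per-call allocations:
+//
+//	buf := make([]byte, 0, 64*1024)
+//	out, err := DecodeInto(buf, framed)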
+func DecodeInto(dst, src []byte) ([]byte, error) {
+	var max = len(src)
+	if max < len(xerialHeader) {
+		return nil, ErrMalformed
+	}
+
+	if !bytes.Equal(src[:8], xerialHeader) {
+		return master.Decode(dst[:cap(dst)], src)
+	}
+
+	if max < sizeOffset+sizeBytes {
+		return nil, ErrMalformed
+	}
+
+	if dst == nil {
+		dst = make([]byte, 0, len(src))
+	}
+
+	dst = dst[:0]
+	var (
+		pos   = sizeOffset
+		chunk []byte
+		err   error
+	)
+
+	for pos+sizeBytes <= max {
+		size := int(binary.BigEndian.Uint32(src[pos : pos+sizeBytes]))
+		pos += sizeBytes
+
+		nextPos := pos + size
+		// On architectures where int is 32 bits wide, size + pos could
+		// overflow so we need to check the low bound as well as the
+		// high
+		if nextPos < pos || nextPos > max {
+			return nil, ErrMalformed
+		}
+
+		chunk, err = master.Decode(chunk[:cap(chunk)], src[pos:nextPos])
+
+		if err != nil {
+			return nil, err
+		}
+		pos = nextPos
+		dst = append(dst, chunk...)
+	}
+	return dst, nil
+}
diff --git a/vendor/github.com/eapache/queue/.gitignore b/vendor/github.com/eapache/queue/.gitignore
new file mode 100644
index 00000000000..836562412fe
--- /dev/null
+++ b/vendor/github.com/eapache/queue/.gitignore
@@ -0,0 +1,23 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
diff --git a/vendor/github.com/eapache/queue/.travis.yml b/vendor/github.com/eapache/queue/.travis.yml
new file mode 100644
index 00000000000..235a40a493f
--- /dev/null
+++ b/vendor/github.com/eapache/queue/.travis.yml
@@ -0,0 +1,7 @@
+language: go
+sudo: false
+
+go:
+  - 1.2
+  - 1.3
+  - 1.4
diff --git a/vendor/github.com/eapache/queue/LICENSE b/vendor/github.com/eapache/queue/LICENSE
new file mode 100644
index 00000000000..d5f36dbcaaf
--- /dev/null
+++ b/vendor/github.com/eapache/queue/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Evan Huus
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file diff --git a/vendor/github.com/eapache/queue/README.md b/vendor/github.com/eapache/queue/README.md new file mode 100644 index 00000000000..8e782335cd7 --- /dev/null +++ b/vendor/github.com/eapache/queue/README.md @@ -0,0 +1,16 @@ +Queue +===== + +[![Build Status](https://travis-ci.org/eapache/queue.svg)](https://travis-ci.org/eapache/queue) +[![GoDoc](https://godoc.org/github.com/eapache/queue?status.png)](https://godoc.org/github.com/eapache/queue) +[![Code of Conduct](https://img.shields.io/badge/code%20of%20conduct-active-blue.svg)](https://eapache.github.io/conduct.html) + +A fast Golang queue using a ring-buffer, based on the version suggested by Dariusz Górecki. +Using this instead of other, simpler, queue implementations (slice+append or linked list) provides +substantial memory and time benefits, and fewer GC pauses. + +The queue implemented here is as fast as it is in part because it is *not* thread-safe. + +Follows semantic versioning using https://gopkg.in/ - import from +[`gopkg.in/eapache/queue.v1`](https://gopkg.in/eapache/queue.v1) +for guaranteed API stability. diff --git a/vendor/github.com/eapache/queue/queue.go b/vendor/github.com/eapache/queue/queue.go new file mode 100644 index 00000000000..71d1acdf27b --- /dev/null +++ b/vendor/github.com/eapache/queue/queue.go @@ -0,0 +1,102 @@ +/* +Package queue provides a fast, ring-buffer queue based on the version suggested by Dariusz Górecki. +Using this instead of other, simpler, queue implementations (slice+append or linked list) provides +substantial memory and time benefits, and fewer GC pauses. + +The queue implemented here is as fast as it is for an additional reason: it is *not* thread-safe. +*/ +package queue + +// minQueueLen is smallest capacity that queue may have. +// Must be power of 2 for bitwise modulus: x % n == x & (n - 1). +const minQueueLen = 16 + +// Queue represents a single instance of the queue data structure. +type Queue struct { + buf []interface{} + head, tail, count int +} + +// New constructs and returns a new Queue. +func New() *Queue { + return &Queue{ + buf: make([]interface{}, minQueueLen), + } +} + +// Length returns the number of elements currently stored in the queue. +func (q *Queue) Length() int { + return q.count +} + +// resizes the queue to fit exactly twice its current contents +// this can result in shrinking if the queue is less than half-full +func (q *Queue) resize() { + newBuf := make([]interface{}, q.count<<1) + + if q.tail > q.head { + copy(newBuf, q.buf[q.head:q.tail]) + } else { + n := copy(newBuf, q.buf[q.head:]) + copy(newBuf[n:], q.buf[:q.tail]) + } + + q.head = 0 + q.tail = q.count + q.buf = newBuf +} + +// Add puts an element on the end of the queue. +func (q *Queue) Add(elem interface{}) { + if q.count == len(q.buf) { + q.resize() + } + + q.buf[q.tail] = elem + // bitwise modulus + q.tail = (q.tail + 1) & (len(q.buf) - 1) + q.count++ +} + +// Peek returns the element at the head of the queue. This call panics +// if the queue is empty. +func (q *Queue) Peek() interface{} { + if q.count <= 0 { + panic("queue: Peek() called on empty queue") + } + return q.buf[q.head] +} + +// Get returns the element at index i in the queue. If the index is +// invalid, the call will panic. This method accepts both positive and +// negative index values. Index 0 refers to the first element, and +// index -1 refers to the last. +func (q *Queue) Get(i int) interface{} { + // If indexing backwards, convert to positive index. 
+ if i < 0 { + i += q.count + } + if i < 0 || i >= q.count { + panic("queue: Get() called with index out of range") + } + // bitwise modulus + return q.buf[(q.head+i)&(len(q.buf)-1)] +} + +// Remove removes and returns the element from the front of the queue. If the +// queue is empty, the call will panic. +func (q *Queue) Remove() interface{} { + if q.count <= 0 { + panic("queue: Remove() called on empty queue") + } + ret := q.buf[q.head] + q.buf[q.head] = nil + // bitwise modulus + q.head = (q.head + 1) & (len(q.buf) - 1) + q.count-- + // Resize down if buffer 1/4 full. + if len(q.buf) > minQueueLen && (q.count<<2) == len(q.buf) { + q.resize() + } + return ret +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/.gitignore b/vendor/github.com/eclipse/paho.mqtt.golang/.gitignore new file mode 100644 index 00000000000..47bb0de48e9 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/.gitignore @@ -0,0 +1,36 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +*.msg +*.lok + +samples/trivial +samples/trivial2 +samples/sample +samples/reconnect +samples/ssl +samples/custom_store +samples/simple +samples/stdinpub +samples/stdoutsub +samples/routing \ No newline at end of file diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/CONTRIBUTING.md b/vendor/github.com/eclipse/paho.mqtt.golang/CONTRIBUTING.md new file mode 100644 index 00000000000..9791dc60318 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/CONTRIBUTING.md @@ -0,0 +1,56 @@ +Contributing to Paho +==================== + +Thanks for your interest in this project. + +Project description: +-------------------- + +The Paho project has been created to provide scalable open-source implementations of open and standard messaging protocols aimed at new, existing, and emerging applications for Machine-to-Machine (M2M) and Internet of Things (IoT). +Paho reflects the inherent physical and cost constraints of device connectivity. Its objectives include effective levels of decoupling between devices and applications, designed to keep markets open and encourage the rapid growth of scalable Web and Enterprise middleware and applications. Paho is being kicked off with MQTT publish/subscribe client implementations for use on embedded platforms, along with corresponding server support as determined by the community. + +- https://projects.eclipse.org/projects/technology.paho + +Developer resources: +-------------------- + +Information regarding source code management, builds, coding standards, and more. + +- https://projects.eclipse.org/projects/technology.paho/developer + +Contributor License Agreement: +------------------------------ + +Before your contribution can be accepted by the project, you need to create and electronically sign the Eclipse Foundation Contributor License Agreement (CLA). + +- http://www.eclipse.org/legal/CLA.php + +Contributing Code: +------------------ + +The Go client is developed in Github, see their documentation on the process of forking and pull requests; https://help.github.com/categories/collaborating-on-projects-using-pull-requests/ + +Git commit messages should follow the style described here; + +http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html + +Contact: +-------- + +Contact the project developers via the project's "dev" list. 
+ +- https://dev.eclipse.org/mailman/listinfo/paho-dev + +Search for bugs: +---------------- + +This project uses Github issues to track ongoing development and issues. + +- https://github.com/eclipse/paho.mqtt.golang/issues + +Create a new bug: +----------------- + +Be sure to search for existing bugs before you create another one. Remember that contributions are always welcome! + +- https://github.com/eclipse/paho.mqtt.golang/issues diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/DISTRIBUTION b/vendor/github.com/eclipse/paho.mqtt.golang/DISTRIBUTION new file mode 100644 index 00000000000..34e49731daa --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/DISTRIBUTION @@ -0,0 +1,15 @@ + + +Eclipse Distribution License - v 1.0 + +Copyright (c) 2007, Eclipse Foundation, Inc. and its licensors. + +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + + Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + Neither the name of the Eclipse Foundation, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/LICENSE b/vendor/github.com/eclipse/paho.mqtt.golang/LICENSE new file mode 100644 index 00000000000..aa7cc810fa1 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/LICENSE @@ -0,0 +1,87 @@ +Eclipse Public License - v 1.0 + +THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT. + +1. DEFINITIONS + +"Contribution" means: + +a) in the case of the initial Contributor, the initial code and documentation distributed under this Agreement, and + +b) in the case of each subsequent Contributor: + +i) changes to the Program, and + +ii) additions to the Program; + +where such changes and/or additions to the Program originate from and are distributed by that particular Contributor. A Contribution 'originates' from a Contributor if it was added to the Program by such Contributor itself or anyone acting on such Contributor's behalf. 
Contributions do not include additions to the Program which: (i) are separate modules of software distributed in conjunction with the Program under their own license agreement, and (ii) are not derivative works of the Program. + +"Contributor" means any person or entity that distributes the Program. + +"Licensed Patents" mean patent claims licensable by a Contributor which are necessarily infringed by the use or sale of its Contribution alone or when combined with the Program. + +"Program" means the Contributions distributed in accordance with this Agreement. + +"Recipient" means anyone who receives the Program under this Agreement, including all Contributors. + +2. GRANT OF RIGHTS + +a) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free copyright license to reproduce, prepare derivative works of, publicly display, publicly perform, distribute and sublicense the Contribution of such Contributor, if any, and such derivative works, in source code and object code form. + +b) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free patent license under Licensed Patents to make, use, sell, offer to sell, import and otherwise transfer the Contribution of such Contributor, if any, in source code and object code form. This patent license shall apply to the combination of the Contribution and the Program if, at the time the Contribution is added by the Contributor, such addition of the Contribution causes such combination to be covered by the Licensed Patents. The patent license shall not apply to any other combinations which include the Contribution. No hardware per se is licensed hereunder. + +c) Recipient understands that although each Contributor grants the licenses to its Contributions set forth herein, no assurances are provided by any Contributor that the Program does not infringe the patent or other intellectual property rights of any other entity. Each Contributor disclaims any liability to Recipient for claims brought by any other entity based on infringement of intellectual property rights or otherwise. As a condition to exercising the rights and licenses granted hereunder, each Recipient hereby assumes sole responsibility to secure any other intellectual property rights needed, if any. For example, if a third party patent license is required to allow Recipient to distribute the Program, it is Recipient's responsibility to acquire that license before distributing the Program. + +d) Each Contributor represents that to its knowledge it has sufficient copyright rights in its Contribution, if any, to grant the copyright license set forth in this Agreement. + +3. 
REQUIREMENTS + +A Contributor may choose to distribute the Program in object code form under its own license agreement, provided that: + +a) it complies with the terms and conditions of this Agreement; and + +b) its license agreement: + +i) effectively disclaims on behalf of all Contributors all warranties and conditions, express and implied, including warranties or conditions of title and non-infringement, and implied warranties or conditions of merchantability and fitness for a particular purpose; + +ii) effectively excludes on behalf of all Contributors all liability for damages, including direct, indirect, special, incidental and consequential damages, such as lost profits; + +iii) states that any provisions which differ from this Agreement are offered by that Contributor alone and not by any other party; and + +iv) states that source code for the Program is available from such Contributor, and informs licensees how to obtain it in a reasonable manner on or through a medium customarily used for software exchange. + +When the Program is made available in source code form: + +a) it must be made available under this Agreement; and + +b) a copy of this Agreement must be included with each copy of the Program. + +Contributors may not remove or alter any copyright notices contained within the Program. + +Each Contributor must identify itself as the originator of its Contribution, if any, in a manner that reasonably allows subsequent Recipients to identify the originator of the Contribution. + +4. COMMERCIAL DISTRIBUTION + +Commercial distributors of software may accept certain responsibilities with respect to end users, business partners and the like. While this license is intended to facilitate the commercial use of the Program, the Contributor who includes the Program in a commercial product offering should do so in a manner which does not create potential liability for other Contributors. Therefore, if a Contributor includes the Program in a commercial product offering, such Contributor ("Commercial Contributor") hereby agrees to defend and indemnify every other Contributor ("Indemnified Contributor") against any losses, damages and costs (collectively "Losses") arising from claims, lawsuits and other legal actions brought by a third party against the Indemnified Contributor to the extent caused by the acts or omissions of such Commercial Contributor in connection with its distribution of the Program in a commercial product offering. The obligations in this section do not apply to any claims or Losses relating to any actual or alleged intellectual property infringement. In order to qualify, an Indemnified Contributor must: a) promptly notify the Commercial Contributor in writing of such claim, and b) allow the Commercial Contributor to control, and cooperate with the Commercial Contributor in, the defense and any related settlement negotiations. The Indemnified Contributor may participate in any such claim at its own expense. + +For example, a Contributor might include the Program in a commercial product offering, Product X. That Contributor is then a Commercial Contributor. If that Commercial Contributor then makes performance claims, or offers warranties related to Product X, those performance claims and warranties are such Commercial Contributor's responsibility alone. 
Under this section, the Commercial Contributor would have to defend claims against the other Contributors related to those performance claims and warranties, and if a court requires any other Contributor to pay any damages as a result, the Commercial Contributor must pay those damages. + +5. NO WARRANTY + +EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is solely responsible for determining the appropriateness of using and distributing the Program and assumes all risks associated with its exercise of rights under this Agreement , including but not limited to the risks and costs of program errors, compliance with applicable laws, damage to or loss of data, programs or equipment, and unavailability or interruption of operations. + +6. DISCLAIMER OF LIABILITY + +EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +7. GENERAL + +If any provision of this Agreement is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this Agreement, and without further action by the parties hereto, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable. + +If Recipient institutes patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Program itself (excluding combinations of the Program with other software or hardware) infringes such Recipient's patent(s), then such Recipient's rights granted under Section 2(b) shall terminate as of the date such litigation is filed. + +All Recipient's rights under this Agreement shall terminate if it fails to comply with any of the material terms or conditions of this Agreement and does not cure such failure in a reasonable period of time after becoming aware of such noncompliance. If all Recipient's rights under this Agreement terminate, Recipient agrees to cease use and distribution of the Program as soon as reasonably practicable. However, Recipient's obligations under this Agreement and any licenses granted by Recipient relating to the Program shall continue and survive. + +Everyone is permitted to copy and distribute copies of this Agreement, but in order to avoid inconsistency the Agreement is copyrighted and may only be modified in the following manner. The Agreement Steward reserves the right to publish new versions (including revisions) of this Agreement from time to time. No one other than the Agreement Steward has the right to modify this Agreement. The Eclipse Foundation is the initial Agreement Steward. The Eclipse Foundation may assign the responsibility to serve as the Agreement Steward to a suitable separate entity. Each new version of the Agreement will be given a distinguishing version number. 
The Program (including Contributions) may always be distributed subject to the version of the Agreement under which it was received. In addition, after a new version of the Agreement is published, Contributor may elect to distribute the Program (including its Contributions) under the new version. Except as expressly stated in Sections 2(a) and 2(b) above, Recipient receives no rights or licenses to the intellectual property of any Contributor under this Agreement, whether expressly, by implication, estoppel or otherwise. All rights in the Program not expressly granted under this Agreement are reserved.
+
+This Agreement is governed by the laws of the State of New York and the intellectual property laws of the United States of America. No party to this Agreement will bring a legal action under this Agreement more than one year after the cause of action arose. Each party waives its rights to a jury trial in any resulting litigation.
\ No newline at end of file
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/README.md b/vendor/github.com/eclipse/paho.mqtt.golang/README.md
new file mode 100644
index 00000000000..81c7148e093
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/README.md
@@ -0,0 +1,67 @@
+
+[![GoDoc](https://godoc.org/github.com/eclipse/paho.mqtt.golang?status.svg)](https://godoc.org/github.com/eclipse/paho.mqtt.golang)
+[![Go Report Card](https://goreportcard.com/badge/github.com/eclipse/paho.mqtt.golang)](https://goreportcard.com/report/github.com/eclipse/paho.mqtt.golang)
+
+Eclipse Paho MQTT Go client
+===========================
+
+This repository contains the source code for the [Eclipse Paho](http://eclipse.org/paho) MQTT Go client library.
+
+This code builds a library that enables applications to connect to an [MQTT](http://mqtt.org) broker to publish messages, and to subscribe to topics and receive published messages.
+
+This library supports a fully asynchronous mode of operation.
+
+Installation and Build
+----------------------
+
+This client is designed to work with the standard Go tools, so installation is as easy as:
+
+```
+go get github.com/eclipse/paho.mqtt.golang
+```
+
+The client depends on Google's [websockets](https://godoc.org/golang.org/x/net/websocket) and [proxy](https://godoc.org/golang.org/x/net/proxy) packages,
+also easily installed with the commands:
+
+```
+go get golang.org/x/net/websocket
+go get golang.org/x/net/proxy
+```
+
+Usage and API
+-------------
+
+Detailed API documentation is available by using the godoc tool, or can be browsed online
+using the [godoc.org](http://godoc.org/github.com/eclipse/paho.mqtt.golang) service.
+
+Make use of the library by importing it in your Go client source code. For example,
+```
+import "github.com/eclipse/paho.mqtt.golang"
+```
+
+Samples are available in the `cmd` directory for reference.
+
+Runtime tracing
+---------------
+
+Tracing is enabled by assigning loggers (from the Go log package) to the logging endpoints ERROR, CRITICAL, WARN and DEBUG.
+
+Reporting bugs
+--------------
+
+Please report bugs by raising issues for this project on GitHub: https://github.com/eclipse/paho.mqtt.golang/issues
+
+More information
+----------------
+
+Discussion of the Paho clients takes place on the [Eclipse paho-dev mailing list](https://dev.eclipse.org/mailman/listinfo/paho-dev).
+
+General questions about the MQTT protocol are discussed in the [MQTT Google Group](https://groups.google.com/forum/?hl=en-US&fromgroups#!forum/mqtt).
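+
+Minimal example
+---------------
+
+To make the Usage and Runtime tracing sections above concrete, here is a
+minimal, self-contained sketch (the broker address, client ID and topic are
+placeholders):
+
+```go
+package main
+
+import (
+	"fmt"
+	"time"
+
+	mqtt "github.com/eclipse/paho.mqtt.golang"
+)
+
+func main() {
+	opts := mqtt.NewClientOptions().
+		AddBroker("tcp://localhost:1883"). // placeholder broker address
+		SetClientID("sample-client")       // placeholder client ID
+
+	c := mqtt.NewClient(opts)
+	if token := c.Connect(); token.Wait() && token.Error() != nil {
+		panic(token.Error())
+	}
+
+	// Route messages published on the sample topic to a callback.
+	if token := c.Subscribe("sample/topic", 0, func(_ mqtt.Client, m mqtt.Message) {
+		fmt.Printf("received %q on %s\n", m.Payload(), m.Topic())
+	}); token.Wait() && token.Error() != nil {
+		panic(token.Error())
+	}
+
+	// Publish at QoS 0 and wait for the send to complete.
+	c.Publish("sample/topic", 0, false, "hello").Wait()
+
+	time.Sleep(time.Second) // crude pause so the callback can run
+	c.Disconnect(250)       // allow up to 250ms for in-flight work
+}
+```
+
+Similarly, the tracing endpoints accept any value satisfying the package's
+Logger interface, so a standard `*log.Logger` works directly (the prefixes
+below are arbitrary):
+
+```go
+import (
+	"log"
+	"os"
+
+	mqtt "github.com/eclipse/paho.mqtt.golang"
+)
+
+func init() {
+	mqtt.DEBUG = log.New(os.Stderr, "[mqtt DEBUG] ", log.LstdFlags)
+	mqtt.WARN = log.New(os.Stderr, "[mqtt WARN] ", log.LstdFlags)
+	mqtt.ERROR = log.New(os.Stderr, "[mqtt ERROR] ", log.LstdFlags)
+	mqtt.CRITICAL = log.New(os.Stderr, "[mqtt CRITICAL] ", log.LstdFlags)
+}
+```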
+
+There is much more information available via the [MQTT community site](http://mqtt.org).
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/about.html b/vendor/github.com/eclipse/paho.mqtt.golang/about.html
new file mode 100644
index 00000000000..b183f417abb
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/about.html
@@ -0,0 +1,41 @@
+About
+
+About This Content
+
+December 9, 2013
+
+License
+
+The Eclipse Foundation makes available all content in this plug-in ("Content"). Unless otherwise
+indicated below, the Content is provided to you under the terms and conditions of the
+Eclipse Public License Version 1.0 ("EPL") and Eclipse Distribution License Version 1.0 ("EDL").
+A copy of the EPL is available at
+http://www.eclipse.org/legal/epl-v10.html
+and a copy of the EDL is available at
+http://www.eclipse.org/org/documents/edl-v10.php.
+For purposes of the EPL, "Program" will mean the Content.
+
+If you did not receive this Content directly from the Eclipse Foundation, the Content is
+being redistributed by another party ("Redistributor") and different terms and conditions may
+apply to your use of any object code in the Content. Check the Redistributor's license that was
+provided with the Content. If no such license exists, contact the Redistributor. Unless otherwise
+indicated below, the terms and conditions of the EPL still apply to any source code in the Content
+and such source code may be obtained at http://www.eclipse.org.
+
+Third Party Content
+
+The Content includes items that have been sourced from third parties as set out below. If you
+did not receive this Content directly from the Eclipse Foundation, the following is provided
+for informational purposes only, and you should look to the Redistributor's license for
+terms and conditions of use.
+
+None
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/client.go b/vendor/github.com/eclipse/paho.mqtt.golang/client.go
new file mode 100644
index 00000000000..24d56c1f38b
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/client.go
@@ -0,0 +1,759 @@
+/*
+ * Copyright (c) 2013 IBM Corp.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ *    Seth Hoenig
+ *    Allan Stockdill-Mander
+ *    Mike Robertson
+ */
+
+// Portions copyright © 2018 TIBCO Software Inc.
+
+// Package mqtt provides an MQTT v3.1.1 client library.
+package mqtt
+
+import (
+	"errors"
+	"fmt"
+	"net"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/eclipse/paho.mqtt.golang/packets"
+)
+
+const (
+	disconnected uint32 = iota
+	connecting
+	reconnecting
+	connected
+)
+
+// Client is the interface definition for a Client as used by this
+// library; the interface is primarily there to allow mocking in tests.
+//
+// It is an MQTT v3.1.1 client for communicating
+// with an MQTT server using non-blocking methods that allow work
+// to be done in the background.
+// An application may connect to an MQTT server using:
+//  A plain TCP socket
+//  A secure SSL/TLS socket
+//  A websocket
+// To enable ensured message delivery at Quality of Service (QoS) levels
+// described in the MQTT spec, a message persistence mechanism must be
+// used. This is done by providing a type which implements the Store
+// interface. For convenience, FileStore and MemoryStore are provided
+// implementations that should be sufficient for most use cases. More
+// information can be found in their respective documentation.
+// Numerous connection options may be specified by configuring and then
+// supplying a ClientOptions type.
+type Client interface {
+	// IsConnected returns a bool signifying whether
+	// the client is connected or not.
+	IsConnected() bool
+	// IsConnectionOpen returns a bool signifying whether the client has an active
+	// connection to the mqtt broker, i.e. not in disconnected or reconnect mode
+	IsConnectionOpen() bool
+	// Connect will create a connection to the message broker; by default
+	// it will attempt to connect at v3.1.1 and auto retry at v3.1 if that
+	// fails
+	Connect() Token
+	// Disconnect will end the connection with the server, but not before waiting
+	// the specified number of milliseconds for existing work to be
+	// completed.
+	Disconnect(quiesce uint)
+	// Publish will publish a message with the specified QoS and content
+	// to the specified topic.
+	// Returns a token to track delivery of the message to the broker
+	Publish(topic string, qos byte, retained bool, payload interface{}) Token
+	// Subscribe starts a new subscription. Provide a MessageHandler to be executed when
+	// a message is published on the topic provided, or nil for the default handler
+	Subscribe(topic string, qos byte, callback MessageHandler) Token
+	// SubscribeMultiple starts a new subscription for multiple topics. Provide a MessageHandler to
+	// be executed when a message is published on one of the topics provided, or nil for the
+	// default handler
+	SubscribeMultiple(filters map[string]byte, callback MessageHandler) Token
+	// Unsubscribe will end the subscription from each of the topics provided.
+ // Messages published to those topics from other clients will no longer be + // received. + Unsubscribe(topics ...string) Token + // AddRoute allows you to add a handler for messages on a specific topic + // without making a subscription. For example having a different handler + // for parts of a wildcard subscription + AddRoute(topic string, callback MessageHandler) + // OptionsReader returns a ClientOptionsReader which is a copy of the clientoptions + // in use by the client. + OptionsReader() ClientOptionsReader +} + +// client implements the Client interface +type client struct { + lastSent atomic.Value + lastReceived atomic.Value + pingOutstanding int32 + status uint32 + sync.RWMutex + messageIds + conn net.Conn + ibound chan packets.ControlPacket + obound chan *PacketAndToken + oboundP chan *PacketAndToken + msgRouter *router + stopRouter chan bool + incomingPubChan chan *packets.PublishPacket + errors chan error + stop chan struct{} + persist Store + options ClientOptions + workers sync.WaitGroup +} + +// NewClient will create an MQTT v3.1.1 client with all of the options specified +// in the provided ClientOptions. The client must have the Connect method called +// on it before it may be used. This is to make sure resources (such as a net +// connection) are created before the application is actually ready. +func NewClient(o *ClientOptions) Client { + c := &client{} + c.options = *o + + if c.options.Store == nil { + c.options.Store = NewMemoryStore() + } + switch c.options.ProtocolVersion { + case 3, 4: + c.options.protocolVersionExplicit = true + case 0x83, 0x84: + c.options.protocolVersionExplicit = true + default: + c.options.ProtocolVersion = 4 + c.options.protocolVersionExplicit = false + } + c.persist = c.options.Store + c.status = disconnected + c.messageIds = messageIds{index: make(map[uint16]tokenCompletor)} + c.msgRouter, c.stopRouter = newRouter() + c.msgRouter.setDefaultHandler(c.options.DefaultPublishHandler) + if !c.options.AutoReconnect { + c.options.MessageChannelDepth = 0 + } + return c +} + +// AddRoute allows you to add a handler for messages on a specific topic +// without making a subscription. For example having a different handler +// for parts of a wildcard subscription +func (c *client) AddRoute(topic string, callback MessageHandler) { + if callback != nil { + c.msgRouter.addRoute(topic, callback) + } +} + +// IsConnected returns a bool signifying whether +// the client is connected or not. 
+func (c *client) IsConnected() bool { + c.RLock() + defer c.RUnlock() + status := atomic.LoadUint32(&c.status) + switch { + case status == connected: + return true + case c.options.AutoReconnect && status > connecting: + return true + default: + return false + } +} + +// IsConnectionOpen return a bool signifying whether the client has an active +// connection to mqtt broker, i.e not in disconnected or reconnect mode +func (c *client) IsConnectionOpen() bool { + c.RLock() + defer c.RUnlock() + status := atomic.LoadUint32(&c.status) + switch { + case status == connected: + return true + default: + return false + } +} + +func (c *client) connectionStatus() uint32 { + c.RLock() + defer c.RUnlock() + status := atomic.LoadUint32(&c.status) + return status +} + +func (c *client) setConnected(status uint32) { + c.Lock() + defer c.Unlock() + atomic.StoreUint32(&c.status, uint32(status)) +} + +//ErrNotConnected is the error returned from function calls that are +//made when the client is not connected to a broker +var ErrNotConnected = errors.New("Not Connected") + +// Connect will create a connection to the message broker, by default +// it will attempt to connect at v3.1.1 and auto retry at v3.1 if that +// fails +func (c *client) Connect() Token { + var err error + t := newToken(packets.Connect).(*ConnectToken) + DEBUG.Println(CLI, "Connect()") + + c.obound = make(chan *PacketAndToken, c.options.MessageChannelDepth) + c.oboundP = make(chan *PacketAndToken, c.options.MessageChannelDepth) + c.ibound = make(chan packets.ControlPacket) + + go func() { + c.persist.Open() + + c.setConnected(connecting) + c.errors = make(chan error, 1) + c.stop = make(chan struct{}) + + var rc byte + protocolVersion := c.options.ProtocolVersion + + if len(c.options.Servers) == 0 { + t.setError(fmt.Errorf("No servers defined to connect to")) + return + } + + for _, broker := range c.options.Servers { + cm := newConnectMsgFromOptions(&c.options, broker) + c.options.ProtocolVersion = protocolVersion + CONN: + DEBUG.Println(CLI, "about to write new connect msg") + c.conn, err = openConnection(broker, c.options.TLSConfig, c.options.ConnectTimeout, c.options.HTTPHeaders) + if err == nil { + DEBUG.Println(CLI, "socket connected to broker") + switch c.options.ProtocolVersion { + case 3: + DEBUG.Println(CLI, "Using MQTT 3.1 protocol") + cm.ProtocolName = "MQIsdp" + cm.ProtocolVersion = 3 + case 0x83: + DEBUG.Println(CLI, "Using MQTT 3.1b protocol") + cm.ProtocolName = "MQIsdp" + cm.ProtocolVersion = 0x83 + case 0x84: + DEBUG.Println(CLI, "Using MQTT 3.1.1b protocol") + cm.ProtocolName = "MQTT" + cm.ProtocolVersion = 0x84 + default: + DEBUG.Println(CLI, "Using MQTT 3.1.1 protocol") + c.options.ProtocolVersion = 4 + cm.ProtocolName = "MQTT" + cm.ProtocolVersion = 4 + } + cm.Write(c.conn) + + rc, t.sessionPresent = c.connect() + if rc != packets.Accepted { + if c.conn != nil { + c.conn.Close() + c.conn = nil + } + //if the protocol version was explicitly set don't do any fallback + if c.options.protocolVersionExplicit { + ERROR.Println(CLI, "Connecting to", broker, "CONNACK was not CONN_ACCEPTED, but rather", packets.ConnackReturnCodes[rc]) + continue + } + if c.options.ProtocolVersion == 4 { + DEBUG.Println(CLI, "Trying reconnect using MQTT 3.1 protocol") + c.options.ProtocolVersion = 3 + goto CONN + } + } + break + } else { + ERROR.Println(CLI, err.Error()) + WARN.Println(CLI, "failed to connect to broker, trying next") + rc = packets.ErrNetworkError + } + } + + if c.conn == nil { + ERROR.Println(CLI, "Failed to connect to a 
broker") + c.setConnected(disconnected) + c.persist.Close() + t.returnCode = rc + if rc != packets.ErrNetworkError { + t.setError(packets.ConnErrors[rc]) + } else { + t.setError(fmt.Errorf("%s : %s", packets.ConnErrors[rc], err)) + } + return + } + + c.options.protocolVersionExplicit = true + + if c.options.KeepAlive != 0 { + atomic.StoreInt32(&c.pingOutstanding, 0) + c.lastReceived.Store(time.Now()) + c.lastSent.Store(time.Now()) + c.workers.Add(1) + go keepalive(c) + } + + c.incomingPubChan = make(chan *packets.PublishPacket, c.options.MessageChannelDepth) + c.msgRouter.matchAndDispatch(c.incomingPubChan, c.options.Order, c) + + c.setConnected(connected) + DEBUG.Println(CLI, "client is connected") + if c.options.OnConnect != nil { + go c.options.OnConnect(c) + } + + c.workers.Add(4) + go errorWatch(c) + go alllogic(c) + go outgoing(c) + go incoming(c) + + // Take care of any messages in the store + if c.options.CleanSession == false { + c.resume(c.options.ResumeSubs) + } else { + c.persist.Reset() + } + + DEBUG.Println(CLI, "exit startClient") + t.flowComplete() + }() + return t +} + +// internal function used to reconnect the client when it loses its connection +func (c *client) reconnect() { + DEBUG.Println(CLI, "enter reconnect") + var ( + err error + + rc = byte(1) + sleep = time.Duration(1 * time.Second) + ) + + for rc != 0 && atomic.LoadUint32(&c.status) != disconnected { + for _, broker := range c.options.Servers { + cm := newConnectMsgFromOptions(&c.options, broker) + DEBUG.Println(CLI, "about to write new connect msg") + c.Lock() + c.conn, err = openConnection(broker, c.options.TLSConfig, c.options.ConnectTimeout, c.options.HTTPHeaders) + c.Unlock() + if err == nil { + DEBUG.Println(CLI, "socket connected to broker") + switch c.options.ProtocolVersion { + case 0x83: + DEBUG.Println(CLI, "Using MQTT 3.1b protocol") + cm.ProtocolName = "MQIsdp" + cm.ProtocolVersion = 0x83 + case 0x84: + DEBUG.Println(CLI, "Using MQTT 3.1.1b protocol") + cm.ProtocolName = "MQTT" + cm.ProtocolVersion = 0x84 + case 3: + DEBUG.Println(CLI, "Using MQTT 3.1 protocol") + cm.ProtocolName = "MQIsdp" + cm.ProtocolVersion = 3 + default: + DEBUG.Println(CLI, "Using MQTT 3.1.1 protocol") + cm.ProtocolName = "MQTT" + cm.ProtocolVersion = 4 + } + cm.Write(c.conn) + + rc, _ = c.connect() + if rc != packets.Accepted { + c.conn.Close() + c.conn = nil + //if the protocol version was explicitly set don't do any fallback + if c.options.protocolVersionExplicit { + ERROR.Println(CLI, "Connecting to", broker, "CONNACK was not Accepted, but rather", packets.ConnackReturnCodes[rc]) + continue + } + } + break + } else { + ERROR.Println(CLI, err.Error()) + WARN.Println(CLI, "failed to connect to broker, trying next") + rc = packets.ErrNetworkError + } + } + if rc != 0 { + DEBUG.Println(CLI, "Reconnect failed, sleeping for", int(sleep.Seconds()), "seconds") + time.Sleep(sleep) + if sleep < c.options.MaxReconnectInterval { + sleep *= 2 + } + + if sleep > c.options.MaxReconnectInterval { + sleep = c.options.MaxReconnectInterval + } + } + } + // Disconnect() must have been called while we were trying to reconnect. 
+ if c.connectionStatus() == disconnected { + DEBUG.Println(CLI, "Client moved to disconnected state while reconnecting, abandoning reconnect") + return + } + + c.stop = make(chan struct{}) + + if c.options.KeepAlive != 0 { + atomic.StoreInt32(&c.pingOutstanding, 0) + c.lastReceived.Store(time.Now()) + c.lastSent.Store(time.Now()) + c.workers.Add(1) + go keepalive(c) + } + + c.setConnected(connected) + DEBUG.Println(CLI, "client is reconnected") + if c.options.OnConnect != nil { + go c.options.OnConnect(c) + } + + c.workers.Add(4) + go errorWatch(c) + go alllogic(c) + go outgoing(c) + go incoming(c) + + c.resume(false) +} + +// This function is only used for receiving a connack +// when the connection is first started. +// This prevents receiving incoming data while resume +// is in progress if clean session is false. +func (c *client) connect() (byte, bool) { + DEBUG.Println(NET, "connect started") + + ca, err := packets.ReadPacket(c.conn) + if err != nil { + ERROR.Println(NET, "connect got error", err) + return packets.ErrNetworkError, false + } + if ca == nil { + ERROR.Println(NET, "received nil packet") + return packets.ErrNetworkError, false + } + + msg, ok := ca.(*packets.ConnackPacket) + if !ok { + ERROR.Println(NET, "received msg that was not CONNACK") + return packets.ErrNetworkError, false + } + + DEBUG.Println(NET, "received connack") + return msg.ReturnCode, msg.SessionPresent +} + +// Disconnect will end the connection with the server, but not before waiting +// the specified number of milliseconds to wait for existing work to be +// completed. +func (c *client) Disconnect(quiesce uint) { + status := atomic.LoadUint32(&c.status) + if status == connected { + DEBUG.Println(CLI, "disconnecting") + c.setConnected(disconnected) + + dm := packets.NewControlPacket(packets.Disconnect).(*packets.DisconnectPacket) + dt := newToken(packets.Disconnect) + c.oboundP <- &PacketAndToken{p: dm, t: dt} + + // wait for work to finish, or quiesce time consumed + dt.WaitTimeout(time.Duration(quiesce) * time.Millisecond) + } else { + WARN.Println(CLI, "Disconnect() called but not connected (disconnected/reconnecting)") + c.setConnected(disconnected) + } + + c.disconnect() +} + +// ForceDisconnect will end the connection with the mqtt broker immediately. 
+func (c *client) forceDisconnect() { + if !c.IsConnected() { + WARN.Println(CLI, "already disconnected") + return + } + c.setConnected(disconnected) + c.conn.Close() + DEBUG.Println(CLI, "forcefully disconnecting") + c.disconnect() +} + +func (c *client) internalConnLost(err error) { + // Only do anything if this was called and we are still "connected" + // forceDisconnect can cause incoming/outgoing/alllogic to end with + // error from closing the socket but state will be "disconnected" + if c.IsConnected() { + c.closeStop() + c.conn.Close() + c.workers.Wait() + if c.options.CleanSession && !c.options.AutoReconnect { + c.messageIds.cleanUp() + } + if c.options.AutoReconnect { + c.setConnected(reconnecting) + go c.reconnect() + } else { + c.setConnected(disconnected) + } + if c.options.OnConnectionLost != nil { + go c.options.OnConnectionLost(c, err) + } + } +} + +func (c *client) closeStop() { + c.Lock() + defer c.Unlock() + select { + case <-c.stop: + DEBUG.Println("In disconnect and stop channel is already closed") + default: + if c.stop != nil { + close(c.stop) + } + } +} + +func (c *client) closeStopRouter() { + c.Lock() + defer c.Unlock() + select { + case <-c.stopRouter: + DEBUG.Println("In disconnect and stop channel is already closed") + default: + if c.stopRouter != nil { + close(c.stopRouter) + } + } +} + +func (c *client) closeConn() { + c.Lock() + defer c.Unlock() + if c.conn != nil { + c.conn.Close() + } +} + +func (c *client) disconnect() { + c.closeStop() + c.closeConn() + c.workers.Wait() + c.messageIds.cleanUp() + c.closeStopRouter() + DEBUG.Println(CLI, "disconnected") + c.persist.Close() +} + +// Publish will publish a message with the specified QoS and content +// to the specified topic. +// Returns a token to track delivery of the message to the broker +func (c *client) Publish(topic string, qos byte, retained bool, payload interface{}) Token { + token := newToken(packets.Publish).(*PublishToken) + DEBUG.Println(CLI, "enter Publish") + switch { + case !c.IsConnected(): + token.setError(ErrNotConnected) + return token + case c.connectionStatus() == reconnecting && qos == 0: + token.flowComplete() + return token + } + pub := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket) + pub.Qos = qos + pub.TopicName = topic + pub.Retain = retained + switch payload.(type) { + case string: + pub.Payload = []byte(payload.(string)) + case []byte: + pub.Payload = payload.([]byte) + default: + token.setError(fmt.Errorf("Unknown payload type")) + return token + } + + if pub.Qos != 0 && pub.MessageID == 0 { + pub.MessageID = c.getID(token) + token.messageID = pub.MessageID + } + persistOutbound(c.persist, pub) + if c.connectionStatus() == reconnecting { + DEBUG.Println(CLI, "storing publish message (reconnecting), topic:", topic) + } else { + DEBUG.Println(CLI, "sending publish message, topic:", topic) + c.obound <- &PacketAndToken{p: pub, t: token} + } + return token +} + +// Subscribe starts a new subscription. Provide a MessageHandler to be executed when +// a message is published on the topic provided. 
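+// As a brief sketch (the topic, QoS and handler below are illustrative), a
+// typical call looks like:
+//
+//	token := client.Subscribe("sensors/#", 1, func(_ Client, m Message) {
+//		fmt.Println(m.Topic(), string(m.Payload()))
+//	})
+//	token.Wait()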
+func (c *client) Subscribe(topic string, qos byte, callback MessageHandler) Token {
+	token := newToken(packets.Subscribe).(*SubscribeToken)
+	DEBUG.Println(CLI, "enter Subscribe")
+	if !c.IsConnected() {
+		token.setError(ErrNotConnected)
+		return token
+	}
+	sub := packets.NewControlPacket(packets.Subscribe).(*packets.SubscribePacket)
+	if err := validateTopicAndQos(topic, qos); err != nil {
+		token.setError(err)
+		return token
+	}
+	sub.Topics = append(sub.Topics, topic)
+	sub.Qoss = append(sub.Qoss, qos)
+	DEBUG.Println(CLI, sub.String())
+
+	if strings.HasPrefix(topic, "$share") {
+		topic = strings.Join(strings.Split(topic, "/")[2:], "/")
+	}
+
+	if callback != nil {
+		c.msgRouter.addRoute(topic, callback)
+	}
+
+	token.subs = append(token.subs, topic)
+	c.oboundP <- &PacketAndToken{p: sub, t: token}
+	DEBUG.Println(CLI, "exit Subscribe")
+	return token
+}
+
+// SubscribeMultiple starts a new subscription for multiple topics. Provide a MessageHandler to
+// be executed when a message is published on one of the topics provided.
+func (c *client) SubscribeMultiple(filters map[string]byte, callback MessageHandler) Token {
+	var err error
+	token := newToken(packets.Subscribe).(*SubscribeToken)
+	DEBUG.Println(CLI, "enter SubscribeMultiple")
+	if !c.IsConnected() {
+		token.setError(ErrNotConnected)
+		return token
+	}
+	sub := packets.NewControlPacket(packets.Subscribe).(*packets.SubscribePacket)
+	if sub.Topics, sub.Qoss, err = validateSubscribeMap(filters); err != nil {
+		token.setError(err)
+		return token
+	}
+
+	if callback != nil {
+		for topic := range filters {
+			c.msgRouter.addRoute(topic, callback)
+		}
+	}
+	token.subs = make([]string, len(sub.Topics))
+	copy(token.subs, sub.Topics)
+	c.oboundP <- &PacketAndToken{p: sub, t: token}
+	DEBUG.Println(CLI, "exit SubscribeMultiple")
+	return token
+}
+
+// Load all stored messages and resend them. Call this to ensure delivery of
+// messages at QoS 1 and 2 even after an application crash.
+func (c *client) resume(subscription bool) {
+
+	storedKeys := c.persist.All()
+	for _, key := range storedKeys {
+		packet := c.persist.Get(key)
+		if packet == nil {
+			continue
+		}
+		details := packet.Details()
+		if isKeyOutbound(key) {
+			switch packet.(type) {
+			case *packets.SubscribePacket:
+				if subscription {
+					DEBUG.Println(STR, fmt.Sprintf("loaded pending subscribe (%d)", details.MessageID))
+					token := newToken(packets.Subscribe).(*SubscribeToken)
+					c.oboundP <- &PacketAndToken{p: packet, t: token}
+				}
+			case *packets.UnsubscribePacket:
+				if subscription {
+					DEBUG.Println(STR, fmt.Sprintf("loaded pending unsubscribe (%d)", details.MessageID))
+					token := newToken(packets.Unsubscribe).(*UnsubscribeToken)
+					c.oboundP <- &PacketAndToken{p: packet, t: token}
+				}
+			case *packets.PubrelPacket:
+				DEBUG.Println(STR, fmt.Sprintf("loaded pending pubrel (%d)", details.MessageID))
+				select {
+				case c.oboundP <- &PacketAndToken{p: packet, t: nil}:
+				case <-c.stop:
+				}
+			case *packets.PublishPacket:
+				token := newToken(packets.Publish).(*PublishToken)
+				token.messageID = details.MessageID
+				c.claimID(token, details.MessageID)
+				DEBUG.Println(STR, fmt.Sprintf("loaded pending publish (%d)", details.MessageID))
+				DEBUG.Println(STR, details)
+				c.obound <- &PacketAndToken{p: packet, t: token}
+			default:
+				ERROR.Println(STR, "invalid message type in store (discarded)")
+				c.persist.Del(key)
+			}
+		} else {
+			switch packet.(type) {
+			case *packets.PubrelPacket, *packets.PublishPacket:
+				DEBUG.Println(STR, fmt.Sprintf("loaded pending incoming (%d)", details.MessageID))
+				select {
+				case c.ibound
<- packet: + case <-c.stop: + } + default: + ERROR.Println(STR, "invalid message type in store (discarded)") + c.persist.Del(key) + } + } + } +} + +// Unsubscribe will end the subscription from each of the topics provided. +// Messages published to those topics from other clients will no longer be +// received. +func (c *client) Unsubscribe(topics ...string) Token { + token := newToken(packets.Unsubscribe).(*UnsubscribeToken) + DEBUG.Println(CLI, "enter Unsubscribe") + if !c.IsConnected() { + token.setError(ErrNotConnected) + return token + } + unsub := packets.NewControlPacket(packets.Unsubscribe).(*packets.UnsubscribePacket) + unsub.Topics = make([]string, len(topics)) + copy(unsub.Topics, topics) + + c.oboundP <- &PacketAndToken{p: unsub, t: token} + for _, topic := range topics { + c.msgRouter.deleteRoute(topic) + } + + DEBUG.Println(CLI, "exit Unsubscribe") + return token +} + +// OptionsReader returns a ClientOptionsReader which is a copy of the clientoptions +// in use by the client. +func (c *client) OptionsReader() ClientOptionsReader { + r := ClientOptionsReader{options: &c.options} + return r +} + +//DefaultConnectionLostHandler is a definition of a function that simply +//reports to the DEBUG log the reason for the client losing a connection. +func DefaultConnectionLostHandler(client Client, reason error) { + DEBUG.Println("Connection lost:", reason.Error()) +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/components.go b/vendor/github.com/eclipse/paho.mqtt.golang/components.go new file mode 100644 index 00000000000..01f5fafdf8f --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/components.go @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +package mqtt + +type component string + +// Component names for debug output +const ( + NET component = "[net] " + PNG component = "[pinger] " + CLI component = "[client] " + DEC component = "[decode] " + MES component = "[message] " + STR component = "[store] " + MID component = "[msgids] " + TST component = "[test] " + STA component = "[state] " + ERR component = "[error] " +) diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/edl-v10 b/vendor/github.com/eclipse/paho.mqtt.golang/edl-v10 new file mode 100644 index 00000000000..cf989f1456b --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/edl-v10 @@ -0,0 +1,15 @@ + +Eclipse Distribution License - v 1.0 + +Copyright (c) 2007, Eclipse Foundation, Inc. and its licensors. + +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + + Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + Neither the name of the Eclipse Foundation, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/epl-v10 b/vendor/github.com/eclipse/paho.mqtt.golang/epl-v10 new file mode 100644 index 00000000000..79e486c3d2c --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/epl-v10 @@ -0,0 +1,70 @@ +Eclipse Public License - v 1.0 + +THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT. + +1. DEFINITIONS + +"Contribution" means: + +a) in the case of the initial Contributor, the initial code and documentation distributed under this Agreement, and +b) in the case of each subsequent Contributor: +i) changes to the Program, and +ii) additions to the Program; +where such changes and/or additions to the Program originate from and are distributed by that particular Contributor. A Contribution 'originates' from a Contributor if it was added to the Program by such Contributor itself or anyone acting on such Contributor's behalf. Contributions do not include additions to the Program which: (i) are separate modules of software distributed in conjunction with the Program under their own license agreement, and (ii) are not derivative works of the Program. +"Contributor" means any person or entity that distributes the Program. + +"Licensed Patents" mean patent claims licensable by a Contributor which are necessarily infringed by the use or sale of its Contribution alone or when combined with the Program. + +"Program" means the Contributions distributed in accordance with this Agreement. + +"Recipient" means anyone who receives the Program under this Agreement, including all Contributors. + +2. GRANT OF RIGHTS + +a) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free copyright license to reproduce, prepare derivative works of, publicly display, publicly perform, distribute and sublicense the Contribution of such Contributor, if any, and such derivative works, in source code and object code form. +b) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free patent license under Licensed Patents to make, use, sell, offer to sell, import and otherwise transfer the Contribution of such Contributor, if any, in source code and object code form. This patent license shall apply to the combination of the Contribution and the Program if, at the time the Contribution is added by the Contributor, such addition of the Contribution causes such combination to be covered by the Licensed Patents. The patent license shall not apply to any other combinations which include the Contribution. No hardware per se is licensed hereunder. 
+c) Recipient understands that although each Contributor grants the licenses to its Contributions set forth herein, no assurances are provided by any Contributor that the Program does not infringe the patent or other intellectual property rights of any other entity. Each Contributor disclaims any liability to Recipient for claims brought by any other entity based on infringement of intellectual property rights or otherwise. As a condition to exercising the rights and licenses granted hereunder, each Recipient hereby assumes sole responsibility to secure any other intellectual property rights needed, if any. For example, if a third party patent license is required to allow Recipient to distribute the Program, it is Recipient's responsibility to acquire that license before distributing the Program. +d) Each Contributor represents that to its knowledge it has sufficient copyright rights in its Contribution, if any, to grant the copyright license set forth in this Agreement. +3. REQUIREMENTS + +A Contributor may choose to distribute the Program in object code form under its own license agreement, provided that: + +a) it complies with the terms and conditions of this Agreement; and +b) its license agreement: +i) effectively disclaims on behalf of all Contributors all warranties and conditions, express and implied, including warranties or conditions of title and non-infringement, and implied warranties or conditions of merchantability and fitness for a particular purpose; +ii) effectively excludes on behalf of all Contributors all liability for damages, including direct, indirect, special, incidental and consequential damages, such as lost profits; +iii) states that any provisions which differ from this Agreement are offered by that Contributor alone and not by any other party; and +iv) states that source code for the Program is available from such Contributor, and informs licensees how to obtain it in a reasonable manner on or through a medium customarily used for software exchange. +When the Program is made available in source code form: + +a) it must be made available under this Agreement; and +b) a copy of this Agreement must be included with each copy of the Program. +Contributors may not remove or alter any copyright notices contained within the Program. + +Each Contributor must identify itself as the originator of its Contribution, if any, in a manner that reasonably allows subsequent Recipients to identify the originator of the Contribution. + +4. COMMERCIAL DISTRIBUTION + +Commercial distributors of software may accept certain responsibilities with respect to end users, business partners and the like. While this license is intended to facilitate the commercial use of the Program, the Contributor who includes the Program in a commercial product offering should do so in a manner which does not create potential liability for other Contributors. Therefore, if a Contributor includes the Program in a commercial product offering, such Contributor ("Commercial Contributor") hereby agrees to defend and indemnify every other Contributor ("Indemnified Contributor") against any losses, damages and costs (collectively "Losses") arising from claims, lawsuits and other legal actions brought by a third party against the Indemnified Contributor to the extent caused by the acts or omissions of such Commercial Contributor in connection with its distribution of the Program in a commercial product offering. 
The obligations in this section do not apply to any claims or Losses relating to any actual or alleged intellectual property infringement. In order to qualify, an Indemnified Contributor must: a) promptly notify the Commercial Contributor in writing of such claim, and b) allow the Commercial Contributor to control, and cooperate with the Commercial Contributor in, the defense and any related settlement negotiations. The Indemnified Contributor may participate in any such claim at its own expense. + +For example, a Contributor might include the Program in a commercial product offering, Product X. That Contributor is then a Commercial Contributor. If that Commercial Contributor then makes performance claims, or offers warranties related to Product X, those performance claims and warranties are such Commercial Contributor's responsibility alone. Under this section, the Commercial Contributor would have to defend claims against the other Contributors related to those performance claims and warranties, and if a court requires any other Contributor to pay any damages as a result, the Commercial Contributor must pay those damages. + +5. NO WARRANTY + +EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is solely responsible for determining the appropriateness of using and distributing the Program and assumes all risks associated with its exercise of rights under this Agreement , including but not limited to the risks and costs of program errors, compliance with applicable laws, damage to or loss of data, programs or equipment, and unavailability or interruption of operations. + +6. DISCLAIMER OF LIABILITY + +EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +7. GENERAL + +If any provision of this Agreement is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this Agreement, and without further action by the parties hereto, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable. + +If Recipient institutes patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Program itself (excluding combinations of the Program with other software or hardware) infringes such Recipient's patent(s), then such Recipient's rights granted under Section 2(b) shall terminate as of the date such litigation is filed. + +All Recipient's rights under this Agreement shall terminate if it fails to comply with any of the material terms or conditions of this Agreement and does not cure such failure in a reasonable period of time after becoming aware of such noncompliance. 
If all Recipient's rights under this Agreement terminate, Recipient agrees to cease use and distribution of the Program as soon as reasonably practicable. However, Recipient's obligations under this Agreement and any licenses granted by Recipient relating to the Program shall continue and survive. + +Everyone is permitted to copy and distribute copies of this Agreement, but in order to avoid inconsistency the Agreement is copyrighted and may only be modified in the following manner. The Agreement Steward reserves the right to publish new versions (including revisions) of this Agreement from time to time. No one other than the Agreement Steward has the right to modify this Agreement. The Eclipse Foundation is the initial Agreement Steward. The Eclipse Foundation may assign the responsibility to serve as the Agreement Steward to a suitable separate entity. Each new version of the Agreement will be given a distinguishing version number. The Program (including Contributions) may always be distributed subject to the version of the Agreement under which it was received. In addition, after a new version of the Agreement is published, Contributor may elect to distribute the Program (including its Contributions) under the new version. Except as expressly stated in Sections 2(a) and 2(b) above, Recipient receives no rights or licenses to the intellectual property of any Contributor under this Agreement, whether expressly, by implication, estoppel or otherwise. All rights in the Program not expressly granted under this Agreement are reserved. + +This Agreement is governed by the laws of the State of New York and the intellectual property laws of the United States of America. No party to this Agreement will bring a legal action under this Agreement more than one year after the cause of action arose. Each party waives its rights to a jury trial in any resulting litigation. diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/filestore.go b/vendor/github.com/eclipse/paho.mqtt.golang/filestore.go new file mode 100644 index 00000000000..c4a0d36b534 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/filestore.go @@ -0,0 +1,255 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +package mqtt + +import ( + "io/ioutil" + "os" + "path" + "sort" + "sync" + + "github.com/eclipse/paho.mqtt.golang/packets" +) + +const ( + msgExt = ".msg" + tmpExt = ".tmp" + corruptExt = ".CORRUPT" +) + +// FileStore implements the store interface using the filesystem to provide +// true persistence, even across client failure. This is designed to use a +// single directory per running client. If you are running multiple clients +// on the same filesystem, you will need to be careful to specify unique +// store directories for each. +type FileStore struct { + sync.RWMutex + directory string + opened bool +} + +// NewFileStore will create a new FileStore which stores its messages in the +// directory provided. +func NewFileStore(directory string) *FileStore { + store := &FileStore{ + directory: directory, + opened: false, + } + return store +} + +// Open will allow the FileStore to be used. 
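+// Open is normally invoked for you during Connect once a store has been
+// installed via the client options; as a brief sketch (the broker address and
+// directory below are illustrative):
+//
+//	opts := NewClientOptions().
+//		AddBroker("tcp://localhost:1883").
+//		SetStore(NewFileStore("/var/lib/myapp/mqtt"))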
+func (store *FileStore) Open() {
+	store.Lock()
+	defer store.Unlock()
+	// if no store directory was specified in ClientOpts, by default use the
+	// current working directory
+	if store.directory == "" {
+		store.directory, _ = os.Getwd()
+	}
+
+	// if store dir exists, great, otherwise, create it
+	if !exists(store.directory) {
+		perms := os.FileMode(0770)
+		merr := os.MkdirAll(store.directory, perms)
+		chkerr(merr)
+	}
+	store.opened = true
+	DEBUG.Println(STR, "store is opened at", store.directory)
+}
+
+// Close will disallow the FileStore from being used.
+func (store *FileStore) Close() {
+	store.Lock()
+	defer store.Unlock()
+	store.opened = false
+	DEBUG.Println(STR, "store is closed")
+}
+
+// Put will put a message into the store, associated with the provided
+// key value.
+func (store *FileStore) Put(key string, m packets.ControlPacket) {
+	store.Lock()
+	defer store.Unlock()
+	if !store.opened {
+		ERROR.Println(STR, "Trying to use file store, but not open")
+		return
+	}
+	full := fullpath(store.directory, key)
+	write(store.directory, key, m)
+	if !exists(full) {
+		ERROR.Println(STR, "file not created:", full)
+	}
+}
+
+// Get will retrieve a message from the store, the one associated with
+// the provided key value.
+func (store *FileStore) Get(key string) packets.ControlPacket {
+	store.RLock()
+	defer store.RUnlock()
+	if !store.opened {
+		ERROR.Println(STR, "Trying to use file store, but not open")
+		return nil
+	}
+	filepath := fullpath(store.directory, key)
+	if !exists(filepath) {
+		return nil
+	}
+	mfile, oerr := os.Open(filepath)
+	chkerr(oerr)
+	msg, rerr := packets.ReadPacket(mfile)
+	chkerr(mfile.Close())
+
+	// Message was unreadable, return nil
+	if rerr != nil {
+		newpath := corruptpath(store.directory, key)
+		WARN.Println(STR, "corrupted file detected:", rerr.Error(), "archived at:", newpath)
+		os.Rename(filepath, newpath)
+		return nil
+	}
+	return msg
+}
+
+// All will provide a list of all of the keys associated with messages
+// currently residing in the FileStore.
+func (store *FileStore) All() []string {
+	store.RLock()
+	defer store.RUnlock()
+	return store.all()
+}
+
+// Del will remove the persisted message associated with the provided
+// key from the FileStore.
+func (store *FileStore) Del(key string) {
+	store.Lock()
+	defer store.Unlock()
+	store.del(key)
+}
+
+// Reset will remove all persisted messages from the FileStore.
+func (store *FileStore) Reset() { + store.Lock() + defer store.Unlock() + WARN.Println(STR, "FileStore Reset") + for _, key := range store.all() { + store.del(key) + } +} + +// lockless +func (store *FileStore) all() []string { + var err error + var keys []string + var files fileInfos + + if !store.opened { + ERROR.Println(STR, "Trying to use file store, but not open") + return nil + } + + files, err = ioutil.ReadDir(store.directory) + chkerr(err) + sort.Sort(files) + for _, f := range files { + DEBUG.Println(STR, "file in All():", f.Name()) + name := f.Name() + if name[len(name)-4:len(name)] != msgExt { + DEBUG.Println(STR, "skipping file, doesn't have right extension: ", name) + continue + } + key := name[0 : len(name)-4] // remove file extension + keys = append(keys, key) + } + return keys +} + +// lockless +func (store *FileStore) del(key string) { + if !store.opened { + ERROR.Println(STR, "Trying to use file store, but not open") + return + } + DEBUG.Println(STR, "store del filepath:", store.directory) + DEBUG.Println(STR, "store delete key:", key) + filepath := fullpath(store.directory, key) + DEBUG.Println(STR, "path of deletion:", filepath) + if !exists(filepath) { + WARN.Println(STR, "store could not delete key:", key) + return + } + rerr := os.Remove(filepath) + chkerr(rerr) + DEBUG.Println(STR, "del msg:", key) + if exists(filepath) { + ERROR.Println(STR, "file not deleted:", filepath) + } +} + +func fullpath(store string, key string) string { + p := path.Join(store, key+msgExt) + return p +} + +func tmppath(store string, key string) string { + p := path.Join(store, key+tmpExt) + return p +} + +func corruptpath(store string, key string) string { + p := path.Join(store, key+corruptExt) + return p +} + +// create file called "X.[messageid].tmp" located in the store +// the contents of the file is the bytes of the message, then +// rename it to "X.[messageid].msg", overwriting any existing +// message with the same id +// X will be 'i' for inbound messages, and O for outbound messages +func write(store, key string, m packets.ControlPacket) { + temppath := tmppath(store, key) + f, err := os.Create(temppath) + chkerr(err) + werr := m.Write(f) + chkerr(werr) + cerr := f.Close() + chkerr(cerr) + rerr := os.Rename(temppath, fullpath(store, key)) + chkerr(rerr) +} + +func exists(file string) bool { + if _, err := os.Stat(file); err != nil { + if os.IsNotExist(err) { + return false + } + chkerr(err) + } + return true +} + +type fileInfos []os.FileInfo + +func (f fileInfos) Len() int { + return len(f) +} + +func (f fileInfos) Swap(i, j int) { + f[i], f[j] = f[j], f[i] +} + +func (f fileInfos) Less(i, j int) bool { + return f[i].ModTime().Before(f[j].ModTime()) +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/memstore.go b/vendor/github.com/eclipse/paho.mqtt.golang/memstore.go new file mode 100644 index 00000000000..499c490bdbb --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/memstore.go @@ -0,0 +1,138 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +package mqtt + +import ( + "sync" + + "github.com/eclipse/paho.mqtt.golang/packets" +) + +// MemoryStore implements the store interface to provide a "persistence" +// mechanism wholly stored in memory. This is only useful for +// as long as the client instance exists. +type MemoryStore struct { + sync.RWMutex + messages map[string]packets.ControlPacket + opened bool +} + +// NewMemoryStore returns a pointer to a new instance of +// MemoryStore, the instance is not initialized and ready to +// use until Open() has been called on it. +func NewMemoryStore() *MemoryStore { + store := &MemoryStore{ + messages: make(map[string]packets.ControlPacket), + opened: false, + } + return store +} + +// Open initializes a MemoryStore instance. +func (store *MemoryStore) Open() { + store.Lock() + defer store.Unlock() + store.opened = true + DEBUG.Println(STR, "memorystore initialized") +} + +// Put takes a key and a pointer to a Message and stores the +// message. +func (store *MemoryStore) Put(key string, message packets.ControlPacket) { + store.Lock() + defer store.Unlock() + if !store.opened { + ERROR.Println(STR, "Trying to use memory store, but not open") + return + } + store.messages[key] = message +} + +// Get takes a key and looks in the store for a matching Message +// returning either the Message pointer or nil. +func (store *MemoryStore) Get(key string) packets.ControlPacket { + store.RLock() + defer store.RUnlock() + if !store.opened { + ERROR.Println(STR, "Trying to use memory store, but not open") + return nil + } + mid := mIDFromKey(key) + m := store.messages[key] + if m == nil { + CRITICAL.Println(STR, "memorystore get: message", mid, "not found") + } else { + DEBUG.Println(STR, "memorystore get: message", mid, "found") + } + return m +} + +// All returns a slice of strings containing all the keys currently +// in the MemoryStore. +func (store *MemoryStore) All() []string { + store.RLock() + defer store.RUnlock() + if !store.opened { + ERROR.Println(STR, "Trying to use memory store, but not open") + return nil + } + keys := []string{} + for k := range store.messages { + keys = append(keys, k) + } + return keys +} + +// Del takes a key, searches the MemoryStore and if the key is found +// deletes the Message pointer associated with it. +func (store *MemoryStore) Del(key string) { + store.Lock() + defer store.Unlock() + if !store.opened { + ERROR.Println(STR, "Trying to use memory store, but not open") + return + } + mid := mIDFromKey(key) + m := store.messages[key] + if m == nil { + WARN.Println(STR, "memorystore del: message", mid, "not found") + } else { + delete(store.messages, key) + DEBUG.Println(STR, "memorystore del: message", mid, "was deleted") + } +} + +// Close will disallow modifications to the state of the store. +func (store *MemoryStore) Close() { + store.Lock() + defer store.Unlock() + if !store.opened { + ERROR.Println(STR, "Trying to close memory store, but not open") + return + } + store.opened = false + DEBUG.Println(STR, "memorystore closed") +} + +// Reset eliminates all persisted message data in the store. 
+func (store *MemoryStore) Reset() { + store.Lock() + defer store.Unlock() + if !store.opened { + ERROR.Println(STR, "Trying to reset memory store, but not open") + } + store.messages = make(map[string]packets.ControlPacket) + WARN.Println(STR, "memorystore wiped") +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/message.go b/vendor/github.com/eclipse/paho.mqtt.golang/message.go new file mode 100644 index 00000000000..903e5dcf5e7 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/message.go @@ -0,0 +1,127 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +package mqtt + +import ( + "net/url" + + "github.com/eclipse/paho.mqtt.golang/packets" + "sync" +) + +// Message defines the externals that a message implementation must support +// these are received messages that are passed to the callbacks, not internal +// messages +type Message interface { + Duplicate() bool + Qos() byte + Retained() bool + Topic() string + MessageID() uint16 + Payload() []byte + Ack() +} + +type message struct { + duplicate bool + qos byte + retained bool + topic string + messageID uint16 + payload []byte + once sync.Once + ack func() +} + +func (m *message) Duplicate() bool { + return m.duplicate +} + +func (m *message) Qos() byte { + return m.qos +} + +func (m *message) Retained() bool { + return m.retained +} + +func (m *message) Topic() string { + return m.topic +} + +func (m *message) MessageID() uint16 { + return m.messageID +} + +func (m *message) Payload() []byte { + return m.payload +} + +func (m *message) Ack() { + m.once.Do(m.ack) +} + +func messageFromPublish(p *packets.PublishPacket, ack func()) Message { + return &message{ + duplicate: p.Dup, + qos: p.Qos, + retained: p.Retain, + topic: p.TopicName, + messageID: p.MessageID, + payload: p.Payload, + ack: ack, + } +} + +func newConnectMsgFromOptions(options *ClientOptions, broker *url.URL) *packets.ConnectPacket { + m := packets.NewControlPacket(packets.Connect).(*packets.ConnectPacket) + + m.CleanSession = options.CleanSession + m.WillFlag = options.WillEnabled + m.WillRetain = options.WillRetained + m.ClientIdentifier = options.ClientID + + if options.WillEnabled { + m.WillQos = options.WillQos + m.WillTopic = options.WillTopic + m.WillMessage = options.WillPayload + } + + username := options.Username + password := options.Password + if broker.User != nil { + username = broker.User.Username() + if pwd, ok := broker.User.Password(); ok { + password = pwd + } + } + if options.CredentialsProvider != nil { + username, password = options.CredentialsProvider() + } + + if username != "" { + m.UsernameFlag = true + m.Username = username + //mustn't have password without user as well + if password != "" { + m.PasswordFlag = true + m.Password = []byte(password) + } + } + + m.Keepalive = uint16(options.KeepAlive) + + return m +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/messageids.go b/vendor/github.com/eclipse/paho.mqtt.golang/messageids.go new file mode 100644 index 00000000000..9a5fa9fd159 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/messageids.go @@ -0,0 +1,117 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +package mqtt + +import ( + "fmt" + "sync" + "time" +) + +// MId is 16 bit message id as specified by the MQTT spec. +// In general, these values should not be depended upon by +// the client application. +type MId uint16 + +type messageIds struct { + sync.RWMutex + index map[uint16]tokenCompletor +} + +const ( + midMin uint16 = 1 + midMax uint16 = 65535 +) + +func (mids *messageIds) cleanUp() { + mids.Lock() + for _, token := range mids.index { + switch token.(type) { + case *PublishToken: + token.setError(fmt.Errorf("Connection lost before Publish completed")) + case *SubscribeToken: + token.setError(fmt.Errorf("Connection lost before Subscribe completed")) + case *UnsubscribeToken: + token.setError(fmt.Errorf("Connection lost before Unsubscribe completed")) + case nil: + continue + } + token.flowComplete() + } + mids.index = make(map[uint16]tokenCompletor) + mids.Unlock() + DEBUG.Println(MID, "cleaned up") +} + +func (mids *messageIds) freeID(id uint16) { + mids.Lock() + delete(mids.index, id) + mids.Unlock() +} + +func (mids *messageIds) claimID(token tokenCompletor, id uint16) { + mids.Lock() + defer mids.Unlock() + if _, ok := mids.index[id]; !ok { + mids.index[id] = token + } else { + old := mids.index[id] + old.flowComplete() + mids.index[id] = token + } +} + +func (mids *messageIds) getID(t tokenCompletor) uint16 { + mids.Lock() + defer mids.Unlock() + for i := midMin; i < midMax; i++ { + if _, ok := mids.index[i]; !ok { + mids.index[i] = t + return i + } + } + return 0 +} + +func (mids *messageIds) getToken(id uint16) tokenCompletor { + mids.RLock() + defer mids.RUnlock() + if token, ok := mids.index[id]; ok { + return token + } + return &DummyToken{id: id} +} + +type DummyToken struct { + id uint16 +} + +func (d *DummyToken) Wait() bool { + return true +} + +func (d *DummyToken) WaitTimeout(t time.Duration) bool { + return true +} + +func (d *DummyToken) flowComplete() { + ERROR.Printf("A lookup for token %d returned nil\n", d.id) +} + +func (d *DummyToken) Error() error { + return nil +} + +func (d *DummyToken) setError(e error) {} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/net.go b/vendor/github.com/eclipse/paho.mqtt.golang/net.go new file mode 100644 index 00000000000..3e6366be719 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/net.go @@ -0,0 +1,355 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +package mqtt + +import ( + "crypto/tls" + "errors" + "fmt" + "net" + "net/http" + "net/url" + "os" + "reflect" + "sync/atomic" + "time" + + "github.com/eclipse/paho.mqtt.golang/packets" + "golang.org/x/net/proxy" + "golang.org/x/net/websocket" +) + +func signalError(c chan<- error, err error) { + select { + case c <- err: + default: + } +} + +func openConnection(uri *url.URL, tlsc *tls.Config, timeout time.Duration, headers http.Header) (net.Conn, error) { + switch uri.Scheme { + case "ws": + config, _ := websocket.NewConfig(uri.String(), fmt.Sprintf("http://%s", uri.Host)) + config.Protocol = []string{"mqtt"} + config.Header = headers + config.Dialer = &net.Dialer{Timeout: timeout} + conn, err := websocket.DialConfig(config) + if err != nil { + return nil, err + } + conn.PayloadType = websocket.BinaryFrame + return conn, err + case "wss": + config, _ := websocket.NewConfig(uri.String(), fmt.Sprintf("https://%s", uri.Host)) + config.Protocol = []string{"mqtt"} + config.TlsConfig = tlsc + config.Header = headers + config.Dialer = &net.Dialer{Timeout: timeout} + conn, err := websocket.DialConfig(config) + if err != nil { + return nil, err + } + conn.PayloadType = websocket.BinaryFrame + return conn, err + case "tcp": + allProxy := os.Getenv("all_proxy") + if len(allProxy) == 0 { + conn, err := net.DialTimeout("tcp", uri.Host, timeout) + if err != nil { + return nil, err + } + return conn, nil + } + proxyDialer := proxy.FromEnvironment() + + conn, err := proxyDialer.Dial("tcp", uri.Host) + if err != nil { + return nil, err + } + return conn, nil + case "unix": + conn, err := net.DialTimeout("unix", uri.Host, timeout) + if err != nil { + return nil, err + } + return conn, nil + case "ssl": + fallthrough + case "tls": + fallthrough + case "tcps": + allProxy := os.Getenv("all_proxy") + if len(allProxy) == 0 { + conn, err := tls.DialWithDialer(&net.Dialer{Timeout: timeout}, "tcp", uri.Host, tlsc) + if err != nil { + return nil, err + } + return conn, nil + } + proxyDialer := proxy.FromEnvironment() + + conn, err := proxyDialer.Dial("tcp", uri.Host) + if err != nil { + return nil, err + } + + tlsConn := tls.Client(conn, tlsc) + + err = tlsConn.Handshake() + if err != nil { + conn.Close() + return nil, err + } + + return tlsConn, nil + } + return nil, errors.New("Unknown protocol") +} + +// actually read incoming messages off the wire +// send Message object into ibound channel +func incoming(c *client) { + var err error + var cp packets.ControlPacket + + defer c.workers.Done() + + DEBUG.Println(NET, "incoming started") + + for { + if cp, err = packets.ReadPacket(c.conn); err != nil { + break + } + DEBUG.Println(NET, "Received Message") + select { + case c.ibound <- cp: + // Notify keepalive logic that we recently received a packet + if c.options.KeepAlive != 0 { + c.lastReceived.Store(time.Now()) + } + case <-c.stop: + // This avoids a deadlock should a message arrive while shutting down. + // In that case the "reader" of c.ibound might already be gone + WARN.Println(NET, "incoming dropped a received message during shutdown") + break + } + } + // We received an error on read. 
+ // If disconnect is in progress, swallow error and return + select { + case <-c.stop: + DEBUG.Println(NET, "incoming stopped") + return + // Not trying to disconnect, send the error to the errors channel + default: + ERROR.Println(NET, "incoming stopped with error", err) + signalError(c.errors, err) + return + } +} + +// receive a Message object on obound, and then +// actually send outgoing message to the wire +func outgoing(c *client) { + defer c.workers.Done() + DEBUG.Println(NET, "outgoing started") + + for { + DEBUG.Println(NET, "outgoing waiting for an outbound message") + select { + case <-c.stop: + DEBUG.Println(NET, "outgoing stopped") + return + case pub := <-c.obound: + msg := pub.p.(*packets.PublishPacket) + + if c.options.WriteTimeout > 0 { + c.conn.SetWriteDeadline(time.Now().Add(c.options.WriteTimeout)) + } + + if err := msg.Write(c.conn); err != nil { + ERROR.Println(NET, "outgoing stopped with error", err) + pub.t.setError(err) + signalError(c.errors, err) + return + } + + if c.options.WriteTimeout > 0 { + // If we successfully wrote, we don't want the timeout to happen during an idle period + // so we reset it to infinite. + c.conn.SetWriteDeadline(time.Time{}) + } + + if msg.Qos == 0 { + pub.t.flowComplete() + } + DEBUG.Println(NET, "obound wrote msg, id:", msg.MessageID) + case msg := <-c.oboundP: + switch msg.p.(type) { + case *packets.SubscribePacket: + msg.p.(*packets.SubscribePacket).MessageID = c.getID(msg.t) + case *packets.UnsubscribePacket: + msg.p.(*packets.UnsubscribePacket).MessageID = c.getID(msg.t) + } + DEBUG.Println(NET, "obound priority msg to write, type", reflect.TypeOf(msg.p)) + if err := msg.p.Write(c.conn); err != nil { + ERROR.Println(NET, "outgoing stopped with error", err) + if msg.t != nil { + msg.t.setError(err) + } + signalError(c.errors, err) + return + } + switch msg.p.(type) { + case *packets.DisconnectPacket: + msg.t.(*DisconnectToken).flowComplete() + DEBUG.Println(NET, "outbound wrote disconnect, stopping") + return + } + } + // Reset ping timer after sending control packet. 
+ if c.options.KeepAlive != 0 { + c.lastSent.Store(time.Now()) + } + } +} + +// receive Message objects on ibound +// store messages if necessary +// send replies on obound +// delete messages from store if necessary +func alllogic(c *client) { + defer c.workers.Done() + DEBUG.Println(NET, "logic started") + + for { + DEBUG.Println(NET, "logic waiting for msg on ibound") + + select { + case msg := <-c.ibound: + DEBUG.Println(NET, "logic got msg on ibound") + persistInbound(c.persist, msg) + switch m := msg.(type) { + case *packets.PingrespPacket: + DEBUG.Println(NET, "received pingresp") + atomic.StoreInt32(&c.pingOutstanding, 0) + case *packets.SubackPacket: + DEBUG.Println(NET, "received suback, id:", m.MessageID) + token := c.getToken(m.MessageID) + switch t := token.(type) { + case *SubscribeToken: + DEBUG.Println(NET, "granted qoss", m.ReturnCodes) + for i, qos := range m.ReturnCodes { + t.subResult[t.subs[i]] = qos + } + } + token.flowComplete() + c.freeID(m.MessageID) + case *packets.UnsubackPacket: + DEBUG.Println(NET, "received unsuback, id:", m.MessageID) + c.getToken(m.MessageID).flowComplete() + c.freeID(m.MessageID) + case *packets.PublishPacket: + DEBUG.Println(NET, "received publish, msgId:", m.MessageID) + DEBUG.Println(NET, "putting msg on onPubChan") + switch m.Qos { + case 2: + c.incomingPubChan <- m + DEBUG.Println(NET, "done putting msg on incomingPubChan") + case 1: + c.incomingPubChan <- m + DEBUG.Println(NET, "done putting msg on incomingPubChan") + case 0: + select { + case c.incomingPubChan <- m: + case <-c.stop: + } + DEBUG.Println(NET, "done putting msg on incomingPubChan") + } + case *packets.PubackPacket: + DEBUG.Println(NET, "received puback, id:", m.MessageID) + // c.receipts.get(msg.MsgId()) <- Receipt{} + // c.receipts.end(msg.MsgId()) + c.getToken(m.MessageID).flowComplete() + c.freeID(m.MessageID) + case *packets.PubrecPacket: + DEBUG.Println(NET, "received pubrec, id:", m.MessageID) + prel := packets.NewControlPacket(packets.Pubrel).(*packets.PubrelPacket) + prel.MessageID = m.MessageID + select { + case c.oboundP <- &PacketAndToken{p: prel, t: nil}: + case <-c.stop: + } + case *packets.PubrelPacket: + DEBUG.Println(NET, "received pubrel, id:", m.MessageID) + pc := packets.NewControlPacket(packets.Pubcomp).(*packets.PubcompPacket) + pc.MessageID = m.MessageID + persistOutbound(c.persist, pc) + select { + case c.oboundP <- &PacketAndToken{p: pc, t: nil}: + case <-c.stop: + } + case *packets.PubcompPacket: + DEBUG.Println(NET, "received pubcomp, id:", m.MessageID) + c.getToken(m.MessageID).flowComplete() + c.freeID(m.MessageID) + } + case <-c.stop: + WARN.Println(NET, "logic stopped") + return + } + } +} + +func (c *client) ackFunc(packet *packets.PublishPacket) func() { + return func() { + switch packet.Qos { + case 2: + pr := packets.NewControlPacket(packets.Pubrec).(*packets.PubrecPacket) + pr.MessageID = packet.MessageID + DEBUG.Println(NET, "putting pubrec msg on obound") + select { + case c.oboundP <- &PacketAndToken{p: pr, t: nil}: + case <-c.stop: + } + DEBUG.Println(NET, "done putting pubrec msg on obound") + case 1: + pa := packets.NewControlPacket(packets.Puback).(*packets.PubackPacket) + pa.MessageID = packet.MessageID + DEBUG.Println(NET, "putting puback msg on obound") + persistOutbound(c.persist, pa) + select { + case c.oboundP <- &PacketAndToken{p: pa, t: nil}: + case <-c.stop: + } + DEBUG.Println(NET, "done putting puback msg on obound") + case 0: + // do nothing, since there is no need to send an ack packet back + } + } +} + +func 
errorWatch(c *client) { + defer c.workers.Done() + select { + case <-c.stop: + WARN.Println(NET, "errorWatch stopped") + return + case err := <-c.errors: + ERROR.Println(NET, "error triggered, stopping") + go c.internalConnLost(err) + return + } +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/notice.html b/vendor/github.com/eclipse/paho.mqtt.golang/notice.html new file mode 100644 index 00000000000..f19c483b9c8 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/notice.html @@ -0,0 +1,108 @@ + + + + + +Eclipse Foundation Software User Agreement + + + +

+Eclipse Foundation Software User Agreement
+February 1, 2011
+
+Usage Of Content
+
+THE ECLIPSE FOUNDATION MAKES AVAILABLE SOFTWARE, DOCUMENTATION, INFORMATION AND/OR OTHER MATERIALS FOR OPEN SOURCE PROJECTS
+(COLLECTIVELY "CONTENT"). USE OF THE CONTENT IS GOVERNED BY THE TERMS AND CONDITIONS OF THIS AGREEMENT AND/OR THE TERMS AND
+CONDITIONS OF LICENSE AGREEMENTS OR NOTICES INDICATED OR REFERENCED BELOW. BY USING THE CONTENT, YOU AGREE THAT YOUR USE
+OF THE CONTENT IS GOVERNED BY THIS AGREEMENT AND/OR THE TERMS AND CONDITIONS OF ANY APPLICABLE LICENSE AGREEMENTS OR
+NOTICES INDICATED OR REFERENCED BELOW. IF YOU DO NOT AGREE TO THE TERMS AND CONDITIONS OF THIS AGREEMENT AND THE TERMS AND
+CONDITIONS OF ANY APPLICABLE LICENSE AGREEMENTS OR NOTICES INDICATED OR REFERENCED BELOW, THEN YOU MAY NOT USE THE CONTENT.
+
+Applicable Licenses
+
+Unless otherwise indicated, all Content made available by the Eclipse Foundation is provided to you under the terms and
+conditions of the Eclipse Public License Version 1.0 ("EPL"). A copy of the EPL is provided with this Content and is also
+available at http://www.eclipse.org/legal/epl-v10.html. For purposes of the EPL, "Program" will mean the Content.
+
+Content includes, but is not limited to, source code, object code, documentation and other files maintained in the Eclipse
+Foundation source code repository ("Repository") in software modules ("Modules") and made available as downloadable
+archives ("Downloads").
+
+- Content may be structured and packaged into modules to facilitate delivering, extending, and upgrading the Content.
+  Typical modules may include plug-ins ("Plug-ins"), plug-in fragments ("Fragments"), and features ("Features").
+- Each Plug-in or Fragment may be packaged as a sub-directory or JAR (Java™ ARchive) in a directory named "plugins".
+- A Feature is a bundle of one or more Plug-ins and/or Fragments and associated material. Each Feature may be packaged as a
+  sub-directory in a directory named "features". Within a Feature, files named "feature.xml" may contain a list of the
+  names and version numbers of the Plug-ins and/or Fragments associated with that Feature.
+- Features may also include other Features ("Included Features"). Within a Feature, files named "feature.xml" may contain
+  a list of the names and version numbers of Included Features.
+
+The terms and conditions governing Plug-ins and Fragments should be contained in files named "about.html" ("Abouts"). The
+terms and conditions governing Features and Included Features should be contained in files named "license.html" ("Feature
+Licenses"). Abouts and Feature Licenses may be located in any directory of a Download or Module including, but not limited
+to the following locations:
+
+- The top-level (root) directory
+- Plug-in and Fragment directories
+- Inside Plug-ins and Fragments packaged as JARs
+- Sub-directories of the directory named "src" of certain Plug-ins
+- Feature directories
+
+Note: if a Feature made available by the Eclipse Foundation is installed using the Provisioning Technology (as defined
+below), you must agree to a license ("Feature Update License") during the installation process. If the Feature contains
+Included Features, the Feature Update License should either provide you with the terms and conditions governing the
+Included Features or inform you where you can locate them. Feature Update Licenses may be found in the "license" property
+of files named "feature.properties" found within a Feature. Such Abouts, Feature Licenses, and Feature Update Licenses
+contain the terms and conditions (or references to such terms and conditions) that govern your use of the associated
+Content in that directory.
+
+THE ABOUTS, FEATURE LICENSES, AND FEATURE UPDATE LICENSES MAY REFER TO THE EPL OR OTHER LICENSE AGREEMENTS, NOTICES OR
+TERMS AND CONDITIONS. SOME OF THESE OTHER LICENSE AGREEMENTS MAY INCLUDE (BUT ARE NOT LIMITED TO):
+
+IT IS YOUR OBLIGATION TO READ AND ACCEPT ALL SUCH TERMS AND CONDITIONS PRIOR TO USE OF THE CONTENT. If no About, Feature
+License, or Feature Update License is provided, please contact the Eclipse Foundation to determine what terms and
+conditions govern that particular Content.
+
+Use of Provisioning Technology
+
+The Eclipse Foundation makes available provisioning software, examples of which include, but are not limited to, p2 and
+the Eclipse Update Manager ("Provisioning Technology") for the purpose of allowing users to install software,
+documentation, information and/or other materials (collectively "Installable Software"). This capability is provided with
+the intent of allowing such users to install, extend and update Eclipse-based products. Information about packaging
+Installable Software is available at http://eclipse.org/equinox/p2/repository_packaging.html ("Specification").
+
+You may use Provisioning Technology to allow other parties to install Installable Software. You shall be responsible for
+enabling the applicable license agreements relating to the Installable Software to be presented to, and accepted by, the
+users of the Provisioning Technology in accordance with the Specification. By using Provisioning Technology in such a
+manner and making it available in accordance with the Specification, you further acknowledge your agreement to, and the
+acquisition of all necessary rights to permit the following:
+
+1. A series of actions may occur ("Provisioning Process") in which a user may execute the Provisioning Technology on a
+   machine ("Target Machine") with the intent of installing, extending or updating the functionality of an Eclipse-based
+   product.
+2. During the Provisioning Process, the Provisioning Technology may cause third party Installable Software or a portion
+   thereof to be accessed and copied to the Target Machine.
+3. Pursuant to the Specification, you will provide to the user the terms and conditions that govern the use of the
+   Installable Software ("Installable Software Agreement") and such Installable Software Agreement shall be accessed from
+   the Target Machine in accordance with the Specification. Such Installable Software Agreement must inform the user of
+   the terms and conditions that govern the Installable Software and must solicit acceptance by the end user in the manner
+   prescribed in such Installable Software Agreement. Upon such indication of agreement by the user, the provisioning
+   Technology will complete installation of the Installable Software.
+
+Cryptography
+
+Content may contain encryption software. The country in which you are currently may have restrictions on the import,
+possession, and use, and/or re-export to another country, of encryption software. BEFORE using any encryption software,
+please check the country's laws, regulations and policies concerning the import, possession, or use, and re-export of
+encryption software, to see if this is permitted.
+
+Java and all Java-based trademarks are trademarks of Oracle Corporation in the United States, other countries, or both.
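
Back in the client code: errorWatch (just before this notice file) forwards the first error from the net.go goroutines into internalConnLost, which is what ultimately invokes the user's ConnectionLostHandler and, when enabled, the auto-reconnect loop. A sketch of wiring those options together — broker address and client id are hypothetical:

```go
package main

import (
	"log"
	"time"

	mqtt "github.com/eclipse/paho.mqtt.golang"
)

func main() {
	opts := mqtt.NewClientOptions().
		AddBroker("tcp://localhost:1883"). // placeholder broker address
		SetClientID("lost-conn-demo").
		SetAutoReconnect(true).
		SetMaxReconnectInterval(30 * time.Second).
		SetConnectionLostHandler(func(_ mqtt.Client, err error) {
			// Fired (in a goroutine) once errorWatch sees the first error;
			// with AutoReconnect enabled the client then redials on its own.
			log.Println("connection lost:", err)
		})

	c := mqtt.NewClient(opts)
	if t := c.Connect(); t.Wait() && t.Error() != nil {
		log.Fatal(t.Error())
	}
	defer c.Disconnect(250)      // allow 250ms to flush in-flight work
	time.Sleep(10 * time.Second) // keep the process alive for the demo
}
```
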

diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/oops.go b/vendor/github.com/eclipse/paho.mqtt.golang/oops.go
new file mode 100644
index 00000000000..39630d7f28a
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/oops.go
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2013 IBM Corp.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ *    Seth Hoenig
+ *    Allan Stockdill-Mander
+ *    Mike Robertson
+ */
+
+package mqtt
+
+func chkerr(e error) {
+	if e != nil {
+		panic(e)
+	}
+}
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/options.go b/vendor/github.com/eclipse/paho.mqtt.golang/options.go
new file mode 100644
index 00000000000..e96e9ed7c2b
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/options.go
@@ -0,0 +1,340 @@
+/*
+ * Copyright (c) 2013 IBM Corp.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ *    Seth Hoenig
+ *    Allan Stockdill-Mander
+ *    Mike Robertson
+ */
+
+// Portions copyright © 2018 TIBCO Software Inc.
+
+package mqtt
+
+import (
+	"crypto/tls"
+	"net/http"
+	"net/url"
+	"strings"
+	"time"
+)
+
+// CredentialsProvider allows the username and password to be updated
+// before reconnecting. It should return the current username and password.
+type CredentialsProvider func() (username string, password string)
+
+// MessageHandler is a callback type which can be set to be
+// executed upon the arrival of messages published to topics
+// to which the client is subscribed.
+type MessageHandler func(Client, Message)
+
+// ConnectionLostHandler is a callback type which can be set to be
+// executed upon an unintended disconnection from the MQTT broker.
+// Disconnects caused by calling Disconnect or ForceDisconnect will
+// not cause an OnConnectionLost callback to execute.
+type ConnectionLostHandler func(Client, error)
+
+// OnConnectHandler is a callback that is called when the client
+// state changes from unconnected/disconnected to connected, both
+// at initial connection and on reconnection.
+type OnConnectHandler func(Client)
+
+// ClientOptions contains configurable options for a Client.
+type ClientOptions struct {
+	Servers                 []*url.URL
+	ClientID                string
+	Username                string
+	Password                string
+	CredentialsProvider     CredentialsProvider
+	CleanSession            bool
+	Order                   bool
+	WillEnabled             bool
+	WillTopic               string
+	WillPayload             []byte
+	WillQos                 byte
+	WillRetained            bool
+	ProtocolVersion         uint
+	protocolVersionExplicit bool
+	TLSConfig               *tls.Config
+	KeepAlive               int64
+	PingTimeout             time.Duration
+	ConnectTimeout          time.Duration
+	MaxReconnectInterval    time.Duration
+	AutoReconnect           bool
+	Store                   Store
+	DefaultPublishHandler   MessageHandler
+	OnConnect               OnConnectHandler
+	OnConnectionLost        ConnectionLostHandler
+	WriteTimeout            time.Duration
+	MessageChannelDepth     uint
+	ResumeSubs              bool
+	HTTPHeaders             http.Header
+}
+
+// NewClientOptions will create a new ClientOptions type with some
+// default values.
+// Port: 1883
+// CleanSession: True
+// Order: True
+// KeepAlive: 30 (seconds)
+// ConnectTimeout: 30 (seconds)
+// MaxReconnectInterval: 10 (minutes)
+// AutoReconnect: True
+func NewClientOptions() *ClientOptions {
+	o := &ClientOptions{
+		Servers:                 nil,
+		ClientID:                "",
+		Username:                "",
+		Password:                "",
+		CleanSession:            true,
+		Order:                   true,
+		WillEnabled:             false,
+		WillTopic:               "",
+		WillPayload:             nil,
+		WillQos:                 0,
+		WillRetained:            false,
+		ProtocolVersion:         0,
+		protocolVersionExplicit: false,
+		KeepAlive:               30,
+		PingTimeout:             10 * time.Second,
+		ConnectTimeout:          30 * time.Second,
+		MaxReconnectInterval:    10 * time.Minute,
+		AutoReconnect:           true,
+		Store:                   nil,
+		OnConnect:               nil,
+		OnConnectionLost:        DefaultConnectionLostHandler,
+		WriteTimeout:            0, // 0 represents timeout disabled
+		MessageChannelDepth:     100,
+		ResumeSubs:              false,
+		HTTPHeaders:             make(map[string][]string),
+	}
+	return o
+}
+
+// AddBroker adds a broker URI to the list of brokers to be used. The format should be
+// scheme://host:port
+// Where "scheme" is one of "tcp", "ssl", or "ws", "host" is the ip-address (or hostname)
+// and "port" is the port on which the broker is accepting connections.
+//
+// The default hostname is "127.0.0.1" and the default scheme is "tcp://".
+//
+// An example broker URI would look like: tcp://foobar.com:1883
+func (o *ClientOptions) AddBroker(server string) *ClientOptions {
+	if len(server) > 0 && server[0] == ':' {
+		server = "127.0.0.1" + server
+	}
+	if !strings.Contains(server, "://") {
+		server = "tcp://" + server
+	}
+	brokerURI, err := url.Parse(server)
+	if err != nil {
+		ERROR.Println(CLI, "Failed to parse broker address:", server, err)
+		return o
+	}
+	o.Servers = append(o.Servers, brokerURI)
+	return o
+}
+
+// SetResumeSubs will enable resuming of stored (un)subscribe messages when connecting
+// but not reconnecting if CleanSession is false. Otherwise these messages are discarded.
+func (o *ClientOptions) SetResumeSubs(resume bool) *ClientOptions {
+	o.ResumeSubs = resume
+	return o
+}
+
+// SetClientID will set the client id to be used by this client when
+// connecting to the MQTT broker. According to the MQTT v3.1 specification,
+// a client id must be no longer than 23 characters.
+func (o *ClientOptions) SetClientID(id string) *ClientOptions {
+	o.ClientID = id
+	return o
+}
+
+// SetUsername will set the username to be used by this client when connecting
+// to the MQTT broker. Note: without the use of SSL/TLS, this information will
+// be sent in plaintext across the wire.
+func (o *ClientOptions) SetUsername(u string) *ClientOptions {
+	o.Username = u
+	return o
+}
+
+// SetPassword will set the password to be used by this client when connecting
+// to the MQTT broker. Note: without the use of SSL/TLS, this information will
+// be sent in plaintext across the wire.
+func (o *ClientOptions) SetPassword(p string) *ClientOptions {
+	o.Password = p
+	return o
+}
+
+// SetCredentialsProvider will set a method to be called by this client when
+// connecting to the MQTT broker that provides the current username and password.
+// Note: without the use of SSL/TLS, this information will be sent
+// in plaintext across the wire.
+func (o *ClientOptions) SetCredentialsProvider(p CredentialsProvider) *ClientOptions {
+	o.CredentialsProvider = p
+	return o
+}
+
+// SetCleanSession will set the "clean session" flag in the connect message
+// when this client connects to an MQTT broker. By setting this flag, you are
+// indicating that no messages saved by the broker for this client should be
+// delivered. Any messages that this client meant to send before it last
+// disconnected, but did not, will likewise not be sent upon connecting to the
+// broker.
+func (o *ClientOptions) SetCleanSession(clean bool) *ClientOptions {
+	o.CleanSession = clean
+	return o
+}
+
+// SetOrderMatters will set the message routing to guarantee order within
+// each QoS level. By default, this value is true. If set to false,
+// this flag indicates that messages can be delivered asynchronously
+// from the client to the application and possibly arrive out of order.
+func (o *ClientOptions) SetOrderMatters(order bool) *ClientOptions {
+	o.Order = order
+	return o
+}
+
+// SetTLSConfig will set an SSL/TLS configuration to be used when connecting
+// to an MQTT broker. Please read the official Go documentation for more
+// information.
+func (o *ClientOptions) SetTLSConfig(t *tls.Config) *ClientOptions {
+	o.TLSConfig = t
+	return o
+}
+
+// SetStore will set the implementation of the Store interface
+// used to provide message persistence in cases where QoS levels
+// QoS_ONE or QoS_TWO are used. If no store is provided, then the
+// client will use MemoryStore by default.
+func (o *ClientOptions) SetStore(s Store) *ClientOptions {
+	o.Store = s
+	return o
+}
+
+// SetKeepAlive will set the amount of time (in seconds) that the client
+// should wait before sending a PING request to the broker. This will
+// allow the client to know that a connection has not been lost with the
+// server.
+func (o *ClientOptions) SetKeepAlive(k time.Duration) *ClientOptions {
+	o.KeepAlive = int64(k / time.Second)
+	return o
+}
+
+// SetPingTimeout will set the amount of time (in seconds) that the client
+// will wait after sending a PING request to the broker, before deciding
+// that the connection has been lost. Default is 10 seconds.
+func (o *ClientOptions) SetPingTimeout(k time.Duration) *ClientOptions {
+	o.PingTimeout = k
+	return o
+}
+
+// SetProtocolVersion sets the MQTT version to be used to connect to the
+// broker. Legitimate values are currently 3 (MQTT 3.1) and 4 (MQTT 3.1.1).
+func (o *ClientOptions) SetProtocolVersion(pv uint) *ClientOptions {
+	if (pv >= 3 && pv <= 4) || (pv > 0x80) {
+		o.ProtocolVersion = pv
+		o.protocolVersionExplicit = true
+	}
+	return o
+}
+
+// UnsetWill will cause any set will message to be disregarded.
+func (o *ClientOptions) UnsetWill() *ClientOptions {
+	o.WillEnabled = false
+	return o
+}
+
+// SetWill accepts a string will message to be set. When the client connects,
+// it will give this will message to the broker, which will then publish the
+// provided payload (the will) to any clients that are subscribed to the provided
+// topic.
+func (o *ClientOptions) SetWill(topic string, payload string, qos byte, retained bool) *ClientOptions {
+	o.SetBinaryWill(topic, []byte(payload), qos, retained)
+	return o
+}
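
Since every setter returns *ClientOptions, configuration reads as one fluent chain. A hypothetical example combining the will and session options above — broker address, client id, and topics are invented for illustration:

```go
package main

import (
	"time"

	mqtt "github.com/eclipse/paho.mqtt.golang"
)

func main() {
	opts := mqtt.NewClientOptions().
		AddBroker("tcp://broker.example.com:1883"). // placeholder address
		SetClientID("sensor-17").
		SetCleanSession(false).         // let the broker keep QoS 1/2 state between sessions
		SetKeepAlive(60 * time.Second). // stored internally as int64 seconds
		SetWill("sensors/17/status", "offline", 1, true)

	_ = mqtt.NewClient(opts)
}
```
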
+
+// SetBinaryWill accepts a []byte will message to be set. When the client connects,
+// it will give this will message to the broker, which will then publish the
+// provided payload (the will) to any clients that are subscribed to the provided
+// topic.
+func (o *ClientOptions) SetBinaryWill(topic string, payload []byte, qos byte, retained bool) *ClientOptions {
+	o.WillEnabled = true
+	o.WillTopic = topic
+	o.WillPayload = payload
+	o.WillQos = qos
+	o.WillRetained = retained
+	return o
+}
+
+// SetDefaultPublishHandler sets the MessageHandler that will be called when a message
+// is received that does not match any known subscriptions.
+func (o *ClientOptions) SetDefaultPublishHandler(defaultHandler MessageHandler) *ClientOptions {
+	o.DefaultPublishHandler = defaultHandler
+	return o
+}
+
+// SetOnConnectHandler sets the function to be called when the client is connected,
+// both at initial connection time and upon automatic reconnect.
+func (o *ClientOptions) SetOnConnectHandler(onConn OnConnectHandler) *ClientOptions {
+	o.OnConnect = onConn
+	return o
+}
+
+// SetConnectionLostHandler will set the OnConnectionLost callback to be executed
+// in the case where the client unexpectedly loses connection with the MQTT broker.
+func (o *ClientOptions) SetConnectionLostHandler(onLost ConnectionLostHandler) *ClientOptions {
+	o.OnConnectionLost = onLost
+	return o
+}
+
+// SetWriteTimeout puts a limit on how long an MQTT publish should block until it unblocks with a
+// timeout error. A duration of 0 never times out; that is also the default (see NewClientOptions).
+func (o *ClientOptions) SetWriteTimeout(t time.Duration) *ClientOptions {
+	o.WriteTimeout = t
+	return o
+}
+
+// SetConnectTimeout limits how long the client will wait when trying to open a connection
+// to an MQTT server before timing out and erroring the attempt. A duration of 0 never times out.
+// Default 30 seconds. Currently only operational on TCP/TLS connections.
+func (o *ClientOptions) SetConnectTimeout(t time.Duration) *ClientOptions {
+	o.ConnectTimeout = t
+	return o
+}
+
+// SetMaxReconnectInterval sets the maximum time that will be waited between reconnection attempts
+// when the connection is lost.
+func (o *ClientOptions) SetMaxReconnectInterval(t time.Duration) *ClientOptions {
+	o.MaxReconnectInterval = t
+	return o
+}
+
+// SetAutoReconnect sets whether the automatic reconnection logic should be used
+// when the connection is lost. Even if disabled, the ConnectionLostHandler is still
+// called.
+func (o *ClientOptions) SetAutoReconnect(a bool) *ClientOptions {
+	o.AutoReconnect = a
+	return o
+}
+
+// SetMessageChannelDepth sets the size of the internal queue that holds messages while the
+// client is temporarily offline, allowing the application to publish when the client is
+// reconnecting. This setting is only valid if AutoReconnect is set to true; it is otherwise
+// ignored.
+func (o *ClientOptions) SetMessageChannelDepth(s uint) *ClientOptions {
+	o.MessageChannelDepth = s
+	return o
+}
+
+// SetHTTPHeaders sets the additional HTTP headers that will be sent in the WebSocket
+// opening handshake.
+func (o *ClientOptions) SetHTTPHeaders(h http.Header) *ClientOptions {
+	o.HTTPHeaders = h
+	return o
+}
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/options_reader.go b/vendor/github.com/eclipse/paho.mqtt.golang/options_reader.go
new file mode 100644
index 00000000000..60144b93c86
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/options_reader.go
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2013 IBM Corp.
+ *
+ * All rights reserved.
This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +package mqtt + +import ( + "crypto/tls" + "net/http" + "net/url" + "time" +) + +// ClientOptionsReader provides an interface for reading ClientOptions after the client has been initialized. +type ClientOptionsReader struct { + options *ClientOptions +} + +//Servers returns a slice of the servers defined in the clientoptions +func (r *ClientOptionsReader) Servers() []*url.URL { + s := make([]*url.URL, len(r.options.Servers)) + + for i, u := range r.options.Servers { + nu := *u + s[i] = &nu + } + + return s +} + +//ResumeSubs returns true if resuming stored (un)sub is enabled +func (r *ClientOptionsReader) ResumeSubs() bool { + s := r.options.ResumeSubs + return s +} + +//ClientID returns the set client id +func (r *ClientOptionsReader) ClientID() string { + s := r.options.ClientID + return s +} + +//Username returns the set username +func (r *ClientOptionsReader) Username() string { + s := r.options.Username + return s +} + +//Password returns the set password +func (r *ClientOptionsReader) Password() string { + s := r.options.Password + return s +} + +//CleanSession returns whether Cleansession is set +func (r *ClientOptionsReader) CleanSession() bool { + s := r.options.CleanSession + return s +} + +func (r *ClientOptionsReader) Order() bool { + s := r.options.Order + return s +} + +func (r *ClientOptionsReader) WillEnabled() bool { + s := r.options.WillEnabled + return s +} + +func (r *ClientOptionsReader) WillTopic() string { + s := r.options.WillTopic + return s +} + +func (r *ClientOptionsReader) WillPayload() []byte { + s := r.options.WillPayload + return s +} + +func (r *ClientOptionsReader) WillQos() byte { + s := r.options.WillQos + return s +} + +func (r *ClientOptionsReader) WillRetained() bool { + s := r.options.WillRetained + return s +} + +func (r *ClientOptionsReader) ProtocolVersion() uint { + s := r.options.ProtocolVersion + return s +} + +func (r *ClientOptionsReader) TLSConfig() *tls.Config { + s := r.options.TLSConfig + return s +} + +func (r *ClientOptionsReader) KeepAlive() time.Duration { + s := time.Duration(r.options.KeepAlive * int64(time.Second)) + return s +} + +func (r *ClientOptionsReader) PingTimeout() time.Duration { + s := r.options.PingTimeout + return s +} + +func (r *ClientOptionsReader) ConnectTimeout() time.Duration { + s := r.options.ConnectTimeout + return s +} + +func (r *ClientOptionsReader) MaxReconnectInterval() time.Duration { + s := r.options.MaxReconnectInterval + return s +} + +func (r *ClientOptionsReader) AutoReconnect() bool { + s := r.options.AutoReconnect + return s +} + +func (r *ClientOptionsReader) WriteTimeout() time.Duration { + s := r.options.WriteTimeout + return s +} + +func (r *ClientOptionsReader) MessageChannelDepth() uint { + s := r.options.MessageChannelDepth + return s +} + +func (r *ClientOptionsReader) HTTPHeaders() http.Header { + h := r.options.HTTPHeaders + return h +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/connack.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/connack.go new file mode 100644 index 00000000000..25cf30f63d6 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/connack.go @@ -0,0 +1,55 @@ +package packets + +import ( + "bytes" + "fmt" + "io" +) 
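
The reader accessors above are thin copies of the stored options; the one conversion worth noting is KeepAlive, which SetKeepAlive stores as whole int64 seconds and the reader turns back into a time.Duration. A sketch of that round trip — it must live inside package mqtt, since the reader's options field is unexported and client code normally receives a reader from the library rather than building one:

```go
package mqtt

import (
	"fmt"
	"time"
)

// Illustrative only: constructs the reader directly, which code outside
// this package cannot do.
func ExampleClientOptionsReader_keepAlive() {
	o := NewClientOptions().SetKeepAlive(45 * time.Second)
	r := ClientOptionsReader{options: o}

	// 45 was stored as int64 seconds; the reader rebuilds the Duration.
	fmt.Println(r.KeepAlive())
	// Output: 45s
}
```
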
+ +//ConnackPacket is an internal representation of the fields of the +//Connack MQTT packet +type ConnackPacket struct { + FixedHeader + SessionPresent bool + ReturnCode byte +} + +func (ca *ConnackPacket) String() string { + str := fmt.Sprintf("%s", ca.FixedHeader) + str += " " + str += fmt.Sprintf("sessionpresent: %t returncode: %d", ca.SessionPresent, ca.ReturnCode) + return str +} + +func (ca *ConnackPacket) Write(w io.Writer) error { + var body bytes.Buffer + var err error + + body.WriteByte(boolToByte(ca.SessionPresent)) + body.WriteByte(ca.ReturnCode) + ca.FixedHeader.RemainingLength = 2 + packet := ca.FixedHeader.pack() + packet.Write(body.Bytes()) + _, err = packet.WriteTo(w) + + return err +} + +//Unpack decodes the details of a ControlPacket after the fixed +//header has been read +func (ca *ConnackPacket) Unpack(b io.Reader) error { + flags, err := decodeByte(b) + if err != nil { + return err + } + ca.SessionPresent = 1&flags > 0 + ca.ReturnCode, err = decodeByte(b) + + return err +} + +//Details returns a Details struct containing the Qos and +//MessageID of this ControlPacket +func (ca *ConnackPacket) Details() Details { + return Details{Qos: 0, MessageID: 0} +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/connect.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/connect.go new file mode 100644 index 00000000000..cb03ebc0730 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/connect.go @@ -0,0 +1,154 @@ +package packets + +import ( + "bytes" + "fmt" + "io" +) + +//ConnectPacket is an internal representation of the fields of the +//Connect MQTT packet +type ConnectPacket struct { + FixedHeader + ProtocolName string + ProtocolVersion byte + CleanSession bool + WillFlag bool + WillQos byte + WillRetain bool + UsernameFlag bool + PasswordFlag bool + ReservedBit byte + Keepalive uint16 + + ClientIdentifier string + WillTopic string + WillMessage []byte + Username string + Password []byte +} + +func (c *ConnectPacket) String() string { + str := fmt.Sprintf("%s", c.FixedHeader) + str += " " + str += fmt.Sprintf("protocolversion: %d protocolname: %s cleansession: %t willflag: %t WillQos: %d WillRetain: %t Usernameflag: %t Passwordflag: %t keepalive: %d clientId: %s willtopic: %s willmessage: %s Username: %s Password: %s", c.ProtocolVersion, c.ProtocolName, c.CleanSession, c.WillFlag, c.WillQos, c.WillRetain, c.UsernameFlag, c.PasswordFlag, c.Keepalive, c.ClientIdentifier, c.WillTopic, c.WillMessage, c.Username, c.Password) + return str +} + +func (c *ConnectPacket) Write(w io.Writer) error { + var body bytes.Buffer + var err error + + body.Write(encodeString(c.ProtocolName)) + body.WriteByte(c.ProtocolVersion) + body.WriteByte(boolToByte(c.CleanSession)<<1 | boolToByte(c.WillFlag)<<2 | c.WillQos<<3 | boolToByte(c.WillRetain)<<5 | boolToByte(c.PasswordFlag)<<6 | boolToByte(c.UsernameFlag)<<7) + body.Write(encodeUint16(c.Keepalive)) + body.Write(encodeString(c.ClientIdentifier)) + if c.WillFlag { + body.Write(encodeString(c.WillTopic)) + body.Write(encodeBytes(c.WillMessage)) + } + if c.UsernameFlag { + body.Write(encodeString(c.Username)) + } + if c.PasswordFlag { + body.Write(encodeBytes(c.Password)) + } + c.FixedHeader.RemainingLength = body.Len() + packet := c.FixedHeader.pack() + packet.Write(body.Bytes()) + _, err = packet.WriteTo(w) + + return err +} + +//Unpack decodes the details of a ControlPacket after the fixed +//header has been read +func (c *ConnectPacket) Unpack(b io.Reader) error { + var err error + c.ProtocolName, err = 
decodeString(b) + if err != nil { + return err + } + c.ProtocolVersion, err = decodeByte(b) + if err != nil { + return err + } + options, err := decodeByte(b) + if err != nil { + return err + } + c.ReservedBit = 1 & options + c.CleanSession = 1&(options>>1) > 0 + c.WillFlag = 1&(options>>2) > 0 + c.WillQos = 3 & (options >> 3) + c.WillRetain = 1&(options>>5) > 0 + c.PasswordFlag = 1&(options>>6) > 0 + c.UsernameFlag = 1&(options>>7) > 0 + c.Keepalive, err = decodeUint16(b) + if err != nil { + return err + } + c.ClientIdentifier, err = decodeString(b) + if err != nil { + return err + } + if c.WillFlag { + c.WillTopic, err = decodeString(b) + if err != nil { + return err + } + c.WillMessage, err = decodeBytes(b) + if err != nil { + return err + } + } + if c.UsernameFlag { + c.Username, err = decodeString(b) + if err != nil { + return err + } + } + if c.PasswordFlag { + c.Password, err = decodeBytes(b) + if err != nil { + return err + } + } + + return nil +} + +//Validate performs validation of the fields of a Connect packet +func (c *ConnectPacket) Validate() byte { + if c.PasswordFlag && !c.UsernameFlag { + return ErrRefusedBadUsernameOrPassword + } + if c.ReservedBit != 0 { + //Bad reserved bit + return ErrProtocolViolation + } + if (c.ProtocolName == "MQIsdp" && c.ProtocolVersion != 3) || (c.ProtocolName == "MQTT" && c.ProtocolVersion != 4) { + //Mismatched or unsupported protocol version + return ErrRefusedBadProtocolVersion + } + if c.ProtocolName != "MQIsdp" && c.ProtocolName != "MQTT" { + //Bad protocol name + return ErrProtocolViolation + } + if len(c.ClientIdentifier) > 65535 || len(c.Username) > 65535 || len(c.Password) > 65535 { + //Bad size field + return ErrProtocolViolation + } + if len(c.ClientIdentifier) == 0 && !c.CleanSession { + //Bad client identifier + return ErrRefusedIDRejected + } + return Accepted +} + +//Details returns a Details struct containing the Qos and +//MessageID of this ControlPacket +func (c *ConnectPacket) Details() Details { + return Details{Qos: 0, MessageID: 0} +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/disconnect.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/disconnect.go new file mode 100644 index 00000000000..e5c1869207c --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/disconnect.go @@ -0,0 +1,36 @@ +package packets + +import ( + "fmt" + "io" +) + +//DisconnectPacket is an internal representation of the fields of the +//Disconnect MQTT packet +type DisconnectPacket struct { + FixedHeader +} + +func (d *DisconnectPacket) String() string { + str := fmt.Sprintf("%s", d.FixedHeader) + return str +} + +func (d *DisconnectPacket) Write(w io.Writer) error { + packet := d.FixedHeader.pack() + _, err := packet.WriteTo(w) + + return err +} + +//Unpack decodes the details of a ControlPacket after the fixed +//header has been read +func (d *DisconnectPacket) Unpack(b io.Reader) error { + return nil +} + +//Details returns a Details struct containing the Qos and +//MessageID of this ControlPacket +func (d *DisconnectPacket) Details() Details { + return Details{Qos: 0, MessageID: 0} +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/packets.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/packets.go new file mode 100644 index 00000000000..42eeb46d39c --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/packets.go @@ -0,0 +1,346 @@ +package packets + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" +) + +//ControlPacket defines the interface for 
structs intended to hold +//decoded MQTT packets, either from being read or before being +//written +type ControlPacket interface { + Write(io.Writer) error + Unpack(io.Reader) error + String() string + Details() Details +} + +//PacketNames maps the constants for each of the MQTT packet types +//to a string representation of their name. +var PacketNames = map[uint8]string{ + 1: "CONNECT", + 2: "CONNACK", + 3: "PUBLISH", + 4: "PUBACK", + 5: "PUBREC", + 6: "PUBREL", + 7: "PUBCOMP", + 8: "SUBSCRIBE", + 9: "SUBACK", + 10: "UNSUBSCRIBE", + 11: "UNSUBACK", + 12: "PINGREQ", + 13: "PINGRESP", + 14: "DISCONNECT", +} + +//Below are the constants assigned to each of the MQTT packet types +const ( + Connect = 1 + Connack = 2 + Publish = 3 + Puback = 4 + Pubrec = 5 + Pubrel = 6 + Pubcomp = 7 + Subscribe = 8 + Suback = 9 + Unsubscribe = 10 + Unsuback = 11 + Pingreq = 12 + Pingresp = 13 + Disconnect = 14 +) + +//Below are the const definitions for error codes returned by +//Connect() +const ( + Accepted = 0x00 + ErrRefusedBadProtocolVersion = 0x01 + ErrRefusedIDRejected = 0x02 + ErrRefusedServerUnavailable = 0x03 + ErrRefusedBadUsernameOrPassword = 0x04 + ErrRefusedNotAuthorised = 0x05 + ErrNetworkError = 0xFE + ErrProtocolViolation = 0xFF +) + +//ConnackReturnCodes is a map of the error codes constants for Connect() +//to a string representation of the error +var ConnackReturnCodes = map[uint8]string{ + 0: "Connection Accepted", + 1: "Connection Refused: Bad Protocol Version", + 2: "Connection Refused: Client Identifier Rejected", + 3: "Connection Refused: Server Unavailable", + 4: "Connection Refused: Username or Password in unknown format", + 5: "Connection Refused: Not Authorised", + 254: "Connection Error", + 255: "Connection Refused: Protocol Violation", +} + +//ConnErrors is a map of the errors codes constants for Connect() +//to a Go error +var ConnErrors = map[byte]error{ + Accepted: nil, + ErrRefusedBadProtocolVersion: errors.New("Unnacceptable protocol version"), + ErrRefusedIDRejected: errors.New("Identifier rejected"), + ErrRefusedServerUnavailable: errors.New("Server Unavailable"), + ErrRefusedBadUsernameOrPassword: errors.New("Bad user name or password"), + ErrRefusedNotAuthorised: errors.New("Not Authorized"), + ErrNetworkError: errors.New("Network Error"), + ErrProtocolViolation: errors.New("Protocol Violation"), +} + +//ReadPacket takes an instance of an io.Reader (such as net.Conn) and attempts +//to read an MQTT packet from the stream. It returns a ControlPacket +//representing the decoded MQTT packet and an error. One of these returns will +//always be nil, a nil ControlPacket indicating an error occurred. +func ReadPacket(r io.Reader) (ControlPacket, error) { + var fh FixedHeader + b := make([]byte, 1) + + _, err := io.ReadFull(r, b) + if err != nil { + return nil, err + } + + err = fh.unpack(b[0], r) + if err != nil { + return nil, err + } + + cp, err := NewControlPacketWithHeader(fh) + if err != nil { + return nil, err + } + + packetBytes := make([]byte, fh.RemainingLength) + n, err := io.ReadFull(r, packetBytes) + if err != nil { + return nil, err + } + if n != fh.RemainingLength { + return nil, errors.New("Failed to read expected data") + } + + err = cp.Unpack(bytes.NewBuffer(packetBytes)) + return cp, err +} + +//NewControlPacket is used to create a new ControlPacket of the type specified +//by packetType, this is usually done by reference to the packet type constants +//defined in packets.go. The newly created ControlPacket is empty and a pointer +//is returned. 
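
The factory implementation follows just below; together with ReadPacket above it gives a complete serialize/parse round trip over any io.Reader/io.Writer. A minimal sketch (topic, id, and payload are made up):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/eclipse/paho.mqtt.golang/packets"
)

func main() {
	// Construct via the factory, then assert to the concrete type to fill fields.
	pub := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
	pub.Qos = 1
	pub.TopicName = "a/b"
	pub.MessageID = 42
	pub.Payload = []byte("hello")

	var buf bytes.Buffer
	if err := pub.Write(&buf); err != nil { // computes RemainingLength and serializes
		panic(err)
	}

	cp, err := packets.ReadPacket(&buf) // reads the fixed header, then Unpacks the body
	if err != nil {
		panic(err)
	}
	fmt.Println(cp)           // PUBLISH: dup: false qos: 1 retain: false ...
	fmt.Println(cp.Details()) // {1 42}
}
```
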
+func NewControlPacket(packetType byte) ControlPacket { + switch packetType { + case Connect: + return &ConnectPacket{FixedHeader: FixedHeader{MessageType: Connect}} + case Connack: + return &ConnackPacket{FixedHeader: FixedHeader{MessageType: Connack}} + case Disconnect: + return &DisconnectPacket{FixedHeader: FixedHeader{MessageType: Disconnect}} + case Publish: + return &PublishPacket{FixedHeader: FixedHeader{MessageType: Publish}} + case Puback: + return &PubackPacket{FixedHeader: FixedHeader{MessageType: Puback}} + case Pubrec: + return &PubrecPacket{FixedHeader: FixedHeader{MessageType: Pubrec}} + case Pubrel: + return &PubrelPacket{FixedHeader: FixedHeader{MessageType: Pubrel, Qos: 1}} + case Pubcomp: + return &PubcompPacket{FixedHeader: FixedHeader{MessageType: Pubcomp}} + case Subscribe: + return &SubscribePacket{FixedHeader: FixedHeader{MessageType: Subscribe, Qos: 1}} + case Suback: + return &SubackPacket{FixedHeader: FixedHeader{MessageType: Suback}} + case Unsubscribe: + return &UnsubscribePacket{FixedHeader: FixedHeader{MessageType: Unsubscribe, Qos: 1}} + case Unsuback: + return &UnsubackPacket{FixedHeader: FixedHeader{MessageType: Unsuback}} + case Pingreq: + return &PingreqPacket{FixedHeader: FixedHeader{MessageType: Pingreq}} + case Pingresp: + return &PingrespPacket{FixedHeader: FixedHeader{MessageType: Pingresp}} + } + return nil +} + +//NewControlPacketWithHeader is used to create a new ControlPacket of the type +//specified within the FixedHeader that is passed to the function. +//The newly created ControlPacket is empty and a pointer is returned. +func NewControlPacketWithHeader(fh FixedHeader) (ControlPacket, error) { + switch fh.MessageType { + case Connect: + return &ConnectPacket{FixedHeader: fh}, nil + case Connack: + return &ConnackPacket{FixedHeader: fh}, nil + case Disconnect: + return &DisconnectPacket{FixedHeader: fh}, nil + case Publish: + return &PublishPacket{FixedHeader: fh}, nil + case Puback: + return &PubackPacket{FixedHeader: fh}, nil + case Pubrec: + return &PubrecPacket{FixedHeader: fh}, nil + case Pubrel: + return &PubrelPacket{FixedHeader: fh}, nil + case Pubcomp: + return &PubcompPacket{FixedHeader: fh}, nil + case Subscribe: + return &SubscribePacket{FixedHeader: fh}, nil + case Suback: + return &SubackPacket{FixedHeader: fh}, nil + case Unsubscribe: + return &UnsubscribePacket{FixedHeader: fh}, nil + case Unsuback: + return &UnsubackPacket{FixedHeader: fh}, nil + case Pingreq: + return &PingreqPacket{FixedHeader: fh}, nil + case Pingresp: + return &PingrespPacket{FixedHeader: fh}, nil + } + return nil, fmt.Errorf("unsupported packet type 0x%x", fh.MessageType) +} + +//Details struct returned by the Details() function called on +//ControlPackets to present details of the Qos and MessageID +//of the ControlPacket +type Details struct { + Qos byte + MessageID uint16 +} + +//FixedHeader is a struct to hold the decoded information from +//the fixed header of an MQTT ControlPacket +type FixedHeader struct { + MessageType byte + Dup bool + Qos byte + Retain bool + RemainingLength int +} + +func (fh FixedHeader) String() string { + return fmt.Sprintf("%s: dup: %t qos: %d retain: %t rLength: %d", PacketNames[fh.MessageType], fh.Dup, fh.Qos, fh.Retain, fh.RemainingLength) +} + +func boolToByte(b bool) byte { + switch b { + case true: + return 1 + default: + return 0 + } +} + +func (fh *FixedHeader) pack() bytes.Buffer { + var header bytes.Buffer + header.WriteByte(fh.MessageType<<4 | boolToByte(fh.Dup)<<3 | fh.Qos<<1 | boolToByte(fh.Retain)) + 
header.Write(encodeLength(fh.RemainingLength)) + return header +} + +func (fh *FixedHeader) unpack(typeAndFlags byte, r io.Reader) error { + fh.MessageType = typeAndFlags >> 4 + fh.Dup = (typeAndFlags>>3)&0x01 > 0 + fh.Qos = (typeAndFlags >> 1) & 0x03 + fh.Retain = typeAndFlags&0x01 > 0 + + var err error + fh.RemainingLength, err = decodeLength(r) + return err +} + +func decodeByte(b io.Reader) (byte, error) { + num := make([]byte, 1) + _, err := b.Read(num) + if err != nil { + return 0, err + } + + return num[0], nil +} + +func decodeUint16(b io.Reader) (uint16, error) { + num := make([]byte, 2) + _, err := b.Read(num) + if err != nil { + return 0, err + } + return binary.BigEndian.Uint16(num), nil +} + +func encodeUint16(num uint16) []byte { + bytes := make([]byte, 2) + binary.BigEndian.PutUint16(bytes, num) + return bytes +} + +func encodeString(field string) []byte { + return encodeBytes([]byte(field)) +} + +func decodeString(b io.Reader) (string, error) { + buf, err := decodeBytes(b) + return string(buf), err +} + +func decodeBytes(b io.Reader) ([]byte, error) { + fieldLength, err := decodeUint16(b) + if err != nil { + return nil, err + } + + field := make([]byte, fieldLength) + _, err = b.Read(field) + if err != nil { + return nil, err + } + + return field, nil +} + +func encodeBytes(field []byte) []byte { + fieldLength := make([]byte, 2) + binary.BigEndian.PutUint16(fieldLength, uint16(len(field))) + return append(fieldLength, field...) +} + +func encodeLength(length int) []byte { + var encLength []byte + for { + digit := byte(length % 128) + length /= 128 + if length > 0 { + digit |= 0x80 + } + encLength = append(encLength, digit) + if length == 0 { + break + } + } + return encLength +} + +func decodeLength(r io.Reader) (int, error) { + var rLength uint32 + var multiplier uint32 + b := make([]byte, 1) + for multiplier < 27 { //fix: Infinite '(digit & 128) == 1' will cause the dead loop + _, err := io.ReadFull(r, b) + if err != nil { + return 0, err + } + + digit := b[0] + rLength |= uint32(digit&127) << multiplier + if (digit & 128) == 0 { + break + } + multiplier += 7 + } + return int(rLength), nil +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/pingreq.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/pingreq.go new file mode 100644 index 00000000000..5c3e88f9408 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/pingreq.go @@ -0,0 +1,36 @@ +package packets + +import ( + "fmt" + "io" +) + +//PingreqPacket is an internal representation of the fields of the +//Pingreq MQTT packet +type PingreqPacket struct { + FixedHeader +} + +func (pr *PingreqPacket) String() string { + str := fmt.Sprintf("%s", pr.FixedHeader) + return str +} + +func (pr *PingreqPacket) Write(w io.Writer) error { + packet := pr.FixedHeader.pack() + _, err := packet.WriteTo(w) + + return err +} + +//Unpack decodes the details of a ControlPacket after the fixed +//header has been read +func (pr *PingreqPacket) Unpack(b io.Reader) error { + return nil +} + +//Details returns a Details struct containing the Qos and +//MessageID of this ControlPacket +func (pr *PingreqPacket) Details() Details { + return Details{Qos: 0, MessageID: 0} +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/pingresp.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/pingresp.go new file mode 100644 index 00000000000..39ebc001e66 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/pingresp.go @@ -0,0 +1,36 @@ +package packets + +import ( + "fmt" + "io" +) + 
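
The unexported encodeLength/decodeLength helpers above implement the MQTT "Remaining Length" varint: seven payload bits per byte, with the high bit flagging continuation, up to a four-byte maximum (the `multiplier < 27` guard bounds malformed streams that never clear the continuation bit). A stand-alone re-implementation for illustration, since the originals are not exported:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

// encodeLength emits 7 bits per byte, setting the high bit when more follow.
func encodeLength(length int) []byte {
	var enc []byte
	for {
		digit := byte(length % 128)
		length /= 128
		if length > 0 {
			digit |= 0x80
		}
		enc = append(enc, digit)
		if length == 0 {
			return enc
		}
	}
}

// decodeLength reassembles the value, reading at most 4 bytes
// (the spec's ceiling is 268,435,455).
func decodeLength(r io.Reader) (int, error) {
	var value, shift uint32
	b := make([]byte, 1)
	for shift < 28 {
		if _, err := io.ReadFull(r, b); err != nil {
			return 0, err
		}
		value |= uint32(b[0]&0x7F) << shift
		if b[0]&0x80 == 0 {
			break
		}
		shift += 7
	}
	return int(value), nil
}

func main() {
	for _, n := range []int{0, 127, 128, 16383, 268435455} {
		enc := encodeLength(n)
		dec, _ := decodeLength(bytes.NewReader(enc))
		fmt.Printf("%9d -> % x -> %d\n", n, enc, dec)
	}
}
```
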
+//PingrespPacket is an internal representation of the fields of the +//Pingresp MQTT packet +type PingrespPacket struct { + FixedHeader +} + +func (pr *PingrespPacket) String() string { + str := fmt.Sprintf("%s", pr.FixedHeader) + return str +} + +func (pr *PingrespPacket) Write(w io.Writer) error { + packet := pr.FixedHeader.pack() + _, err := packet.WriteTo(w) + + return err +} + +//Unpack decodes the details of a ControlPacket after the fixed +//header has been read +func (pr *PingrespPacket) Unpack(b io.Reader) error { + return nil +} + +//Details returns a Details struct containing the Qos and +//MessageID of this ControlPacket +func (pr *PingrespPacket) Details() Details { + return Details{Qos: 0, MessageID: 0} +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/puback.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/puback.go new file mode 100644 index 00000000000..7c0cd7efdd1 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/puback.go @@ -0,0 +1,45 @@ +package packets + +import ( + "fmt" + "io" +) + +//PubackPacket is an internal representation of the fields of the +//Puback MQTT packet +type PubackPacket struct { + FixedHeader + MessageID uint16 +} + +func (pa *PubackPacket) String() string { + str := fmt.Sprintf("%s", pa.FixedHeader) + str += " " + str += fmt.Sprintf("MessageID: %d", pa.MessageID) + return str +} + +func (pa *PubackPacket) Write(w io.Writer) error { + var err error + pa.FixedHeader.RemainingLength = 2 + packet := pa.FixedHeader.pack() + packet.Write(encodeUint16(pa.MessageID)) + _, err = packet.WriteTo(w) + + return err +} + +//Unpack decodes the details of a ControlPacket after the fixed +//header has been read +func (pa *PubackPacket) Unpack(b io.Reader) error { + var err error + pa.MessageID, err = decodeUint16(b) + + return err +} + +//Details returns a Details struct containing the Qos and +//MessageID of this ControlPacket +func (pa *PubackPacket) Details() Details { + return Details{Qos: pa.Qos, MessageID: pa.MessageID} +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/pubcomp.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/pubcomp.go new file mode 100644 index 00000000000..4f6f6e216e1 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/pubcomp.go @@ -0,0 +1,45 @@ +package packets + +import ( + "fmt" + "io" +) + +//PubcompPacket is an internal representation of the fields of the +//Pubcomp MQTT packet +type PubcompPacket struct { + FixedHeader + MessageID uint16 +} + +func (pc *PubcompPacket) String() string { + str := fmt.Sprintf("%s", pc.FixedHeader) + str += " " + str += fmt.Sprintf("MessageID: %d", pc.MessageID) + return str +} + +func (pc *PubcompPacket) Write(w io.Writer) error { + var err error + pc.FixedHeader.RemainingLength = 2 + packet := pc.FixedHeader.pack() + packet.Write(encodeUint16(pc.MessageID)) + _, err = packet.WriteTo(w) + + return err +} + +//Unpack decodes the details of a ControlPacket after the fixed +//header has been read +func (pc *PubcompPacket) Unpack(b io.Reader) error { + var err error + pc.MessageID, err = decodeUint16(b) + + return err +} + +//Details returns a Details struct containing the Qos and +//MessageID of this ControlPacket +func (pc *PubcompPacket) Details() Details { + return Details{Qos: pc.Qos, MessageID: pc.MessageID} +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/publish.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/publish.go new file mode 100644 index 00000000000..adc9adb9c0d --- /dev/null 
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/publish.go @@ -0,0 +1,88 @@ +package packets + +import ( + "bytes" + "fmt" + "io" +) + +//PublishPacket is an internal representation of the fields of the +//Publish MQTT packet +type PublishPacket struct { + FixedHeader + TopicName string + MessageID uint16 + Payload []byte +} + +func (p *PublishPacket) String() string { + str := fmt.Sprintf("%s", p.FixedHeader) + str += " " + str += fmt.Sprintf("topicName: %s MessageID: %d", p.TopicName, p.MessageID) + str += " " + str += fmt.Sprintf("payload: %s", string(p.Payload)) + return str +} + +func (p *PublishPacket) Write(w io.Writer) error { + var body bytes.Buffer + var err error + + body.Write(encodeString(p.TopicName)) + if p.Qos > 0 { + body.Write(encodeUint16(p.MessageID)) + } + p.FixedHeader.RemainingLength = body.Len() + len(p.Payload) + packet := p.FixedHeader.pack() + packet.Write(body.Bytes()) + packet.Write(p.Payload) + _, err = w.Write(packet.Bytes()) + + return err +} + +//Unpack decodes the details of a ControlPacket after the fixed +//header has been read +func (p *PublishPacket) Unpack(b io.Reader) error { + var payloadLength = p.FixedHeader.RemainingLength + var err error + p.TopicName, err = decodeString(b) + if err != nil { + return err + } + + if p.Qos > 0 { + p.MessageID, err = decodeUint16(b) + if err != nil { + return err + } + payloadLength -= len(p.TopicName) + 4 + } else { + payloadLength -= len(p.TopicName) + 2 + } + if payloadLength < 0 { + return fmt.Errorf("Error unpacking publish, payload length < 0") + } + p.Payload = make([]byte, payloadLength) + _, err = b.Read(p.Payload) + + return err +} + +//Copy creates a new PublishPacket with the same topic and payload +//but an empty fixed header, useful for when you want to deliver +//a message with different properties such as Qos but the same +//content +func (p *PublishPacket) Copy() *PublishPacket { + newP := NewControlPacket(Publish).(*PublishPacket) + newP.TopicName = p.TopicName + newP.Payload = p.Payload + + return newP +} + +//Details returns a Details struct containing the Qos and +//MessageID of this ControlPacket +func (p *PublishPacket) Details() Details { + return Details{Qos: p.Qos, MessageID: p.MessageID} +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/pubrec.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/pubrec.go new file mode 100644 index 00000000000..483372b0722 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/pubrec.go @@ -0,0 +1,45 @@ +package packets + +import ( + "fmt" + "io" +) + +//PubrecPacket is an internal representation of the fields of the +//Pubrec MQTT packet +type PubrecPacket struct { + FixedHeader + MessageID uint16 +} + +func (pr *PubrecPacket) String() string { + str := fmt.Sprintf("%s", pr.FixedHeader) + str += " " + str += fmt.Sprintf("MessageID: %d", pr.MessageID) + return str +} + +func (pr *PubrecPacket) Write(w io.Writer) error { + var err error + pr.FixedHeader.RemainingLength = 2 + packet := pr.FixedHeader.pack() + packet.Write(encodeUint16(pr.MessageID)) + _, err = packet.WriteTo(w) + + return err +} + +//Unpack decodes the details of a ControlPacket after the fixed +//header has been read +func (pr *PubrecPacket) Unpack(b io.Reader) error { + var err error + pr.MessageID, err = decodeUint16(b) + + return err +} + +//Details returns a Details struct containing the Qos and +//MessageID of this ControlPacket +func (pr *PubrecPacket) Details() Details { + return Details{Qos: pr.Qos, MessageID: pr.MessageID} +} diff --git 
a/vendor/github.com/eclipse/paho.mqtt.golang/packets/pubrel.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/pubrel.go new file mode 100644 index 00000000000..8590fd976ce --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/pubrel.go @@ -0,0 +1,45 @@ +package packets + +import ( + "fmt" + "io" +) + +//PubrelPacket is an internal representation of the fields of the +//Pubrel MQTT packet +type PubrelPacket struct { + FixedHeader + MessageID uint16 +} + +func (pr *PubrelPacket) String() string { + str := fmt.Sprintf("%s", pr.FixedHeader) + str += " " + str += fmt.Sprintf("MessageID: %d", pr.MessageID) + return str +} + +func (pr *PubrelPacket) Write(w io.Writer) error { + var err error + pr.FixedHeader.RemainingLength = 2 + packet := pr.FixedHeader.pack() + packet.Write(encodeUint16(pr.MessageID)) + _, err = packet.WriteTo(w) + + return err +} + +//Unpack decodes the details of a ControlPacket after the fixed +//header has been read +func (pr *PubrelPacket) Unpack(b io.Reader) error { + var err error + pr.MessageID, err = decodeUint16(b) + + return err +} + +//Details returns a Details struct containing the Qos and +//MessageID of this ControlPacket +func (pr *PubrelPacket) Details() Details { + return Details{Qos: pr.Qos, MessageID: pr.MessageID} +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/suback.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/suback.go new file mode 100644 index 00000000000..fc0572475ad --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/suback.go @@ -0,0 +1,60 @@ +package packets + +import ( + "bytes" + "fmt" + "io" +) + +//SubackPacket is an internal representation of the fields of the +//Suback MQTT packet +type SubackPacket struct { + FixedHeader + MessageID uint16 + ReturnCodes []byte +} + +func (sa *SubackPacket) String() string { + str := fmt.Sprintf("%s", sa.FixedHeader) + str += " " + str += fmt.Sprintf("MessageID: %d", sa.MessageID) + return str +} + +func (sa *SubackPacket) Write(w io.Writer) error { + var body bytes.Buffer + var err error + body.Write(encodeUint16(sa.MessageID)) + body.Write(sa.ReturnCodes) + sa.FixedHeader.RemainingLength = body.Len() + packet := sa.FixedHeader.pack() + packet.Write(body.Bytes()) + _, err = packet.WriteTo(w) + + return err +} + +//Unpack decodes the details of a ControlPacket after the fixed +//header has been read +func (sa *SubackPacket) Unpack(b io.Reader) error { + var qosBuffer bytes.Buffer + var err error + sa.MessageID, err = decodeUint16(b) + if err != nil { + return err + } + + _, err = qosBuffer.ReadFrom(b) + if err != nil { + return err + } + sa.ReturnCodes = qosBuffer.Bytes() + + return nil +} + +//Details returns a Details struct containing the Qos and +//MessageID of this ControlPacket +func (sa *SubackPacket) Details() Details { + return Details{Qos: 0, MessageID: sa.MessageID} +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/subscribe.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/subscribe.go new file mode 100644 index 00000000000..0787ce07c07 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/subscribe.go @@ -0,0 +1,72 @@ +package packets + +import ( + "bytes" + "fmt" + "io" +) + +//SubscribePacket is an internal representation of the fields of the +//Subscribe MQTT packet +type SubscribePacket struct { + FixedHeader + MessageID uint16 + Topics []string + Qoss []byte +} + +func (s *SubscribePacket) String() string { + str := fmt.Sprintf("%s", s.FixedHeader) + str += " " + str += 
fmt.Sprintf("MessageID: %d topics: %s", s.MessageID, s.Topics) + return str +} + +func (s *SubscribePacket) Write(w io.Writer) error { + var body bytes.Buffer + var err error + + body.Write(encodeUint16(s.MessageID)) + for i, topic := range s.Topics { + body.Write(encodeString(topic)) + body.WriteByte(s.Qoss[i]) + } + s.FixedHeader.RemainingLength = body.Len() + packet := s.FixedHeader.pack() + packet.Write(body.Bytes()) + _, err = packet.WriteTo(w) + + return err +} + +//Unpack decodes the details of a ControlPacket after the fixed +//header has been read +func (s *SubscribePacket) Unpack(b io.Reader) error { + var err error + s.MessageID, err = decodeUint16(b) + if err != nil { + return err + } + payloadLength := s.FixedHeader.RemainingLength - 2 + for payloadLength > 0 { + topic, err := decodeString(b) + if err != nil { + return err + } + s.Topics = append(s.Topics, topic) + qos, err := decodeByte(b) + if err != nil { + return err + } + s.Qoss = append(s.Qoss, qos) + payloadLength -= 2 + len(topic) + 1 //2 bytes of string length, plus string, plus 1 byte for Qos + } + + return nil +} + +//Details returns a Details struct containing the Qos and +//MessageID of this ControlPacket +func (s *SubscribePacket) Details() Details { + return Details{Qos: 1, MessageID: s.MessageID} +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/unsuback.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/unsuback.go new file mode 100644 index 00000000000..4b40c273af0 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/unsuback.go @@ -0,0 +1,45 @@ +package packets + +import ( + "fmt" + "io" +) + +//UnsubackPacket is an internal representation of the fields of the +//Unsuback MQTT packet +type UnsubackPacket struct { + FixedHeader + MessageID uint16 +} + +func (ua *UnsubackPacket) String() string { + str := fmt.Sprintf("%s", ua.FixedHeader) + str += " " + str += fmt.Sprintf("MessageID: %d", ua.MessageID) + return str +} + +func (ua *UnsubackPacket) Write(w io.Writer) error { + var err error + ua.FixedHeader.RemainingLength = 2 + packet := ua.FixedHeader.pack() + packet.Write(encodeUint16(ua.MessageID)) + _, err = packet.WriteTo(w) + + return err +} + +//Unpack decodes the details of a ControlPacket after the fixed +//header has been read +func (ua *UnsubackPacket) Unpack(b io.Reader) error { + var err error + ua.MessageID, err = decodeUint16(b) + + return err +} + +//Details returns a Details struct containing the Qos and +//MessageID of this ControlPacket +func (ua *UnsubackPacket) Details() Details { + return Details{Qos: 0, MessageID: ua.MessageID} +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/unsubscribe.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/unsubscribe.go new file mode 100644 index 00000000000..2012c310f83 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/unsubscribe.go @@ -0,0 +1,59 @@ +package packets + +import ( + "bytes" + "fmt" + "io" +) + +//UnsubscribePacket is an internal representation of the fields of the +//Unsubscribe MQTT packet +type UnsubscribePacket struct { + FixedHeader + MessageID uint16 + Topics []string +} + +func (u *UnsubscribePacket) String() string { + str := fmt.Sprintf("%s", u.FixedHeader) + str += " " + str += fmt.Sprintf("MessageID: %d", u.MessageID) + return str +} + +func (u *UnsubscribePacket) Write(w io.Writer) error { + var body bytes.Buffer + var err error + body.Write(encodeUint16(u.MessageID)) + for _, topic := range u.Topics { + body.Write(encodeString(topic)) + } + 
u.FixedHeader.RemainingLength = body.Len() + packet := u.FixedHeader.pack() + packet.Write(body.Bytes()) + _, err = packet.WriteTo(w) + + return err +} + +//Unpack decodes the details of a ControlPacket after the fixed +//header has been read +func (u *UnsubscribePacket) Unpack(b io.Reader) error { + var err error + u.MessageID, err = decodeUint16(b) + if err != nil { + return err + } + + for topic, err := decodeString(b); err == nil && topic != ""; topic, err = decodeString(b) { + u.Topics = append(u.Topics, topic) + } + + return err +} + +//Details returns a Details struct containing the Qos and +//MessageID of this ControlPacket +func (u *UnsubscribePacket) Details() Details { + return Details{Qos: 1, MessageID: u.MessageID} +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/ping.go b/vendor/github.com/eclipse/paho.mqtt.golang/ping.go new file mode 100644 index 00000000000..dcbcb1dd226 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/ping.go @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +package mqtt + +import ( + "errors" + "sync/atomic" + "time" + + "github.com/eclipse/paho.mqtt.golang/packets" +) + +func keepalive(c *client) { + defer c.workers.Done() + DEBUG.Println(PNG, "keepalive starting") + var checkInterval int64 + var pingSent time.Time + + if c.options.KeepAlive > 10 { + checkInterval = 5 + } else { + checkInterval = c.options.KeepAlive / 2 + } + + intervalTicker := time.NewTicker(time.Duration(checkInterval * int64(time.Second))) + defer intervalTicker.Stop() + + for { + select { + case <-c.stop: + DEBUG.Println(PNG, "keepalive stopped") + return + case <-intervalTicker.C: + lastSent := c.lastSent.Load().(time.Time) + lastReceived := c.lastReceived.Load().(time.Time) + + DEBUG.Println(PNG, "ping check", time.Since(lastSent).Seconds()) + if time.Since(lastSent) >= time.Duration(c.options.KeepAlive*int64(time.Second)) || time.Since(lastReceived) >= time.Duration(c.options.KeepAlive*int64(time.Second)) { + if atomic.LoadInt32(&c.pingOutstanding) == 0 { + DEBUG.Println(PNG, "keepalive sending ping") + ping := packets.NewControlPacket(packets.Pingreq).(*packets.PingreqPacket) + //We don't want to wait behind large messages being sent, the Write call + //will block until it it able to send the packet. + atomic.StoreInt32(&c.pingOutstanding, 1) + ping.Write(c.conn) + c.lastSent.Store(time.Now()) + pingSent = time.Now() + } + } + if atomic.LoadInt32(&c.pingOutstanding) > 0 && time.Now().Sub(pingSent) >= c.options.PingTimeout { + CRITICAL.Println(PNG, "pingresp not received, disconnecting") + c.errors <- errors.New("pingresp not received, disconnecting") + return + } + } + } +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/router.go b/vendor/github.com/eclipse/paho.mqtt.golang/router.go new file mode 100644 index 00000000000..7b4e8f8082f --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/router.go @@ -0,0 +1,187 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. 
This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ *    Seth Hoenig
+ *    Allan Stockdill-Mander
+ *    Mike Robertson
+ */
+
+package mqtt
+
+import (
+	"container/list"
+	"strings"
+	"sync"
+
+	"github.com/eclipse/paho.mqtt.golang/packets"
+)
+
+// route is a type which associates MQTT Topic strings with a
+// callback to be executed upon the arrival of a message associated
+// with a subscription to that topic.
+type route struct {
+	topic    string
+	callback MessageHandler
+}
+
+// match takes a slice of strings representing the route being tested, split
+// on '/' separators, and a slice of strings representing the topic of the
+// published message, split the same way.
+// The function determines whether the topic matches the route according to
+// the MQTT topic rules and returns the outcome as a boolean.
+func match(route []string, topic []string) bool {
+	if len(route) == 0 {
+		if len(topic) == 0 {
+			return true
+		}
+		return false
+	}
+
+	if len(topic) == 0 {
+		if route[0] == "#" {
+			return true
+		}
+		return false
+	}
+
+	if route[0] == "#" {
+		return true
+	}
+
+	if (route[0] == "+") || (route[0] == topic[0]) {
+		return match(route[1:], topic[1:])
+	}
+	return false
+}
+
+func routeIncludesTopic(route, topic string) bool {
+	return match(routeSplit(route), strings.Split(topic, "/"))
+}
+
+// routeSplit removes $share and the share name when splitting the route to
+// allow shared subscription routes to correctly match the topic
+func routeSplit(route string) []string {
+	var result []string
+	if strings.HasPrefix(route, "$share") {
+		result = strings.Split(route, "/")[2:]
+	} else {
+		result = strings.Split(route, "/")
+	}
+	return result
+}
+
+// match takes the topic string of the published message and does a basic
+// comparison with the current Route's topic string, returning true if they match
+func (r *route) match(topic string) bool {
+	return r.topic == topic || routeIncludesTopic(r.topic, topic)
+}
+
+type router struct {
+	sync.RWMutex
+	routes         *list.List
+	defaultHandler MessageHandler
+	messages       chan *packets.PublishPacket
+	stop           chan bool
+}
+
+// newRouter returns a new instance of a Router and a channel which can be used
+// to tell the Router to stop
+func newRouter() (*router, chan bool) {
+	router := &router{routes: list.New(), messages: make(chan *packets.PublishPacket), stop: make(chan bool)}
+	stop := router.stop
+	return router, stop
+}
+
+// addRoute takes a topic string and MessageHandler callback. It looks in the current list of
+// routes to see if there is already a matching Route. If there is, it replaces the current
+// callback with the new one. If not, it adds a new entry to the list of Routes.
+func (r *router) addRoute(topic string, callback MessageHandler) {
+	r.Lock()
+	defer r.Unlock()
+	for e := r.routes.Front(); e != nil; e = e.Next() {
+		if e.Value.(*route).match(topic) {
+			r := e.Value.(*route)
+			r.callback = callback
+			return
+		}
+	}
+	r.routes.PushBack(&route{topic: topic, callback: callback})
+}
+
+// deleteRoute takes a route string and looks for a matching Route in the list of Routes. If
+// found, it removes the Route from the list.
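+// Only the first matching Route is removed; if no Route matches, the list is
+// left unchanged.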
+func (r *router) deleteRoute(topic string) { + r.Lock() + defer r.Unlock() + for e := r.routes.Front(); e != nil; e = e.Next() { + if e.Value.(*route).match(topic) { + r.routes.Remove(e) + return + } + } +} + +// setDefaultHandler assigns a default callback that will be called if no matching Route +// is found for an incoming Publish. +func (r *router) setDefaultHandler(handler MessageHandler) { + r.Lock() + defer r.Unlock() + r.defaultHandler = handler +} + +// matchAndDispatch takes a channel of Message pointers as input and starts a go routine that +// takes messages off the channel, matches them against the internal route list and calls the +// associated callback (or the defaultHandler, if one exists and no other route matched). If +// anything is sent down the stop channel the function will end. +func (r *router) matchAndDispatch(messages <-chan *packets.PublishPacket, order bool, client *client) { + go func() { + for { + select { + case message := <-messages: + sent := false + r.RLock() + m := messageFromPublish(message, client.ackFunc(message)) + handlers := []MessageHandler{} + for e := r.routes.Front(); e != nil; e = e.Next() { + if e.Value.(*route).match(message.TopicName) { + if order { + handlers = append(handlers, e.Value.(*route).callback) + } else { + hd := e.Value.(*route).callback + go func() { + hd(client, m) + m.Ack() + }() + } + sent = true + } + } + if !sent && r.defaultHandler != nil { + if order { + handlers = append(handlers, r.defaultHandler) + } else { + go func() { + r.defaultHandler(client, m) + m.Ack() + }() + } + } + r.RUnlock() + for _, handler := range handlers { + func() { + handler(client, m) + m.Ack() + }() + } + case <-r.stop: + return + } + } + }() +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/store.go b/vendor/github.com/eclipse/paho.mqtt.golang/store.go new file mode 100644 index 00000000000..24a76b7df3c --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/store.go @@ -0,0 +1,136 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +package mqtt + +import ( + "fmt" + "strconv" + + "github.com/eclipse/paho.mqtt.golang/packets" +) + +const ( + inboundPrefix = "i." + outboundPrefix = "o." +) + +// Store is an interface which can be used to provide implementations +// for message persistence. +// Because we may have to store distinct messages with the same +// message ID, we need a unique key for each message. This is +// possible by prepending "i." or "o." 
to each message id
+type Store interface {
+	Open()
+	Put(key string, message packets.ControlPacket)
+	Get(key string) packets.ControlPacket
+	All() []string
+	Del(key string)
+	Close()
+	Reset()
+}
+
+// A key MUST have the form "X.[messageid]"
+// where X is 'i' or 'o'
+func mIDFromKey(key string) uint16 {
+	s := key[2:]
+	i, err := strconv.Atoi(s)
+	chkerr(err)
+	return uint16(i)
+}
+
+// Return true if key prefix is outbound
+func isKeyOutbound(key string) bool {
+	return key[:2] == outboundPrefix
+}
+
+// Return true if key prefix is inbound
+func isKeyInbound(key string) bool {
+	return key[:2] == inboundPrefix
+}
+
+// Return a string of the form "i.[id]"
+func inboundKeyFromMID(id uint16) string {
+	return fmt.Sprintf("%s%d", inboundPrefix, id)
+}
+
+// Return a string of the form "o.[id]"
+func outboundKeyFromMID(id uint16) string {
+	return fmt.Sprintf("%s%d", outboundPrefix, id)
+}
+
+// govern which outgoing messages are persisted
+func persistOutbound(s Store, m packets.ControlPacket) {
+	switch m.Details().Qos {
+	case 0:
+		switch m.(type) {
+		case *packets.PubackPacket, *packets.PubcompPacket:
+			// Sending puback. delete matching publish
+			// from ibound
+			s.Del(inboundKeyFromMID(m.Details().MessageID))
+		}
+	case 1:
+		switch m.(type) {
+		case *packets.PublishPacket, *packets.PubrelPacket, *packets.SubscribePacket, *packets.UnsubscribePacket:
+			// Sending publish. store in obound
+			// until puback received
+			s.Put(outboundKeyFromMID(m.Details().MessageID), m)
+		default:
+			ERROR.Println(STR, "Asked to persist an invalid message type")
+		}
+	case 2:
+		switch m.(type) {
+		case *packets.PublishPacket:
+			// Sending publish. store in obound
+			// until pubrel received
+			s.Put(outboundKeyFromMID(m.Details().MessageID), m)
+		default:
+			ERROR.Println(STR, "Asked to persist an invalid message type")
+		}
+	}
+}
+
+// govern which incoming messages are persisted
+func persistInbound(s Store, m packets.ControlPacket) {
+	switch m.Details().Qos {
+	case 0:
+		switch m.(type) {
+		case *packets.PubackPacket, *packets.SubackPacket, *packets.UnsubackPacket, *packets.PubcompPacket:
+			// Received a puback. delete matching publish
+			// from obound
+			s.Del(outboundKeyFromMID(m.Details().MessageID))
+		case *packets.PublishPacket, *packets.PubrecPacket, *packets.PingrespPacket, *packets.ConnackPacket:
+		default:
+			ERROR.Println(STR, "Asked to persist an invalid message type")
+		}
+	case 1:
+		switch m.(type) {
+		case *packets.PublishPacket, *packets.PubrelPacket:
+			// Received a publish. store it in ibound
+			// until puback sent
+			s.Put(inboundKeyFromMID(m.Details().MessageID), m)
+		default:
+			ERROR.Println(STR, "Asked to persist an invalid message type")
+		}
+	case 2:
+		switch m.(type) {
+		case *packets.PublishPacket:
+			// Received a publish. store it in ibound
+			// until pubrel received
+			s.Put(inboundKeyFromMID(m.Details().MessageID), m)
+		default:
+			ERROR.Println(STR, "Asked to persist an invalid message type")
+		}
+	}
+}
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/token.go b/vendor/github.com/eclipse/paho.mqtt.golang/token.go
new file mode 100644
index 00000000000..0818553332f
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/token.go
@@ -0,0 +1,184 @@
+/*
+ * Copyright (c) 2014 IBM Corp.
+ *
+ * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Allan Stockdill-Mander + */ + +package mqtt + +import ( + "sync" + "time" + + "github.com/eclipse/paho.mqtt.golang/packets" +) + +// PacketAndToken is a struct that contains both a ControlPacket and a +// Token. This struct is passed via channels between the client interface +// code and the underlying code responsible for sending and receiving +// MQTT messages. +type PacketAndToken struct { + p packets.ControlPacket + t tokenCompletor +} + +// Token defines the interface for the tokens used to indicate when +// actions have completed. +type Token interface { + Wait() bool + WaitTimeout(time.Duration) bool + Error() error +} + +type TokenErrorSetter interface { + setError(error) +} + +type tokenCompletor interface { + Token + TokenErrorSetter + flowComplete() +} + +type baseToken struct { + m sync.RWMutex + complete chan struct{} + err error +} + +// Wait will wait indefinitely for the Token to complete, ie the Publish +// to be sent and confirmed receipt from the broker +func (b *baseToken) Wait() bool { + <-b.complete + return true +} + +// WaitTimeout takes a time.Duration to wait for the flow associated with the +// Token to complete, returns true if it returned before the timeout or +// returns false if the timeout occurred. In the case of a timeout the Token +// does not have an error set in case the caller wishes to wait again +func (b *baseToken) WaitTimeout(d time.Duration) bool { + b.m.Lock() + defer b.m.Unlock() + + timer := time.NewTimer(d) + select { + case <-b.complete: + if !timer.Stop() { + <-timer.C + } + return true + case <-timer.C: + } + + return false +} + +func (b *baseToken) flowComplete() { + select { + case <-b.complete: + default: + close(b.complete) + } +} + +func (b *baseToken) Error() error { + b.m.RLock() + defer b.m.RUnlock() + return b.err +} + +func (b *baseToken) setError(e error) { + b.m.Lock() + b.err = e + b.flowComplete() + b.m.Unlock() +} + +func newToken(tType byte) tokenCompletor { + switch tType { + case packets.Connect: + return &ConnectToken{baseToken: baseToken{complete: make(chan struct{})}} + case packets.Subscribe: + return &SubscribeToken{baseToken: baseToken{complete: make(chan struct{})}, subResult: make(map[string]byte)} + case packets.Publish: + return &PublishToken{baseToken: baseToken{complete: make(chan struct{})}} + case packets.Unsubscribe: + return &UnsubscribeToken{baseToken: baseToken{complete: make(chan struct{})}} + case packets.Disconnect: + return &DisconnectToken{baseToken: baseToken{complete: make(chan struct{})}} + } + return nil +} + +// ConnectToken is an extension of Token containing the extra fields +// required to provide information about calls to Connect() +type ConnectToken struct { + baseToken + returnCode byte + sessionPresent bool +} + +// ReturnCode returns the acknowlegement code in the connack sent +// in response to a Connect() +func (c *ConnectToken) ReturnCode() byte { + c.m.RLock() + defer c.m.RUnlock() + return c.returnCode +} + +// SessionPresent returns a bool representing the value of the +// session present field in the connack sent in response to a Connect() +func (c *ConnectToken) SessionPresent() bool { + c.m.RLock() + defer c.m.RUnlock() + return c.sessionPresent +} + +// PublishToken is an extension of Token containing the extra fields +// required 
to provide information about calls to Publish()
+type PublishToken struct {
+	baseToken
+	messageID uint16
+}
+
+// MessageID returns the MQTT message ID that was assigned to the
+// Publish packet when it was sent to the broker
+func (p *PublishToken) MessageID() uint16 {
+	return p.messageID
+}
+
+// SubscribeToken is an extension of Token containing the extra fields
+// required to provide information about calls to Subscribe()
+type SubscribeToken struct {
+	baseToken
+	subs      []string
+	subResult map[string]byte
+}
+
+// Result returns a map of topics that were subscribed to along with
+// the matching return code from the broker. This is either the Qos
+// value of the subscription or an error code.
+func (s *SubscribeToken) Result() map[string]byte {
+	s.m.RLock()
+	defer s.m.RUnlock()
+	return s.subResult
+}
+
+// UnsubscribeToken is an extension of Token containing the extra fields
+// required to provide information about calls to Unsubscribe()
+type UnsubscribeToken struct {
+	baseToken
+}
+
+// DisconnectToken is an extension of Token containing the extra fields
+// required to provide information about calls to Disconnect()
+type DisconnectToken struct {
+	baseToken
+}
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/topic.go b/vendor/github.com/eclipse/paho.mqtt.golang/topic.go
new file mode 100644
index 00000000000..6fa3ad2ac5f
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/topic.go
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2014 IBM Corp.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ *    Seth Hoenig
+ *    Allan Stockdill-Mander
+ *    Mike Robertson
+ */
+
+package mqtt
+
+import (
+	"errors"
+	"strings"
+)
+
+//ErrInvalidQos is the error returned when a packet is to be sent
+//with an invalid Qos value
+var ErrInvalidQos = errors.New("Invalid QoS")
+
+//ErrInvalidTopicEmptyString is the error returned when a topic string
+//is passed in that is 0 length
+var ErrInvalidTopicEmptyString = errors.New("Invalid Topic; empty string")
+
+//ErrInvalidTopicMultilevel is the error returned when a topic string
+//is passed in that has the multi level wildcard in any position but
+//the last
+var ErrInvalidTopicMultilevel = errors.New("Invalid Topic; multi-level wildcard must be last level")
+
+// Topic Names and Topic Filters
+// The MQTT v3.1.1 spec clarifies a number of ambiguities with regard
+// to the validity of Topic strings.
+// - A Topic must be between 1 and 65535 bytes.
+// - A Topic is case sensitive.
+// - A Topic may contain whitespace.
+// - A Topic containing a leading forward slash is different from a Topic without.
+// - A Topic may be "/" (two levels, both empty string).
+// - A Topic must be UTF-8 encoded.
+// - A Topic may contain any number of levels.
+// - A Topic may contain an empty level (two forward slashes in a row).
+// - A TopicName may not contain a wildcard.
+// - A TopicFilter may only have a # (multi-level) wildcard as the last level.
+// - A TopicFilter may contain any number of + (single-level) wildcards.
+// - A TopicFilter with a # will match the absence of a level
+//    Example: a subscription to "foo/#" will match messages published to "foo".
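+//
+// For example, applying these rules: the filter "sport/+/player1" matches
+// "sport/tennis/player1" but not "sport/tennis/player1/ranking", while
+// "sport/#" matches both, as well as "sport" itself.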
+ +func validateSubscribeMap(subs map[string]byte) ([]string, []byte, error) { + var topics []string + var qoss []byte + for topic, qos := range subs { + if err := validateTopicAndQos(topic, qos); err != nil { + return nil, nil, err + } + topics = append(topics, topic) + qoss = append(qoss, qos) + } + + return topics, qoss, nil +} + +func validateTopicAndQos(topic string, qos byte) error { + if len(topic) == 0 { + return ErrInvalidTopicEmptyString + } + + levels := strings.Split(topic, "/") + for i, level := range levels { + if level == "#" && i != len(levels)-1 { + return ErrInvalidTopicMultilevel + } + } + + if qos < 0 || qos > 2 { + return ErrInvalidQos + } + return nil +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/trace.go b/vendor/github.com/eclipse/paho.mqtt.golang/trace.go new file mode 100644 index 00000000000..195c8173dcf --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/trace.go @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +package mqtt + +type ( + // Logger interface allows implementations to provide to this package any + // object that implements the methods defined in it. + Logger interface { + Println(v ...interface{}) + Printf(format string, v ...interface{}) + } + + // NOOPLogger implements the logger that does not perform any operation + // by default. This allows us to efficiently discard the unwanted messages. + NOOPLogger struct{} +) + +func (NOOPLogger) Println(v ...interface{}) {} +func (NOOPLogger) Printf(format string, v ...interface{}) {} + +// Internal levels of library output that are initialised to not print +// anything but can be overridden by programmer +var ( + ERROR Logger = NOOPLogger{} + CRITICAL Logger = NOOPLogger{} + WARN Logger = NOOPLogger{} + DEBUG Logger = NOOPLogger{} +) diff --git a/vendor/github.com/evalphobia/logrus_sentry/.travis.yml b/vendor/github.com/evalphobia/logrus_sentry/.travis.yml new file mode 100644 index 00000000000..33edbc4c30f --- /dev/null +++ b/vendor/github.com/evalphobia/logrus_sentry/.travis.yml @@ -0,0 +1,17 @@ +sudo: false +language: go +go: + - 1.10.x + - 1.x + - tip +matrix: + allow_failures: + - go: tip +before_install: + - go get github.com/axw/gocov/gocov + - go get github.com/mattn/goveralls + - go get golang.org/x/tools/cmd/cover + - test -z "$(gofmt -s -l . | tee /dev/stderr)" + - go tool vet -all -structtags -shadow . 
+script: + - $HOME/gopath/bin/goveralls -service=travis-ci diff --git a/vendor/github.com/evalphobia/logrus_sentry/LICENSE b/vendor/github.com/evalphobia/logrus_sentry/LICENSE new file mode 100644 index 00000000000..a2301f534a2 --- /dev/null +++ b/vendor/github.com/evalphobia/logrus_sentry/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 logrus_sentry Authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/evalphobia/logrus_sentry/README.md b/vendor/github.com/evalphobia/logrus_sentry/README.md new file mode 100644 index 00000000000..9a65baafa65 --- /dev/null +++ b/vendor/github.com/evalphobia/logrus_sentry/README.md @@ -0,0 +1,163 @@ +Sentry Hook for Logrus :walrus: +---- + +[![GoDoc][1]][2] [![Release][5]][6] [![Build Status][7]][8] [![Coverage Status][9]][10] [![Go Report Card][13]][14] [![Code Climate][19]][20] [![BCH compliance][21]][22] + +[1]: https://godoc.org/github.com/evalphobia/logrus_sentry?status.svg +[2]: https://godoc.org/github.com/evalphobia/logrus_sentry +[4]: LICENSE.md +[5]: https://img.shields.io/github/release/evalphobia/logrus_sentry.svg +[6]: https://github.com/evalphobia/logrus_sentry/releases/latest +[7]: https://travis-ci.org/evalphobia/logrus_sentry.svg?branch=master +[8]: https://travis-ci.org/evalphobia/logrus_sentry +[9]: https://coveralls.io/repos/evalphobia/logrus_sentry/badge.svg?branch=master&service=github +[10]: https://coveralls.io/github/evalphobia/logrus_sentry?branch=master +[11]: https://codecov.io/github/evalphobia/logrus_sentry/coverage.svg?branch=master +[12]: https://codecov.io/github/evalphobia/logrus_sentry?branch=master +[13]: https://goreportcard.com/badge/github.com/evalphobia/logrus_sentry +[14]: https://goreportcard.com/report/github.com/evalphobia/logrus_sentry +[15]: https://img.shields.io/github/downloads/evalphobia/logrus_sentry/total.svg?maxAge=1800 +[16]: https://github.com/evalphobia/logrus_sentry/releases +[17]: https://img.shields.io/github/stars/evalphobia/logrus_sentry.svg +[18]: https://github.com/evalphobia/logrus_sentry/stargazers +[19]: https://codeclimate.com/github/evalphobia/logrus_sentry/badges/gpa.svg +[20]: https://codeclimate.com/github/evalphobia/logrus_sentry +[21]: https://bettercodehub.com/edge/badge/evalphobia/logrus_sentry?branch=master +[22]: https://bettercodehub.com/ + + +[Sentry](https://getsentry.com) provides both self-hosted and hosted +solutions for exception tracking. +Both client and server are +[open source](https://github.com/getsentry/sentry). 
+
+## Usage
+
+Every sentry application defined on the server gets a different
+[DSN](https://www.getsentry.com/docs/). In the example below replace
+`YOUR_DSN` with the one created for your application.
+
+```go
+import (
+    "github.com/sirupsen/logrus"
+    "github.com/evalphobia/logrus_sentry"
+)
+
+func main() {
+    log := logrus.New()
+    hook, err := logrus_sentry.NewSentryHook(YOUR_DSN, []logrus.Level{
+        logrus.PanicLevel,
+        logrus.FatalLevel,
+        logrus.ErrorLevel,
+    })
+
+    if err == nil {
+        log.Hooks.Add(hook)
+    }
+}
+```
+
+If you wish to initialize a SentryHook with tags, you can use the `NewWithTagsSentryHook` constructor to provide default tags:
+
+```go
+tags := map[string]string{
+    "site": "example.com",
+}
+levels := []logrus.Level{
+    logrus.PanicLevel,
+    logrus.FatalLevel,
+    logrus.ErrorLevel,
+}
+hook, err := logrus_sentry.NewWithTagsSentryHook(YOUR_DSN, tags, levels)
+```
+
+If you wish to initialize a SentryHook with an already initialized raven client, you can use
+the `NewWithClientSentryHook` constructor:
+
+```go
+import (
+    "github.com/sirupsen/logrus"
+    "github.com/evalphobia/logrus_sentry"
+    "github.com/getsentry/raven-go"
+)
+
+func main() {
+    log := logrus.New()
+
+    client, err := raven.New(YOUR_DSN)
+    if err != nil {
+        log.Fatal(err)
+    }
+
+    hook, err := logrus_sentry.NewWithClientSentryHook(client, []logrus.Level{
+        logrus.PanicLevel,
+        logrus.FatalLevel,
+        logrus.ErrorLevel,
+    })
+
+    if err == nil {
+        log.Hooks.Add(hook)
+    }
+}
+```
+
+## Special fields
+
+Some logrus fields have a special meaning in this hook and are processed specially by Sentry.
+
+| Field key | Description |
+| ------------- | ------------- |
+| `event_id` | Each logged event is identified by the `event_id`, which is a hexadecimal string representing a UUID4 value. You can manually specify the identifier of a log event by supplying this field. The `event_id` string should be in one of the following UUID formats: `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`, `xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx` or `urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx` |
+| `user_name` | Name of the user who is in the context of the event |
+| `user_email` | Email of the user who is in the context of the event |
+| `user_id` | ID of the user who is in the context of the event |
+| `user_ip` | IP of the user who is in the context of the event |
+| `server_name` | Also known as hostname, it is the name of the server which is logging the event (hostname.example.com) |
+| `tags` | `tags` is a `raven.Tags` struct from `github.com/getsentry/raven-go` and overrides the default tags data |
+| `fingerprint` | `fingerprint` is a string array that allows you to affect sentry's grouping of events as detailed in the [sentry documentation](https://docs.sentry.io/learn/rollups/#customize-grouping-with-fingerprints) |
+| `logger` | `logger` is the part of the application which is logging the event. In Go this usually means setting it to the name of the package. |
+| `http_request` | `http_request` is the incoming request (`*http.Request`). The detailed request data is sent to Sentry. |
+
+## Timeout
+
+`Timeout` is the time the sentry hook will wait for a response
+from the sentry server. If this time elapses with no response from
+the server an error will be returned.
+
+If `Timeout` is set to 0 the SentryHook will not wait for a reply
+and will assume a correct delivery.
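+
+`Timeout` is also ignored by the asynchronous constructors shipped in this
+package (`NewAsyncSentryHook`, `NewAsyncWithTagsSentryHook` and
+`NewAsyncWithClientSentryHook`): in async mode `Fire` returns immediately and
+delivery errors are reported through handlers registered with
+`AddErrorHandler`. A minimal sketch (`YOUR_DSN` is a placeholder, as above):
+
+```go
+hook, err := logrus_sentry.NewAsyncSentryHook(YOUR_DSN, []logrus.Level{
+    logrus.ErrorLevel,
+})
+if err == nil {
+    log.Hooks.Add(hook)
+    // Flush blocks until all queued events have been sent.
+    defer hook.Flush()
+}
+```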
+ +The SentryHook has a default timeout of `100 milliseconds` when created +with a call to `NewSentryHook`. This can be changed by assigning a value to the `Timeout` field: + +```go +hook, _ := logrus_sentry.NewSentryHook(...) +hook.Timeout = 20*time.Second +``` + +## Enabling Stacktraces + +By default the hook will not send any stacktraces. However, this can be enabled +with: + +```go +hook, _ := logrus_sentry.NewSentryHook(...) +hook.StacktraceConfiguration.Enable = true +``` + +Subsequent calls to `logger.Error` and above will create a stacktrace. + +Other configuration options are: +- `StacktraceConfiguration.Level` the logrus level at which to start capturing stacktraces. +- `StacktraceConfiguration.Skip` how many stack frames to skip before stacktrace starts recording. +- `StacktraceConfiguration.Context` the number of lines to include around a stack frame for context. +- `StacktraceConfiguration.InAppPrefixes` the prefixes that will be matched against the stack frame to identify it as in_app +- `StacktraceConfiguration.IncludeErrorBreadcrumb` whether to create a breadcrumb with the full text of error diff --git a/vendor/github.com/evalphobia/logrus_sentry/data_field.go b/vendor/github.com/evalphobia/logrus_sentry/data_field.go new file mode 100644 index 00000000000..37c89fbb66f --- /dev/null +++ b/vendor/github.com/evalphobia/logrus_sentry/data_field.go @@ -0,0 +1,137 @@ +package logrus_sentry + +import ( + "net/http" + + "github.com/getsentry/raven-go" + "github.com/sirupsen/logrus" +) + +const ( + fieldEventID = "event_id" + fieldFingerprint = "fingerprint" + fieldLogger = "logger" + fieldServerName = "server_name" + fieldTags = "tags" + fieldHTTPRequest = "http_request" + fieldUser = "user" +) + +type dataField struct { + data logrus.Fields + omitList map[string]struct{} +} + +func newDataField(data logrus.Fields) *dataField { + return &dataField{ + data: data, + omitList: make(map[string]struct{}), + } +} + +func (d *dataField) len() int { + return len(d.data) +} + +func (d *dataField) isOmit(key string) bool { + _, ok := d.omitList[key] + return ok +} + +func (d *dataField) getLogger() (string, bool) { + if logger, ok := d.data[fieldLogger].(string); ok { + d.omitList[fieldLogger] = struct{}{} + return logger, true + } + return "", false +} + +func (d *dataField) getServerName() (string, bool) { + if serverName, ok := d.data[fieldServerName].(string); ok { + d.omitList[fieldServerName] = struct{}{} + return serverName, true + } + return "", false +} + +func (d *dataField) getTags() (raven.Tags, bool) { + if tags, ok := d.data[fieldTags].(raven.Tags); ok { + d.omitList[fieldTags] = struct{}{} + return tags, true + } + return nil, false +} + +func (d *dataField) getFingerprint() ([]string, bool) { + if fingerprint, ok := d.data[fieldFingerprint].([]string); ok { + d.omitList[fieldFingerprint] = struct{}{} + return fingerprint, true + } + return nil, false +} + +func (d *dataField) getError() (error, bool) { + if err, ok := d.data[logrus.ErrorKey].(error); ok { + d.omitList[logrus.ErrorKey] = struct{}{} + return err, true + } + return nil, false +} + +func (d *dataField) getHTTPRequest() (*raven.Http, bool) { + if req, ok := d.data[fieldHTTPRequest].(*http.Request); ok { + d.omitList[fieldHTTPRequest] = struct{}{} + return raven.NewHttp(req), true + } + if req, ok := d.data[fieldHTTPRequest].(*raven.Http); ok { + d.omitList[fieldHTTPRequest] = struct{}{} + return req, true + } + return nil, false +} + +func (d *dataField) getEventID() (string, bool) { + eventID, ok := 
d.data[fieldEventID].(string) + if !ok { + return "", false + } + + //verify eventID is 32 characters hexadecimal string (UUID4) + uuid := parseUUID(eventID) + if uuid == nil { + return "", false + } + + d.omitList[fieldEventID] = struct{}{} + return uuid.noDashString(), true +} + +func (d *dataField) getUser() (*raven.User, bool) { + data := d.data + if v, ok := data[fieldUser]; ok { + switch val := v.(type) { + case *raven.User: + d.omitList[fieldUser] = struct{}{} + return val, true + case raven.User: + d.omitList[fieldUser] = struct{}{} + return &val, true + } + } + + username, _ := data["user_name"].(string) + email, _ := data["user_email"].(string) + id, _ := data["user_id"].(string) + ip, _ := data["user_ip"].(string) + + if username == "" && email == "" && id == "" && ip == "" { + return nil, false + } + + return &raven.User{ + ID: id, + Username: username, + Email: email, + IP: ip, + }, true +} diff --git a/vendor/github.com/evalphobia/logrus_sentry/sentry.go b/vendor/github.com/evalphobia/logrus_sentry/sentry.go new file mode 100644 index 00000000000..5d314b97e6e --- /dev/null +++ b/vendor/github.com/evalphobia/logrus_sentry/sentry.go @@ -0,0 +1,424 @@ +package logrus_sentry + +import ( + "encoding/json" + "fmt" + "runtime" + "sync" + "time" + + raven "github.com/getsentry/raven-go" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +var ( + severityMap = map[logrus.Level]raven.Severity{ + logrus.TraceLevel: raven.DEBUG, + logrus.DebugLevel: raven.DEBUG, + logrus.InfoLevel: raven.INFO, + logrus.WarnLevel: raven.WARNING, + logrus.ErrorLevel: raven.ERROR, + logrus.FatalLevel: raven.FATAL, + logrus.PanicLevel: raven.FATAL, + } +) + +// SentryHook delivers logs to a sentry server. +type SentryHook struct { + // Timeout sets the time to wait for a delivery error from the sentry server. + // If this is set to zero the server will not wait for any response and will + // consider the message correctly sent. + // + // This is ignored for asynchronous hooks. If you want to set a timeout when + // using an async hook (to bound the length of time that hook.Flush can take), + // you probably want to create your own raven.Client and set + // ravenClient.Transport.(*raven.HTTPTransport).Client.Timeout to set a + // timeout on the underlying HTTP request instead. + Timeout time.Duration + StacktraceConfiguration StackTraceConfiguration + + client *raven.Client + levels []logrus.Level + + serverName string + ignoreFields map[string]struct{} + extraFilters map[string]func(interface{}) interface{} + errorHandlers []func(entry *logrus.Entry, err error) + + asynchronous bool + + mu sync.RWMutex + wg sync.WaitGroup +} + +// The Stacktracer interface allows an error type to return a raven.Stacktrace. +type Stacktracer interface { + GetStacktrace() *raven.Stacktrace +} + +type causer interface { + Cause() error +} + +type pkgErrorStackTracer interface { + StackTrace() errors.StackTrace +} + +// StackTraceConfiguration allows for configuring stacktraces +type StackTraceConfiguration struct { + // whether stacktraces should be enabled + Enable bool + // the level at which to start capturing stacktraces + Level logrus.Level + // how many stack frames to skip before stacktrace starts recording + Skip int + // the number of lines to include around a stack frame for context + Context int + // the prefixes that will be matched against the stack frame. 
+ // if the stack frame's package matches one of these prefixes + // sentry will identify the stack frame as "in_app" + InAppPrefixes []string + // whether sending exception type should be enabled. + SendExceptionType bool + // whether the exception type and message should be switched. + SwitchExceptionTypeAndMessage bool + // whether to include a breadcrumb with the full error stack + IncludeErrorBreadcrumb bool +} + +// NewSentryHook creates a hook to be added to an instance of logger +// and initializes the raven client. +// This method sets the timeout to 100 milliseconds. +func NewSentryHook(DSN string, levels []logrus.Level) (*SentryHook, error) { + client, err := raven.New(DSN) + if err != nil { + return nil, err + } + return NewWithClientSentryHook(client, levels) +} + +// NewWithTagsSentryHook creates a hook with tags to be added to an instance +// of logger and initializes the raven client. This method sets the timeout to +// 100 milliseconds. +func NewWithTagsSentryHook(DSN string, tags map[string]string, levels []logrus.Level) (*SentryHook, error) { + client, err := raven.NewWithTags(DSN, tags) + if err != nil { + return nil, err + } + return NewWithClientSentryHook(client, levels) +} + +// NewWithClientSentryHook creates a hook using an initialized raven client. +// This method sets the timeout to 100 milliseconds. +func NewWithClientSentryHook(client *raven.Client, levels []logrus.Level) (*SentryHook, error) { + return &SentryHook{ + Timeout: 100 * time.Millisecond, + StacktraceConfiguration: StackTraceConfiguration{ + Enable: false, + Level: logrus.ErrorLevel, + Skip: 6, + Context: 0, + InAppPrefixes: nil, + SendExceptionType: true, + }, + client: client, + levels: levels, + ignoreFields: make(map[string]struct{}), + extraFilters: make(map[string]func(interface{}) interface{}), + }, nil +} + +// NewAsyncSentryHook creates a hook same as NewSentryHook, but in asynchronous +// mode. +func NewAsyncSentryHook(DSN string, levels []logrus.Level) (*SentryHook, error) { + hook, err := NewSentryHook(DSN, levels) + return setAsync(hook), err +} + +// NewAsyncWithTagsSentryHook creates a hook same as NewWithTagsSentryHook, but +// in asynchronous mode. +func NewAsyncWithTagsSentryHook(DSN string, tags map[string]string, levels []logrus.Level) (*SentryHook, error) { + hook, err := NewWithTagsSentryHook(DSN, tags, levels) + return setAsync(hook), err +} + +// NewAsyncWithClientSentryHook creates a hook same as NewWithClientSentryHook, +// but in asynchronous mode. 
+func NewAsyncWithClientSentryHook(client *raven.Client, levels []logrus.Level) (*SentryHook, error) { + hook, err := NewWithClientSentryHook(client, levels) + return setAsync(hook), err +} + +func setAsync(hook *SentryHook) *SentryHook { + if hook == nil { + return nil + } + hook.asynchronous = true + return hook +} + +// Fire is called when an event should be sent to sentry +// Special fields that sentry uses to give more information to the server +// are extracted from entry.Data (if they are found) +// These fields are: error, logger, server_name, http_request, tags +func (hook *SentryHook) Fire(entry *logrus.Entry) error { + hook.mu.RLock() // Allow multiple go routines to log simultaneously + defer hook.mu.RUnlock() + + df := newDataField(entry.Data) + + err, hasError := df.getError() + var crumbs *Breadcrumbs + if hasError && hook.StacktraceConfiguration.IncludeErrorBreadcrumb { + crumbs = &Breadcrumbs{ + Values: []Value{{ + Timestamp: int64(time.Now().Unix()), + Type: "error", + Message: fmt.Sprintf("%+v", err), + }}, + } + } + + packet := raven.NewPacketWithExtra(entry.Message, nil, crumbs) + packet.Timestamp = raven.Timestamp(entry.Time) + packet.Level = severityMap[entry.Level] + packet.Platform = "go" + + // set special fields + if hook.serverName != "" { + packet.ServerName = hook.serverName + } + if logger, ok := df.getLogger(); ok { + packet.Logger = logger + } + if serverName, ok := df.getServerName(); ok { + packet.ServerName = serverName + } + if eventID, ok := df.getEventID(); ok { + packet.EventID = eventID + } + if tags, ok := df.getTags(); ok { + packet.Tags = tags + } + if fingerprint, ok := df.getFingerprint(); ok { + packet.Fingerprint = fingerprint + } + if req, ok := df.getHTTPRequest(); ok { + packet.Interfaces = append(packet.Interfaces, req) + } + if user, ok := df.getUser(); ok { + packet.Interfaces = append(packet.Interfaces, user) + } + + // set stacktrace data + stConfig := &hook.StacktraceConfiguration + if stConfig.Enable && entry.Level <= stConfig.Level { + if err, ok := df.getError(); ok { + var currentStacktrace *raven.Stacktrace + currentStacktrace = hook.findStacktrace(err) + if currentStacktrace == nil { + currentStacktrace = raven.NewStacktrace(stConfig.Skip, stConfig.Context, stConfig.InAppPrefixes) + } + cause := errors.Cause(err) + if cause == nil { + cause = err + } + exc := raven.NewException(cause, currentStacktrace) + if !stConfig.SendExceptionType { + exc.Type = "" + } + if stConfig.SwitchExceptionTypeAndMessage { + packet.Interfaces = append(packet.Interfaces, currentStacktrace) + packet.Culprit = exc.Type + ": " + currentStacktrace.Culprit() + } else { + packet.Interfaces = append(packet.Interfaces, exc) + packet.Culprit = err.Error() + } + } else { + currentStacktrace := raven.NewStacktrace(stConfig.Skip, stConfig.Context, stConfig.InAppPrefixes) + if currentStacktrace != nil { + packet.Interfaces = append(packet.Interfaces, currentStacktrace) + } + } + } else { + // set the culprit even when the stack trace is disabled, as long as we have an error + if err, ok := df.getError(); ok { + packet.Culprit = err.Error() + } + } + + // set other fields + dataExtra := hook.formatExtraData(df) + if packet.Extra == nil { + packet.Extra = dataExtra + } else { + for k, v := range dataExtra { + packet.Extra[k] = v + } + } + + _, errCh := hook.client.Capture(packet, nil) + + switch { + case hook.asynchronous: + // Our use of hook.mu guarantees that we are following the WaitGroup rule of + // not calling Add in parallel with Wait. 
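+		// (Fire holds the read lock here, while Flush takes the write lock
+		// before calling Wait, so Add can never race with Wait.)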
+ hook.wg.Add(1) + go func() { + if err := <-errCh; err != nil { + for _, handlerFn := range hook.errorHandlers { + handlerFn(entry, err) + } + } + hook.wg.Done() + }() + return nil + case hook.Timeout == 0: + return nil + default: + timeout := hook.Timeout + timeoutCh := time.After(timeout) + select { + case err := <-errCh: + for _, handlerFn := range hook.errorHandlers { + handlerFn(entry, err) + } + return err + case <-timeoutCh: + return fmt.Errorf("no response from sentry server in %s", timeout) + } + } +} + +// Flush waits for the log queue to empty. This function only does anything in +// asynchronous mode. +func (hook *SentryHook) Flush() { + if !hook.asynchronous { + return + } + hook.mu.Lock() // Claim exclusive access; any logging goroutines will block until the flush completes + defer hook.mu.Unlock() + + hook.wg.Wait() +} + +func (hook *SentryHook) findStacktrace(err error) *raven.Stacktrace { + var stacktrace *raven.Stacktrace + var stackErr errors.StackTrace + for err != nil { + // Find the earliest *raven.Stacktrace, or error.StackTrace + if tracer, ok := err.(Stacktracer); ok { + stacktrace = tracer.GetStacktrace() + stackErr = nil + } else if tracer, ok := err.(pkgErrorStackTracer); ok { + stacktrace = nil + stackErr = tracer.StackTrace() + } + if cause, ok := err.(causer); ok { + err = cause.Cause() + } else { + break + } + } + if stackErr != nil { + stacktrace = hook.convertStackTrace(stackErr) + } + return stacktrace +} + +// convertStackTrace converts an errors.StackTrace into a natively consumable +// *raven.Stacktrace +func (hook *SentryHook) convertStackTrace(st errors.StackTrace) *raven.Stacktrace { + stConfig := &hook.StacktraceConfiguration + stFrames := []errors.Frame(st) + frames := make([]*raven.StacktraceFrame, 0, len(stFrames)) + for i := range stFrames { + pc := uintptr(stFrames[i]) + fn := runtime.FuncForPC(pc) + file, line := fn.FileLine(pc) + frame := raven.NewStacktraceFrame(pc, fn.Name(), file, line, stConfig.Context, stConfig.InAppPrefixes) + if frame != nil { + frames = append(frames, frame) + } + } + + // Sentry wants the frames with the oldest first, so reverse them + for i, j := 0, len(frames)-1; i < j; i, j = i+1, j-1 { + frames[i], frames[j] = frames[j], frames[i] + } + return &raven.Stacktrace{Frames: frames} +} + +// Levels returns the available logging levels. +func (hook *SentryHook) Levels() []logrus.Level { + return hook.levels +} + +// AddIgnore adds field name to ignore. +func (hook *SentryHook) AddIgnore(name string) { + hook.ignoreFields[name] = struct{}{} +} + +// AddExtraFilter adds a custom filter function. +func (hook *SentryHook) AddExtraFilter(name string, fn func(interface{}) interface{}) { + hook.extraFilters[name] = fn +} + +// AddErrorHandler adds a error handler function used when Sentry returns error. +func (hook *SentryHook) AddErrorHandler(fn func(entry *logrus.Entry, err error)) { + hook.errorHandlers = append(hook.errorHandlers, fn) +} + +func (hook *SentryHook) formatExtraData(df *dataField) (result map[string]interface{}) { + // create a map for passing to Sentry's extra data + result = make(map[string]interface{}, df.len()) + for k, v := range df.data { + if df.isOmit(k) { + continue // skip already used special fields + } + if _, ok := hook.ignoreFields[k]; ok { + continue + } + + if fn, ok := hook.extraFilters[k]; ok { + v = fn(v) // apply custom filter + } else { + v = formatData(v) // use default formatter + } + result[k] = v + } + return result +} + +// formatData returns value as a suitable format. 
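+// json.Marshaler values are passed through unchanged, errors are flattened
+// with Error() and fmt.Stringers with String(), so every value serializes
+// cleanly in the packet's extra data.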
+func formatData(value interface{}) (formatted interface{}) {
+	switch value := value.(type) {
+	case json.Marshaler:
+		return value
+	case error:
+		return value.Error()
+	case fmt.Stringer:
+		return value.String()
+	default:
+		return value
+	}
+}
+
+// utility types for breadcrumb support
+type Breadcrumbs struct {
+	Values []Value `json:"values"`
+}
+
+type Value struct {
+	Timestamp int64       `json:"timestamp"`
+	Type      string      `json:"type"`
+	Message   string      `json:"message"`
+	Category  string      `json:"category"`
+	Level     string      `json:"level"`
+	Data      interface{} `json:"data"`
+}
+
+func (b *Breadcrumbs) Class() string {
+	return "breadcrumbs"
+}
diff --git a/vendor/github.com/evalphobia/logrus_sentry/sentry_setter.go b/vendor/github.com/evalphobia/logrus_sentry/sentry_setter.go
new file mode 100644
index 00000000000..1912e53f8a9
--- /dev/null
+++ b/vendor/github.com/evalphobia/logrus_sentry/sentry_setter.go
@@ -0,0 +1,55 @@
+package logrus_sentry
+
+import (
+	"github.com/getsentry/raven-go"
+)
+
+// SetDefaultLoggerName sets default logger name tag.
+func (hook *SentryHook) SetDefaultLoggerName(name string) {
+	hook.client.SetDefaultLoggerName(name)
+}
+
+// SetEnvironment sets environment tag.
+func (hook *SentryHook) SetEnvironment(environment string) {
+	hook.client.SetEnvironment(environment)
+}
+
+// SetHttpContext sets the http context.
+func (hook *SentryHook) SetHttpContext(h *raven.Http) {
+	hook.client.SetHttpContext(h)
+}
+
+// SetIgnoreErrors sets ignoreErrorsRegexp.
+func (hook *SentryHook) SetIgnoreErrors(errs ...string) error {
+	return hook.client.SetIgnoreErrors(errs)
+}
+
+// SetIncludePaths sets includePaths.
+func (hook *SentryHook) SetIncludePaths(p []string) {
+	hook.client.SetIncludePaths(p)
+}
+
+// SetRelease sets release tag.
+func (hook *SentryHook) SetRelease(release string) {
+	hook.client.SetRelease(release)
+}
+
+// SetSampleRate sets sampling rate.
+func (hook *SentryHook) SetSampleRate(rate float32) error {
+	return hook.client.SetSampleRate(rate)
+}
+
+// SetTagsContext sets tags.
+func (hook *SentryHook) SetTagsContext(t map[string]string) {
+	hook.client.SetTagsContext(t)
+}
+
+// SetUserContext sets user.
+func (hook *SentryHook) SetUserContext(u *raven.User) {
+	hook.client.SetUserContext(u)
+}
+
+// SetServerName sets server_name tag.
+func (hook *SentryHook) SetServerName(serverName string) {
+	hook.serverName = serverName
+}
diff --git a/vendor/github.com/evalphobia/logrus_sentry/utils.go b/vendor/github.com/evalphobia/logrus_sentry/utils.go
new file mode 100644
index 00000000000..8b4a9095d3b
--- /dev/null
+++ b/vendor/github.com/evalphobia/logrus_sentry/utils.go
@@ -0,0 +1,135 @@
+package logrus_sentry
+
+import (
+	"fmt"
+	"strings"
+)
+
+/*
+Copyright (c) 2009,2014 Google Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC +// 4122. +type uuid []byte + +// parseUUID decodes s into a UUID or returns nil. Both the UUID form of +// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and +// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx and +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded. +func parseUUID(s string) uuid { + //If it is in no dash format "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + if len(s) == 32 { + uuid := make([]byte, 16) + for i, x := range []int{ + 0, 2, 4, 6, 8, 10, + 12, 14, 16, 18, 20, + 22, 24, 26, 28, 30} { + if v, ok := xtob(s[x:]); !ok { + return nil + } else { + uuid[i] = v + } + } + return uuid + } + + if len(s) == 36+9 { + if strings.ToLower(s[:9]) != "urn:uuid:" { + return nil + } + s = s[9:] + } else if len(s) != 36 { + return nil + } + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return nil + } + uuid := make([]byte, 16) + for i, x := range []int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + if v, ok := xtob(s[x:]); !ok { + return nil + } else { + uuid[i] = v + } + } + return uuid +} + +// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// , or "" if uuid is invalid. +func (uuid uuid) string() string { + if uuid == nil || len(uuid) != 16 { + return "" + } + b := []byte(uuid) + return fmt.Sprintf("%08x-%04x-%04x-%04x-%012x", + b[:4], b[4:6], b[6:8], b[8:10], b[10:]) +} + +func (uuid uuid) noDashString() string { + if uuid == nil || len(uuid) != 16 { + return "" + } + b := []byte(uuid) + return fmt.Sprintf("%08x%04x%04x%04x%012x", + b[:4], b[4:6], b[6:8], b[8:10], b[10:]) +} + +// xvalues returns the value of a byte as a hexadecimal digit or 255. 
+var xvalues = []byte{
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255,
+	255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+}
+
+// xtob converts the first two hex bytes of x into a byte.
+func xtob(x string) (byte, bool) {
+	b1 := xvalues[x[0]]
+	b2 := xvalues[x[1]]
+	return (b1 << 4) | b2, b1 != 255 && b2 != 255
+}
diff --git a/vendor/github.com/facebookgo/clock/LICENSE b/vendor/github.com/facebookgo/clock/LICENSE
new file mode 100644
index 00000000000..ce212cb1cee
--- /dev/null
+++ b/vendor/github.com/facebookgo/clock/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Ben Johnson
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
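The `xvalues` table in logrus_sentry's utils.go above gives `xtob` table-driven hex decoding: one lookup per character, with 255 as the sentinel for any byte that is not a hex digit, which is how `parseUUID` rejects malformed input without `strconv` or `encoding/hex`. A minimal self-contained sketch of the same technique; the names `hexValues` and `hexPairToByte` are illustrative (the vendored helpers are unexported):

```go
package main

import "fmt"

// hexValues maps each ASCII byte to its hex digit value, or 255 for any
// byte that is not a hex digit (the same shape as the vendored xvalues table).
var hexValues [256]byte

func init() {
	for i := range hexValues {
		hexValues[i] = 255
	}
	for c := '0'; c <= '9'; c++ {
		hexValues[c] = byte(c - '0')
	}
	for c := 'a'; c <= 'f'; c++ {
		hexValues[c] = byte(c-'a') + 10
	}
	for c := 'A'; c <= 'F'; c++ {
		hexValues[c] = byte(c-'A') + 10
	}
}

// hexPairToByte decodes the first two bytes of s as one hex-encoded byte,
// the same contract as the vendored xtob: the bool is false on bad input.
func hexPairToByte(s string) (byte, bool) {
	b1 := hexValues[s[0]]
	b2 := hexValues[s[1]]
	return (b1 << 4) | b2, b1 != 255 && b2 != 255
}

func main() {
	fmt.Println(hexPairToByte("1f")) // 31 true
	fmt.Println(hexPairToByte("zz")) // 255 false; the value is meaningless when ok is false
}
```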
diff --git a/vendor/github.com/facebookgo/clock/README.md b/vendor/github.com/facebookgo/clock/README.md
new file mode 100644
index 00000000000..5d4f4fe72e7
--- /dev/null
+++ b/vendor/github.com/facebookgo/clock/README.md
@@ -0,0 +1,104 @@
+clock [![Build Status](https://drone.io/github.com/benbjohnson/clock/status.png)](https://drone.io/github.com/benbjohnson/clock/latest) [![Coverage Status](https://coveralls.io/repos/benbjohnson/clock/badge.png?branch=master)](https://coveralls.io/r/benbjohnson/clock?branch=master) [![GoDoc](https://godoc.org/github.com/benbjohnson/clock?status.png)](https://godoc.org/github.com/benbjohnson/clock) ![Project status](http://img.shields.io/status/experimental.png?color=red)
+=====
+
+Clock is a small library for mocking time in Go. It provides an interface
+around the standard library's [`time`][time] package so that the application
+can use the realtime clock while tests can use the mock clock.
+
+[time]: http://golang.org/pkg/time/
+
+
+## Usage
+
+### Realtime Clock
+
+Your application can maintain a `Clock` variable that will allow realtime and
+mock clocks to be interchangeable. For example, if you had an `Application` type:
+
+```go
+import "github.com/benbjohnson/clock"
+
+type Application struct {
+	Clock clock.Clock
+}
+```
+
+You could initialize it to use the realtime clock like this:
+
+```go
+var app Application
+app.Clock = clock.New()
+...
+```
+
+Then all timers and time-related functionality should be performed from the
+`Clock` variable.
+
+
+### Mocking time
+
+In your tests, you will want to use a `Mock` clock:
+
+```go
+import (
+	"testing"
+
+	"github.com/benbjohnson/clock"
+)
+
+func TestApplication_DoSomething(t *testing.T) {
+	mock := clock.NewMock()
+	app := Application{Clock: mock}
+	...
+}
+```
+
+Now that you've initialized your application to use the mock clock, you can
+adjust the time programmatically. The mock clock always starts from the Unix
+epoch (midnight, Jan 1, 1970 UTC).
+
+
+### Controlling time
+
+The mock clock provides the same functions that the standard library's `time`
+package provides. For example, to find the current time, you use the `Now()`
+function:
+
+```go
+mock := clock.NewMock()
+
+// Find the current time.
+mock.Now().UTC() // 1970-01-01 00:00:00 +0000 UTC
+
+// Move the clock forward.
+mock.Add(2 * time.Hour)
+
+// Check the time again. It's 2 hours later!
+mock.Now().UTC() // 1970-01-01 02:00:00 +0000 UTC
+```
+
+Timers and Tickers are also controlled by this same mock clock. They will only
+execute when the clock is moved forward:
+
+```go
+mock := clock.NewMock()
+count := 0
+
+// Kick off a ticker to increment every 1 mock second.
+go func() {
+	ticker := mock.Ticker(1 * time.Second)
+	for {
+		<-ticker.C
+		count++
+	}
+}()
+runtime.Gosched()
+
+// Move the clock forward 10 seconds.
+mock.Add(10 * time.Second)
+
+// This prints 10.
+fmt.Println(count)
+```
+
+
diff --git a/vendor/github.com/facebookgo/clock/clock.go b/vendor/github.com/facebookgo/clock/clock.go
new file mode 100644
index 00000000000..bca1a7ba8b3
--- /dev/null
+++ b/vendor/github.com/facebookgo/clock/clock.go
@@ -0,0 +1,363 @@
+package clock
+
+import (
+	"runtime"
+	"sort"
+	"sync"
+	"time"
+)
+
+// Clock represents an interface to the functions in the standard library time
+// package. Two implementations are available in the clock package. The first
+// is a real-time clock which simply wraps the time package's functions. The
+// second is a mock clock which will only make forward progress when
+// programmatically adjusted.
+type Clock interface {
+	After(d time.Duration) <-chan time.Time
+	AfterFunc(d time.Duration, f func()) *Timer
+	Now() time.Time
+	Sleep(d time.Duration)
+	Tick(d time.Duration) <-chan time.Time
+	Ticker(d time.Duration) *Ticker
+	Timer(d time.Duration) *Timer
+}
+
+// New returns an instance of a real-time clock.
+func New() Clock {
+	return &clock{}
+}
+
+// clock implements a real-time clock by simply wrapping the time package functions.
+type clock struct{}
+
+func (c *clock) After(d time.Duration) <-chan time.Time { return time.After(d) }
+
+func (c *clock) AfterFunc(d time.Duration, f func()) *Timer {
+	return &Timer{timer: time.AfterFunc(d, f)}
+}
+
+func (c *clock) Now() time.Time { return time.Now() }
+
+func (c *clock) Sleep(d time.Duration) { time.Sleep(d) }
+
+func (c *clock) Tick(d time.Duration) <-chan time.Time { return time.Tick(d) }
+
+func (c *clock) Ticker(d time.Duration) *Ticker {
+	t := time.NewTicker(d)
+	return &Ticker{C: t.C, ticker: t}
+}
+
+func (c *clock) Timer(d time.Duration) *Timer {
+	t := time.NewTimer(d)
+	return &Timer{C: t.C, timer: t}
+}
+
+// Mock represents a mock clock that only moves forward programmatically.
+// It can be preferable to a real-time clock when testing time-based functionality.
+type Mock struct {
+	mu     sync.Mutex
+	now    time.Time   // current time
+	timers clockTimers // tickers & timers
+
+	calls      Calls
+	waiting    []waiting
+	callsMutex sync.Mutex
+}
+
+// NewMock returns an instance of a mock clock.
+// The current time of the mock clock on initialization is the Unix epoch.
+func NewMock() *Mock {
+	return &Mock{now: time.Unix(0, 0)}
+}
+
+// Add moves the current time of the mock clock forward by the duration.
+// This should only be called from a single goroutine at a time.
+func (m *Mock) Add(d time.Duration) {
+	// Calculate the final current time.
+	t := m.now.Add(d)
+
+	// Continue to execute timers until there are no more before the new time.
+	for {
+		if !m.runNextTimer(t) {
+			break
+		}
+	}
+
+	// Ensure that we end with the new time.
+	m.mu.Lock()
+	m.now = t
+	m.mu.Unlock()
+
+	// Give a small buffer to make sure the other goroutines get handled.
+	gosched()
+}
+
+// runNextTimer executes the next timer in chronological order and moves the
+// current time to the timer's next tick time. A timer is not executed if its
+// next tick time is after the max time. Returns true if a timer was executed.
+func (m *Mock) runNextTimer(max time.Time) bool {
+	m.mu.Lock()
+
+	// Sort timers by time.
+	sort.Sort(m.timers)
+
+	// If we have no more timers then exit.
+	if len(m.timers) == 0 {
+		m.mu.Unlock()
+		return false
+	}
+
+	// Retrieve next timer. Exit if next tick is after new time.
+	t := m.timers[0]
+	if t.Next().After(max) {
+		m.mu.Unlock()
+		return false
+	}
+
+	// Move "now" forward and unlock clock.
+	m.now = t.Next()
+	m.mu.Unlock()
+
+	// Execute timer.
+	t.Tick(m.now)
+	return true
+}
+
+// After waits for the duration to elapse and then sends the current time on the returned channel.
+func (m *Mock) After(d time.Duration) <-chan time.Time {
+	defer m.inc(&m.calls.After)
+	return m.Timer(d).C
+}
+
+// AfterFunc waits for the duration to elapse and then executes a function.
+// A Timer is returned that can be stopped.
+func (m *Mock) AfterFunc(d time.Duration, f func()) *Timer {
+	defer m.inc(&m.calls.AfterFunc)
+	t := m.Timer(d)
+	t.C = nil
+	t.fn = f
+	return t
+}
+
+// Now returns the current wall time on the mock clock.
+func (m *Mock) Now() time.Time { + defer m.inc(&m.calls.Now) + m.mu.Lock() + defer m.mu.Unlock() + return m.now +} + +// Sleep pauses the goroutine for the given duration on the mock clock. +// The clock must be moved forward in a separate goroutine. +func (m *Mock) Sleep(d time.Duration) { + defer m.inc(&m.calls.Sleep) + <-m.After(d) +} + +// Tick is a convenience function for Ticker(). +// It will return a ticker channel that cannot be stopped. +func (m *Mock) Tick(d time.Duration) <-chan time.Time { + defer m.inc(&m.calls.Tick) + return m.Ticker(d).C +} + +// Ticker creates a new instance of Ticker. +func (m *Mock) Ticker(d time.Duration) *Ticker { + defer m.inc(&m.calls.Ticker) + m.mu.Lock() + defer m.mu.Unlock() + ch := make(chan time.Time) + t := &Ticker{ + C: ch, + c: ch, + mock: m, + d: d, + next: m.now.Add(d), + } + m.timers = append(m.timers, (*internalTicker)(t)) + return t +} + +// Timer creates a new instance of Timer. +func (m *Mock) Timer(d time.Duration) *Timer { + defer m.inc(&m.calls.Timer) + m.mu.Lock() + defer m.mu.Unlock() + ch := make(chan time.Time) + t := &Timer{ + C: ch, + c: ch, + mock: m, + next: m.now.Add(d), + } + m.timers = append(m.timers, (*internalTimer)(t)) + return t +} + +func (m *Mock) removeClockTimer(t clockTimer) { + m.mu.Lock() + defer m.mu.Unlock() + for i, timer := range m.timers { + if timer == t { + copy(m.timers[i:], m.timers[i+1:]) + m.timers[len(m.timers)-1] = nil + m.timers = m.timers[:len(m.timers)-1] + break + } + } + sort.Sort(m.timers) +} + +func (m *Mock) inc(addr *uint32) { + m.callsMutex.Lock() + defer m.callsMutex.Unlock() + *addr++ + var newWaiting []waiting + for _, w := range m.waiting { + if m.calls.atLeast(w.expected) { + close(w.done) + continue + } + newWaiting = append(newWaiting, w) + } + m.waiting = newWaiting +} + +// Wait waits for at least the relevant calls before returning. The expected +// Calls are always over the lifetime of the Mock. Values in the Calls struct +// are used as the minimum number of calls, this allows you to wait for only +// the calls you care about. +func (m *Mock) Wait(s Calls) { + m.callsMutex.Lock() + if m.calls.atLeast(s) { + m.callsMutex.Unlock() + return + } + done := make(chan struct{}) + m.waiting = append(m.waiting, waiting{expected: s, done: done}) + m.callsMutex.Unlock() + <-done +} + +// clockTimer represents an object with an associated start time. +type clockTimer interface { + Next() time.Time + Tick(time.Time) +} + +// clockTimers represents a list of sortable timers. +type clockTimers []clockTimer + +func (a clockTimers) Len() int { return len(a) } +func (a clockTimers) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a clockTimers) Less(i, j int) bool { return a[i].Next().Before(a[j].Next()) } + +// Timer represents a single event. +// The current time will be sent on C, unless the timer was created by AfterFunc. +type Timer struct { + C <-chan time.Time + c chan time.Time + timer *time.Timer // realtime impl, if set + next time.Time // next tick time + mock *Mock // mock clock, if set + fn func() // AfterFunc function, if set +} + +// Stop turns off the ticker. 
+func (t *Timer) Stop() { + if t.timer != nil { + t.timer.Stop() + } else { + t.mock.removeClockTimer((*internalTimer)(t)) + } +} + +type internalTimer Timer + +func (t *internalTimer) Next() time.Time { return t.next } +func (t *internalTimer) Tick(now time.Time) { + if t.fn != nil { + t.fn() + } else { + t.c <- now + } + t.mock.removeClockTimer((*internalTimer)(t)) + gosched() +} + +// Ticker holds a channel that receives "ticks" at regular intervals. +type Ticker struct { + C <-chan time.Time + c chan time.Time + ticker *time.Ticker // realtime impl, if set + next time.Time // next tick time + mock *Mock // mock clock, if set + d time.Duration // time between ticks +} + +// Stop turns off the ticker. +func (t *Ticker) Stop() { + if t.ticker != nil { + t.ticker.Stop() + } else { + t.mock.removeClockTimer((*internalTicker)(t)) + } +} + +type internalTicker Ticker + +func (t *internalTicker) Next() time.Time { return t.next } +func (t *internalTicker) Tick(now time.Time) { + select { + case t.c <- now: + case <-time.After(1 * time.Millisecond): + } + t.next = now.Add(t.d) + gosched() +} + +// Sleep momentarily so that other goroutines can process. +func gosched() { runtime.Gosched() } + +// Calls keeps track of the count of calls for each of the methods on the Clock +// interface. +type Calls struct { + After uint32 + AfterFunc uint32 + Now uint32 + Sleep uint32 + Tick uint32 + Ticker uint32 + Timer uint32 +} + +// atLeast returns true if at least the number of calls in o have been made. +func (c Calls) atLeast(o Calls) bool { + if c.After < o.After { + return false + } + if c.AfterFunc < o.AfterFunc { + return false + } + if c.Now < o.Now { + return false + } + if c.Sleep < o.Sleep { + return false + } + if c.Tick < o.Tick { + return false + } + if c.Ticker < o.Ticker { + return false + } + if c.Timer < o.Timer { + return false + } + return true +} + +type waiting struct { + expected Calls + done chan struct{} +} diff --git a/vendor/github.com/fatih/structs/.gitignore b/vendor/github.com/fatih/structs/.gitignore new file mode 100644 index 00000000000..836562412fe --- /dev/null +++ b/vendor/github.com/fatih/structs/.gitignore @@ -0,0 +1,23 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test diff --git a/vendor/github.com/fatih/structs/.travis.yml b/vendor/github.com/fatih/structs/.travis.yml new file mode 100644 index 00000000000..a08df798127 --- /dev/null +++ b/vendor/github.com/fatih/structs/.travis.yml @@ -0,0 +1,13 @@ +language: go +go: + - 1.7.x + - 1.8.x + - 1.9.x + - tip +sudo: false +before_install: +- go get github.com/axw/gocov/gocov +- go get github.com/mattn/goveralls +- if ! 
go get github.com/golang/tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi +script: +- $HOME/gopath/bin/goveralls -service=travis-ci diff --git a/vendor/github.com/fatih/structs/LICENSE b/vendor/github.com/fatih/structs/LICENSE new file mode 100644 index 00000000000..34504e4b3ef --- /dev/null +++ b/vendor/github.com/fatih/structs/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Fatih Arslan + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/fatih/structs/README.md b/vendor/github.com/fatih/structs/README.md new file mode 100644 index 00000000000..a75eabf37bb --- /dev/null +++ b/vendor/github.com/fatih/structs/README.md @@ -0,0 +1,163 @@ +# Structs [![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/fatih/structs) [![Build Status](http://img.shields.io/travis/fatih/structs.svg?style=flat-square)](https://travis-ci.org/fatih/structs) [![Coverage Status](http://img.shields.io/coveralls/fatih/structs.svg?style=flat-square)](https://coveralls.io/r/fatih/structs) + +Structs contains various utilities to work with Go (Golang) structs. It was +initially used by me to convert a struct into a `map[string]interface{}`. With +time I've added other utilities for structs. It's basically a high level +package based on primitives from the reflect package. Feel free to add new +functions or improve the existing code. + +## Install + +```bash +go get github.com/fatih/structs +``` + +## Usage and Examples + +Just like the standard lib `strings`, `bytes` and co packages, `structs` has +many global functions to manipulate or organize your struct data. 
Lets define +and declare a struct: + +```go +type Server struct { + Name string `json:"name,omitempty"` + ID int + Enabled bool + users []string // not exported + http.Server // embedded +} + +server := &Server{ + Name: "gopher", + ID: 123456, + Enabled: true, +} +``` + +```go +// Convert a struct to a map[string]interface{} +// => {"Name":"gopher", "ID":123456, "Enabled":true} +m := structs.Map(server) + +// Convert the values of a struct to a []interface{} +// => ["gopher", 123456, true] +v := structs.Values(server) + +// Convert the names of a struct to a []string +// (see "Names methods" for more info about fields) +n := structs.Names(server) + +// Convert the values of a struct to a []*Field +// (see "Field methods" for more info about fields) +f := structs.Fields(server) + +// Return the struct name => "Server" +n := structs.Name(server) + +// Check if any field of a struct is initialized or not. +h := structs.HasZero(server) + +// Check if all fields of a struct is initialized or not. +z := structs.IsZero(server) + +// Check if server is a struct or a pointer to struct +i := structs.IsStruct(server) +``` + +### Struct methods + +The structs functions can be also used as independent methods by creating a new +`*structs.Struct`. This is handy if you want to have more control over the +structs (such as retrieving a single Field). + +```go +// Create a new struct type: +s := structs.New(server) + +m := s.Map() // Get a map[string]interface{} +v := s.Values() // Get a []interface{} +f := s.Fields() // Get a []*Field +n := s.Names() // Get a []string +f := s.Field(name) // Get a *Field based on the given field name +f, ok := s.FieldOk(name) // Get a *Field based on the given field name +n := s.Name() // Get the struct name +h := s.HasZero() // Check if any field is uninitialized +z := s.IsZero() // Check if all fields are uninitialized +``` + +### Field methods + +We can easily examine a single Field for more detail. Below you can see how we +get and interact with various field methods: + + +```go +s := structs.New(server) + +// Get the Field struct for the "Name" field +name := s.Field("Name") + +// Get the underlying value, value => "gopher" +value := name.Value().(string) + +// Set the field's value +name.Set("another gopher") + +// Get the field's kind, kind => "string" +name.Kind() + +// Check if the field is exported or not +if name.IsExported() { + fmt.Println("Name field is exported") +} + +// Check if the value is a zero value, such as "" for string, 0 for int +if !name.IsZero() { + fmt.Println("Name is initialized") +} + +// Check if the field is an anonymous (embedded) field +if !name.IsEmbedded() { + fmt.Println("Name is not an embedded field") +} + +// Get the Field's tag value for tag name "json", tag value => "name,omitempty" +tagValue := name.Tag("json") +``` + +Nested structs are supported too: + +```go +addrField := s.Field("Server").Field("Addr") + +// Get the value for addr +a := addrField.Value().(string) + +// Or get all fields +httpServer := s.Field("Server").Fields() +``` + +We can also get a slice of Fields from the Struct type to iterate over all +fields. 
This is handy if you wish to examine all fields: + +```go +s := structs.New(server) + +for _, f := range s.Fields() { + fmt.Printf("field name: %+v\n", f.Name()) + + if f.IsExported() { + fmt.Printf("value : %+v\n", f.Value()) + fmt.Printf("is zero : %+v\n", f.IsZero()) + } +} +``` + +## Credits + + * [Fatih Arslan](https://github.com/fatih) + * [Cihangir Savas](https://github.com/cihangir) + +## License + +The MIT License (MIT) - see LICENSE.md for more details diff --git a/vendor/github.com/fatih/structs/field.go b/vendor/github.com/fatih/structs/field.go new file mode 100644 index 00000000000..e69783230b4 --- /dev/null +++ b/vendor/github.com/fatih/structs/field.go @@ -0,0 +1,141 @@ +package structs + +import ( + "errors" + "fmt" + "reflect" +) + +var ( + errNotExported = errors.New("field is not exported") + errNotSettable = errors.New("field is not settable") +) + +// Field represents a single struct field that encapsulates high level +// functions around the field. +type Field struct { + value reflect.Value + field reflect.StructField + defaultTag string +} + +// Tag returns the value associated with key in the tag string. If there is no +// such key in the tag, Tag returns the empty string. +func (f *Field) Tag(key string) string { + return f.field.Tag.Get(key) +} + +// Value returns the underlying value of the field. It panics if the field +// is not exported. +func (f *Field) Value() interface{} { + return f.value.Interface() +} + +// IsEmbedded returns true if the given field is an anonymous field (embedded) +func (f *Field) IsEmbedded() bool { + return f.field.Anonymous +} + +// IsExported returns true if the given field is exported. +func (f *Field) IsExported() bool { + return f.field.PkgPath == "" +} + +// IsZero returns true if the given field is not initialized (has a zero value). +// It panics if the field is not exported. +func (f *Field) IsZero() bool { + zero := reflect.Zero(f.value.Type()).Interface() + current := f.Value() + + return reflect.DeepEqual(current, zero) +} + +// Name returns the name of the given field +func (f *Field) Name() string { + return f.field.Name +} + +// Kind returns the fields kind, such as "string", "map", "bool", etc .. +func (f *Field) Kind() reflect.Kind { + return f.value.Kind() +} + +// Set sets the field to given value v. It returns an error if the field is not +// settable (not addressable or not exported) or if the given value's type +// doesn't match the fields type. +func (f *Field) Set(val interface{}) error { + // we can't set unexported fields, so be sure this field is exported + if !f.IsExported() { + return errNotExported + } + + // do we get here? not sure... + if !f.value.CanSet() { + return errNotSettable + } + + given := reflect.ValueOf(val) + + if f.value.Kind() != given.Kind() { + return fmt.Errorf("wrong kind. got: %s want: %s", given.Kind(), f.value.Kind()) + } + + f.value.Set(given) + return nil +} + +// Zero sets the field to its zero value. It returns an error if the field is not +// settable (not addressable or not exported). +func (f *Field) Zero() error { + zero := reflect.Zero(f.value.Type()).Interface() + return f.Set(zero) +} + +// Fields returns a slice of Fields. This is particular handy to get the fields +// of a nested struct . A struct tag with the content of "-" ignores the +// checking of that particular field. Example: +// +// // Field is ignored by this package. 
+// Field *http.Request `structs:"-"` +// +// It panics if field is not exported or if field's kind is not struct +func (f *Field) Fields() []*Field { + return getFields(f.value, f.defaultTag) +} + +// Field returns the field from a nested struct. It panics if the nested struct +// is not exported or if the field was not found. +func (f *Field) Field(name string) *Field { + field, ok := f.FieldOk(name) + if !ok { + panic("field not found") + } + + return field +} + +// FieldOk returns the field from a nested struct. The boolean returns whether +// the field was found (true) or not (false). +func (f *Field) FieldOk(name string) (*Field, bool) { + value := &f.value + // value must be settable so we need to make sure it holds the address of the + // variable and not a copy, so we can pass the pointer to strctVal instead of a + // copy (which is not assigned to any variable, hence not settable). + // see "https://blog.golang.org/laws-of-reflection#TOC_8." + if f.value.Kind() != reflect.Ptr { + a := f.value.Addr() + value = &a + } + v := strctVal(value.Interface()) + t := v.Type() + + field, ok := t.FieldByName(name) + if !ok { + return nil, false + } + + return &Field{ + field: field, + value: v.FieldByName(name), + }, true +} diff --git a/vendor/github.com/fatih/structs/structs.go b/vendor/github.com/fatih/structs/structs.go new file mode 100644 index 00000000000..3a87706525f --- /dev/null +++ b/vendor/github.com/fatih/structs/structs.go @@ -0,0 +1,584 @@ +// Package structs contains various utilities functions to work with structs. +package structs + +import ( + "fmt" + + "reflect" +) + +var ( + // DefaultTagName is the default tag name for struct fields which provides + // a more granular to tweak certain structs. Lookup the necessary functions + // for more info. + DefaultTagName = "structs" // struct's field default tag name +) + +// Struct encapsulates a struct type to provide several high level functions +// around the struct. +type Struct struct { + raw interface{} + value reflect.Value + TagName string +} + +// New returns a new *Struct with the struct s. It panics if the s's kind is +// not struct. +func New(s interface{}) *Struct { + return &Struct{ + raw: s, + value: strctVal(s), + TagName: DefaultTagName, + } +} + +// Map converts the given struct to a map[string]interface{}, where the keys +// of the map are the field names and the values of the map the associated +// values of the fields. The default key string is the struct field name but +// can be changed in the struct field's tag value. The "structs" key in the +// struct's field tag value is the key name. Example: +// +// // Field appears in map as key "myName". +// Name string `structs:"myName"` +// +// A tag value with the content of "-" ignores that particular field. Example: +// +// // Field is ignored by this package. +// Field bool `structs:"-"` +// +// A tag value with the content of "string" uses the stringer to get the value. Example: +// +// // The value will be output of Animal's String() func. +// // Map will panic if Animal does not implement String(). +// Field *Animal `structs:"field,string"` +// +// A tag value with the option of "flatten" used in a struct field is to flatten its fields +// in the output map. Example: +// +// // The FieldStruct's fields will be flattened into the output map. +// FieldStruct time.Time `structs:",flatten"` +// +// A tag value with the option of "omitnested" stops iterating further if the type +// is a struct. Example: +// +// // Field is not processed further by this package. 
+// Field time.Time `structs:"myName,omitnested"` +// Field *http.Request `structs:",omitnested"` +// +// A tag value with the option of "omitempty" ignores that particular field if +// the field value is empty. Example: +// +// // Field appears in map as key "myName", but the field is +// // skipped if empty. +// Field string `structs:"myName,omitempty"` +// +// // Field appears in map as key "Field" (the default), but +// // the field is skipped if empty. +// Field string `structs:",omitempty"` +// +// Note that only exported fields of a struct can be accessed, non exported +// fields will be neglected. +func (s *Struct) Map() map[string]interface{} { + out := make(map[string]interface{}) + s.FillMap(out) + return out +} + +// FillMap is the same as Map. Instead of returning the output, it fills the +// given map. +func (s *Struct) FillMap(out map[string]interface{}) { + if out == nil { + return + } + + fields := s.structFields() + + for _, field := range fields { + name := field.Name + val := s.value.FieldByName(name) + isSubStruct := false + var finalVal interface{} + + tagName, tagOpts := parseTag(field.Tag.Get(s.TagName)) + if tagName != "" { + name = tagName + } + + // if the value is a zero value and the field is marked as omitempty do + // not include + if tagOpts.Has("omitempty") { + zero := reflect.Zero(val.Type()).Interface() + current := val.Interface() + + if reflect.DeepEqual(current, zero) { + continue + } + } + + if !tagOpts.Has("omitnested") { + finalVal = s.nested(val) + + v := reflect.ValueOf(val.Interface()) + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Map, reflect.Struct: + isSubStruct = true + } + } else { + finalVal = val.Interface() + } + + if tagOpts.Has("string") { + s, ok := val.Interface().(fmt.Stringer) + if ok { + out[name] = s.String() + } + continue + } + + if isSubStruct && (tagOpts.Has("flatten")) { + for k := range finalVal.(map[string]interface{}) { + out[k] = finalVal.(map[string]interface{})[k] + } + } else { + out[name] = finalVal + } + } +} + +// Values converts the given s struct's field values to a []interface{}. A +// struct tag with the content of "-" ignores the that particular field. +// Example: +// +// // Field is ignored by this package. +// Field int `structs:"-"` +// +// A value with the option of "omitnested" stops iterating further if the type +// is a struct. Example: +// +// // Fields is not processed further by this package. +// Field time.Time `structs:",omitnested"` +// Field *http.Request `structs:",omitnested"` +// +// A tag value with the option of "omitempty" ignores that particular field and +// is not added to the values if the field value is empty. Example: +// +// // Field is skipped if empty +// Field string `structs:",omitempty"` +// +// Note that only exported fields of a struct can be accessed, non exported +// fields will be neglected. 
+func (s *Struct) Values() []interface{} { + fields := s.structFields() + + var t []interface{} + + for _, field := range fields { + val := s.value.FieldByName(field.Name) + + _, tagOpts := parseTag(field.Tag.Get(s.TagName)) + + // if the value is a zero value and the field is marked as omitempty do + // not include + if tagOpts.Has("omitempty") { + zero := reflect.Zero(val.Type()).Interface() + current := val.Interface() + + if reflect.DeepEqual(current, zero) { + continue + } + } + + if tagOpts.Has("string") { + s, ok := val.Interface().(fmt.Stringer) + if ok { + t = append(t, s.String()) + } + continue + } + + if IsStruct(val.Interface()) && !tagOpts.Has("omitnested") { + // look out for embedded structs, and convert them to a + // []interface{} to be added to the final values slice + t = append(t, Values(val.Interface())...) + } else { + t = append(t, val.Interface()) + } + } + + return t +} + +// Fields returns a slice of Fields. A struct tag with the content of "-" +// ignores the checking of that particular field. Example: +// +// // Field is ignored by this package. +// Field bool `structs:"-"` +// +// It panics if s's kind is not struct. +func (s *Struct) Fields() []*Field { + return getFields(s.value, s.TagName) +} + +// Names returns a slice of field names. A struct tag with the content of "-" +// ignores the checking of that particular field. Example: +// +// // Field is ignored by this package. +// Field bool `structs:"-"` +// +// It panics if s's kind is not struct. +func (s *Struct) Names() []string { + fields := getFields(s.value, s.TagName) + + names := make([]string, len(fields)) + + for i, field := range fields { + names[i] = field.Name() + } + + return names +} + +func getFields(v reflect.Value, tagName string) []*Field { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + + t := v.Type() + + var fields []*Field + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + + if tag := field.Tag.Get(tagName); tag == "-" { + continue + } + + f := &Field{ + field: field, + value: v.FieldByName(field.Name), + } + + fields = append(fields, f) + + } + + return fields +} + +// Field returns a new Field struct that provides several high level functions +// around a single struct field entity. It panics if the field is not found. +func (s *Struct) Field(name string) *Field { + f, ok := s.FieldOk(name) + if !ok { + panic("field not found") + } + + return f +} + +// FieldOk returns a new Field struct that provides several high level functions +// around a single struct field entity. The boolean returns true if the field +// was found. +func (s *Struct) FieldOk(name string) (*Field, bool) { + t := s.value.Type() + + field, ok := t.FieldByName(name) + if !ok { + return nil, false + } + + return &Field{ + field: field, + value: s.value.FieldByName(name), + defaultTag: s.TagName, + }, true +} + +// IsZero returns true if all fields in a struct is a zero value (not +// initialized) A struct tag with the content of "-" ignores the checking of +// that particular field. Example: +// +// // Field is ignored by this package. +// Field bool `structs:"-"` +// +// A value with the option of "omitnested" stops iterating further if the type +// is a struct. Example: +// +// // Field is not processed further by this package. +// Field time.Time `structs:"myName,omitnested"` +// Field *http.Request `structs:",omitnested"` +// +// Note that only exported fields of a struct can be accessed, non exported +// fields will be neglected. It panics if s's kind is not struct. 
+func (s *Struct) IsZero() bool { + fields := s.structFields() + + for _, field := range fields { + val := s.value.FieldByName(field.Name) + + _, tagOpts := parseTag(field.Tag.Get(s.TagName)) + + if IsStruct(val.Interface()) && !tagOpts.Has("omitnested") { + ok := IsZero(val.Interface()) + if !ok { + return false + } + + continue + } + + // zero value of the given field, such as "" for string, 0 for int + zero := reflect.Zero(val.Type()).Interface() + + // current value of the given field + current := val.Interface() + + if !reflect.DeepEqual(current, zero) { + return false + } + } + + return true +} + +// HasZero returns true if a field in a struct is not initialized (zero value). +// A struct tag with the content of "-" ignores the checking of that particular +// field. Example: +// +// // Field is ignored by this package. +// Field bool `structs:"-"` +// +// A value with the option of "omitnested" stops iterating further if the type +// is a struct. Example: +// +// // Field is not processed further by this package. +// Field time.Time `structs:"myName,omitnested"` +// Field *http.Request `structs:",omitnested"` +// +// Note that only exported fields of a struct can be accessed, non exported +// fields will be neglected. It panics if s's kind is not struct. +func (s *Struct) HasZero() bool { + fields := s.structFields() + + for _, field := range fields { + val := s.value.FieldByName(field.Name) + + _, tagOpts := parseTag(field.Tag.Get(s.TagName)) + + if IsStruct(val.Interface()) && !tagOpts.Has("omitnested") { + ok := HasZero(val.Interface()) + if ok { + return true + } + + continue + } + + // zero value of the given field, such as "" for string, 0 for int + zero := reflect.Zero(val.Type()).Interface() + + // current value of the given field + current := val.Interface() + + if reflect.DeepEqual(current, zero) { + return true + } + } + + return false +} + +// Name returns the structs's type name within its package. For more info refer +// to Name() function. +func (s *Struct) Name() string { + return s.value.Type().Name() +} + +// structFields returns the exported struct fields for a given s struct. This +// is a convenient helper method to avoid duplicate code in some of the +// functions. +func (s *Struct) structFields() []reflect.StructField { + t := s.value.Type() + + var f []reflect.StructField + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + // we can't access the value of unexported fields + if field.PkgPath != "" { + continue + } + + // don't check if it's omitted + if tag := field.Tag.Get(s.TagName); tag == "-" { + continue + } + + f = append(f, field) + } + + return f +} + +func strctVal(s interface{}) reflect.Value { + v := reflect.ValueOf(s) + + // if pointer get the underlying element≤ + for v.Kind() == reflect.Ptr { + v = v.Elem() + } + + if v.Kind() != reflect.Struct { + panic("not struct") + } + + return v +} + +// Map converts the given struct to a map[string]interface{}. For more info +// refer to Struct types Map() method. It panics if s's kind is not struct. +func Map(s interface{}) map[string]interface{} { + return New(s).Map() +} + +// FillMap is the same as Map. Instead of returning the output, it fills the +// given map. +func FillMap(s interface{}, out map[string]interface{}) { + New(s).FillMap(out) +} + +// Values converts the given struct to a []interface{}. For more info refer to +// Struct types Values() method. It panics if s's kind is not struct. 
+func Values(s interface{}) []interface{} { + return New(s).Values() +} + +// Fields returns a slice of *Field. For more info refer to Struct types +// Fields() method. It panics if s's kind is not struct. +func Fields(s interface{}) []*Field { + return New(s).Fields() +} + +// Names returns a slice of field names. For more info refer to Struct types +// Names() method. It panics if s's kind is not struct. +func Names(s interface{}) []string { + return New(s).Names() +} + +// IsZero returns true if all fields is equal to a zero value. For more info +// refer to Struct types IsZero() method. It panics if s's kind is not struct. +func IsZero(s interface{}) bool { + return New(s).IsZero() +} + +// HasZero returns true if any field is equal to a zero value. For more info +// refer to Struct types HasZero() method. It panics if s's kind is not struct. +func HasZero(s interface{}) bool { + return New(s).HasZero() +} + +// IsStruct returns true if the given variable is a struct or a pointer to +// struct. +func IsStruct(s interface{}) bool { + v := reflect.ValueOf(s) + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + + // uninitialized zero value of a struct + if v.Kind() == reflect.Invalid { + return false + } + + return v.Kind() == reflect.Struct +} + +// Name returns the structs's type name within its package. It returns an +// empty string for unnamed types. It panics if s's kind is not struct. +func Name(s interface{}) string { + return New(s).Name() +} + +// nested retrieves recursively all types for the given value and returns the +// nested value. +func (s *Struct) nested(val reflect.Value) interface{} { + var finalVal interface{} + + v := reflect.ValueOf(val.Interface()) + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Struct: + n := New(val.Interface()) + n.TagName = s.TagName + m := n.Map() + + // do not add the converted value if there are no exported fields, ie: + // time.Time + if len(m) == 0 { + finalVal = val.Interface() + } else { + finalVal = m + } + case reflect.Map: + // get the element type of the map + mapElem := val.Type() + switch val.Type().Kind() { + case reflect.Ptr, reflect.Array, reflect.Map, + reflect.Slice, reflect.Chan: + mapElem = val.Type().Elem() + if mapElem.Kind() == reflect.Ptr { + mapElem = mapElem.Elem() + } + } + + // only iterate over struct types, ie: map[string]StructType, + // map[string][]StructType, + if mapElem.Kind() == reflect.Struct || + (mapElem.Kind() == reflect.Slice && + mapElem.Elem().Kind() == reflect.Struct) { + m := make(map[string]interface{}, val.Len()) + for _, k := range val.MapKeys() { + m[k.String()] = s.nested(val.MapIndex(k)) + } + finalVal = m + break + } + + // TODO(arslan): should this be optional? + finalVal = val.Interface() + case reflect.Slice, reflect.Array: + if val.Type().Kind() == reflect.Interface { + finalVal = val.Interface() + break + } + + // TODO(arslan): should this be optional? + // do not iterate of non struct types, just pass the value. Ie: []int, + // []string, co... We only iterate further if it's a struct. 
+		// i.e. []foo or []*foo
+		if val.Type().Elem().Kind() != reflect.Struct &&
+			!(val.Type().Elem().Kind() == reflect.Ptr &&
+				val.Type().Elem().Elem().Kind() == reflect.Struct) {
+			finalVal = val.Interface()
+			break
+		}
+
+		slices := make([]interface{}, val.Len())
+		for x := 0; x < val.Len(); x++ {
+			slices[x] = s.nested(val.Index(x))
+		}
+		finalVal = slices
+	default:
+		finalVal = val.Interface()
+	}
+
+	return finalVal
+}
diff --git a/vendor/github.com/fatih/structs/tags.go b/vendor/github.com/fatih/structs/tags.go
new file mode 100644
index 00000000000..136a31eba9a
--- /dev/null
+++ b/vendor/github.com/fatih/structs/tags.go
@@ -0,0 +1,32 @@
+package structs
+
+import "strings"
+
+// tagOptions contains a slice of tag options
+type tagOptions []string
+
+// Has returns true if the given option is available in tagOptions
+func (t tagOptions) Has(opt string) bool {
+	for _, tagOpt := range t {
+		if tagOpt == opt {
+			return true
+		}
+	}
+
+	return false
+}
+
+// parseTag splits a struct field's tag into its name and a list of options
+// which come after the name. A tag is in the form: "name,option1,option2".
+// The name may be omitted.
+func parseTag(tag string) (string, tagOptions) {
+	// tag is one of the following:
+	// ""
+	// "name"
+	// "name,opt"
+	// "name,opt,opt2"
+	// ",opt"
+
+	res := strings.Split(tag, ",")
+	return res[0], res[1:]
+}
diff --git a/vendor/github.com/gemnasium/logrus-graylog-hook/.travis.yml b/vendor/github.com/gemnasium/logrus-graylog-hook/.travis.yml
new file mode 100644
index 00000000000..9fe30e3cb9a
--- /dev/null
+++ b/vendor/github.com/gemnasium/logrus-graylog-hook/.travis.yml
@@ -0,0 +1,14 @@
+language: go
+go:
+  - 1.4
+  - 1.5
+  - 1.6
+  - 1.7
+  - 1.8
+  - 1.9
+  - tip
+install:
+  - mkdir -p $HOME/gopath/src/gopkg.in/gemnasium
+  - mv $HOME/gopath/src/github.com/gemnasium/logrus-graylog-hook $HOME/gopath/src/gopkg.in/gemnasium/logrus-graylog-hook.v2
+  - cd $HOME/gopath/src/gopkg.in/gemnasium/logrus-graylog-hook.v2
+  - go get -t
diff --git a/vendor/github.com/gemnasium/logrus-graylog-hook/CHANGELOG.md b/vendor/github.com/gemnasium/logrus-graylog-hook/CHANGELOG.md
new file mode 100644
index 00000000000..fbe770db9c7
--- /dev/null
+++ b/vendor/github.com/gemnasium/logrus-graylog-hook/CHANGELOG.md
@@ -0,0 +1,52 @@
+# Logrus Graylog hook
+
+## 2.0.7 - 2018-02-09
+
+* Fix reported levels to match syslog levels (@maxatome / #27)
+* Removed go 1.3 support
+
+## 2.0.6 - 2017-06-01
+
+* Update import logrus path.
See https://github.com/sirupsen/logrus/pull/384 + +## 2.0.5 - 2017-04-14 + +* Support uncompressed messages (@yuancheng-p / #24) + +## 2.0.4 - 2017-02-19 + +* Avoid panic if the hook can't dial Graylog (@chiffa-org / #21) + +## 2.0.3 - 2016-11-30 + +* Add support for extracting stacktraces from errors (@flimzy / #19) +* Allow specifying the host instead of taking `os.Hostname` by default (@mweibel / #18) + +## 2.0.2 - 2016-09-28 + +* Get rid of github.com/SocialCodeInc/go-gelf/gelf (#14) + +## 2.0.1 - 2016-08-16 + +* Fix an issue with entry constructor (#12) + +## 2.0.0 - 2016-07-02 + +* Remove facility param in constructor, as it's an optional param in Graylog 2.0 (credits: @saward / #9) +* Improve precision of TimeUnix (credits: @RaphYot / #2) +* Expose Gelf Writer (we will make this an interface in later versions) (credits: @cha-won / #10) + +## 1.1.2 - 2016-06-03 + +* Fix another race condition (credits: @dreyinger / #8) + +## 1.1.1 - 2016-05-10 + +* Fix race condition (credits: @rschmukler / #6) + +## 1.1.0 - 2015-12-04 + +* The default behavior is now to send the logs synchronously. +* A new asynchronous hook is available through `NewAsyncGraylogHook` + + diff --git a/vendor/github.com/gemnasium/logrus-graylog-hook/LICENSE b/vendor/github.com/gemnasium/logrus-graylog-hook/LICENSE new file mode 100644 index 00000000000..a4282b2a1f2 --- /dev/null +++ b/vendor/github.com/gemnasium/logrus-graylog-hook/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Gemnasium + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/gemnasium/logrus-graylog-hook/README.md b/vendor/github.com/gemnasium/logrus-graylog-hook/README.md new file mode 100644 index 00000000000..944cfd76623 --- /dev/null +++ b/vendor/github.com/gemnasium/logrus-graylog-hook/README.md @@ -0,0 +1,72 @@ +# Graylog Hook for [Logrus](https://github.com/sirupsen/logrus) :walrus: [![Build Status](https://travis-ci.org/gemnasium/logrus-graylog-hook.svg?branch=master)](https://travis-ci.org/gemnasium/logrus-graylog-hook) [![godoc reference](https://godoc.org/github.com/gemnasium/logrus-graylog-hook?status.svg)](https://godoc.org/gopkg.in/gemnasium/logrus-graylog-hook.v2) + +Use this hook to send your logs to [Graylog](http://graylog2.org) server over UDP. +The hook is non-blocking: even if UDP is used to send messages, the extra work +should not block the logging function. + +All logrus fields will be sent as additional fields on Graylog. 
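As a hedged illustration of that last point (hook construction as in the Usage section below; the address and field names here are placeholders), per-entry fields added with `WithFields` travel with the message and arrive as GELF additional fields, which Graylog conventionally displays with a leading underscore (e.g. `_endpoint`):

```go
package main

import (
    log "github.com/sirupsen/logrus"
    "gopkg.in/gemnasium/logrus-graylog-hook.v2"
)

func main() {
    // Placeholder address; point this at a real GELF UDP input.
    hook := graylog.NewGraylogHook("graylog.example.com:12201", map[string]interface{}{"app": "example"})
    log.AddHook(hook)

    // "endpoint" and "attempt" become additional fields on the resulting
    // Graylog message, alongside the global "app" field set above.
    log.WithFields(log.Fields{
        "endpoint": "/keys",
        "attempt":  3,
    }).Warn("upstream retry")
}
```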
+ +## Usage + +The hook must be configured with: + +* A Graylog GELF UDP address (a "ip:port" string). +* an optional hash with extra global fields. These fields will be included in all messages sent to Graylog + +```go +package main + +import ( + "log/syslog" + log "github.com/sirupsen/logrus" + "gopkg.in/gemnasium/logrus-graylog-hook.v2" + ) + +func main() { + hook := graylog.NewGraylogHook(":", map[string]interface{}{"this": "is logged every time"}) + log.AddHook(hook) + log.Info("some logging message") +} +``` + +### Asynchronous logger + +```go +package main + +import ( + "log/syslog" + log "github.com/sirupsen/logrus" + "gopkg.in/gemnasium/logrus-graylog-hook.v2" + ) + +func main() { + hook := graylog.NewAsyncGraylogHook(":", map[string]interface{}{"this": "is logged every time"}) + defer hook.Flush() + log.AddHook(hook) + log.Info("some logging message") +} +``` + +### Disable standard logging + +For some reason, you may want to disable logging on stdout, and keep only the messages in Graylog (ie: a webserver inside a docker container). +You can redirect `stdout` to `/dev/null`, or just not log anything by creating a `NullFormatter` implementing `logrus.Formatter` interface: + +```go +type NullFormatter struct { +} + +// Don't spend time formatting logs +func (NullFormatter) Format(e *log.Entry) ([]byte, error) { + return []byte{}, nil +} +``` + +And set this formatter as the new logging formatter: + +```go +log.Infof("Log messages are now sent to Graylog (udp://%s)", graylogAddr) // Give a hint why logs are empty +log.AddHook(graylog.NewGraylogHook(graylogAddr, "api", map[string]interface{}{})) // set graylogAddr accordingly +log.SetFormatter(new(NullFormatter)) // Don't send logs to stdout +``` diff --git a/vendor/github.com/gemnasium/logrus-graylog-hook/error.go b/vendor/github.com/gemnasium/logrus-graylog-hook/error.go new file mode 100644 index 00000000000..5d28124cd5b --- /dev/null +++ b/vendor/github.com/gemnasium/logrus-graylog-hook/error.go @@ -0,0 +1,55 @@ +package graylog + +import ( + "encoding/json" + "runtime" + + "github.com/pkg/errors" +) + +// newMarshalableError builds an error which encodes its error message into JSON +func newMarshalableError(err error) *marshalableError { + return &marshalableError{err} +} + +// a marshalableError is an error that can be encoded into JSON +type marshalableError struct { + err error +} + +// MarshalJSON implements json.Marshaler for marshalableError +func (m *marshalableError) MarshalJSON() ([]byte, error) { + return json.Marshal(m.err.Error()) +} + +type causer interface { + Cause() error +} + +type stackTracer interface { + StackTrace() errors.StackTrace +} + +func extractStackTrace(err error) errors.StackTrace { + var tracer stackTracer + for { + if st, ok := err.(stackTracer); ok { + tracer = st + } + if cause, ok := err.(causer); ok { + err = cause.Cause() + continue + } + break + } + if tracer == nil { + return nil + } + return tracer.StackTrace() +} + +func extractFileAndLine(stacktrace errors.StackTrace) (string, int) { + pc := uintptr(stacktrace[0]) + fn := runtime.FuncForPC(pc) + return fn.FileLine(pc) +} diff --git a/vendor/github.com/gemnasium/logrus-graylog-hook/gelf_reader.go b/vendor/github.com/gemnasium/logrus-graylog-hook/gelf_reader.go new file mode 100644 index 00000000000..4b175f6911d --- /dev/null +++ b/vendor/github.com/gemnasium/logrus-graylog-hook/gelf_reader.go @@ -0,0 +1,138 @@ +package graylog + +import ( + "bytes" + "compress/gzip" + "compress/zlib" + "encoding/json" + "fmt" + "io" + "net" + "strings" 
+ "sync" +) + +type Reader struct { + mu sync.Mutex + conn net.Conn +} + +func NewReader(addr string) (*Reader, error) { + var err error + udpAddr, err := net.ResolveUDPAddr("udp", addr) + if err != nil { + return nil, fmt.Errorf("ResolveUDPAddr('%s'): %s", addr, err) + } + + conn, err := net.ListenUDP("udp", udpAddr) + if err != nil { + return nil, fmt.Errorf("ListenUDP: %s", err) + } + + r := new(Reader) + r.conn = conn + return r, nil +} + +func (r *Reader) Addr() string { + return r.conn.LocalAddr().String() +} + +// FIXME: this will discard data if p isn't big enough to hold the +// full message. +func (r *Reader) Read(p []byte) (int, error) { + msg, err := r.ReadMessage() + if err != nil { + return -1, err + } + + var data string + + if msg.Full == "" { + data = msg.Short + } else { + data = msg.Full + } + + return strings.NewReader(data).Read(p) +} + +func (r *Reader) ReadMessage() (*Message, error) { + cBuf := make([]byte, ChunkSize) + var ( + err error + n, length int + buf bytes.Buffer + cid, ocid []byte + seq, total uint8 + cHead []byte + cReader io.Reader + chunks [][]byte + ) + + for got := 0; got < 128 && (total == 0 || got < int(total)); got++ { + if n, err = r.conn.Read(cBuf); err != nil { + return nil, fmt.Errorf("Read: %s", err) + } + cHead, cBuf = cBuf[:2], cBuf[:n] + + if bytes.Equal(cHead, magicChunked) { + //fmt.Printf("chunked %v\n", cBuf[:14]) + cid, seq, total = cBuf[2:2+8], cBuf[2+8], cBuf[2+8+1] + if ocid != nil && !bytes.Equal(cid, ocid) { + return nil, fmt.Errorf("out-of-band message %v (awaited %v)", cid, ocid) + } else if ocid == nil { + ocid = cid + chunks = make([][]byte, total) + } + n = len(cBuf) - chunkedHeaderLen + //fmt.Printf("setting chunks[%d]: %d\n", seq, n) + chunks[seq] = append(make([]byte, 0, n), cBuf[chunkedHeaderLen:]...) + length += n + } else { //not chunked + if total > 0 { + return nil, fmt.Errorf("out-of-band message (not chunked)") + } + break + } + } + //fmt.Printf("\nchunks: %v\n", chunks) + + if length > 0 { + if cap(cBuf) < length { + cBuf = append(cBuf, make([]byte, 0, length-cap(cBuf))...) + } + cBuf = cBuf[:0] + for i := range chunks { + //fmt.Printf("appending %d %v\n", i, chunks[i]) + cBuf = append(cBuf, chunks[i]...) + } + cHead = cBuf[:2] + } + + // the data we get from the wire is compressed + if bytes.Equal(cHead, magicGzip) { + cReader, err = gzip.NewReader(bytes.NewReader(cBuf)) + } else if cHead[0] == magicZlib[0] && + (int(cHead[0])*256+int(cHead[1]))%31 == 0 { + // zlib is slightly more complicated, but correct + cReader, err = zlib.NewReader(bytes.NewReader(cBuf)) + } else { + return nil, fmt.Errorf("unknown magic: %x %v", cHead, cHead) + } + + if err != nil { + return nil, fmt.Errorf("NewReader: %s", err) + } + + if _, err = io.Copy(&buf, cReader); err != nil { + return nil, fmt.Errorf("io.Copy: %s", err) + } + + msg := new(Message) + if err := json.Unmarshal(buf.Bytes(), &msg); err != nil { + return nil, fmt.Errorf("json.Unmarshal: %s", err) + } + + return msg, nil +} diff --git a/vendor/github.com/gemnasium/logrus-graylog-hook/gelf_writer.go b/vendor/github.com/gemnasium/logrus-graylog-hook/gelf_writer.go new file mode 100644 index 00000000000..3c3d3e21c53 --- /dev/null +++ b/vendor/github.com/gemnasium/logrus-graylog-hook/gelf_writer.go @@ -0,0 +1,339 @@ +// Copyright 2012 SocialCode. All rights reserved. +// Use of this source code is governed by the MIT +// license that can be found in the LICENSE file. 
+ +package graylog + +import ( + "bytes" + "compress/flate" + "compress/gzip" + "compress/zlib" + "crypto/rand" + "encoding/json" + "fmt" + "io" + "net" + "os" + "path" + "sync" + "time" +) + +// Writer implements io.Writer and is used to send both discrete +// messages to a graylog2 server, or data from a stream-oriented +// interface (like the functions in log). +type Writer struct { + mu sync.Mutex + conn net.Conn + hostname string + Facility string // defaults to current process name + CompressionLevel int // one of the consts from compress/flate + CompressionType CompressType +} + +// What compression type the writer should use when sending messages +// to the graylog2 server +type CompressType int + +const ( + CompressGzip CompressType = iota + CompressZlib + NoCompress +) + +// Message represents the contents of the GELF message. It is gzipped +// before sending. +type Message struct { + Version string `json:"version"` + Host string `json:"host"` + Short string `json:"short_message"` + Full string `json:"full_message"` + TimeUnix float64 `json:"timestamp"` + Level int32 `json:"level"` + Facility string `json:"facility"` + File string `json:"file"` + Line int `json:"line"` + Extra map[string]interface{} `json:"-"` +} + +type innerMessage Message //against circular (Un)MarshalJSON + +// Used to control GELF chunking. Should be less than (MTU - len(UDP +// header)). +// +// TODO: generate dynamically using Path MTU Discovery? +const ( + ChunkSize = 1420 + chunkedHeaderLen = 12 + chunkedDataLen = ChunkSize - chunkedHeaderLen +) + +var ( + magicChunked = []byte{0x1e, 0x0f} + magicZlib = []byte{0x78} + magicGzip = []byte{0x1f, 0x8b} +) + +// numChunks returns the number of GELF chunks necessary to transmit +// the given compressed buffer. +func numChunks(b []byte) int { + lenB := len(b) + if lenB <= ChunkSize { + return 1 + } + return len(b)/chunkedDataLen + 1 +} + +// New returns a new GELF Writer. This writer can be used to send the +// output of the standard Go log functions to a central GELF server by +// passing it to log.SetOutput() +func NewWriter(addr string) (*Writer, error) { + var err error + w := new(Writer) + w.CompressionLevel = flate.BestSpeed + + if w.conn, err = net.Dial("udp", addr); err != nil { + return nil, err + } + if w.hostname, err = os.Hostname(); err != nil { + return nil, err + } + + w.Facility = path.Base(os.Args[0]) + + return w, nil +} + +// writes the gzip compressed byte array to the connection as a series +// of GELF chunked messages. The header format is documented at +// https://github.com/Graylog2/graylog2-docs/wiki/GELF as: +// +// 2-byte magic (0x1e 0x0f), 8 byte id, 1 byte sequence id, 1 byte +// total, chunk-data +func (w *Writer) writeChunked(zBytes []byte) (err error) { + b := make([]byte, 0, ChunkSize) + buf := bytes.NewBuffer(b) + nChunksI := numChunks(zBytes) + if nChunksI > 255 { + return fmt.Errorf("msg too large, would need %d chunks", nChunksI) + } + nChunks := uint8(nChunksI) + // use urandom to get a unique message id + msgId := make([]byte, 8) + n, err := io.ReadFull(rand.Reader, msgId) + if err != nil || n != 8 { + return fmt.Errorf("rand.Reader: %d/%s", n, err) + } + + bytesLeft := len(zBytes) + for i := uint8(0); i < nChunks; i++ { + buf.Reset() + // manually write header. Don't care about + // host/network byte order, because the spec only + // deals in individual bytes. 
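+ // A sketch of the 12-byte chunk header the writes below produce:
+ //
+ // offset 0-1: magic bytes 0x1e 0x0f
+ // offset 2-9: random 8-byte message id (identical for every chunk)
+ // offset 10: sequence number of this chunk
+ // offset 11: total number of chunks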
+ buf.Write(magicChunked) //magic + buf.Write(msgId) + buf.WriteByte(i) + buf.WriteByte(nChunks) + // slice out our chunk from zBytes + chunkLen := chunkedDataLen + if chunkLen > bytesLeft { + chunkLen = bytesLeft + } + off := int(i) * chunkedDataLen + chunk := zBytes[off : off+chunkLen] + buf.Write(chunk) + + // write this chunk, and make sure the write was good + n, err := w.conn.Write(buf.Bytes()) + if err != nil { + return fmt.Errorf("Write (chunk %d/%d): %s", i, + nChunks, err) + } + if n != len(buf.Bytes()) { + return fmt.Errorf("Write len: (chunk %d/%d) (%d/%d)", + i, nChunks, n, len(buf.Bytes())) + } + + bytesLeft -= chunkLen + } + + if bytesLeft != 0 { + return fmt.Errorf("error: %d bytes left after sending", bytesLeft) + } + return nil +} + +type bufferedWriter struct { + buffer *bytes.Buffer +} + +func (bw bufferedWriter) Write(p []byte) (n int, err error) { + return bw.buffer.Write(p) +} + +func (bw bufferedWriter) Close() error { + return nil +} + +// WriteMessage sends the specified message to the GELF server +// specified in the call to New(). It assumes all the fields are +// filled out appropriately. In general, clients will want to use +// Write, rather than WriteMessage. +func (w *Writer) WriteMessage(m *Message) (err error) { + mBytes, err := json.Marshal(m) + if err != nil { + return + } + + var zBuf bytes.Buffer + var zw io.WriteCloser + switch w.CompressionType { + case CompressGzip: + zw, err = gzip.NewWriterLevel(&zBuf, w.CompressionLevel) + case CompressZlib: + zw, err = zlib.NewWriterLevel(&zBuf, w.CompressionLevel) + case NoCompress: + zw = bufferedWriter{buffer: &zBuf} + default: + panic(fmt.Sprintf("unknown compression type %d", + w.CompressionType)) + } + if err != nil { + return + } + if _, err = zw.Write(mBytes); err != nil { + return + } + zw.Close() + + zBytes := zBuf.Bytes() + if numChunks(zBytes) > 1 { + return w.writeChunked(zBytes) + } + + n, err := w.conn.Write(zBytes) + if err != nil { + return + } + if n != len(zBytes) { + return fmt.Errorf("bad write (%d/%d)", n, len(zBytes)) + } + + return nil +} + +/* +func (w *Writer) Alert(m string) (err error) +func (w *Writer) Close() error +func (w *Writer) Crit(m string) (err error) +func (w *Writer) Debug(m string) (err error) +func (w *Writer) Emerg(m string) (err error) +func (w *Writer) Err(m string) (err error) +func (w *Writer) Info(m string) (err error) +func (w *Writer) Notice(m string) (err error) +func (w *Writer) Warning(m string) (err error) +*/ + +// Write encodes the given string in a GELF message and sends it to +// the server specified in New(). +func (w *Writer) Write(p []byte) (n int, err error) { + + // 1 for the function that called us. + file, line := getCallerIgnoringLogMulti(1) + + // remove trailing and leading whitespace + p = bytes.TrimSpace(p) + + // If there are newlines in the message, use the first line + // for the short message and set the full message to the + // original input. If the input has no newlines, stick the + // whole thing in Short. 
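+ // e.g. "fatal error\ndetails..." yields short="fatal error" with full
+ // holding the entire message, while a single-line message leaves full empty.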
+ short := p
+ full := []byte("")
+ if i := bytes.IndexRune(p, '\n'); i > 0 {
+ short = p[:i]
+ full = p
+ }
+
+ m := Message{
+ Version: "1.0",
+ Host: w.hostname,
+ Short: string(short),
+ Full: string(full),
+ TimeUnix: float64(time.Now().UnixNano()/1000000) / 1000.,
+ Level: 6, // info
+ Facility: w.Facility,
+ File: file,
+ Line: line,
+ Extra: map[string]interface{}{},
+ }
+
+ if err = w.WriteMessage(&m); err != nil {
+ return 0, err
+ }
+
+ return len(p), nil
+}
+
+func (m *Message) MarshalJSON() ([]byte, error) {
+ var err error
+ var b, eb []byte
+
+ extra := m.Extra
+ b, err = json.Marshal((*innerMessage)(m))
+ m.Extra = extra
+ if err != nil {
+ return nil, err
+ }
+
+ if len(extra) == 0 {
+ return b, nil
+ }
+
+ if eb, err = json.Marshal(extra); err != nil {
+ return nil, err
+ }
+
+ // merge serialized message + serialized extra map
+ b[len(b)-1] = ','
+ return append(b, eb[1:len(eb)]...), nil
+}
+
+func (m *Message) UnmarshalJSON(data []byte) error {
+ i := make(map[string]interface{}, 16)
+ if err := json.Unmarshal(data, &i); err != nil {
+ return err
+ }
+ for k, v := range i {
+ if k[0] == '_' {
+ if m.Extra == nil {
+ m.Extra = make(map[string]interface{}, 1)
+ }
+ m.Extra[k] = v
+ continue
+ }
+ switch k {
+ case "version":
+ m.Version = v.(string)
+ case "host":
+ m.Host = v.(string)
+ case "short_message":
+ m.Short = v.(string)
+ case "full_message":
+ m.Full = v.(string)
+ case "timestamp":
+ m.TimeUnix = v.(float64)
+ case "level":
+ m.Level = int32(v.(float64))
+ case "facility":
+ m.Facility = v.(string)
+ case "file":
+ m.File = v.(string)
+ case "line":
+ m.Line = int(v.(float64))
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/gemnasium/logrus-graylog-hook/graylog_hook.go b/vendor/github.com/gemnasium/logrus-graylog-hook/graylog_hook.go
new file mode 100644
index 00000000000..8dcda5d5161
--- /dev/null
+++ b/vendor/github.com/gemnasium/logrus-graylog-hook/graylog_hook.go
@@ -0,0 +1,295 @@
+package graylog
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "os"
+ "runtime"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/sirupsen/logrus"
+)
+
+const StackTraceKey = "_stacktrace"
+
+// Set graylog.BufSize = <value> _before_ calling NewGraylogHook
+// Once the buffer is full, logging will start blocking, waiting for slots to
+// be available in the queue.
+var BufSize uint = 8192
+
+// GraylogHook to send logs to a logging service compatible with the Graylog API and the GELF format.
+type GraylogHook struct {
+ Extra map[string]interface{}
+ Host string
+ Level logrus.Level
+ gelfLogger *Writer
+ buf chan graylogEntry
+ wg sync.WaitGroup
+ mu sync.RWMutex
+ synchronous bool
+ blacklist map[string]bool
+}
+
+// Graylog needs file and line params
+type graylogEntry struct {
+ *logrus.Entry
+ file string
+ line int
+}
+
+// NewGraylogHook creates a hook to be added to an instance of logger.
+func NewGraylogHook(addr string, extra map[string]interface{}) *GraylogHook {
+ g, err := NewWriter(addr)
+ if err != nil {
+ logrus.WithError(err).Error("Can't create Gelf logger")
+ }
+
+ host, err := os.Hostname()
+ if err != nil {
+ host = "localhost"
+ }
+
+ hook := &GraylogHook{
+ Host: host,
+ Extra: extra,
+ Level: logrus.DebugLevel,
+ gelfLogger: g,
+ synchronous: true,
+ }
+ return hook
+}
+
+// NewAsyncGraylogHook creates a hook to be added to an instance of logger.
+// The hook created will be asynchronous, and it's the responsibility of the user to call the Flush method
+// before exiting to empty the log queue.
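+//
+// A minimal usage sketch (the address is illustrative):
+//
+// hook := graylog.NewAsyncGraylogHook("127.0.0.1:12201", nil)
+// defer hook.Flush()
+// logrus.AddHook(hook)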
+func NewAsyncGraylogHook(addr string, extra map[string]interface{}) *GraylogHook {
+ g, err := NewWriter(addr)
+ if err != nil {
+ logrus.WithError(err).Error("Can't create Gelf logger")
+ }
+
+ host, err := os.Hostname()
+ if err != nil {
+ host = "localhost"
+ }
+
+ hook := &GraylogHook{
+ Host: host,
+ Extra: extra,
+ Level: logrus.DebugLevel,
+ gelfLogger: g,
+ buf: make(chan graylogEntry, BufSize),
+ }
+ go hook.fire() // Log in background
+ return hook
+}
+
+// Fire is called when a log event is fired.
+// We assume the entry will be altered by another hook,
+// otherwise we might be logging something wrong to Graylog
+func (hook *GraylogHook) Fire(entry *logrus.Entry) error {
+ hook.mu.RLock() // Claim the mutex as a RLock - allowing multiple go routines to log simultaneously
+ defer hook.mu.RUnlock()
+
+ // get caller file and line here, it won't be available inside the goroutine
+ // 1 for the function that called us.
+ file, line := getCallerIgnoringLogMulti(1)
+
+ newData := make(map[string]interface{})
+ for k, v := range entry.Data {
+ newData[k] = v
+ }
+
+ newEntry := &logrus.Entry{
+ Logger: entry.Logger,
+ Data: newData,
+ Time: entry.Time,
+ Level: entry.Level,
+ Message: entry.Message,
+ }
+ gEntry := graylogEntry{newEntry, file, line}
+
+ if hook.synchronous {
+ hook.sendEntry(gEntry)
+ } else {
+ hook.wg.Add(1)
+ hook.buf <- gEntry
+ }
+
+ return nil
+}
+
+// Flush waits for the log queue to be empty.
+// This func is meant to be used when the hook was created with NewAsyncGraylogHook.
+func (hook *GraylogHook) Flush() {
+ hook.mu.Lock() // claim the mutex as a Lock - we want exclusive access to it
+ defer hook.mu.Unlock()
+
+ hook.wg.Wait()
+}
+
+// fire will loop on the 'buf' channel, and write entries to graylog
+func (hook *GraylogHook) fire() {
+ for {
+ entry := <-hook.buf // receive new entry on channel
+ hook.sendEntry(entry)
+ hook.wg.Done()
+ }
+}
+
+func logrusLevelToSyslog(level logrus.Level) int32 {
+ // Up to warn, logrus levels are lower than syslog by 1
+ // (logrus has no equivalent of syslog LOG_NOTICE)
+ if level <= logrus.WarnLevel {
+ return int32(level) + 1
+ }
+ // From info onwards, logrus levels are lower than syslog by 2
+ return int32(level) + 2
+}
+
+// sendEntry sends an entry to graylog synchronously
+func (hook *GraylogHook) sendEntry(entry graylogEntry) {
+ if hook.gelfLogger == nil {
+ fmt.Println("Can't connect to Graylog")
+ return
+ }
+ w := hook.gelfLogger
+
+ // remove trailing and leading whitespace
+ p := bytes.TrimSpace([]byte(entry.Message))
+
+ // If there are newlines in the message, use the first line
+ // for the short message and set the full message to the
+ // original input. If the input has no newlines, stick the
+ // whole thing in Short.
+ short := p
+ full := []byte("")
+ if i := bytes.IndexRune(p, '\n'); i > 0 {
+ short = p[:i]
+ full = p
+ }
+
+ level := logrusLevelToSyslog(entry.Level)
+
+ // Don't modify entry.Data directly, as the entry will be used after this hook has fired
+ extra := map[string]interface{}{}
+ // Merge extra fields
+ for k, v := range hook.Extra {
+ k = fmt.Sprintf("_%s", k) // "[...] every field you send and prefix with a _ (underscore) will be treated as an additional field."
+ extra[k] = v
+ }
+ for k, v := range entry.Data {
+ if !hook.blacklist[k] {
+ extraK := fmt.Sprintf("_%s", k) // "[...] every field you send and prefix with a _ (underscore) will be treated as an additional field."
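+ // The logrus error field gets special treatment below: a plain error is
+ // wrapped so it marshals as its message string, and a github.com/pkg/errors
+ // stack trace, when present, is promoted to the _stacktrace extra field
+ // together with its originating file and line.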
+ if k == logrus.ErrorKey {
+ asError, isError := v.(error)
+ _, isMarshaler := v.(json.Marshaler)
+ if isError && !isMarshaler {
+ extra[extraK] = newMarshalableError(asError)
+ } else {
+ extra[extraK] = v
+ }
+ if stackTrace := extractStackTrace(asError); stackTrace != nil {
+ extra[StackTraceKey] = fmt.Sprintf("%+v", stackTrace)
+ file, line := extractFileAndLine(stackTrace)
+ if file != "" && line != 0 {
+ entry.file = file
+ entry.line = line
+ }
+ }
+ } else {
+ extra[extraK] = v
+ }
+ }
+ }
+
+ m := Message{
+ Version: "1.1",
+ Host: hook.Host,
+ Short: string(short),
+ Full: string(full),
+ TimeUnix: float64(time.Now().UnixNano()/1000000) / 1000.,
+ Level: level,
+ File: entry.file,
+ Line: entry.line,
+ Extra: extra,
+ }
+
+ if err := w.WriteMessage(&m); err != nil {
+ fmt.Println(err)
+ }
+}
+
+// Levels returns the available logging levels.
+func (hook *GraylogHook) Levels() []logrus.Level {
+ levels := []logrus.Level{}
+ for _, level := range logrus.AllLevels {
+ if level <= hook.Level {
+ levels = append(levels, level)
+ }
+ }
+ return levels
+}
+
+// Blacklist creates a blacklist map to filter some message keys.
+// This is useful when you want your application to log extra fields locally
+// but don't want Graylog to store them.
+func (hook *GraylogHook) Blacklist(b []string) {
+ hook.blacklist = make(map[string]bool)
+ for _, elem := range b {
+ hook.blacklist[elem] = true
+ }
+}
+
+// SetWriter sets the hook Gelf Writer
+func (hook *GraylogHook) SetWriter(w *Writer) error {
+ if w == nil {
+ return errors.New("writer can't be nil")
+ }
+ hook.gelfLogger = w
+ return nil
+}
+
+// Writer returns the logger Gelf Writer
+func (hook *GraylogHook) Writer() *Writer {
+ return hook.gelfLogger
+}
+
+// getCaller returns the filename and the line info of a function
+// further down in the call stack. Passing 0 in as callDepth would
+// return info on the function calling getCaller, 1 the
+// parent function, and so on. Any suffixes passed to getCaller are
+// path fragments like "/pkg/log/log.go", and functions in the call
+// stack from that file are ignored.
+func getCaller(callDepth int, suffixesToIgnore ...string) (file string, line int) {
+ // bump by 1 to ignore the getCaller (this) stackframe
+ callDepth++
+outer:
+ for {
+ var ok bool
+ _, file, line, ok = runtime.Caller(callDepth)
+ if !ok {
+ file = "???"
+ line = 0
+ break
+ }
+
+ for _, s := range suffixesToIgnore {
+ if strings.HasSuffix(file, s) {
+ callDepth++
+ continue outer
+ }
+ }
+ break
+ }
+ return
+}
+
+func getCallerIgnoringLogMulti(callDepth int) (string, int) {
+ // the +1 is to ignore this (getCallerIgnoringLogMulti) frame
+ return getCaller(callDepth+1, "logrus/hooks.go", "logrus/entry.go", "logrus/logger.go", "logrus/exported.go", "asm_amd64.s")
+}
diff --git a/vendor/github.com/getkin/kin-openapi/LICENSE b/vendor/github.com/getkin/kin-openapi/LICENSE
new file mode 100644
index 00000000000..992b9831e0e
--- /dev/null
+++ b/vendor/github.com/getkin/kin-openapi/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2017-2018 the project authors.
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/getkin/kin-openapi/jsoninfo/doc.go b/vendor/github.com/getkin/kin-openapi/jsoninfo/doc.go new file mode 100644 index 00000000000..e59ec2c34ca --- /dev/null +++ b/vendor/github.com/getkin/kin-openapi/jsoninfo/doc.go @@ -0,0 +1,2 @@ +// Package jsoninfo provides information and functions for marshalling/unmarshalling JSON. +package jsoninfo diff --git a/vendor/github.com/getkin/kin-openapi/jsoninfo/field_info.go b/vendor/github.com/getkin/kin-openapi/jsoninfo/field_info.go new file mode 100644 index 00000000000..2382b731cb1 --- /dev/null +++ b/vendor/github.com/getkin/kin-openapi/jsoninfo/field_info.go @@ -0,0 +1,121 @@ +package jsoninfo + +import ( + "reflect" + "strings" + "unicode" + "unicode/utf8" +) + +// FieldInfo contains information about JSON serialization of a field. +type FieldInfo struct { + MultipleFields bool // Whether multiple Go fields share this JSON name + HasJSONTag bool + TypeIsMarshaller bool + TypeIsUnmarshaller bool + JSONOmitEmpty bool + JSONString bool + Index []int + Type reflect.Type + JSONName string +} + +func AppendFields(fields []FieldInfo, parentIndex []int, t reflect.Type) []FieldInfo { + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + // For each field + numField := t.NumField() +iteration: + for i := 0; i < numField; i++ { + f := t.Field(i) + index := make([]int, 0, len(parentIndex)+1) + index = append(index, parentIndex...) + index = append(index, i) + + // See whether this is an embedded field + if f.Anonymous { + if f.Tag.Get("json") == "-" { + continue + } + fields = AppendFields(fields, index, f.Type) + continue iteration + } + + // Ignore certain types + switch f.Type.Kind() { + case reflect.Func, reflect.Chan: + continue iteration + } + + // Is it a private (lowercase) field? + firstRune, _ := utf8.DecodeRuneInString(f.Name) + if unicode.IsLower(firstRune) { + continue iteration + } + + // Declare a field + field := FieldInfo{ + Index: index, + Type: f.Type, + JSONName: f.Name, + } + + // Read "json" tag + jsonTag := f.Tag.Get("json") + + // Read our custom "multijson" tag that + // allows multiple fields with the same name. 
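+ // For instance (an illustrative sketch, not from this package's docs),
+ // two fields may share one JSON name, and each candidate is tried in
+ // turn until one unmarshals successfully:
+ //
+ // A *AType `multijson:"value"`
+ // B *BType `multijson:"value"`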
+ if v := f.Tag.Get("multijson"); v != "" { + field.MultipleFields = true + jsonTag = v + } + + // Handle "-" + if jsonTag == "-" { + continue + } + + // Parse the tag + if jsonTag != "" { + field.HasJSONTag = true + for i, part := range strings.Split(jsonTag, ",") { + if i == 0 { + if part != "" { + field.JSONName = part + } + } else { + switch part { + case "omitempty": + field.JSONOmitEmpty = true + case "string": + field.JSONString = true + } + } + } + } + + _, field.TypeIsMarshaller = field.Type.MethodByName("MarshalJSON") + _, field.TypeIsUnmarshaller = field.Type.MethodByName("UnmarshalJSON") + + // Field is done + fields = append(fields, field) + } + + return fields +} + +type sortableFieldInfos []FieldInfo + +func (list sortableFieldInfos) Len() int { + return len(list) +} + +func (list sortableFieldInfos) Less(i, j int) bool { + return list[i].JSONName < list[j].JSONName +} + +func (list sortableFieldInfos) Swap(i, j int) { + a, b := list[i], list[j] + list[i], list[j] = b, a +} diff --git a/vendor/github.com/getkin/kin-openapi/jsoninfo/marshal.go b/vendor/github.com/getkin/kin-openapi/jsoninfo/marshal.go new file mode 100644 index 00000000000..2a98d68fbe5 --- /dev/null +++ b/vendor/github.com/getkin/kin-openapi/jsoninfo/marshal.go @@ -0,0 +1,162 @@ +package jsoninfo + +import ( + "encoding/json" + "fmt" + "reflect" +) + +// MarshalStrictStruct function: +// * Marshals struct fields, ignoring MarshalJSON() and fields without 'json' tag. +// * Correctly handles StrictStruct semantics. +func MarshalStrictStruct(value StrictStruct) ([]byte, error) { + encoder := NewObjectEncoder() + if err := value.EncodeWith(encoder, value); err != nil { + return nil, err + } + return encoder.Bytes() +} + +type ObjectEncoder struct { + result map[string]json.RawMessage +} + +func NewObjectEncoder() *ObjectEncoder { + return &ObjectEncoder{ + result: make(map[string]json.RawMessage, 8), + } +} + +// Bytes returns the result of encoding. +func (encoder *ObjectEncoder) Bytes() ([]byte, error) { + return json.Marshal(encoder.result) +} + +// EncodeExtension adds a key/value to the current JSON object. +func (encoder *ObjectEncoder) EncodeExtension(key string, value interface{}) error { + data, err := json.Marshal(value) + if err != nil { + return err + } + encoder.result[key] = data + return nil +} + +// EncodeExtensionMap adds all properties to the result. 
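+// A nil map is a no-op; keys already present in the result are overwritten.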
+func (encoder *ObjectEncoder) EncodeExtensionMap(value map[string]json.RawMessage) error {
+ if value != nil {
+ result := encoder.result
+ for k, v := range value {
+ result[k] = v
+ }
+ }
+ return nil
+}
+
+func (encoder *ObjectEncoder) EncodeStructFieldsAndExtensions(value interface{}) error {
+ reflection := reflect.ValueOf(value)
+
+ // Follow "encoding/json" semantics
+ if reflection.Kind() != reflect.Ptr {
+ // Panic because this is a clear programming error
+ panic(fmt.Errorf("value %s is not a pointer", reflection.Type().String()))
+ }
+ if reflection.IsNil() {
+ // Panic because this is a clear programming error
+ panic(fmt.Errorf("value %s is nil", reflection.Type().String()))
+ }
+
+ // Take the element
+ reflection = reflection.Elem()
+
+ // Obtain typeInfo
+ typeInfo := GetTypeInfo(reflection.Type())
+
+ // Declare result
+ result := encoder.result
+
+ // Supported fields
+iteration:
+ for _, field := range typeInfo.Fields {
+ // Fields without JSON tag are ignored
+ if !field.HasJSONTag {
+ continue
+ }
+
+ // Marshal
+ fieldValue := reflection.FieldByIndex(field.Index)
+ if v, ok := fieldValue.Interface().(json.Marshaler); ok {
+ if fieldValue.Kind() == reflect.Ptr && fieldValue.IsNil() {
+ if field.JSONOmitEmpty {
+ continue iteration
+ }
+ result[field.JSONName] = []byte("null")
+ continue
+ }
+ fieldData, err := v.MarshalJSON()
+ if err != nil {
+ return err
+ }
+ result[field.JSONName] = fieldData
+ continue
+ }
+ switch fieldValue.Kind() {
+ case reflect.Ptr, reflect.Interface:
+ if fieldValue.IsNil() {
+ if field.JSONOmitEmpty {
+ continue iteration
+ }
+ result[field.JSONName] = []byte("null")
+ continue
+ }
+ case reflect.Struct:
+ case reflect.Map:
+ if field.JSONOmitEmpty && (fieldValue.IsNil() || fieldValue.Len() == 0) {
+ continue iteration
+ }
+ case reflect.Slice:
+ if field.JSONOmitEmpty && fieldValue.Len() == 0 {
+ continue iteration
+ }
+ case reflect.Bool:
+ x := fieldValue.Bool()
+ if field.JSONOmitEmpty && !x {
+ continue iteration
+ }
+ s := "false"
+ if x {
+ s = "true"
+ }
+ result[field.JSONName] = []byte(s)
+ continue iteration
+ case reflect.Int64, reflect.Int, reflect.Int32:
+ if field.JSONOmitEmpty && fieldValue.Int() == 0 {
+ continue iteration
+ }
+ case reflect.Uint64, reflect.Uint, reflect.Uint32:
+ if field.JSONOmitEmpty && fieldValue.Uint() == 0 {
+ continue iteration
+ }
+ case reflect.Float64:
+ if field.JSONOmitEmpty && fieldValue.Float() == 0.0 {
+ continue iteration
+ }
+ case reflect.String:
+ if field.JSONOmitEmpty && len(fieldValue.String()) == 0 {
+ continue iteration
+ }
+ default:
+ panic(fmt.Errorf("field %q has unsupported type %s", field.JSONName, field.Type.String()))
+ }
+
+ // No special treatment is needed
+ // Use plain old "encoding/json".Marshal
+ fieldData, err := json.Marshal(fieldValue.Addr().Interface())
+ if err != nil {
+ return err
+ }
+ result[field.JSONName] = fieldData
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/getkin/kin-openapi/jsoninfo/marshal_ref.go b/vendor/github.com/getkin/kin-openapi/jsoninfo/marshal_ref.go
new file mode 100644
index 00000000000..9738bf08f16
--- /dev/null
+++ b/vendor/github.com/getkin/kin-openapi/jsoninfo/marshal_ref.go
@@ -0,0 +1,30 @@
+package jsoninfo
+
+import (
+ "encoding/json"
+)
+
+func MarshalRef(value string, otherwise interface{}) ([]byte, error) {
+ if len(value) > 0 {
+ return json.Marshal(&refProps{
+ Ref: value,
+ })
+ }
+ return json.Marshal(otherwise)
+}
+
+func UnmarshalRef(data []byte, destRef *string, destOtherwise interface{}) error {
+ refProps := &refProps{}
+ if err := json.Unmarshal(data, refProps); err == nil {
+ ref := refProps.Ref
+ if len(ref) > 0 {
+ *destRef = ref
+ return nil
+ }
+ }
+ return json.Unmarshal(data, destOtherwise)
+}
+
+type refProps struct {
+ Ref string `json:"$ref,omitempty"`
+}
diff --git a/vendor/github.com/getkin/kin-openapi/jsoninfo/strict_struct.go b/vendor/github.com/getkin/kin-openapi/jsoninfo/strict_struct.go
new file mode 100644
index 00000000000..6b4d8397781
--- /dev/null
+++ b/vendor/github.com/getkin/kin-openapi/jsoninfo/strict_struct.go
@@ -0,0 +1,6 @@
+package jsoninfo
+
+type StrictStruct interface {
+ EncodeWith(encoder *ObjectEncoder, value interface{}) error
+ DecodeWith(decoder *ObjectDecoder, value interface{}) error
+}
diff --git a/vendor/github.com/getkin/kin-openapi/jsoninfo/type_info.go b/vendor/github.com/getkin/kin-openapi/jsoninfo/type_info.go
new file mode 100644
index 00000000000..3dbb8d5d6c0
--- /dev/null
+++ b/vendor/github.com/getkin/kin-openapi/jsoninfo/type_info.go
@@ -0,0 +1,68 @@
+package jsoninfo
+
+import (
+ "reflect"
+ "sort"
+ "sync"
+)
+
+var (
+ typeInfos = map[reflect.Type]*TypeInfo{}
+ typeInfosMutex sync.RWMutex
+)
+
+// TypeInfo contains information about JSON serialization of a type
+type TypeInfo struct {
+ Type reflect.Type
+ Fields []FieldInfo
+}
+
+func GetTypeInfoForValue(value interface{}) *TypeInfo {
+ return GetTypeInfo(reflect.TypeOf(value))
+}
+
+// GetTypeInfo returns TypeInfo for the given type.
+func GetTypeInfo(t reflect.Type) *TypeInfo {
+ for t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+ typeInfosMutex.RLock()
+ typeInfo, exists := typeInfos[t]
+ typeInfosMutex.RUnlock()
+ if exists {
+ return typeInfo
+ }
+ if t.Kind() != reflect.Struct {
+ typeInfo = &TypeInfo{
+ Type: t,
+ }
+ } else {
+ // Allocate
+ typeInfo = &TypeInfo{
+ Type: t,
+ Fields: make([]FieldInfo, 0, 16),
+ }
+
+ // Add fields
+ typeInfo.Fields = AppendFields(nil, nil, t)
+
+ // Sort fields
+ sort.Sort(sortableFieldInfos(typeInfo.Fields))
+ }
+
+ // Publish
+ typeInfosMutex.Lock()
+ typeInfos[t] = typeInfo
+ typeInfosMutex.Unlock()
+ return typeInfo
+}
+
+// FieldNames returns all field names
+func (typeInfo *TypeInfo) FieldNames() []string {
+ fields := typeInfo.Fields
+ names := make([]string, 0, len(fields))
+ for _, field := range fields {
+ names = append(names, field.JSONName)
+ }
+ return names
+}
diff --git a/vendor/github.com/getkin/kin-openapi/jsoninfo/unmarshal.go b/vendor/github.com/getkin/kin-openapi/jsoninfo/unmarshal.go
new file mode 100644
index 00000000000..ce3c337a3eb
--- /dev/null
+++ b/vendor/github.com/getkin/kin-openapi/jsoninfo/unmarshal.go
@@ -0,0 +1,121 @@
+package jsoninfo
+
+import (
+ "encoding/json"
+ "fmt"
+ "reflect"
+)
+
+// UnmarshalStrictStruct function:
+// * Unmarshals struct fields, ignoring UnmarshalJSON(...) and fields without 'json' tag.
+// * Correctly handles StrictStruct +func UnmarshalStrictStruct(data []byte, value StrictStruct) error { + decoder, err := NewObjectDecoder(data) + if err != nil { + return err + } + return value.DecodeWith(decoder, value) +} + +type ObjectDecoder struct { + Data []byte + remainingFields map[string]json.RawMessage +} + +func NewObjectDecoder(data []byte) (*ObjectDecoder, error) { + var remainingFields map[string]json.RawMessage + if err := json.Unmarshal(data, &remainingFields); err != nil { + return nil, fmt.Errorf("failed to unmarshal extension properties: %v (%s)", err, data) + } + return &ObjectDecoder{ + Data: data, + remainingFields: remainingFields, + }, nil +} + +// DecodeExtensionMap returns all properties that were not decoded previously. +func (decoder *ObjectDecoder) DecodeExtensionMap() map[string]json.RawMessage { + return decoder.remainingFields +} + +func (decoder *ObjectDecoder) DecodeStructFieldsAndExtensions(value interface{}) error { + reflection := reflect.ValueOf(value) + if reflection.Kind() != reflect.Ptr { + panic(fmt.Errorf("value %T is not a pointer", value)) + } + if reflection.IsNil() { + panic(fmt.Errorf("value %T is nil", value)) + } + reflection = reflection.Elem() + for (reflection.Kind() == reflect.Interface || reflection.Kind() == reflect.Ptr) && !reflection.IsNil() { + reflection = reflection.Elem() + } + reflectionType := reflection.Type() + if reflectionType.Kind() != reflect.Struct { + panic(fmt.Errorf("value %T is not a struct", value)) + } + typeInfo := GetTypeInfo(reflectionType) + + // Supported fields + fields := typeInfo.Fields + remainingFields := decoder.remainingFields + for fieldIndex, field := range fields { + // Fields without JSON tag are ignored + if !field.HasJSONTag { + continue + } + + // Get data + fieldData, exists := remainingFields[field.JSONName] + if !exists { + continue + } + + // Unmarshal + if field.TypeIsUnmarshaller { + fieldType := field.Type + isPtr := false + if fieldType.Kind() == reflect.Ptr { + fieldType = fieldType.Elem() + isPtr = true + } + fieldValue := reflect.New(fieldType) + if err := fieldValue.Interface().(json.Unmarshaler).UnmarshalJSON(fieldData); err != nil { + if field.MultipleFields { + i := fieldIndex + 1 + if i < len(fields) && fields[i].JSONName == field.JSONName { + continue + } + } + return fmt.Errorf("failed to unmarshal property %q (%s): %v", + field.JSONName, fieldValue.Type().String(), err) + } + if !isPtr { + fieldValue = fieldValue.Elem() + } + reflection.FieldByIndex(field.Index).Set(fieldValue) + + // Remove the field from remaining fields + delete(remainingFields, field.JSONName) + } else { + fieldPtr := reflection.FieldByIndex(field.Index) + if fieldPtr.Kind() != reflect.Ptr || fieldPtr.IsNil() { + fieldPtr = fieldPtr.Addr() + } + if err := json.Unmarshal(fieldData, fieldPtr.Interface()); err != nil { + if field.MultipleFields { + i := fieldIndex + 1 + if i < len(fields) && fields[i].JSONName == field.JSONName { + continue + } + } + return fmt.Errorf("failed to unmarshal property %q (%s): %v", + field.JSONName, fieldPtr.Type().String(), err) + } + + // Remove the field from remaining fields + delete(remainingFields, field.JSONName) + } + } + return nil +} diff --git a/vendor/github.com/getkin/kin-openapi/jsoninfo/unsupported_properties_error.go b/vendor/github.com/getkin/kin-openapi/jsoninfo/unsupported_properties_error.go new file mode 100644 index 00000000000..f69aafdc34f --- /dev/null +++ b/vendor/github.com/getkin/kin-openapi/jsoninfo/unsupported_properties_error.go @@ -0,0 +1,42 @@ 
+package jsoninfo + +import ( + "encoding/json" + "fmt" + "sort" +) + +// UnsupportedPropertiesError is a helper for extensions that want to refuse +// unsupported JSON object properties. +// +// It produces a helpful error message. +type UnsupportedPropertiesError struct { + Value interface{} + UnsupportedProperties map[string]json.RawMessage +} + +func NewUnsupportedPropertiesError(v interface{}, m map[string]json.RawMessage) error { + return &UnsupportedPropertiesError{ + Value: v, + UnsupportedProperties: m, + } +} + +func (err *UnsupportedPropertiesError) Error() string { + m := err.UnsupportedProperties + typeInfo := GetTypeInfoForValue(err.Value) + if m == nil || typeInfo == nil { + return fmt.Sprintf("invalid %T", *err) + } + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + supported := typeInfo.FieldNames() + if len(supported) == 0 { + return fmt.Sprintf("type \"%T\" doesn't take any properties. Unsupported properties: %+v", + err.Value, keys) + } + return fmt.Sprintf("unsupported properties: %+v (supported properties are: %+v)", keys, supported) +} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/callback.go b/vendor/github.com/getkin/kin-openapi/openapi3/callback.go new file mode 100644 index 00000000000..8995e479251 --- /dev/null +++ b/vendor/github.com/getkin/kin-openapi/openapi3/callback.go @@ -0,0 +1,36 @@ +package openapi3 + +import ( + "context" + "fmt" + + "github.com/go-openapi/jsonpointer" +) + +type Callbacks map[string]*CallbackRef + +var _ jsonpointer.JSONPointable = (*Callbacks)(nil) + +func (c Callbacks) JSONLookup(token string) (interface{}, error) { + ref, ok := c[token] + if ref == nil || !ok { + return nil, fmt.Errorf("object has no field %q", token) + } + + if ref.Ref != "" { + return &Ref{Ref: ref.Ref}, nil + } + return ref.Value, nil +} + +// Callback is specified by OpenAPI/Swagger standard version 3.0. +type Callback map[string]*PathItem + +func (value Callback) Validate(ctx context.Context) error { + for _, v := range value { + if err := v.Validate(ctx); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/components.go b/vendor/github.com/getkin/kin-openapi/openapi3/components.go new file mode 100644 index 00000000000..7acafabf962 --- /dev/null +++ b/vendor/github.com/getkin/kin-openapi/openapi3/components.go @@ -0,0 +1,107 @@ +package openapi3 + +import ( + "context" + "fmt" + "regexp" + + "github.com/getkin/kin-openapi/jsoninfo" +) + +// Components is specified by OpenAPI/Swagger standard version 3.0. 
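+// It holds the reusable objects that other parts of the document reference
+// via "#/components/..." pointers.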
+type Components struct {
+ ExtensionProps
+ Schemas Schemas `json:"schemas,omitempty" yaml:"schemas,omitempty"`
+ Parameters ParametersMap `json:"parameters,omitempty" yaml:"parameters,omitempty"`
+ Headers Headers `json:"headers,omitempty" yaml:"headers,omitempty"`
+ RequestBodies RequestBodies `json:"requestBodies,omitempty" yaml:"requestBodies,omitempty"`
+ Responses Responses `json:"responses,omitempty" yaml:"responses,omitempty"`
+ SecuritySchemes SecuritySchemes `json:"securitySchemes,omitempty" yaml:"securitySchemes,omitempty"`
+ Examples Examples `json:"examples,omitempty" yaml:"examples,omitempty"`
+ Links Links `json:"links,omitempty" yaml:"links,omitempty"`
+ Callbacks Callbacks `json:"callbacks,omitempty" yaml:"callbacks,omitempty"`
+}
+
+func NewComponents() Components {
+ return Components{}
+}
+
+func (components *Components) MarshalJSON() ([]byte, error) {
+ return jsoninfo.MarshalStrictStruct(components)
+}
+
+func (components *Components) UnmarshalJSON(data []byte) error {
+ return jsoninfo.UnmarshalStrictStruct(data, components)
+}
+
+func (components *Components) Validate(ctx context.Context) (err error) {
+ for k, v := range components.Schemas {
+ if err = ValidateIdentifier(k); err != nil {
+ return
+ }
+ if err = v.Validate(ctx); err != nil {
+ return
+ }
+ }
+
+ for k, v := range components.Parameters {
+ if err = ValidateIdentifier(k); err != nil {
+ return
+ }
+ if err = v.Validate(ctx); err != nil {
+ return
+ }
+ }
+
+ for k, v := range components.RequestBodies {
+ if err = ValidateIdentifier(k); err != nil {
+ return
+ }
+ if err = v.Validate(ctx); err != nil {
+ return
+ }
+ }
+
+ for k, v := range components.Responses {
+ if err = ValidateIdentifier(k); err != nil {
+ return
+ }
+ if err = v.Validate(ctx); err != nil {
+ return
+ }
+ }
+
+ for k, v := range components.Headers {
+ if err = ValidateIdentifier(k); err != nil {
+ return
+ }
+ if err = v.Validate(ctx); err != nil {
+ return
+ }
+ }
+
+ for k, v := range components.SecuritySchemes {
+ if err = ValidateIdentifier(k); err != nil {
+ return
+ }
+ if err = v.Validate(ctx); err != nil {
+ return
+ }
+ }
+
+ return
+}
+
+const identifierPattern = `^[a-zA-Z0-9._-]+$`
+
+// IdentifierRegExp verifies whether a Components object key matches the 'identifierPattern' pattern, per OpenAPI v3.x.
+// However, to support legacy OpenAPI v2.x documents, this pattern may need to be customized so that
+// specs converted from v2 to v3 do not fail validation.
+var IdentifierRegExp = regexp.MustCompile(identifierPattern)
+
+func ValidateIdentifier(value string) error {
+ if IdentifierRegExp.MatchString(value) {
+ return nil
+ }
+ return fmt.Errorf("identifier %q is not supported by OpenAPIv3 standard (regexp: %q)", value, identifierPattern)
+}
diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/content.go b/vendor/github.com/getkin/kin-openapi/openapi3/content.go
new file mode 100644
index 00000000000..5edb7d3faca
--- /dev/null
+++ b/vendor/github.com/getkin/kin-openapi/openapi3/content.go
@@ -0,0 +1,115 @@
+package openapi3
+
+import (
+ "context"
+ "strings"
+)
+
+// Content is specified by OpenAPI/Swagger 3.0 standard.
+type Content map[string]*MediaType
+
+func NewContent() Content {
+ return make(map[string]*MediaType, 4)
+}
+
+func NewContentWithSchema(schema *Schema, consumes []string) Content {
+ if len(consumes) == 0 {
+ return Content{
+ "*/*": NewMediaType().WithSchema(schema),
+ }
+ }
+ content := make(map[string]*MediaType, len(consumes))
+ for _, mediaType := range consumes {
+ content[mediaType] = NewMediaType().WithSchema(schema)
+ }
+ return content
+}
+
+func NewContentWithSchemaRef(schema *SchemaRef, consumes []string) Content {
+ if len(consumes) == 0 {
+ return Content{
+ "*/*": NewMediaType().WithSchemaRef(schema),
+ }
+ }
+ content := make(map[string]*MediaType, len(consumes))
+ for _, mediaType := range consumes {
+ content[mediaType] = NewMediaType().WithSchemaRef(schema)
+ }
+ return content
+}
+
+func NewContentWithJSONSchema(schema *Schema) Content {
+ return Content{
+ "application/json": NewMediaType().WithSchema(schema),
+ }
+}
+func NewContentWithJSONSchemaRef(schema *SchemaRef) Content {
+ return Content{
+ "application/json": NewMediaType().WithSchemaRef(schema),
+ }
+}
+
+func NewContentWithFormDataSchema(schema *Schema) Content {
+ return Content{
+ "multipart/form-data": NewMediaType().WithSchema(schema),
+ }
+}
+
+func NewContentWithFormDataSchemaRef(schema *SchemaRef) Content {
+ return Content{
+ "multipart/form-data": NewMediaType().WithSchemaRef(schema),
+ }
+}
+
+func (content Content) Get(mime string) *MediaType {
+ // If the mime is empty then short-circuit to the wildcard.
+ // We do this here so that we catch only the specific case of
+ // an empty mime rather than a present, but invalid, mime type.
+ if mime == "" {
+ return content["*/*"]
+ }
+ // Start by making the most specific match possible
+ // by using the mime type in full.
+ if v := content[mime]; v != nil {
+ return v
+ }
+ // If an exact match is not found then we strip all
+ // metadata from the mime type and only use the x/y
+ // portion.
+ i := strings.IndexByte(mime, ';')
+ if i < 0 {
+ // If there is no metadata then preserve the full mime type
+ // string for later wildcard searches.
+ i = len(mime)
+ }
+ mime = mime[:i]
+ if v := content[mime]; v != nil {
+ return v
+ }
+ // If the x/y pattern has no specific match then we
+ // try the x/* pattern.
+ i = strings.IndexByte(mime, '/')
+ if i < 0 {
+ // In the case that the given mime type is not valid because it is
+ // missing the subtype we return nil so that this does not accidentally
+ // resolve with the wildcard.
+ return nil
+ }
+ mime = mime[:i] + "/*"
+ if v := content[mime]; v != nil {
+ return v
+ }
+ // Finally, the most generic match of */* is returned
+ // as a catch-all.
+ return content["*/*"]
+}
+
+func (value Content) Validate(ctx context.Context) error {
+ for _, v := range value {
+ // Validate MediaType
+ if err := v.Validate(ctx); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/discriminator.go b/vendor/github.com/getkin/kin-openapi/openapi3/discriminator.go
new file mode 100644
index 00000000000..82ad7040b3a
--- /dev/null
+++ b/vendor/github.com/getkin/kin-openapi/openapi3/discriminator.go
@@ -0,0 +1,26 @@
+package openapi3
+
+import (
+ "context"
+
+ "github.com/getkin/kin-openapi/jsoninfo"
+)
+
+// Discriminator is specified by OpenAPI/Swagger standard version 3.0.
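+// For example (illustrative), a discriminator with propertyName "petType" and
+// mapping {"dog": "#/components/schemas/Dog"} routes payloads whose petType
+// is "dog" to the Dog schema.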
+type Discriminator struct {
+ ExtensionProps
+ PropertyName string `json:"propertyName" yaml:"propertyName"`
+ Mapping map[string]string `json:"mapping,omitempty" yaml:"mapping,omitempty"`
+}
+
+func (value *Discriminator) MarshalJSON() ([]byte, error) {
+ return jsoninfo.MarshalStrictStruct(value)
+}
+
+func (value *Discriminator) UnmarshalJSON(data []byte) error {
+ return jsoninfo.UnmarshalStrictStruct(data, value)
+}
+
+func (value *Discriminator) Validate(ctx context.Context) error {
+ return nil
+}
diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/doc.go b/vendor/github.com/getkin/kin-openapi/openapi3/doc.go
new file mode 100644
index 00000000000..fc2735cb761
--- /dev/null
+++ b/vendor/github.com/getkin/kin-openapi/openapi3/doc.go
@@ -0,0 +1,4 @@
+// Package openapi3 parses and writes OpenAPI 3 specification documents.
+//
+// See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md
+package openapi3
diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/encoding.go b/vendor/github.com/getkin/kin-openapi/openapi3/encoding.go
new file mode 100644
index 00000000000..ad48b916054
--- /dev/null
+++ b/vendor/github.com/getkin/kin-openapi/openapi3/encoding.go
@@ -0,0 +1,93 @@
+package openapi3
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/getkin/kin-openapi/jsoninfo"
+)
+
+// Encoding is specified by OpenAPI/Swagger 3.0 standard.
+type Encoding struct {
+ ExtensionProps
+
+ ContentType string `json:"contentType,omitempty" yaml:"contentType,omitempty"`
+ Headers Headers `json:"headers,omitempty" yaml:"headers,omitempty"`
+ Style string `json:"style,omitempty" yaml:"style,omitempty"`
+ Explode *bool `json:"explode,omitempty" yaml:"explode,omitempty"`
+ AllowReserved bool `json:"allowReserved,omitempty" yaml:"allowReserved,omitempty"`
+}
+
+func NewEncoding() *Encoding {
+ return &Encoding{}
+}
+
+func (encoding *Encoding) WithHeader(name string, header *Header) *Encoding {
+ return encoding.WithHeaderRef(name, &HeaderRef{
+ Value: header,
+ })
+}
+
+func (encoding *Encoding) WithHeaderRef(name string, ref *HeaderRef) *Encoding {
+ headers := encoding.Headers
+ if headers == nil {
+ headers = make(map[string]*HeaderRef)
+ encoding.Headers = headers
+ }
+ headers[name] = ref
+ return encoding
+}
+
+func (encoding *Encoding) MarshalJSON() ([]byte, error) {
+ return jsoninfo.MarshalStrictStruct(encoding)
+}
+
+func (encoding *Encoding) UnmarshalJSON(data []byte) error {
+ return jsoninfo.UnmarshalStrictStruct(data, encoding)
+}
+
+// SerializationMethod returns a serialization method of request body.
+// When serialization method is not defined the method returns the default serialization method.
+func (encoding *Encoding) SerializationMethod() *SerializationMethod {
+ sm := &SerializationMethod{Style: SerializationForm, Explode: true}
+ if encoding != nil {
+ if encoding.Style != "" {
+ sm.Style = encoding.Style
+ }
+ if encoding.Explode != nil {
+ sm.Explode = *encoding.Explode
+ }
+ }
+ return sm
+}
+
+func (value *Encoding) Validate(ctx context.Context) error {
+ if value == nil {
+ return nil
+ }
+ for k, v := range value.Headers {
+ if err := ValidateIdentifier(k); err != nil {
+ return err
+ }
+ if err := v.Validate(ctx); err != nil {
+ return err
+ }
+ }
+
+ // Validate a media type's serialization method.
+ sm := value.SerializationMethod()
+ switch {
+ case sm.Style == SerializationForm && sm.Explode,
+ sm.Style == SerializationForm && !sm.Explode,
+ sm.Style == SerializationSpaceDelimited && sm.Explode,
+ sm.Style == SerializationSpaceDelimited && !sm.Explode,
+ sm.Style == SerializationPipeDelimited && sm.Explode,
+ sm.Style == SerializationPipeDelimited && !sm.Explode,
+ sm.Style == SerializationDeepObject && sm.Explode:
+ // it is valid
+ default:
+ return fmt.Errorf("serialization method with style=%q and explode=%v is not supported by media type", sm.Style, sm.Explode)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/errors.go b/vendor/github.com/getkin/kin-openapi/openapi3/errors.go
new file mode 100644
index 00000000000..ce52cd48379
--- /dev/null
+++ b/vendor/github.com/getkin/kin-openapi/openapi3/errors.go
@@ -0,0 +1,43 @@
+package openapi3
+
+import (
+ "bytes"
+ "errors"
+)
+
+// MultiError is a collection of errors, intended for when
+// multiple issues need to be reported upstream.
+type MultiError []error
+
+func (me MultiError) Error() string {
+ buff := &bytes.Buffer{}
+ for _, e := range me {
+ buff.WriteString(e.Error())
+ buff.WriteString(" | ")
+ }
+ return buff.String()
+}
+
+// Is allows you to determine if a generic error is in fact a MultiError using `errors.Is()`.
+// It will also return true if any of the contained errors match target.
+func (me MultiError) Is(target error) bool {
+ if _, ok := target.(MultiError); ok {
+ return true
+ }
+ for _, e := range me {
+ if errors.Is(e, target) {
+ return true
+ }
+ }
+ return false
+}
+
+// As allows you to use `errors.As()` to set target to the first error within the multi error that matches the target type.
+func (me MultiError) As(target interface{}) bool {
+ for _, e := range me {
+ if errors.As(e, target) {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/examples.go b/vendor/github.com/getkin/kin-openapi/openapi3/examples.go
new file mode 100644
index 00000000000..f7f90ce5436
--- /dev/null
+++ b/vendor/github.com/getkin/kin-openapi/openapi3/examples.go
@@ -0,0 +1,53 @@
+package openapi3
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/getkin/kin-openapi/jsoninfo"
+ "github.com/go-openapi/jsonpointer"
+)
+
+type Examples map[string]*ExampleRef
+
+var _ jsonpointer.JSONPointable = (*Examples)(nil)
+
+func (e Examples) JSONLookup(token string) (interface{}, error) {
+ ref, ok := e[token]
+ if ref == nil || !ok {
+ return nil, fmt.Errorf("object has no field %q", token)
+ }
+
+ if ref.Ref != "" {
+ return &Ref{Ref: ref.Ref}, nil
+ }
+ return ref.Value, nil
+}
+
+// Example is specified by OpenAPI/Swagger 3.0 standard.
+type Example struct { + ExtensionProps + + Summary string `json:"summary,omitempty" yaml:"summary,omitempty"` + Description string `json:"description,omitempty" yaml:"description,omitempty"` + Value interface{} `json:"value,omitempty" yaml:"value,omitempty"` + ExternalValue string `json:"externalValue,omitempty" yaml:"externalValue,omitempty"` +} + +func NewExample(value interface{}) *Example { + return &Example{ + Value: value, + } +} + +func (example *Example) MarshalJSON() ([]byte, error) { + return jsoninfo.MarshalStrictStruct(example) +} + +func (example *Example) UnmarshalJSON(data []byte) error { + return jsoninfo.UnmarshalStrictStruct(data, example) +} + +func (value *Example) Validate(ctx context.Context) error { + return nil // TODO +} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/extension.go b/vendor/github.com/getkin/kin-openapi/openapi3/extension.go new file mode 100644 index 00000000000..f6b7ef9bbe8 --- /dev/null +++ b/vendor/github.com/getkin/kin-openapi/openapi3/extension.go @@ -0,0 +1,38 @@ +package openapi3 + +import ( + "github.com/getkin/kin-openapi/jsoninfo" +) + +// ExtensionProps provides support for OpenAPI extensions. +// It reads/writes all properties that begin with "x-". +type ExtensionProps struct { + Extensions map[string]interface{} `json:"-" yaml:"-"` +} + +// Assert that the type implements the interface +var _ jsoninfo.StrictStruct = &ExtensionProps{} + +// EncodeWith will be invoked by package "jsoninfo" +func (props *ExtensionProps) EncodeWith(encoder *jsoninfo.ObjectEncoder, value interface{}) error { + for k, v := range props.Extensions { + if err := encoder.EncodeExtension(k, v); err != nil { + return err + } + } + return encoder.EncodeStructFieldsAndExtensions(value) +} + +// DecodeWith will be invoked by package "jsoninfo" +func (props *ExtensionProps) DecodeWith(decoder *jsoninfo.ObjectDecoder, value interface{}) error { + if err := decoder.DecodeStructFieldsAndExtensions(value); err != nil { + return err + } + source := decoder.DecodeExtensionMap() + result := make(map[string]interface{}, len(source)) + for k, v := range source { + result[k] = v + } + props.Extensions = result + return nil +} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/external_docs.go b/vendor/github.com/getkin/kin-openapi/openapi3/external_docs.go new file mode 100644 index 00000000000..5a1476bde2d --- /dev/null +++ b/vendor/github.com/getkin/kin-openapi/openapi3/external_docs.go @@ -0,0 +1,21 @@ +package openapi3 + +import ( + "github.com/getkin/kin-openapi/jsoninfo" +) + +// ExternalDocs is specified by OpenAPI/Swagger standard version 3.0. 
+type ExternalDocs struct { + ExtensionProps + + Description string `json:"description,omitempty"` + URL string `json:"url,omitempty"` +} + +func (e *ExternalDocs) MarshalJSON() ([]byte, error) { + return jsoninfo.MarshalStrictStruct(e) +} + +func (e *ExternalDocs) UnmarshalJSON(data []byte) error { + return jsoninfo.UnmarshalStrictStruct(data, e) +} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/header.go b/vendor/github.com/getkin/kin-openapi/openapi3/header.go new file mode 100644 index 00000000000..5fdc31771dc --- /dev/null +++ b/vendor/github.com/getkin/kin-openapi/openapi3/header.go @@ -0,0 +1,128 @@ +package openapi3 + +import ( + "context" + "errors" + "fmt" + + "github.com/getkin/kin-openapi/jsoninfo" + "github.com/go-openapi/jsonpointer" +) + +type Headers map[string]*HeaderRef + +var _ jsonpointer.JSONPointable = (*Headers)(nil) + +func (h Headers) JSONLookup(token string) (interface{}, error) { + ref, ok := h[token] + if ref == nil || !ok { + return nil, fmt.Errorf("object has no field %q", token) + } + + if ref.Ref != "" { + return &Ref{Ref: ref.Ref}, nil + } + return ref.Value, nil +} + +// Header is specified by OpenAPI/Swagger 3.0 standard. +// See https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.0.md#headerObject +type Header struct { + Parameter +} + +var _ jsonpointer.JSONPointable = (*Header)(nil) + +func (value *Header) UnmarshalJSON(data []byte) error { + return jsoninfo.UnmarshalStrictStruct(data, value) +} + +// SerializationMethod returns a header's serialization method. +func (value *Header) SerializationMethod() (*SerializationMethod, error) { + style := value.Style + if style == "" { + style = SerializationSimple + } + explode := false + if value.Explode != nil { + explode = *value.Explode + } + return &SerializationMethod{Style: style, Explode: explode}, nil +} + +func (value *Header) Validate(ctx context.Context) error { + if value.Name != "" { + return errors.New("header 'name' MUST NOT be specified, it is given in the corresponding headers map") + } + if value.In != "" { + return errors.New("header 'in' MUST NOT be specified, it is implicitly in header") + } + + // Validate a parameter's serialization method. 
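+ // Only the "simple" style (exploded or not) is meaningful for a header
+ // parameter, which is what the check below enforces.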
+ sm, err := value.SerializationMethod() + if err != nil { + return err + } + if smSupported := false || + sm.Style == SerializationSimple && !sm.Explode || + sm.Style == SerializationSimple && sm.Explode; !smSupported { + e := fmt.Errorf("serialization method with style=%q and explode=%v is not supported by a header parameter", sm.Style, sm.Explode) + return fmt.Errorf("header schema is invalid: %v", e) + } + + if (value.Schema == nil) == (value.Content == nil) { + e := fmt.Errorf("parameter must contain exactly one of content and schema: %v", value) + return fmt.Errorf("header schema is invalid: %v", e) + } + if schema := value.Schema; schema != nil { + if err := schema.Validate(ctx); err != nil { + return fmt.Errorf("header schema is invalid: %v", err) + } + } + + if content := value.Content; content != nil { + if err := content.Validate(ctx); err != nil { + return fmt.Errorf("header content is invalid: %v", err) + } + } + return nil +} + +func (value Header) JSONLookup(token string) (interface{}, error) { + switch token { + case "schema": + if value.Schema != nil { + if value.Schema.Ref != "" { + return &Ref{Ref: value.Schema.Ref}, nil + } + return value.Schema.Value, nil + } + case "name": + return value.Name, nil + case "in": + return value.In, nil + case "description": + return value.Description, nil + case "style": + return value.Style, nil + case "explode": + return value.Explode, nil + case "allowEmptyValue": + return value.AllowEmptyValue, nil + case "allowReserved": + return value.AllowReserved, nil + case "deprecated": + return value.Deprecated, nil + case "required": + return value.Required, nil + case "example": + return value.Example, nil + case "examples": + return value.Examples, nil + case "content": + return value.Content, nil + } + + v, _, err := jsonpointer.GetForToken(value.ExtensionProps, token) + return v, err +} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/info.go b/vendor/github.com/getkin/kin-openapi/openapi3/info.go new file mode 100644 index 00000000000..2adffff1a8f --- /dev/null +++ b/vendor/github.com/getkin/kin-openapi/openapi3/info.go @@ -0,0 +1,93 @@ +package openapi3 + +import ( + "context" + "errors" + + "github.com/getkin/kin-openapi/jsoninfo" +) + +// Info is specified by OpenAPI/Swagger standard version 3.0. 
+type Info struct { + ExtensionProps + Title string `json:"title" yaml:"title"` // Required + Description string `json:"description,omitempty" yaml:"description,omitempty"` + TermsOfService string `json:"termsOfService,omitempty" yaml:"termsOfService,omitempty"` + Contact *Contact `json:"contact,omitempty" yaml:"contact,omitempty"` + License *License `json:"license,omitempty" yaml:"license,omitempty"` + Version string `json:"version" yaml:"version"` // Required +} + +func (value *Info) MarshalJSON() ([]byte, error) { + return jsoninfo.MarshalStrictStruct(value) +} + +func (value *Info) UnmarshalJSON(data []byte) error { + return jsoninfo.UnmarshalStrictStruct(data, value) +} + +func (value *Info) Validate(ctx context.Context) error { + if contact := value.Contact; contact != nil { + if err := contact.Validate(ctx); err != nil { + return err + } + } + + if license := value.License; license != nil { + if err := license.Validate(ctx); err != nil { + return err + } + } + + if value.Version == "" { + return errors.New("value of version must be a non-empty string") + } + + if value.Title == "" { + return errors.New("value of title must be a non-empty string") + } + + return nil +} + +// Contact is specified by OpenAPI/Swagger standard version 3.0. +type Contact struct { + ExtensionProps + Name string `json:"name,omitempty" yaml:"name,omitempty"` + URL string `json:"url,omitempty" yaml:"url,omitempty"` + Email string `json:"email,omitempty" yaml:"email,omitempty"` +} + +func (value *Contact) MarshalJSON() ([]byte, error) { + return jsoninfo.MarshalStrictStruct(value) +} + +func (value *Contact) UnmarshalJSON(data []byte) error { + return jsoninfo.UnmarshalStrictStruct(data, value) +} + +func (value *Contact) Validate(ctx context.Context) error { + return nil +} + +// License is specified by OpenAPI/Swagger standard version 3.0. +type License struct { + ExtensionProps + Name string `json:"name" yaml:"name"` // Required + URL string `json:"url,omitempty" yaml:"url,omitempty"` +} + +func (value *License) MarshalJSON() ([]byte, error) { + return jsoninfo.MarshalStrictStruct(value) +} + +func (value *License) UnmarshalJSON(data []byte) error { + return jsoninfo.UnmarshalStrictStruct(data, value) +} + +func (value *License) Validate(ctx context.Context) error { + if value.Name == "" { + return errors.New("value of license name must be a non-empty string") + } + return nil +} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/internalize_refs.go b/vendor/github.com/getkin/kin-openapi/openapi3/internalize_refs.go new file mode 100644 index 00000000000..3a993bfb47f --- /dev/null +++ b/vendor/github.com/getkin/kin-openapi/openapi3/internalize_refs.go @@ -0,0 +1,369 @@ +package openapi3 + +import ( + "context" + "path/filepath" + "strings" +) + +type RefNameResolver func(string) string + +// DefaultRefNameResolver is a default implementation of refNameResolver for the +// InternalizeRefs function. +// +// If a reference points to an element inside a document, it returns the last +// element in the reference using filepath.Base. Otherwise if the reference points +// to a file, it returns the file name trimmed of all extensions.
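+// +// For example (editor's illustration, derived from the implementation below): +// "common.yml#/components/schemas/Pet" resolves to "Pet", while +// "dir/common.yaml" (no fragment) resolves to "common".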
+func DefaultRefNameResolver(ref string) string { + if ref == "" { + return "" + } + split := strings.SplitN(ref, "#", 2) + if len(split) == 2 { + return filepath.Base(split[1]) + } + ref = split[0] + for ext := filepath.Ext(ref); len(ext) > 0; ext = filepath.Ext(ref) { + ref = strings.TrimSuffix(ref, ext) + } + return filepath.Base(ref) +} + +func schemaNames(s Schemas) []string { + out := make([]string, 0, len(s)) + for i := range s { + out = append(out, i) + } + return out +} + +func parametersMapNames(s ParametersMap) []string { + out := make([]string, 0, len(s)) + for i := range s { + out = append(out, i) + } + return out +} + +func isExternalRef(ref string) bool { + return ref != "" && !strings.HasPrefix(ref, "#/components/") +} + +func (doc *T) addSchemaToSpec(s *SchemaRef, refNameResolver RefNameResolver) { + if s == nil || !isExternalRef(s.Ref) { + return + } + + name := refNameResolver(s.Ref) + if _, ok := doc.Components.Schemas[name]; ok { + s.Ref = "#/components/schemas/" + name + return + } + + if doc.Components.Schemas == nil { + doc.Components.Schemas = make(Schemas) + } + doc.Components.Schemas[name] = s.Value.NewRef() + s.Ref = "#/components/schemas/" + name +} + +func (doc *T) addParameterToSpec(p *ParameterRef, refNameResolver RefNameResolver) { + if p == nil || !isExternalRef(p.Ref) { + return + } + name := refNameResolver(p.Ref) + if _, ok := doc.Components.Parameters[name]; ok { + p.Ref = "#/components/parameters/" + name + return + } + + if doc.Components.Parameters == nil { + doc.Components.Parameters = make(ParametersMap) + } + doc.Components.Parameters[name] = &ParameterRef{Value: p.Value} + p.Ref = "#/components/parameters/" + name +} + +func (doc *T) addHeaderToSpec(h *HeaderRef, refNameResolver RefNameResolver) { + if h == nil || !isExternalRef(h.Ref) { + return + } + name := refNameResolver(h.Ref) + if _, ok := doc.Components.Headers[name]; ok { + h.Ref = "#/components/headers/" + name + return + } + if doc.Components.Headers == nil { + doc.Components.Headers = make(Headers) + } + doc.Components.Headers[name] = &HeaderRef{Value: h.Value} + h.Ref = "#/components/headers/" + name +} + +func (doc *T) addRequestBodyToSpec(r *RequestBodyRef, refNameResolver RefNameResolver) { + if r == nil || !isExternalRef(r.Ref) { + return + } + name := refNameResolver(r.Ref) + if _, ok := doc.Components.RequestBodies[name]; ok { + r.Ref = "#/components/requestBodies/" + name + return + } + if doc.Components.RequestBodies == nil { + doc.Components.RequestBodies = make(RequestBodies) + } + doc.Components.RequestBodies[name] = &RequestBodyRef{Value: r.Value} + r.Ref = "#/components/requestBodies/" + name +} + +func (doc *T) addResponseToSpec(r *ResponseRef, refNameResolver RefNameResolver) { + if r == nil || !isExternalRef(r.Ref) { + return + } + name := refNameResolver(r.Ref) + if _, ok := doc.Components.Responses[name]; ok { + r.Ref = "#/components/responses/" + name + return + } + if doc.Components.Responses == nil { + doc.Components.Responses = make(Responses) + } + doc.Components.Responses[name] = &ResponseRef{Value: r.Value} + r.Ref = "#/components/responses/" + name + +} + +func (doc *T) addSecuritySchemeToSpec(ss *SecuritySchemeRef, refNameResolver RefNameResolver) { + if ss == nil || !isExternalRef(ss.Ref) { + return + } + name := refNameResolver(ss.Ref) + if _, ok := doc.Components.SecuritySchemes[name]; ok { + ss.Ref = "#/components/securitySchemes/" + name + return + } + if doc.Components.SecuritySchemes == nil { + doc.Components.SecuritySchemes = make(SecuritySchemes) 
+ } + doc.Components.SecuritySchemes[name] = &SecuritySchemeRef{Value: ss.Value} + ss.Ref = "#/components/securitySchemes/" + name +} + +func (doc *T) addExampleToSpec(e *ExampleRef, refNameResolver RefNameResolver) { + if e == nil || !isExternalRef(e.Ref) { + return + } + name := refNameResolver(e.Ref) + if _, ok := doc.Components.Examples[name]; ok { + e.Ref = "#/components/examples/" + name + return + } + if doc.Components.Examples == nil { + doc.Components.Examples = make(Examples) + } + doc.Components.Examples[name] = &ExampleRef{Value: e.Value} + e.Ref = "#/components/examples/" + name +} + +func (doc *T) addLinkToSpec(l *LinkRef, refNameResolver RefNameResolver) { + if l == nil || !isExternalRef(l.Ref) { + return + } + name := refNameResolver(l.Ref) + if _, ok := doc.Components.Links[name]; ok { + l.Ref = "#/components/links/" + name + return + } + if doc.Components.Links == nil { + doc.Components.Links = make(Links) + } + doc.Components.Links[name] = &LinkRef{Value: l.Value} + l.Ref = "#/components/links/" + name +} + +func (doc *T) addCallbackToSpec(c *CallbackRef, refNameResolver RefNameResolver) { + if c == nil || !isExternalRef(c.Ref) { + return + } + name := refNameResolver(c.Ref) + if _, ok := doc.Components.Callbacks[name]; ok { + c.Ref = "#/components/callbacks/" + name + return + } + if doc.Components.Callbacks == nil { + doc.Components.Callbacks = make(Callbacks) + } + doc.Components.Callbacks[name] = &CallbackRef{Value: c.Value} + c.Ref = "#/components/callbacks/" + name +} + +func (doc *T) derefSchema(s *Schema, refNameResolver RefNameResolver) { + if s == nil { + return + } + + for _, list := range []SchemaRefs{s.AllOf, s.AnyOf, s.OneOf} { + for _, s2 := range list { + doc.addSchemaToSpec(s2, refNameResolver) + if s2 != nil { + doc.derefSchema(s2.Value, refNameResolver) + } + } + } + for _, s2 := range s.Properties { + doc.addSchemaToSpec(s2, refNameResolver) + if s2 != nil { + doc.derefSchema(s2.Value, refNameResolver) + } + } + for _, ref := range []*SchemaRef{s.Not, s.AdditionalProperties, s.Items} { + doc.addSchemaToSpec(ref, refNameResolver) + if ref != nil { + doc.derefSchema(ref.Value, refNameResolver) + } + } +} + +func (doc *T) derefHeaders(hs Headers, refNameResolver RefNameResolver) { + for _, h := range hs { + doc.addHeaderToSpec(h, refNameResolver) + doc.derefParameter(h.Value.Parameter, refNameResolver) + } +} + +func (doc *T) derefExamples(es Examples, refNameResolver RefNameResolver) { + for _, e := range es { + doc.addExampleToSpec(e, refNameResolver) + } +} + +func (doc *T) derefContent(c Content, refNameResolver RefNameResolver) { + for _, mediatype := range c { + doc.addSchemaToSpec(mediatype.Schema, refNameResolver) + if mediatype.Schema != nil { + doc.derefSchema(mediatype.Schema.Value, refNameResolver) + } + doc.derefExamples(mediatype.Examples, refNameResolver) + for _, e := range mediatype.Encoding { + doc.derefHeaders(e.Headers, refNameResolver) + } + } +} + +func (doc *T) derefLinks(ls Links, refNameResolver RefNameResolver) { + for _, l := range ls { + doc.addLinkToSpec(l, refNameResolver) + } +} + +func (doc *T) derefResponses(es Responses, refNameResolver RefNameResolver) { + for _, e := range es { + doc.addResponseToSpec(e, refNameResolver) + if e.Value != nil { + doc.derefHeaders(e.Value.Headers, refNameResolver) + doc.derefContent(e.Value.Content, refNameResolver) + doc.derefLinks(e.Value.Links, refNameResolver) + } + } +} + +func (doc *T) derefParameter(p Parameter, refNameResolver RefNameResolver) { + doc.addSchemaToSpec(p.Schema,
refNameResolver) + doc.derefContent(p.Content, refNameResolver) + if p.Schema != nil { + doc.derefSchema(p.Schema.Value, refNameResolver) + } +} + +func (doc *T) derefRequestBody(r RequestBody, refNameResolver RefNameResolver) { + doc.derefContent(r.Content, refNameResolver) +} + +func (doc *T) derefPaths(paths map[string]*PathItem, refNameResolver RefNameResolver) { + for _, ops := range paths { + // inline full operations + ops.Ref = "" + + for _, op := range ops.Operations() { + doc.addRequestBodyToSpec(op.RequestBody, refNameResolver) + if op.RequestBody != nil && op.RequestBody.Value != nil { + doc.derefRequestBody(*op.RequestBody.Value, refNameResolver) + } + for _, cb := range op.Callbacks { + doc.addCallbackToSpec(cb, refNameResolver) + if cb.Value != nil { + doc.derefPaths(*cb.Value, refNameResolver) + } + } + doc.derefResponses(op.Responses, refNameResolver) + for _, param := range op.Parameters { + doc.addParameterToSpec(param, refNameResolver) + if param.Value != nil { + doc.derefParameter(*param.Value, refNameResolver) + } + } + } + } +} + +// InternalizeRefs removes all references to external files from the spec and moves them +// to the components section. +// +// refNameResolver takes in a reference and returns a name to store the reference under locally. +// It MUST return a unique name for each reference type. +// A default implementation is provided that will suffice for most use cases. See the function +// documentation for more details. +// +// Example: +// +// doc.InternalizeRefs(context.Background(), nil) +func (doc *T) InternalizeRefs(ctx context.Context, refNameResolver func(ref string) string) { + if refNameResolver == nil { + refNameResolver = DefaultRefNameResolver + } + + // Handle components section + names := schemaNames(doc.Components.Schemas) + for _, name := range names { + schema := doc.Components.Schemas[name] + doc.addSchemaToSpec(schema, refNameResolver) + if schema != nil { + schema.Ref = "" // always dereference the top level + doc.derefSchema(schema.Value, refNameResolver) + } + } + names = parametersMapNames(doc.Components.Parameters) + for _, name := range names { + p := doc.Components.Parameters[name] + doc.addParameterToSpec(p, refNameResolver) + if p != nil && p.Value != nil { + p.Ref = "" // always dereference the top level + doc.derefParameter(*p.Value, refNameResolver) + } + } + doc.derefHeaders(doc.Components.Headers, refNameResolver) + for _, req := range doc.Components.RequestBodies { + doc.addRequestBodyToSpec(req, refNameResolver) + if req != nil && req.Value != nil { + req.Ref = "" // always dereference the top level + doc.derefRequestBody(*req.Value, refNameResolver) + } + } + doc.derefResponses(doc.Components.Responses, refNameResolver) + for _, ss := range doc.Components.SecuritySchemes { + doc.addSecuritySchemeToSpec(ss, refNameResolver) + } + doc.derefExamples(doc.Components.Examples, refNameResolver) + doc.derefLinks(doc.Components.Links, refNameResolver) + for _, cb := range doc.Components.Callbacks { + doc.addCallbackToSpec(cb, refNameResolver) + if cb != nil && cb.Value != nil { + cb.Ref = "" // always dereference the top level + doc.derefPaths(*cb.Value, refNameResolver) + } + } + + doc.derefPaths(doc.Paths, refNameResolver) +} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/link.go b/vendor/github.com/getkin/kin-openapi/openapi3/link.go new file mode 100644 index 00000000000..7d627b8bcd2 --- /dev/null +++ b/vendor/github.com/getkin/kin-openapi/openapi3/link.go @@ -0,0 +1,55 @@ +package openapi3 + +import ( + "context"
+ "errors" + "fmt" + + "github.com/getkin/kin-openapi/jsoninfo" + "github.com/go-openapi/jsonpointer" +) + +type Links map[string]*LinkRef + +func (l Links) JSONLookup(token string) (interface{}, error) { + ref, ok := l[token] + if ok == false { + return nil, fmt.Errorf("object has no field %q", token) + } + + if ref != nil && ref.Ref != "" { + return &Ref{Ref: ref.Ref}, nil + } + return ref.Value, nil +} + +var _ jsonpointer.JSONPointable = (*Links)(nil) + +// Link is specified by OpenAPI/Swagger standard version 3.0. +type Link struct { + ExtensionProps + OperationID string `json:"operationId,omitempty" yaml:"operationId,omitempty"` + OperationRef string `json:"operationRef,omitempty" yaml:"operationRef,omitempty"` + Description string `json:"description,omitempty" yaml:"description,omitempty"` + Parameters map[string]interface{} `json:"parameters,omitempty" yaml:"parameters,omitempty"` + Server *Server `json:"server,omitempty" yaml:"server,omitempty"` + RequestBody interface{} `json:"requestBody,omitempty" yaml:"requestBody,omitempty"` +} + +func (value *Link) MarshalJSON() ([]byte, error) { + return jsoninfo.MarshalStrictStruct(value) +} + +func (value *Link) UnmarshalJSON(data []byte) error { + return jsoninfo.UnmarshalStrictStruct(data, value) +} + +func (value *Link) Validate(ctx context.Context) error { + if value.OperationID == "" && value.OperationRef == "" { + return errors.New("missing operationId or operationRef on link") + } + if value.OperationID != "" && value.OperationRef != "" { + return fmt.Errorf("operationId %q and operationRef %q are mutually exclusive", value.OperationID, value.OperationRef) + } + return nil +} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/loader.go b/vendor/github.com/getkin/kin-openapi/openapi3/loader.go new file mode 100644 index 00000000000..0b8d0e1cc0c --- /dev/null +++ b/vendor/github.com/getkin/kin-openapi/openapi3/loader.go @@ -0,0 +1,1047 @@ +package openapi3 + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "path" + "path/filepath" + "reflect" + "strconv" + "strings" + + "github.com/ghodss/yaml" +) + +func foundUnresolvedRef(ref string) error { + return fmt.Errorf("found unresolved ref: %q", ref) +} + +func failedToResolveRefFragmentPart(value, what string) error { + return fmt.Errorf("failed to resolve %q in fragment in URI: %q", what, value) +} + +// Loader helps deserialize an OpenAPIv3 document +type Loader struct { + // IsExternalRefsAllowed enables visiting other files + IsExternalRefsAllowed bool + + // ReadFromURIFunc allows overriding the any file/URL reading func + ReadFromURIFunc func(loader *Loader, url *url.URL) ([]byte, error) + + Context context.Context + + rootDir string + + visitedPathItemRefs map[string]struct{} + + visitedDocuments map[string]*T + + visitedExample map[*Example]struct{} + visitedHeader map[*Header]struct{} + visitedLink map[*Link]struct{} + visitedParameter map[*Parameter]struct{} + visitedRequestBody map[*RequestBody]struct{} + visitedResponse map[*Response]struct{} + visitedSchema map[*Schema]struct{} + visitedSecurityScheme map[*SecurityScheme]struct{} +} + +// NewLoader returns an empty Loader +func NewLoader() *Loader { + return &Loader{} +} + +func (loader *Loader) resetVisitedPathItemRefs() { + loader.visitedPathItemRefs = make(map[string]struct{}) +} + +// LoadFromURI loads a spec from a remote URL +func (loader *Loader) LoadFromURI(location *url.URL) (*T, error) { + loader.resetVisitedPathItemRefs() + return 
loader.loadFromURIInternal(location) +} + +// LoadFromFile loads a spec from a local file path +func (loader *Loader) LoadFromFile(location string) (*T, error) { + loader.rootDir = path.Dir(location) + return loader.LoadFromURI(&url.URL{Path: filepath.ToSlash(location)}) +} + +func (loader *Loader) loadFromURIInternal(location *url.URL) (*T, error) { + data, err := loader.readURL(location) + if err != nil { + return nil, err + } + return loader.loadFromDataWithPathInternal(data, location) +} + +func (loader *Loader) allowsExternalRefs(ref string) (err error) { + if !loader.IsExternalRefsAllowed { + err = fmt.Errorf("encountered disallowed external reference: %q", ref) + } + return +} + +// loadSingleElementFromURI reads the data from ref and unmarshals to the passed element. +func (loader *Loader) loadSingleElementFromURI(ref string, rootPath *url.URL, element interface{}) (*url.URL, error) { + if err := loader.allowsExternalRefs(ref); err != nil { + return nil, err + } + + parsedURL, err := url.Parse(ref) + if err != nil { + return nil, err + } + if fragment := parsedURL.Fragment; fragment != "" { + return nil, fmt.Errorf("unexpected ref fragment %q", fragment) + } + + resolvedPath, err := resolvePath(rootPath, parsedURL) + if err != nil { + return nil, fmt.Errorf("could not resolve path: %v", err) + } + + data, err := loader.readURL(resolvedPath) + if err != nil { + return nil, err + } + if err := yaml.Unmarshal(data, element); err != nil { + return nil, err + } + + return resolvedPath, nil +} + +func (loader *Loader) readURL(location *url.URL) ([]byte, error) { + if f := loader.ReadFromURIFunc; f != nil { + return f(loader, location) + } + + if location.Scheme != "" && location.Host != "" { + resp, err := http.Get(location.String()) + if err != nil { + return nil, err + } + defer resp.Body.Close() + if resp.StatusCode > 399 { + return nil, fmt.Errorf("error loading %q: request returned status code %d", location.String(), resp.StatusCode) + } + return ioutil.ReadAll(resp.Body) + } + if location.Scheme != "" || location.Host != "" || location.RawQuery != "" { + return nil, fmt.Errorf("unsupported URI: %q", location.String()) + } + return ioutil.ReadFile(location.Path) +} + +// LoadFromData loads a spec from a byte array +func (loader *Loader) LoadFromData(data []byte) (*T, error) { + loader.resetVisitedPathItemRefs() + doc := &T{} + if err := yaml.Unmarshal(data, doc); err != nil { + return nil, err + } + if err := loader.ResolveRefsIn(doc, nil); err != nil { + return nil, err + } + return doc, nil +} + +// LoadFromDataWithPath takes the OpenAPI document data in bytes and a path where the resolver can find referred +// elements and returns a *T with all resolved data or an error if unable to load data or resolve refs. 
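+// +// A sketch of typical usage (editor's illustration; error handling elided, and the +// spec bytes are assumed to be in data): +// +// loader := NewLoader() +// loader.IsExternalRefsAllowed = true +// doc, _ := loader.LoadFromDataWithPath(data, &url.URL{Path: "spec/openapi.yml"})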
+func (loader *Loader) LoadFromDataWithPath(data []byte, location *url.URL) (*T, error) { + loader.resetVisitedPathItemRefs() + return loader.loadFromDataWithPathInternal(data, location) +} + +func (loader *Loader) loadFromDataWithPathInternal(data []byte, location *url.URL) (*T, error) { + if loader.visitedDocuments == nil { + loader.visitedDocuments = make(map[string]*T) + } + uri := location.String() + if doc, ok := loader.visitedDocuments[uri]; ok { + return doc, nil + } + + doc := &T{} + loader.visitedDocuments[uri] = doc + + if err := yaml.Unmarshal(data, doc); err != nil { + return nil, err + } + if err := loader.ResolveRefsIn(doc, location); err != nil { + return nil, err + } + + return doc, nil +} + +// ResolveRefsIn expands references if for instance spec was just unmarshalled +func (loader *Loader) ResolveRefsIn(doc *T, location *url.URL) (err error) { + if loader.visitedPathItemRefs == nil { + loader.resetVisitedPathItemRefs() + } + + // Visit all components + components := doc.Components + for _, component := range components.Headers { + if err = loader.resolveHeaderRef(doc, component, location); err != nil { + return + } + } + for _, component := range components.Parameters { + if err = loader.resolveParameterRef(doc, component, location); err != nil { + return + } + } + for _, component := range components.RequestBodies { + if err = loader.resolveRequestBodyRef(doc, component, location); err != nil { + return + } + } + for _, component := range components.Responses { + if err = loader.resolveResponseRef(doc, component, location); err != nil { + return + } + } + for _, component := range components.Schemas { + if err = loader.resolveSchemaRef(doc, component, location); err != nil { + return + } + } + for _, component := range components.SecuritySchemes { + if err = loader.resolveSecuritySchemeRef(doc, component, location); err != nil { + return + } + } + for _, component := range components.Examples { + if err = loader.resolveExampleRef(doc, component, location); err != nil { + return + } + } + for _, component := range components.Callbacks { + if err = loader.resolveCallbackRef(doc, component, location); err != nil { + return + } + } + + // Visit all operations + for entrypoint, pathItem := range doc.Paths { + if pathItem == nil { + continue + } + if err = loader.resolvePathItemRef(doc, entrypoint, pathItem, location); err != nil { + return + } + } + + return +} + +func join(basePath *url.URL, relativePath *url.URL) (*url.URL, error) { + if basePath == nil { + return relativePath, nil + } + newPath, err := url.Parse(basePath.String()) + if err != nil { + return nil, fmt.Errorf("cannot copy path: %q", basePath.String()) + } + newPath.Path = path.Join(path.Dir(newPath.Path), relativePath.Path) + return newPath, nil +} + +func resolvePath(basePath *url.URL, componentPath *url.URL) (*url.URL, error) { + if componentPath.Scheme == "" && componentPath.Host == "" { + // support absolute paths + if componentPath.Path[0] == '/' { + return componentPath, nil + } + return join(basePath, componentPath) + } + return componentPath, nil +} + +func isSingleRefElement(ref string) bool { + return !strings.Contains(ref, "#") +} + +func (loader *Loader) resolveComponent( + doc *T, + ref string, + path *url.URL, + resolved interface{}, +) ( + componentPath *url.URL, + err error, +) { + if doc, ref, componentPath, err = loader.resolveRef(doc, ref, path); err != nil { + return nil, err + } + + parsedURL, err := url.Parse(ref) + if err != nil { + return nil, fmt.Errorf("cannot parse reference: %q: 
%v", ref, parsedURL) + } + fragment := parsedURL.Fragment + if !strings.HasPrefix(fragment, "/") { + return nil, fmt.Errorf("expected fragment prefix '#/' in URI %q", ref) + } + + drill := func(cursor interface{}) (interface{}, error) { + for _, pathPart := range strings.Split(fragment[1:], "/") { + pathPart = unescapeRefString(pathPart) + + if cursor, err = drillIntoField(cursor, pathPart); err != nil { + e := failedToResolveRefFragmentPart(ref, pathPart) + return nil, fmt.Errorf("%s: %s", e.Error(), err.Error()) + } + if cursor == nil { + return nil, failedToResolveRefFragmentPart(ref, pathPart) + } + } + return cursor, nil + } + var cursor interface{} + if cursor, err = drill(doc); err != nil { + if path == nil { + return nil, err + } + var err2 error + data, err2 := loader.readURL(path) + if err2 != nil { + return nil, err + } + if err2 = yaml.Unmarshal(data, &cursor); err2 != nil { + return nil, err + } + if cursor, err2 = drill(cursor); err2 != nil || cursor == nil { + return nil, err + } + err = nil + } + + switch { + case reflect.TypeOf(cursor) == reflect.TypeOf(resolved): + reflect.ValueOf(resolved).Elem().Set(reflect.ValueOf(cursor).Elem()) + return componentPath, nil + + case reflect.TypeOf(cursor) == reflect.TypeOf(map[string]interface{}{}): + codec := func(got, expect interface{}) error { + enc, err := json.Marshal(got) + if err != nil { + return err + } + if err = json.Unmarshal(enc, expect); err != nil { + return err + } + return nil + } + if err := codec(cursor, resolved); err != nil { + return nil, fmt.Errorf("bad data in %q", ref) + } + return componentPath, nil + + default: + return nil, fmt.Errorf("bad data in %q", ref) + } +} + +func drillIntoField(cursor interface{}, fieldName string) (interface{}, error) { + // Special case due to multijson + if s, ok := cursor.(*SchemaRef); ok && fieldName == "additionalProperties" { + if ap := s.Value.AdditionalPropertiesAllowed; ap != nil { + return *ap, nil + } + return s.Value.AdditionalProperties, nil + } + + switch val := reflect.Indirect(reflect.ValueOf(cursor)); val.Kind() { + case reflect.Map: + elementValue := val.MapIndex(reflect.ValueOf(fieldName)) + if !elementValue.IsValid() { + return nil, fmt.Errorf("map key %q not found", fieldName) + } + return elementValue.Interface(), nil + + case reflect.Slice: + i, err := strconv.ParseUint(fieldName, 10, 32) + if err != nil { + return nil, err + } + index := int(i) + if 0 > index || index >= val.Len() { + return nil, errors.New("slice index out of bounds") + } + return val.Index(index).Interface(), nil + + case reflect.Struct: + hasFields := false + for i := 0; i < val.NumField(); i++ { + hasFields = true + field := val.Type().Field(i) + tagValue := field.Tag.Get("yaml") + yamlKey := strings.Split(tagValue, ",")[0] + if yamlKey == "-" { + tagValue := field.Tag.Get("multijson") + yamlKey = strings.Split(tagValue, ",")[0] + } + if yamlKey == fieldName { + return val.Field(i).Interface(), nil + } + } + // if cursor is a "ref wrapper" struct (e.g. 
RequestBodyRef), + if _, ok := val.Type().FieldByName("Value"); ok { + // try digging into its Value field + return drillIntoField(val.FieldByName("Value").Interface(), fieldName) + } + if hasFields { + if ff := val.Type().Field(0); ff.PkgPath == "" && ff.Name == "ExtensionProps" { + extensions := val.Field(0).Interface().(ExtensionProps).Extensions + if enc, ok := extensions[fieldName]; ok { + var dec interface{} + if err := json.Unmarshal(enc.(json.RawMessage), &dec); err != nil { + return nil, err + } + return dec, nil + } + } + } + return nil, fmt.Errorf("struct field %q not found", fieldName) + + default: + return nil, errors.New("not a map, slice nor struct") + } +} + +func (loader *Loader) documentPathForRecursiveRef(current *url.URL, resolvedRef string) *url.URL { + if loader.rootDir == "" { + return current + } + return &url.URL{Path: path.Join(loader.rootDir, resolvedRef)} +} + +func (loader *Loader) resolveRef(doc *T, ref string, path *url.URL) (*T, string, *url.URL, error) { + if ref != "" && ref[0] == '#' { + return doc, ref, path, nil + } + + if err := loader.allowsExternalRefs(ref); err != nil { + return nil, "", nil, err + } + + parsedURL, err := url.Parse(ref) + if err != nil { + return nil, "", nil, fmt.Errorf("cannot parse reference: %q: %v", ref, err) + } + fragment := parsedURL.Fragment + parsedURL.Fragment = "" + + var resolvedPath *url.URL + if resolvedPath, err = resolvePath(path, parsedURL); err != nil { + return nil, "", nil, fmt.Errorf("error resolving path: %v", err) + } + + if doc, err = loader.loadFromURIInternal(resolvedPath); err != nil { + return nil, "", nil, fmt.Errorf("error resolving reference %q: %v", ref, err) + } + + return doc, "#" + fragment, resolvedPath, nil +} + +func (loader *Loader) resolveHeaderRef(doc *T, component *HeaderRef, documentPath *url.URL) (err error) { + if component != nil && component.Value != nil { + if loader.visitedHeader == nil { + loader.visitedHeader = make(map[*Header]struct{}) + } + if _, ok := loader.visitedHeader[component.Value]; ok { + return nil + } + loader.visitedHeader[component.Value] = struct{}{} + } + + if component == nil { + return errors.New("invalid header: value MUST be an object") + } + if ref := component.Ref; ref != "" { + if isSingleRefElement(ref) { + var header Header + if documentPath, err = loader.loadSingleElementFromURI(ref, documentPath, &header); err != nil { + return err + } + component.Value = &header + } else { + var resolved HeaderRef + componentPath, err := loader.resolveComponent(doc, ref, documentPath, &resolved) + if err != nil { + return err + } + if err := loader.resolveHeaderRef(doc, &resolved, componentPath); err != nil { + return err + } + component.Value = resolved.Value + documentPath = loader.documentPathForRecursiveRef(documentPath, resolved.Ref) + } + } + value := component.Value + if value == nil { + return nil + } + + if schema := value.Schema; schema != nil { + if err := loader.resolveSchemaRef(doc, schema, documentPath); err != nil { + return err + } + } + return nil +} + +func (loader *Loader) resolveParameterRef(doc *T, component *ParameterRef, documentPath *url.URL) (err error) { + if component != nil && component.Value != nil { + if loader.visitedParameter == nil { + loader.visitedParameter = make(map[*Parameter]struct{}) + } + if _, ok := loader.visitedParameter[component.Value]; ok { + return nil + } + loader.visitedParameter[component.Value] = struct{}{} + } + + if component == nil { + return errors.New("invalid parameter: value MUST be an object") + } +
ref := component.Ref + if ref != "" { + if isSingleRefElement(ref) { + var param Parameter + if documentPath, err = loader.loadSingleElementFromURI(ref, documentPath, ¶m); err != nil { + return err + } + component.Value = ¶m + } else { + var resolved ParameterRef + componentPath, err := loader.resolveComponent(doc, ref, documentPath, &resolved) + if err != nil { + return err + } + if err := loader.resolveParameterRef(doc, &resolved, componentPath); err != nil { + return err + } + component.Value = resolved.Value + documentPath = loader.documentPathForRecursiveRef(documentPath, resolved.Ref) + } + } + value := component.Value + if value == nil { + return nil + } + + if value.Content != nil && value.Schema != nil { + return errors.New("cannot contain both schema and content in a parameter") + } + for _, contentType := range value.Content { + if schema := contentType.Schema; schema != nil { + if err := loader.resolveSchemaRef(doc, schema, documentPath); err != nil { + return err + } + } + } + if schema := value.Schema; schema != nil { + if err := loader.resolveSchemaRef(doc, schema, documentPath); err != nil { + return err + } + } + return nil +} + +func (loader *Loader) resolveRequestBodyRef(doc *T, component *RequestBodyRef, documentPath *url.URL) (err error) { + if component != nil && component.Value != nil { + if loader.visitedRequestBody == nil { + loader.visitedRequestBody = make(map[*RequestBody]struct{}) + } + if _, ok := loader.visitedRequestBody[component.Value]; ok { + return nil + } + loader.visitedRequestBody[component.Value] = struct{}{} + } + + if component == nil { + return errors.New("invalid requestBody: value MUST be an object") + } + if ref := component.Ref; ref != "" { + if isSingleRefElement(ref) { + var requestBody RequestBody + if documentPath, err = loader.loadSingleElementFromURI(ref, documentPath, &requestBody); err != nil { + return err + } + component.Value = &requestBody + } else { + var resolved RequestBodyRef + componentPath, err := loader.resolveComponent(doc, ref, documentPath, &resolved) + if err != nil { + return err + } + if err = loader.resolveRequestBodyRef(doc, &resolved, componentPath); err != nil { + return err + } + component.Value = resolved.Value + documentPath = loader.documentPathForRecursiveRef(documentPath, resolved.Ref) + } + } + value := component.Value + if value == nil { + return nil + } + + for _, contentType := range value.Content { + for name, example := range contentType.Examples { + if err := loader.resolveExampleRef(doc, example, documentPath); err != nil { + return err + } + contentType.Examples[name] = example + } + if schema := contentType.Schema; schema != nil { + if err := loader.resolveSchemaRef(doc, schema, documentPath); err != nil { + return err + } + } + } + return nil +} + +func (loader *Loader) resolveResponseRef(doc *T, component *ResponseRef, documentPath *url.URL) (err error) { + if component != nil && component.Value != nil { + if loader.visitedResponse == nil { + loader.visitedResponse = make(map[*Response]struct{}) + } + if _, ok := loader.visitedResponse[component.Value]; ok { + return nil + } + loader.visitedResponse[component.Value] = struct{}{} + } + + if component == nil { + return errors.New("invalid response: value MUST be an object") + } + ref := component.Ref + if ref != "" { + if isSingleRefElement(ref) { + var resp Response + if documentPath, err = loader.loadSingleElementFromURI(ref, documentPath, &resp); err != nil { + return err + } + component.Value = &resp + } else { + var resolved ResponseRef + 
componentPath, err := loader.resolveComponent(doc, ref, documentPath, &resolved) + if err != nil { + return err + } + if err := loader.resolveResponseRef(doc, &resolved, componentPath); err != nil { + return err + } + component.Value = resolved.Value + documentPath = loader.documentPathForRecursiveRef(documentPath, resolved.Ref) + } + } + value := component.Value + if value == nil { + return nil + } + + for _, header := range value.Headers { + if err := loader.resolveHeaderRef(doc, header, documentPath); err != nil { + return err + } + } + for _, contentType := range value.Content { + if contentType == nil { + continue + } + for name, example := range contentType.Examples { + if err := loader.resolveExampleRef(doc, example, documentPath); err != nil { + return err + } + contentType.Examples[name] = example + } + if schema := contentType.Schema; schema != nil { + if err := loader.resolveSchemaRef(doc, schema, documentPath); err != nil { + return err + } + contentType.Schema = schema + } + } + for _, link := range value.Links { + if err := loader.resolveLinkRef(doc, link, documentPath); err != nil { + return err + } + } + return nil +} + +func (loader *Loader) resolveSchemaRef(doc *T, component *SchemaRef, documentPath *url.URL) (err error) { + if component != nil && component.Value != nil { + if loader.visitedSchema == nil { + loader.visitedSchema = make(map[*Schema]struct{}) + } + if _, ok := loader.visitedSchema[component.Value]; ok { + return nil + } + loader.visitedSchema[component.Value] = struct{}{} + } + + if component == nil { + return errors.New("invalid schema: value MUST be an object") + } + ref := component.Ref + if ref != "" { + if isSingleRefElement(ref) { + var schema Schema + if documentPath, err = loader.loadSingleElementFromURI(ref, documentPath, &schema); err != nil { + return err + } + component.Value = &schema + } else { + var resolved SchemaRef + componentPath, err := loader.resolveComponent(doc, ref, documentPath, &resolved) + if err != nil { + return err + } + if err := loader.resolveSchemaRef(doc, &resolved, componentPath); err != nil { + return err + } + component.Value = resolved.Value + documentPath = loader.documentPathForRecursiveRef(documentPath, resolved.Ref) + } + } + value := component.Value + if value == nil { + return nil + } + + // ResolveRefs referred schemas + if v := value.Items; v != nil { + if err := loader.resolveSchemaRef(doc, v, documentPath); err != nil { + return err + } + } + for _, v := range value.Properties { + if err := loader.resolveSchemaRef(doc, v, documentPath); err != nil { + return err + } + } + if v := value.AdditionalProperties; v != nil { + if err := loader.resolveSchemaRef(doc, v, documentPath); err != nil { + return err + } + } + if v := value.Not; v != nil { + if err := loader.resolveSchemaRef(doc, v, documentPath); err != nil { + return err + } + } + for _, v := range value.AllOf { + if err := loader.resolveSchemaRef(doc, v, documentPath); err != nil { + return err + } + } + for _, v := range value.AnyOf { + if err := loader.resolveSchemaRef(doc, v, documentPath); err != nil { + return err + } + } + for _, v := range value.OneOf { + if err := loader.resolveSchemaRef(doc, v, documentPath); err != nil { + return err + } + } + return nil +} + +func (loader *Loader) resolveSecuritySchemeRef(doc *T, component *SecuritySchemeRef, documentPath *url.URL) (err error) { + if component != nil && component.Value != nil { + if loader.visitedSecurityScheme == nil { + loader.visitedSecurityScheme = make(map[*SecurityScheme]struct{}) + } + if 
_, ok := loader.visitedSecurityScheme[component.Value]; ok { + return nil + } + loader.visitedSecurityScheme[component.Value] = struct{}{} + } + + if component == nil { + return errors.New("invalid securityScheme: value MUST be an object") + } + if ref := component.Ref; ref != "" { + if isSingleRefElement(ref) { + var scheme SecurityScheme + if _, err = loader.loadSingleElementFromURI(ref, documentPath, &scheme); err != nil { + return err + } + component.Value = &scheme + } else { + var resolved SecuritySchemeRef + componentPath, err := loader.resolveComponent(doc, ref, documentPath, &resolved) + if err != nil { + return err + } + if err := loader.resolveSecuritySchemeRef(doc, &resolved, componentPath); err != nil { + return err + } + component.Value = resolved.Value + _ = loader.documentPathForRecursiveRef(documentPath, resolved.Ref) + } + } + return nil +} + +func (loader *Loader) resolveExampleRef(doc *T, component *ExampleRef, documentPath *url.URL) (err error) { + if component != nil && component.Value != nil { + if loader.visitedExample == nil { + loader.visitedExample = make(map[*Example]struct{}) + } + if _, ok := loader.visitedExample[component.Value]; ok { + return nil + } + loader.visitedExample[component.Value] = struct{}{} + } + + if component == nil { + return errors.New("invalid example: value MUST be an object") + } + if ref := component.Ref; ref != "" { + if isSingleRefElement(ref) { + var example Example + if _, err = loader.loadSingleElementFromURI(ref, documentPath, &example); err != nil { + return err + } + component.Value = &example + } else { + var resolved ExampleRef + componentPath, err := loader.resolveComponent(doc, ref, documentPath, &resolved) + if err != nil { + return err + } + if err := loader.resolveExampleRef(doc, &resolved, componentPath); err != nil { + return err + } + component.Value = resolved.Value + _ = loader.documentPathForRecursiveRef(documentPath, resolved.Ref) + } + } + return nil +} + +func (loader *Loader) resolveCallbackRef(doc *T, component *CallbackRef, documentPath *url.URL) (err error) { + + if component == nil { + return errors.New("invalid callback: value MUST be an object") + } + if ref := component.Ref; ref != "" { + if isSingleRefElement(ref) { + var resolved Callback + if documentPath, err = loader.loadSingleElementFromURI(ref, documentPath, &resolved); err != nil { + return err + } + component.Value = &resolved + } else { + var resolved CallbackRef + componentPath, err := loader.resolveComponent(doc, ref, documentPath, &resolved) + if err != nil { + return err + } + if err := loader.resolveCallbackRef(doc, &resolved, componentPath); err != nil { + return err + } + component.Value = resolved.Value + documentPath = loader.documentPathForRecursiveRef(documentPath, resolved.Ref) + } + } + value := component.Value + if value == nil { + return nil + } + + for entrypoint, pathItem := range *value { + entrypoint, pathItem := entrypoint, pathItem + err = func() (err error) { + key := "-" + if documentPath != nil { + key = documentPath.EscapedPath() + } + key += entrypoint + if _, ok := loader.visitedPathItemRefs[key]; ok { + return nil + } + loader.visitedPathItemRefs[key] = struct{}{} + + if pathItem == nil { + return errors.New("invalid path item: value MUST be an object") + } + ref := pathItem.Ref + if ref != "" { + if isSingleRefElement(ref) { + var p PathItem + if documentPath, err = loader.loadSingleElementFromURI(ref, documentPath, &p); err != nil { + return err + } + *pathItem = p + } else { + if doc, ref, documentPath, err = 
loader.resolveRef(doc, ref, documentPath); err != nil { + return + } + + rest := strings.TrimPrefix(ref, "#/components/callbacks/") + if rest == ref { + return fmt.Errorf(`expected prefix "#/components/callbacks/" in URI %q`, ref) + } + id := unescapeRefString(rest) + + definitions := doc.Components.Callbacks + if definitions == nil { + return failedToResolveRefFragmentPart(ref, "callbacks") + } + resolved := definitions[id] + if resolved == nil { + return failedToResolveRefFragmentPart(ref, id) + } + + for _, p := range *resolved.Value { + *pathItem = *p + break + } + } + } + return loader.resolvePathItemRefContinued(doc, pathItem, documentPath) + }() + if err != nil { + return err + } + } + return nil +} + +func (loader *Loader) resolveLinkRef(doc *T, component *LinkRef, documentPath *url.URL) (err error) { + if component != nil && component.Value != nil { + if loader.visitedLink == nil { + loader.visitedLink = make(map[*Link]struct{}) + } + if _, ok := loader.visitedLink[component.Value]; ok { + return nil + } + loader.visitedLink[component.Value] = struct{}{} + } + + if component == nil { + return errors.New("invalid link: value MUST be an object") + } + if ref := component.Ref; ref != "" { + if isSingleRefElement(ref) { + var link Link + if _, err = loader.loadSingleElementFromURI(ref, documentPath, &link); err != nil { + return err + } + component.Value = &link + } else { + var resolved LinkRef + componentPath, err := loader.resolveComponent(doc, ref, documentPath, &resolved) + if err != nil { + return err + } + if err := loader.resolveLinkRef(doc, &resolved, componentPath); err != nil { + return err + } + component.Value = resolved.Value + _ = loader.documentPathForRecursiveRef(documentPath, resolved.Ref) + } + } + return nil +} + +func (loader *Loader) resolvePathItemRef(doc *T, entrypoint string, pathItem *PathItem, documentPath *url.URL) (err error) { + key := "_" + if documentPath != nil { + key = documentPath.EscapedPath() + } + key += entrypoint + if _, ok := loader.visitedPathItemRefs[key]; ok { + return nil + } + loader.visitedPathItemRefs[key] = struct{}{} + + if pathItem == nil { + return errors.New("invalid path item: value MUST be an object") + } + ref := pathItem.Ref + if ref != "" { + if isSingleRefElement(ref) { + var p PathItem + if documentPath, err = loader.loadSingleElementFromURI(ref, documentPath, &p); err != nil { + return err + } + *pathItem = p + } else { + if doc, ref, documentPath, err = loader.resolveRef(doc, ref, documentPath); err != nil { + return + } + + rest := strings.TrimPrefix(ref, "#/paths/") + if rest == ref { + return fmt.Errorf(`expected prefix "#/paths/" in URI %q`, ref) + } + id := unescapeRefString(rest) + + definitions := doc.Paths + if definitions == nil { + return failedToResolveRefFragmentPart(ref, "paths") + } + resolved := definitions[id] + if resolved == nil { + return failedToResolveRefFragmentPart(ref, id) + } + + *pathItem = *resolved + } + } + return loader.resolvePathItemRefContinued(doc, pathItem, documentPath) +} + +func (loader *Loader) resolvePathItemRefContinued(doc *T, pathItem *PathItem, documentPath *url.URL) (err error) { + for _, parameter := range pathItem.Parameters { + if err = loader.resolveParameterRef(doc, parameter, documentPath); err != nil { + return + } + } + for _, operation := range pathItem.Operations() { + for _, parameter := range operation.Parameters { + if err = loader.resolveParameterRef(doc, parameter, documentPath); err != nil { + return + } + } + if requestBody := operation.RequestBody; requestBody 
!= nil { + if err = loader.resolveRequestBodyRef(doc, requestBody, documentPath); err != nil { + return + } + } + for _, response := range operation.Responses { + if err = loader.resolveResponseRef(doc, response, documentPath); err != nil { + return + } + } + for _, callback := range operation.Callbacks { + if err = loader.resolveCallbackRef(doc, callback, documentPath); err != nil { + return + } + } + } + return +} + +func unescapeRefString(ref string) string { + return strings.Replace(strings.Replace(ref, "~1", "/", -1), "~0", "~", -1) +} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/media_type.go b/vendor/github.com/getkin/kin-openapi/openapi3/media_type.go new file mode 100644 index 00000000000..2dd0842f64e --- /dev/null +++ b/vendor/github.com/getkin/kin-openapi/openapi3/media_type.go @@ -0,0 +1,100 @@ +package openapi3 + +import ( + "context" + + "github.com/getkin/kin-openapi/jsoninfo" + "github.com/go-openapi/jsonpointer" +) + +// MediaType is specified by OpenAPI/Swagger 3.0 standard. +type MediaType struct { + ExtensionProps + + Schema *SchemaRef `json:"schema,omitempty" yaml:"schema,omitempty"` + Example interface{} `json:"example,omitempty" yaml:"example,omitempty"` + Examples Examples `json:"examples,omitempty" yaml:"examples,omitempty"` + Encoding map[string]*Encoding `json:"encoding,omitempty" yaml:"encoding,omitempty"` +} + +var _ jsonpointer.JSONPointable = (*MediaType)(nil) + +func NewMediaType() *MediaType { + return &MediaType{} +} + +func (mediaType *MediaType) WithSchema(schema *Schema) *MediaType { + if schema == nil { + mediaType.Schema = nil + } else { + mediaType.Schema = &SchemaRef{Value: schema} + } + return mediaType +} + +func (mediaType *MediaType) WithSchemaRef(schema *SchemaRef) *MediaType { + mediaType.Schema = schema + return mediaType +} + +func (mediaType *MediaType) WithExample(name string, value interface{}) *MediaType { + example := mediaType.Examples + if example == nil { + example = make(map[string]*ExampleRef) + mediaType.Examples = example + } + example[name] = &ExampleRef{ + Value: NewExample(value), + } + return mediaType +} + +func (mediaType *MediaType) WithEncoding(name string, enc *Encoding) *MediaType { + encoding := mediaType.Encoding + if encoding == nil { + encoding = make(map[string]*Encoding) + mediaType.Encoding = encoding + } + encoding[name] = enc + return mediaType +} + +func (mediaType *MediaType) MarshalJSON() ([]byte, error) { + return jsoninfo.MarshalStrictStruct(mediaType) +} + +func (mediaType *MediaType) UnmarshalJSON(data []byte) error { + return jsoninfo.UnmarshalStrictStruct(data, mediaType) +} + +func (value *MediaType) Validate(ctx context.Context) error { + if value == nil { + return nil + } + if schema := value.Schema; schema != nil { + if err := schema.Validate(ctx); err != nil { + return err + } + } + return nil +} + +func (mediaType MediaType) JSONLookup(token string) (interface{}, error) { + switch token { + case "schema": + if mediaType.Schema != nil { + if mediaType.Schema.Ref != "" { + return &Ref{Ref: mediaType.Schema.Ref}, nil + } + return mediaType.Schema.Value, nil + } + case "example": + return mediaType.Example, nil + case "examples": + return mediaType.Examples, nil + case "encoding": + return mediaType.Encoding, nil + } + v, _, err := jsonpointer.GetForToken(mediaType.ExtensionProps, token) + return v, err +} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/openapi3.go b/vendor/github.com/getkin/kin-openapi/openapi3/openapi3.go new file mode 100644 index 00000000000..ee6887727b7 
--- /dev/null +++ b/vendor/github.com/getkin/kin-openapi/openapi3/openapi3.go @@ -0,0 +1,105 @@ +package openapi3 + +import ( + "context" + "errors" + "fmt" + + "github.com/getkin/kin-openapi/jsoninfo" +) + +// T is the root of an OpenAPI v3 document +type T struct { + ExtensionProps + OpenAPI string `json:"openapi" yaml:"openapi"` // Required + Components Components `json:"components,omitempty" yaml:"components,omitempty"` + Info *Info `json:"info" yaml:"info"` // Required + Paths Paths `json:"paths" yaml:"paths"` // Required + Security SecurityRequirements `json:"security,omitempty" yaml:"security,omitempty"` + Servers Servers `json:"servers,omitempty" yaml:"servers,omitempty"` + Tags Tags `json:"tags,omitempty" yaml:"tags,omitempty"` + ExternalDocs *ExternalDocs `json:"externalDocs,omitempty" yaml:"externalDocs,omitempty"` +} + +func (doc *T) MarshalJSON() ([]byte, error) { + return jsoninfo.MarshalStrictStruct(doc) +} + +func (doc *T) UnmarshalJSON(data []byte) error { + return jsoninfo.UnmarshalStrictStruct(data, doc) +} + +func (doc *T) AddOperation(path string, method string, operation *Operation) { + paths := doc.Paths + if paths == nil { + paths = make(Paths) + doc.Paths = paths + } + pathItem := paths[path] + if pathItem == nil { + pathItem = &PathItem{} + paths[path] = pathItem + } + pathItem.SetOperation(method, operation) +} + +func (doc *T) AddServer(server *Server) { + doc.Servers = append(doc.Servers, server) +} + +func (value *T) Validate(ctx context.Context) error { + if value.OpenAPI == "" { + return errors.New("value of openapi must be a non-empty string") + } + + // NOTE: only mention info/components/paths/... key in this func's errors. + + { + wrap := func(e error) error { return fmt.Errorf("invalid components: %v", e) } + if err := value.Components.Validate(ctx); err != nil { + return wrap(err) + } + } + + { + wrap := func(e error) error { return fmt.Errorf("invalid info: %v", e) } + if v := value.Info; v != nil { + if err := v.Validate(ctx); err != nil { + return wrap(err) + } + } else { + return wrap(errors.New("must be an object")) + } + } + + { + wrap := func(e error) error { return fmt.Errorf("invalid paths: %v", e) } + if v := value.Paths; v != nil { + if err := v.Validate(ctx); err != nil { + return wrap(err) + } + } else { + return wrap(errors.New("must be an object")) + } + } + + { + wrap := func(e error) error { return fmt.Errorf("invalid security: %v", e) } + if v := value.Security; v != nil { + if err := v.Validate(ctx); err != nil { + return wrap(err) + } + } + } + + { + wrap := func(e error) error { return fmt.Errorf("invalid servers: %v", e) } + if v := value.Servers; v != nil { + if err := v.Validate(ctx); err != nil { + return wrap(err) + } + } + } + + return nil +} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/operation.go b/vendor/github.com/getkin/kin-openapi/openapi3/operation.go new file mode 100644 index 00000000000..0de7c421af0 --- /dev/null +++ b/vendor/github.com/getkin/kin-openapi/openapi3/operation.go @@ -0,0 +1,142 @@ +package openapi3 + +import ( + "context" + "errors" + "strconv" + + "github.com/getkin/kin-openapi/jsoninfo" + "github.com/go-openapi/jsonpointer" +) + +// Operation represents "operation" specified by OpenAPI/Swagger 3.0 standard. +type Operation struct { + ExtensionProps + + // Optional tags for documentation. + Tags []string `json:"tags,omitempty" yaml:"tags,omitempty"` + + // Optional short summary. + Summary string `json:"summary,omitempty" yaml:"summary,omitempty"` + + // Optional description.
Should use CommonMark syntax. + Description string `json:"description,omitempty" yaml:"description,omitempty"` + + // Optional operation ID. + OperationID string `json:"operationId,omitempty" yaml:"operationId,omitempty"` + + // Optional parameters. + Parameters Parameters `json:"parameters,omitempty" yaml:"parameters,omitempty"` + + // Optional body parameter. + RequestBody *RequestBodyRef `json:"requestBody,omitempty" yaml:"requestBody,omitempty"` + + // Responses. + Responses Responses `json:"responses" yaml:"responses"` // Required + + // Optional callbacks + Callbacks Callbacks `json:"callbacks,omitempty" yaml:"callbacks,omitempty"` + + Deprecated bool `json:"deprecated,omitempty" yaml:"deprecated,omitempty"` + + // Optional security requirements that overrides top-level security. + Security *SecurityRequirements `json:"security,omitempty" yaml:"security,omitempty"` + + // Optional servers that overrides top-level servers. + Servers *Servers `json:"servers,omitempty" yaml:"servers,omitempty"` + + ExternalDocs *ExternalDocs `json:"externalDocs,omitempty" yaml:"externalDocs,omitempty"` +} + +var _ jsonpointer.JSONPointable = (*Operation)(nil) + +func NewOperation() *Operation { + return &Operation{} +} + +func (operation *Operation) MarshalJSON() ([]byte, error) { + return jsoninfo.MarshalStrictStruct(operation) +} + +func (operation *Operation) UnmarshalJSON(data []byte) error { + return jsoninfo.UnmarshalStrictStruct(data, operation) +} + +func (operation Operation) JSONLookup(token string) (interface{}, error) { + switch token { + case "requestBody": + if operation.RequestBody != nil { + if operation.RequestBody.Ref != "" { + return &Ref{Ref: operation.RequestBody.Ref}, nil + } + return operation.RequestBody.Value, nil + } + case "tags": + return operation.Tags, nil + case "summary": + return operation.Summary, nil + case "description": + return operation.Description, nil + case "operationID": + return operation.OperationID, nil + case "parameters": + return operation.Parameters, nil + case "responses": + return operation.Responses, nil + case "callbacks": + return operation.Callbacks, nil + case "deprecated": + return operation.Deprecated, nil + case "security": + return operation.Security, nil + case "servers": + return operation.Servers, nil + case "externalDocs": + return operation.ExternalDocs, nil + } + + v, _, err := jsonpointer.GetForToken(operation.ExtensionProps, token) + return v, err +} + +func (operation *Operation) AddParameter(p *Parameter) { + operation.Parameters = append(operation.Parameters, &ParameterRef{ + Value: p, + }) +} + +func (operation *Operation) AddResponse(status int, response *Response) { + responses := operation.Responses + if responses == nil { + responses = NewResponses() + operation.Responses = responses + } + code := "default" + if status != 0 { + code = strconv.FormatInt(int64(status), 10) + } + responses[code] = &ResponseRef{ + Value: response, + } +} + +func (value *Operation) Validate(ctx context.Context) error { + if v := value.Parameters; v != nil { + if err := v.Validate(ctx); err != nil { + return err + } + } + if v := value.RequestBody; v != nil { + if err := v.Validate(ctx); err != nil { + return err + } + } + if v := value.Responses; v != nil { + if err := v.Validate(ctx); err != nil { + return err + } + } else { + return errors.New("value of responses must be an object") + } + return nil +} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/parameter.go b/vendor/github.com/getkin/kin-openapi/openapi3/parameter.go new file mode 
100644 index 00000000000..2081e4e1d92 --- /dev/null +++ b/vendor/github.com/getkin/kin-openapi/openapi3/parameter.go @@ -0,0 +1,305 @@ +package openapi3 + +import ( + "context" + "errors" + "fmt" + "strconv" + + "github.com/getkin/kin-openapi/jsoninfo" + "github.com/go-openapi/jsonpointer" +) + +type ParametersMap map[string]*ParameterRef + +var _ jsonpointer.JSONPointable = (*ParametersMap)(nil) + +func (p ParametersMap) JSONLookup(token string) (interface{}, error) { + ref, ok := p[token] + if ref == nil || !ok { + return nil, fmt.Errorf("object has no field %q", token) + } + + if ref.Ref != "" { + return &Ref{Ref: ref.Ref}, nil + } + return ref.Value, nil +} + +// Parameters is specified by OpenAPI/Swagger 3.0 standard. +type Parameters []*ParameterRef + +var _ jsonpointer.JSONPointable = (*Parameters)(nil) + +func (p Parameters) JSONLookup(token string) (interface{}, error) { + index, err := strconv.Atoi(token) + if err != nil { + return nil, err + } + + if index < 0 || index >= len(p) { + return nil, fmt.Errorf("index %d out of bounds of array of length %d", index, len(p)) + } + + ref := p[index] + if ref == nil { + return nil, fmt.Errorf("object has no field %q", token) + } + + if ref.Ref != "" { + return &Ref{Ref: ref.Ref}, nil + } + return ref.Value, nil +} + +func NewParameters() Parameters { + return make(Parameters, 0, 4) +} + +func (parameters Parameters) GetByInAndName(in string, name string) *Parameter { + for _, item := range parameters { + if v := item.Value; v != nil { + if v.Name == name && v.In == in { + return v + } + } + } + return nil +} + +func (value Parameters) Validate(ctx context.Context) error { + dupes := make(map[string]struct{}) + for _, item := range value { + if v := item.Value; v != nil { + key := v.In + ":" + v.Name + if _, ok := dupes[key]; ok { + return fmt.Errorf("more than one %q parameter has name %q", v.In, v.Name) + } + dupes[key] = struct{}{} + } + + if err := item.Validate(ctx); err != nil { + return err + } + } + return nil +} + +// Parameter is specified by OpenAPI/Swagger 3.0 standard.
+// See https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.0.md#parameterObject +type Parameter struct { + ExtensionProps + Name string `json:"name,omitempty" yaml:"name,omitempty"` + In string `json:"in,omitempty" yaml:"in,omitempty"` + Description string `json:"description,omitempty" yaml:"description,omitempty"` + Style string `json:"style,omitempty" yaml:"style,omitempty"` + Explode *bool `json:"explode,omitempty" yaml:"explode,omitempty"` + AllowEmptyValue bool `json:"allowEmptyValue,omitempty" yaml:"allowEmptyValue,omitempty"` + AllowReserved bool `json:"allowReserved,omitempty" yaml:"allowReserved,omitempty"` + Deprecated bool `json:"deprecated,omitempty" yaml:"deprecated,omitempty"` + Required bool `json:"required,omitempty" yaml:"required,omitempty"` + Schema *SchemaRef `json:"schema,omitempty" yaml:"schema,omitempty"` + Example interface{} `json:"example,omitempty" yaml:"example,omitempty"` + Examples Examples `json:"examples,omitempty" yaml:"examples,omitempty"` + Content Content `json:"content,omitempty" yaml:"content,omitempty"` +} + +var _ jsonpointer.JSONPointable = (*Parameter)(nil) + +const ( + ParameterInPath = "path" + ParameterInQuery = "query" + ParameterInHeader = "header" + ParameterInCookie = "cookie" +) + +func NewPathParameter(name string) *Parameter { + return &Parameter{ + Name: name, + In: ParameterInPath, + Required: true, + } +} + +func NewQueryParameter(name string) *Parameter { + return &Parameter{ + Name: name, + In: ParameterInQuery, + } +} + +func NewHeaderParameter(name string) *Parameter { + return &Parameter{ + Name: name, + In: ParameterInHeader, + } +} + +func NewCookieParameter(name string) *Parameter { + return &Parameter{ + Name: name, + In: ParameterInCookie, + } +} + +func (parameter *Parameter) WithDescription(value string) *Parameter { + parameter.Description = value + return parameter +} + +func (parameter *Parameter) WithRequired(value bool) *Parameter { + parameter.Required = value + return parameter +} + +func (parameter *Parameter) WithSchema(value *Schema) *Parameter { + if value == nil { + parameter.Schema = nil + } else { + parameter.Schema = &SchemaRef{ + Value: value, + } + } + return parameter +} + +func (parameter *Parameter) MarshalJSON() ([]byte, error) { + return jsoninfo.MarshalStrictStruct(parameter) +} + +func (parameter *Parameter) UnmarshalJSON(data []byte) error { + return jsoninfo.UnmarshalStrictStruct(data, parameter) +} + +func (value Parameter) JSONLookup(token string) (interface{}, error) { + switch token { + case "schema": + if value.Schema != nil { + if value.Schema.Ref != "" { + return &Ref{Ref: value.Schema.Ref}, nil + } + return value.Schema.Value, nil + } + case "name": + return value.Name, nil + case "in": + return value.In, nil + case "description": + return value.Description, nil + case "style": + return value.Style, nil + case "explode": + return value.Explode, nil + case "allowEmptyValue": + return value.AllowEmptyValue, nil + case "allowReserved": + return value.AllowReserved, nil + case "deprecated": + return value.Deprecated, nil + case "required": + return value.Required, nil + case "example": + return value.Example, nil + case "examples": + return value.Examples, nil + case "content": + return value.Content, nil + } + + v, _, err := jsonpointer.GetForToken(value.ExtensionProps, token) + return v, err +} + +// SerializationMethod returns a parameter's serialization method. 
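+//
+// Editor's note (illustrative sketch, not upstream code): the defaulting
+// rules described below mean a query parameter with no explicit style
+// serializes as form/exploded (SerializationForm is assumed to be defined
+// in serialization_method.go):
+//
+//   sm, _ := openapi3.NewQueryParameter("tags").SerializationMethod()
+//   // sm.Style == SerializationForm, sm.Explode == true
+//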
+// When a parameter's serialization method is not defined the method returns +// the default serialization method corresponding to a parameter's location. +func (parameter *Parameter) SerializationMethod() (*SerializationMethod, error) { + switch parameter.In { + case ParameterInPath, ParameterInHeader: + style := parameter.Style + if style == "" { + style = SerializationSimple + } + explode := false + if parameter.Explode != nil { + explode = *parameter.Explode + } + return &SerializationMethod{Style: style, Explode: explode}, nil + case ParameterInQuery, ParameterInCookie: + style := parameter.Style + if style == "" { + style = SerializationForm + } + explode := true + if parameter.Explode != nil { + explode = *parameter.Explode + } + return &SerializationMethod{Style: style, Explode: explode}, nil + default: + return nil, fmt.Errorf("unexpected parameter's 'in': %q", parameter.In) + } +} + +func (value *Parameter) Validate(ctx context.Context) error { + if value.Name == "" { + return errors.New("parameter name can't be blank") + } + in := value.In + switch in { + case + ParameterInPath, + ParameterInQuery, + ParameterInHeader, + ParameterInCookie: + default: + return fmt.Errorf("parameter can't have 'in' value %q", value.In) + } + + // Validate a parameter's serialization method. + sm, err := value.SerializationMethod() + if err != nil { + return err + } + var smSupported bool + switch { + case value.In == ParameterInPath && sm.Style == SerializationSimple && !sm.Explode, + value.In == ParameterInPath && sm.Style == SerializationSimple && sm.Explode, + value.In == ParameterInPath && sm.Style == SerializationLabel && !sm.Explode, + value.In == ParameterInPath && sm.Style == SerializationLabel && sm.Explode, + value.In == ParameterInPath && sm.Style == SerializationMatrix && !sm.Explode, + value.In == ParameterInPath && sm.Style == SerializationMatrix && sm.Explode, + + value.In == ParameterInQuery && sm.Style == SerializationForm && sm.Explode, + value.In == ParameterInQuery && sm.Style == SerializationForm && !sm.Explode, + value.In == ParameterInQuery && sm.Style == SerializationSpaceDelimited && sm.Explode, + value.In == ParameterInQuery && sm.Style == SerializationSpaceDelimited && !sm.Explode, + value.In == ParameterInQuery && sm.Style == SerializationPipeDelimited && sm.Explode, + value.In == ParameterInQuery && sm.Style == SerializationPipeDelimited && !sm.Explode, + value.In == ParameterInQuery && sm.Style == SerializationDeepObject && sm.Explode, + + value.In == ParameterInHeader && sm.Style == SerializationSimple && !sm.Explode, + value.In == ParameterInHeader && sm.Style == SerializationSimple && sm.Explode, + + value.In == ParameterInCookie && sm.Style == SerializationForm && !sm.Explode, + value.In == ParameterInCookie && sm.Style == SerializationForm && sm.Explode: + smSupported = true + } + if !smSupported { + e := fmt.Errorf("serialization method with style=%q and explode=%v is not supported by a %s parameter", sm.Style, sm.Explode, in) + return fmt.Errorf("parameter %q schema is invalid: %v", value.Name, e) + } + + if (value.Schema == nil) == (value.Content == nil) { + e := errors.New("parameter must contain exactly one of content and schema") + return fmt.Errorf("parameter %q schema is invalid: %v", value.Name, e) + } + if schema := value.Schema; schema != nil { + if err := schema.Validate(ctx); err != nil { + return fmt.Errorf("parameter %q schema is invalid: %v", value.Name, err) + } + } + + if content := value.Content; content != nil { + if err := 
content.Validate(ctx); err != nil { + return fmt.Errorf("parameter %q content is invalid: %v", value.Name, err) + } + } + return nil +} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/path_item.go b/vendor/github.com/getkin/kin-openapi/openapi3/path_item.go new file mode 100644 index 00000000000..a6650204639 --- /dev/null +++ b/vendor/github.com/getkin/kin-openapi/openapi3/path_item.go @@ -0,0 +1,126 @@ +package openapi3 + +import ( + "context" + "fmt" + "net/http" + + "github.com/getkin/kin-openapi/jsoninfo" +) + +type PathItem struct { + ExtensionProps + Ref string `json:"$ref,omitempty" yaml:"$ref,omitempty"` + Summary string `json:"summary,omitempty" yaml:"summary,omitempty"` + Description string `json:"description,omitempty" yaml:"description,omitempty"` + Connect *Operation `json:"connect,omitempty" yaml:"connect,omitempty"` + Delete *Operation `json:"delete,omitempty" yaml:"delete,omitempty"` + Get *Operation `json:"get,omitempty" yaml:"get,omitempty"` + Head *Operation `json:"head,omitempty" yaml:"head,omitempty"` + Options *Operation `json:"options,omitempty" yaml:"options,omitempty"` + Patch *Operation `json:"patch,omitempty" yaml:"patch,omitempty"` + Post *Operation `json:"post,omitempty" yaml:"post,omitempty"` + Put *Operation `json:"put,omitempty" yaml:"put,omitempty"` + Trace *Operation `json:"trace,omitempty" yaml:"trace,omitempty"` + Servers Servers `json:"servers,omitempty" yaml:"servers,omitempty"` + Parameters Parameters `json:"parameters,omitempty" yaml:"parameters,omitempty"` +} + +func (pathItem *PathItem) MarshalJSON() ([]byte, error) { + return jsoninfo.MarshalStrictStruct(pathItem) +} + +func (pathItem *PathItem) UnmarshalJSON(data []byte) error { + return jsoninfo.UnmarshalStrictStruct(data, pathItem) +} + +func (pathItem *PathItem) Operations() map[string]*Operation { + operations := make(map[string]*Operation, 4) + if v := pathItem.Connect; v != nil { + operations[http.MethodConnect] = v + } + if v := pathItem.Delete; v != nil { + operations[http.MethodDelete] = v + } + if v := pathItem.Get; v != nil { + operations[http.MethodGet] = v + } + if v := pathItem.Head; v != nil { + operations[http.MethodHead] = v + } + if v := pathItem.Options; v != nil { + operations[http.MethodOptions] = v + } + if v := pathItem.Patch; v != nil { + operations[http.MethodPatch] = v + } + if v := pathItem.Post; v != nil { + operations[http.MethodPost] = v + } + if v := pathItem.Put; v != nil { + operations[http.MethodPut] = v + } + if v := pathItem.Trace; v != nil { + operations[http.MethodTrace] = v + } + return operations +} + +func (pathItem *PathItem) GetOperation(method string) *Operation { + switch method { + case http.MethodConnect: + return pathItem.Connect + case http.MethodDelete: + return pathItem.Delete + case http.MethodGet: + return pathItem.Get + case http.MethodHead: + return pathItem.Head + case http.MethodOptions: + return pathItem.Options + case http.MethodPatch: + return pathItem.Patch + case http.MethodPost: + return pathItem.Post + case http.MethodPut: + return pathItem.Put + case http.MethodTrace: + return pathItem.Trace + default: + panic(fmt.Errorf("unsupported HTTP method %q", method)) + } +} + +func (pathItem *PathItem) SetOperation(method string, operation *Operation) { + switch method { + case http.MethodConnect: + pathItem.Connect = operation + case http.MethodDelete: + pathItem.Delete = operation + case http.MethodGet: + pathItem.Get = operation + case http.MethodHead: + pathItem.Head = operation + case http.MethodOptions: + pathItem.Options = 
operation + case http.MethodPatch: + pathItem.Patch = operation + case http.MethodPost: + pathItem.Post = operation + case http.MethodPut: + pathItem.Put = operation + case http.MethodTrace: + pathItem.Trace = operation + default: + panic(fmt.Errorf("unsupported HTTP method %q", method)) + } +} + +func (value *PathItem) Validate(ctx context.Context) error { + for _, operation := range value.Operations() { + if err := operation.Validate(ctx); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/paths.go b/vendor/github.com/getkin/kin-openapi/openapi3/paths.go new file mode 100644 index 00000000000..bdb87ae7dc7 --- /dev/null +++ b/vendor/github.com/getkin/kin-openapi/openapi3/paths.go @@ -0,0 +1,164 @@ +package openapi3 + +import ( + "context" + "fmt" + "strings" +) + +// Paths is specified by OpenAPI/Swagger standard version 3.0. +type Paths map[string]*PathItem + +func (value Paths) Validate(ctx context.Context) error { + normalizedPaths := make(map[string]string) + for path, pathItem := range value { + if path == "" || path[0] != '/' { + return fmt.Errorf("path %q does not start with a forward slash (/)", path) + } + + if pathItem == nil { + value[path] = &PathItem{} + pathItem = value[path] + } + + normalizedPath, _, varsInPath := normalizeTemplatedPath(path) + if oldPath, ok := normalizedPaths[normalizedPath]; ok { + return fmt.Errorf("conflicting paths %q and %q", path, oldPath) + } + normalizedPaths[normalizedPath] = path + + var commonParams []string + for _, parameterRef := range pathItem.Parameters { + if parameterRef != nil { + if parameter := parameterRef.Value; parameter != nil && parameter.In == ParameterInPath { + commonParams = append(commonParams, parameter.Name) + } + } + } + for method, operation := range pathItem.Operations() { + var setParams []string + for _, parameterRef := range operation.Parameters { + if parameterRef != nil { + if parameter := parameterRef.Value; parameter != nil && parameter.In == ParameterInPath { + setParams = append(setParams, parameter.Name) + } + } + } + if expected := len(setParams) + len(commonParams); expected != len(varsInPath) { + expected -= len(varsInPath) + if expected < 0 { + expected *= -1 + } + missing := make(map[string]struct{}, expected) + definedParams := append(setParams, commonParams...) + for _, name := range definedParams { + if _, ok := varsInPath[name]; !ok { + missing[name] = struct{}{} + } + } + for name := range varsInPath { + got := false + for _, othername := range definedParams { + if othername == name { + got = true + break + } + } + if !got { + missing[name] = struct{}{} + } + } + if len(missing) != 0 { + missings := make([]string, 0, len(missing)) + for name := range missing { + missings = append(missings, name) + } + return fmt.Errorf("operation %s %s must define exactly all path parameters (missing: %v)", method, path, missings) + } + } + } + + if err := pathItem.Validate(ctx); err != nil { + return err + } + } + return nil +} + +// Find returns a path that matches the key. + +// The method ignores differences in template variable names (except possible "*" suffix). + +// For example: + +// paths := openapi3.Paths { +// "/person/{personName}": &openapi3.PathItem{}, +// } +// pathItem := paths.Find("/person/{name}") + +// would return the correct path item. 
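+//
+// Editor's note (not part of the upstream file): this works because
+// normalizeTemplatedPath (defined below) reduces both keys to the same
+// template; a sketch of its outputs:
+//
+//   tpl, n, vars := normalizeTemplatedPath("/person/{personName}")
+//   // tpl == "/person/{}", n == 1, vars == map[string]struct{}{"personName": {}}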
+func (paths Paths) Find(key string) *PathItem { + // Try direct access to the map + pathItem := paths[key] + if pathItem != nil { + return pathItem + } + + normalizedPath, expected, _ := normalizeTemplatedPath(key) + for path, pathItem := range paths { + pathNormalized, got, _ := normalizeTemplatedPath(path) + if got == expected && pathNormalized == normalizedPath { + return pathItem + } + } + return nil +} + +func normalizeTemplatedPath(path string) (string, uint, map[string]struct{}) { + if strings.IndexByte(path, '{') < 0 { + return path, 0, nil + } + + var buffTpl strings.Builder + buffTpl.Grow(len(path)) + + var ( + cc rune + count uint + isVariable bool + vars = make(map[string]struct{}) + buffVar strings.Builder + ) + for i, c := range path { + if isVariable { + if c == '}' { + // End path variable + isVariable = false + + vars[buffVar.String()] = struct{}{} + buffVar = strings.Builder{} + + // First append possible '*' before this character + // The character '}' will be appended + if i > 0 && cc == '*' { + buffTpl.WriteRune(cc) + } + } else { + buffVar.WriteRune(c) + continue + } + + } else if c == '{' { + // Begin path variable + isVariable = true + + // The character '{' will be appended + count++ + } + + // Append the character + buffTpl.WriteRune(c) + cc = c + } + return buffTpl.String(), count, vars +} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/refs.go b/vendor/github.com/getkin/kin-openapi/openapi3/refs.go new file mode 100644 index 00000000000..31225092989 --- /dev/null +++ b/vendor/github.com/getkin/kin-openapi/openapi3/refs.go @@ -0,0 +1,306 @@ +package openapi3 + +import ( + "context" + + "github.com/getkin/kin-openapi/jsoninfo" + "github.com/go-openapi/jsonpointer" +) + +// Ref is specified by OpenAPI/Swagger 3.0 standard. +type Ref struct { + Ref string `json:"$ref" yaml:"$ref"` +} + +// CallbackRef represents either a Callback or a $ref to a Callback. +// When serializing and both fields are set, Ref is preferred over Value. +type CallbackRef struct { + Ref string + Value *Callback +} + +var _ jsonpointer.JSONPointable = (*CallbackRef)(nil) + +func (value *CallbackRef) MarshalJSON() ([]byte, error) { + return jsoninfo.MarshalRef(value.Ref, value.Value) +} + +func (value *CallbackRef) UnmarshalJSON(data []byte) error { + return jsoninfo.UnmarshalRef(data, &value.Ref, &value.Value) +} + +func (value *CallbackRef) Validate(ctx context.Context) error { + if v := value.Value; v != nil { + return v.Validate(ctx) + } + return foundUnresolvedRef(value.Ref) +} + +func (value CallbackRef) JSONLookup(token string) (interface{}, error) { + if token == "$ref" { + return value.Ref, nil + } + + ptr, _, err := jsonpointer.GetForToken(value.Value, token) + return ptr, err +} + +// ExampleRef represents either an Example or a $ref to an Example. +// When serializing and both fields are set, Ref is preferred over Value. 
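+//
+// Editor's note (illustrative sketch, not upstream code): the "Ref is
+// preferred" rule above holds for every *Ref type in this file; the JSON in
+// the trailing comment assumes jsoninfo.MarshalRef emits only the $ref when
+// it is non-empty, and the Example literal's Summary field comes from
+// example.go:
+//
+//   ref := &openapi3.ExampleRef{
+//     Ref:   "#/components/examples/pet",
+//     Value: &openapi3.Example{Summary: "a pet"}, // ignored while Ref is set
+//   }
+//   data, _ := ref.MarshalJSON() // {"$ref":"#/components/examples/pet"}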
+type ExampleRef struct { + Ref string + Value *Example +} + +var _ jsonpointer.JSONPointable = (*ExampleRef)(nil) + +func (value *ExampleRef) MarshalJSON() ([]byte, error) { + return jsoninfo.MarshalRef(value.Ref, value.Value) +} + +func (value *ExampleRef) UnmarshalJSON(data []byte) error { + return jsoninfo.UnmarshalRef(data, &value.Ref, &value.Value) +} + +func (value *ExampleRef) Validate(ctx context.Context) error { + if v := value.Value; v != nil { + return v.Validate(ctx) + } + return foundUnresolvedRef(value.Ref) +} + +func (value ExampleRef) JSONLookup(token string) (interface{}, error) { + if token == "$ref" { + return value.Ref, nil + } + + ptr, _, err := jsonpointer.GetForToken(value.Value, token) + return ptr, err +} + +// HeaderRef represents either a Header or a $ref to a Header. +// When serializing and both fields are set, Ref is preferred over Value. +type HeaderRef struct { + Ref string + Value *Header +} + +var _ jsonpointer.JSONPointable = (*HeaderRef)(nil) + +func (value *HeaderRef) MarshalJSON() ([]byte, error) { + return jsoninfo.MarshalRef(value.Ref, value.Value) +} + +func (value *HeaderRef) UnmarshalJSON(data []byte) error { + return jsoninfo.UnmarshalRef(data, &value.Ref, &value.Value) +} + +func (value *HeaderRef) Validate(ctx context.Context) error { + if v := value.Value; v != nil { + return v.Validate(ctx) + } + return foundUnresolvedRef(value.Ref) +} + +func (value HeaderRef) JSONLookup(token string) (interface{}, error) { + if token == "$ref" { + return value.Ref, nil + } + + ptr, _, err := jsonpointer.GetForToken(value.Value, token) + return ptr, err +} + +// LinkRef represents either a Link or a $ref to a Link. +// When serializing and both fields are set, Ref is preferred over Value. +type LinkRef struct { + Ref string + Value *Link +} + +func (value *LinkRef) MarshalJSON() ([]byte, error) { + return jsoninfo.MarshalRef(value.Ref, value.Value) +} + +func (value *LinkRef) UnmarshalJSON(data []byte) error { + return jsoninfo.UnmarshalRef(data, &value.Ref, &value.Value) +} + +func (value *LinkRef) Validate(ctx context.Context) error { + if v := value.Value; v != nil { + return v.Validate(ctx) + } + return foundUnresolvedRef(value.Ref) +} + +// ParameterRef represents either a Parameter or a $ref to a Parameter. +// When serializing and both fields are set, Ref is preferred over Value. +type ParameterRef struct { + Ref string + Value *Parameter +} + +var _ jsonpointer.JSONPointable = (*ParameterRef)(nil) + +func (value *ParameterRef) MarshalJSON() ([]byte, error) { + return jsoninfo.MarshalRef(value.Ref, value.Value) +} + +func (value *ParameterRef) UnmarshalJSON(data []byte) error { + return jsoninfo.UnmarshalRef(data, &value.Ref, &value.Value) +} + +func (value *ParameterRef) Validate(ctx context.Context) error { + if v := value.Value; v != nil { + return v.Validate(ctx) + } + return foundUnresolvedRef(value.Ref) +} + +func (value ParameterRef) JSONLookup(token string) (interface{}, error) { + if token == "$ref" { + return value.Ref, nil + } + + ptr, _, err := jsonpointer.GetForToken(value.Value, token) + return ptr, err +} + +// ResponseRef represents either a Response or a $ref to a Response. +// When serializing and both fields are set, Ref is preferred over Value. 
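+//
+// Editor's note (illustrative sketch, not upstream code): Validate on any
+// *Ref type only succeeds once the reference has been resolved into Value,
+// normally by a loader; a bare reference fails with an unresolved-ref error:
+//
+//   r := &openapi3.ResponseRef{Ref: "#/components/responses/NotFound"}
+//   err := r.Validate(context.Background()) // unresolved $ref error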
+type ResponseRef struct { + Ref string + Value *Response +} + +var _ jsonpointer.JSONPointable = (*ResponseRef)(nil) + +func (value *ResponseRef) MarshalJSON() ([]byte, error) { + return jsoninfo.MarshalRef(value.Ref, value.Value) +} + +func (value *ResponseRef) UnmarshalJSON(data []byte) error { + return jsoninfo.UnmarshalRef(data, &value.Ref, &value.Value) +} + +func (value *ResponseRef) Validate(ctx context.Context) error { + if v := value.Value; v != nil { + return v.Validate(ctx) + } + return foundUnresolvedRef(value.Ref) +} + +func (value ResponseRef) JSONLookup(token string) (interface{}, error) { + if token == "$ref" { + return value.Ref, nil + } + + ptr, _, err := jsonpointer.GetForToken(value.Value, token) + return ptr, err +} + +// RequestBodyRef represents either a RequestBody or a $ref to a RequestBody. +// When serializing and both fields are set, Ref is preferred over Value. +type RequestBodyRef struct { + Ref string + Value *RequestBody +} + +var _ jsonpointer.JSONPointable = (*RequestBodyRef)(nil) + +func (value *RequestBodyRef) MarshalJSON() ([]byte, error) { + return jsoninfo.MarshalRef(value.Ref, value.Value) +} + +func (value *RequestBodyRef) UnmarshalJSON(data []byte) error { + return jsoninfo.UnmarshalRef(data, &value.Ref, &value.Value) +} + +func (value *RequestBodyRef) Validate(ctx context.Context) error { + if v := value.Value; v != nil { + return v.Validate(ctx) + } + return foundUnresolvedRef(value.Ref) +} + +func (value RequestBodyRef) JSONLookup(token string) (interface{}, error) { + if token == "$ref" { + return value.Ref, nil + } + + ptr, _, err := jsonpointer.GetForToken(value.Value, token) + return ptr, err +} + +// SchemaRef represents either a Schema or a $ref to a Schema. +// When serializing and both fields are set, Ref is preferred over Value. +type SchemaRef struct { + Ref string + Value *Schema +} + +var _ jsonpointer.JSONPointable = (*SchemaRef)(nil) + +func NewSchemaRef(ref string, value *Schema) *SchemaRef { + return &SchemaRef{ + Ref: ref, + Value: value, + } +} + +func (value *SchemaRef) MarshalJSON() ([]byte, error) { + return jsoninfo.MarshalRef(value.Ref, value.Value) +} + +func (value *SchemaRef) UnmarshalJSON(data []byte) error { + return jsoninfo.UnmarshalRef(data, &value.Ref, &value.Value) +} + +func (value *SchemaRef) Validate(ctx context.Context) error { + if v := value.Value; v != nil { + return v.Validate(ctx) + } + return foundUnresolvedRef(value.Ref) +} + +func (value SchemaRef) JSONLookup(token string) (interface{}, error) { + if token == "$ref" { + return value.Ref, nil + } + + ptr, _, err := jsonpointer.GetForToken(value.Value, token) + return ptr, err +} + +// SecuritySchemeRef represents either a SecurityScheme or a $ref to a SecurityScheme. +// When serializing and both fields are set, Ref is preferred over Value. 
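+//
+// Editor's note (illustrative sketch, not upstream code): the *Ref types
+// that assert jsonpointer.JSONPointable can be traversed by JSON pointers;
+// the "$ref" token returns the raw reference string, and any other token is
+// delegated to jsonpointer.GetForToken on the resolved Value:
+//
+//   v, _ := ref.JSONLookup("$ref") // v is the reference string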
+type SecuritySchemeRef struct { + Ref string + Value *SecurityScheme +} + +var _ jsonpointer.JSONPointable = (*SecuritySchemeRef)(nil) + +func (value *SecuritySchemeRef) MarshalJSON() ([]byte, error) { + return jsoninfo.MarshalRef(value.Ref, value.Value) +} + +func (value *SecuritySchemeRef) UnmarshalJSON(data []byte) error { + return jsoninfo.UnmarshalRef(data, &value.Ref, &value.Value) +} + +func (value *SecuritySchemeRef) Validate(ctx context.Context) error { + if v := value.Value; v != nil { + return v.Validate(ctx) + } + return foundUnresolvedRef(value.Ref) +} + +func (value SecuritySchemeRef) JSONLookup(token string) (interface{}, error) { + if token == "$ref" { + return value.Ref, nil + } + + ptr, _, err := jsonpointer.GetForToken(value.Value, token) + return ptr, err +} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/request_body.go b/vendor/github.com/getkin/kin-openapi/openapi3/request_body.go new file mode 100644 index 00000000000..66b512fa0aa --- /dev/null +++ b/vendor/github.com/getkin/kin-openapi/openapi3/request_body.go @@ -0,0 +1,107 @@ +package openapi3 + +import ( + "context" + "fmt" + + "github.com/getkin/kin-openapi/jsoninfo" + "github.com/go-openapi/jsonpointer" +) + +type RequestBodies map[string]*RequestBodyRef + +var _ jsonpointer.JSONPointable = (*RequestBodyRef)(nil) + +func (r RequestBodies) JSONLookup(token string) (interface{}, error) { + ref, ok := r[token] + if ok == false { + return nil, fmt.Errorf("object has no field %q", token) + } + + if ref != nil && ref.Ref != "" { + return &Ref{Ref: ref.Ref}, nil + } + return ref.Value, nil +} + +// RequestBody is specified by OpenAPI/Swagger 3.0 standard. +type RequestBody struct { + ExtensionProps + Description string `json:"description,omitempty" yaml:"description,omitempty"` + Required bool `json:"required,omitempty" yaml:"required,omitempty"` + Content Content `json:"content,omitempty" yaml:"content,omitempty"` +} + +func NewRequestBody() *RequestBody { + return &RequestBody{} +} + +func (requestBody *RequestBody) WithDescription(value string) *RequestBody { + requestBody.Description = value + return requestBody +} + +func (requestBody *RequestBody) WithRequired(value bool) *RequestBody { + requestBody.Required = value + return requestBody +} + +func (requestBody *RequestBody) WithContent(content Content) *RequestBody { + requestBody.Content = content + return requestBody +} + +func (requestBody *RequestBody) WithSchemaRef(value *SchemaRef, consumes []string) *RequestBody { + requestBody.Content = NewContentWithSchemaRef(value, consumes) + return requestBody +} + +func (requestBody *RequestBody) WithSchema(value *Schema, consumes []string) *RequestBody { + requestBody.Content = NewContentWithSchema(value, consumes) + return requestBody +} + +func (requestBody *RequestBody) WithJSONSchemaRef(value *SchemaRef) *RequestBody { + requestBody.Content = NewContentWithJSONSchemaRef(value) + return requestBody +} + +func (requestBody *RequestBody) WithJSONSchema(value *Schema) *RequestBody { + requestBody.Content = NewContentWithJSONSchema(value) + return requestBody +} + +func (requestBody *RequestBody) WithFormDataSchemaRef(value *SchemaRef) *RequestBody { + requestBody.Content = NewContentWithFormDataSchemaRef(value) + return requestBody +} + +func (requestBody *RequestBody) WithFormDataSchema(value *Schema) *RequestBody { + requestBody.Content = NewContentWithFormDataSchema(value) + return requestBody +} + +func (requestBody *RequestBody) GetMediaType(mediaType string) *MediaType { + m := 
requestBody.Content + if m == nil { + return nil + } + return m[mediaType] +} + +func (requestBody *RequestBody) MarshalJSON() ([]byte, error) { + return jsoninfo.MarshalStrictStruct(requestBody) +} + +func (requestBody *RequestBody) UnmarshalJSON(data []byte) error { + return jsoninfo.UnmarshalStrictStruct(data, requestBody) +} + +func (value *RequestBody) Validate(ctx context.Context) error { + if v := value.Content; v != nil { + if err := v.Validate(ctx); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/response.go b/vendor/github.com/getkin/kin-openapi/openapi3/response.go new file mode 100644 index 00000000000..2ab33aca28e --- /dev/null +++ b/vendor/github.com/getkin/kin-openapi/openapi3/response.go @@ -0,0 +1,108 @@ +package openapi3 + +import ( + "context" + "errors" + "fmt" + "strconv" + + "github.com/getkin/kin-openapi/jsoninfo" + "github.com/go-openapi/jsonpointer" +) + +// Responses is specified by OpenAPI/Swagger 3.0 standard. +type Responses map[string]*ResponseRef + +var _ jsonpointer.JSONPointable = (*Responses)(nil) + +func NewResponses() Responses { + r := make(Responses) + r["default"] = &ResponseRef{Value: NewResponse().WithDescription("")} + return r +} + +func (responses Responses) Default() *ResponseRef { + return responses["default"] +} + +func (responses Responses) Get(status int) *ResponseRef { + return responses[strconv.FormatInt(int64(status), 10)] +} + +func (value Responses) Validate(ctx context.Context) error { + if len(value) == 0 { + return errors.New("the responses object MUST contain at least one response code") + } + for _, v := range value { + if err := v.Validate(ctx); err != nil { + return err + } + } + return nil +} + +func (responses Responses) JSONLookup(token string) (interface{}, error) { + ref, ok := responses[token] + if ok == false { + return nil, fmt.Errorf("invalid token reference: %q", token) + } + + if ref != nil && ref.Ref != "" { + return &Ref{Ref: ref.Ref}, nil + } + return ref.Value, nil +} + +// Response is specified by OpenAPI/Swagger 3.0 standard. 
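+//
+// Editor's note (illustrative sketch, not upstream code): Validate (below)
+// requires Description to be set, which is why the field is a *string and
+// WithDescription stores a pointer:
+//
+//   ok := openapi3.NewResponse().
+//     WithDescription("pet found").
+//     WithJSONSchemaRef(openapi3.NewSchemaRef("#/components/schemas/Pet", nil))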
+type Response struct { + ExtensionProps + Description *string `json:"description,omitempty" yaml:"description,omitempty"` + Headers Headers `json:"headers,omitempty" yaml:"headers,omitempty"` + Content Content `json:"content,omitempty" yaml:"content,omitempty"` + Links Links `json:"links,omitempty" yaml:"links,omitempty"` +} + +func NewResponse() *Response { + return &Response{} +} + +func (response *Response) WithDescription(value string) *Response { + response.Description = &value + return response +} + +func (response *Response) WithContent(content Content) *Response { + response.Content = content + return response +} + +func (response *Response) WithJSONSchema(schema *Schema) *Response { + response.Content = NewContentWithJSONSchema(schema) + return response +} + +func (response *Response) WithJSONSchemaRef(schema *SchemaRef) *Response { + response.Content = NewContentWithJSONSchemaRef(schema) + return response +} + +func (response *Response) MarshalJSON() ([]byte, error) { + return jsoninfo.MarshalStrictStruct(response) +} + +func (response *Response) UnmarshalJSON(data []byte) error { + return jsoninfo.UnmarshalStrictStruct(data, response) +} + +func (value *Response) Validate(ctx context.Context) error { + if value.Description == nil { + return errors.New("a short description of the response is required") + } + + if content := value.Content; content != nil { + if err := content.Validate(ctx); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/schema.go b/vendor/github.com/getkin/kin-openapi/openapi3/schema.go new file mode 100644 index 00000000000..c1730b6ad80 --- /dev/null +++ b/vendor/github.com/getkin/kin-openapi/openapi3/schema.go @@ -0,0 +1,1620 @@ +package openapi3 + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "math" + "math/big" + "regexp" + "strconv" + "unicode/utf16" + + "github.com/getkin/kin-openapi/jsoninfo" + "github.com/go-openapi/jsonpointer" +) + +const ( + TypeArray = "array" + TypeBoolean = "boolean" + TypeInteger = "integer" + TypeNumber = "number" + TypeObject = "object" + TypeString = "string" +) + +var ( + // SchemaErrorDetailsDisabled disables printing of details about schema errors. + SchemaErrorDetailsDisabled = false + + //SchemaFormatValidationDisabled disables validation of schema type formats. + SchemaFormatValidationDisabled = false + + errSchema = errors.New("input does not match the schema") + + // ErrOneOfConflict is the SchemaError Origin when data matches more than one oneOf schema + ErrOneOfConflict = errors.New("input matches more than one oneOf schemas") + + // ErrSchemaInputNaN may be returned when validating a number + ErrSchemaInputNaN = errors.New("floating point NaN is not allowed") + // ErrSchemaInputInf may be returned when validating a number + ErrSchemaInputInf = errors.New("floating point Inf is not allowed") +) + +// Float64Ptr is a helper for defining OpenAPI schemas. +func Float64Ptr(value float64) *float64 { + return &value +} + +// BoolPtr is a helper for defining OpenAPI schemas. +func BoolPtr(value bool) *bool { + return &value +} + +// Int64Ptr is a helper for defining OpenAPI schemas. +func Int64Ptr(value int64) *int64 { + return &value +} + +// Uint64Ptr is a helper for defining OpenAPI schemas. 
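+//
+// Editor's note (illustrative sketch, not upstream code): these *Ptr helpers
+// exist because optional numeric bounds are pointer fields on Schema, letting
+// an explicit zero be told apart from "unset":
+//
+//   s := &openapi3.Schema{
+//     Type: openapi3.TypeInteger,
+//     Min:  openapi3.Float64Ptr(0),
+//     Max:  openapi3.Float64Ptr(100),
+//   }
+//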
+func Uint64Ptr(value uint64) *uint64 { + return &value +} + +type Schemas map[string]*SchemaRef + +var _ jsonpointer.JSONPointable = (*Schemas)(nil) + +func (s Schemas) JSONLookup(token string) (interface{}, error) { + ref, ok := s[token] + if ref == nil || ok == false { + return nil, fmt.Errorf("object has no field %q", token) + } + + if ref.Ref != "" { + return &Ref{Ref: ref.Ref}, nil + } + return ref.Value, nil +} + +type SchemaRefs []*SchemaRef + +var _ jsonpointer.JSONPointable = (*SchemaRefs)(nil) + +func (s SchemaRefs) JSONLookup(token string) (interface{}, error) { + i, err := strconv.ParseUint(token, 10, 64) + if err != nil { + return nil, err + } + + if i >= uint64(len(s)) { + return nil, fmt.Errorf("index out of range: %d", i) + } + + ref := s[i] + + if ref == nil || ref.Ref != "" { + return &Ref{Ref: ref.Ref}, nil + } + return ref.Value, nil +} + +// Schema is specified by OpenAPI/Swagger 3.0 standard. +type Schema struct { + ExtensionProps + + OneOf SchemaRefs `json:"oneOf,omitempty" yaml:"oneOf,omitempty"` + AnyOf SchemaRefs `json:"anyOf,omitempty" yaml:"anyOf,omitempty"` + AllOf SchemaRefs `json:"allOf,omitempty" yaml:"allOf,omitempty"` + Not *SchemaRef `json:"not,omitempty" yaml:"not,omitempty"` + Type string `json:"type,omitempty" yaml:"type,omitempty"` + Title string `json:"title,omitempty" yaml:"title,omitempty"` + Format string `json:"format,omitempty" yaml:"format,omitempty"` + Description string `json:"description,omitempty" yaml:"description,omitempty"` + Enum []interface{} `json:"enum,omitempty" yaml:"enum,omitempty"` + Default interface{} `json:"default,omitempty" yaml:"default,omitempty"` + Example interface{} `json:"example,omitempty" yaml:"example,omitempty"` + ExternalDocs *ExternalDocs `json:"externalDocs,omitempty" yaml:"externalDocs,omitempty"` + + // Array-related, here for struct compactness + UniqueItems bool `json:"uniqueItems,omitempty" yaml:"uniqueItems,omitempty"` + // Number-related, here for struct compactness + ExclusiveMin bool `json:"exclusiveMinimum,omitempty" yaml:"exclusiveMinimum,omitempty"` + ExclusiveMax bool `json:"exclusiveMaximum,omitempty" yaml:"exclusiveMaximum,omitempty"` + // Properties + Nullable bool `json:"nullable,omitempty" yaml:"nullable,omitempty"` + ReadOnly bool `json:"readOnly,omitempty" yaml:"readOnly,omitempty"` + WriteOnly bool `json:"writeOnly,omitempty" yaml:"writeOnly,omitempty"` + AllowEmptyValue bool `json:"allowEmptyValue,omitempty" yaml:"allowEmptyValue,omitempty"` + XML interface{} `json:"xml,omitempty" yaml:"xml,omitempty"` + Deprecated bool `json:"deprecated,omitempty" yaml:"deprecated,omitempty"` + + // Number + Min *float64 `json:"minimum,omitempty" yaml:"minimum,omitempty"` + Max *float64 `json:"maximum,omitempty" yaml:"maximum,omitempty"` + MultipleOf *float64 `json:"multipleOf,omitempty" yaml:"multipleOf,omitempty"` + + // String + MinLength uint64 `json:"minLength,omitempty" yaml:"minLength,omitempty"` + MaxLength *uint64 `json:"maxLength,omitempty" yaml:"maxLength,omitempty"` + Pattern string `json:"pattern,omitempty" yaml:"pattern,omitempty"` + compiledPattern *regexp.Regexp + + // Array + MinItems uint64 `json:"minItems,omitempty" yaml:"minItems,omitempty"` + MaxItems *uint64 `json:"maxItems,omitempty" yaml:"maxItems,omitempty"` + Items *SchemaRef `json:"items,omitempty" yaml:"items,omitempty"` + + // Object + Required []string `json:"required,omitempty" yaml:"required,omitempty"` + Properties Schemas `json:"properties,omitempty" yaml:"properties,omitempty"` + MinProps uint64 
`json:"minProperties,omitempty" yaml:"minProperties,omitempty"` + MaxProps *uint64 `json:"maxProperties,omitempty" yaml:"maxProperties,omitempty"` + AdditionalPropertiesAllowed *bool `multijson:"additionalProperties,omitempty" json:"-" yaml:"-"` // In this order... + AdditionalProperties *SchemaRef `multijson:"additionalProperties,omitempty" json:"-" yaml:"-"` // ...for multijson + Discriminator *Discriminator `json:"discriminator,omitempty" yaml:"discriminator,omitempty"` +} + +var _ jsonpointer.JSONPointable = (*Schema)(nil) + +func NewSchema() *Schema { + return &Schema{} +} + +func (schema *Schema) MarshalJSON() ([]byte, error) { + return jsoninfo.MarshalStrictStruct(schema) +} + +func (schema *Schema) UnmarshalJSON(data []byte) error { + return jsoninfo.UnmarshalStrictStruct(data, schema) +} + +func (schema Schema) JSONLookup(token string) (interface{}, error) { + switch token { + case "additionalProperties": + if schema.AdditionalProperties != nil { + if schema.AdditionalProperties.Ref != "" { + return &Ref{Ref: schema.AdditionalProperties.Ref}, nil + } + return schema.AdditionalProperties.Value, nil + } + case "not": + if schema.Not != nil { + if schema.Not.Ref != "" { + return &Ref{Ref: schema.Not.Ref}, nil + } + return schema.Not.Value, nil + } + case "items": + if schema.Items != nil { + if schema.Items.Ref != "" { + return &Ref{Ref: schema.Items.Ref}, nil + } + return schema.Items.Value, nil + } + case "oneOf": + return schema.OneOf, nil + case "anyOf": + return schema.AnyOf, nil + case "allOf": + return schema.AllOf, nil + case "type": + return schema.Type, nil + case "title": + return schema.Title, nil + case "format": + return schema.Format, nil + case "description": + return schema.Description, nil + case "enum": + return schema.Enum, nil + case "default": + return schema.Default, nil + case "example": + return schema.Example, nil + case "externalDocs": + return schema.ExternalDocs, nil + case "additionalPropertiesAllowed": + return schema.AdditionalPropertiesAllowed, nil + case "uniqueItems": + return schema.UniqueItems, nil + case "exclusiveMin": + return schema.ExclusiveMin, nil + case "exclusiveMax": + return schema.ExclusiveMax, nil + case "nullable": + return schema.Nullable, nil + case "readOnly": + return schema.ReadOnly, nil + case "writeOnly": + return schema.WriteOnly, nil + case "allowEmptyValue": + return schema.AllowEmptyValue, nil + case "xml": + return schema.XML, nil + case "deprecated": + return schema.Deprecated, nil + case "min": + return schema.Min, nil + case "max": + return schema.Max, nil + case "multipleOf": + return schema.MultipleOf, nil + case "minLength": + return schema.MinLength, nil + case "maxLength": + return schema.MaxLength, nil + case "pattern": + return schema.Pattern, nil + case "minItems": + return schema.MinItems, nil + case "maxItems": + return schema.MaxItems, nil + case "required": + return schema.Required, nil + case "properties": + return schema.Properties, nil + case "minProps": + return schema.MinProps, nil + case "maxProps": + return schema.MaxProps, nil + case "discriminator": + return schema.Discriminator, nil + } + + v, _, err := jsonpointer.GetForToken(schema.ExtensionProps, token) + return v, err +} + +func (schema *Schema) NewRef() *SchemaRef { + return &SchemaRef{ + Value: schema, + } +} + +func NewOneOfSchema(schemas ...*Schema) *Schema { + refs := make([]*SchemaRef, 0, len(schemas)) + for _, schema := range schemas { + refs = append(refs, &SchemaRef{Value: schema}) + } + return &Schema{ + OneOf: refs, + } +} + +func 
NewAnyOfSchema(schemas ...*Schema) *Schema { + refs := make([]*SchemaRef, 0, len(schemas)) + for _, schema := range schemas { + refs = append(refs, &SchemaRef{Value: schema}) + } + return &Schema{ + AnyOf: refs, + } +} + +func NewAllOfSchema(schemas ...*Schema) *Schema { + refs := make([]*SchemaRef, 0, len(schemas)) + for _, schema := range schemas { + refs = append(refs, &SchemaRef{Value: schema}) + } + return &Schema{ + AllOf: refs, + } +} + +func NewBoolSchema() *Schema { + return &Schema{ + Type: TypeBoolean, + } +} + +func NewFloat64Schema() *Schema { + return &Schema{ + Type: TypeNumber, + } +} + +func NewIntegerSchema() *Schema { + return &Schema{ + Type: TypeInteger, + } +} + +func NewInt32Schema() *Schema { + return &Schema{ + Type: TypeInteger, + Format: "int32", + } +} + +func NewInt64Schema() *Schema { + return &Schema{ + Type: TypeInteger, + Format: "int64", + } +} + +func NewStringSchema() *Schema { + return &Schema{ + Type: TypeString, + } +} + +func NewDateTimeSchema() *Schema { + return &Schema{ + Type: TypeString, + Format: "date-time", + } +} + +func NewUUIDSchema() *Schema { + return &Schema{ + Type: TypeString, + Format: "uuid", + } +} + +func NewBytesSchema() *Schema { + return &Schema{ + Type: TypeString, + Format: "byte", + } +} + +func NewArraySchema() *Schema { + return &Schema{ + Type: TypeArray, + } +} + +func NewObjectSchema() *Schema { + return &Schema{ + Type: TypeObject, + Properties: make(Schemas), + } +} + +func (schema *Schema) WithNullable() *Schema { + schema.Nullable = true + return schema +} + +func (schema *Schema) WithMin(value float64) *Schema { + schema.Min = &value + return schema +} + +func (schema *Schema) WithMax(value float64) *Schema { + schema.Max = &value + return schema +} +func (schema *Schema) WithExclusiveMin(value bool) *Schema { + schema.ExclusiveMin = value + return schema +} + +func (schema *Schema) WithExclusiveMax(value bool) *Schema { + schema.ExclusiveMax = value + return schema +} + +func (schema *Schema) WithEnum(values ...interface{}) *Schema { + schema.Enum = values + return schema +} + +func (schema *Schema) WithDefault(defaultValue interface{}) *Schema { + schema.Default = defaultValue + return schema +} + +func (schema *Schema) WithFormat(value string) *Schema { + schema.Format = value + return schema +} + +func (schema *Schema) WithLength(i int64) *Schema { + n := uint64(i) + schema.MinLength = n + schema.MaxLength = &n + return schema +} + +func (schema *Schema) WithMinLength(i int64) *Schema { + n := uint64(i) + schema.MinLength = n + return schema +} + +func (schema *Schema) WithMaxLength(i int64) *Schema { + n := uint64(i) + schema.MaxLength = &n + return schema +} + +func (schema *Schema) WithLengthDecodedBase64(i int64) *Schema { + n := uint64(i) + v := (n*8 + 5) / 6 + schema.MinLength = v + schema.MaxLength = &v + return schema +} + +func (schema *Schema) WithMinLengthDecodedBase64(i int64) *Schema { + n := uint64(i) + schema.MinLength = (n*8 + 5) / 6 + return schema +} + +func (schema *Schema) WithMaxLengthDecodedBase64(i int64) *Schema { + n := uint64(i) + v := (n*8 + 5) / 6 + schema.MaxLength = &v + return schema +} + +func (schema *Schema) WithPattern(pattern string) *Schema { + schema.Pattern = pattern + schema.compiledPattern = nil + return schema +} + +func (schema *Schema) WithItems(value *Schema) *Schema { + schema.Items = &SchemaRef{ + Value: value, + } + return schema +} + +func (schema *Schema) WithMinItems(i int64) *Schema { + n := uint64(i) + schema.MinItems = n + return schema +} + +func (schema *Schema) 
WithMaxItems(i int64) *Schema { + n := uint64(i) + schema.MaxItems = &n + return schema +} + +func (schema *Schema) WithUniqueItems(unique bool) *Schema { + schema.UniqueItems = unique + return schema +} + +func (schema *Schema) WithProperty(name string, propertySchema *Schema) *Schema { + return schema.WithPropertyRef(name, &SchemaRef{ + Value: propertySchema, + }) +} + +func (schema *Schema) WithPropertyRef(name string, ref *SchemaRef) *Schema { + properties := schema.Properties + if properties == nil { + properties = make(Schemas) + schema.Properties = properties + } + properties[name] = ref + return schema +} + +func (schema *Schema) WithProperties(properties map[string]*Schema) *Schema { + result := make(Schemas, len(properties)) + for k, v := range properties { + result[k] = &SchemaRef{ + Value: v, + } + } + schema.Properties = result + return schema +} + +func (schema *Schema) WithMinProperties(i int64) *Schema { + n := uint64(i) + schema.MinProps = n + return schema +} + +func (schema *Schema) WithMaxProperties(i int64) *Schema { + n := uint64(i) + schema.MaxProps = &n + return schema +} + +func (schema *Schema) WithAnyAdditionalProperties() *Schema { + schema.AdditionalProperties = nil + t := true + schema.AdditionalPropertiesAllowed = &t + return schema +} + +func (schema *Schema) WithAdditionalProperties(v *Schema) *Schema { + if v == nil { + schema.AdditionalProperties = nil + } else { + schema.AdditionalProperties = &SchemaRef{ + Value: v, + } + } + return schema +} + +func (schema *Schema) IsEmpty() bool { + if schema.Type != "" || schema.Format != "" || len(schema.Enum) != 0 || + schema.UniqueItems || schema.ExclusiveMin || schema.ExclusiveMax || + schema.Nullable || schema.ReadOnly || schema.WriteOnly || schema.AllowEmptyValue || + schema.Min != nil || schema.Max != nil || schema.MultipleOf != nil || + schema.MinLength != 0 || schema.MaxLength != nil || schema.Pattern != "" || + schema.MinItems != 0 || schema.MaxItems != nil || + len(schema.Required) != 0 || + schema.MinProps != 0 || schema.MaxProps != nil { + return false + } + if n := schema.Not; n != nil && !n.Value.IsEmpty() { + return false + } + if ap := schema.AdditionalProperties; ap != nil && !ap.Value.IsEmpty() { + return false + } + if apa := schema.AdditionalPropertiesAllowed; apa != nil && !*apa { + return false + } + if items := schema.Items; items != nil && !items.Value.IsEmpty() { + return false + } + for _, s := range schema.Properties { + if !s.Value.IsEmpty() { + return false + } + } + for _, s := range schema.OneOf { + if !s.Value.IsEmpty() { + return false + } + } + for _, s := range schema.AnyOf { + if !s.Value.IsEmpty() { + return false + } + } + for _, s := range schema.AllOf { + if !s.Value.IsEmpty() { + return false + } + } + return true +} + +func (value *Schema) Validate(ctx context.Context) error { + return value.validate(ctx, []*Schema{}) +} + +func (schema *Schema) validate(ctx context.Context, stack []*Schema) (err error) { + for _, existing := range stack { + if existing == schema { + return + } + } + stack = append(stack, schema) + + if schema.ReadOnly && schema.WriteOnly { + return errors.New("a property MUST NOT be marked as both readOnly and writeOnly being true") + } + + for _, item := range schema.OneOf { + v := item.Value + if v == nil { + return foundUnresolvedRef(item.Ref) + } + if err = v.validate(ctx, stack); err != nil { + return + } + } + + for _, item := range schema.AnyOf { + v := item.Value + if v == nil { + return foundUnresolvedRef(item.Ref) + } + if err = v.validate(ctx, 
stack); err != nil { + return + } + } + + for _, item := range schema.AllOf { + v := item.Value + if v == nil { + return foundUnresolvedRef(item.Ref) + } + if err = v.validate(ctx, stack); err != nil { + return + } + } + + if ref := schema.Not; ref != nil { + v := ref.Value + if v == nil { + return foundUnresolvedRef(ref.Ref) + } + if err = v.validate(ctx, stack); err != nil { + return + } + } + + schemaType := schema.Type + switch schemaType { + case "": + case TypeBoolean: + case TypeNumber: + if format := schema.Format; len(format) > 0 { + switch format { + case "float", "double": + default: + if !SchemaFormatValidationDisabled { + return unsupportedFormat(format) + } + } + } + case TypeInteger: + if format := schema.Format; len(format) > 0 { + switch format { + case "int32", "int64": + default: + if !SchemaFormatValidationDisabled { + return unsupportedFormat(format) + } + } + } + case TypeString: + if format := schema.Format; len(format) > 0 { + switch format { + // Supported by OpenAPIv3.0.1: + case "byte", "binary", "date", "date-time", "password": + // In JSON Draft-07 (not validated yet though): + case "regex": + case "time", "email", "idn-email": + case "hostname", "idn-hostname", "ipv4", "ipv6": + case "uri", "uri-reference", "iri", "iri-reference", "uri-template": + case "json-pointer", "relative-json-pointer": + default: + // Try to check for custom defined formats + if _, ok := SchemaStringFormats[format]; !ok && !SchemaFormatValidationDisabled { + return unsupportedFormat(format) + } + } + } + if schema.Pattern != "" { + if err = schema.compilePattern(); err != nil { + return err + } + } + case TypeArray: + if schema.Items == nil { + return errors.New("when schema type is 'array', schema 'items' must be non-null") + } + case TypeObject: + default: + return fmt.Errorf("unsupported 'type' value %q", schemaType) + } + + if ref := schema.Items; ref != nil { + v := ref.Value + if v == nil { + return foundUnresolvedRef(ref.Ref) + } + if err = v.validate(ctx, stack); err != nil { + return + } + } + + for _, ref := range schema.Properties { + v := ref.Value + if v == nil { + return foundUnresolvedRef(ref.Ref) + } + if err = v.validate(ctx, stack); err != nil { + return + } + } + + if ref := schema.AdditionalProperties; ref != nil { + v := ref.Value + if v == nil { + return foundUnresolvedRef(ref.Ref) + } + if err = v.validate(ctx, stack); err != nil { + return + } + } + + return +} + +func (schema *Schema) IsMatching(value interface{}) bool { + settings := newSchemaValidationSettings(FailFast()) + return schema.visitJSON(settings, value) == nil +} + +func (schema *Schema) IsMatchingJSONBoolean(value bool) bool { + settings := newSchemaValidationSettings(FailFast()) + return schema.visitJSON(settings, value) == nil +} + +func (schema *Schema) IsMatchingJSONNumber(value float64) bool { + settings := newSchemaValidationSettings(FailFast()) + return schema.visitJSON(settings, value) == nil +} + +func (schema *Schema) IsMatchingJSONString(value string) bool { + settings := newSchemaValidationSettings(FailFast()) + return schema.visitJSON(settings, value) == nil +} + +func (schema *Schema) IsMatchingJSONArray(value []interface{}) bool { + settings := newSchemaValidationSettings(FailFast()) + return schema.visitJSON(settings, value) == nil +} + +func (schema *Schema) IsMatchingJSONObject(value map[string]interface{}) bool { + settings := newSchemaValidationSettings(FailFast()) + return schema.visitJSON(settings, value) == nil +} + +func (schema *Schema) VisitJSON(value interface{}, opts 
...SchemaValidationOption) error { + settings := newSchemaValidationSettings(opts...) + return schema.visitJSON(settings, value) +} + +func (schema *Schema) visitJSON(settings *schemaValidationSettings, value interface{}) (err error) { + switch value := value.(type) { + case nil: + return schema.visitJSONNull(settings) + case float64: + if math.IsNaN(value) { + return ErrSchemaInputNaN + } + if math.IsInf(value, 0) { + return ErrSchemaInputInf + } + } + + if schema.IsEmpty() { + return + } + if err = schema.visitSetOperations(settings, value); err != nil { + return + } + + switch value := value.(type) { + case nil: + return schema.visitJSONNull(settings) + case bool: + return schema.visitJSONBoolean(settings, value) + case float64: + return schema.visitJSONNumber(settings, value) + case string: + return schema.visitJSONString(settings, value) + case []interface{}: + return schema.visitJSONArray(settings, value) + case map[string]interface{}: + return schema.visitJSONObject(settings, value) + case map[interface{}]interface{}: // for YAML cf. issue #444 + values := make(map[string]interface{}, len(value)) + for key, v := range value { + if k, ok := key.(string); ok { + values[k] = v + } + } + if len(value) == len(values) { + return schema.visitJSONObject(settings, values) + } + } + return &SchemaError{ + Value: value, + Schema: schema, + SchemaField: "type", + Reason: fmt.Sprintf("unhandled value of type %T", value), + } +} + +func (schema *Schema) visitSetOperations(settings *schemaValidationSettings, value interface{}) (err error) { + if enum := schema.Enum; len(enum) != 0 { + for _, v := range enum { + if value == v { + return + } + } + if settings.failfast { + return errSchema + } + return &SchemaError{ + Value: value, + Schema: schema, + SchemaField: "enum", + Reason: "value is not one of the allowed values", + } + } + + if ref := schema.Not; ref != nil { + v := ref.Value + if v == nil { + return foundUnresolvedRef(ref.Ref) + } + if err := v.visitJSON(settings, value); err == nil { + if settings.failfast { + return errSchema + } + return &SchemaError{ + Value: value, + Schema: schema, + SchemaField: "not", + } + } + } + + if v := schema.OneOf; len(v) > 0 { + var discriminatorRef string + if schema.Discriminator != nil { + pn := schema.Discriminator.PropertyName + if valuemap, okcheck := value.(map[string]interface{}); okcheck { + discriminatorVal, okcheck := valuemap[pn] + if !okcheck { + return errors.New("input does not contain the discriminator property") + } + + if discriminatorRef, okcheck = schema.Discriminator.Mapping[discriminatorVal.(string)]; len(schema.Discriminator.Mapping) > 0 && !okcheck { + return errors.New("input does not contain a valid discriminator value") + } + } + } + + ok := 0 + validationErrors := []error{} + for _, item := range v { + v := item.Value + if v == nil { + return foundUnresolvedRef(item.Ref) + } + + if discriminatorRef != "" && discriminatorRef != item.Ref { + continue + } + + if err := v.visitJSON(settings, value); err != nil { + validationErrors = append(validationErrors, err) + continue + } + + ok++ + } + + if ok != 1 { + if len(validationErrors) > 1 { + errorMessage := "" + for _, err := range validationErrors { + if errorMessage != "" { + errorMessage += " Or " + } + errorMessage += err.Error() + } + return errors.New("doesn't match schema due to: " + errorMessage) + } + if settings.failfast { + return errSchema + } + e := &SchemaError{ + Value: value, + Schema: schema, + SchemaField: "oneOf", + } + if ok > 1 { + e.Origin = ErrOneOfConflict + } 
else if len(validationErrors) == 1 { + e.Origin = validationErrors[0] + } + + return e + } + } + + if v := schema.AnyOf; len(v) > 0 { + ok := false + for _, item := range v { + v := item.Value + if v == nil { + return foundUnresolvedRef(item.Ref) + } + if err := v.visitJSON(settings, value); err == nil { + ok = true + break + } + } + if !ok { + if settings.failfast { + return errSchema + } + return &SchemaError{ + Value: value, + Schema: schema, + SchemaField: "anyOf", + } + } + } + + for _, item := range schema.AllOf { + v := item.Value + if v == nil { + return foundUnresolvedRef(item.Ref) + } + if err := v.visitJSON(settings, value); err != nil { + if settings.failfast { + return errSchema + } + return &SchemaError{ + Value: value, + Schema: schema, + SchemaField: "allOf", + Origin: err, + } + } + } + return +} + +func (schema *Schema) visitJSONNull(settings *schemaValidationSettings) (err error) { + if schema.Nullable { + return + } + if settings.failfast { + return errSchema + } + return &SchemaError{ + Value: nil, + Schema: schema, + SchemaField: "nullable", + Reason: "Value is not nullable", + } +} + +func (schema *Schema) VisitJSONBoolean(value bool) error { + settings := newSchemaValidationSettings() + return schema.visitJSONBoolean(settings, value) +} + +func (schema *Schema) visitJSONBoolean(settings *schemaValidationSettings, value bool) (err error) { + if schemaType := schema.Type; schemaType != "" && schemaType != TypeBoolean { + return schema.expectedType(settings, TypeBoolean) + } + return +} + +func (schema *Schema) VisitJSONNumber(value float64) error { + settings := newSchemaValidationSettings() + return schema.visitJSONNumber(settings, value) +} + +func (schema *Schema) visitJSONNumber(settings *schemaValidationSettings, value float64) error { + var me MultiError + schemaType := schema.Type + if schemaType == "integer" { + if bigFloat := big.NewFloat(value); !bigFloat.IsInt() { + if settings.failfast { + return errSchema + } + err := &SchemaError{ + Value: value, + Schema: schema, + SchemaField: "type", + Reason: "Value must be an integer", + } + if !settings.multiError { + return err + } + me = append(me, err) + } + } else if schemaType != "" && schemaType != TypeNumber { + return schema.expectedType(settings, "number, integer") + } + + // "exclusiveMinimum" + if v := schema.ExclusiveMin; v && !(*schema.Min < value) { + if settings.failfast { + return errSchema + } + err := &SchemaError{ + Value: value, + Schema: schema, + SchemaField: "exclusiveMinimum", + Reason: fmt.Sprintf("number must be more than %g", *schema.Min), + } + if !settings.multiError { + return err + } + me = append(me, err) + } + + // "exclusiveMaximum" + if v := schema.ExclusiveMax; v && !(*schema.Max > value) { + if settings.failfast { + return errSchema + } + err := &SchemaError{ + Value: value, + Schema: schema, + SchemaField: "exclusiveMaximum", + Reason: fmt.Sprintf("number must be less than %g", *schema.Max), + } + if !settings.multiError { + return err + } + me = append(me, err) + } + + // "minimum" + if v := schema.Min; v != nil && !(*v <= value) { + if settings.failfast { + return errSchema + } + err := &SchemaError{ + Value: value, + Schema: schema, + SchemaField: "minimum", + Reason: fmt.Sprintf("number must be at least %g", *v), + } + if !settings.multiError { + return err + } + me = append(me, err) + } + + // "maximum" + if v := schema.Max; v != nil && !(*v >= value) { + if settings.failfast { + return errSchema + } + err := &SchemaError{ + Value: value, + Schema: schema, + SchemaField: 
"maximum", + Reason: fmt.Sprintf("number must be at most %g", *v), + } + if !settings.multiError { + return err + } + me = append(me, err) + } + + // "multipleOf" + if v := schema.MultipleOf; v != nil { + // "A numeric instance is valid only if division by this keyword's + // value results in an integer." + if bigFloat := big.NewFloat(value / *v); !bigFloat.IsInt() { + if settings.failfast { + return errSchema + } + err := &SchemaError{ + Value: value, + Schema: schema, + SchemaField: "multipleOf", + } + if !settings.multiError { + return err + } + me = append(me, err) + } + } + + if len(me) > 0 { + return me + } + + return nil +} + +func (schema *Schema) VisitJSONString(value string) error { + settings := newSchemaValidationSettings() + return schema.visitJSONString(settings, value) +} + +func (schema *Schema) visitJSONString(settings *schemaValidationSettings, value string) error { + if schemaType := schema.Type; schemaType != "" && schemaType != TypeString { + return schema.expectedType(settings, TypeString) + } + + var me MultiError + + // "minLength" and "maxLength" + minLength := schema.MinLength + maxLength := schema.MaxLength + if minLength != 0 || maxLength != nil { + // JSON schema string lengths are UTF-16, not UTF-8! + length := int64(0) + for _, r := range value { + if utf16.IsSurrogate(r) { + length += 2 + } else { + length++ + } + } + if minLength != 0 && length < int64(minLength) { + if settings.failfast { + return errSchema + } + err := &SchemaError{ + Value: value, + Schema: schema, + SchemaField: "minLength", + Reason: fmt.Sprintf("minimum string length is %d", minLength), + } + if !settings.multiError { + return err + } + me = append(me, err) + } + if maxLength != nil && length > int64(*maxLength) { + if settings.failfast { + return errSchema + } + err := &SchemaError{ + Value: value, + Schema: schema, + SchemaField: "maxLength", + Reason: fmt.Sprintf("maximum string length is %d", *maxLength), + } + if !settings.multiError { + return err + } + me = append(me, err) + } + } + + // "pattern" + if schema.Pattern != "" && schema.compiledPattern == nil { + var err error + if err = schema.compilePattern(); err != nil { + if !settings.multiError { + return err + } + me = append(me, err) + } + } + if cp := schema.compiledPattern; cp != nil && !cp.MatchString(value) { + err := &SchemaError{ + Value: value, + Schema: schema, + SchemaField: "pattern", + Reason: fmt.Sprintf(`string doesn't match the regular expression "%s"`, schema.Pattern), + } + if !settings.multiError { + return err + } + me = append(me, err) + } + + // "format" + var formatErr string + if format := schema.Format; format != "" { + if f, ok := SchemaStringFormats[format]; ok { + switch { + case f.regexp != nil && f.callback == nil: + if cp := f.regexp; !cp.MatchString(value) { + formatErr = fmt.Sprintf(`string doesn't match the format %q (regular expression "%s")`, format, cp.String()) + } + case f.regexp == nil && f.callback != nil: + if err := f.callback(value); err != nil { + formatErr = err.Error() + } + default: + formatErr = fmt.Sprintf("corrupted entry %q in SchemaStringFormats", format) + } + } + } + if formatErr != "" { + err := &SchemaError{ + Value: value, + Schema: schema, + SchemaField: "format", + Reason: formatErr, + } + if !settings.multiError { + return err + } + me = append(me, err) + + } + + if len(me) > 0 { + return me + } + + return nil +} + +func (schema *Schema) VisitJSONArray(value []interface{}) error { + settings := newSchemaValidationSettings() + return schema.visitJSONArray(settings, 
value) +} + +func (schema *Schema) visitJSONArray(settings *schemaValidationSettings, value []interface{}) error { + if schemaType := schema.Type; schemaType != "" && schemaType != TypeArray { + return schema.expectedType(settings, TypeArray) + } + + var me MultiError + + lenValue := int64(len(value)) + + // "minItems" + if v := schema.MinItems; v != 0 && lenValue < int64(v) { + if settings.failfast { + return errSchema + } + err := &SchemaError{ + Value: value, + Schema: schema, + SchemaField: "minItems", + Reason: fmt.Sprintf("minimum number of items is %d", v), + } + if !settings.multiError { + return err + } + me = append(me, err) + } + + // "maxItems" + if v := schema.MaxItems; v != nil && lenValue > int64(*v) { + if settings.failfast { + return errSchema + } + err := &SchemaError{ + Value: value, + Schema: schema, + SchemaField: "maxItems", + Reason: fmt.Sprintf("maximum number of items is %d", *v), + } + if !settings.multiError { + return err + } + me = append(me, err) + } + + // "uniqueItems" + if sliceUniqueItemsChecker == nil { + sliceUniqueItemsChecker = isSliceOfUniqueItems + } + if v := schema.UniqueItems; v && !sliceUniqueItemsChecker(value) { + if settings.failfast { + return errSchema + } + err := &SchemaError{ + Value: value, + Schema: schema, + SchemaField: "uniqueItems", + Reason: "duplicate items found", + } + if !settings.multiError { + return err + } + me = append(me, err) + } + + // "items" + if itemSchemaRef := schema.Items; itemSchemaRef != nil { + itemSchema := itemSchemaRef.Value + if itemSchema == nil { + return foundUnresolvedRef(itemSchemaRef.Ref) + } + for i, item := range value { + if err := itemSchema.visitJSON(settings, item); err != nil { + err = markSchemaErrorIndex(err, i) + if !settings.multiError { + return err + } + if itemMe, ok := err.(MultiError); ok { + me = append(me, itemMe...) 
+ } else { + me = append(me, err) + } + } + } + } + + if len(me) > 0 { + return me + } + + return nil +} + +func (schema *Schema) VisitJSONObject(value map[string]interface{}) error { + settings := newSchemaValidationSettings() + return schema.visitJSONObject(settings, value) +} + +func (schema *Schema) visitJSONObject(settings *schemaValidationSettings, value map[string]interface{}) error { + if schemaType := schema.Type; schemaType != "" && schemaType != TypeObject { + return schema.expectedType(settings, TypeObject) + } + + var me MultiError + + // "properties" + properties := schema.Properties + lenValue := int64(len(value)) + + // "minProperties" + if v := schema.MinProps; v != 0 && lenValue < int64(v) { + if settings.failfast { + return errSchema + } + err := &SchemaError{ + Value: value, + Schema: schema, + SchemaField: "minProperties", + Reason: fmt.Sprintf("there must be at least %d properties", v), + } + if !settings.multiError { + return err + } + me = append(me, err) + } + + // "maxProperties" + if v := schema.MaxProps; v != nil && lenValue > int64(*v) { + if settings.failfast { + return errSchema + } + err := &SchemaError{ + Value: value, + Schema: schema, + SchemaField: "maxProperties", + Reason: fmt.Sprintf("there must be at most %d properties", *v), + } + if !settings.multiError { + return err + } + me = append(me, err) + } + + // "additionalProperties" + var additionalProperties *Schema + if ref := schema.AdditionalProperties; ref != nil { + additionalProperties = ref.Value + } + for k, v := range value { + if properties != nil { + propertyRef := properties[k] + if propertyRef != nil { + p := propertyRef.Value + if p == nil { + return foundUnresolvedRef(propertyRef.Ref) + } + if err := p.visitJSON(settings, v); err != nil { + if settings.failfast { + return errSchema + } + err = markSchemaErrorKey(err, k) + if !settings.multiError { + return err + } + if v, ok := err.(MultiError); ok { + me = append(me, v...) + continue + } + me = append(me, err) + } + continue + } + } + allowed := schema.AdditionalPropertiesAllowed + if additionalProperties != nil || allowed == nil || (allowed != nil && *allowed) { + if additionalProperties != nil { + if err := additionalProperties.visitJSON(settings, v); err != nil { + if settings.failfast { + return errSchema + } + err = markSchemaErrorKey(err, k) + if !settings.multiError { + return err + } + if v, ok := err.(MultiError); ok { + me = append(me, v...) 
+ continue + } + me = append(me, err) + } + } + continue + } + if settings.failfast { + return errSchema + } + err := &SchemaError{ + Value: value, + Schema: schema, + SchemaField: "properties", + Reason: fmt.Sprintf("property %q is unsupported", k), + } + if !settings.multiError { + return err + } + me = append(me, err) + } + + // "required" + for _, k := range schema.Required { + if _, ok := value[k]; !ok { + if s := schema.Properties[k]; s != nil && s.Value.ReadOnly && settings.asreq { + continue + } + if s := schema.Properties[k]; s != nil && s.Value.WriteOnly && settings.asrep { + continue + } + if settings.failfast { + return errSchema + } + err := markSchemaErrorKey(&SchemaError{ + Value: value, + Schema: schema, + SchemaField: "required", + Reason: fmt.Sprintf("property %q is missing", k), + }, k) + if !settings.multiError { + return err + } + me = append(me, err) + } + } + + if len(me) > 0 { + return me + } + + return nil +} + +func (schema *Schema) expectedType(settings *schemaValidationSettings, typ string) error { + if settings.failfast { + return errSchema + } + return &SchemaError{ + Value: typ, + Schema: schema, + SchemaField: "type", + Reason: "Field must be set to " + schema.Type + " or not be present", + } +} + +func (schema *Schema) compilePattern() (err error) { + if schema.compiledPattern, err = regexp.Compile(schema.Pattern); err != nil { + return &SchemaError{ + Schema: schema, + SchemaField: "pattern", + Reason: fmt.Sprintf("cannot compile pattern %q: %v", schema.Pattern, err), + } + } + return nil +} + +type SchemaError struct { + Value interface{} + reversePath []string + Schema *Schema + SchemaField string + Reason string + Origin error +} + +var _ interface{ Unwrap() error } = SchemaError{} + +func markSchemaErrorKey(err error, key string) error { + if v, ok := err.(*SchemaError); ok { + v.reversePath = append(v.reversePath, key) + return v + } + if v, ok := err.(MultiError); ok { + for _, e := range v { + _ = markSchemaErrorKey(e, key) + } + return v + } + return err +} + +func markSchemaErrorIndex(err error, index int) error { + if v, ok := err.(*SchemaError); ok { + v.reversePath = append(v.reversePath, strconv.FormatInt(int64(index), 10)) + return v + } + if v, ok := err.(MultiError); ok { + for _, e := range v { + _ = markSchemaErrorIndex(e, index) + } + return v + } + return err +} + +func (err *SchemaError) JSONPointer() []string { + reversePath := err.reversePath + path := append([]string(nil), reversePath...) 
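// Illustrative sketch: the reversePath bookkeeping above surfaces as a JSON
// pointer on the error (builders and values here are hypothetical):
//
//	s := NewObjectSchema().WithProperty("age", NewIntegerSchema().WithMin(0))
//	err := s.VisitJSON(map[string]interface{}{"age": float64(-1)})
//	if e, ok := err.(*SchemaError); ok {
//		fmt.Println(e.JSONPointer()) // [age]
//	}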
+	for left, right := 0, len(path)-1; left < right; left, right = left+1, right-1 {
+		path[left], path[right] = path[right], path[left]
+	}
+	return path
+}
+
+func (err *SchemaError) Error() string {
+	if err.Origin != nil {
+		return err.Origin.Error()
+	}
+
+	buf := bytes.NewBuffer(make([]byte, 0, 256))
+	if len(err.reversePath) > 0 {
+		buf.WriteString(`Error at "`)
+		reversePath := err.reversePath
+		for i := len(reversePath) - 1; i >= 0; i-- {
+			buf.WriteByte('/')
+			buf.WriteString(reversePath[i])
+		}
+		buf.WriteString(`": `)
+	}
+	reason := err.Reason
+	if reason == "" {
+		buf.WriteString(`Doesn't match schema "`)
+		buf.WriteString(err.SchemaField)
+		buf.WriteString(`"`)
+	} else {
+		buf.WriteString(reason)
+	}
+	if !SchemaErrorDetailsDisabled {
+		buf.WriteString("\nSchema:\n  ")
+		encoder := json.NewEncoder(buf)
+		encoder.SetIndent("  ", "  ")
+		if err := encoder.Encode(err.Schema); err != nil {
+			panic(err)
+		}
+		buf.WriteString("\nValue:\n  ")
+		if err := encoder.Encode(err.Value); err != nil {
+			panic(err)
+		}
+	}
+	return buf.String()
+}
+
+func (err SchemaError) Unwrap() error {
+	return err.Origin
+}
+
+func isSliceOfUniqueItems(xs []interface{}) bool {
+	s := len(xs)
+	m := make(map[string]struct{}, s)
+	for _, x := range xs {
+		// The input slice was converted from a JSON string, so converting
+		// it back cannot fail.
+		key, _ := json.Marshal(&x)
+		m[string(key)] = struct{}{}
+	}
+	return s == len(m)
+}
+
+// SliceUniqueItemsChecker is a function used to check whether a given slice
+// has unique items.
+type SliceUniqueItemsChecker func(items []interface{}) bool
+
+// By default the predefined isSliceOfUniqueItems is used, which relies on
+// json.Marshal to build the map keys that detect duplicate items in a
+// given slice.
+var sliceUniqueItemsChecker SliceUniqueItemsChecker = isSliceOfUniqueItems
+
+// RegisterArrayUniqueItemsChecker registers a custom function used to
+// check whether a JSON array has unique items.
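// Illustrative sketch of swapping in a custom checker via
// RegisterArrayUniqueItemsChecker below (hypothetical implementation that
// assumes comparable items, unlike the json.Marshal-based default):
//
//	RegisterArrayUniqueItemsChecker(func(items []interface{}) bool {
//		seen := make(map[interface{}]struct{}, len(items))
//		for _, item := range items {
//			if _, dup := seen[item]; dup {
//				return false
//			}
//			seen[item] = struct{}{}
//		}
//		return true
//	})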
+func RegisterArrayUniqueItemsChecker(fn SliceUniqueItemsChecker) {
+	sliceUniqueItemsChecker = fn
+}
+
+func unsupportedFormat(format string) error {
+	return fmt.Errorf("unsupported 'format' value %q", format)
+}
diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/schema_formats.go b/vendor/github.com/getkin/kin-openapi/openapi3/schema_formats.go
new file mode 100644
index 00000000000..8a8a9406dbd
--- /dev/null
+++ b/vendor/github.com/getkin/kin-openapi/openapi3/schema_formats.go
@@ -0,0 +1,105 @@
+package openapi3
+
+import (
+	"fmt"
+	"net"
+	"regexp"
+	"strings"
+)
+
+const (
+	// FormatOfStringForUUIDOfRFC4122 is an optional predefined format for UUID v1-v5 as specified by RFC4122
+	FormatOfStringForUUIDOfRFC4122 = `^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[1-5][0-9a-fA-F]{3}-[89abAB][0-9a-fA-F]{3}-[0-9a-fA-F]{12}$`
+)
+
+// FormatCallback is a custom check on exotic formats
+type FormatCallback func(Val string) error
+
+type Format struct {
+	regexp   *regexp.Regexp
+	callback FormatCallback
+}
+
+// SchemaStringFormats allows for validating string formats
+var SchemaStringFormats = make(map[string]Format, 8)
+
+// DefineStringFormat defines a new regexp pattern for a given format
+func DefineStringFormat(name string, pattern string) {
+	re, err := regexp.Compile(pattern)
+	if err != nil {
+		err := fmt.Errorf("format %q has invalid pattern %q: %v", name, pattern, err)
+		panic(err)
+	}
+	SchemaStringFormats[name] = Format{regexp: re}
+}
+
+// DefineStringFormatCallback adds a validation function for a specific schema format entry
+func DefineStringFormatCallback(name string, callback FormatCallback) {
+	SchemaStringFormats[name] = Format{callback: callback}
+}
+
+func validateIP(ip string) error {
+	parsed := net.ParseIP(ip)
+	if parsed == nil {
+		return &SchemaError{
+			Value:  ip,
+			Reason: "Not an IP address",
+		}
+	}
+	return nil
+}
+
+func validateIPv4(ip string) error {
+	if err := validateIP(ip); err != nil {
+		return err
+	}
+
+	if !(strings.Count(ip, ":") < 2) {
+		return &SchemaError{
+			Value:  ip,
+			Reason: "Not an IPv4 address (it's IPv6)",
+		}
+	}
+	return nil
+}
+
+func validateIPv6(ip string) error {
+	if err := validateIP(ip); err != nil {
+		return err
+	}
+
+	if !(strings.Count(ip, ":") >= 2) {
+		return &SchemaError{
+			Value:  ip,
+			Reason: "Not an IPv6 address (it's IPv4)",
+		}
+	}
+	return nil
+}
+
+func init() {
+	// This pattern catches only some suspiciously wrong-looking email addresses.
+	// Use DefineStringFormat(...) if you need something stricter.
+	DefineStringFormat("email", `^[^@]+@[^@<>",\s]+$`)
+
+	// Base64
+	// The pattern supports base64 and base64url. Padding ('=') is supported.
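// Illustrative sketch: string formats are global and opt-in. The "email",
// "byte", "date" and "date-time" patterns are registered by this init
// function; the IP formats only after calling the helpers further down:
//
//	DefineIPv4Format() // enables "format: ipv4" via validateIPv4 above
//	DefineStringFormat("uuid", FormatOfStringForUUIDOfRFC4122)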
+ DefineStringFormat("byte", `(^$|^[a-zA-Z0-9+/\-_]*=*$)`) + + // date + DefineStringFormat("date", `^[0-9]{4}-(0[0-9]|10|11|12)-([0-2][0-9]|30|31)$`) + + // date-time + DefineStringFormat("date-time", `^[0-9]{4}-(0[0-9]|10|11|12)-([0-2][0-9]|30|31)T[0-9]{2}:[0-9]{2}:[0-9]{2}(.[0-9]+)?(Z|(\+|-)[0-9]{2}:[0-9]{2})?$`) + +} + +// DefineIPv4Format opts in ipv4 format validation on top of OAS 3 spec +func DefineIPv4Format() { + DefineStringFormatCallback("ipv4", validateIPv4) +} + +// DefineIPv6Format opts in ipv6 format validation on top of OAS 3 spec +func DefineIPv6Format() { + DefineStringFormatCallback("ipv6", validateIPv6) +} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/schema_validation_settings.go b/vendor/github.com/getkin/kin-openapi/openapi3/schema_validation_settings.go new file mode 100644 index 00000000000..71db5f237a2 --- /dev/null +++ b/vendor/github.com/getkin/kin-openapi/openapi3/schema_validation_settings.go @@ -0,0 +1,34 @@ +package openapi3 + +// SchemaValidationOption describes options a user has when validating request / response bodies. +type SchemaValidationOption func(*schemaValidationSettings) + +type schemaValidationSettings struct { + failfast bool + multiError bool + asreq, asrep bool // exclusive (XOR) fields +} + +// FailFast returns schema validation errors quicker. +func FailFast() SchemaValidationOption { + return func(s *schemaValidationSettings) { s.failfast = true } +} + +func MultiErrors() SchemaValidationOption { + return func(s *schemaValidationSettings) { s.multiError = true } +} + +func VisitAsRequest() SchemaValidationOption { + return func(s *schemaValidationSettings) { s.asreq, s.asrep = true, false } +} +func VisitAsResponse() SchemaValidationOption { + return func(s *schemaValidationSettings) { s.asreq, s.asrep = false, true } +} + +func newSchemaValidationSettings(opts ...SchemaValidationOption) *schemaValidationSettings { + settings := &schemaValidationSettings{} + for _, opt := range opts { + opt(settings) + } + return settings +} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/security_requirements.go b/vendor/github.com/getkin/kin-openapi/openapi3/security_requirements.go new file mode 100644 index 00000000000..ce6fcc6f179 --- /dev/null +++ b/vendor/github.com/getkin/kin-openapi/openapi3/security_requirements.go @@ -0,0 +1,43 @@ +package openapi3 + +import ( + "context" +) + +type SecurityRequirements []SecurityRequirement + +func NewSecurityRequirements() *SecurityRequirements { + return &SecurityRequirements{} +} + +func (srs *SecurityRequirements) With(securityRequirement SecurityRequirement) *SecurityRequirements { + *srs = append(*srs, securityRequirement) + return srs +} + +func (value SecurityRequirements) Validate(ctx context.Context) error { + for _, item := range value { + if err := item.Validate(ctx); err != nil { + return err + } + } + return nil +} + +type SecurityRequirement map[string][]string + +func NewSecurityRequirement() SecurityRequirement { + return make(SecurityRequirement) +} + +func (security SecurityRequirement) Authenticate(provider string, scopes ...string) SecurityRequirement { + if len(scopes) == 0 { + scopes = []string{} // Forces the variable to be encoded as an array instead of null + } + security[provider] = scopes + return security +} + +func (value SecurityRequirement) Validate(ctx context.Context) error { + return nil +} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/security_scheme.go b/vendor/github.com/getkin/kin-openapi/openapi3/security_scheme.go new file mode 
100644 index 00000000000..990f258d4ff --- /dev/null +++ b/vendor/github.com/getkin/kin-openapi/openapi3/security_scheme.go @@ -0,0 +1,241 @@ +package openapi3 + +import ( + "context" + "errors" + "fmt" + + "github.com/getkin/kin-openapi/jsoninfo" + "github.com/go-openapi/jsonpointer" +) + +type SecuritySchemes map[string]*SecuritySchemeRef + +func (s SecuritySchemes) JSONLookup(token string) (interface{}, error) { + ref, ok := s[token] + if ref == nil || ok == false { + return nil, fmt.Errorf("object has no field %q", token) + } + + if ref.Ref != "" { + return &Ref{Ref: ref.Ref}, nil + } + return ref.Value, nil +} + +var _ jsonpointer.JSONPointable = (*SecuritySchemes)(nil) + +type SecurityScheme struct { + ExtensionProps + + Type string `json:"type,omitempty" yaml:"type,omitempty"` + Description string `json:"description,omitempty" yaml:"description,omitempty"` + Name string `json:"name,omitempty" yaml:"name,omitempty"` + In string `json:"in,omitempty" yaml:"in,omitempty"` + Scheme string `json:"scheme,omitempty" yaml:"scheme,omitempty"` + BearerFormat string `json:"bearerFormat,omitempty" yaml:"bearerFormat,omitempty"` + Flows *OAuthFlows `json:"flows,omitempty" yaml:"flows,omitempty"` + OpenIdConnectUrl string `json:"openIdConnectUrl,omitempty" yaml:"openIdConnectUrl,omitempty"` +} + +func NewSecurityScheme() *SecurityScheme { + return &SecurityScheme{} +} + +func NewCSRFSecurityScheme() *SecurityScheme { + return &SecurityScheme{ + Type: "apiKey", + In: "header", + Name: "X-XSRF-TOKEN", + } +} + +func NewOIDCSecurityScheme(oidcUrl string) *SecurityScheme { + return &SecurityScheme{ + Type: "openIdConnect", + OpenIdConnectUrl: oidcUrl, + } +} + +func NewJWTSecurityScheme() *SecurityScheme { + return &SecurityScheme{ + Type: "http", + Scheme: "bearer", + BearerFormat: "JWT", + } +} + +func (ss *SecurityScheme) MarshalJSON() ([]byte, error) { + return jsoninfo.MarshalStrictStruct(ss) +} + +func (ss *SecurityScheme) UnmarshalJSON(data []byte) error { + return jsoninfo.UnmarshalStrictStruct(data, ss) +} + +func (ss *SecurityScheme) WithType(value string) *SecurityScheme { + ss.Type = value + return ss +} + +func (ss *SecurityScheme) WithDescription(value string) *SecurityScheme { + ss.Description = value + return ss +} + +func (ss *SecurityScheme) WithName(value string) *SecurityScheme { + ss.Name = value + return ss +} + +func (ss *SecurityScheme) WithIn(value string) *SecurityScheme { + ss.In = value + return ss +} + +func (ss *SecurityScheme) WithScheme(value string) *SecurityScheme { + ss.Scheme = value + return ss +} + +func (ss *SecurityScheme) WithBearerFormat(value string) *SecurityScheme { + ss.BearerFormat = value + return ss +} + +func (value *SecurityScheme) Validate(ctx context.Context) error { + hasIn := false + hasBearerFormat := false + hasFlow := false + switch value.Type { + case "apiKey": + hasIn = true + case "http": + scheme := value.Scheme + switch scheme { + case "bearer": + hasBearerFormat = true + case "basic", "negotiate", "digest": + default: + return fmt.Errorf("security scheme of type 'http' has invalid 'scheme' value %q", scheme) + } + case "oauth2": + hasFlow = true + case "openIdConnect": + if value.OpenIdConnectUrl == "" { + return fmt.Errorf("no OIDC URL found for openIdConnect security scheme %q", value.Name) + } + default: + return fmt.Errorf("security scheme 'type' can't be %q", value.Type) + } + + // Validate "in" and "name" + if hasIn { + switch value.In { + case "query", "header", "cookie": + default: + return fmt.Errorf("security scheme of type 
'apiKey' should have 'in'. It can be 'query', 'header' or 'cookie', not %q", value.In) + } + if value.Name == "" { + return errors.New("security scheme of type 'apiKey' should have 'name'") + } + } else if len(value.In) > 0 { + return fmt.Errorf("security scheme of type %q can't have 'in'", value.Type) + } else if len(value.Name) > 0 { + return errors.New("security scheme of type 'apiKey' can't have 'name'") + } + + // Validate "format" + // "bearerFormat" is an arbitrary string so we only check if the scheme supports it + if !hasBearerFormat && len(value.BearerFormat) > 0 { + return fmt.Errorf("security scheme of type %q can't have 'bearerFormat'", value.Type) + } + + // Validate "flow" + if hasFlow { + flow := value.Flows + if flow == nil { + return fmt.Errorf("security scheme of type %q should have 'flows'", value.Type) + } + if err := flow.Validate(ctx); err != nil { + return fmt.Errorf("security scheme 'flow' is invalid: %v", err) + } + } else if value.Flows != nil { + return fmt.Errorf("security scheme of type %q can't have 'flows'", value.Type) + } + return nil +} + +type OAuthFlows struct { + ExtensionProps + Implicit *OAuthFlow `json:"implicit,omitempty" yaml:"implicit,omitempty"` + Password *OAuthFlow `json:"password,omitempty" yaml:"password,omitempty"` + ClientCredentials *OAuthFlow `json:"clientCredentials,omitempty" yaml:"clientCredentials,omitempty"` + AuthorizationCode *OAuthFlow `json:"authorizationCode,omitempty" yaml:"authorizationCode,omitempty"` +} + +type oAuthFlowType int + +const ( + oAuthFlowTypeImplicit oAuthFlowType = iota + oAuthFlowTypePassword + oAuthFlowTypeClientCredentials + oAuthFlowAuthorizationCode +) + +func (flows *OAuthFlows) MarshalJSON() ([]byte, error) { + return jsoninfo.MarshalStrictStruct(flows) +} + +func (flows *OAuthFlows) UnmarshalJSON(data []byte) error { + return jsoninfo.UnmarshalStrictStruct(data, flows) +} + +func (flows *OAuthFlows) Validate(ctx context.Context) error { + if v := flows.Implicit; v != nil { + return v.Validate(ctx, oAuthFlowTypeImplicit) + } + if v := flows.Password; v != nil { + return v.Validate(ctx, oAuthFlowTypePassword) + } + if v := flows.ClientCredentials; v != nil { + return v.Validate(ctx, oAuthFlowTypeClientCredentials) + } + if v := flows.AuthorizationCode; v != nil { + return v.Validate(ctx, oAuthFlowAuthorizationCode) + } + return errors.New("no OAuth flow is defined") +} + +type OAuthFlow struct { + ExtensionProps + AuthorizationURL string `json:"authorizationUrl,omitempty" yaml:"authorizationUrl,omitempty"` + TokenURL string `json:"tokenUrl,omitempty" yaml:"tokenUrl,omitempty"` + RefreshURL string `json:"refreshUrl,omitempty" yaml:"refreshUrl,omitempty"` + Scopes map[string]string `json:"scopes" yaml:"scopes"` +} + +func (flow *OAuthFlow) MarshalJSON() ([]byte, error) { + return jsoninfo.MarshalStrictStruct(flow) +} + +func (flow *OAuthFlow) UnmarshalJSON(data []byte) error { + return jsoninfo.UnmarshalStrictStruct(data, flow) +} + +func (flow *OAuthFlow) Validate(ctx context.Context, typ oAuthFlowType) error { + if typ == oAuthFlowAuthorizationCode || typ == oAuthFlowTypeImplicit { + if v := flow.AuthorizationURL; v == "" { + return errors.New("an OAuth flow is missing 'authorizationUrl in authorizationCode or implicit '") + } + } + if typ != oAuthFlowTypeImplicit { + if v := flow.TokenURL; v == "" { + return errors.New("an OAuth flow is missing 'tokenUrl in not implicit'") + } + } + if v := flow.Scopes; v == nil { + return errors.New("an OAuth flow is missing 'scopes'") + } + return nil +} diff --git 
a/vendor/github.com/getkin/kin-openapi/openapi3/serialization_method.go b/vendor/github.com/getkin/kin-openapi/openapi3/serialization_method.go new file mode 100644 index 00000000000..2ec8bd2db8a --- /dev/null +++ b/vendor/github.com/getkin/kin-openapi/openapi3/serialization_method.go @@ -0,0 +1,17 @@ +package openapi3 + +const ( + SerializationSimple = "simple" + SerializationLabel = "label" + SerializationMatrix = "matrix" + SerializationForm = "form" + SerializationSpaceDelimited = "spaceDelimited" + SerializationPipeDelimited = "pipeDelimited" + SerializationDeepObject = "deepObject" +) + +// SerializationMethod describes a serialization method of HTTP request's parameters and body. +type SerializationMethod struct { + Style string + Explode bool +} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/server.go b/vendor/github.com/getkin/kin-openapi/openapi3/server.go new file mode 100644 index 00000000000..4415bd08fcd --- /dev/null +++ b/vendor/github.com/getkin/kin-openapi/openapi3/server.go @@ -0,0 +1,175 @@ +package openapi3 + +import ( + "context" + "errors" + "fmt" + "math" + "net/url" + "strings" + + "github.com/getkin/kin-openapi/jsoninfo" +) + +// Servers is specified by OpenAPI/Swagger standard version 3.0. +type Servers []*Server + +// Validate ensures servers are per the OpenAPIv3 specification. +func (value Servers) Validate(ctx context.Context) error { + for _, v := range value { + if err := v.Validate(ctx); err != nil { + return err + } + } + return nil +} + +func (servers Servers) MatchURL(parsedURL *url.URL) (*Server, []string, string) { + rawURL := parsedURL.String() + if i := strings.IndexByte(rawURL, '?'); i >= 0 { + rawURL = rawURL[:i] + } + for _, server := range servers { + pathParams, remaining, ok := server.MatchRawURL(rawURL) + if ok { + return server, pathParams, remaining + } + } + return nil, nil, "" +} + +// Server is specified by OpenAPI/Swagger standard version 3.0. 
+type Server struct { + ExtensionProps + URL string `json:"url" yaml:"url"` + Description string `json:"description,omitempty" yaml:"description,omitempty"` + Variables map[string]*ServerVariable `json:"variables,omitempty" yaml:"variables,omitempty"` +} + +func (server *Server) MarshalJSON() ([]byte, error) { + return jsoninfo.MarshalStrictStruct(server) +} + +func (server *Server) UnmarshalJSON(data []byte) error { + return jsoninfo.UnmarshalStrictStruct(data, server) +} + +func (server Server) ParameterNames() ([]string, error) { + pattern := server.URL + var params []string + for len(pattern) > 0 { + i := strings.IndexByte(pattern, '{') + if i < 0 { + break + } + pattern = pattern[i+1:] + i = strings.IndexByte(pattern, '}') + if i < 0 { + return nil, errors.New("missing '}'") + } + params = append(params, strings.TrimSpace(pattern[:i])) + pattern = pattern[i+1:] + } + return params, nil +} + +func (server Server) MatchRawURL(input string) ([]string, string, bool) { + pattern := server.URL + var params []string + for len(pattern) > 0 { + c := pattern[0] + if len(pattern) == 1 && c == '/' { + break + } + if c == '{' { + // Find end of pattern + i := strings.IndexByte(pattern, '}') + if i < 0 { + return nil, "", false + } + pattern = pattern[i+1:] + + // Find next matching pattern character or next '/' whichever comes first + np := -1 + if len(pattern) > 0 { + np = strings.IndexByte(input, pattern[0]) + } + ns := strings.IndexByte(input, '/') + + if np < 0 { + i = ns + } else if ns < 0 { + i = np + } else { + i = int(math.Min(float64(np), float64(ns))) + } + if i < 0 { + i = len(input) + } + params = append(params, input[:i]) + input = input[i:] + continue + } + if len(input) == 0 || input[0] != c { + return nil, "", false + } + pattern = pattern[1:] + input = input[1:] + } + if input == "" { + input = "/" + } + if input[0] != '/' { + return nil, "", false + } + return params, input, true +} + +func (value *Server) Validate(ctx context.Context) (err error) { + if value.URL == "" { + return errors.New("value of url must be a non-empty string") + } + opening, closing := strings.Count(value.URL, "{"), strings.Count(value.URL, "}") + if opening != closing { + return errors.New("server URL has mismatched { and }") + } + if opening != len(value.Variables) { + return errors.New("server has undeclared variables") + } + for name, v := range value.Variables { + if !strings.Contains(value.URL, fmt.Sprintf("{%s}", name)) { + return errors.New("server has undeclared variables") + } + if err = v.Validate(ctx); err != nil { + return + } + } + return +} + +// ServerVariable is specified by OpenAPI/Swagger standard version 3.0. 
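// Illustrative sketch of MatchRawURL above (hypothetical URLs):
//
//	server := &Server{URL: "https://{region}.example.com/v1"}
//	params, remaining, ok := server.MatchRawURL("https://eu.example.com/v1/pets")
//	// ok == true, params == []string{"eu"}, remaining == "/pets"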
+type ServerVariable struct { + ExtensionProps + Enum []string `json:"enum,omitempty" yaml:"enum,omitempty"` + Default string `json:"default,omitempty" yaml:"default,omitempty"` + Description string `json:"description,omitempty" yaml:"description,omitempty"` +} + +func (serverVariable *ServerVariable) MarshalJSON() ([]byte, error) { + return jsoninfo.MarshalStrictStruct(serverVariable) +} + +func (serverVariable *ServerVariable) UnmarshalJSON(data []byte) error { + return jsoninfo.UnmarshalStrictStruct(data, serverVariable) +} + +func (value *ServerVariable) Validate(ctx context.Context) error { + if value.Default == "" { + data, err := value.MarshalJSON() + if err != nil { + return err + } + return fmt.Errorf("field default is required in %s", data) + } + return nil +} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/tag.go b/vendor/github.com/getkin/kin-openapi/openapi3/tag.go new file mode 100644 index 00000000000..210b692489b --- /dev/null +++ b/vendor/github.com/getkin/kin-openapi/openapi3/tag.go @@ -0,0 +1,31 @@ +package openapi3 + +import "github.com/getkin/kin-openapi/jsoninfo" + +// Tags is specified by OpenAPI/Swagger 3.0 standard. +type Tags []*Tag + +func (tags Tags) Get(name string) *Tag { + for _, tag := range tags { + if tag.Name == name { + return tag + } + } + return nil +} + +// Tag is specified by OpenAPI/Swagger 3.0 standard. +type Tag struct { + ExtensionProps + Name string `json:"name,omitempty" yaml:"name,omitempty"` + Description string `json:"description,omitempty" yaml:"description,omitempty"` + ExternalDocs *ExternalDocs `json:"externalDocs,omitempty" yaml:"externalDocs,omitempty"` +} + +func (t *Tag) MarshalJSON() ([]byte, error) { + return jsoninfo.MarshalStrictStruct(t) +} + +func (t *Tag) UnmarshalJSON(data []byte) error { + return jsoninfo.UnmarshalStrictStruct(data, t) +} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3filter/authentication_input.go b/vendor/github.com/getkin/kin-openapi/openapi3filter/authentication_input.go new file mode 100644 index 00000000000..a53484b9935 --- /dev/null +++ b/vendor/github.com/getkin/kin-openapi/openapi3filter/authentication_input.go @@ -0,0 +1,29 @@ +package openapi3filter + +import ( + "fmt" + + "github.com/getkin/kin-openapi/openapi3" +) + +type AuthenticationInput struct { + RequestValidationInput *RequestValidationInput + SecuritySchemeName string + SecurityScheme *openapi3.SecurityScheme + Scopes []string +} + +func (input *AuthenticationInput) NewError(err error) error { + if err == nil { + if len(input.Scopes) == 0 { + err = fmt.Errorf("security requirement %q failed", input.SecuritySchemeName) + } else { + err = fmt.Errorf("security requirement %q (scopes: %+v) failed", input.SecuritySchemeName, input.Scopes) + } + } + return &RequestError{ + Input: input.RequestValidationInput, + Reason: "authorization failed", + Err: err, + } +} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3filter/errors.go b/vendor/github.com/getkin/kin-openapi/openapi3filter/errors.go new file mode 100644 index 00000000000..8454c817f24 --- /dev/null +++ b/vendor/github.com/getkin/kin-openapi/openapi3filter/errors.go @@ -0,0 +1,82 @@ +package openapi3filter + +import ( + "fmt" + + "github.com/getkin/kin-openapi/openapi3" +) + +var _ error = &RequestError{} + +// RequestError is returned by ValidateRequest when request does not match OpenAPI spec +type RequestError struct { + Input *RequestValidationInput + Parameter *openapi3.Parameter + RequestBody *openapi3.RequestBody + Reason string + Err error +} + +var _ 
interface{ Unwrap() error } = RequestError{} + +func (err *RequestError) Error() string { + reason := err.Reason + if e := err.Err; e != nil { + if len(reason) == 0 { + reason = e.Error() + } else { + reason += ": " + e.Error() + } + } + if v := err.Parameter; v != nil { + return fmt.Sprintf("parameter %q in %s has an error: %s", v.Name, v.In, reason) + } else if v := err.RequestBody; v != nil { + return fmt.Sprintf("request body has an error: %s", reason) + } else { + return reason + } +} + +func (err RequestError) Unwrap() error { + return err.Err +} + +var _ error = &ResponseError{} + +// ResponseError is returned by ValidateResponse when response does not match OpenAPI spec +type ResponseError struct { + Input *ResponseValidationInput + Reason string + Err error +} + +var _ interface{ Unwrap() error } = ResponseError{} + +func (err *ResponseError) Error() string { + reason := err.Reason + if e := err.Err; e != nil { + if len(reason) == 0 { + reason = e.Error() + } else { + reason += ": " + e.Error() + } + } + return reason +} + +func (err ResponseError) Unwrap() error { + return err.Err +} + +var _ error = &SecurityRequirementsError{} + +// SecurityRequirementsError is returned by ValidateSecurityRequirements +// when no requirement is met. +type SecurityRequirementsError struct { + SecurityRequirements openapi3.SecurityRequirements + Errors []error +} + +func (err *SecurityRequirementsError) Error() string { + return "Security requirements failed" +} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3filter/internal.go b/vendor/github.com/getkin/kin-openapi/openapi3filter/internal.go new file mode 100644 index 00000000000..facaf1de57f --- /dev/null +++ b/vendor/github.com/getkin/kin-openapi/openapi3filter/internal.go @@ -0,0 +1,13 @@ +package openapi3filter + +import ( + "strings" +) + +func parseMediaType(contentType string) string { + i := strings.IndexByte(contentType, ';') + if i < 0 { + return contentType + } + return contentType[:i] +} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3filter/middleware.go b/vendor/github.com/getkin/kin-openapi/openapi3filter/middleware.go new file mode 100644 index 00000000000..3709faf9b78 --- /dev/null +++ b/vendor/github.com/getkin/kin-openapi/openapi3filter/middleware.go @@ -0,0 +1,273 @@ +package openapi3filter + +import ( + "bytes" + "io" + "io/ioutil" + "log" + "net/http" + + "github.com/getkin/kin-openapi/routers" +) + +// Validator provides HTTP request and response validation middleware. +type Validator struct { + router routers.Router + errFunc ErrFunc + logFunc LogFunc + strict bool +} + +// ErrFunc handles errors that may occur during validation. +type ErrFunc func(w http.ResponseWriter, status int, code ErrCode, err error) + +// LogFunc handles log messages that may occur during validation. +type LogFunc func(message string, err error) + +// ErrCode is used for classification of different types of errors that may +// occur during validation. These may be used to write an appropriate response +// in ErrFunc. +type ErrCode int + +const ( + // ErrCodeOK indicates no error. It is also the default value. + ErrCodeOK = 0 + // ErrCodeCannotFindRoute happens when the validator fails to resolve the + // request to a defined OpenAPI route. + ErrCodeCannotFindRoute = iota + // ErrCodeRequestInvalid happens when the inbound request does not conform + // to the OpenAPI 3 specification. 
+	ErrCodeRequestInvalid = iota
+	// ErrCodeResponseInvalid happens when the wrapped handler response does
+	// not conform to the OpenAPI 3 specification.
+	ErrCodeResponseInvalid = iota
+)
+
+func (e ErrCode) responseText() string {
+	switch e {
+	case ErrCodeOK:
+		return "OK"
+	case ErrCodeCannotFindRoute:
+		return "not found"
+	case ErrCodeRequestInvalid:
+		return "bad request"
+	default:
+		return "server error"
+	}
+}
+
+// NewValidator returns a new request/response validation middleware, using
+// the given routes from an OpenAPI 3 specification.
+func NewValidator(router routers.Router, options ...ValidatorOption) *Validator {
+	v := &Validator{
+		router: router,
+		errFunc: func(w http.ResponseWriter, status int, code ErrCode, _ error) {
+			http.Error(w, code.responseText(), status)
+		},
+		logFunc: func(message string, err error) {
+			log.Printf("%s: %v", message, err)
+		},
+	}
+	for i := range options {
+		options[i](v)
+	}
+	return v
+}
+
+// ValidatorOption defines an option that may be specified when creating a
+// Validator.
+type ValidatorOption func(*Validator)
+
+// OnErr provides a callback that handles writing an HTTP response on a
+// validation error. This allows customization of error responses without
+// prescribing a particular form. This callback is only called on response
+// validator errors in Strict mode.
+func OnErr(f ErrFunc) ValidatorOption {
+	return func(v *Validator) {
+		v.errFunc = f
+	}
+}
+
+// OnLog provides a callback that handles logging in the Validator. This allows
+// the validator to integrate with a service's existing logging system without
+// prescribing a particular one.
+func OnLog(f LogFunc) ValidatorOption {
+	return func(v *Validator) {
+		v.logFunc = f
+	}
+}
+
+// Strict, if set, causes an internal server error to be sent if the wrapped
+// handler response fails response validation. If not set, the response is sent
+// and the error is only logged.
+func Strict(strict bool) ValidatorOption {
+	return func(v *Validator) {
+		v.strict = strict
+	}
+}
+
+// Middleware returns an http.Handler which wraps the given handler with
+// request and response validation.
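// Illustrative sketch of wiring the middleware (hypothetical names; assumes a
// loaded spec "doc" of type *openapi3.T and a router built with kin-openapi's
// routers/gorillamux package, neither of which appears in this file):
//
//	router, _ := gorillamux.NewRouter(doc)
//	v := openapi3filter.NewValidator(router, openapi3filter.Strict(true))
//	http.ListenAndServe(":8080", v.Middleware(mux))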
+func (v *Validator) Middleware(h http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		route, pathParams, err := v.router.FindRoute(r)
+		if err != nil {
+			v.logFunc("validation error: failed to find route for "+r.URL.String(), err)
+			v.errFunc(w, http.StatusNotFound, ErrCodeCannotFindRoute, err)
+			return
+		}
+		requestValidationInput := &RequestValidationInput{
+			Request:    r,
+			PathParams: pathParams,
+			Route:      route,
+		}
+		if err = ValidateRequest(r.Context(), requestValidationInput); err != nil {
+			v.logFunc("invalid request", err)
+			v.errFunc(w, http.StatusBadRequest, ErrCodeRequestInvalid, err)
+			return
+		}
+
+		var wr responseWrapper
+		if v.strict {
+			wr = &strictResponseWrapper{w: w}
+		} else {
+			wr = newWarnResponseWrapper(w)
+		}
+
+		h.ServeHTTP(wr, r)
+
+		if err = ValidateResponse(r.Context(), &ResponseValidationInput{
+			RequestValidationInput: requestValidationInput,
+			Status:                 wr.statusCode(),
+			Header:                 wr.Header(),
+			Body:                   ioutil.NopCloser(bytes.NewBuffer(wr.bodyContents())),
+		}); err != nil {
+			v.logFunc("invalid response", err)
+			if v.strict {
+				v.errFunc(w, http.StatusInternalServerError, ErrCodeResponseInvalid, err)
+			}
+			return
+		}
+
+		if err = wr.flushBodyContents(); err != nil {
+			v.logFunc("failed to write response", err)
+		}
+	})
+}
+
+type responseWrapper interface {
+	http.ResponseWriter
+
+	// flushBodyContents writes the buffered response to the client, if it has
+	// not yet been written.
+	flushBodyContents() error
+
+	// statusCode returns the response status code, 0 if not set yet.
+	statusCode() int
+
+	// bodyContents returns the buffered response body.
+	bodyContents() []byte
+}
+
+type warnResponseWrapper struct {
+	w             http.ResponseWriter
+	headerWritten bool
+	status        int
+	body          bytes.Buffer
+	tee           io.Writer
+}
+
+func newWarnResponseWrapper(w http.ResponseWriter) *warnResponseWrapper {
+	wr := &warnResponseWrapper{
+		w: w,
+	}
+	wr.tee = io.MultiWriter(w, &wr.body)
+	return wr
+}
+
+// Write implements http.ResponseWriter.
+func (wr *warnResponseWrapper) Write(b []byte) (int, error) {
+	if !wr.headerWritten {
+		wr.WriteHeader(http.StatusOK)
+	}
+	return wr.tee.Write(b)
+}
+
+// WriteHeader implements http.ResponseWriter.
+func (wr *warnResponseWrapper) WriteHeader(status int) {
+	if !wr.headerWritten {
+		// If the header hasn't been written, record the status for response
+		// validation.
+		wr.status = status
+		wr.headerWritten = true
+	}
+	wr.w.WriteHeader(wr.status)
+}
+
+// Header implements http.ResponseWriter.
+func (wr *warnResponseWrapper) Header() http.Header {
+	return wr.w.Header()
+}
+
+// Flush implements the optional http.Flusher interface.
+func (wr *warnResponseWrapper) Flush() {
+	// If the wrapped http.ResponseWriter implements optional http.Flusher,
+	// pass through.
+	if fl, ok := wr.w.(http.Flusher); ok {
+		fl.Flush()
+	}
+}
+
+func (wr *warnResponseWrapper) flushBodyContents() error {
+	return nil
+}
+
+func (wr *warnResponseWrapper) statusCode() int {
+	return wr.status
+}
+
+func (wr *warnResponseWrapper) bodyContents() []byte {
+	return wr.body.Bytes()
+}
+
+type strictResponseWrapper struct {
+	w             http.ResponseWriter
+	headerWritten bool
+	status        int
+	body          bytes.Buffer
+}
+
+// Write implements http.ResponseWriter.
+func (wr *strictResponseWrapper) Write(b []byte) (int, error) {
+	if !wr.headerWritten {
+		wr.WriteHeader(http.StatusOK)
+	}
+	return wr.body.Write(b)
+}
+
+// WriteHeader implements http.ResponseWriter.
+func (wr *strictResponseWrapper) WriteHeader(status int) {
+	if !wr.headerWritten {
+		wr.status = status
+		wr.headerWritten = true
+	}
+}
+
+// Header implements http.ResponseWriter.
+func (wr *strictResponseWrapper) Header() http.Header {
+	return wr.w.Header()
+}
+
+func (wr *strictResponseWrapper) flushBodyContents() error {
+	wr.w.WriteHeader(wr.status)
+	_, err := wr.w.Write(wr.body.Bytes())
+	return err
+}
+
+func (wr *strictResponseWrapper) statusCode() int {
+	return wr.status
+}
+
+func (wr *strictResponseWrapper) bodyContents() []byte {
+	return wr.body.Bytes()
+}
diff --git a/vendor/github.com/getkin/kin-openapi/openapi3filter/options.go b/vendor/github.com/getkin/kin-openapi/openapi3filter/options.go
new file mode 100644
index 00000000000..1622339e21b
--- /dev/null
+++ b/vendor/github.com/getkin/kin-openapi/openapi3filter/options.go
@@ -0,0 +1,24 @@
+package openapi3filter
+
+// DefaultOptions do not set an AuthenticationFunc.
+// A spec with security schemes defined will not pass validation
+// unless an AuthenticationFunc is defined.
+var DefaultOptions = &Options{}
+
+// Options used by ValidateRequest and ValidateResponse
+type Options struct {
+	// Set ExcludeRequestBody so ValidateRequest skips request body validation
+	ExcludeRequestBody bool
+
+	// Set ExcludeResponseBody so ValidateResponse skips response body validation
+	ExcludeResponseBody bool
+
+	// Set IncludeResponseStatus so ValidateResponse fails on a response
+	// status not defined in the OpenAPI spec
+	IncludeResponseStatus bool
+
+	MultiError bool
+
+	// See NoopAuthenticationFunc
+	AuthenticationFunc AuthenticationFunc
+}
diff --git a/vendor/github.com/getkin/kin-openapi/openapi3filter/req_resp_decoder.go b/vendor/github.com/getkin/kin-openapi/openapi3filter/req_resp_decoder.go
new file mode 100644
index 00000000000..12b36838436
--- /dev/null
+++ b/vendor/github.com/getkin/kin-openapi/openapi3filter/req_resp_decoder.go
@@ -0,0 +1,1027 @@
+package openapi3filter
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"mime"
+	"mime/multipart"
+	"net/http"
+	"net/url"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"gopkg.in/yaml.v2"
+
+	"github.com/getkin/kin-openapi/openapi3"
+)
+
+// ParseErrorKind describes a kind of ParseError.
+// The type simplifies comparison of errors.
+type ParseErrorKind int
+
+const (
+	// KindOther describes an untyped parsing error.
+	KindOther ParseErrorKind = iota
+	// KindUnsupportedFormat describes an error that happens when a value has an unsupported format.
+	KindUnsupportedFormat
+	// KindInvalidFormat describes an error that happens when a value does not conform to a format
+	// that is required by a serialization method.
+	KindInvalidFormat
+)
+
+// ParseError describes errors that happen while parsing an operation's parameters, request body, or response.
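// Illustrative sketch of the Options defined above (hypothetical call site;
// NoopAuthenticationFunc is assumed to be the package's pass-through
// AuthenticationFunc, referenced in the Options comment):
//
//	input.Options = &openapi3filter.Options{
//		MultiError:         true,
//		AuthenticationFunc: openapi3filter.NoopAuthenticationFunc,
//	}
//	err := openapi3filter.ValidateRequest(ctx, input)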
+type ParseError struct { + Kind ParseErrorKind + Value interface{} + Reason string + Cause error + + path []interface{} +} + +var _ interface{ Unwrap() error } = ParseError{} + +func (e *ParseError) Error() string { + var msg []string + if p := e.Path(); len(p) > 0 { + var arr []string + for _, v := range p { + arr = append(arr, fmt.Sprintf("%v", v)) + } + msg = append(msg, fmt.Sprintf("path %v", strings.Join(arr, "."))) + } + msg = append(msg, e.innerError()) + return strings.Join(msg, ": ") +} + +func (e *ParseError) innerError() string { + var msg []string + if e.Value != nil { + msg = append(msg, fmt.Sprintf("value %v", e.Value)) + } + if e.Reason != "" { + msg = append(msg, e.Reason) + } + if e.Cause != nil { + if v, ok := e.Cause.(*ParseError); ok { + msg = append(msg, v.innerError()) + } else { + msg = append(msg, e.Cause.Error()) + } + } + return strings.Join(msg, ": ") +} + +// RootCause returns a root cause of ParseError. +func (e *ParseError) RootCause() error { + if v, ok := e.Cause.(*ParseError); ok { + return v.RootCause() + } + return e.Cause +} + +func (e ParseError) Unwrap() error { + return e.Cause +} + +// Path returns a path to the root cause. +func (e *ParseError) Path() []interface{} { + var path []interface{} + if v, ok := e.Cause.(*ParseError); ok { + p := v.Path() + if len(p) > 0 { + path = append(path, p...) + } + } + if len(e.path) > 0 { + path = append(path, e.path...) + } + return path +} + +func invalidSerializationMethodErr(sm *openapi3.SerializationMethod) error { + return fmt.Errorf("invalid serialization method: style=%q, explode=%v", sm.Style, sm.Explode) +} + +// Decodes a parameter defined via the content property as an object. It uses +// the user specified decoder, or our build-in decoder for application/json +func decodeContentParameter(param *openapi3.Parameter, input *RequestValidationInput) ( + value interface{}, schema *openapi3.Schema, err error) { + + var paramValues []string + var found bool + switch param.In { + case openapi3.ParameterInPath: + var paramValue string + if paramValue, found = input.PathParams[param.Name]; found { + paramValues = []string{paramValue} + } + case openapi3.ParameterInQuery: + paramValues, found = input.GetQueryParams()[param.Name] + case openapi3.ParameterInHeader: + if paramValue := input.Request.Header.Get(http.CanonicalHeaderKey(param.Name)); paramValue != "" { + paramValues = []string{paramValue} + found = true + } + case openapi3.ParameterInCookie: + var cookie *http.Cookie + if cookie, err = input.Request.Cookie(param.Name); err == http.ErrNoCookie { + found = false + } else if err != nil { + return + } else { + paramValues = []string{cookie.Value} + found = true + } + default: + err = fmt.Errorf("unsupported parameter.in: %q", param.In) + return + } + + if !found { + if param.Required { + err = fmt.Errorf("parameter %q is required, but missing", param.Name) + } + return + } + + decoder := input.ParamDecoder + if decoder == nil { + decoder = defaultContentParameterDecoder + } + + value, schema, err = decoder(param, paramValues) + return +} + +func defaultContentParameterDecoder(param *openapi3.Parameter, values []string) ( + outValue interface{}, outSchema *openapi3.Schema, err error) { + // Only query parameters can have multiple values. 
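// Illustrative sketch: ParseError (defined above) carries a path to the
// failing element, so callers can do (hypothetical err value):
//
//	var pe *ParseError
//	if errors.As(err, &pe) {
//		fmt.Println(pe.Path())      // e.g. [2] for the third array item
//		fmt.Println(pe.RootCause()) // the innermost Cause
//	}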
+ if len(values) > 1 && param.In != openapi3.ParameterInQuery { + err = fmt.Errorf("%s parameter %q cannot have multiple values", param.In, param.Name) + return + } + + content := param.Content + if content == nil { + err = fmt.Errorf("parameter %q expected to have content", param.Name) + return + } + + // We only know how to decode a parameter if it has one content, application/json + if len(content) != 1 { + err = fmt.Errorf("multiple content types for parameter %q", param.Name) + return + } + + mt := content.Get("application/json") + if mt == nil { + err = fmt.Errorf("parameter %q has no content schema", param.Name) + return + } + outSchema = mt.Schema.Value + + if len(values) == 1 { + if err = json.Unmarshal([]byte(values[0]), &outValue); err != nil { + err = fmt.Errorf("error unmarshaling parameter %q", param.Name) + return + } + } else { + outArray := make([]interface{}, 0, len(values)) + for _, v := range values { + var item interface{} + if err = json.Unmarshal([]byte(v), &item); err != nil { + err = fmt.Errorf("error unmarshaling parameter %q", param.Name) + return + } + outArray = append(outArray, item) + } + outValue = outArray + } + return +} + +type valueDecoder interface { + DecodePrimitive(param string, sm *openapi3.SerializationMethod, schema *openapi3.SchemaRef) (interface{}, error) + DecodeArray(param string, sm *openapi3.SerializationMethod, schema *openapi3.SchemaRef) ([]interface{}, error) + DecodeObject(param string, sm *openapi3.SerializationMethod, schema *openapi3.SchemaRef) (map[string]interface{}, error) +} + +// decodeStyledParameter returns a value of an operation's parameter from HTTP request for +// parameters defined using the style format. +// The function returns ParseError when HTTP request contains an invalid value of a parameter. 
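// For reference, the same query array ids=[3,4,5] under the styles the
// decoders below understand (illustrative, per the OpenAPI 3 spec):
//
//	style=form, explode=true:   ?ids=3&ids=4&ids=5
//	style=form, explode=false:  ?ids=3,4,5
//	style=spaceDelimited:       ?ids=3%204%205
//	style=pipeDelimited:        ?ids=3|4|5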
+func decodeStyledParameter(param *openapi3.Parameter, input *RequestValidationInput) (interface{}, error) { + sm, err := param.SerializationMethod() + if err != nil { + return nil, err + } + + var dec valueDecoder + switch param.In { + case openapi3.ParameterInPath: + if len(input.PathParams) == 0 { + return nil, nil + } + dec = &pathParamDecoder{pathParams: input.PathParams} + case openapi3.ParameterInQuery: + if len(input.GetQueryParams()) == 0 { + return nil, nil + } + dec = &urlValuesDecoder{values: input.GetQueryParams()} + case openapi3.ParameterInHeader: + dec = &headerParamDecoder{header: input.Request.Header} + case openapi3.ParameterInCookie: + dec = &cookieParamDecoder{req: input.Request} + default: + return nil, fmt.Errorf("unsupported parameter's 'in': %s", param.In) + } + + return decodeValue(dec, param.Name, sm, param.Schema, param.Required) +} + +func decodeValue(dec valueDecoder, param string, sm *openapi3.SerializationMethod, schema *openapi3.SchemaRef, required bool) (interface{}, error) { + if len(schema.Value.AllOf) > 0 { + var value interface{} + var err error + for _, sr := range schema.Value.AllOf { + value, err = decodeValue(dec, param, sm, sr, required) + if value == nil || err != nil { + break + } + } + return value, err + } + + if len(schema.Value.AnyOf) > 0 { + for _, sr := range schema.Value.AnyOf { + value, _ := decodeValue(dec, param, sm, sr, required) + if value != nil { + return value, nil + } + } + if required { + return nil, fmt.Errorf("decoding anyOf for parameter %q failed", param) + } + return nil, nil + } + + if len(schema.Value.OneOf) > 0 { + isMatched := 0 + var value interface{} + for _, sr := range schema.Value.OneOf { + v, _ := decodeValue(dec, param, sm, sr, required) + if v != nil { + value = v + isMatched++ + } + } + if isMatched == 1 { + return value, nil + } else if isMatched > 1 { + return nil, fmt.Errorf("decoding oneOf failed: %d schemas matched", isMatched) + } + if required { + return nil, fmt.Errorf("decoding oneOf failed: %q is required", param) + } + return nil, nil + } + + if schema.Value.Not != nil { + // TODO(decode not): handle decoding "not" JSON Schema + return nil, errors.New("not implemented: decoding 'not'") + } + + if schema.Value.Type != "" { + var decodeFn func(param string, sm *openapi3.SerializationMethod, schema *openapi3.SchemaRef) (interface{}, error) + switch schema.Value.Type { + case "array": + decodeFn = func(param string, sm *openapi3.SerializationMethod, schema *openapi3.SchemaRef) (interface{}, error) { + return dec.DecodeArray(param, sm, schema) + } + case "object": + decodeFn = func(param string, sm *openapi3.SerializationMethod, schema *openapi3.SchemaRef) (interface{}, error) { + return dec.DecodeObject(param, sm, schema) + } + default: + decodeFn = dec.DecodePrimitive + } + return decodeFn(param, sm, schema) + } + + return nil, nil +} + +// pathParamDecoder decodes values of path parameters. +type pathParamDecoder struct { + pathParams map[string]string +} + +func (d *pathParamDecoder) DecodePrimitive(param string, sm *openapi3.SerializationMethod, schema *openapi3.SchemaRef) (interface{}, error) { + var prefix string + switch sm.Style { + case "simple": + // A prefix is empty for style "simple". + case "label": + prefix = "." + case "matrix": + prefix = ";" + param + "=" + default: + return nil, invalidSerializationMethodErr(sm) + } + + if d.pathParams == nil { + // HTTP request does not contains a value of the target path parameter. 
+ return nil, nil + } + raw, ok := d.pathParams[param] + if !ok || raw == "" { + // HTTP request does not contains a value of the target path parameter. + return nil, nil + } + src, err := cutPrefix(raw, prefix) + if err != nil { + return nil, err + } + return parsePrimitive(src, schema) +} + +func (d *pathParamDecoder) DecodeArray(param string, sm *openapi3.SerializationMethod, schema *openapi3.SchemaRef) ([]interface{}, error) { + var prefix, delim string + switch { + case sm.Style == "simple": + delim = "," + case sm.Style == "label" && !sm.Explode: + prefix = "." + delim = "," + case sm.Style == "label" && sm.Explode: + prefix = "." + delim = "." + case sm.Style == "matrix" && !sm.Explode: + prefix = ";" + param + "=" + delim = "," + case sm.Style == "matrix" && sm.Explode: + prefix = ";" + param + "=" + delim = ";" + param + "=" + default: + return nil, invalidSerializationMethodErr(sm) + } + + if d.pathParams == nil { + // HTTP request does not contains a value of the target path parameter. + return nil, nil + } + raw, ok := d.pathParams[param] + if !ok || raw == "" { + // HTTP request does not contains a value of the target path parameter. + return nil, nil + } + src, err := cutPrefix(raw, prefix) + if err != nil { + return nil, err + } + return parseArray(strings.Split(src, delim), schema) +} + +func (d *pathParamDecoder) DecodeObject(param string, sm *openapi3.SerializationMethod, schema *openapi3.SchemaRef) (map[string]interface{}, error) { + var prefix, propsDelim, valueDelim string + switch { + case sm.Style == "simple" && !sm.Explode: + propsDelim = "," + valueDelim = "," + case sm.Style == "simple" && sm.Explode: + propsDelim = "," + valueDelim = "=" + case sm.Style == "label" && !sm.Explode: + prefix = "." + propsDelim = "," + valueDelim = "," + case sm.Style == "label" && sm.Explode: + prefix = "." + propsDelim = "." + valueDelim = "=" + case sm.Style == "matrix" && !sm.Explode: + prefix = ";" + param + "=" + propsDelim = "," + valueDelim = "," + case sm.Style == "matrix" && sm.Explode: + prefix = ";" + propsDelim = ";" + valueDelim = "=" + default: + return nil, invalidSerializationMethodErr(sm) + } + + if d.pathParams == nil { + // HTTP request does not contains a value of the target path parameter. + return nil, nil + } + raw, ok := d.pathParams[param] + if !ok || raw == "" { + // HTTP request does not contains a value of the target path parameter. + return nil, nil + } + src, err := cutPrefix(raw, prefix) + if err != nil { + return nil, err + } + props, err := propsFromString(src, propsDelim, valueDelim) + if err != nil { + return nil, err + } + return makeObject(props, schema) +} + +// cutPrefix validates that a raw value of a path parameter has the specified prefix, +// and returns a raw value without the prefix. +func cutPrefix(raw, prefix string) (string, error) { + if prefix == "" { + return raw, nil + } + if len(raw) < len(prefix) || raw[:len(prefix)] != prefix { + return "", &ParseError{ + Kind: KindInvalidFormat, + Value: raw, + Reason: fmt.Sprintf("a value must be prefixed with %q", prefix), + } + } + return raw[len(prefix):], nil +} + +// urlValuesDecoder decodes values of query parameters. 
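// For reference, the prefixes and delimiters above render a path parameter
// id=[3,4,5] as (illustrative, per the OpenAPI 3 spec):
//
//	simple:                "3,4,5"
//	label,  explode=false: ".3,4,5"     label,  explode=true: ".3.4.5"
//	matrix, explode=false: ";id=3,4,5"  matrix, explode=true: ";id=3;id=4;id=5"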
+type urlValuesDecoder struct { + values url.Values +} + +func (d *urlValuesDecoder) DecodePrimitive(param string, sm *openapi3.SerializationMethod, schema *openapi3.SchemaRef) (interface{}, error) { + if sm.Style != "form" { + return nil, invalidSerializationMethodErr(sm) + } + + values := d.values[param] + if len(values) == 0 { + // HTTP request does not contain a value of the target query parameter. + return nil, nil + } + return parsePrimitive(values[0], schema) +} + +func (d *urlValuesDecoder) DecodeArray(param string, sm *openapi3.SerializationMethod, schema *openapi3.SchemaRef) ([]interface{}, error) { + if sm.Style == "deepObject" { + return nil, invalidSerializationMethodErr(sm) + } + + values := d.values[param] + if len(values) == 0 { + // HTTP request does not contain a value of the target query parameter. + return nil, nil + } + if !sm.Explode { + var delim string + switch sm.Style { + case "form": + delim = "," + case "spaceDelimited": + delim = " " + case "pipeDelimited": + delim = "|" + } + values = strings.Split(values[0], delim) + } + return parseArray(values, schema) +} + +func (d *urlValuesDecoder) DecodeObject(param string, sm *openapi3.SerializationMethod, schema *openapi3.SchemaRef) (map[string]interface{}, error) { + var propsFn func(url.Values) (map[string]string, error) + switch sm.Style { + case "form": + propsFn = func(params url.Values) (map[string]string, error) { + if len(params) == 0 { + // HTTP request does not contain query parameters. + return nil, nil + } + if sm.Explode { + props := make(map[string]string) + for key, values := range params { + props[key] = values[0] + } + return props, nil + } + values := params[param] + if len(values) == 0 { + // HTTP request does not contain a value of the target query parameter. + return nil, nil + } + return propsFromString(values[0], ",", ",") + } + case "deepObject": + propsFn = func(params url.Values) (map[string]string, error) { + props := make(map[string]string) + for key, values := range params { + groups := regexp.MustCompile(fmt.Sprintf("%s\\[(.+?)\\]", param)).FindAllStringSubmatch(key, -1) + if len(groups) == 0 { + // A query parameter's name does not match the required format, so skip it. + continue + } + props[groups[0][1]] = values[0] + } + if len(props) == 0 { + // HTTP request does not contain query parameters encoded by rules of style "deepObject". + return nil, nil + } + return props, nil + } + default: + return nil, invalidSerializationMethodErr(sm) + } + + props, err := propsFn(d.values) + if err != nil { + return nil, err + } + if props == nil { + return nil, nil + } + return makeObject(props, schema) +} + +// headerParamDecoder decodes values of header parameters. 
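// For reference, the "deepObject" branch above decodes (illustrative):
//
//	?filter[name]=alice&filter[age]=30
//
// for parameter "filter" into props {"name": "alice", "age": "30"}, which
// makeObject then parses against the property schemas.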
+type headerParamDecoder struct {
+	header http.Header
+}
+
+func (d *headerParamDecoder) DecodePrimitive(param string, sm *openapi3.SerializationMethod, schema *openapi3.SchemaRef) (interface{}, error) {
+	if sm.Style != "simple" {
+		return nil, invalidSerializationMethodErr(sm)
+	}
+
+	raw := d.header.Get(http.CanonicalHeaderKey(param))
+	return parsePrimitive(raw, schema)
+}
+
+func (d *headerParamDecoder) DecodeArray(param string, sm *openapi3.SerializationMethod, schema *openapi3.SchemaRef) ([]interface{}, error) {
+	if sm.Style != "simple" {
+		return nil, invalidSerializationMethodErr(sm)
+	}
+
+	raw := d.header.Get(http.CanonicalHeaderKey(param))
+	if raw == "" {
+		// HTTP request does not contain a corresponding header.
+		return nil, nil
+	}
+	return parseArray(strings.Split(raw, ","), schema)
+}
+
+func (d *headerParamDecoder) DecodeObject(param string, sm *openapi3.SerializationMethod, schema *openapi3.SchemaRef) (map[string]interface{}, error) {
+	if sm.Style != "simple" {
+		return nil, invalidSerializationMethodErr(sm)
+	}
+	valueDelim := ","
+	if sm.Explode {
+		valueDelim = "="
+	}
+
+	raw := d.header.Get(http.CanonicalHeaderKey(param))
+	if raw == "" {
+		// HTTP request does not contain a corresponding header.
+		return nil, nil
+	}
+	props, err := propsFromString(raw, ",", valueDelim)
+	if err != nil {
+		return nil, err
+	}
+	return makeObject(props, schema)
+}
+
+// cookieParamDecoder decodes values of cookie parameters.
+type cookieParamDecoder struct {
+	req *http.Request
+}
+
+func (d *cookieParamDecoder) DecodePrimitive(param string, sm *openapi3.SerializationMethod, schema *openapi3.SchemaRef) (interface{}, error) {
+	if sm.Style != "form" {
+		return nil, invalidSerializationMethodErr(sm)
+	}
+
+	cookie, err := d.req.Cookie(param)
+	if err == http.ErrNoCookie {
+		// HTTP request does not contain a corresponding cookie.
+		return nil, nil
+	}
+	if err != nil {
+		return nil, fmt.Errorf("decoding param %q: %s", param, err)
+	}
+	return parsePrimitive(cookie.Value, schema)
+}
+
+func (d *cookieParamDecoder) DecodeArray(param string, sm *openapi3.SerializationMethod, schema *openapi3.SchemaRef) ([]interface{}, error) {
+	if sm.Style != "form" || sm.Explode {
+		return nil, invalidSerializationMethodErr(sm)
+	}
+
+	cookie, err := d.req.Cookie(param)
+	if err == http.ErrNoCookie {
+		// HTTP request does not contain a corresponding cookie.
+		return nil, nil
+	}
+	if err != nil {
+		return nil, fmt.Errorf("decoding param %q: %s", param, err)
+	}
+	return parseArray(strings.Split(cookie.Value, ","), schema)
+}
+
+func (d *cookieParamDecoder) DecodeObject(param string, sm *openapi3.SerializationMethod, schema *openapi3.SchemaRef) (map[string]interface{}, error) {
+	if sm.Style != "form" || sm.Explode {
+		return nil, invalidSerializationMethodErr(sm)
+	}
+
+	cookie, err := d.req.Cookie(param)
+	if err == http.ErrNoCookie {
+		// HTTP request does not contain a corresponding cookie.
+		return nil, nil
+	}
+	if err != nil {
+		return nil, fmt.Errorf("decoding param %q: %s", param, err)
+	}
+	props, err := propsFromString(cookie.Value, ",", ",")
+	if err != nil {
+		return nil, err
+	}
+	return makeObject(props, schema)
+}
+
+// propsFromString returns a properties map that is created by splitting a source string by propDelim and valueDelim.
+// The source string must have a valid format: pairs separated by propDelim, with a property
+// name and value within each pair separated by valueDelim.
+// The function returns an error when the source string has an invalid format.
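// Illustrative sketch of propsFromString below (hypothetical values):
//
//	propsFromString("R,100,G,200", ",", ",") // map[G:200 R:100]
//	propsFromString("R=100;G=200", ";", "=") // map[G:200 R:100]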
+func propsFromString(src, propDelim, valueDelim string) (map[string]string, error) {
+    props := make(map[string]string)
+    pairs := strings.Split(src, propDelim)
+
+    // When propDelim and valueDelim are equal, the source string follows this rule:
+    // every even item of pairs is a property's name, and the subsequent odd item is that property's value.
+    if propDelim == valueDelim {
+        // Taking into account the rule above, a valid source string must be split by propDelim
+        // into an array with an even number of items.
+        if len(pairs)%2 != 0 {
+            return nil, &ParseError{
+                Kind:   KindInvalidFormat,
+                Value:  src,
+                Reason: fmt.Sprintf("a value must be a list of object's properties in format \"name%svalue\" separated by %s", valueDelim, propDelim),
+            }
+        }
+        for i := 0; i < len(pairs)/2; i++ {
+            props[pairs[i*2]] = pairs[i*2+1]
+        }
+        return props, nil
+    }
+
+    // When propDelim and valueDelim are not equal, the source string follows this rule:
+    // every item of pairs is a string in the format "name<valueDelim>value".
+    for _, pair := range pairs {
+        prop := strings.Split(pair, valueDelim)
+        if len(prop) != 2 {
+            return nil, &ParseError{
+                Kind:   KindInvalidFormat,
+                Value:  src,
+                Reason: fmt.Sprintf("a value must be a list of object's properties in format \"name%svalue\" separated by %s", valueDelim, propDelim),
+            }
+        }
+        props[prop[0]] = prop[1]
+    }
+    return props, nil
+}
+
+// makeObject returns an object that contains properties from props.
+// The value of every property is parsed as a primitive value.
+// The function returns an error when parsing an object's property fails.
+func makeObject(props map[string]string, schema *openapi3.SchemaRef) (map[string]interface{}, error) {
+    obj := make(map[string]interface{})
+    for propName, propSchema := range schema.Value.Properties {
+        value, err := parsePrimitive(props[propName], propSchema)
+        if err != nil {
+            if v, ok := err.(*ParseError); ok {
+                return nil, &ParseError{path: []interface{}{propName}, Cause: v}
+            }
+            return nil, fmt.Errorf("property %q: %s", propName, err)
+        }
+        obj[propName] = value
+    }
+    return obj, nil
+}
+
+// parseArray returns an array that contains items from a raw array.
+// Every item is parsed as a primitive value.
+// The function returns an error when parsing an array's item fails.
+func parseArray(raw []string, schemaRef *openapi3.SchemaRef) ([]interface{}, error) {
+    var value []interface{}
+    for i, v := range raw {
+        item, err := parsePrimitive(v, schemaRef.Value.Items)
+        if err != nil {
+            if v, ok := err.(*ParseError); ok {
+                return nil, &ParseError{path: []interface{}{i}, Cause: v}
+            }
+            return nil, fmt.Errorf("item %d: %s", i, err)
+        }
+        value = append(value, item)
+    }
+    return value, nil
+}
+
+// parsePrimitive returns a value that is created by parsing a source string to a primitive type
+// that is specified by a schema. The function returns nil when the source string is empty.
+// The function panics when a schema has a non-primitive type.
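// Illustrative behaviour sketch (not part of the vendored file); schema names
// here are hypothetical. Note that "integer" values are parsed with
// strconv.ParseFloat, so they come back as float64, mirroring encoding/json:
//
//    parsePrimitive("42", integerSchema) // float64(42), nil
//    parsePrimitive("", anySchema)       // nil, nil (empty source string)
//    parsePrimitive("x", numberSchema)   // nil, *ParseError (KindInvalidFormat)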
+func parsePrimitive(raw string, schema *openapi3.SchemaRef) (interface{}, error) {
+    if raw == "" {
+        return nil, nil
+    }
+    switch schema.Value.Type {
+    case "integer":
+        v, err := strconv.ParseFloat(raw, 64)
+        if err != nil {
+            return nil, &ParseError{Kind: KindInvalidFormat, Value: raw, Reason: "an invalid integer", Cause: err}
+        }
+        return v, nil
+    case "number":
+        v, err := strconv.ParseFloat(raw, 64)
+        if err != nil {
+            return nil, &ParseError{Kind: KindInvalidFormat, Value: raw, Reason: "an invalid number", Cause: err}
+        }
+        return v, nil
+    case "boolean":
+        v, err := strconv.ParseBool(raw)
+        if err != nil {
+            return nil, &ParseError{Kind: KindInvalidFormat, Value: raw, Reason: "an invalid boolean", Cause: err}
+        }
+        return v, nil
+    case "string":
+        return raw, nil
+    default:
+        panic(fmt.Sprintf("schema has non primitive type %q", schema.Value.Type))
+    }
+}
+
+// EncodingFn is a function that returns an encoding of a request body's part.
+type EncodingFn func(partName string) *openapi3.Encoding
+
+// BodyDecoder is an interface to decode a body of a request or response.
+// An implementation must return a value that is a primitive, []interface{}, or map[string]interface{}.
+type BodyDecoder func(io.Reader, http.Header, *openapi3.SchemaRef, EncodingFn) (interface{}, error)
+
+// bodyDecoders contains decoders for supported content types of a body.
+// By default, only content type "application/json" is supported.
+var bodyDecoders = make(map[string]BodyDecoder)
+
+// RegisteredBodyDecoder returns the registered body decoder for the given content type.
+//
+// If no decoder was registered for the given content type, nil is returned.
+// This call is not thread-safe: body decoders should not be created/destroyed by multiple goroutines.
+func RegisteredBodyDecoder(contentType string) BodyDecoder {
+    return bodyDecoders[contentType]
+}
+
+// RegisterBodyDecoder registers a request body's decoder for a content type.
+//
+// If a decoder for the specified content type already exists, the function replaces
+// it with the specified decoder.
+// This call is not thread-safe: body decoders should not be created/destroyed by multiple goroutines.
+func RegisterBodyDecoder(contentType string, decoder BodyDecoder) {
+    if contentType == "" {
+        panic("contentType is empty")
+    }
+    if decoder == nil {
+        panic("decoder is not defined")
+    }
+    bodyDecoders[contentType] = decoder
+}
+
+// UnregisterBodyDecoder dissociates a body decoder from a content type.
+//
+// Decoding this content type will result in an error.
+// This call is not thread-safe: body decoders should not be created/destroyed by multiple goroutines.
+func UnregisterBodyDecoder(contentType string) {
+    if contentType == "" {
+        panic("contentType is empty")
+    }
+    delete(bodyDecoders, contentType)
+}
+
+var headerCT = http.CanonicalHeaderKey("Content-Type")
+
+const prefixUnsupportedCT = "unsupported content type"
+
+// decodeBody returns a decoded body.
+// The function returns ParseError when a body is invalid.
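// decodeBody dispatches on the registry above; a hypothetical sketch (not part
// of the vendored file) of adding support for one more content type:
//
//    RegisterBodyDecoder("text/csv", func(body io.Reader, header http.Header,
//        schema *openapi3.SchemaRef, encFn EncodingFn) (interface{}, error) {
//        data, err := ioutil.ReadAll(body)
//        if err != nil {
//            return nil, &ParseError{Kind: KindInvalidFormat, Cause: err}
//        }
//        // A real decoder would parse the rows; returning the raw text keeps the sketch minimal.
//        return string(data), nil
//    })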
+func decodeBody(body io.Reader, header http.Header, schema *openapi3.SchemaRef, encFn EncodingFn) (interface{}, error) {
+    contentType := header.Get(headerCT)
+    if contentType == "" {
+        if _, ok := body.(*multipart.Part); ok {
+            contentType = "text/plain"
+        }
+    }
+    mediaType := parseMediaType(contentType)
+    decoder, ok := bodyDecoders[mediaType]
+    if !ok {
+        return nil, &ParseError{
+            Kind:   KindUnsupportedFormat,
+            Reason: fmt.Sprintf("%s %q", prefixUnsupportedCT, mediaType),
+        }
+    }
+    value, err := decoder(body, header, schema, encFn)
+    if err != nil {
+        return nil, err
+    }
+    return value, nil
+}
+
+func init() {
+    RegisterBodyDecoder("text/plain", plainBodyDecoder)
+    RegisterBodyDecoder("application/json", jsonBodyDecoder)
+    RegisterBodyDecoder("application/x-yaml", yamlBodyDecoder)
+    RegisterBodyDecoder("application/yaml", yamlBodyDecoder)
+    RegisterBodyDecoder("application/problem+json", jsonBodyDecoder)
+    RegisterBodyDecoder("application/x-www-form-urlencoded", urlencodedBodyDecoder)
+    RegisterBodyDecoder("multipart/form-data", multipartBodyDecoder)
+    RegisterBodyDecoder("application/octet-stream", FileBodyDecoder)
+}
+
+func plainBodyDecoder(body io.Reader, header http.Header, schema *openapi3.SchemaRef, encFn EncodingFn) (interface{}, error) {
+    data, err := ioutil.ReadAll(body)
+    if err != nil {
+        return nil, &ParseError{Kind: KindInvalidFormat, Cause: err}
+    }
+    return string(data), nil
+}
+
+func jsonBodyDecoder(body io.Reader, header http.Header, schema *openapi3.SchemaRef, encFn EncodingFn) (interface{}, error) {
+    var value interface{}
+    if err := json.NewDecoder(body).Decode(&value); err != nil {
+        return nil, &ParseError{Kind: KindInvalidFormat, Cause: err}
+    }
+    return value, nil
+}
+
+func yamlBodyDecoder(body io.Reader, header http.Header, schema *openapi3.SchemaRef, encFn EncodingFn) (interface{}, error) {
+    var value interface{}
+    if err := yaml.NewDecoder(body).Decode(&value); err != nil {
+        return nil, &ParseError{Kind: KindInvalidFormat, Cause: err}
+    }
+    return value, nil
+}
+
+func urlencodedBodyDecoder(body io.Reader, header http.Header, schema *openapi3.SchemaRef, encFn EncodingFn) (interface{}, error) {
+    // Validate the schema of the request body.
+    // Per the OpenAPI 3 specification, a request body's schema must have type "object".
+    // Properties of the schema describe individual parts of the request body.
+    if schema.Value.Type != "object" {
+        return nil, errors.New("unsupported schema of request body")
+    }
+    for propName, propSchema := range schema.Value.Properties {
+        switch propSchema.Value.Type {
+        case "object":
+            return nil, fmt.Errorf("unsupported schema of request body's property %q", propName)
+        case "array":
+            items := propSchema.Value.Items.Value
+            if items.Type != "string" && items.Type != "integer" && items.Type != "number" && items.Type != "boolean" {
+                return nil, fmt.Errorf("unsupported schema of request body's property %q", propName)
+            }
+        }
+    }
+
+    // Parse form.
+    b, err := ioutil.ReadAll(body)
+    if err != nil {
+        return nil, err
+    }
+    values, err := url.ParseQuery(string(b))
+    if err != nil {
+        return nil, err
+    }
+
+    // Make an object value from form values.
+    obj := make(map[string]interface{})
+    dec := &urlValuesDecoder{values: values}
+    for name, prop := range schema.Value.Properties {
+        var (
+            value interface{}
+            enc   *openapi3.Encoding
+        )
+        if encFn != nil {
+            enc = encFn(name)
+        }
+        sm := enc.SerializationMethod()
+
+        if value, err = decodeValue(dec, name, sm, prop, false); err != nil {
+            return nil, err
+        }
+        obj[name] = value
+    }
+
+    return obj, nil
+}
+
+func multipartBodyDecoder(body io.Reader, header http.Header, schema *openapi3.SchemaRef, encFn EncodingFn) (interface{}, error) {
+    if schema.Value.Type != "object" {
+        return nil, errors.New("unsupported schema of request body")
+    }
+
+    // Parse form.
+    values := make(map[string][]interface{})
+    contentType := header.Get(headerCT)
+    _, params, err := mime.ParseMediaType(contentType)
+    if err != nil {
+        return nil, err
+    }
+    mr := multipart.NewReader(body, params["boundary"])
+    for {
+        var part *multipart.Part
+        if part, err = mr.NextPart(); err == io.EOF {
+            break
+        }
+        if err != nil {
+            return nil, err
+        }
+
+        var (
+            name = part.FormName()
+            enc  *openapi3.Encoding
+        )
+        if encFn != nil {
+            enc = encFn(name)
+        }
+        subEncFn := func(string) *openapi3.Encoding { return enc }
+        // If the property's schema has type "array", it means that the form may contain multiple parts with the same name.
+        // Every such part has a type that is defined by an items schema in the property's schema.
+        var valueSchema *openapi3.SchemaRef
+        var exists bool
+        valueSchema, exists = schema.Value.Properties[name]
+        if !exists {
+            anyProperties := schema.Value.AdditionalPropertiesAllowed
+            if anyProperties != nil {
+                switch *anyProperties {
+                case true:
+                    // additionalProperties: true
+                    continue
+                default:
+                    // additionalProperties: false
+                    return nil, &ParseError{Kind: KindOther, Cause: fmt.Errorf("part %s: undefined", name)}
+                }
+            }
+            if schema.Value.AdditionalProperties == nil {
+                return nil, &ParseError{Kind: KindOther, Cause: fmt.Errorf("part %s: undefined", name)}
+            }
+            valueSchema, exists = schema.Value.AdditionalProperties.Value.Properties[name]
+            if !exists {
+                return nil, &ParseError{Kind: KindOther, Cause: fmt.Errorf("part %s: undefined", name)}
+            }
+        }
+        if valueSchema.Value.Type == "array" {
+            valueSchema = valueSchema.Value.Items
+        }
+
+        var value interface{}
+        if value, err = decodeBody(part, http.Header(part.Header), valueSchema, subEncFn); err != nil {
+            if v, ok := err.(*ParseError); ok {
+                return nil, &ParseError{path: []interface{}{name}, Cause: v}
+            }
+            return nil, fmt.Errorf("part %s: %s", name, err)
+        }
+        values[name] = append(values[name], value)
+    }
+
+    allTheProperties := make(map[string]*openapi3.SchemaRef)
+    for k, v := range schema.Value.Properties {
+        allTheProperties[k] = v
+    }
+    if schema.Value.AdditionalProperties != nil {
+        for k, v := range schema.Value.AdditionalProperties.Value.Properties {
+            allTheProperties[k] = v
+        }
+    }
+    // Make an object value from form values.
+    obj := make(map[string]interface{})
+    for name, prop := range allTheProperties {
+        vv := values[name]
+        if len(vv) == 0 {
+            continue
+        }
+        if prop.Value.Type == "array" {
+            obj[name] = vv
+        } else {
+            obj[name] = vv[0]
+        }
+    }
+
+    return obj, nil
+}
+
+// FileBodyDecoder is a body decoder that decodes a file body to a string.
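// Reuse sketch (hypothetical, not part of the vendored file): since
// FileBodyDecoder simply reads the whole payload into a string, it can also be
// registered for other binary media types, e.g.
//
//    RegisterBodyDecoder("application/pdf", FileBodyDecoder)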
+func FileBodyDecoder(body io.Reader, header http.Header, schema *openapi3.SchemaRef, encFn EncodingFn) (interface{}, error) {
+    data, err := ioutil.ReadAll(body)
+    if err != nil {
+        return nil, err
+    }
+    return string(data), nil
+}
diff --git a/vendor/github.com/getkin/kin-openapi/openapi3filter/validate_request.go b/vendor/github.com/getkin/kin-openapi/openapi3filter/validate_request.go
new file mode 100644
index 00000000000..990b299efca
--- /dev/null
+++ b/vendor/github.com/getkin/kin-openapi/openapi3filter/validate_request.go
@@ -0,0 +1,323 @@
+package openapi3filter
+
+import (
+    "bytes"
+    "context"
+    "errors"
+    "fmt"
+    "io/ioutil"
+    "net/http"
+    "sort"
+
+    "github.com/getkin/kin-openapi/openapi3"
+)
+
+// ErrAuthenticationServiceMissing is returned when no authentication service
+// is defined for the request validator
+var ErrAuthenticationServiceMissing = errors.New("missing AuthenticationFunc")
+
+// ErrInvalidRequired is returned when a required value of a parameter or request body is not defined.
+var ErrInvalidRequired = errors.New("value is required but missing")
+
+// ValidateRequest is used to validate the given input according to a previously
+// loaded OpenAPIv3 spec. If the input does not match the OpenAPIv3 spec, a
+// non-nil error will be returned.
+//
+// Note: One can tune the behavior of uniqueItems: true verification
+// by registering a custom function with openapi3.RegisterArrayUniqueItemsChecker
+func ValidateRequest(ctx context.Context, input *RequestValidationInput) error {
+    var (
+        err error
+        me  openapi3.MultiError
+    )
+
+    options := input.Options
+    if options == nil {
+        options = DefaultOptions
+    }
+    route := input.Route
+    operation := route.Operation
+    operationParameters := operation.Parameters
+    pathItemParameters := route.PathItem.Parameters
+
+    // For each parameter of the PathItem
+    for _, parameterRef := range pathItemParameters {
+        parameter := parameterRef.Value
+        if operationParameters != nil {
+            if override := operationParameters.GetByInAndName(parameter.In, parameter.Name); override != nil {
+                continue
+            }
+        }
+
+        if err = ValidateParameter(ctx, input, parameter); err != nil && !options.MultiError {
+            return err
+        }
+
+        if err != nil {
+            me = append(me, err)
+        }
+    }
+
+    // For each parameter of the Operation
+    for _, parameter := range operationParameters {
+        if err = ValidateParameter(ctx, input, parameter.Value); err != nil && !options.MultiError {
+            return err
+        }
+
+        if err != nil {
+            me = append(me, err)
+        }
+    }
+
+    // RequestBody
+    requestBody := operation.RequestBody
+    if requestBody != nil && !options.ExcludeRequestBody {
+        if err = ValidateRequestBody(ctx, input, requestBody.Value); err != nil && !options.MultiError {
+            return err
+        }
+
+        if err != nil {
+            me = append(me, err)
+        }
+    }
+
+    // Security
+    security := operation.Security
+    // If there aren't any security requirements for the operation
+    if security == nil {
+        // Use the global security requirements.
+        security = &route.Spec.Security
+    }
+    if security != nil {
+        if err = ValidateSecurityRequirements(ctx, input, *security); err != nil && !options.MultiError {
+            return err
+        }
+
+        if err != nil {
+            me = append(me, err)
+        }
+    }
+
+    if len(me) > 0 {
+        return me
+    }
+
+    return nil
+}
+
+// ValidateParameter validates a parameter's value by JSON schema.
+// The function returns RequestError with a ParseError cause when unable to parse a value.
+// The function returns RequestError with ErrInvalidRequired cause when a value of a required parameter is not defined.
+// The function returns RequestError with an openapi3.SchemaError cause when a value is invalid by JSON schema.
+func ValidateParameter(ctx context.Context, input *RequestValidationInput, parameter *openapi3.Parameter) error {
+    if parameter.Schema == nil && parameter.Content == nil {
+        // We have no schema for the parameter. Assume that everything passes
+        // a schema-less check, but this could also be an error. The OpenAPI
+        // validation allows this to happen.
+        return nil
+    }
+
+    options := input.Options
+    if options == nil {
+        options = DefaultOptions
+    }
+
+    var value interface{}
+    var err error
+    var schema *openapi3.Schema
+
+    // Validation will ensure that we either have content or schema.
+    if parameter.Content != nil {
+        if value, schema, err = decodeContentParameter(parameter, input); err != nil {
+            return &RequestError{Input: input, Parameter: parameter, Err: err}
+        }
+    } else {
+        if value, err = decodeStyledParameter(parameter, input); err != nil {
+            return &RequestError{Input: input, Parameter: parameter, Err: err}
+        }
+        schema = parameter.Schema.Value
+    }
+    // Validate a parameter's value.
+    if value == nil {
+        if parameter.Required {
+            return &RequestError{Input: input, Parameter: parameter, Err: ErrInvalidRequired}
+        }
+        return nil
+    }
+    if schema == nil {
+        // A parameter's schema is not defined so skip validation of a parameter's value.
+        return nil
+    }
+
+    var opts []openapi3.SchemaValidationOption
+    if options.MultiError {
+        opts = make([]openapi3.SchemaValidationOption, 0, 1)
+        opts = append(opts, openapi3.MultiErrors())
+    }
+    if err = schema.VisitJSON(value, opts...); err != nil {
+        return &RequestError{Input: input, Parameter: parameter, Err: err}
+    }
+    return nil
+}
+
+const prefixInvalidCT = "header Content-Type has unexpected value"
+
+// ValidateRequestBody validates data of a request's body.
+//
+// The function returns RequestError with ErrInvalidRequired cause when a value is required but not defined.
+// The function returns RequestError with an openapi3.SchemaError cause when a value is invalid by JSON schema.
+func ValidateRequestBody(ctx context.Context, input *RequestValidationInput, requestBody *openapi3.RequestBody) error {
+    var (
+        req  = input.Request
+        data []byte
+    )
+
+    options := input.Options
+    if options == nil {
+        options = DefaultOptions
+    }
+
+    if req.Body != http.NoBody && req.Body != nil {
+        defer req.Body.Close()
+        var err error
+        if data, err = ioutil.ReadAll(req.Body); err != nil {
+            return &RequestError{
+                Input:       input,
+                RequestBody: requestBody,
+                Reason:      "reading failed",
+                Err:         err,
+            }
+        }
+        // Put the data back into the input
+        req.Body = ioutil.NopCloser(bytes.NewReader(data))
+    }
+
+    if len(data) == 0 {
+        if requestBody.Required {
+            return &RequestError{Input: input, RequestBody: requestBody, Err: ErrInvalidRequired}
+        }
+        return nil
+    }
+
+    content := requestBody.Content
+    if len(content) == 0 {
+        // A request's body does not have declared content, so skip validation.
+        return nil
+    }
+
+    inputMIME := req.Header.Get(headerCT)
+    contentType := requestBody.Content.Get(inputMIME)
+    if contentType == nil {
+        return &RequestError{
+            Input:       input,
+            RequestBody: requestBody,
+            Reason:      fmt.Sprintf("%s %q", prefixInvalidCT, inputMIME),
+        }
+    }
+
+    if contentType.Schema == nil {
+        // A JSON schema that describes the received data is not declared, so skip validation.
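        // (This typically happens for content entries that only declare a media
        // type, e.g. "application/octet-stream" without a schema.)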
+ return nil + } + + encFn := func(name string) *openapi3.Encoding { return contentType.Encoding[name] } + value, err := decodeBody(bytes.NewReader(data), req.Header, contentType.Schema, encFn) + if err != nil { + return &RequestError{ + Input: input, + RequestBody: requestBody, + Reason: "failed to decode request body", + Err: err, + } + } + + opts := make([]openapi3.SchemaValidationOption, 0, 2) // 2 potential opts here + opts = append(opts, openapi3.VisitAsRequest()) + if options.MultiError { + opts = append(opts, openapi3.MultiErrors()) + } + + // Validate JSON with the schema + if err := contentType.Schema.Value.VisitJSON(value, opts...); err != nil { + return &RequestError{ + Input: input, + RequestBody: requestBody, + Reason: "doesn't match the schema", + Err: err, + } + } + return nil +} + +// ValidateSecurityRequirements goes through multiple OpenAPI 3 security +// requirements in order and returns nil on the first valid requirement. +// If no requirement is met, errors are returned in order. +func ValidateSecurityRequirements(ctx context.Context, input *RequestValidationInput, srs openapi3.SecurityRequirements) error { + if len(srs) == 0 { + return nil + } + var errs []error + for _, sr := range srs { + if err := validateSecurityRequirement(ctx, input, sr); err != nil { + if len(errs) == 0 { + errs = make([]error, 0, len(srs)) + } + errs = append(errs, err) + continue + } + return nil + } + return &SecurityRequirementsError{ + SecurityRequirements: srs, + Errors: errs, + } +} + +// validateSecurityRequirement validates a single OpenAPI 3 security requirement +func validateSecurityRequirement(ctx context.Context, input *RequestValidationInput, securityRequirement openapi3.SecurityRequirement) error { + doc := input.Route.Spec + securitySchemes := doc.Components.SecuritySchemes + + // Ensure deterministic order + names := make([]string, 0, len(securityRequirement)) + for name := range securityRequirement { + names = append(names, name) + } + sort.Strings(names) + + // Get authentication function + options := input.Options + if options == nil { + options = DefaultOptions + } + f := options.AuthenticationFunc + if f == nil { + return ErrAuthenticationServiceMissing + } + + // For each scheme for the requirement + for _, name := range names { + var securityScheme *openapi3.SecurityScheme + if securitySchemes != nil { + if ref := securitySchemes[name]; ref != nil { + securityScheme = ref.Value + } + } + if securityScheme == nil { + return &RequestError{ + Input: input, + Err: fmt.Errorf("security scheme %q is not declared", name), + } + } + scopes := securityRequirement[name] + if err := f(ctx, &AuthenticationInput{ + RequestValidationInput: input, + SecuritySchemeName: name, + SecurityScheme: securityScheme, + Scopes: scopes, + }); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3filter/validate_request_input.go b/vendor/github.com/getkin/kin-openapi/openapi3filter/validate_request_input.go new file mode 100644 index 00000000000..91dd102b689 --- /dev/null +++ b/vendor/github.com/getkin/kin-openapi/openapi3filter/validate_request_input.go @@ -0,0 +1,38 @@ +package openapi3filter + +import ( + "net/http" + "net/url" + + "github.com/getkin/kin-openapi/openapi3" + "github.com/getkin/kin-openapi/routers" +) + +// A ContentParameterDecoder takes a parameter definition from the OpenAPI spec, +// and the value which we received for it. 
It is expected to return the
+// value unmarshaled into an interface which can be traversed for
+// validation, it should also return the schema to be used for validating the
+// object, since there can be more than one in the content spec.
+//
+// If a query parameter appears multiple times, values[] will have more
+// than one value, but for all other parameter types it should have just
+// one.
+type ContentParameterDecoder func(param *openapi3.Parameter, values []string) (interface{}, *openapi3.Schema, error)
+
+type RequestValidationInput struct {
+    Request      *http.Request
+    PathParams   map[string]string
+    QueryParams  url.Values
+    Route        *routers.Route
+    Options      *Options
+    ParamDecoder ContentParameterDecoder
+}
+
+func (input *RequestValidationInput) GetQueryParams() url.Values {
+    q := input.QueryParams
+    if q == nil {
+        q = input.Request.URL.Query()
+        input.QueryParams = q
+    }
+    return q
+}
diff --git a/vendor/github.com/getkin/kin-openapi/openapi3filter/validate_response.go b/vendor/github.com/getkin/kin-openapi/openapi3filter/validate_response.go
new file mode 100644
index 00000000000..7cb713ace81
--- /dev/null
+++ b/vendor/github.com/getkin/kin-openapi/openapi3filter/validate_response.go
@@ -0,0 +1,138 @@
+// Package openapi3filter validates requests and responses against an OpenAPI 3 specification file.
+package openapi3filter
+
+import (
+    "bytes"
+    "context"
+    "fmt"
+    "io/ioutil"
+    "net/http"
+
+    "github.com/getkin/kin-openapi/openapi3"
+)
+
+// ValidateResponse is used to validate the given input according to a previously
+// loaded OpenAPIv3 spec. If the input does not match the OpenAPIv3 spec, a
+// non-nil error will be returned.
+//
+// Note: One can tune the behavior of uniqueItems: true verification
+// by registering a custom function with openapi3.RegisterArrayUniqueItemsChecker
+func ValidateResponse(ctx context.Context, input *ResponseValidationInput) error {
+    req := input.RequestValidationInput.Request
+    switch req.Method {
+    case "HEAD":
+        return nil
+    }
+    status := input.Status
+
+    // These status codes will never be validated.
+    // TODO: The list is probably missing some.
+    switch status {
+    case http.StatusNotModified,
+        http.StatusPermanentRedirect,
+        http.StatusTemporaryRedirect,
+        http.StatusMovedPermanently:
+        return nil
+    }
+    route := input.RequestValidationInput.Route
+    options := input.Options
+    if options == nil {
+        options = DefaultOptions
+    }
+
+    // Find input for the current status
+    responses := route.Operation.Responses
+    if len(responses) == 0 {
+        return nil
+    }
+    responseRef := responses.Get(status) // Response
+    if responseRef == nil {
+        responseRef = responses.Default() // Default input
+    }
+    if responseRef == nil {
+        // By default, a status that is not documented is allowed.
+        if !options.IncludeResponseStatus {
+            return nil
+        }
+        return &ResponseError{Input: input, Reason: "status is not supported"}
+    }
+    response := responseRef.Value
+    if response == nil {
+        return &ResponseError{Input: input, Reason: "response has not been resolved"}
+    }
+
+    if options.ExcludeResponseBody {
+        // A user turned off validation of a response's body.
+        return nil
+    }
+
+    content := response.Content
+    if len(content) == 0 || options.ExcludeResponseBody {
+        // An operation does not contain a validation schema for responses with this status code.
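        // (options.ExcludeResponseBody was already handled above, so repeating
        // it in this condition is redundant but harmless.)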
+        return nil
+    }
+
+    inputMIME := input.Header.Get(headerCT)
+    contentType := content.Get(inputMIME)
+    if contentType == nil {
+        return &ResponseError{
+            Input:  input,
+            Reason: fmt.Sprintf("response header Content-Type has unexpected value: %q", inputMIME),
+        }
+    }
+
+    if contentType.Schema == nil {
+        // An operation does not contain a validation schema for responses with this status code.
+        return nil
+    }
+
+    // Read response's body.
+    body := input.Body
+
+    // Response would contain partial or empty input body
+    // after we begin reading.
+    // Ensure that this doesn't happen.
+    input.Body = nil
+
+    // Ensure we close the reader
+    defer body.Close()
+
+    // Read all
+    data, err := ioutil.ReadAll(body)
+    if err != nil {
+        return &ResponseError{
+            Input:  input,
+            Reason: "failed to read response body",
+            Err:    err,
+        }
+    }
+
+    // Put the data back into the response.
+    input.SetBodyBytes(data)
+
+    encFn := func(name string) *openapi3.Encoding { return contentType.Encoding[name] }
+    value, err := decodeBody(bytes.NewBuffer(data), input.Header, contentType.Schema, encFn)
+    if err != nil {
+        return &ResponseError{
+            Input:  input,
+            Reason: "failed to decode response body",
+            Err:    err,
+        }
+    }
+
+    opts := make([]openapi3.SchemaValidationOption, 0, 2) // 2 potential opts here
+    opts = append(opts, openapi3.VisitAsRequest())
+    if options.MultiError {
+        opts = append(opts, openapi3.MultiErrors())
+    }
+
+    // Validate data with the schema.
+    if err := contentType.Schema.Value.VisitJSON(value, opts...); err != nil {
+        return &ResponseError{
+            Input:  input,
+            Reason: "response body doesn't match the schema",
+            Err:    err,
+        }
+    }
+    return nil
+}
diff --git a/vendor/github.com/getkin/kin-openapi/openapi3filter/validate_response_input.go b/vendor/github.com/getkin/kin-openapi/openapi3filter/validate_response_input.go
new file mode 100644
index 00000000000..edf38730a5e
--- /dev/null
+++ b/vendor/github.com/getkin/kin-openapi/openapi3filter/validate_response_input.go
@@ -0,0 +1,42 @@
+package openapi3filter
+
+import (
+    "bytes"
+    "io"
+    "io/ioutil"
+    "net/http"
+)
+
+type ResponseValidationInput struct {
+    RequestValidationInput *RequestValidationInput
+    Status                 int
+    Header                 http.Header
+    Body                   io.ReadCloser
+    Options                *Options
+}
+
+func (input *ResponseValidationInput) SetBodyBytes(value []byte) *ResponseValidationInput {
+    input.Body = ioutil.NopCloser(bytes.NewReader(value))
+    return input
+}
+
+var JSONPrefixes = []string{
+    ")]}',\n",
+}
+
+// TrimJSONPrefix trims one of the possible prefixes
+func TrimJSONPrefix(data []byte) []byte {
+search:
+    for _, prefix := range JSONPrefixes {
+        if len(data) < len(prefix) {
+            continue
+        }
+        for i, b := range data[:len(prefix)] {
+            if b != prefix[i] {
+                continue search
+            }
+        }
+        return data[len(prefix):]
+    }
+    return data
+}
diff --git a/vendor/github.com/getkin/kin-openapi/openapi3filter/validation_error.go b/vendor/github.com/getkin/kin-openapi/openapi3filter/validation_error.go
new file mode 100644
index 00000000000..bfeeaa7daa2
--- /dev/null
+++ b/vendor/github.com/getkin/kin-openapi/openapi3filter/validation_error.go
@@ -0,0 +1,85 @@
+package openapi3filter
+
+import (
+    "bytes"
+    "strconv"
+)
+
+// ValidationError struct provides granular error information
+// useful for communicating issues back to end user and developer.
+// Based on https://jsonapi.org/format/#error-objects
+type ValidationError struct {
+    // A unique identifier for this particular occurrence of the problem.
+ Id string `json:"id,omitempty"` + // The HTTP status code applicable to this problem. + Status int `json:"status,omitempty"` + // An application-specific error code, expressed as a string value. + Code string `json:"code,omitempty"` + // A short, human-readable summary of the problem. It **SHOULD NOT** change from occurrence to occurrence of the problem, except for purposes of localization. + Title string `json:"title,omitempty"` + // A human-readable explanation specific to this occurrence of the problem. + Detail string `json:"detail,omitempty"` + // An object containing references to the source of the error + Source *ValidationErrorSource `json:"source,omitempty"` +} + +// ValidationErrorSource struct +type ValidationErrorSource struct { + // A JSON Pointer [RFC6901] to the associated entity in the request document [e.g. \"/data\" for a primary data object, or \"/data/attributes/title\" for a specific attribute]. + Pointer string `json:"pointer,omitempty"` + // A string indicating which query parameter caused the error. + Parameter string `json:"parameter,omitempty"` +} + +var _ error = &ValidationError{} + +// Error implements the error interface. +func (e *ValidationError) Error() string { + b := new(bytes.Buffer) + b.WriteString("[") + if e.Status != 0 { + b.WriteString(strconv.Itoa(e.Status)) + } + b.WriteString("]") + b.WriteString("[") + if e.Code != "" { + b.WriteString(e.Code) + } + b.WriteString("]") + b.WriteString("[") + if e.Id != "" { + b.WriteString(e.Id) + } + b.WriteString("]") + b.WriteString(" ") + if e.Title != "" { + b.WriteString(e.Title) + b.WriteString(" ") + } + if e.Detail != "" { + b.WriteString("| ") + b.WriteString(e.Detail) + b.WriteString(" ") + } + if e.Source != nil { + b.WriteString("[source ") + if e.Source.Parameter != "" { + b.WriteString("parameter=") + b.WriteString(e.Source.Parameter) + } else if e.Source.Pointer != "" { + b.WriteString("pointer=") + b.WriteString(e.Source.Pointer) + } + b.WriteString("]") + } + + if b.Len() == 0 { + return "no error" + } + return b.String() +} + +// StatusCode implements the StatusCoder interface for DefaultErrorEncoder +func (e *ValidationError) StatusCode() int { + return e.Status +} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3filter/validation_error_encoder.go b/vendor/github.com/getkin/kin-openapi/openapi3filter/validation_error_encoder.go new file mode 100644 index 00000000000..205186960fa --- /dev/null +++ b/vendor/github.com/getkin/kin-openapi/openapi3filter/validation_error_encoder.go @@ -0,0 +1,170 @@ +package openapi3filter + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/getkin/kin-openapi/openapi3" + "github.com/getkin/kin-openapi/routers" +) + +// ValidationErrorEncoder wraps a base ErrorEncoder to handle ValidationErrors +type ValidationErrorEncoder struct { + Encoder ErrorEncoder +} + +// Encode implements the ErrorEncoder interface for encoding ValidationErrors +func (enc *ValidationErrorEncoder) Encode(ctx context.Context, err error, w http.ResponseWriter) { + if e, ok := err.(*routers.RouteError); ok { + cErr := convertRouteError(e) + enc.Encoder(ctx, cErr, w) + return + } + + e, ok := err.(*RequestError) + if !ok { + enc.Encoder(ctx, err, w) + return + } + + var cErr *ValidationError + if e.Err == nil { + cErr = convertBasicRequestError(e) + } else if e.Err == ErrInvalidRequired { + cErr = convertErrInvalidRequired(e) + } else if innerErr, ok := e.Err.(*ParseError); ok { + cErr = convertParseError(e, innerErr) + } else if innerErr, ok := 
e.Err.(*openapi3.SchemaError); ok { + cErr = convertSchemaError(e, innerErr) + } + + if cErr != nil { + enc.Encoder(ctx, cErr, w) + return + } + enc.Encoder(ctx, err, w) +} + +func convertRouteError(e *routers.RouteError) *ValidationError { + status := http.StatusNotFound + if e.Error() == routers.ErrMethodNotAllowed.Error() { + status = http.StatusMethodNotAllowed + } + return &ValidationError{Status: status, Title: e.Error()} +} + +func convertBasicRequestError(e *RequestError) *ValidationError { + if strings.HasPrefix(e.Reason, prefixInvalidCT) { + if strings.HasSuffix(e.Reason, `""`) { + return &ValidationError{ + Status: http.StatusUnsupportedMediaType, + Title: "header Content-Type is required", + } + } + return &ValidationError{ + Status: http.StatusUnsupportedMediaType, + Title: prefixUnsupportedCT + strings.TrimPrefix(e.Reason, prefixInvalidCT), + } + } + return &ValidationError{ + Status: http.StatusBadRequest, + Title: e.Error(), + } +} + +func convertErrInvalidRequired(e *RequestError) *ValidationError { + if e.Err == ErrInvalidRequired && e.Parameter != nil { + return &ValidationError{ + Status: http.StatusBadRequest, + Title: fmt.Sprintf("parameter %q in %s is required", e.Parameter.Name, e.Parameter.In), + } + } + return &ValidationError{ + Status: http.StatusBadRequest, + Title: e.Error(), + } +} + +func convertParseError(e *RequestError, innerErr *ParseError) *ValidationError { + // We treat path params of the wrong type like a 404 instead of a 400 + if innerErr.Kind == KindInvalidFormat && e.Parameter != nil && e.Parameter.In == "path" { + return &ValidationError{ + Status: http.StatusNotFound, + Title: fmt.Sprintf("resource not found with %q value: %v", e.Parameter.Name, innerErr.Value), + } + } else if strings.HasPrefix(innerErr.Reason, prefixUnsupportedCT) { + return &ValidationError{ + Status: http.StatusUnsupportedMediaType, + Title: innerErr.Reason, + } + } else if innerErr.RootCause() != nil { + if rootErr, ok := innerErr.Cause.(*ParseError); ok && + rootErr.Kind == KindInvalidFormat && e.Parameter.In == "query" { + return &ValidationError{ + Status: http.StatusBadRequest, + Title: fmt.Sprintf("parameter %q in %s is invalid: %v is %s", + e.Parameter.Name, e.Parameter.In, rootErr.Value, rootErr.Reason), + } + } + return &ValidationError{ + Status: http.StatusBadRequest, + Title: innerErr.Reason, + } + } + return nil +} + +func convertSchemaError(e *RequestError, innerErr *openapi3.SchemaError) *ValidationError { + cErr := &ValidationError{Title: innerErr.Reason} + + // Handle "Origin" error + if originErr, ok := innerErr.Origin.(*openapi3.SchemaError); ok { + cErr = convertSchemaError(e, originErr) + } + + // Add http status code + if e.Parameter != nil { + cErr.Status = http.StatusBadRequest + } else if e.RequestBody != nil { + cErr.Status = http.StatusUnprocessableEntity + } + + // Add error source + if e.Parameter != nil { + // We have a JSONPointer in the query param too so need to + // make sure 'Parameter' check takes priority over 'Pointer' + cErr.Source = &ValidationErrorSource{Parameter: e.Parameter.Name} + } else if ptr := innerErr.JSONPointer(); ptr != nil { + cErr.Source = &ValidationErrorSource{Pointer: toJSONPointer(ptr)} + } + + // Add details on allowed values for enums + if innerErr.SchemaField == "enum" { + enums := make([]string, 0, len(innerErr.Schema.Enum)) + for _, enum := range innerErr.Schema.Enum { + enums = append(enums, fmt.Sprintf("%v", enum)) + } + cErr.Detail = fmt.Sprintf("value %v at %s must be one of: %s", + innerErr.Value, + 
toJSONPointer(innerErr.JSONPointer()), + strings.Join(enums, ", ")) + value := fmt.Sprintf("%v", innerErr.Value) + if e.Parameter != nil && + (e.Parameter.Explode == nil || *e.Parameter.Explode) && + (e.Parameter.Style == "" || e.Parameter.Style == "form") && + strings.Contains(value, ",") { + parts := strings.Split(value, ",") + cErr.Detail = fmt.Sprintf("%s; perhaps you intended '?%s=%s'", + cErr.Detail, + e.Parameter.Name, + strings.Join(parts, "&"+e.Parameter.Name+"=")) + } + } + return cErr +} + +func toJSONPointer(reversePath []string) string { + return "/" + strings.Join(reversePath, "/") +} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3filter/validation_handler.go b/vendor/github.com/getkin/kin-openapi/openapi3filter/validation_handler.go new file mode 100644 index 00000000000..eeb1ca1ea89 --- /dev/null +++ b/vendor/github.com/getkin/kin-openapi/openapi3filter/validation_handler.go @@ -0,0 +1,103 @@ +package openapi3filter + +import ( + "context" + "net/http" + + "github.com/getkin/kin-openapi/openapi3" + "github.com/getkin/kin-openapi/routers" + legacyrouter "github.com/getkin/kin-openapi/routers/legacy" +) + +type AuthenticationFunc func(context.Context, *AuthenticationInput) error + +func NoopAuthenticationFunc(context.Context, *AuthenticationInput) error { return nil } + +var _ AuthenticationFunc = NoopAuthenticationFunc + +type ValidationHandler struct { + Handler http.Handler + AuthenticationFunc AuthenticationFunc + File string + ErrorEncoder ErrorEncoder + router routers.Router +} + +func (h *ValidationHandler) Load() error { + loader := openapi3.NewLoader() + doc, err := loader.LoadFromFile(h.File) + if err != nil { + return err + } + if err := doc.Validate(loader.Context); err != nil { + return err + } + if h.router, err = legacyrouter.NewRouter(doc); err != nil { + return err + } + + // set defaults + if h.Handler == nil { + h.Handler = http.DefaultServeMux + } + if h.AuthenticationFunc == nil { + h.AuthenticationFunc = NoopAuthenticationFunc + } + if h.ErrorEncoder == nil { + h.ErrorEncoder = DefaultErrorEncoder + } + + return nil +} + +func (h *ValidationHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if handled := h.before(w, r); handled { + return + } + // TODO: validateResponse + h.Handler.ServeHTTP(w, r) +} + +// Middleware implements gorilla/mux MiddlewareFunc +func (h *ValidationHandler) Middleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if handled := h.before(w, r); handled { + return + } + // TODO: validateResponse + next.ServeHTTP(w, r) + }) +} + +func (h *ValidationHandler) before(w http.ResponseWriter, r *http.Request) (handled bool) { + if err := h.validateRequest(r); err != nil { + h.ErrorEncoder(r.Context(), err, w) + return true + } + return false +} + +func (h *ValidationHandler) validateRequest(r *http.Request) error { + // Find route + route, pathParams, err := h.router.FindRoute(r) + if err != nil { + return err + } + + options := &Options{ + AuthenticationFunc: h.AuthenticationFunc, + } + + // Validate request + requestValidationInput := &RequestValidationInput{ + Request: r, + PathParams: pathParams, + Route: route, + Options: options, + } + if err = ValidateRequest(r.Context(), requestValidationInput); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3filter/validation_kit.go b/vendor/github.com/getkin/kin-openapi/openapi3filter/validation_kit.go new file mode 100644 index 00000000000..9e11e4fc8dc 
--- /dev/null +++ b/vendor/github.com/getkin/kin-openapi/openapi3filter/validation_kit.go @@ -0,0 +1,85 @@ +package openapi3filter + +import ( + "context" + "encoding/json" + "net/http" +) + +/////////////////////////////////////////////////////////////////////////////////// +// We didn't want to tie kin-openapi too tightly with go-kit. +// This file contains the ErrorEncoder and DefaultErrorEncoder function +// borrowed from this project. +// +// The MIT License (MIT) +// +// Copyright (c) 2015 Peter Bourgon +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. +/////////////////////////////////////////////////////////////////////////////////// + +// ErrorEncoder is responsible for encoding an error to the ResponseWriter. +// Users are encouraged to use custom ErrorEncoders to encode HTTP errors to +// their clients, and will likely want to pass and check for their own error +// types. See the example shipping/handling service. +type ErrorEncoder func(ctx context.Context, err error, w http.ResponseWriter) + +// StatusCoder is checked by DefaultErrorEncoder. If an error value implements +// StatusCoder, the StatusCode will be used when encoding the error. By default, +// StatusInternalServerError (500) is used. +type StatusCoder interface { + StatusCode() int +} + +// Headerer is checked by DefaultErrorEncoder. If an error value implements +// Headerer, the provided headers will be applied to the response writer, after +// the Content-Type is set. +type Headerer interface { + Headers() http.Header +} + +// DefaultErrorEncoder writes the error to the ResponseWriter, by default a +// content type of text/plain, a body of the plain text of the error, and a +// status code of 500. If the error implements Headerer, the provided headers +// will be applied to the response. If the error implements json.Marshaler, and +// the marshaling succeeds, a content type of application/json and the JSON +// encoded form of the error will be used. If the error implements StatusCoder, +// the provided StatusCode will be used instead of 500. 
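// Usage sketch (hypothetical, combining the pieces above): a ValidationHandler
// whose errors pass through ValidationErrorEncoder before this default encoder;
// "openapi.yaml" is an assumed spec path:
//
//    enc := &ValidationErrorEncoder{Encoder: DefaultErrorEncoder}
//    h := &ValidationHandler{File: "openapi.yaml", ErrorEncoder: enc.Encode}
//    if err := h.Load(); err != nil {
//        log.Fatal(err)
//    }
//    http.ListenAndServe(":8080", h)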
+func DefaultErrorEncoder(_ context.Context, err error, w http.ResponseWriter) { + contentType, body := "text/plain; charset=utf-8", []byte(err.Error()) + if marshaler, ok := err.(json.Marshaler); ok { + if jsonBody, marshalErr := marshaler.MarshalJSON(); marshalErr == nil { + contentType, body = "application/json; charset=utf-8", jsonBody + } + } + w.Header().Set("Content-Type", contentType) + if headerer, ok := err.(Headerer); ok { + for k, values := range headerer.Headers() { + for _, v := range values { + w.Header().Add(k, v) + } + } + } + code := http.StatusInternalServerError + if sc, ok := err.(StatusCoder); ok { + code = sc.StatusCode() + } + w.WriteHeader(code) + w.Write(body) +} diff --git a/vendor/github.com/getkin/kin-openapi/routers/gorillamux/router.go b/vendor/github.com/getkin/kin-openapi/routers/gorillamux/router.go new file mode 100644 index 00000000000..83bbf829e20 --- /dev/null +++ b/vendor/github.com/getkin/kin-openapi/routers/gorillamux/router.go @@ -0,0 +1,209 @@ +// Package gorillamux implements a router. +// +// It differs from the legacy router: +// * it provides somewhat granular errors: "path not found", "method not allowed". +// * it handles matching routes with extensions (e.g. /books/{id}.json) +// * it handles path patterns with a different syntax (e.g. /params/{x}/{y}/{z:.*}) +package gorillamux + +import ( + "net/http" + "net/url" + "sort" + "strings" + + "github.com/getkin/kin-openapi/openapi3" + "github.com/getkin/kin-openapi/routers" + "github.com/gorilla/mux" +) + +var _ routers.Router = &Router{} + +// Router helps link http.Request.s and an OpenAPIv3 spec +type Router struct { + muxes []*mux.Route + routes []*routers.Route +} + +// NewRouter creates a gorilla/mux router. +// Assumes spec is .Validate()d +// TODO: Handle/HandlerFunc + ServeHTTP (When there is a match, the route variables can be retrieved calling mux.Vars(request)) +func NewRouter(doc *openapi3.T) (routers.Router, error) { + type srv struct { + schemes []string + host, base string + server *openapi3.Server + } + servers := make([]srv, 0, len(doc.Servers)) + for _, server := range doc.Servers { + serverURL := server.URL + var schemes []string + var u *url.URL + var err error + if strings.Contains(serverURL, "://") { + scheme0 := strings.Split(serverURL, "://")[0] + schemes = permutePart(scheme0, server) + u, err = url.Parse(bEncode(strings.Replace(serverURL, scheme0+"://", schemes[0]+"://", 1))) + } else { + u, err = url.Parse(bEncode(serverURL)) + } + if err != nil { + return nil, err + } + path := bDecode(u.EscapedPath()) + if len(path) > 0 && path[len(path)-1] == '/' { + path = path[:len(path)-1] + } + servers = append(servers, srv{ + host: bDecode(u.Host), //u.Hostname()? + base: path, + schemes: schemes, // scheme: []string{scheme0}, TODO: https://github.com/gorilla/mux/issues/624 + server: server, + }) + } + if len(servers) == 0 { + servers = append(servers, srv{}) + } + muxRouter := mux.NewRouter().UseEncodedPath() + r := &Router{} + for _, path := range orderedPaths(doc.Paths) { + pathItem := doc.Paths[path] + + operations := pathItem.Operations() + methods := make([]string, 0, len(operations)) + for method := range operations { + methods = append(methods, method) + } + sort.Strings(methods) + + for _, s := range servers { + muxRoute := muxRouter.Path(s.base + path).Methods(methods...) + if schemes := s.schemes; len(schemes) != 0 { + muxRoute.Schemes(schemes...) 
+ } + if host := s.host; host != "" { + muxRoute.Host(host) + } + if err := muxRoute.GetError(); err != nil { + return nil, err + } + r.muxes = append(r.muxes, muxRoute) + r.routes = append(r.routes, &routers.Route{ + Spec: doc, + Server: s.server, + Path: path, + PathItem: pathItem, + Method: "", + Operation: nil, + }) + } + } + return r, nil +} + +// FindRoute extracts the route and parameters of an http.Request +func (r *Router) FindRoute(req *http.Request) (*routers.Route, map[string]string, error) { + for i, muxRoute := range r.muxes { + var match mux.RouteMatch + if muxRoute.Match(req, &match) { + if err := match.MatchErr; err != nil { + // What then? + } + route := *r.routes[i] + route.Method = req.Method + route.Operation = route.Spec.Paths[route.Path].GetOperation(route.Method) + return &route, match.Vars, nil + } + switch match.MatchErr { + case nil: + case mux.ErrMethodMismatch: + return nil, nil, routers.ErrMethodNotAllowed + default: // What then? + } + } + return nil, nil, routers.ErrPathNotFound +} + +func orderedPaths(paths map[string]*openapi3.PathItem) []string { + // https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.3.md#pathsObject + // When matching URLs, concrete (non-templated) paths would be matched + // before their templated counterparts. + // NOTE: sorting by number of variables ASC then by descending lexicographical + // order seems to be a good heuristic. + vars := make(map[int][]string) + max := 0 + for path := range paths { + count := strings.Count(path, "}") + vars[count] = append(vars[count], path) + if count > max { + max = count + } + } + ordered := make([]string, 0, len(paths)) + for c := 0; c <= max; c++ { + if ps, ok := vars[c]; ok { + sort.Sort(sort.Reverse(sort.StringSlice(ps))) + ordered = append(ordered, ps...) 
+        }
+    }
+    return ordered
+}
+
+// Magic strings that temporarily replace "{}" so net/url.Parse() works
+var blURL, brURL = strings.Repeat("-", 50), strings.Repeat("_", 50)
+
+func bEncode(s string) string {
+    s = strings.Replace(s, "{", blURL, -1)
+    s = strings.Replace(s, "}", brURL, -1)
+    return s
+}
+func bDecode(s string) string {
+    s = strings.Replace(s, blURL, "{", -1)
+    s = strings.Replace(s, brURL, "}", -1)
+    return s
+}
+
+func permutePart(part0 string, srv *openapi3.Server) []string {
+    type mapAndSlice struct {
+        m map[string]struct{}
+        s []string
+    }
+    var2val := make(map[string]mapAndSlice)
+    max := 0
+    for name0, v := range srv.Variables {
+        name := "{" + name0 + "}"
+        if !strings.Contains(part0, name) {
+            continue
+        }
+        m := map[string]struct{}{v.Default: {}}
+        for _, value := range v.Enum {
+            m[value] = struct{}{}
+        }
+        if l := len(m); l > max {
+            max = l
+        }
+        s := make([]string, 0, len(m))
+        for value := range m {
+            s = append(s, value)
+        }
+        var2val[name] = mapAndSlice{m: m, s: s}
+    }
+    if len(var2val) == 0 {
+        return []string{part0}
+    }
+
+    partsMap := make(map[string]struct{}, max*len(var2val))
+    for i := 0; i < max; i++ {
+        part := part0
+        for name, mas := range var2val {
+            part = strings.Replace(part, name, mas.s[i%len(mas.s)], -1)
+        }
+        partsMap[part] = struct{}{}
+    }
+    parts := make([]string, 0, len(partsMap))
+    for part := range partsMap {
+        parts = append(parts, part)
+    }
+    sort.Strings(parts)
+    return parts
+}
diff --git a/vendor/github.com/getkin/kin-openapi/routers/legacy/pathpattern/node.go b/vendor/github.com/getkin/kin-openapi/routers/legacy/pathpattern/node.go
new file mode 100644
index 00000000000..862199864b9
--- /dev/null
+++ b/vendor/github.com/getkin/kin-openapi/routers/legacy/pathpattern/node.go
@@ -0,0 +1,328 @@
+// Package pathpattern implements path matching.
+//
+// Examples of supported patterns:
+// * "/"
+// * "/abc"
+// * "/abc/{variable}" (matches until next '/' or end-of-string)
+// * "/abc/{variable*}" (matches everything, including "/abc" itself)
+// * "/abc/{ variable | prefix_(.*)_suffix }" (matches regular expressions)
+package pathpattern
+
+import (
+    "bytes"
+    "fmt"
+    "regexp"
+    "sort"
+    "strings"
+)
+
+var DefaultOptions = &Options{
+    SupportWildcard: true,
+}
+
+type Options struct {
+    SupportWildcard bool
+    SupportRegExp   bool
+}
+
+// PathFromHost converts a host pattern to a path pattern.
+//
+// Examples:
+// * PathFromHost("some-subdomain.domain.com", false) -> "com/./domain/./some-subdomain"
+// * PathFromHost("some-subdomain.domain.com", true) -> "com/./domain/./subdomain/-/some"
+func PathFromHost(host string, specialDashes bool) string {
+    buf := make([]byte, 0, len(host))
+    end := len(host)
+
+    // Go from end to start
+    for start := end - 1; start >= 0; start-- {
+        switch host[start] {
+        case '.':
+            buf = append(buf, host[start+1:end]...)
+            buf = append(buf, '/', '.', '/')
+            end = start
+        case '-':
+            if specialDashes {
+                buf = append(buf, host[start+1:end]...)
+                buf = append(buf, '/', '-', '/')
+                end = start
+            }
+        }
+    }
+    buf = append(buf, host[:end]...)
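    // The leftmost label has no separator before it, so append the remainder as-is.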
+ return string(buf) +} + +type Node struct { + VariableNames []string + Value interface{} + Suffixes SuffixList +} + +func (currentNode *Node) String() string { + buf := bytes.NewBuffer(make([]byte, 0, 255)) + currentNode.toBuffer(buf, "") + return buf.String() +} + +func (currentNode *Node) toBuffer(buf *bytes.Buffer, linePrefix string) { + if value := currentNode.Value; value != nil { + buf.WriteString(linePrefix) + buf.WriteString("VALUE: ") + fmt.Fprint(buf, value) + buf.WriteString("\n") + } + suffixes := currentNode.Suffixes + if len(suffixes) > 0 { + newLinePrefix := linePrefix + " " + for _, suffix := range suffixes { + buf.WriteString(linePrefix) + buf.WriteString("PATTERN: ") + buf.WriteString(suffix.String()) + buf.WriteString("\n") + suffix.Node.toBuffer(buf, newLinePrefix) + } + } +} + +type SuffixKind int + +// Note that order is important! +const ( + // SuffixKindConstant matches a constant string + SuffixKindConstant = SuffixKind(iota) + + // SuffixKindRegExp matches a regular expression + SuffixKindRegExp + + // SuffixKindVariable matches everything until '/' + SuffixKindVariable + + // SuffixKindEverything matches everything (until end-of-string) + SuffixKindEverything +) + +// Suffix describes condition that +type Suffix struct { + Kind SuffixKind + Pattern string + + // compiled regular expression + regExp *regexp.Regexp + + // Next node + Node *Node +} + +func EqualSuffix(a, b Suffix) bool { + return a.Kind == b.Kind && a.Pattern == b.Pattern +} + +func (suffix Suffix) String() string { + switch suffix.Kind { + case SuffixKindConstant: + return suffix.Pattern + case SuffixKindVariable: + return "{_}" + case SuffixKindEverything: + return "{_*}" + default: + return "{_|" + suffix.Pattern + "}" + } +} + +type SuffixList []Suffix + +func (list SuffixList) Less(i, j int) bool { + a, b := list[i], list[j] + ak, bk := a.Kind, b.Kind + if ak < bk { + return true + } else if bk < ak { + return false + } + return a.Pattern > b.Pattern +} + +func (list SuffixList) Len() int { + return len(list) +} + +func (list SuffixList) Swap(i, j int) { + a, b := list[i], list[j] + list[i], list[j] = b, a +} + +func (currentNode *Node) MustAdd(path string, value interface{}, options *Options) { + node, err := currentNode.CreateNode(path, options) + if err != nil { + panic(err) + } + node.Value = value +} + +func (currentNode *Node) Add(path string, value interface{}, options *Options) error { + node, err := currentNode.CreateNode(path, options) + if err != nil { + return err + } + node.Value = value + return nil +} + +func (currentNode *Node) CreateNode(path string, options *Options) (*Node, error) { + if options == nil { + options = DefaultOptions + } + for strings.HasSuffix(path, "/") { + path = path[:len(path)-1] + } + remaining := path + var variableNames []string +loop: + for { + //remaining = strings.TrimPrefix(remaining, "/") + if len(remaining) == 0 { + // This node is the right one + // Check whether another route already leads to this node + currentNode.VariableNames = variableNames + return currentNode, nil + } + + suffix := Suffix{} + var i int + if strings.HasPrefix(remaining, "/") { + remaining = remaining[1:] + suffix.Kind = SuffixKindConstant + suffix.Pattern = "/" + } else { + i = strings.IndexAny(remaining, "/{") + if i < 0 { + i = len(remaining) + } + if i > 0 { + // Constant string pattern + suffix.Kind = SuffixKindConstant + suffix.Pattern = remaining[:i] + remaining = remaining[i:] + } else if remaining[0] == '{' { + // This is probably a variable + suffix.Kind = 
SuffixKindVariable + + // Find variable name + i := strings.IndexByte(remaining, '}') + if i < 0 { + return nil, fmt.Errorf("missing '}' in: %s", path) + } + variableName := strings.TrimSpace(remaining[1:i]) + remaining = remaining[i+1:] + + if options.SupportRegExp { + // See if it has regular expression + i = strings.IndexByte(variableName, '|') + if i >= 0 { + suffix.Kind = SuffixKindRegExp + suffix.Pattern = strings.TrimSpace(variableName[i+1:]) + variableName = strings.TrimSpace(variableName[:i]) + } + } + if suffix.Kind == SuffixKindVariable && options.SupportWildcard { + if strings.HasSuffix(variableName, "*") { + suffix.Kind = SuffixKindEverything + } + } + variableNames = append(variableNames, variableName) + } + } + + // Find existing matcher + for _, existing := range currentNode.Suffixes { + if EqualSuffix(existing, suffix) { + currentNode = existing.Node + continue loop + } + } + + // Compile regular expression + if suffix.Kind == SuffixKindRegExp { + regExp, err := regexp.Compile(suffix.Pattern) + if err != nil { + return nil, fmt.Errorf("invalid regular expression in: %s", path) + } + suffix.regExp = regExp + } + + // Create new node + newNode := &Node{} + suffix.Node = newNode + currentNode.Suffixes = append(currentNode.Suffixes, suffix) + sort.Sort(currentNode.Suffixes) + currentNode = newNode + continue loop + } +} + +func (currentNode *Node) Match(path string) (*Node, []string) { + for strings.HasSuffix(path, "/") { + path = path[:len(path)-1] + } + variableValues := make([]string, 0, 8) + return currentNode.matchRemaining(path, variableValues) +} + +func (currentNode *Node) matchRemaining(remaining string, paramValues []string) (*Node, []string) { + // Check if this node matches + if len(remaining) == 0 && currentNode.Value != nil { + return currentNode, paramValues + } + + // See if any suffix matches + for _, suffix := range currentNode.Suffixes { + var resultNode *Node + var resultValues []string + switch suffix.Kind { + case SuffixKindConstant: + pattern := suffix.Pattern + if strings.HasPrefix(remaining, pattern) { + newRemaining := remaining[len(pattern):] + resultNode, resultValues = suffix.Node.matchRemaining(newRemaining, paramValues) + } else if len(remaining) == 0 && pattern == "/" { + resultNode, resultValues = suffix.Node.matchRemaining(remaining, paramValues) + } + case SuffixKindVariable: + i := strings.IndexByte(remaining, '/') + if i < 0 { + i = len(remaining) + } + newParamValues := append(paramValues, remaining[:i]) + newRemaining := remaining[i:] + resultNode, resultValues = suffix.Node.matchRemaining(newRemaining, newParamValues) + case SuffixKindEverything: + newParamValues := append(paramValues, remaining) + resultNode, resultValues = suffix.Node, newParamValues + case SuffixKindRegExp: + i := strings.IndexByte(remaining, '/') + if i < 0 { + i = len(remaining) + } + paramValue := remaining[:i] + regExp := suffix.regExp + if regExp.MatchString(paramValue) { + matches := regExp.FindStringSubmatch(paramValue) + if len(matches) > 1 { + paramValue = matches[1] + } + newParamValues := append(paramValues, paramValue) + newRemaining := remaining[i:] + resultNode, resultValues = suffix.Node.matchRemaining(newRemaining, newParamValues) + } + } + if resultNode != nil && resultNode.Value != nil { + // This suffix matched + return resultNode, resultValues + } + } + + // No suffix matched + return nil, nil +} diff --git a/vendor/github.com/getkin/kin-openapi/routers/legacy/router.go b/vendor/github.com/getkin/kin-openapi/routers/legacy/router.go new file mode 
100644
index 00000000000..f1f47d9ed05
--- /dev/null
+++ b/vendor/github.com/getkin/kin-openapi/routers/legacy/router.go
@@ -0,0 +1,167 @@
+// Package legacy implements a router.
+//
+// It differs from the gorilla/mux router:
+// * it provides granular errors: "path not found", "method not allowed", "variable missing from path"
+// * it does not handle matching routes with extensions (e.g. /books/{id}.json)
+// * it handles path patterns with a different syntax (e.g. /params/{x}/{y}/{z.*})
package legacy

+import (
+ "context"
+ "errors"
+ "fmt"
+ "net/http"
+ "strings"
+
+ "github.com/getkin/kin-openapi/openapi3"
+ "github.com/getkin/kin-openapi/routers"
+ "github.com/getkin/kin-openapi/routers/legacy/pathpattern"
+)
+
+// Routers maps an HTTP request to a Router.
+type Routers []*Router
+
+// FindRoute extracts the route and parameters of an http.Request
+func (rs Routers) FindRoute(req *http.Request) (routers.Router, *routers.Route, map[string]string, error) {
+ for _, router := range rs {
+ // Skip routers that DO NOT have servers
+ if len(router.doc.Servers) == 0 {
+ continue
+ }
+ route, pathParams, err := router.FindRoute(req)
+ if err == nil {
+ return router, route, pathParams, nil
+ }
+ }
+ for _, router := range rs {
+ // Skip routers that DO have servers
+ if len(router.doc.Servers) > 0 {
+ continue
+ }
+ route, pathParams, err := router.FindRoute(req)
+ if err == nil {
+ return router, route, pathParams, nil
+ }
+ }
+ return nil, nil, nil, &routers.RouteError{
+ Reason: "none of the routers match",
+ }
+}
+
+// Router maps an HTTP request to an OpenAPI operation.
+type Router struct {
+ doc *openapi3.T
+ pathNode *pathpattern.Node
+}
+
+// NewRouter creates a new router.
+//
+// If the given OpenAPIv3 document has servers, the router will use them.
+// All operations of the document will be added to the router.
+func NewRouter(doc *openapi3.T) (routers.Router, error) {
+ if err := doc.Validate(context.Background()); err != nil {
+ return nil, fmt.Errorf("validating OpenAPI failed: %v", err)
+ }
+ router := &Router{doc: doc}
+ root := router.node()
+ for path, pathItem := range doc.Paths {
+ for method, operation := range pathItem.Operations() {
+ method = strings.ToUpper(method)
+ if err := root.Add(method+" "+path, &routers.Route{
+ Spec: doc,
+ Path: path,
+ PathItem: pathItem,
+ Method: method,
+ Operation: operation,
+ }, nil); err != nil {
+ return nil, err
+ }
+ }
+ }
+ return router, nil
+}
+
+// AddRoute adds a route in the router.
+func (router *Router) AddRoute(route *routers.Route) error { + method := route.Method + if method == "" { + return errors.New("route is missing method") + } + method = strings.ToUpper(method) + path := route.Path + if path == "" { + return errors.New("route is missing path") + } + return router.node().Add(method+" "+path, router, nil) +} + +func (router *Router) node() *pathpattern.Node { + root := router.pathNode + if root == nil { + root = &pathpattern.Node{} + router.pathNode = root + } + return root +} + +// FindRoute extracts the route and parameters of an http.Request +func (router *Router) FindRoute(req *http.Request) (*routers.Route, map[string]string, error) { + method, url := req.Method, req.URL + doc := router.doc + + // Get server + servers := doc.Servers + var server *openapi3.Server + var remainingPath string + var pathParams map[string]string + if len(servers) == 0 { + remainingPath = url.Path + } else { + var paramValues []string + server, paramValues, remainingPath = servers.MatchURL(url) + if server == nil { + return nil, nil, &routers.RouteError{ + Reason: routers.ErrPathNotFound.Error(), + } + } + pathParams = make(map[string]string, 8) + paramNames, err := server.ParameterNames() + if err != nil { + return nil, nil, err + } + for i, value := range paramValues { + name := paramNames[i] + pathParams[name] = value + } + } + + // Get PathItem + root := router.node() + var route *routers.Route + node, paramValues := root.Match(method + " " + remainingPath) + if node != nil { + route, _ = node.Value.(*routers.Route) + } + if route == nil { + pathItem := doc.Paths[remainingPath] + if pathItem == nil { + return nil, nil, &routers.RouteError{Reason: routers.ErrPathNotFound.Error()} + } + if pathItem.GetOperation(method) == nil { + return nil, nil, &routers.RouteError{Reason: routers.ErrMethodNotAllowed.Error()} + } + } + + if pathParams == nil { + pathParams = make(map[string]string, len(paramValues)) + } + paramKeys := node.VariableNames + for i, value := range paramValues { + key := paramKeys[i] + if strings.HasSuffix(key, "*") { + key = key[:len(key)-1] + } + pathParams[key] = value + } + return route, pathParams, nil +} diff --git a/vendor/github.com/getkin/kin-openapi/routers/types.go b/vendor/github.com/getkin/kin-openapi/routers/types.go new file mode 100644 index 00000000000..93746cfe916 --- /dev/null +++ b/vendor/github.com/getkin/kin-openapi/routers/types.go @@ -0,0 +1,42 @@ +package routers + +import ( + "net/http" + + "github.com/getkin/kin-openapi/openapi3" +) + +// Router helps link http.Request.s and an OpenAPIv3 spec +type Router interface { + // FindRoute matches an HTTP request with the operation it resolves to. + // Hosts are matched from the OpenAPIv3 servers key. + // + // If you experience ErrPathNotFound and have localhost hosts specified as your servers, + // turning these server URLs as relative (leaving only the path) should resolve this. + // + // See openapi3filter for example uses with request and response validation. 
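+ //
+ // A minimal usage sketch (the spec filename is illustrative and error
+ // handling is elided):
+ //
+ //	doc, _ := openapi3.NewLoader().LoadFromFile("openapi.yaml")
+ //	router, _ := legacy.NewRouter(doc)
+ //	route, pathParams, _ := router.FindRoute(req)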
+ FindRoute(req *http.Request) (route *Route, pathParams map[string]string, err error) +} + +// Route describes the operation an http.Request can match +type Route struct { + Spec *openapi3.T + Server *openapi3.Server + Path string + PathItem *openapi3.PathItem + Method string + Operation *openapi3.Operation +} + +// ErrPathNotFound is returned when no route match is found +var ErrPathNotFound error = &RouteError{"no matching operation was found"} + +// ErrMethodNotAllowed is returned when no method of the matched route matches +var ErrMethodNotAllowed error = &RouteError{"method not allowed"} + +// RouteError describes Router errors +type RouteError struct { + Reason string +} + +func (e *RouteError) Error() string { return e.Reason } diff --git a/vendor/github.com/getsentry/raven-go/.dockerignore b/vendor/github.com/getsentry/raven-go/.dockerignore new file mode 100644 index 00000000000..6b8710a711f --- /dev/null +++ b/vendor/github.com/getsentry/raven-go/.dockerignore @@ -0,0 +1 @@ +.git diff --git a/vendor/github.com/getsentry/raven-go/.gitignore b/vendor/github.com/getsentry/raven-go/.gitignore new file mode 100644 index 00000000000..0f66ce75dc9 --- /dev/null +++ b/vendor/github.com/getsentry/raven-go/.gitignore @@ -0,0 +1,5 @@ +*.test +*.out +example/example +/xunit.xml +/coverage.xml diff --git a/vendor/github.com/getsentry/raven-go/.gitmodules b/vendor/github.com/getsentry/raven-go/.gitmodules new file mode 100644 index 00000000000..e69de29bb2d diff --git a/vendor/github.com/getsentry/raven-go/.travis.yml b/vendor/github.com/getsentry/raven-go/.travis.yml new file mode 100644 index 00000000000..8ec4eca8109 --- /dev/null +++ b/vendor/github.com/getsentry/raven-go/.travis.yml @@ -0,0 +1,41 @@ +sudo: false +language: go +go: + - 1.7.x + - 1.8.x + - 1.9.x + - 1.10.x + - 1.11.x + - tip + +before_install: + - go install -race std + - go get golang.org/x/tools/cmd/cover + - go get github.com/tebeka/go2xunit + - go get github.com/t-yuki/gocover-cobertura + - go get -v ./... + +script: + - go test -v -race ./... | tee gotest.out + - $GOPATH/bin/go2xunit -fail -input gotest.out -output xunit.xml + - go test -v -coverprofile=coverage.txt -covermode count . + - $GOPATH/bin/gocover-cobertura < coverage.txt > coverage.xml + +after_script: + - npm install -g @zeus-ci/cli + - zeus upload -t "application/x-cobertura+xml" coverage.xml + - zeus upload -t "application/x-xunit+xml" xunit.xml + +matrix: + allow_failures: + - go: tip + +notifications: + webhooks: + urls: + - https://zeus.ci/hooks/cd949996-d30a-11e8-ba53-0a580a28042d/public/provider/travis/webhook + on_success: always + on_failure: always + on_start: always + on_cancel: always + on_error: always diff --git a/vendor/github.com/getsentry/raven-go/LICENSE b/vendor/github.com/getsentry/raven-go/LICENSE new file mode 100644 index 00000000000..b0301b57e8a --- /dev/null +++ b/vendor/github.com/getsentry/raven-go/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2013 Apollic Software, LLC. All rights reserved. +Copyright (c) 2015 Functional Software, Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. 
+ * Neither the name of Apollic Software, LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/getsentry/raven-go/README.md b/vendor/github.com/getsentry/raven-go/README.md new file mode 100644 index 00000000000..16c9483e8a2 --- /dev/null +++ b/vendor/github.com/getsentry/raven-go/README.md @@ -0,0 +1,19 @@ +# raven + +[![Build Status](https://api.travis-ci.org/getsentry/raven-go.svg?branch=master)](https://travis-ci.org/getsentry/raven-go) +[![Go Report Card](https://goreportcard.com/badge/github.com/getsentry/raven-go)](https://goreportcard.com/report/github.com/getsentry/raven-go) +[![GoDoc](https://godoc.org/github.com/getsentry/raven-go?status.svg)](https://godoc.org/github.com/getsentry/raven-go) + +raven is the official Go SDK for the [Sentry](https://github.com/getsentry/sentry) +event/error logging system. + +- [**API Documentation**](https://godoc.org/github.com/getsentry/raven-go) +- [**Usage and Examples**](https://docs.sentry.io/clients/go/) + +## Installation + +```text +go get github.com/getsentry/raven-go +``` + +Note: Go 1.7 and newer are supported. diff --git a/vendor/github.com/getsentry/raven-go/client.go b/vendor/github.com/getsentry/raven-go/client.go new file mode 100644 index 00000000000..a2c9a6c35d2 --- /dev/null +++ b/vendor/github.com/getsentry/raven-go/client.go @@ -0,0 +1,977 @@ +// Package raven implements a client for the Sentry error logging service. 
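+//
+// A minimal usage sketch (the DSN is a placeholder, not a real project key):
+//
+//	client, err := raven.New("https://public:secret@sentry.example.com/1")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	client.CaptureError(errors.New("something broke"), nil)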
+package raven + +import ( + "bytes" + "compress/zlib" + "crypto/rand" + "crypto/tls" + "encoding/base64" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + mrand "math/rand" + "net/http" + "net/url" + "os" + "regexp" + "runtime" + "strings" + "sync" + "time" + + "github.com/certifi/gocertifi" + pkgErrors "github.com/pkg/errors" +) + +const ( + userAgent = "raven-go/1.0" + timestampFormat = `"2006-01-02T15:04:05.00"` +) + +var ( + ErrPacketDropped = errors.New("raven: packet dropped") + ErrUnableToUnmarshalJSON = errors.New("raven: unable to unmarshal JSON") + ErrMissingUser = errors.New("raven: dsn missing public key and/or password") + ErrMissingProjectID = errors.New("raven: dsn missing project id") + ErrInvalidSampleRate = errors.New("raven: sample rate should be between 0 and 1") +) + +type Severity string + +// http://docs.python.org/2/howto/logging.html#logging-levels +const ( + DEBUG = Severity("debug") + INFO = Severity("info") + WARNING = Severity("warning") + ERROR = Severity("error") + FATAL = Severity("fatal") +) + +type Timestamp time.Time + +func (t Timestamp) MarshalJSON() ([]byte, error) { + return []byte(time.Time(t).UTC().Format(timestampFormat)), nil +} + +func (timestamp *Timestamp) UnmarshalJSON(data []byte) error { + t, err := time.Parse(timestampFormat, string(data)) + if err != nil { + return err + } + + *timestamp = Timestamp(t) + return nil +} + +func (timestamp Timestamp) Format(format string) string { + t := time.Time(timestamp) + return t.Format(format) +} + +// An Interface is a Sentry interface that will be serialized as JSON. +// It must implement json.Marshaler or use json struct tags. +type Interface interface { + // The Sentry class name. Example: sentry.interfaces.Stacktrace + Class() string +} + +type Culpriter interface { + Culprit() string +} + +type Transport interface { + Send(url, authHeader string, packet *Packet) error +} + +type Extra map[string]interface{} + +type outgoingPacket struct { + packet *Packet + ch chan error +} + +type Tag struct { + Key string + Value string +} + +type Tags []Tag + +func (tag *Tag) MarshalJSON() ([]byte, error) { + return json.Marshal([2]string{tag.Key, tag.Value}) +} + +func (t *Tag) UnmarshalJSON(data []byte) error { + var tag [2]string + if err := json.Unmarshal(data, &tag); err != nil { + return err + } + *t = Tag{tag[0], tag[1]} + return nil +} + +func (t *Tags) UnmarshalJSON(data []byte) error { + var tags []Tag + + switch data[0] { + case '[': + // Unmarshal into []Tag + if err := json.Unmarshal(data, &tags); err != nil { + return err + } + case '{': + // Unmarshal into map[string]string + tagMap := make(map[string]string) + if err := json.Unmarshal(data, &tagMap); err != nil { + return err + } + + // Convert to []Tag + for k, v := range tagMap { + tags = append(tags, Tag{k, v}) + } + default: + return ErrUnableToUnmarshalJSON + } + + *t = tags + return nil +} + +// https://docs.getsentry.com/hosted/clientdev/#building-the-json-packet +type Packet struct { + // Required + Message string `json:"message"` + + // Required, set automatically by Client.Send/Report via Packet.Init if blank + EventID string `json:"event_id"` + Project string `json:"project"` + Timestamp Timestamp `json:"timestamp"` + Level Severity `json:"level"` + Logger string `json:"logger"` + + // Optional + Platform string `json:"platform,omitempty"` + Culprit string `json:"culprit,omitempty"` + ServerName string `json:"server_name,omitempty"` + Release string `json:"release,omitempty"` + Environment string 
`json:"environment,omitempty"` + Tags Tags `json:"tags,omitempty"` + Modules map[string]string `json:"modules,omitempty"` + Fingerprint []string `json:"fingerprint,omitempty"` + Extra Extra `json:"extra,omitempty"` + + Interfaces []Interface `json:"-"` +} + +// NewPacket constructs a packet with the specified message and interfaces. +func NewPacket(message string, interfaces ...Interface) *Packet { + extra := Extra{} + setExtraDefaults(extra) + return &Packet{ + Message: message, + Interfaces: interfaces, + Extra: extra, + } +} + +// NewPacketWithExtra constructs a packet with the specified message, extra information, and interfaces. +func NewPacketWithExtra(message string, extra Extra, interfaces ...Interface) *Packet { + if extra == nil { + extra = Extra{} + } + setExtraDefaults(extra) + + return &Packet{ + Message: message, + Interfaces: interfaces, + Extra: extra, + } +} + +func setExtraDefaults(extra Extra) Extra { + extra["runtime.Version"] = runtime.Version() + extra["runtime.NumCPU"] = runtime.NumCPU() + extra["runtime.GOMAXPROCS"] = runtime.GOMAXPROCS(0) // 0 just returns the current value + extra["runtime.NumGoroutine"] = runtime.NumGoroutine() + return extra +} + +// Init initializes required fields in a packet. It is typically called by +// Client.Send/Report automatically. +func (packet *Packet) Init(project string) error { + if packet.Project == "" { + packet.Project = project + } + if packet.EventID == "" { + var err error + packet.EventID, err = uuid() + if err != nil { + return err + } + } + if time.Time(packet.Timestamp).IsZero() { + packet.Timestamp = Timestamp(time.Now()) + } + if packet.Level == "" { + packet.Level = ERROR + } + if packet.Logger == "" { + packet.Logger = "root" + } + if packet.ServerName == "" { + packet.ServerName = hostname + } + if packet.Platform == "" { + packet.Platform = "go" + } + + if packet.Culprit == "" { + for _, inter := range packet.Interfaces { + if c, ok := inter.(Culpriter); ok { + packet.Culprit = c.Culprit() + if packet.Culprit != "" { + break + } + } + } + } + + return nil +} + +func (packet *Packet) AddTags(tags map[string]string) { + for k, v := range tags { + packet.Tags = append(packet.Tags, Tag{k, v}) + } +} + +func uuid() (string, error) { + id := make([]byte, 16) + _, err := io.ReadFull(rand.Reader, id) + if err != nil { + return "", err + } + id[6] &= 0x0F // clear version + id[6] |= 0x40 // set version to 4 (random uuid) + id[8] &= 0x3F // clear variant + id[8] |= 0x80 // set to IETF variant + return hex.EncodeToString(id), nil +} + +func (packet *Packet) JSON() ([]byte, error) { + packetJSON, err := json.Marshal(packet) + if err != nil { + return nil, err + } + + interfaces := make(map[string]Interface, len(packet.Interfaces)) + for _, inter := range packet.Interfaces { + if inter != nil { + interfaces[inter.Class()] = inter + } + } + + if len(interfaces) > 0 { + interfaceJSON, err := json.Marshal(interfaces) + if err != nil { + return nil, err + } + packetJSON[len(packetJSON)-1] = ',' + packetJSON = append(packetJSON, interfaceJSON[1:]...) 
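+ // The two JSON objects are spliced together byte-wise: the packet's
+ // closing '}' was replaced with ',' above and the interfaces object's
+ // opening '{' is skipped, producing one merged JSON object.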
+ }
+
+ return packetJSON, nil
+}
+
+type context struct {
+ user *User
+ http *Http
+ tags map[string]string
+}
+
+func (c *context) setUser(u *User) { c.user = u }
+func (c *context) setHttp(h *Http) { c.http = h }
+func (c *context) setTags(t map[string]string) {
+ if c.tags == nil {
+ c.tags = make(map[string]string)
+ }
+ for k, v := range t {
+ c.tags[k] = v
+ }
+}
+func (c *context) clear() {
+ c.user = nil
+ c.http = nil
+ c.tags = nil
+}
+
+// Return a list of the context's interfaces, to be appended to a packet's other interfaces
+func (c *context) interfaces() []Interface {
+ n, i := 0, 0
+ if c.user != nil {
+ n++
+ }
+ if c.http != nil {
+ n++
+ }
+ interfaces := make([]Interface, n)
+ if c.user != nil {
+ interfaces[i] = c.user
+ i++
+ }
+ if c.http != nil {
+ interfaces[i] = c.http
+ i++
+ }
+ return interfaces
+}
+
+// The maximum number of packets that will be buffered waiting to be delivered.
+// Packets will be dropped if the buffer is full. Used by NewClient.
+var MaxQueueBuffer = 100
+
+func newTransport() Transport {
+ t := &HTTPTransport{}
+ rootCAs, err := gocertifi.CACerts()
+ if err != nil {
+ log.Println("raven: failed to load root TLS certificates:", err)
+ } else {
+ t.Client = &http.Client{
+ Transport: &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ TLSClientConfig: &tls.Config{RootCAs: rootCAs},
+ },
+ }
+ }
+ return t
+}
+
+func newClient(tags map[string]string) *Client {
+ client := &Client{
+ Transport: newTransport(),
+ Tags: tags,
+ context: &context{},
+ sampleRate: 1.0,
+ queue: make(chan *outgoingPacket, MaxQueueBuffer),
+ }
+ client.SetDSN(os.Getenv("SENTRY_DSN"))
+ client.SetRelease(os.Getenv("SENTRY_RELEASE"))
+ client.SetEnvironment(os.Getenv("SENTRY_ENVIRONMENT"))
+ return client
+}
+
+// New constructs a new Sentry client instance
+func New(dsn string) (*Client, error) {
+ client := newClient(nil)
+ return client, client.SetDSN(dsn)
+}
+
+// NewWithTags constructs a new Sentry client instance with default tags.
+func NewWithTags(dsn string, tags map[string]string) (*Client, error) {
+ client := newClient(tags)
+ return client, client.SetDSN(dsn)
+}
+
+// NewClient constructs a Sentry client and spawns a background goroutine to
+// handle packets sent by Client.Report.
+//
+// Deprecated: use New and NewWithTags instead
+func NewClient(dsn string, tags map[string]string) (*Client, error) {
+ client := newClient(tags)
+ return client, client.SetDSN(dsn)
+}
+
+// Client encapsulates a connection to a Sentry server. It must be initialized
+// by calling NewClient. Modification of fields concurrently with Send or after
+// calling Report for the first time is not thread-safe.
+type Client struct {
+ Tags map[string]string
+
+ Transport Transport
+
+ // DropHandler is called when a packet is dropped because the buffer is full.
+ DropHandler func(*Packet)
+
+ // Context that will get appended to all packets
+ context *context
+
+ mu sync.RWMutex
+ url string
+ projectID string
+ authHeader string
+ release string
+ environment string
+ sampleRate float32
+
+ // default logger name (leave empty for 'root')
+ defaultLoggerName string
+
+ includePaths []string
+ ignoreErrorsRegexp *regexp.Regexp
+ queue chan *outgoingPacket
+
+ // A WaitGroup to keep track of all currently in-progress captures
+ // This is intended to be used with Client.Wait() to ensure that
+ // all messages have been transported before exiting the process.
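+ // A typical shutdown sequence (a sketch; error values elided):
+ //
+ //	raven.CaptureError(err, nil)
+ //	raven.Wait() // block until all queued packets are delivered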
+ wg sync.WaitGroup
+
+ // A Once to ensure the background worker is started only once
+ start sync.Once
+}
+
+// Initialize a default *Client instance
+var DefaultClient = newClient(nil)
+
+func (c *Client) SetIgnoreErrors(errs []string) error {
+ joinedRegexp := strings.Join(errs, "|")
+ r, err := regexp.Compile(joinedRegexp)
+ if err != nil {
+ return fmt.Errorf("failed to compile regexp %q for %q: %v", joinedRegexp, errs, err)
+ }
+
+ c.mu.Lock()
+ c.ignoreErrorsRegexp = r
+ c.mu.Unlock()
+ return nil
+}
+
+func (c *Client) shouldExcludeErr(errStr string) bool {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+ return c.ignoreErrorsRegexp != nil && c.ignoreErrorsRegexp.MatchString(errStr)
+}
+
+func SetIgnoreErrors(errs ...string) error {
+ return DefaultClient.SetIgnoreErrors(errs)
+}
+
+// SetDSN updates a client with a new DSN. It is safe to call after and
+// concurrently with calls to Report and Send.
+func (client *Client) SetDSN(dsn string) error {
+ if dsn == "" {
+ return nil
+ }
+
+ client.mu.Lock()
+ defer client.mu.Unlock()
+
+ uri, err := url.Parse(dsn)
+ if err != nil {
+ return err
+ }
+
+ if uri.User == nil {
+ return ErrMissingUser
+ }
+ publicKey := uri.User.Username()
+ secretKey, hasSecretKey := uri.User.Password()
+ uri.User = nil
+
+ if idx := strings.LastIndex(uri.Path, "/"); idx != -1 {
+ client.projectID = uri.Path[idx+1:]
+ uri.Path = uri.Path[:idx+1] + "api/" + client.projectID + "/store/"
+ }
+ if client.projectID == "" {
+ return ErrMissingProjectID
+ }
+
+ client.url = uri.String()
+
+ if hasSecretKey {
+ client.authHeader = fmt.Sprintf("Sentry sentry_version=4, sentry_key=%s, sentry_secret=%s", publicKey, secretKey)
+ } else {
+ client.authHeader = fmt.Sprintf("Sentry sentry_version=4, sentry_key=%s", publicKey)
+ }
+
+ return nil
+}
+
+// SetDSN sets the DSN for the default *Client instance
+func SetDSN(dsn string) error { return DefaultClient.SetDSN(dsn) }
+
+// SetRelease sets the "release" tag.
+func (client *Client) SetRelease(release string) {
+ client.mu.Lock()
+ defer client.mu.Unlock()
+ client.release = release
+}
+
+// SetEnvironment sets the "environment" tag.
+func (client *Client) SetEnvironment(environment string) {
+ client.mu.Lock()
+ defer client.mu.Unlock()
+ client.environment = environment
+}
+
+// SetDefaultLoggerName sets the default logger name.
+func (client *Client) SetDefaultLoggerName(name string) {
+ client.mu.Lock()
+ defer client.mu.Unlock()
+ client.defaultLoggerName = name
+}
+
+// SetSampleRate sets the client-side sampling rate
+func (client *Client) SetSampleRate(rate float32) error {
+ client.mu.Lock()
+ defer client.mu.Unlock()
+
+ if rate < 0 || rate > 1 {
+ return ErrInvalidSampleRate
+ }
+ client.sampleRate = rate
+ return nil
+}
+
+// SetRelease sets the "release" tag on the default *Client
+func SetRelease(release string) { DefaultClient.SetRelease(release) }
+
+// SetEnvironment sets the "environment" tag on the default *Client
+func SetEnvironment(environment string) { DefaultClient.SetEnvironment(environment) }
+
+// SetDefaultLoggerName sets the "defaultLoggerName" on the default *Client
+func SetDefaultLoggerName(name string) {
+ DefaultClient.SetDefaultLoggerName(name)
+}
+
+// SetSampleRate sets the "sample rate" on the default *Client
+func SetSampleRate(rate float32) error { return DefaultClient.SetSampleRate(rate) }
+
+func (client *Client) worker() {
+ for outgoingPacket := range client.queue {
+
+ client.mu.RLock()
+ url, authHeader := client.url, client.authHeader
+ client.mu.RUnlock()
+
+ outgoingPacket.ch <- client.Transport.Send(url, authHeader, outgoingPacket.packet)
+ client.wg.Done()
+ }
+}
+
+// Capture asynchronously delivers a packet to the Sentry server. It is a no-op
+// when client is nil. A channel is provided if it is important to check for a
+// send's success.
+func (client *Client) Capture(packet *Packet, captureTags map[string]string) (eventID string, ch chan error) {
+ ch = make(chan error, 1)
+
+ if client == nil {
+ // return a chan that always returns nil when the caller receives from it
+ close(ch)
+ return
+ }
+
+ if client.sampleRate < 1.0 && mrand.Float32() > client.sampleRate {
+ return
+ }
+
+ if packet == nil {
+ close(ch)
+ return
+ }
+
+ if client.shouldExcludeErr(packet.Message) {
+ return
+ }
+
+ // Keep track of all running Captures so that we can wait for them all to finish
+ // *Must* call client.wg.Done() on any path that indicates that an event was
+ // finished being acted upon, whether success or failure
+ client.wg.Add(1)
+
+ // Merge capture tags and client tags
+ packet.AddTags(captureTags)
+ packet.AddTags(client.Tags)
+
+ // Initialize any required packet fields
+ client.mu.RLock()
+ packet.AddTags(client.context.tags)
+ projectID := client.projectID
+ release := client.release
+ environment := client.environment
+ defaultLoggerName := client.defaultLoggerName
+ client.mu.RUnlock()
+
+ // set the global logger name on the packet if we must
+ if packet.Logger == "" && defaultLoggerName != "" {
+ packet.Logger = defaultLoggerName
+ }
+
+ err := packet.Init(projectID)
+ if err != nil {
+ ch <- err
+ client.wg.Done()
+ return
+ }
+
+ if packet.Release == "" {
+ packet.Release = release
+ }
+
+ if packet.Environment == "" {
+ packet.Environment = environment
+ }
+
+ outgoingPacket := &outgoingPacket{packet, ch}
+
+ // Lazily start background worker until we
+ // do our first write into the queue.
+ client.start.Do(func() {
+ go client.worker()
+ })
+
+ select {
+ case client.queue <- outgoingPacket:
+ default:
+ // Send would block, drop the packet
+ if client.DropHandler != nil {
+ client.DropHandler(packet)
+ }
+ ch <- ErrPacketDropped
+ client.wg.Done()
+ }
+
+ return packet.EventID, ch
+}
+
+// Capture asynchronously delivers a packet to the Sentry server with the default *Client.
+// It is a no-op when client is nil.
A channel is provided if it is important to check for a
+// send's success.
+func Capture(packet *Packet, captureTags map[string]string) (eventID string, ch chan error) {
+ return DefaultClient.Capture(packet, captureTags)
+}
+
+// CaptureMessage formats and delivers a string message to the Sentry server.
+func (client *Client) CaptureMessage(message string, tags map[string]string, interfaces ...Interface) string {
+ if client == nil {
+ return ""
+ }
+
+ if client.shouldExcludeErr(message) {
+ return ""
+ }
+
+ packet := NewPacket(message, append(append(interfaces, client.context.interfaces()...), &Message{message, nil})...)
+ eventID, _ := client.Capture(packet, tags)
+
+ return eventID
+}
+
+// CaptureMessage formats and delivers a string message to the Sentry server with the default *Client
+func CaptureMessage(message string, tags map[string]string, interfaces ...Interface) string {
+ return DefaultClient.CaptureMessage(message, tags, interfaces...)
+}
+
+// CaptureMessageAndWait is identical to CaptureMessage except it blocks and waits for the message to be sent.
+func (client *Client) CaptureMessageAndWait(message string, tags map[string]string, interfaces ...Interface) string {
+ if client == nil {
+ return ""
+ }
+
+ if client.shouldExcludeErr(message) {
+ return ""
+ }
+
+ packet := NewPacket(message, append(append(interfaces, client.context.interfaces()...), &Message{message, nil})...)
+ eventID, ch := client.Capture(packet, tags)
+ if eventID != "" {
+ <-ch
+ }
+
+ return eventID
+}
+
+// CaptureMessageAndWait is identical to CaptureMessage except it blocks and waits for the message to be sent.
+func CaptureMessageAndWait(message string, tags map[string]string, interfaces ...Interface) string {
+ return DefaultClient.CaptureMessageAndWait(message, tags, interfaces...)
+}
+
+// CaptureError formats and delivers an error to the Sentry server.
+// Adds a stacktrace to the packet, excluding the call to this method.
+func (client *Client) CaptureError(err error, tags map[string]string, interfaces ...Interface) string {
+ if client == nil {
+ return ""
+ }
+
+ if err == nil {
+ return ""
+ }
+
+ if client.shouldExcludeErr(err.Error()) {
+ return ""
+ }
+
+ extra := extractExtra(err)
+ cause := pkgErrors.Cause(err)
+
+ packet := NewPacketWithExtra(err.Error(), extra, append(append(interfaces, client.context.interfaces()...), NewException(cause, GetOrNewStacktrace(cause, 1, 3, client.includePaths)))...)
+ eventID, _ := client.Capture(packet, tags)
+
+ return eventID
+}
+
+// CaptureError formats and delivers an error to the Sentry server using the default *Client.
+// Adds a stacktrace to the packet, excluding the call to this method.
+func CaptureError(err error, tags map[string]string, interfaces ...Interface) string {
+ return DefaultClient.CaptureError(err, tags, interfaces...)
+}
+
+// CaptureErrorAndWait is identical to CaptureError, except it blocks and ensures that the event was sent
+func (client *Client) CaptureErrorAndWait(err error, tags map[string]string, interfaces ...Interface) string {
+ if client == nil {
+ return ""
+ }
+
+ if client.shouldExcludeErr(err.Error()) {
+ return ""
+ }
+
+ extra := extractExtra(err)
+ cause := pkgErrors.Cause(err)
+
+ packet := NewPacketWithExtra(err.Error(), extra, append(append(interfaces, client.context.interfaces()...), NewException(cause, GetOrNewStacktrace(cause, 1, 3, client.includePaths)))...)
+ eventID, ch := client.Capture(packet, tags)
+ if eventID != "" {
+ <-ch
+ }
+
+ return eventID
+}
+
+// CaptureErrorAndWait is identical to CaptureError, except it blocks and ensures that the event was sent
+func CaptureErrorAndWait(err error, tags map[string]string, interfaces ...Interface) string {
+ return DefaultClient.CaptureErrorAndWait(err, tags, interfaces...)
+}
+
+// CapturePanic calls f and then recovers and reports a panic to the Sentry server if it occurs.
+// If an error is captured, both the error and the reported Sentry error ID are returned.
+func (client *Client) CapturePanic(f func(), tags map[string]string, interfaces ...Interface) (err interface{}, errorID string) {
+ // Note: This doesn't need to check for client, because we still want to go through the defer/recover path
+ // Down the line, Capture will be noop'd, so while this does a _tiny_ bit of overhead constructing the
+ // *Packet just to be thrown away, this should not be the normal case. Could be refactored to
+ // be completely noop though if we cared.
+ defer func() {
+ var packet *Packet
+ err = recover()
+ switch rval := err.(type) {
+ case nil:
+ return
+ case error:
+ if client.shouldExcludeErr(rval.Error()) {
+ return
+ }
+ packet = NewPacket(rval.Error(), append(append(interfaces, client.context.interfaces()...), NewException(rval, NewStacktrace(2, 3, client.includePaths)))...)
+ default:
+ rvalStr := fmt.Sprint(rval)
+ if client.shouldExcludeErr(rvalStr) {
+ return
+ }
+ packet = NewPacket(rvalStr, append(append(interfaces, client.context.interfaces()...), NewException(errors.New(rvalStr), NewStacktrace(2, 3, client.includePaths)))...)
+ }
+
+ errorID, _ = client.Capture(packet, tags)
+ }()
+
+ f()
+ return
+}
+
+// CapturePanic calls f and then recovers and reports a panic to the Sentry server if it occurs.
+// If an error is captured, both the error and the reported Sentry error ID are returned.
+func CapturePanic(f func(), tags map[string]string, interfaces ...Interface) (interface{}, string) {
+ return DefaultClient.CapturePanic(f, tags, interfaces...)
+}
+
+// CapturePanicAndWait is identical to CapturePanic, except it blocks and ensures that the event was sent
+func (client *Client) CapturePanicAndWait(f func(), tags map[string]string, interfaces ...Interface) (err interface{}, errorID string) {
+ // Note: This doesn't need to check for client, because we still want to go through the defer/recover path
+ // Down the line, Capture will be noop'd, so while this does a _tiny_ bit of overhead constructing the
+ // *Packet just to be thrown away, this should not be the normal case. Could be refactored to
+ // be completely noop though if we cared.
+ defer func() {
+ var packet *Packet
+ err = recover()
+ switch rval := err.(type) {
+ case nil:
+ return
+ case error:
+ if client.shouldExcludeErr(rval.Error()) {
+ return
+ }
+ packet = NewPacket(rval.Error(), append(append(interfaces, client.context.interfaces()...), NewException(rval, NewStacktrace(2, 3, client.includePaths)))...)
+ default:
+ rvalStr := fmt.Sprint(rval)
+ if client.shouldExcludeErr(rvalStr) {
+ return
+ }
+ packet = NewPacket(rvalStr, append(append(interfaces, client.context.interfaces()...), NewException(errors.New(rvalStr), NewStacktrace(2, 3, client.includePaths)))...)
+ }
+
+ var ch chan error
+ errorID, ch = client.Capture(packet, tags)
+ if errorID != "" {
+ <-ch
+ }
+ }()
+
+ f()
+ return
+}
+
+// CapturePanicAndWait is identical to CapturePanic, except it blocks and ensures that the event was sent
+func CapturePanicAndWait(f func(), tags map[string]string, interfaces ...Interface) (interface{}, string) {
+ return DefaultClient.CapturePanicAndWait(f, tags, interfaces...)
+}
+
+func (client *Client) Close() {
+ close(client.queue)
+}
+
+func Close() { DefaultClient.Close() }
+
+// Wait blocks and waits for all events to finish being sent to the Sentry server
+func (client *Client) Wait() {
+ client.wg.Wait()
+}
+
+// Wait blocks and waits for all events to finish being sent to the Sentry server
+func Wait() { DefaultClient.Wait() }
+
+func (client *Client) URL() string {
+ client.mu.RLock()
+ defer client.mu.RUnlock()
+
+ return client.url
+}
+
+func URL() string { return DefaultClient.URL() }
+
+func (client *Client) ProjectID() string {
+ client.mu.RLock()
+ defer client.mu.RUnlock()
+
+ return client.projectID
+}
+
+func ProjectID() string { return DefaultClient.ProjectID() }
+
+func (client *Client) Release() string {
+ client.mu.RLock()
+ defer client.mu.RUnlock()
+
+ return client.release
+}
+
+func Release() string { return DefaultClient.Release() }
+
+func IncludePaths() []string { return DefaultClient.IncludePaths() }
+
+func (client *Client) IncludePaths() []string {
+ client.mu.RLock()
+ defer client.mu.RUnlock()
+
+ return client.includePaths
+}
+
+func SetIncludePaths(p []string) { DefaultClient.SetIncludePaths(p) }
+
+func (client *Client) SetIncludePaths(p []string) {
+ client.mu.Lock()
+ defer client.mu.Unlock()
+
+ client.includePaths = p
+}
+
+func (c *Client) SetUserContext(u *User) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ c.context.setUser(u)
+}
+
+func (c *Client) SetHttpContext(h *Http) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ c.context.setHttp(h)
+}
+
+func (c *Client) SetTagsContext(t map[string]string) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ c.context.setTags(t)
+}
+
+func (c *Client) ClearContext() {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ c.context.clear()
+}
+
+func SetUserContext(u *User) { DefaultClient.SetUserContext(u) }
+func SetHttpContext(h *Http) { DefaultClient.SetHttpContext(h) }
+func SetTagsContext(t map[string]string) { DefaultClient.SetTagsContext(t) }
+func ClearContext() { DefaultClient.ClearContext() }
+
+// HTTPTransport is the default transport, delivering packets to Sentry via the
+// HTTP API.
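+//
+// The transport can be swapped for one with, for example, a custom timeout
+// (a sketch; the 5-second value is arbitrary):
+//
+//	DefaultClient.Transport = &HTTPTransport{
+//		Client: &http.Client{Timeout: 5 * time.Second},
+//	}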
+type HTTPTransport struct {
+ *http.Client
+}
+
+func (t *HTTPTransport) Send(url, authHeader string, packet *Packet) error {
+ if url == "" {
+ return nil
+ }
+
+ body, contentType, err := serializedPacket(packet)
+ if err != nil {
+ return fmt.Errorf("error serializing packet: %v", err)
+ }
+ req, err := http.NewRequest("POST", url, body)
+ if err != nil {
+ return fmt.Errorf("can't create new request: %v", err)
+ }
+ req.Header.Set("X-Sentry-Auth", authHeader)
+ req.Header.Set("User-Agent", userAgent)
+ req.Header.Set("Content-Type", contentType)
+ res, err := t.Do(req)
+ if err != nil {
+ return err
+ }
+ io.Copy(ioutil.Discard, res.Body)
+ res.Body.Close()
+ if res.StatusCode != 200 {
+ return fmt.Errorf("raven: got http status %d - x-sentry-error: %s", res.StatusCode, res.Header.Get("X-Sentry-Error"))
+ }
+ return nil
+}
+
+func serializedPacket(packet *Packet) (io.Reader, string, error) {
+ packetJSON, err := packet.JSON()
+ if err != nil {
+ return nil, "", fmt.Errorf("error marshaling packet %+v to JSON: %v", packet, err)
+ }
+
+ // Only deflate/base64 the packet if it is bigger than 1KB, as there is
+ // overhead.
+ if len(packetJSON) > 1000 {
+ buf := &bytes.Buffer{}
+ b64 := base64.NewEncoder(base64.StdEncoding, buf)
+ deflate, _ := zlib.NewWriterLevel(b64, zlib.BestCompression)
+ deflate.Write(packetJSON)
+ deflate.Close()
+ b64.Close()
+ return buf, "application/octet-stream", nil
+ }
+ return bytes.NewReader(packetJSON), "application/json", nil
+}
+
+var hostname string
+
+func init() {
+ hostname, _ = os.Hostname()
+}
diff --git a/vendor/github.com/getsentry/raven-go/errors.go b/vendor/github.com/getsentry/raven-go/errors.go
new file mode 100644
index 00000000000..5e57270436d
--- /dev/null
+++ b/vendor/github.com/getsentry/raven-go/errors.go
@@ -0,0 +1,60 @@
+package raven
+
+type causer interface {
+ Cause() error
+}
+
+type errWrappedWithExtra struct {
+ err error
+ extraInfo map[string]interface{}
+}
+
+func (ewx *errWrappedWithExtra) Error() string {
+ return ewx.err.Error()
+}
+
+func (ewx *errWrappedWithExtra) Cause() error {
+ return ewx.err
+}
+
+func (ewx *errWrappedWithExtra) ExtraInfo() Extra {
+ return ewx.extraInfo
+}
+
+// WrapWithExtra adds extra data to an error before reporting it to Sentry
+func WrapWithExtra(err error, extraInfo map[string]interface{}) error {
+ return &errWrappedWithExtra{
+ err: err,
+ extraInfo: extraInfo,
+ }
+}
+
+type ErrWithExtra interface {
+ Error() string
+ Cause() error
+ ExtraInfo() Extra
+}
+
+// Iteratively fetches all the Extra data added to an error,
+// and its underlying errors. Extra data defined first is
+// respected, and is not overridden when extracting.
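+//
+// For example (a sketch; io.EOF stands in for any error):
+//
+//	err := WrapWithExtra(io.EOF, map[string]interface{}{"stage": "decode"})
+//	extra := extractExtra(err) // Extra{"stage": "decode"}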
+func extractExtra(err error) Extra { + extra := Extra{} + + currentErr := err + for currentErr != nil { + if errWithExtra, ok := currentErr.(ErrWithExtra); ok { + for k, v := range errWithExtra.ExtraInfo() { + extra[k] = v + } + } + + if errWithCause, ok := currentErr.(causer); ok { + currentErr = errWithCause.Cause() + } else { + currentErr = nil + } + } + + return extra +} diff --git a/vendor/github.com/getsentry/raven-go/exception.go b/vendor/github.com/getsentry/raven-go/exception.go new file mode 100644 index 00000000000..552eaad128c --- /dev/null +++ b/vendor/github.com/getsentry/raven-go/exception.go @@ -0,0 +1,50 @@ +package raven + +import ( + "reflect" + "regexp" +) + +var errorMsgPattern = regexp.MustCompile(`\A(\w+): (.+)\z`) + +func NewException(err error, stacktrace *Stacktrace) *Exception { + msg := err.Error() + ex := &Exception{ + Stacktrace: stacktrace, + Value: msg, + Type: reflect.TypeOf(err).String(), + } + if m := errorMsgPattern.FindStringSubmatch(msg); m != nil { + ex.Module, ex.Value = m[1], m[2] + } + return ex +} + +// https://docs.getsentry.com/hosted/clientdev/interfaces/#failure-interfaces +type Exception struct { + // Required + Value string `json:"value"` + + // Optional + Type string `json:"type,omitempty"` + Module string `json:"module,omitempty"` + Stacktrace *Stacktrace `json:"stacktrace,omitempty"` +} + +func (e *Exception) Class() string { return "exception" } + +func (e *Exception) Culprit() string { + if e.Stacktrace == nil { + return "" + } + return e.Stacktrace.Culprit() +} + +// Exceptions allows for chained errors +// https://docs.sentry.io/clientdev/interfaces/exception/ +type Exceptions struct { + // Required + Values []*Exception `json:"values"` +} + +func (es Exceptions) Class() string { return "exception" } diff --git a/vendor/github.com/getsentry/raven-go/http.go b/vendor/github.com/getsentry/raven-go/http.go new file mode 100644 index 00000000000..ae8f47234c1 --- /dev/null +++ b/vendor/github.com/getsentry/raven-go/http.go @@ -0,0 +1,99 @@ +package raven + +import ( + "errors" + "fmt" + "net" + "net/http" + "net/url" + "runtime/debug" + "strings" +) + +func NewHttp(req *http.Request) *Http { + proto := "http" + if req.TLS != nil || req.Header.Get("X-Forwarded-Proto") == "https" { + proto = "https" + } + h := &Http{ + Method: req.Method, + Cookies: req.Header.Get("Cookie"), + Query: sanitizeQuery(req.URL.Query()).Encode(), + URL: proto + "://" + req.Host + req.URL.Path, + Headers: make(map[string]string, len(req.Header)), + } + if addr, port, err := net.SplitHostPort(req.RemoteAddr); err == nil { + h.Env = map[string]string{"REMOTE_ADDR": addr, "REMOTE_PORT": port} + } + for k, v := range req.Header { + h.Headers[k] = strings.Join(v, ",") + } + h.Headers["Host"] = req.Host + return h +} + +var querySecretFields = []string{"password", "passphrase", "passwd", "secret"} + +func sanitizeQuery(query url.Values) url.Values { + for _, keyword := range querySecretFields { + for field := range query { + if strings.Contains(field, keyword) { + query[field] = []string{"********"} + } + } + } + return query +} + +// https://docs.getsentry.com/hosted/clientdev/interfaces/#context-interfaces +type Http struct { + // Required + URL string `json:"url"` + Method string `json:"method"` + Query string `json:"query_string,omitempty"` + + // Optional + Cookies string `json:"cookies,omitempty"` + Headers map[string]string `json:"headers,omitempty"` + Env map[string]string `json:"env,omitempty"` + + // Must be either a string or map[string]string + Data 
interface{} `json:"data,omitempty"`
+}
+
+func (h *Http) Class() string { return "request" }
+
+// Recovery handler to wrap the stdlib net/http Mux.
+// Example:
+//	http.HandleFunc("/", raven.RecoveryHandler(func(w http.ResponseWriter, r *http.Request) {
+//		...
+//	}))
func RecoveryHandler(handler func(http.ResponseWriter, *http.Request)) func(http.ResponseWriter, *http.Request) {
+ return Recoverer(http.HandlerFunc(handler)).ServeHTTP
+}
+
+// Recovery handler to wrap the stdlib net/http Mux.
+// Example:
+//	mux := http.NewServeMux()
+//	...
+//	http.Handle("/", raven.Recoverer(mux))
func Recoverer(handler http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ defer func() {
+ if rval := recover(); rval != nil {
+ debug.PrintStack()
+ rvalStr := fmt.Sprint(rval)
+ var packet *Packet
+ if err, ok := rval.(error); ok {
+ packet = NewPacket(rvalStr, NewException(errors.New(rvalStr), GetOrNewStacktrace(err, 2, 3, nil)), NewHttp(r))
+ } else {
+ packet = NewPacket(rvalStr, NewException(errors.New(rvalStr), NewStacktrace(2, 3, nil)), NewHttp(r))
+ }
+ Capture(packet, nil)
+ w.WriteHeader(http.StatusInternalServerError)
+ }
+ }()
+
+ handler.ServeHTTP(w, r)
+ })
+}
diff --git a/vendor/github.com/getsentry/raven-go/interfaces.go b/vendor/github.com/getsentry/raven-go/interfaces.go
new file mode 100644
index 00000000000..a05dc3de472
--- /dev/null
+++ b/vendor/github.com/getsentry/raven-go/interfaces.go
@@ -0,0 +1,49 @@
+package raven
+
+// https://docs.getsentry.com/hosted/clientdev/interfaces/#message-interface
+type Message struct {
+ // Required
+ Message string `json:"message"`
+
+ // Optional
+ Params []interface{} `json:"params,omitempty"`
+}
+
+func (m *Message) Class() string { return "logentry" }
+
+// https://docs.getsentry.com/hosted/clientdev/interfaces/#template-interface
+type Template struct {
+ // Required
+ Filename string `json:"filename"`
+ Lineno int `json:"lineno"`
+ ContextLine string `json:"context_line"`
+
+ // Optional
+ PreContext []string `json:"pre_context,omitempty"`
+ PostContext []string `json:"post_context,omitempty"`
+ AbsolutePath string `json:"abs_path,omitempty"`
+}
+
+func (t *Template) Class() string { return "template" }
+
+// https://docs.getsentry.com/hosted/clientdev/interfaces/#context-interfaces
+type User struct {
+ // All fields are optional
+ ID string `json:"id,omitempty"`
+ Username string `json:"username,omitempty"`
+ Email string `json:"email,omitempty"`
+ IP string `json:"ip_address,omitempty"`
+}
+
+func (h *User) Class() string { return "user" }
+
+// https://docs.getsentry.com/hosted/clientdev/interfaces/#context-interfaces
+type Query struct {
+ // Required
+ Query string `json:"query"`
+
+ // Optional
+ Engine string `json:"engine,omitempty"`
+}
+
+func (q *Query) Class() string { return "query" }
diff --git a/vendor/github.com/getsentry/raven-go/runtests.sh b/vendor/github.com/getsentry/raven-go/runtests.sh
new file mode 100644
index 00000000000..9ed279c966e
--- /dev/null
+++ b/vendor/github.com/getsentry/raven-go/runtests.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+go test -race ./...
+go test -cover ./...
+go test -v ./...
diff --git a/vendor/github.com/getsentry/raven-go/stacktrace.go b/vendor/github.com/getsentry/raven-go/stacktrace.go
new file mode 100644
index 00000000000..bc302ba119f
--- /dev/null
+++ b/vendor/github.com/getsentry/raven-go/stacktrace.go
@@ -0,0 +1,277 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Some code from the runtime/debug package of the Go standard library.
+
+package raven
+
+import (
+ "bytes"
+ "go/build"
+ "io/ioutil"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "sync"
+
+ "github.com/pkg/errors"
+)
+
+// https://docs.getsentry.com/hosted/clientdev/interfaces/#failure-interfaces
+type Stacktrace struct {
+ // Required
+ Frames []*StacktraceFrame `json:"frames"`
+}
+
+func (s *Stacktrace) Class() string { return "stacktrace" }
+
+func (s *Stacktrace) Culprit() string {
+ for i := len(s.Frames) - 1; i >= 0; i-- {
+ frame := s.Frames[i]
+ if frame.InApp && frame.Module != "" && frame.Function != "" {
+ return frame.Module + "." + frame.Function
+ }
+ }
+ return ""
+}
+
+type StacktraceFrame struct {
+ // At least one required
+ Filename string `json:"filename,omitempty"`
+ Function string `json:"function,omitempty"`
+ Module string `json:"module,omitempty"`
+
+ // Optional
+ Lineno int `json:"lineno,omitempty"`
+ Colno int `json:"colno,omitempty"`
+ AbsolutePath string `json:"abs_path,omitempty"`
+ ContextLine string `json:"context_line,omitempty"`
+ PreContext []string `json:"pre_context,omitempty"`
+ PostContext []string `json:"post_context,omitempty"`
+ InApp bool `json:"in_app"`
+}
+
+// Try to get a stacktrace from err via the github.com/pkg/errors StackTrace interface; otherwise fall back to NewStacktrace()
+func GetOrNewStacktrace(err error, skip int, context int, appPackagePrefixes []string) *Stacktrace {
+ stacktracer, errHasStacktrace := err.(interface {
+ StackTrace() errors.StackTrace
+ })
+ if errHasStacktrace {
+ var frames []*StacktraceFrame
+ for _, f := range stacktracer.StackTrace() {
+ pc := uintptr(f) - 1
+ fn := runtime.FuncForPC(pc)
+ var fName string
+ var file string
+ var line int
+ if fn != nil {
+ file, line = fn.FileLine(pc)
+ fName = fn.Name()
+ } else {
+ file = "unknown"
+ fName = "unknown"
+ }
+ frame := NewStacktraceFrame(pc, fName, file, line, context, appPackagePrefixes)
+ if frame != nil {
+ frames = append([]*StacktraceFrame{frame}, frames...)
+ }
+ }
+ return &Stacktrace{Frames: frames}
+ } else {
+ return NewStacktrace(skip+1, context, appPackagePrefixes)
+ }
+}
+
+// Initialize and populate a new stacktrace, skipping skip frames.
+//
+// context is the number of surrounding lines that should be included for context.
+// Setting context to 3 would try to get seven lines. Setting context to -1 returns
+// one line with no surrounding context, and 0 returns no context.
+//
+// appPackagePrefixes is a list of prefixes used to check whether a package should
+// be considered "in app".
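+//
+// For example (a sketch; the package prefix is illustrative):
+//
+//	trace := NewStacktrace(0, 3, []string{"github.com/myorg/"})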
+func NewStacktrace(skip int, context int, appPackagePrefixes []string) *Stacktrace { + var frames []*StacktraceFrame + + callerPcs := make([]uintptr, 100) + numCallers := runtime.Callers(skip+2, callerPcs) + + // If there are no callers, the entire stacktrace is nil + if numCallers == 0 { + return nil + } + + callersFrames := runtime.CallersFrames(callerPcs) + + for { + fr, more := callersFrames.Next() + if fr.Func != nil { + frame := NewStacktraceFrame(fr.PC, fr.Function, fr.File, fr.Line, context, appPackagePrefixes) + if frame != nil { + frames = append(frames, frame) + } + } + if !more { + break + } + } + // If there are no frames, the entire stacktrace is nil + if len(frames) == 0 { + return nil + } + // Optimize the path where there's only 1 frame + if len(frames) == 1 { + return &Stacktrace{frames} + } + // Sentry wants the frames with the oldest first, so reverse them + for i, j := 0, len(frames)-1; i < j; i, j = i+1, j-1 { + frames[i], frames[j] = frames[j], frames[i] + } + return &Stacktrace{frames} +} + +// Build a single frame using data returned from runtime.Caller. +// +// context is the number of surrounding lines that should be included for context. +// Setting context to 3 would try to get seven lines. Setting context to -1 returns +// one line with no surrounding context, and 0 returns no context. +// +// appPackagePrefixes is a list of prefixes used to check whether a package should +// be considered "in app". +func NewStacktraceFrame(pc uintptr, fName, file string, line, context int, appPackagePrefixes []string) *StacktraceFrame { + frame := &StacktraceFrame{AbsolutePath: file, Filename: trimPath(file), Lineno: line, InApp: false} + frame.Module, frame.Function = functionName(fName) + + // `runtime.goexit` is effectively a placeholder that comes from + // runtime/asm_amd64.s and is meaningless. + if frame.Module == "runtime" && frame.Function == "goexit" { + return nil + } + + if frame.Module == "main" { + frame.InApp = true + } else { + for _, prefix := range appPackagePrefixes { + if strings.HasPrefix(frame.Module, prefix) && !strings.Contains(frame.Module, "vendor") && !strings.Contains(frame.Module, "third_party") { + frame.InApp = true + } + } + } + + if context > 0 { + contextLines, lineIdx := sourceCodeLoader.Load(file, line, context) + if len(contextLines) > 0 { + for i, line := range contextLines { + switch { + case i < lineIdx: + frame.PreContext = append(frame.PreContext, string(line)) + case i == lineIdx: + frame.ContextLine = string(line) + default: + frame.PostContext = append(frame.PostContext, string(line)) + } + } + } + } else if context == -1 { + contextLine, _ := sourceCodeLoader.Load(file, line, 0) + if len(contextLine) > 0 { + frame.ContextLine = string(contextLine[0]) + } + } + return frame +} + +// Retrieve the name of the package and function containing the PC. 
+func functionName(fName string) (pack string, name string) { + name = fName + // We get this: + // runtime/debug.*T·ptrmethod + // and want this: + // pack = runtime/debug + // name = *T.ptrmethod + if idx := strings.LastIndex(name, "."); idx != -1 { + pack = name[:idx] + name = name[idx+1:] + } + name = strings.Replace(name, "·", ".", -1) + return +} + +type SourceCodeLoader interface { + Load(filename string, line, context int) ([][]byte, int) +} + +var sourceCodeLoader SourceCodeLoader = &fsLoader{cache: make(map[string][][]byte)} + +func SetSourceCodeLoader(loader SourceCodeLoader) { + sourceCodeLoader = loader +} + +type fsLoader struct { + mu sync.Mutex + cache map[string][][]byte +} + +func (fs *fsLoader) Load(filename string, line, context int) ([][]byte, int) { + fs.mu.Lock() + defer fs.mu.Unlock() + lines, ok := fs.cache[filename] + if !ok { + data, err := ioutil.ReadFile(filename) + if err != nil { + // cache errors as nil slice: code below handles it correctly + // otherwise when missing the source or running as a different user, we try + // reading the file on each error which is unnecessary + fs.cache[filename] = nil + return nil, 0 + } + lines = bytes.Split(data, []byte{'\n'}) + fs.cache[filename] = lines + } + + if lines == nil { + // cached error from ReadFile: return no lines + return nil, 0 + } + + line-- // stack trace lines are 1-indexed + start := line - context + var idx int + if start < 0 { + start = 0 + idx = line + } else { + idx = context + } + end := line + context + 1 + if line >= len(lines) { + return nil, 0 + } + if end > len(lines) { + end = len(lines) + } + return lines[start:end], idx +} + +var trimPaths []string + +// Try to trim the GOROOT or GOPATH prefix off of a filename +func trimPath(filename string) string { + for _, prefix := range trimPaths { + if trimmed := strings.TrimPrefix(filename, prefix); len(trimmed) < len(filename) { + return trimmed + } + } + return filename +} + +func init() { + // Collect all source directories, and make sure they + // end in a trailing "separator" + for _, prefix := range build.Default.SrcDirs() { + if prefix[len(prefix)-1] != filepath.Separator { + prefix += string(filepath.Separator) + } + trimPaths = append(trimPaths, prefix) + } +} diff --git a/vendor/github.com/getsentry/raven-go/writer.go b/vendor/github.com/getsentry/raven-go/writer.go new file mode 100644 index 00000000000..61f7a91088e --- /dev/null +++ b/vendor/github.com/getsentry/raven-go/writer.go @@ -0,0 +1,20 @@ +package raven + +type Writer struct { + Client *Client + Level Severity + Logger string // Logger name reported to Sentry +} + +// Write formats the byte slice p into a string, and sends a message to +// Sentry at the severity level indicated by the Writer w. 
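+//
+// This makes a Writer usable as the output of a standard library logger
+// (a sketch; the logger name is illustrative):
+//
+//	w := &Writer{Client: DefaultClient, Level: ERROR, Logger: "stdlog"}
+//	stdlog := log.New(w, "", 0)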
+func (w *Writer) Write(p []byte) (int, error) { + message := string(p) + + packet := NewPacket(message, &Message{message, nil}) + packet.Level = w.Level + packet.Logger = w.Logger + w.Client.Capture(packet, nil) + + return len(p), nil +} diff --git a/vendor/github.com/ghodss/yaml/.gitignore b/vendor/github.com/ghodss/yaml/.gitignore new file mode 100644 index 00000000000..e256a31e00a --- /dev/null +++ b/vendor/github.com/ghodss/yaml/.gitignore @@ -0,0 +1,20 @@ +# OSX leaves these everywhere on SMB shares +._* + +# Eclipse files +.classpath +.project +.settings/** + +# Emacs save files +*~ + +# Vim-related files +[._]*.s[a-w][a-z] +[._]s[a-w][a-z] +*.un~ +Session.vim +.netrwhist + +# Go test binaries +*.test diff --git a/vendor/github.com/ghodss/yaml/.travis.yml b/vendor/github.com/ghodss/yaml/.travis.yml new file mode 100644 index 00000000000..0e9d6edc010 --- /dev/null +++ b/vendor/github.com/ghodss/yaml/.travis.yml @@ -0,0 +1,7 @@ +language: go +go: + - 1.3 + - 1.4 +script: + - go test + - go build diff --git a/vendor/github.com/ghodss/yaml/LICENSE b/vendor/github.com/ghodss/yaml/LICENSE new file mode 100644 index 00000000000..7805d36de73 --- /dev/null +++ b/vendor/github.com/ghodss/yaml/LICENSE @@ -0,0 +1,50 @@ +The MIT License (MIT) + +Copyright (c) 2014 Sam Ghods + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/ghodss/yaml/README.md b/vendor/github.com/ghodss/yaml/README.md new file mode 100644 index 00000000000..0200f75b4d1 --- /dev/null +++ b/vendor/github.com/ghodss/yaml/README.md @@ -0,0 +1,121 @@ +# YAML marshaling and unmarshaling support for Go + +[![Build Status](https://travis-ci.org/ghodss/yaml.svg)](https://travis-ci.org/ghodss/yaml) + +## Introduction + +A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs. + +In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/). + +## Compatibility + +This package uses [go-yaml](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility). + +## Caveats + +**Caveat #1:** When using `yaml.Marshal` and `yaml.Unmarshal`, binary data should NOT be preceded with the `!!binary` YAML tag. If you do, go-yaml will convert the binary data from base64 to native binary data, which is not compatible with JSON. You can still use binary in your YAML files though - just store them without the `!!binary` tag and decode the base64 in your code (e.g. in the custom JSON methods `MarshalJSON` and `UnmarshalJSON`). This also has the benefit that your YAML and your JSON binary data will be decoded exactly the same way. As an example: + +``` +BAD: + exampleKey: !!binary gIGC + +GOOD: + exampleKey: gIGC +... and decode the base64 data in your code. +``` + +**Caveat #2:** When using `YAMLToJSON` directly, maps with keys that are maps will result in an error since this is not supported by JSON. This error will occur in `Unmarshal` as well since you can't unmarshal map keys anyways since struct fields can't be keys. + +## Installation and usage + +To install, run: + +``` +$ go get github.com/ghodss/yaml +``` + +And import using: + +``` +import "github.com/ghodss/yaml" +``` + +Usage is very similar to the JSON library: + +```go +package main + +import ( + "fmt" + + "github.com/ghodss/yaml" +) + +type Person struct { + Name string `json:"name"` // Affects YAML field names too. + Age int `json:"age"` +} + +func main() { + // Marshal a Person struct to YAML. + p := Person{"John", 30} + y, err := yaml.Marshal(p) + if err != nil { + fmt.Printf("err: %v\n", err) + return + } + fmt.Println(string(y)) + /* Output: + age: 30 + name: John + */ + + // Unmarshal the YAML back into a Person struct. 
+ var p2 Person + err = yaml.Unmarshal(y, &p2) + if err != nil { + fmt.Printf("err: %v\n", err) + return + } + fmt.Println(p2) + /* Output: + {John 30} + */ +} +``` + +`yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available: + +```go +package main + +import ( + "fmt" + + "github.com/ghodss/yaml" +) + +func main() { + j := []byte(`{"name": "John", "age": 30}`) + y, err := yaml.JSONToYAML(j) + if err != nil { + fmt.Printf("err: %v\n", err) + return + } + fmt.Println(string(y)) + /* Output: + name: John + age: 30 + */ + j2, err := yaml.YAMLToJSON(y) + if err != nil { + fmt.Printf("err: %v\n", err) + return + } + fmt.Println(string(j2)) + /* Output: + {"age":30,"name":"John"} + */ +} +``` diff --git a/vendor/github.com/ghodss/yaml/fields.go b/vendor/github.com/ghodss/yaml/fields.go new file mode 100644 index 00000000000..58600740266 --- /dev/null +++ b/vendor/github.com/ghodss/yaml/fields.go @@ -0,0 +1,501 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +package yaml + +import ( + "bytes" + "encoding" + "encoding/json" + "reflect" + "sort" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +// indirect walks down v allocating pointers as needed, +// until it gets to a non-pointer. +// if it encounters an Unmarshaler, indirect stops and returns that. +// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. +func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { + // If v is a named type and is addressable, + // start with its address, so that if the type has pointer methods, + // we find them. + if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { + v = v.Addr() + } + for { + // Load value from interface, but only if the result will be + // usefully addressable. + if v.Kind() == reflect.Interface && !v.IsNil() { + e := v.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { + v = e + continue + } + } + + if v.Kind() != reflect.Ptr { + break + } + + if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { + break + } + if v.IsNil() { + if v.CanSet() { + v.Set(reflect.New(v.Type().Elem())) + } else { + v = reflect.New(v.Type().Elem()) + } + } + if v.Type().NumMethod() > 0 { + if u, ok := v.Interface().(json.Unmarshaler); ok { + return u, nil, reflect.Value{} + } + if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { + return nil, u, reflect.Value{} + } + } + v = v.Elem() + } + return nil, nil, v +} + +// A field represents a single field found in a struct. +type field struct { + name string + nameBytes []byte // []byte(name) + equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent + + tag bool + index []int + typ reflect.Type + omitEmpty bool + quoted bool +} + +func fillField(f field) field { + f.nameBytes = []byte(f.name) + f.equalFold = foldFunc(f.nameBytes) + return f +} + +// byName sorts field by name, breaking ties with depth, +// then breaking ties with "name came from json tag", then +// breaking ties with index sequence. 
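+// The net effect: for a given name, shallower fields sort first and tagged
+// fields sort before untagged ones, which is the order dominantField relies
+// on when resolving conflicts between embedded fields.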
+type byName []field + +func (x byName) Len() int { return len(x) } + +func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byName) Less(i, j int) bool { + if x[i].name != x[j].name { + return x[i].name < x[j].name + } + if len(x[i].index) != len(x[j].index) { + return len(x[i].index) < len(x[j].index) + } + if x[i].tag != x[j].tag { + return x[i].tag + } + return byIndex(x).Less(i, j) +} + +// byIndex sorts field by index sequence. +type byIndex []field + +func (x byIndex) Len() int { return len(x) } + +func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byIndex) Less(i, j int) bool { + for k, xik := range x[i].index { + if k >= len(x[j].index) { + return false + } + if xik != x[j].index[k] { + return xik < x[j].index[k] + } + } + return len(x[i].index) < len(x[j].index) +} + +// typeFields returns a list of fields that JSON should recognize for the given type. +// The algorithm is breadth-first search over the set of structs to include - the top struct +// and then any reachable anonymous structs. +func typeFields(t reflect.Type) []field { + // Anonymous fields to explore at the current level and the next. + current := []field{} + next := []field{{typ: t}} + + // Count of queued names for current level and the next. + count := map[reflect.Type]int{} + nextCount := map[reflect.Type]int{} + + // Types already visited at an earlier level. + visited := map[reflect.Type]bool{} + + // Fields found. + var fields []field + + for len(next) > 0 { + current, next = next, current[:0] + count, nextCount = nextCount, map[reflect.Type]int{} + + for _, f := range current { + if visited[f.typ] { + continue + } + visited[f.typ] = true + + // Scan f.typ for fields to include. + for i := 0; i < f.typ.NumField(); i++ { + sf := f.typ.Field(i) + if sf.PkgPath != "" { // unexported + continue + } + tag := sf.Tag.Get("json") + if tag == "-" { + continue + } + name, opts := parseTag(tag) + if !isValidTag(name) { + name = "" + } + index := make([]int, len(f.index)+1) + copy(index, f.index) + index[len(f.index)] = i + + ft := sf.Type + if ft.Name() == "" && ft.Kind() == reflect.Ptr { + // Follow pointer. + ft = ft.Elem() + } + + // Record found field and index sequence. + if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { + tagged := name != "" + if name == "" { + name = sf.Name + } + fields = append(fields, fillField(field{ + name: name, + tag: tagged, + index: index, + typ: ft, + omitEmpty: opts.Contains("omitempty"), + quoted: opts.Contains("string"), + })) + if count[f.typ] > 1 { + // If there were multiple instances, add a second, + // so that the annihilation code will see a duplicate. + // It only cares about the distinction between 1 or 2, + // so don't bother generating any more copies. + fields = append(fields, fields[len(fields)-1]) + } + continue + } + + // Record new anonymous struct to explore in next round. + nextCount[ft]++ + if nextCount[ft] == 1 { + next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft})) + } + } + } + } + + sort.Sort(byName(fields)) + + // Delete all fields that are hidden by the Go rules for embedded fields, + // except that fields with JSON tags are promoted. + + // The fields are sorted in primary order of name, secondary order + // of field index length. Loop over names; for each name, delete + // hidden fields by choosing the one dominant field that survives. + out := fields[:0] + for advance, i := 0, 0; i < len(fields); i += advance { + // One iteration per name. 
+ // Find the sequence of fields with the name of this first field. + fi := fields[i] + name := fi.name + for advance = 1; i+advance < len(fields); advance++ { + fj := fields[i+advance] + if fj.name != name { + break + } + } + if advance == 1 { // Only one field with this name + out = append(out, fi) + continue + } + dominant, ok := dominantField(fields[i : i+advance]) + if ok { + out = append(out, dominant) + } + } + + fields = out + sort.Sort(byIndex(fields)) + + return fields +} + +// dominantField looks through the fields, all of which are known to +// have the same name, to find the single field that dominates the +// others using Go's embedding rules, modified by the presence of +// JSON tags. If there are multiple top-level fields, the boolean +// will be false: This condition is an error in Go and we skip all +// the fields. +func dominantField(fields []field) (field, bool) { + // The fields are sorted in increasing index-length order. The winner + // must therefore be one with the shortest index length. Drop all + // longer entries, which is easy: just truncate the slice. + length := len(fields[0].index) + tagged := -1 // Index of first tagged field. + for i, f := range fields { + if len(f.index) > length { + fields = fields[:i] + break + } + if f.tag { + if tagged >= 0 { + // Multiple tagged fields at the same level: conflict. + // Return no field. + return field{}, false + } + tagged = i + } + } + if tagged >= 0 { + return fields[tagged], true + } + // All remaining fields have the same length. If there's more than one, + // we have a conflict (two fields named "X" at the same level) and we + // return no field. + if len(fields) > 1 { + return field{}, false + } + return fields[0], true +} + +var fieldCache struct { + sync.RWMutex + m map[reflect.Type][]field +} + +// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. +func cachedTypeFields(t reflect.Type) []field { + fieldCache.RLock() + f := fieldCache.m[t] + fieldCache.RUnlock() + if f != nil { + return f + } + + // Compute fields without lock. + // Might duplicate effort but won't hold other computations back. + f = typeFields(t) + if f == nil { + f = []field{} + } + + fieldCache.Lock() + if fieldCache.m == nil { + fieldCache.m = map[reflect.Type][]field{} + } + fieldCache.m[t] = f + fieldCache.Unlock() + return f +} + +func isValidTag(s string) bool { + if s == "" { + return false + } + for _, c := range s { + switch { + case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): + // Backslash and quote chars are reserved, but + // otherwise any punctuation chars are allowed + // in a tag name. + default: + if !unicode.IsLetter(c) && !unicode.IsDigit(c) { + return false + } + } + } + return true +} + +const ( + caseMask = ^byte(0x20) // Mask to ignore case in ASCII. + kelvin = '\u212a' + smallLongEss = '\u017f' +) + +// foldFunc returns one of four different case folding equivalence +// functions, from most general (and slow) to fastest: +// +// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8 +// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S') +// 3) asciiEqualFold, no special, but includes non-letters (including _) +// 4) simpleLetterEqualFold, no specials, no non-letters. 
+//
+// The letters S and K are special because they map to 3 runes, not just 2:
+// * S maps to s and to U+017F 'ſ' Latin small letter long s
+// * k maps to K and to U+212A 'K' Kelvin sign
+// See http://play.golang.org/p/tTxjOc0OGo
+//
+// The returned function is specialized for matching against s and
+// should only be given s. It's not curried for performance reasons.
+func foldFunc(s []byte) func(s, t []byte) bool {
+ nonLetter := false
+ special := false // special letter
+ for _, b := range s {
+ if b >= utf8.RuneSelf {
+ return bytes.EqualFold
+ }
+ upper := b & caseMask
+ if upper < 'A' || upper > 'Z' {
+ nonLetter = true
+ } else if upper == 'K' || upper == 'S' {
+ // See above for why these letters are special.
+ special = true
+ }
+ }
+ if special {
+ return equalFoldRight
+ }
+ if nonLetter {
+ return asciiEqualFold
+ }
+ return simpleLetterEqualFold
+}
+
+// equalFoldRight is a specialization of bytes.EqualFold when s is
+// known to be all ASCII (including punctuation), but contains an 's',
+// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
+// See comments on foldFunc.
+func equalFoldRight(s, t []byte) bool {
+ for _, sb := range s {
+ if len(t) == 0 {
+ return false
+ }
+ tb := t[0]
+ if tb < utf8.RuneSelf {
+ if sb != tb {
+ sbUpper := sb & caseMask
+ if 'A' <= sbUpper && sbUpper <= 'Z' {
+ if sbUpper != tb&caseMask {
+ return false
+ }
+ } else {
+ return false
+ }
+ }
+ t = t[1:]
+ continue
+ }
+ // sb is ASCII and t is not. t must be either kelvin
+ // sign or long s; sb must be s, S, k, or K.
+ tr, size := utf8.DecodeRune(t)
+ switch sb {
+ case 's', 'S':
+ if tr != smallLongEss {
+ return false
+ }
+ case 'k', 'K':
+ if tr != kelvin {
+ return false
+ }
+ default:
+ return false
+ }
+ t = t[size:]
+
+ }
+ if len(t) > 0 {
+ return false
+ }
+ return true
+}
+
+// asciiEqualFold is a specialization of bytes.EqualFold for use when
+// s is all ASCII (but may contain non-letters) and contains no
+// special-folding letters.
+// See comments on foldFunc.
+func asciiEqualFold(s, t []byte) bool {
+ if len(s) != len(t) {
+ return false
+ }
+ for i, sb := range s {
+ tb := t[i]
+ if sb == tb {
+ continue
+ }
+ if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
+ if sb&caseMask != tb&caseMask {
+ return false
+ }
+ } else {
+ return false
+ }
+ }
+ return true
+}
+
+// simpleLetterEqualFold is a specialization of bytes.EqualFold for
+// use when s is all ASCII letters (no underscores, etc) and also
+// doesn't contain 'k', 'K', 's', or 'S'.
+// See comments on foldFunc.
+func simpleLetterEqualFold(s, t []byte) bool {
+ if len(s) != len(t) {
+ return false
+ }
+ for i, b := range s {
+ if b&caseMask != t[i]&caseMask {
+ return false
+ }
+ }
+ return true
+}
+
+// tagOptions is the string following a comma in a struct field's "json"
+// tag, or the empty string. It does not include the leading comma.
+type tagOptions string
+
+// parseTag splits a struct field's json tag into its name and
+// comma-separated options.
+func parseTag(tag string) (string, tagOptions) {
+ if idx := strings.Index(tag, ","); idx != -1 {
+ return tag[:idx], tagOptions(tag[idx+1:])
+ }
+ return tag, tagOptions("")
+}
+
+// Contains reports whether a comma-separated list of options
+// contains a particular substr flag. substr must be surrounded by a
+// string boundary or commas.
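+// For example, tagOptions("omitempty,string").Contains("string") is true,
+// while tagOptions("omitempty,string").Contains("str") is false.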
+func (o tagOptions) Contains(optionName string) bool {
+ if len(o) == 0 {
+ return false
+ }
+ s := string(o)
+ for s != "" {
+ var next string
+ i := strings.Index(s, ",")
+ if i >= 0 {
+ s, next = s[:i], s[i+1:]
+ }
+ if s == optionName {
+ return true
+ }
+ s = next
+ }
+ return false
+}
diff --git a/vendor/github.com/ghodss/yaml/yaml.go b/vendor/github.com/ghodss/yaml/yaml.go
new file mode 100644
index 00000000000..4fb4054a8b7
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/yaml.go
@@ -0,0 +1,277 @@
+package yaml
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "strconv"
+
+ "gopkg.in/yaml.v2"
+)
+
+// Marshals the object into JSON then converts JSON to YAML and returns the
+// YAML.
+func Marshal(o interface{}) ([]byte, error) {
+ j, err := json.Marshal(o)
+ if err != nil {
+ return nil, fmt.Errorf("error marshaling into JSON: %v", err)
+ }
+
+ y, err := JSONToYAML(j)
+ if err != nil {
+ return nil, fmt.Errorf("error converting JSON to YAML: %v", err)
+ }
+
+ return y, nil
+}
+
+// Converts YAML to JSON then uses JSON to unmarshal into an object.
+func Unmarshal(y []byte, o interface{}) error {
+ vo := reflect.ValueOf(o)
+ j, err := yamlToJSON(y, &vo)
+ if err != nil {
+ return fmt.Errorf("error converting YAML to JSON: %v", err)
+ }
+
+ err = json.Unmarshal(j, o)
+ if err != nil {
+ return fmt.Errorf("error unmarshaling JSON: %v", err)
+ }
+
+ return nil
+}
+
+// Convert JSON to YAML.
+func JSONToYAML(j []byte) ([]byte, error) {
+ // Convert the JSON to an object.
+ var jsonObj interface{}
+ // We are using yaml.Unmarshal here (instead of json.Unmarshal) because the
+ // Go JSON library doesn't try to pick the right number type (int, float,
+ // etc.) when unmarshalling to interface{}, it just picks float64
+ // universally. go-yaml does go through the effort of picking the right
+ // number type, so we can preserve number type throughout this process.
+ err := yaml.Unmarshal(j, &jsonObj)
+ if err != nil {
+ return nil, err
+ }
+
+ // Marshal this object into YAML.
+ return yaml.Marshal(jsonObj)
+}
+
+// Convert YAML to JSON. Since JSON is a subset of YAML, passing JSON through
+// this method should be a no-op.
+//
+// Things YAML can do that are not supported by JSON:
+// * In YAML you can have binary and null keys in your maps. These are invalid
+// in JSON. (int and float keys are converted to strings.)
+// * Binary data in YAML with the !!binary tag is not supported. If you want to
+// use binary data with this library, encode the data as base64 as usual but do
+// not use the !!binary tag in your YAML. This will ensure the original base64
+// encoded data makes it all the way through to the JSON.
+func YAMLToJSON(y []byte) ([]byte, error) {
+ return yamlToJSON(y, nil)
+}
+
+func yamlToJSON(y []byte, jsonTarget *reflect.Value) ([]byte, error) {
+ // Convert the YAML to an object.
+ var yamlObj interface{}
+ err := yaml.Unmarshal(y, &yamlObj)
+ if err != nil {
+ return nil, err
+ }
+
+ // YAML objects are not completely compatible with JSON objects (e.g. you
+ // can have non-string keys in YAML). So, convert the YAML-compatible object
+ // to a JSON-compatible object, failing with an error if irrecoverable
+ // incompatibilities happen along the way.
+ jsonObj, err := convertToJSONableObject(yamlObj, jsonTarget)
+ if err != nil {
+ return nil, err
+ }
+
+ // Convert this object to JSON and return the data.
+ return json.Marshal(jsonObj)
+}
+
+func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) {
+ var err error
+
+ // Resolve jsonTarget to a concrete value (i.e. not a pointer or an
+ // interface). We pass decodingNull as false because we're not actually
+ // decoding into the value, we're just checking if the ultimate target is a
+ // string.
+ if jsonTarget != nil {
+ ju, tu, pv := indirect(*jsonTarget, false)
+ // We have a JSON or Text Unmarshaler at this level, so we can't be trying
+ // to decode into a string.
+ if ju != nil || tu != nil {
+ jsonTarget = nil
+ } else {
+ jsonTarget = &pv
+ }
+ }
+
+ // If yamlObj is a number or a boolean, check if jsonTarget is a string -
+ // if so, coerce. Else return normal.
+ // If yamlObj is a map or array, find the field that each key is
+ // unmarshaling to, and when you recurse pass the reflect.Value for that
+ // field back into this function.
+ switch typedYAMLObj := yamlObj.(type) {
+ case map[interface{}]interface{}:
+ // JSON does not support arbitrary keys in a map, so we must convert
+ // these keys to strings.
+ //
+ // From my reading of go-yaml v2 (specifically the resolve function),
+ // keys can only have the types string, int, int64, float64, binary
+ // (unsupported), or null (unsupported).
+ strMap := make(map[string]interface{})
+ for k, v := range typedYAMLObj {
+ // Resolve the key to a string first.
+ var keyString string
+ switch typedKey := k.(type) {
+ case string:
+ keyString = typedKey
+ case int:
+ keyString = strconv.Itoa(typedKey)
+ case int64:
+ // go-yaml will only return an int64 as a key if the system
+ // architecture is 32-bit and the key's value fits in 64 bits
+ // but not in 32 bits. Otherwise the key type will simply be int.
+ keyString = strconv.FormatInt(typedKey, 10)
+ case float64:
+ // Stolen from go-yaml to use the same conversion to string as
+ // the go-yaml library uses to convert float to string when
+ // Marshaling.
+ s := strconv.FormatFloat(typedKey, 'g', -1, 32)
+ switch s {
+ case "+Inf":
+ s = ".inf"
+ case "-Inf":
+ s = "-.inf"
+ case "NaN":
+ s = ".nan"
+ }
+ keyString = s
+ case bool:
+ if typedKey {
+ keyString = "true"
+ } else {
+ keyString = "false"
+ }
+ default:
+ return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v",
+ reflect.TypeOf(k), k, v)
+ }
+
+ // jsonTarget should be a struct or a map. If it's a struct, find
+ // the field it's going to map to and pass its reflect.Value. If
+ // it's a map, find the element type of the map and pass the
+ // reflect.Value created from that type. If it's neither, just pass
+ // nil - JSON conversion will error for us if it's a real issue.
+ if jsonTarget != nil {
+ t := *jsonTarget
+ if t.Kind() == reflect.Struct {
+ keyBytes := []byte(keyString)
+ // Find the field that the JSON library would use.
+ var f *field
+ fields := cachedTypeFields(t.Type())
+ for i := range fields {
+ ff := &fields[i]
+ if bytes.Equal(ff.nameBytes, keyBytes) {
+ f = ff
+ break
+ }
+ // Do case-insensitive comparison.
+ if f == nil && ff.equalFold(ff.nameBytes, keyBytes) {
+ f = ff
+ }
+ }
+ if f != nil {
+ // Find the reflect.Value of the most preferential
+ // struct field.
+ jtf := t.Field(f.index[0])
+ strMap[keyString], err = convertToJSONableObject(v, &jtf)
+ if err != nil {
+ return nil, err
+ }
+ continue
+ }
+ } else if t.Kind() == reflect.Map {
+ // Create a zero value of the map's element type to use as
+ // the JSON target.
+ jtv := reflect.Zero(t.Type().Elem()) + strMap[keyString], err = convertToJSONableObject(v, &jtv) + if err != nil { + return nil, err + } + continue + } + } + strMap[keyString], err = convertToJSONableObject(v, nil) + if err != nil { + return nil, err + } + } + return strMap, nil + case []interface{}: + // We need to recurse into arrays in case there are any + // map[interface{}]interface{}'s inside and to convert any + // numbers to strings. + + // If jsonTarget is a slice (which it really should be), find the + // thing it's going to map to. If it's not a slice, just pass nil + // - JSON conversion will error for us if it's a real issue. + var jsonSliceElemValue *reflect.Value + if jsonTarget != nil { + t := *jsonTarget + if t.Kind() == reflect.Slice { + // By default slices point to nil, but we need a reflect.Value + // pointing to a value of the slice type, so we create one here. + ev := reflect.Indirect(reflect.New(t.Type().Elem())) + jsonSliceElemValue = &ev + } + } + + // Make and use a new array. + arr := make([]interface{}, len(typedYAMLObj)) + for i, v := range typedYAMLObj { + arr[i], err = convertToJSONableObject(v, jsonSliceElemValue) + if err != nil { + return nil, err + } + } + return arr, nil + default: + // If the target type is a string and the YAML type is a number, + // convert the YAML type to a string. + if jsonTarget != nil && (*jsonTarget).Kind() == reflect.String { + // Based on my reading of go-yaml, it may return int, int64, + // float64, or uint64. + var s string + switch typedVal := typedYAMLObj.(type) { + case int: + s = strconv.FormatInt(int64(typedVal), 10) + case int64: + s = strconv.FormatInt(typedVal, 10) + case float64: + s = strconv.FormatFloat(typedVal, 'g', -1, 32) + case uint64: + s = strconv.FormatUint(typedVal, 10) + case bool: + if typedVal { + s = "true" + } else { + s = "false" + } + } + if len(s) > 0 { + yamlObj = interface{}(s) + } + } + return yamlObj, nil + } + + return nil, nil +} diff --git a/vendor/github.com/go-jose/go-jose/v3/.gitignore b/vendor/github.com/go-jose/go-jose/v3/.gitignore new file mode 100644 index 00000000000..eb29ebaefd8 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/.gitignore @@ -0,0 +1,2 @@ +jose-util/jose-util +jose-util.t.err \ No newline at end of file diff --git a/vendor/github.com/go-jose/go-jose/v3/.golangci.yml b/vendor/github.com/go-jose/go-jose/v3/.golangci.yml new file mode 100644 index 00000000000..2a577a8f95b --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/.golangci.yml @@ -0,0 +1,53 @@ +# https://github.com/golangci/golangci-lint + +run: + skip-files: + - doc_test.go + modules-download-mode: readonly + +linters: + enable-all: true + disable: + - gochecknoglobals + - goconst + - lll + - maligned + - nakedret + - scopelint + - unparam + - funlen # added in 1.18 (requires go-jose changes before it can be enabled) + +linters-settings: + gocyclo: + min-complexity: 35 + +issues: + exclude-rules: + - text: "don't use ALL_CAPS in Go names" + linters: + - golint + - text: "hardcoded credentials" + linters: + - gosec + - text: "weak cryptographic primitive" + linters: + - gosec + - path: json/ + linters: + - dupl + - errcheck + - gocritic + - gocyclo + - golint + - govet + - ineffassign + - staticcheck + - structcheck + - stylecheck + - unused + - path: _test\.go + linters: + - scopelint + - path: jwk.go + linters: + - gocyclo diff --git a/vendor/github.com/go-jose/go-jose/v3/.travis.yml b/vendor/github.com/go-jose/go-jose/v3/.travis.yml new file mode 100644 index 00000000000..48de631b003 --- 
/dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/.travis.yml @@ -0,0 +1,33 @@ +language: go + +matrix: + fast_finish: true + allow_failures: + - go: tip + +go: + - "1.13.x" + - "1.14.x" + - tip + +before_script: + - export PATH=$HOME/.local/bin:$PATH + +before_install: + - go get -u github.com/mattn/goveralls github.com/wadey/gocovmerge + - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.18.0 + - pip install cram --user + +script: + - go test -v -covermode=count -coverprofile=profile.cov . + - go test -v -covermode=count -coverprofile=cryptosigner/profile.cov ./cryptosigner + - go test -v -covermode=count -coverprofile=cipher/profile.cov ./cipher + - go test -v -covermode=count -coverprofile=jwt/profile.cov ./jwt + - go test -v ./json # no coverage for forked encoding/json package + - golangci-lint run + - cd jose-util && go build && PATH=$PWD:$PATH cram -v jose-util.t # cram tests jose-util + - cd .. + +after_success: + - gocovmerge *.cov */*.cov > merged.coverprofile + - goveralls -coverprofile merged.coverprofile -service=travis-ci diff --git a/vendor/github.com/go-jose/go-jose/v3/BUG-BOUNTY.md b/vendor/github.com/go-jose/go-jose/v3/BUG-BOUNTY.md new file mode 100644 index 00000000000..3305db0f653 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/BUG-BOUNTY.md @@ -0,0 +1,10 @@ +Serious about security +====================== + +Square recognizes the important contributions the security research community +can make. We therefore encourage reporting security issues with the code +contained in this repository. + +If you believe you have discovered a security vulnerability, please follow the +guidelines at . + diff --git a/vendor/github.com/go-jose/go-jose/v3/CONTRIBUTING.md b/vendor/github.com/go-jose/go-jose/v3/CONTRIBUTING.md new file mode 100644 index 00000000000..b63e1f8fee5 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/CONTRIBUTING.md @@ -0,0 +1,15 @@ +# Contributing + +If you would like to contribute code to go-jose you can do so through GitHub by +forking the repository and sending a pull request. + +When submitting code, please make every effort to follow existing conventions +and style in order to keep the code as readable as possible. Please also make +sure all tests pass by running `go test`, and format your code with `go fmt`. +We also recommend using `golint` and `errcheck`. + +Before your code can be accepted into the project you must also sign the +Individual Contributor License Agreement. We use [cla-assistant.io][1] and you +will be prompted to sign once a pull request is opened. + +[1]: https://cla-assistant.io/ diff --git a/vendor/github.com/go-jose/go-jose/v3/LICENSE b/vendor/github.com/go-jose/go-jose/v3/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/go-jose/go-jose/v3/README.md b/vendor/github.com/go-jose/go-jose/v3/README.md new file mode 100644 index 00000000000..b90c7e5c6ba --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/README.md @@ -0,0 +1,122 @@ +# Go JOSE + +[![godoc](http://img.shields.io/badge/godoc-jose_package-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2) +[![godoc](http://img.shields.io/badge/godoc-jwt_package-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2/jwt) +[![license](http://img.shields.io/badge/license-apache_2.0-blue.svg?style=flat)](https://raw.githubusercontent.com/go-jose/go-jose/master/LICENSE) +[![build](https://travis-ci.org/go-jose/go-jose.svg?branch=master)](https://travis-ci.org/go-jose/go-jose) +[![coverage](https://coveralls.io/repos/github/go-jose/go-jose/badge.svg?branch=master)](https://coveralls.io/r/go-jose/go-jose) + +Package jose aims to provide an implementation of the Javascript Object Signing +and Encryption set of standards. This includes support for JSON Web Encryption, +JSON Web Signature, and JSON Web Token standards. + +**Disclaimer**: This library contains encryption software that is subject to +the U.S. Export Administration Regulations. You may not export, re-export, +transfer or download this code or any part of it in violation of any United +States law, directive or regulation. In particular this software may not be +exported or re-exported in any form or on any media to Iran, North Sudan, +Syria, Cuba, or North Korea, or to denied persons or entities mentioned on any +US maintained blocked list. + +## Overview + +The implementation follows the +[JSON Web Encryption](http://dx.doi.org/10.17487/RFC7516) (RFC 7516), +[JSON Web Signature](http://dx.doi.org/10.17487/RFC7515) (RFC 7515), and +[JSON Web Token](http://dx.doi.org/10.17487/RFC7519) (RFC 7519) specifications. +Tables of supported algorithms are shown below. The library supports both +the compact and JWS/JWE JSON Serialization formats, and has optional support for +multiple recipients. It also comes with a small command-line utility +([`jose-util`](https://github.com/go-jose/go-jose/tree/master/jose-util)) +for dealing with JOSE messages in a shell. + +**Note**: We use a forked version of the `encoding/json` package from the Go +standard library which uses case-sensitive matching for member names (instead +of [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html)). +This is to avoid differences in interpretation of messages between go-jose and +libraries in other languages. + +### Versions + +[Version 2](https://gopkg.in/go-jose/go-jose.v2) +([branch](https://github.com/go-jose/go-jose/tree/v2), +[doc](https://godoc.org/gopkg.in/go-jose/go-jose.v2)) is the current stable version: + + import "gopkg.in/go-jose/go-jose.v2" + +[Version 3](https://github.com/go-jose/go-jose) +([branch](https://github.com/go-jose/go-jose/tree/master), +[doc](https://godoc.org/github.com/go-jose/go-jose)) is the under development/unstable version (not released yet): + + import "github.com/go-jose/go-jose/v3" + +All new feature development takes place on the `master` branch, which we are +preparing to release as version 3 soon. Version 2 will continue to receive +critical bug and security fixes. Note that starting with version 3 we are +using Go modules for versioning instead of `gopkg.in` as before. Version 3 also will require Go version 1.13 or higher. + +Version 1 (on the `v1` branch) is frozen and not supported anymore. 
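+
+For a quick orientation, a minimal sign-and-verify round trip looks roughly
+like the sketch below (a hedged illustration rather than the canonical Godoc
+example; the throwaway key and `panic`-style error handling are for brevity
+only):
+
+```go
+package main
+
+import (
+	"crypto/rand"
+	"crypto/rsa"
+	"fmt"
+
+	"github.com/go-jose/go-jose/v3"
+)
+
+func main() {
+	// Throwaway RSA key for illustration.
+	key, err := rsa.GenerateKey(rand.Reader, 2048)
+	if err != nil {
+		panic(err)
+	}
+
+	// Sign a payload with RS256.
+	signer, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.RS256, Key: key}, nil)
+	if err != nil {
+		panic(err)
+	}
+	object, err := signer.Sign([]byte("Lorem ipsum dolor sit amet"))
+	if err != nil {
+		panic(err)
+	}
+
+	// Compact serialization is the familiar dotted JWS format.
+	serialized, err := object.CompactSerialize()
+	if err != nil {
+		panic(err)
+	}
+
+	// Parse and verify against the public key; Verify returns the payload.
+	parsed, err := jose.ParseSigned(serialized)
+	if err != nil {
+		panic(err)
+	}
+	payload, err := parsed.Verify(&key.PublicKey)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(string(payload))
+}
+```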
+
+### Supported algorithms
+
+See below for a table of supported algorithms. Algorithm identifiers match
+the names in the [JSON Web Algorithms](http://dx.doi.org/10.17487/RFC7518)
+standard where possible. The Godoc reference has a list of constants.
+
+ Key encryption | Algorithm identifier(s)
+ :------------------------- | :------------------------------
+ RSA-PKCS#1v1.5 | RSA1_5
+ RSA-OAEP | RSA-OAEP, RSA-OAEP-256
+ AES key wrap | A128KW, A192KW, A256KW
+ AES-GCM key wrap | A128GCMKW, A192GCMKW, A256GCMKW
+ ECDH-ES + AES key wrap | ECDH-ES+A128KW, ECDH-ES+A192KW, ECDH-ES+A256KW
+ ECDH-ES (direct) | ECDH-ES<sup>1</sup>
+ Direct encryption | dir<sup>1</sup>
+
+1. Not supported in multi-recipient mode
+
+ Signing / MAC | Algorithm identifier(s)
+ :------------------------- | :------------------------------
+ RSASSA-PKCS#1v1.5 | RS256, RS384, RS512
+ RSASSA-PSS | PS256, PS384, PS512
+ HMAC | HS256, HS384, HS512
+ ECDSA | ES256, ES384, ES512
+ Ed25519 | EdDSA<sup>2</sup>
+
+2. Only available in version 2 of the package
+
+ Content encryption | Algorithm identifier(s)
+ :------------------------- | :------------------------------
+ AES-CBC+HMAC | A128CBC-HS256, A192CBC-HS384, A256CBC-HS512
+ AES-GCM | A128GCM, A192GCM, A256GCM
+
+ Compression | Algorithm identifier(s)
+ :------------------------- | -------------------------------
+ DEFLATE (RFC 1951) | DEF
+
+### Supported key types
+
+See below for a table of supported key types. These are understood by the
+library, and can be passed to corresponding functions such as `NewEncrypter` or
+`NewSigner`. Each of these keys can also be wrapped in a JWK if desired, which
+allows attaching a key id.
+
+ Algorithm(s) | Corresponding types
+ :------------------------- | -------------------------------
+ RSA | *[rsa.PublicKey](http://golang.org/pkg/crypto/rsa/#PublicKey), *[rsa.PrivateKey](http://golang.org/pkg/crypto/rsa/#PrivateKey)
+ ECDH, ECDSA | *[ecdsa.PublicKey](http://golang.org/pkg/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](http://golang.org/pkg/crypto/ecdsa/#PrivateKey)
+ EdDSA<sup>1</sup> | [ed25519.PublicKey](https://godoc.org/pkg/crypto/ed25519#PublicKey), [ed25519.PrivateKey](https://godoc.org/pkg/crypto/ed25519#PrivateKey)
+ AES, HMAC | []byte
+
+1. Only available in version 2 or later of the package
+
+## Examples
+
+[![godoc](http://img.shields.io/badge/godoc-jose_package-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2)
+[![godoc](http://img.shields.io/badge/godoc-jwt_package-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2/jwt)
+
+Examples can be found in the Godoc
+reference for this package. The
+[`jose-util`](https://github.com/go-jose/go-jose/tree/master/jose-util)
+subdirectory also contains a small command-line utility which might be useful
+as an example as well.
diff --git a/vendor/github.com/go-jose/go-jose/v3/asymmetric.go b/vendor/github.com/go-jose/go-jose/v3/asymmetric.go
new file mode 100644
index 00000000000..78abc326830
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v3/asymmetric.go
@@ -0,0 +1,592 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package jose + +import ( + "crypto" + "crypto/aes" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + "errors" + "fmt" + "math/big" + + josecipher "github.com/go-jose/go-jose/v3/cipher" + "github.com/go-jose/go-jose/v3/json" +) + +// A generic RSA-based encrypter/verifier +type rsaEncrypterVerifier struct { + publicKey *rsa.PublicKey +} + +// A generic RSA-based decrypter/signer +type rsaDecrypterSigner struct { + privateKey *rsa.PrivateKey +} + +// A generic EC-based encrypter/verifier +type ecEncrypterVerifier struct { + publicKey *ecdsa.PublicKey +} + +type edEncrypterVerifier struct { + publicKey ed25519.PublicKey +} + +// A key generator for ECDH-ES +type ecKeyGenerator struct { + size int + algID string + publicKey *ecdsa.PublicKey +} + +// A generic EC-based decrypter/signer +type ecDecrypterSigner struct { + privateKey *ecdsa.PrivateKey +} + +type edDecrypterSigner struct { + privateKey ed25519.PrivateKey +} + +// newRSARecipient creates recipientKeyInfo based on the given key. +func newRSARecipient(keyAlg KeyAlgorithm, publicKey *rsa.PublicKey) (recipientKeyInfo, error) { + // Verify that key management algorithm is supported by this encrypter + switch keyAlg { + case RSA1_5, RSA_OAEP, RSA_OAEP_256: + default: + return recipientKeyInfo{}, ErrUnsupportedAlgorithm + } + + if publicKey == nil { + return recipientKeyInfo{}, errors.New("invalid public key") + } + + return recipientKeyInfo{ + keyAlg: keyAlg, + keyEncrypter: &rsaEncrypterVerifier{ + publicKey: publicKey, + }, + }, nil +} + +// newRSASigner creates a recipientSigInfo based on the given key. +func newRSASigner(sigAlg SignatureAlgorithm, privateKey *rsa.PrivateKey) (recipientSigInfo, error) { + // Verify that key management algorithm is supported by this encrypter + switch sigAlg { + case RS256, RS384, RS512, PS256, PS384, PS512: + default: + return recipientSigInfo{}, ErrUnsupportedAlgorithm + } + + if privateKey == nil { + return recipientSigInfo{}, errors.New("invalid private key") + } + + return recipientSigInfo{ + sigAlg: sigAlg, + publicKey: staticPublicKey(&JSONWebKey{ + Key: privateKey.Public(), + }), + signer: &rsaDecrypterSigner{ + privateKey: privateKey, + }, + }, nil +} + +func newEd25519Signer(sigAlg SignatureAlgorithm, privateKey ed25519.PrivateKey) (recipientSigInfo, error) { + if sigAlg != EdDSA { + return recipientSigInfo{}, ErrUnsupportedAlgorithm + } + + if privateKey == nil { + return recipientSigInfo{}, errors.New("invalid private key") + } + return recipientSigInfo{ + sigAlg: sigAlg, + publicKey: staticPublicKey(&JSONWebKey{ + Key: privateKey.Public(), + }), + signer: &edDecrypterSigner{ + privateKey: privateKey, + }, + }, nil +} + +// newECDHRecipient creates recipientKeyInfo based on the given key. 
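+// The point is checked to lie on its curve before use; this rejects
+// malformed keys and guards against invalid-curve attacks during the
+// ECDH derivation.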
+func newECDHRecipient(keyAlg KeyAlgorithm, publicKey *ecdsa.PublicKey) (recipientKeyInfo, error) {
+ // Verify that key management algorithm is supported by this encrypter
+ switch keyAlg {
+ case ECDH_ES, ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW:
+ default:
+ return recipientKeyInfo{}, ErrUnsupportedAlgorithm
+ }
+
+ if publicKey == nil || !publicKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) {
+ return recipientKeyInfo{}, errors.New("invalid public key")
+ }
+
+ return recipientKeyInfo{
+ keyAlg: keyAlg,
+ keyEncrypter: &ecEncrypterVerifier{
+ publicKey: publicKey,
+ },
+ }, nil
+}
+
+// newECDSASigner creates a recipientSigInfo based on the given key.
+func newECDSASigner(sigAlg SignatureAlgorithm, privateKey *ecdsa.PrivateKey) (recipientSigInfo, error) {
+ // Verify that key management algorithm is supported by this encrypter
+ switch sigAlg {
+ case ES256, ES384, ES512:
+ default:
+ return recipientSigInfo{}, ErrUnsupportedAlgorithm
+ }
+
+ if privateKey == nil {
+ return recipientSigInfo{}, errors.New("invalid private key")
+ }
+
+ return recipientSigInfo{
+ sigAlg: sigAlg,
+ publicKey: staticPublicKey(&JSONWebKey{
+ Key: privateKey.Public(),
+ }),
+ signer: &ecDecrypterSigner{
+ privateKey: privateKey,
+ },
+ }, nil
+}
+
+// Encrypt the given payload and update the object.
+func (ctx rsaEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) {
+ encryptedKey, err := ctx.encrypt(cek, alg)
+ if err != nil {
+ return recipientInfo{}, err
+ }
+
+ return recipientInfo{
+ encryptedKey: encryptedKey,
+ header: &rawHeader{},
+ }, nil
+}
+
+// Encrypt the given payload. Based on the key encryption algorithm,
+// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256).
+func (ctx rsaEncrypterVerifier) encrypt(cek []byte, alg KeyAlgorithm) ([]byte, error) {
+ switch alg {
+ case RSA1_5:
+ return rsa.EncryptPKCS1v15(RandReader, ctx.publicKey, cek)
+ case RSA_OAEP:
+ return rsa.EncryptOAEP(sha1.New(), RandReader, ctx.publicKey, cek, []byte{})
+ case RSA_OAEP_256:
+ return rsa.EncryptOAEP(sha256.New(), RandReader, ctx.publicKey, cek, []byte{})
+ }
+
+ return nil, ErrUnsupportedAlgorithm
+}
+
+// Decrypt the given payload and return the content encryption key.
+func (ctx rsaDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) {
+ return ctx.decrypt(recipient.encryptedKey, headers.getAlgorithm(), generator)
+}
+
+// Decrypt the given payload. Based on the key encryption algorithm,
+// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256).
+func (ctx rsaDecrypterSigner) decrypt(jek []byte, alg KeyAlgorithm, generator keyGenerator) ([]byte, error) {
+ // Note: The random reader on decrypt operations is only used for blinding,
+ // so stubbing is meaningless (hence the direct use of rand.Reader).
+ switch alg {
+ case RSA1_5:
+ defer func() {
+ // DecryptPKCS1v15SessionKey sometimes panics on an invalid payload
+ // because of an index out of bounds error, which we want to ignore.
+ // This has been fixed in Go 1.3.1 (released 2014/08/13), the recover()
+ // only exists for preventing crashes with unpatched versions.
+ // See: https://groups.google.com/forum/#!topic/golang-dev/7ihX6Y6kx9k
+ // See: https://code.google.com/p/go/source/detail?r=58ee390ff31602edb66af41ed10901ec95904d33
+ _ = recover()
+ }()
+
+ // Perform some input validation.
+ keyBytes := ctx.privateKey.PublicKey.N.BitLen() / 8 + if keyBytes != len(jek) { + // Input size is incorrect, the encrypted payload should always match + // the size of the public modulus (e.g. using a 2048 bit key will + // produce 256 bytes of output). Reject this since it's invalid input. + return nil, ErrCryptoFailure + } + + cek, _, err := generator.genKey() + if err != nil { + return nil, ErrCryptoFailure + } + + // When decrypting an RSA-PKCS1v1.5 payload, we must take precautions to + // prevent chosen-ciphertext attacks as described in RFC 3218, "Preventing + // the Million Message Attack on Cryptographic Message Syntax". We are + // therefore deliberately ignoring errors here. + _ = rsa.DecryptPKCS1v15SessionKey(rand.Reader, ctx.privateKey, jek, cek) + + return cek, nil + case RSA_OAEP: + // Use rand.Reader for RSA blinding + return rsa.DecryptOAEP(sha1.New(), rand.Reader, ctx.privateKey, jek, []byte{}) + case RSA_OAEP_256: + // Use rand.Reader for RSA blinding + return rsa.DecryptOAEP(sha256.New(), rand.Reader, ctx.privateKey, jek, []byte{}) + } + + return nil, ErrUnsupportedAlgorithm +} + +// Sign the given payload +func (ctx rsaDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { + var hash crypto.Hash + + switch alg { + case RS256, PS256: + hash = crypto.SHA256 + case RS384, PS384: + hash = crypto.SHA384 + case RS512, PS512: + hash = crypto.SHA512 + default: + return Signature{}, ErrUnsupportedAlgorithm + } + + hasher := hash.New() + + // According to documentation, Write() on hash never fails + _, _ = hasher.Write(payload) + hashed := hasher.Sum(nil) + + var out []byte + var err error + + switch alg { + case RS256, RS384, RS512: + out, err = rsa.SignPKCS1v15(RandReader, ctx.privateKey, hash, hashed) + case PS256, PS384, PS512: + out, err = rsa.SignPSS(RandReader, ctx.privateKey, hash, hashed, &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthEqualsHash, + }) + } + + if err != nil { + return Signature{}, err + } + + return Signature{ + Signature: out, + protected: &rawHeader{}, + }, nil +} + +// Verify the given payload +func (ctx rsaEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { + var hash crypto.Hash + + switch alg { + case RS256, PS256: + hash = crypto.SHA256 + case RS384, PS384: + hash = crypto.SHA384 + case RS512, PS512: + hash = crypto.SHA512 + default: + return ErrUnsupportedAlgorithm + } + + hasher := hash.New() + + // According to documentation, Write() on hash never fails + _, _ = hasher.Write(payload) + hashed := hasher.Sum(nil) + + switch alg { + case RS256, RS384, RS512: + return rsa.VerifyPKCS1v15(ctx.publicKey, hash, hashed, signature) + case PS256, PS384, PS512: + return rsa.VerifyPSS(ctx.publicKey, hash, hashed, signature, nil) + } + + return ErrUnsupportedAlgorithm +} + +// Encrypt the given payload and update the object. +func (ctx ecEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) { + switch alg { + case ECDH_ES: + // ECDH-ES mode doesn't wrap a key, the shared secret is used directly as the key. 
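+ // The recipient recomputes the same shared secret from the ephemeral
+ // public key sent in the epk header, so no encrypted key needs to be
+ // included in the recipientInfo.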
+ return recipientInfo{ + header: &rawHeader{}, + }, nil + case ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW: + default: + return recipientInfo{}, ErrUnsupportedAlgorithm + } + + generator := ecKeyGenerator{ + algID: string(alg), + publicKey: ctx.publicKey, + } + + switch alg { + case ECDH_ES_A128KW: + generator.size = 16 + case ECDH_ES_A192KW: + generator.size = 24 + case ECDH_ES_A256KW: + generator.size = 32 + } + + kek, header, err := generator.genKey() + if err != nil { + return recipientInfo{}, err + } + + block, err := aes.NewCipher(kek) + if err != nil { + return recipientInfo{}, err + } + + jek, err := josecipher.KeyWrap(block, cek) + if err != nil { + return recipientInfo{}, err + } + + return recipientInfo{ + encryptedKey: jek, + header: &header, + }, nil +} + +// Get key size for EC key generator +func (ctx ecKeyGenerator) keySize() int { + return ctx.size +} + +// Get a content encryption key for ECDH-ES +func (ctx ecKeyGenerator) genKey() ([]byte, rawHeader, error) { + priv, err := ecdsa.GenerateKey(ctx.publicKey.Curve, RandReader) + if err != nil { + return nil, rawHeader{}, err + } + + out := josecipher.DeriveECDHES(ctx.algID, []byte{}, []byte{}, priv, ctx.publicKey, ctx.size) + + b, err := json.Marshal(&JSONWebKey{ + Key: &priv.PublicKey, + }) + if err != nil { + return nil, nil, err + } + + headers := rawHeader{ + headerEPK: makeRawMessage(b), + } + + return out, headers, nil +} + +// Decrypt the given payload and return the content encryption key. +func (ctx ecDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) { + epk, err := headers.getEPK() + if err != nil { + return nil, errors.New("go-jose/go-jose: invalid epk header") + } + if epk == nil { + return nil, errors.New("go-jose/go-jose: missing epk header") + } + + publicKey, ok := epk.Key.(*ecdsa.PublicKey) + if publicKey == nil || !ok { + return nil, errors.New("go-jose/go-jose: invalid epk header") + } + + if !ctx.privateKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) { + return nil, errors.New("go-jose/go-jose: invalid public key in epk header") + } + + apuData, err := headers.getAPU() + if err != nil { + return nil, errors.New("go-jose/go-jose: invalid apu header") + } + apvData, err := headers.getAPV() + if err != nil { + return nil, errors.New("go-jose/go-jose: invalid apv header") + } + + deriveKey := func(algID string, size int) []byte { + return josecipher.DeriveECDHES(algID, apuData.bytes(), apvData.bytes(), ctx.privateKey, publicKey, size) + } + + var keySize int + + algorithm := headers.getAlgorithm() + switch algorithm { + case ECDH_ES: + // ECDH-ES uses direct key agreement, no key unwrapping necessary. 
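+ // Per RFC 7518 §4.6.2, the ConcatKDF AlgorithmID is the "enc" value in
+ // direct mode, but the "alg" value when the derived key wraps a CEK
+ // (the key sizes handled below).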
+ return deriveKey(string(headers.getEncryption()), generator.keySize()), nil + case ECDH_ES_A128KW: + keySize = 16 + case ECDH_ES_A192KW: + keySize = 24 + case ECDH_ES_A256KW: + keySize = 32 + default: + return nil, ErrUnsupportedAlgorithm + } + + key := deriveKey(string(algorithm), keySize) + block, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + + return josecipher.KeyUnwrap(block, recipient.encryptedKey) +} + +func (ctx edDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { + if alg != EdDSA { + return Signature{}, ErrUnsupportedAlgorithm + } + + sig, err := ctx.privateKey.Sign(RandReader, payload, crypto.Hash(0)) + if err != nil { + return Signature{}, err + } + + return Signature{ + Signature: sig, + protected: &rawHeader{}, + }, nil +} + +func (ctx edEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { + if alg != EdDSA { + return ErrUnsupportedAlgorithm + } + ok := ed25519.Verify(ctx.publicKey, payload, signature) + if !ok { + return errors.New("go-jose/go-jose: ed25519 signature failed to verify") + } + return nil +} + +// Sign the given payload +func (ctx ecDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { + var expectedBitSize int + var hash crypto.Hash + + switch alg { + case ES256: + expectedBitSize = 256 + hash = crypto.SHA256 + case ES384: + expectedBitSize = 384 + hash = crypto.SHA384 + case ES512: + expectedBitSize = 521 + hash = crypto.SHA512 + } + + curveBits := ctx.privateKey.Curve.Params().BitSize + if expectedBitSize != curveBits { + return Signature{}, fmt.Errorf("go-jose/go-jose: expected %d bit key, got %d bits instead", expectedBitSize, curveBits) + } + + hasher := hash.New() + + // According to documentation, Write() on hash never fails + _, _ = hasher.Write(payload) + hashed := hasher.Sum(nil) + + r, s, err := ecdsa.Sign(RandReader, ctx.privateKey, hashed) + if err != nil { + return Signature{}, err + } + + keyBytes := curveBits / 8 + if curveBits%8 > 0 { + keyBytes++ + } + + // We serialize the outputs (r and s) into big-endian byte arrays and pad + // them with zeros on the left to make sure the sizes work out. Both arrays + // must be keyBytes long, and the output must be 2*keyBytes long. + rBytes := r.Bytes() + rBytesPadded := make([]byte, keyBytes) + copy(rBytesPadded[keyBytes-len(rBytes):], rBytes) + + sBytes := s.Bytes() + sBytesPadded := make([]byte, keyBytes) + copy(sBytesPadded[keyBytes-len(sBytes):], sBytes) + + out := append(rBytesPadded, sBytesPadded...) 
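+ // For example, ES256 over P-256 gives keyBytes == 32, so the JWS
+ // signature is always exactly 64 bytes: R (32 bytes) || S (32 bytes).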
+ + return Signature{ + Signature: out, + protected: &rawHeader{}, + }, nil +} + +// Verify the given payload +func (ctx ecEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { + var keySize int + var hash crypto.Hash + + switch alg { + case ES256: + keySize = 32 + hash = crypto.SHA256 + case ES384: + keySize = 48 + hash = crypto.SHA384 + case ES512: + keySize = 66 + hash = crypto.SHA512 + default: + return ErrUnsupportedAlgorithm + } + + if len(signature) != 2*keySize { + return fmt.Errorf("go-jose/go-jose: invalid signature size, have %d bytes, wanted %d", len(signature), 2*keySize) + } + + hasher := hash.New() + + // According to documentation, Write() on hash never fails + _, _ = hasher.Write(payload) + hashed := hasher.Sum(nil) + + r := big.NewInt(0).SetBytes(signature[:keySize]) + s := big.NewInt(0).SetBytes(signature[keySize:]) + + match := ecdsa.Verify(ctx.publicKey, hashed, r, s) + if !match { + return errors.New("go-jose/go-jose: ecdsa signature failed to verify") + } + + return nil +} diff --git a/vendor/github.com/go-jose/go-jose/v3/cipher/cbc_hmac.go b/vendor/github.com/go-jose/go-jose/v3/cipher/cbc_hmac.go new file mode 100644 index 00000000000..af029cec0ba --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/cipher/cbc_hmac.go @@ -0,0 +1,196 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package josecipher + +import ( + "bytes" + "crypto/cipher" + "crypto/hmac" + "crypto/sha256" + "crypto/sha512" + "crypto/subtle" + "encoding/binary" + "errors" + "hash" +) + +const ( + nonceBytes = 16 +) + +// NewCBCHMAC instantiates a new AEAD based on CBC+HMAC. +func NewCBCHMAC(key []byte, newBlockCipher func([]byte) (cipher.Block, error)) (cipher.AEAD, error) { + keySize := len(key) / 2 + integrityKey := key[:keySize] + encryptionKey := key[keySize:] + + blockCipher, err := newBlockCipher(encryptionKey) + if err != nil { + return nil, err + } + + var hash func() hash.Hash + switch keySize { + case 16: + hash = sha256.New + case 24: + hash = sha512.New384 + case 32: + hash = sha512.New + } + + return &cbcAEAD{ + hash: hash, + blockCipher: blockCipher, + authtagBytes: keySize, + integrityKey: integrityKey, + }, nil +} + +// An AEAD based on CBC+HMAC +type cbcAEAD struct { + hash func() hash.Hash + authtagBytes int + integrityKey []byte + blockCipher cipher.Block +} + +func (ctx *cbcAEAD) NonceSize() int { + return nonceBytes +} + +func (ctx *cbcAEAD) Overhead() int { + // Maximum overhead is block size (for padding) plus auth tag length, where + // the length of the auth tag is equivalent to the key size. + return ctx.blockCipher.BlockSize() + ctx.authtagBytes +} + +// Seal encrypts and authenticates the plaintext. +func (ctx *cbcAEAD) Seal(dst, nonce, plaintext, data []byte) []byte { + // Output buffer -- must take care not to mangle plaintext input. 
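+ // Seal follows the encrypt-then-MAC construction of RFC 7518 §5.2:
+ // pad the plaintext (PKCS #7), CBC-encrypt it, then append an HMAC over
+ // AAD || IV || ciphertext || 64-bit AAD bit length, truncated to half
+ // the combined key size.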
+ ciphertext := make([]byte, uint64(len(plaintext))+uint64(ctx.Overhead()))[:len(plaintext)] + copy(ciphertext, plaintext) + ciphertext = padBuffer(ciphertext, ctx.blockCipher.BlockSize()) + + cbc := cipher.NewCBCEncrypter(ctx.blockCipher, nonce) + + cbc.CryptBlocks(ciphertext, ciphertext) + authtag := ctx.computeAuthTag(data, nonce, ciphertext) + + ret, out := resize(dst, uint64(len(dst))+uint64(len(ciphertext))+uint64(len(authtag))) + copy(out, ciphertext) + copy(out[len(ciphertext):], authtag) + + return ret +} + +// Open decrypts and authenticates the ciphertext. +func (ctx *cbcAEAD) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) { + if len(ciphertext) < ctx.authtagBytes { + return nil, errors.New("go-jose/go-jose: invalid ciphertext (too short)") + } + + offset := len(ciphertext) - ctx.authtagBytes + expectedTag := ctx.computeAuthTag(data, nonce, ciphertext[:offset]) + match := subtle.ConstantTimeCompare(expectedTag, ciphertext[offset:]) + if match != 1 { + return nil, errors.New("go-jose/go-jose: invalid ciphertext (auth tag mismatch)") + } + + cbc := cipher.NewCBCDecrypter(ctx.blockCipher, nonce) + + // Make copy of ciphertext buffer, don't want to modify in place + buffer := append([]byte{}, ciphertext[:offset]...) + + if len(buffer)%ctx.blockCipher.BlockSize() > 0 { + return nil, errors.New("go-jose/go-jose: invalid ciphertext (invalid length)") + } + + cbc.CryptBlocks(buffer, buffer) + + // Remove padding + plaintext, err := unpadBuffer(buffer, ctx.blockCipher.BlockSize()) + if err != nil { + return nil, err + } + + ret, out := resize(dst, uint64(len(dst))+uint64(len(plaintext))) + copy(out, plaintext) + + return ret, nil +} + +// Compute an authentication tag +func (ctx *cbcAEAD) computeAuthTag(aad, nonce, ciphertext []byte) []byte { + buffer := make([]byte, uint64(len(aad))+uint64(len(nonce))+uint64(len(ciphertext))+8) + n := 0 + n += copy(buffer, aad) + n += copy(buffer[n:], nonce) + n += copy(buffer[n:], ciphertext) + binary.BigEndian.PutUint64(buffer[n:], uint64(len(aad))*8) + + // According to documentation, Write() on hash.Hash never fails. + hmac := hmac.New(ctx.hash, ctx.integrityKey) + _, _ = hmac.Write(buffer) + + return hmac.Sum(nil)[:ctx.authtagBytes] +} + +// resize ensures that the given slice has a capacity of at least n bytes. +// If the capacity of the slice is less than n, a new slice is allocated +// and the existing data will be copied. 
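+// The tail return value aliases the region immediately after the original
+// data, so callers can write output in place. A usage sketch (dst and extra
+// are hypothetical byte slices):
+//
+//	ret, out := resize(dst, uint64(len(dst))+uint64(len(extra)))
+//	copy(out, extra) // ret now holds dst followed by extra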
+func resize(in []byte, n uint64) (head, tail []byte) { + if uint64(cap(in)) >= n { + head = in[:n] + } else { + head = make([]byte, n) + copy(head, in) + } + + tail = head[len(in):] + return +} + +// Apply padding +func padBuffer(buffer []byte, blockSize int) []byte { + missing := blockSize - (len(buffer) % blockSize) + ret, out := resize(buffer, uint64(len(buffer))+uint64(missing)) + padding := bytes.Repeat([]byte{byte(missing)}, missing) + copy(out, padding) + return ret +} + +// Remove padding +func unpadBuffer(buffer []byte, blockSize int) ([]byte, error) { + if len(buffer)%blockSize != 0 { + return nil, errors.New("go-jose/go-jose: invalid padding") + } + + last := buffer[len(buffer)-1] + count := int(last) + + if count == 0 || count > blockSize || count > len(buffer) { + return nil, errors.New("go-jose/go-jose: invalid padding") + } + + padding := bytes.Repeat([]byte{last}, count) + if !bytes.HasSuffix(buffer, padding) { + return nil, errors.New("go-jose/go-jose: invalid padding") + } + + return buffer[:len(buffer)-count], nil +} diff --git a/vendor/github.com/go-jose/go-jose/v3/cipher/concat_kdf.go b/vendor/github.com/go-jose/go-jose/v3/cipher/concat_kdf.go new file mode 100644 index 00000000000..f62c3bdba5d --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/cipher/concat_kdf.go @@ -0,0 +1,75 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package josecipher + +import ( + "crypto" + "encoding/binary" + "hash" + "io" +) + +type concatKDF struct { + z, info []byte + i uint32 + cache []byte + hasher hash.Hash +} + +// NewConcatKDF builds a KDF reader based on the given inputs. +func NewConcatKDF(hash crypto.Hash, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo []byte) io.Reader { + buffer := make([]byte, uint64(len(algID))+uint64(len(ptyUInfo))+uint64(len(ptyVInfo))+uint64(len(supPubInfo))+uint64(len(supPrivInfo))) + n := 0 + n += copy(buffer, algID) + n += copy(buffer[n:], ptyUInfo) + n += copy(buffer[n:], ptyVInfo) + n += copy(buffer[n:], supPubInfo) + copy(buffer[n:], supPrivInfo) + + hasher := hash.New() + + return &concatKDF{ + z: z, + info: buffer, + hasher: hasher, + cache: []byte{}, + i: 1, + } +} + +func (ctx *concatKDF) Read(out []byte) (int, error) { + copied := copy(out, ctx.cache) + ctx.cache = ctx.cache[copied:] + + for copied < len(out) { + ctx.hasher.Reset() + + // Write on a hash.Hash never fails + _ = binary.Write(ctx.hasher, binary.BigEndian, ctx.i) + _, _ = ctx.hasher.Write(ctx.z) + _, _ = ctx.hasher.Write(ctx.info) + + hash := ctx.hasher.Sum(nil) + chunkCopied := copy(out[copied:], hash) + copied += chunkCopied + ctx.cache = hash[chunkCopied:] + + ctx.i++ + } + + return copied, nil +} diff --git a/vendor/github.com/go-jose/go-jose/v3/cipher/ecdh_es.go b/vendor/github.com/go-jose/go-jose/v3/cipher/ecdh_es.go new file mode 100644 index 00000000000..093c646740b --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/cipher/ecdh_es.go @@ -0,0 +1,86 @@ +/*- + * Copyright 2014 Square Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package josecipher + +import ( + "bytes" + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "encoding/binary" +) + +// DeriveECDHES derives a shared encryption key using ECDH/ConcatKDF as described in JWE/JWA. +// It is an error to call this function with a private/public key that are not on the same +// curve. Callers must ensure that the keys are valid before calling this function. Output +// size may be at most 1<<16 bytes (64 KiB). +func DeriveECDHES(alg string, apuData, apvData []byte, priv *ecdsa.PrivateKey, pub *ecdsa.PublicKey, size int) []byte { + if size > 1<<16 { + panic("ECDH-ES output size too large, must be less than or equal to 1<<16") + } + + // algId, partyUInfo, partyVInfo inputs must be prefixed with the length + algID := lengthPrefixed([]byte(alg)) + ptyUInfo := lengthPrefixed(apuData) + ptyVInfo := lengthPrefixed(apvData) + + // suppPubInfo is the encoded length of the output size in bits + supPubInfo := make([]byte, 4) + binary.BigEndian.PutUint32(supPubInfo, uint32(size)*8) + + if !priv.PublicKey.Curve.IsOnCurve(pub.X, pub.Y) { + panic("public key not on same curve as private key") + } + + z, _ := priv.Curve.ScalarMult(pub.X, pub.Y, priv.D.Bytes()) + zBytes := z.Bytes() + + // Note that calling z.Bytes() on a big.Int may strip leading zero bytes from + // the returned byte array. This can lead to a problem where zBytes will be + // shorter than expected which breaks the key derivation. Therefore we must pad + // to the full length of the expected coordinate here before calling the KDF. + octSize := dSize(priv.Curve) + if len(zBytes) != octSize { + zBytes = append(bytes.Repeat([]byte{0}, octSize-len(zBytes)), zBytes...) + } + + reader := NewConcatKDF(crypto.SHA256, zBytes, algID, ptyUInfo, ptyVInfo, supPubInfo, []byte{}) + key := make([]byte, size) + + // Read on the KDF will never fail + _, _ = reader.Read(key) + + return key +} + +// dSize returns the size in octets for a coordinate on a elliptic curve. +func dSize(curve elliptic.Curve) int { + order := curve.Params().P + bitLen := order.BitLen() + size := bitLen / 8 + if bitLen%8 != 0 { + size++ + } + return size +} + +func lengthPrefixed(data []byte) []byte { + out := make([]byte, len(data)+4) + binary.BigEndian.PutUint32(out, uint32(len(data))) + copy(out[4:], data) + return out +} diff --git a/vendor/github.com/go-jose/go-jose/v3/cipher/key_wrap.go b/vendor/github.com/go-jose/go-jose/v3/cipher/key_wrap.go new file mode 100644 index 00000000000..b9effbca8a4 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/cipher/key_wrap.go @@ -0,0 +1,109 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package josecipher + +import ( + "crypto/cipher" + "crypto/subtle" + "encoding/binary" + "errors" +) + +var defaultIV = []byte{0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6} + +// KeyWrap implements NIST key wrapping; it wraps a content encryption key (cek) with the given block cipher. +func KeyWrap(block cipher.Block, cek []byte) ([]byte, error) { + if len(cek)%8 != 0 { + return nil, errors.New("go-jose/go-jose: key wrap input must be 8 byte blocks") + } + + n := len(cek) / 8 + r := make([][]byte, n) + + for i := range r { + r[i] = make([]byte, 8) + copy(r[i], cek[i*8:]) + } + + buffer := make([]byte, 16) + tBytes := make([]byte, 8) + copy(buffer, defaultIV) + + for t := 0; t < 6*n; t++ { + copy(buffer[8:], r[t%n]) + + block.Encrypt(buffer, buffer) + + binary.BigEndian.PutUint64(tBytes, uint64(t+1)) + + for i := 0; i < 8; i++ { + buffer[i] ^= tBytes[i] + } + copy(r[t%n], buffer[8:]) + } + + out := make([]byte, (n+1)*8) + copy(out, buffer[:8]) + for i := range r { + copy(out[(i+1)*8:], r[i]) + } + + return out, nil +} + +// KeyUnwrap implements NIST key unwrapping; it unwraps a content encryption key (cek) with the given block cipher. +func KeyUnwrap(block cipher.Block, ciphertext []byte) ([]byte, error) { + if len(ciphertext)%8 != 0 { + return nil, errors.New("go-jose/go-jose: key wrap input must be 8 byte blocks") + } + + n := (len(ciphertext) / 8) - 1 + r := make([][]byte, n) + + for i := range r { + r[i] = make([]byte, 8) + copy(r[i], ciphertext[(i+1)*8:]) + } + + buffer := make([]byte, 16) + tBytes := make([]byte, 8) + copy(buffer[:8], ciphertext[:8]) + + for t := 6*n - 1; t >= 0; t-- { + binary.BigEndian.PutUint64(tBytes, uint64(t+1)) + + for i := 0; i < 8; i++ { + buffer[i] ^= tBytes[i] + } + copy(buffer[8:], r[t%n]) + + block.Decrypt(buffer, buffer) + + copy(r[t%n], buffer[8:]) + } + + if subtle.ConstantTimeCompare(buffer[:8], defaultIV) == 0 { + return nil, errors.New("go-jose/go-jose: failed to unwrap key") + } + + out := make([]byte, n*8) + for i := range r { + copy(out[i*8:], r[i]) + } + + return out, nil +} diff --git a/vendor/github.com/go-jose/go-jose/v3/crypter.go b/vendor/github.com/go-jose/go-jose/v3/crypter.go new file mode 100644 index 00000000000..6901137e446 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/crypter.go @@ -0,0 +1,544 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package jose + +import ( + "crypto/ecdsa" + "crypto/rsa" + "errors" + "fmt" + "reflect" + + "github.com/go-jose/go-jose/v3/json" +) + +// Encrypter represents an encrypter which produces an encrypted JWE object. +type Encrypter interface { + Encrypt(plaintext []byte) (*JSONWebEncryption, error) + EncryptWithAuthData(plaintext []byte, aad []byte) (*JSONWebEncryption, error) + Options() EncrypterOptions +} + +// A generic content cipher +type contentCipher interface { + keySize() int + encrypt(cek []byte, aad, plaintext []byte) (*aeadParts, error) + decrypt(cek []byte, aad []byte, parts *aeadParts) ([]byte, error) +} + +// A key generator (for generating/getting a CEK) +type keyGenerator interface { + keySize() int + genKey() ([]byte, rawHeader, error) +} + +// A generic key encrypter +type keyEncrypter interface { + encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) // Encrypt a key +} + +// A generic key decrypter +type keyDecrypter interface { + decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) // Decrypt a key +} + +// A generic encrypter based on the given key encrypter and content cipher. +type genericEncrypter struct { + contentAlg ContentEncryption + compressionAlg CompressionAlgorithm + cipher contentCipher + recipients []recipientKeyInfo + keyGenerator keyGenerator + extraHeaders map[HeaderKey]interface{} +} + +type recipientKeyInfo struct { + keyID string + keyAlg KeyAlgorithm + keyEncrypter keyEncrypter +} + +// EncrypterOptions represents options that can be set on new encrypters. +type EncrypterOptions struct { + Compression CompressionAlgorithm + + // Optional map of additional keys to be inserted into the protected header + // of a JWS object. Some specifications which make use of JWS like to insert + // additional values here. All values must be JSON-serializable. + ExtraHeaders map[HeaderKey]interface{} +} + +// WithHeader adds an arbitrary value to the ExtraHeaders map, initializing it +// if necessary. It returns itself and so can be used in a fluent style. +func (eo *EncrypterOptions) WithHeader(k HeaderKey, v interface{}) *EncrypterOptions { + if eo.ExtraHeaders == nil { + eo.ExtraHeaders = map[HeaderKey]interface{}{} + } + eo.ExtraHeaders[k] = v + return eo +} + +// WithContentType adds a content type ("cty") header and returns the updated +// EncrypterOptions. +func (eo *EncrypterOptions) WithContentType(contentType ContentType) *EncrypterOptions { + return eo.WithHeader(HeaderContentType, contentType) +} + +// WithType adds a type ("typ") header and returns the updated EncrypterOptions. +func (eo *EncrypterOptions) WithType(typ ContentType) *EncrypterOptions { + return eo.WithHeader(HeaderType, typ) +} + +// Recipient represents an algorithm/key to encrypt messages to. +// +// PBES2Count and PBES2Salt correspond with the "p2c" and "p2s" headers used +// on the password-based encryption algorithms PBES2-HS256+A128KW, +// PBES2-HS384+A192KW, and PBES2-HS512+A256KW. If they are not provided a safe +// default of 100000 will be used for the count and a 128-bit random salt will +// be generated. 
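+//
+// A minimal sketch of encrypting to a single recipient (rsaPub is a
+// hypothetical *rsa.PublicKey; other supported key types work the same way):
+//
+//	rcpt := Recipient{Algorithm: RSA_OAEP_256, Key: rsaPub, KeyID: "key-1"}
+//	enc, err := NewEncrypter(A128GCM, rcpt, nil)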
+type Recipient struct { + Algorithm KeyAlgorithm + Key interface{} + KeyID string + PBES2Count int + PBES2Salt []byte +} + +// NewEncrypter creates an appropriate encrypter based on the key type +func NewEncrypter(enc ContentEncryption, rcpt Recipient, opts *EncrypterOptions) (Encrypter, error) { + encrypter := &genericEncrypter{ + contentAlg: enc, + recipients: []recipientKeyInfo{}, + cipher: getContentCipher(enc), + } + if opts != nil { + encrypter.compressionAlg = opts.Compression + encrypter.extraHeaders = opts.ExtraHeaders + } + + if encrypter.cipher == nil { + return nil, ErrUnsupportedAlgorithm + } + + var keyID string + var rawKey interface{} + switch encryptionKey := rcpt.Key.(type) { + case JSONWebKey: + keyID, rawKey = encryptionKey.KeyID, encryptionKey.Key + case *JSONWebKey: + keyID, rawKey = encryptionKey.KeyID, encryptionKey.Key + case OpaqueKeyEncrypter: + keyID, rawKey = encryptionKey.KeyID(), encryptionKey + default: + rawKey = encryptionKey + } + + switch rcpt.Algorithm { + case DIRECT: + // Direct encryption mode must be treated differently + if reflect.TypeOf(rawKey) != reflect.TypeOf([]byte{}) { + return nil, ErrUnsupportedKeyType + } + if encrypter.cipher.keySize() != len(rawKey.([]byte)) { + return nil, ErrInvalidKeySize + } + encrypter.keyGenerator = staticKeyGenerator{ + key: rawKey.([]byte), + } + recipientInfo, _ := newSymmetricRecipient(rcpt.Algorithm, rawKey.([]byte)) + recipientInfo.keyID = keyID + if rcpt.KeyID != "" { + recipientInfo.keyID = rcpt.KeyID + } + encrypter.recipients = []recipientKeyInfo{recipientInfo} + return encrypter, nil + case ECDH_ES: + // ECDH-ES (w/o key wrapping) is similar to DIRECT mode + typeOf := reflect.TypeOf(rawKey) + if typeOf != reflect.TypeOf(&ecdsa.PublicKey{}) { + return nil, ErrUnsupportedKeyType + } + encrypter.keyGenerator = ecKeyGenerator{ + size: encrypter.cipher.keySize(), + algID: string(enc), + publicKey: rawKey.(*ecdsa.PublicKey), + } + recipientInfo, _ := newECDHRecipient(rcpt.Algorithm, rawKey.(*ecdsa.PublicKey)) + recipientInfo.keyID = keyID + if rcpt.KeyID != "" { + recipientInfo.keyID = rcpt.KeyID + } + encrypter.recipients = []recipientKeyInfo{recipientInfo} + return encrypter, nil + default: + // Can just add a standard recipient + encrypter.keyGenerator = randomKeyGenerator{ + size: encrypter.cipher.keySize(), + } + err := encrypter.addRecipient(rcpt) + return encrypter, err + } +} + +// NewMultiEncrypter creates a multi-encrypter based on the given parameters +func NewMultiEncrypter(enc ContentEncryption, rcpts []Recipient, opts *EncrypterOptions) (Encrypter, error) { + cipher := getContentCipher(enc) + + if cipher == nil { + return nil, ErrUnsupportedAlgorithm + } + if len(rcpts) == 0 { + return nil, fmt.Errorf("go-jose/go-jose: recipients is nil or empty") + } + + encrypter := &genericEncrypter{ + contentAlg: enc, + recipients: []recipientKeyInfo{}, + cipher: cipher, + keyGenerator: randomKeyGenerator{ + size: cipher.keySize(), + }, + } + + if opts != nil { + encrypter.compressionAlg = opts.Compression + encrypter.extraHeaders = opts.ExtraHeaders + } + + for _, recipient := range rcpts { + err := encrypter.addRecipient(recipient) + if err != nil { + return nil, err + } + } + + return encrypter, nil +} + +func (ctx *genericEncrypter) addRecipient(recipient Recipient) (err error) { + var recipientInfo recipientKeyInfo + + switch recipient.Algorithm { + case DIRECT, ECDH_ES: + return fmt.Errorf("go-jose/go-jose: key algorithm '%s' not supported in multi-recipient mode", recipient.Algorithm) + } + + 
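+ // DIRECT and ECDH-ES were rejected above because both fix the CEK from a
+ // single key and so cannot share a JWE with other recipients; everything
+ // else wraps the randomly generated CEK.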
recipientInfo, err = makeJWERecipient(recipient.Algorithm, recipient.Key) + if recipient.KeyID != "" { + recipientInfo.keyID = recipient.KeyID + } + + switch recipient.Algorithm { + case PBES2_HS256_A128KW, PBES2_HS384_A192KW, PBES2_HS512_A256KW: + if sr, ok := recipientInfo.keyEncrypter.(*symmetricKeyCipher); ok { + sr.p2c = recipient.PBES2Count + sr.p2s = recipient.PBES2Salt + } + } + + if err == nil { + ctx.recipients = append(ctx.recipients, recipientInfo) + } + return err +} + +func makeJWERecipient(alg KeyAlgorithm, encryptionKey interface{}) (recipientKeyInfo, error) { + switch encryptionKey := encryptionKey.(type) { + case *rsa.PublicKey: + return newRSARecipient(alg, encryptionKey) + case *ecdsa.PublicKey: + return newECDHRecipient(alg, encryptionKey) + case []byte: + return newSymmetricRecipient(alg, encryptionKey) + case string: + return newSymmetricRecipient(alg, []byte(encryptionKey)) + case *JSONWebKey: + recipient, err := makeJWERecipient(alg, encryptionKey.Key) + recipient.keyID = encryptionKey.KeyID + return recipient, err + } + if encrypter, ok := encryptionKey.(OpaqueKeyEncrypter); ok { + return newOpaqueKeyEncrypter(alg, encrypter) + } + return recipientKeyInfo{}, ErrUnsupportedKeyType +} + +// newDecrypter creates an appropriate decrypter based on the key type +func newDecrypter(decryptionKey interface{}) (keyDecrypter, error) { + switch decryptionKey := decryptionKey.(type) { + case *rsa.PrivateKey: + return &rsaDecrypterSigner{ + privateKey: decryptionKey, + }, nil + case *ecdsa.PrivateKey: + return &ecDecrypterSigner{ + privateKey: decryptionKey, + }, nil + case []byte: + return &symmetricKeyCipher{ + key: decryptionKey, + }, nil + case string: + return &symmetricKeyCipher{ + key: []byte(decryptionKey), + }, nil + case JSONWebKey: + return newDecrypter(decryptionKey.Key) + case *JSONWebKey: + return newDecrypter(decryptionKey.Key) + } + if okd, ok := decryptionKey.(OpaqueKeyDecrypter); ok { + return &opaqueKeyDecrypter{decrypter: okd}, nil + } + return nil, ErrUnsupportedKeyType +} + +// Implementation of encrypt method producing a JWE object. +func (ctx *genericEncrypter) Encrypt(plaintext []byte) (*JSONWebEncryption, error) { + return ctx.EncryptWithAuthData(plaintext, nil) +} + +// Implementation of encrypt method producing a JWE object. +func (ctx *genericEncrypter) EncryptWithAuthData(plaintext, aad []byte) (*JSONWebEncryption, error) { + obj := &JSONWebEncryption{} + obj.aad = aad + + obj.protected = &rawHeader{} + err := obj.protected.set(headerEncryption, ctx.contentAlg) + if err != nil { + return nil, err + } + + obj.recipients = make([]recipientInfo, len(ctx.recipients)) + + if len(ctx.recipients) == 0 { + return nil, fmt.Errorf("go-jose/go-jose: no recipients to encrypt to") + } + + cek, headers, err := ctx.keyGenerator.genKey() + if err != nil { + return nil, err + } + + obj.protected.merge(&headers) + + for i, info := range ctx.recipients { + recipient, err := info.keyEncrypter.encryptKey(cek, info.keyAlg) + if err != nil { + return nil, err + } + + err = recipient.header.set(headerAlgorithm, info.keyAlg) + if err != nil { + return nil, err + } + + if info.keyID != "" { + err = recipient.header.set(headerKeyID, info.keyID) + if err != nil { + return nil, err + } + } + obj.recipients[i] = recipient + } + + if len(ctx.recipients) == 1 { + // Move per-recipient headers into main protected header if there's + // only a single recipient. 
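+ // Merging also moves alg/kid under the integrity-protected header, which
+ // is the form the compact JWE serialization requires.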
+ obj.protected.merge(obj.recipients[0].header) + obj.recipients[0].header = nil + } + + if ctx.compressionAlg != NONE { + plaintext, err = compress(ctx.compressionAlg, plaintext) + if err != nil { + return nil, err + } + + err = obj.protected.set(headerCompression, ctx.compressionAlg) + if err != nil { + return nil, err + } + } + + for k, v := range ctx.extraHeaders { + b, err := json.Marshal(v) + if err != nil { + return nil, err + } + (*obj.protected)[k] = makeRawMessage(b) + } + + authData := obj.computeAuthData() + parts, err := ctx.cipher.encrypt(cek, authData, plaintext) + if err != nil { + return nil, err + } + + obj.iv = parts.iv + obj.ciphertext = parts.ciphertext + obj.tag = parts.tag + + return obj, nil +} + +func (ctx *genericEncrypter) Options() EncrypterOptions { + return EncrypterOptions{ + Compression: ctx.compressionAlg, + ExtraHeaders: ctx.extraHeaders, + } +} + +// Decrypt and validate the object and return the plaintext. Note that this +// function does not support multi-recipient, if you desire multi-recipient +// decryption use DecryptMulti instead. +func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) { + headers := obj.mergedHeaders(nil) + + if len(obj.recipients) > 1 { + return nil, errors.New("go-jose/go-jose: too many recipients in payload; expecting only one") + } + + critical, err := headers.getCritical() + if err != nil { + return nil, fmt.Errorf("go-jose/go-jose: invalid crit header") + } + + if len(critical) > 0 { + return nil, fmt.Errorf("go-jose/go-jose: unsupported crit header") + } + + key := tryJWKS(decryptionKey, obj.Header) + decrypter, err := newDecrypter(key) + if err != nil { + return nil, err + } + + cipher := getContentCipher(headers.getEncryption()) + if cipher == nil { + return nil, fmt.Errorf("go-jose/go-jose: unsupported enc value '%s'", string(headers.getEncryption())) + } + + generator := randomKeyGenerator{ + size: cipher.keySize(), + } + + parts := &aeadParts{ + iv: obj.iv, + ciphertext: obj.ciphertext, + tag: obj.tag, + } + + authData := obj.computeAuthData() + + var plaintext []byte + recipient := obj.recipients[0] + recipientHeaders := obj.mergedHeaders(&recipient) + + cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator) + if err == nil { + // Found a valid CEK -- let's try to decrypt. + plaintext, err = cipher.decrypt(cek, authData, parts) + } + + if plaintext == nil { + return nil, ErrCryptoFailure + } + + // The "zip" header parameter may only be present in the protected header. + if comp := obj.protected.getCompression(); comp != "" { + plaintext, err = decompress(comp, plaintext) + } + + return plaintext, err +} + +// DecryptMulti decrypts and validates the object and returns the plaintexts, +// with support for multiple recipients. It returns the index of the recipient +// for which the decryption was successful, the merged headers for that recipient, +// and the plaintext. 
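+//
+// A usage sketch (privKey is a hypothetical decryption key):
+//
+//	idx, hdr, plaintext, err := obj.DecryptMulti(privKey)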
+func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Header, []byte, error) { + globalHeaders := obj.mergedHeaders(nil) + + critical, err := globalHeaders.getCritical() + if err != nil { + return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: invalid crit header") + } + + if len(critical) > 0 { + return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: unsupported crit header") + } + + key := tryJWKS(decryptionKey, obj.Header) + decrypter, err := newDecrypter(key) + if err != nil { + return -1, Header{}, nil, err + } + + encryption := globalHeaders.getEncryption() + cipher := getContentCipher(encryption) + if cipher == nil { + return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: unsupported enc value '%s'", string(encryption)) + } + + generator := randomKeyGenerator{ + size: cipher.keySize(), + } + + parts := &aeadParts{ + iv: obj.iv, + ciphertext: obj.ciphertext, + tag: obj.tag, + } + + authData := obj.computeAuthData() + + index := -1 + var plaintext []byte + var headers rawHeader + + for i, recipient := range obj.recipients { + recipientHeaders := obj.mergedHeaders(&recipient) + + cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator) + if err == nil { + // Found a valid CEK -- let's try to decrypt. + plaintext, err = cipher.decrypt(cek, authData, parts) + if err == nil { + index = i + headers = recipientHeaders + break + } + } + } + + if plaintext == nil { + return -1, Header{}, nil, ErrCryptoFailure + } + + // The "zip" header parameter may only be present in the protected header. + if comp := obj.protected.getCompression(); comp != "" { + plaintext, _ = decompress(comp, plaintext) + } + + sanitized, err := headers.sanitized() + if err != nil { + return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: failed to sanitize header: %v", err) + } + + return index, sanitized, plaintext, err +} diff --git a/vendor/github.com/go-jose/go-jose/v3/doc.go b/vendor/github.com/go-jose/go-jose/v3/doc.go new file mode 100644 index 00000000000..71ec1c419b1 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/doc.go @@ -0,0 +1,27 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + +Package jose aims to provide an implementation of the Javascript Object Signing +and Encryption set of standards. It implements encryption and signing based on +the JSON Web Encryption and JSON Web Signature standards, with optional JSON Web +Token support available in a sub-package. The library supports both the compact +and JWS/JWE JSON Serialization formats, and has optional support for multiple +recipients. + +*/ +package jose diff --git a/vendor/github.com/go-jose/go-jose/v3/encoding.go b/vendor/github.com/go-jose/go-jose/v3/encoding.go new file mode 100644 index 00000000000..968a42496e1 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/encoding.go @@ -0,0 +1,191 @@ +/*- + * Copyright 2014 Square Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package jose + +import ( + "bytes" + "compress/flate" + "encoding/base64" + "encoding/binary" + "io" + "math/big" + "strings" + "unicode" + + "github.com/go-jose/go-jose/v3/json" +) + +// Helper function to serialize known-good objects. +// Precondition: value is not a nil pointer. +func mustSerializeJSON(value interface{}) []byte { + out, err := json.Marshal(value) + if err != nil { + panic(err) + } + // We never want to serialize the top-level value "null," since it's not a + // valid JOSE message. But if a caller passes in a nil pointer to this method, + // MarshalJSON will happily serialize it as the top-level value "null". If + // that value is then embedded in another operation, for instance by being + // base64-encoded and fed as input to a signing algorithm + // (https://github.com/go-jose/go-jose/issues/22), the result will be + // incorrect. Because this method is intended for known-good objects, and a nil + // pointer is not a known-good object, we are free to panic in this case. + // Note: It's not possible to directly check whether the data pointed at by an + // interface is a nil pointer, so we do this hacky workaround. + // https://groups.google.com/forum/#!topic/golang-nuts/wnH302gBa4I + if string(out) == "null" { + panic("Tried to serialize a nil pointer.") + } + return out +} + +// Strip all newlines and whitespace +func stripWhitespace(data string) string { + buf := strings.Builder{} + buf.Grow(len(data)) + for _, r := range data { + if !unicode.IsSpace(r) { + buf.WriteRune(r) + } + } + return buf.String() +} + +// Perform compression based on algorithm +func compress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) { + switch algorithm { + case DEFLATE: + return deflate(input) + default: + return nil, ErrUnsupportedAlgorithm + } +} + +// Perform decompression based on algorithm +func decompress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) { + switch algorithm { + case DEFLATE: + return inflate(input) + default: + return nil, ErrUnsupportedAlgorithm + } +} + +// Compress with DEFLATE +func deflate(input []byte) ([]byte, error) { + output := new(bytes.Buffer) + + // Writing to byte buffer, err is always nil + writer, _ := flate.NewWriter(output, 1) + _, _ = io.Copy(writer, bytes.NewBuffer(input)) + + err := writer.Close() + return output.Bytes(), err +} + +// Decompress with DEFLATE +func inflate(input []byte) ([]byte, error) { + output := new(bytes.Buffer) + reader := flate.NewReader(bytes.NewBuffer(input)) + + _, err := io.Copy(output, reader) + if err != nil { + return nil, err + } + + err = reader.Close() + return output.Bytes(), err +} + +// byteBuffer represents a slice of bytes that can be serialized to url-safe base64. 
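+// Values marshal to and from the unpadded base64url alphabet of
+// RFC 7515 §2 via base64.RawURLEncoding below.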
+type byteBuffer struct { + data []byte +} + +func newBuffer(data []byte) *byteBuffer { + if data == nil { + return nil + } + return &byteBuffer{ + data: data, + } +} + +func newFixedSizeBuffer(data []byte, length int) *byteBuffer { + if len(data) > length { + panic("go-jose/go-jose: invalid call to newFixedSizeBuffer (len(data) > length)") + } + pad := make([]byte, length-len(data)) + return newBuffer(append(pad, data...)) +} + +func newBufferFromInt(num uint64) *byteBuffer { + data := make([]byte, 8) + binary.BigEndian.PutUint64(data, num) + return newBuffer(bytes.TrimLeft(data, "\x00")) +} + +func (b *byteBuffer) MarshalJSON() ([]byte, error) { + return json.Marshal(b.base64()) +} + +func (b *byteBuffer) UnmarshalJSON(data []byte) error { + var encoded string + err := json.Unmarshal(data, &encoded) + if err != nil { + return err + } + + if encoded == "" { + return nil + } + + decoded, err := base64URLDecode(encoded) + if err != nil { + return err + } + + *b = *newBuffer(decoded) + + return nil +} + +func (b *byteBuffer) base64() string { + return base64.RawURLEncoding.EncodeToString(b.data) +} + +func (b *byteBuffer) bytes() []byte { + // Handling nil here allows us to transparently handle nil slices when serializing. + if b == nil { + return nil + } + return b.data +} + +func (b byteBuffer) bigInt() *big.Int { + return new(big.Int).SetBytes(b.data) +} + +func (b byteBuffer) toInt() int { + return int(b.bigInt().Int64()) +} + +// base64URLDecode is implemented as defined in https://www.rfc-editor.org/rfc/rfc7515.html#appendix-C +func base64URLDecode(value string) ([]byte, error) { + value = strings.TrimRight(value, "=") + return base64.RawURLEncoding.DecodeString(value) +} diff --git a/vendor/github.com/go-jose/go-jose/v3/go.mod b/vendor/github.com/go-jose/go-jose/v3/go.mod new file mode 100644 index 00000000000..d19ccbf0f4c --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/go.mod @@ -0,0 +1,9 @@ +module github.com/go-jose/go-jose/v3 + +go 1.12 + +require ( + github.com/google/go-cmp v0.5.0 + github.com/stretchr/testify v1.6.1 + golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7 +) diff --git a/vendor/github.com/go-jose/go-jose/v3/go.sum b/vendor/github.com/go-jose/go-jose/v3/go.sum new file mode 100644 index 00000000000..a77b2d6915f --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/go.sum @@ -0,0 +1,33 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1 h1:/exdXoGamhu5ONeUJH0deniYLWYvQwW66yvlfiiKTu0= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= +github.com/stretchr/testify v1.5.1/go.mod 
h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.0 h1:jlIyCplCJFULU/01vCkhKuTyc3OorI3bJFuw6obfgho= +github.com/stretchr/testify v1.6.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7 h1:0hQKqeLdqlt5iIwVOBErRisrHJAN57yOiPRQItI20fU= +golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/github.com/go-jose/go-jose/v3/json/LICENSE b/vendor/github.com/go-jose/go-jose/v3/json/LICENSE new file mode 100644 index 00000000000..74487567632 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/json/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/go-jose/go-jose/v3/json/README.md b/vendor/github.com/go-jose/go-jose/v3/json/README.md new file mode 100644 index 00000000000..86de5e5581f --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/json/README.md @@ -0,0 +1,13 @@ +# Safe JSON + +This repository contains a fork of the `encoding/json` package from Go 1.6. + +The following changes were made: + +* Object deserialization uses case-sensitive member name matching instead of + [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html). + This is to avoid differences in the interpretation of JOSE messages between + go-jose and libraries written in other languages. +* When deserializing a JSON object, we check for duplicate keys and reject the + input whenever we detect a duplicate. Rather than trying to work with malformed + data, we prefer to reject it right away. diff --git a/vendor/github.com/go-jose/go-jose/v3/json/decode.go b/vendor/github.com/go-jose/go-jose/v3/json/decode.go new file mode 100644 index 00000000000..4dbc4146cf9 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/json/decode.go @@ -0,0 +1,1217 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Represents JSON data structure using native Go types: booleans, floats, +// strings, arrays, and maps. + +package json + +import ( + "bytes" + "encoding" + "encoding/base64" + "errors" + "fmt" + "math" + "reflect" + "runtime" + "strconv" + "unicode" + "unicode/utf16" + "unicode/utf8" +) + +// Unmarshal parses the JSON-encoded data and stores the result +// in the value pointed to by v. +// +// Unmarshal uses the inverse of the encodings that +// Marshal uses, allocating maps, slices, and pointers as necessary, +// with the following additional rules: +// +// To unmarshal JSON into a pointer, Unmarshal first handles the case of +// the JSON being the JSON literal null. In that case, Unmarshal sets +// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into +// the value pointed at by the pointer. If the pointer is nil, Unmarshal +// allocates a new value for it to point to. +// +// To unmarshal JSON into a struct, Unmarshal matches incoming object +// keys to the keys used by Marshal (either the struct field name or its tag), +// preferring an exact match but also accepting a case-insensitive match. +// Unmarshal will only set exported fields of the struct. +// +// To unmarshal JSON into an interface value, +// Unmarshal stores one of these in the interface value: +// +// bool, for JSON booleans +// float64, for JSON numbers +// string, for JSON strings +// []interface{}, for JSON arrays +// map[string]interface{}, for JSON objects +// nil for JSON null +// +// To unmarshal a JSON array into a slice, Unmarshal resets the slice length +// to zero and then appends each element to the slice. 
+// As a special case, to unmarshal an empty JSON array into a slice,
+// Unmarshal replaces the slice with a new empty slice.
+//
+// To unmarshal a JSON array into a Go array, Unmarshal decodes
+// JSON array elements into corresponding Go array elements.
+// If the Go array is smaller than the JSON array,
+// the additional JSON array elements are discarded.
+// If the JSON array is smaller than the Go array,
+// the additional Go array elements are set to zero values.
+//
+// To unmarshal a JSON object into a string-keyed map, Unmarshal first
+// establishes a map to use. If the map is nil, Unmarshal allocates a new map.
+// Otherwise Unmarshal reuses the existing map, keeping existing entries.
+// Unmarshal then stores key-value pairs from the JSON object into the map.
+//
+// If a JSON value is not appropriate for a given target type,
+// or if a JSON number overflows the target type, Unmarshal
+// skips that field and completes the unmarshaling as best it can.
+// If no more serious errors are encountered, Unmarshal returns
+// an UnmarshalTypeError describing the earliest such error.
+//
+// The JSON null value unmarshals into an interface, map, pointer, or slice
+// by setting that Go value to nil. Because null is often used in JSON to mean
+// ``not present,'' unmarshaling a JSON null into any other Go type has no effect
+// on the value and produces no error.
+//
+// When unmarshaling quoted strings, invalid UTF-8 or
+// invalid UTF-16 surrogate pairs are not treated as an error.
+// Instead, they are replaced by the Unicode replacement
+// character U+FFFD.
+//
+func Unmarshal(data []byte, v interface{}) error {
+ // Check for well-formedness.
+ // Avoids filling out half a data structure
+ // before discovering a JSON syntax error.
+ var d decodeState
+ err := checkValid(data, &d.scan)
+ if err != nil {
+ return err
+ }
+
+ d.init(data)
+ return d.unmarshal(v)
+}
+
+// Unmarshaler is the interface implemented by objects
+// that can unmarshal a JSON description of themselves.
+// The input can be assumed to be a valid encoding of
+// a JSON value. UnmarshalJSON must copy the JSON data
+// if it wishes to retain the data after returning.
+type Unmarshaler interface {
+ UnmarshalJSON([]byte) error
+}
+
+// An UnmarshalTypeError describes a JSON value that was
+// not appropriate for a value of a specific Go type.
+type UnmarshalTypeError struct {
+ Value string // description of JSON value - "bool", "array", "number -5"
+ Type reflect.Type // type of Go value it could not be assigned to
+ Offset int64 // error occurred after reading Offset bytes
+}
+
+func (e *UnmarshalTypeError) Error() string {
+ return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String()
+}
+
+// An UnmarshalFieldError describes a JSON object key that
+// led to an unexported (and therefore unwritable) struct field.
+// (No longer used; kept for compatibility.)
+type UnmarshalFieldError struct {
+ Key string
+ Type reflect.Type
+ Field reflect.StructField
+}
+
+func (e *UnmarshalFieldError) Error() string {
+ return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String()
+}
+
+// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal.
+// (The argument to Unmarshal must be a non-nil pointer.)
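+// The three branches of its Error method distinguish Unmarshal(nil),
+// Unmarshal of a non-pointer, and Unmarshal of a nil pointer.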
+type InvalidUnmarshalError struct { + Type reflect.Type +} + +func (e *InvalidUnmarshalError) Error() string { + if e.Type == nil { + return "json: Unmarshal(nil)" + } + + if e.Type.Kind() != reflect.Ptr { + return "json: Unmarshal(non-pointer " + e.Type.String() + ")" + } + return "json: Unmarshal(nil " + e.Type.String() + ")" +} + +func (d *decodeState) unmarshal(v interface{}) (err error) { + defer func() { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + panic(r) + } + err = r.(error) + } + }() + + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr || rv.IsNil() { + return &InvalidUnmarshalError{reflect.TypeOf(v)} + } + + d.scan.reset() + // We decode rv not rv.Elem because the Unmarshaler interface + // test must be applied at the top level of the value. + d.value(rv) + return d.savedError +} + +// A Number represents a JSON number literal. +type Number string + +// String returns the literal text of the number. +func (n Number) String() string { return string(n) } + +// Float64 returns the number as a float64. +func (n Number) Float64() (float64, error) { + return strconv.ParseFloat(string(n), 64) +} + +// Int64 returns the number as an int64. +func (n Number) Int64() (int64, error) { + return strconv.ParseInt(string(n), 10, 64) +} + +// isValidNumber reports whether s is a valid JSON number literal. +func isValidNumber(s string) bool { + // This function implements the JSON numbers grammar. + // See https://tools.ietf.org/html/rfc7159#section-6 + // and http://json.org/number.gif + + if s == "" { + return false + } + + // Optional - + if s[0] == '-' { + s = s[1:] + if s == "" { + return false + } + } + + // Digits + switch { + default: + return false + + case s[0] == '0': + s = s[1:] + + case '1' <= s[0] && s[0] <= '9': + s = s[1:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + } + } + + // . followed by 1 or more digits. + if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' { + s = s[2:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + } + } + + // e or E followed by an optional - or + and + // 1 or more digits. + if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { + s = s[1:] + if s[0] == '+' || s[0] == '-' { + s = s[1:] + if s == "" { + return false + } + } + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + } + } + + // Make sure we are at the end. + return s == "" +} + +type NumberUnmarshalType int + +const ( + // unmarshal a JSON number into an interface{} as a float64 + UnmarshalFloat NumberUnmarshalType = iota + // unmarshal a JSON number into an interface{} as a `json.Number` + UnmarshalJSONNumber + // unmarshal a JSON number into an interface{} as a int64 + // if value is an integer otherwise float64 + UnmarshalIntOrFloat +) + +// decodeState represents the state while decoding a JSON value. +type decodeState struct { + data []byte + off int // read offset in data + scan scanner + nextscan scanner // for calls to nextValue + savedError error + numberType NumberUnmarshalType +} + +// errPhase is used for errors that should not happen unless +// there is a bug in the JSON decoder or something is editing +// the data slice while the decoder executes. +var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?") + +func (d *decodeState) init(data []byte) *decodeState { + d.data = data + d.off = 0 + d.savedError = nil + return d +} + +// error aborts the decoding by panicking with err. 
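+// The panic is caught by the recover in (*decodeState).unmarshal above and
+// surfaced to callers as an ordinary error return.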
+func (d *decodeState) error(err error) { + panic(err) +} + +// saveError saves the first err it is called with, +// for reporting at the end of the unmarshal. +func (d *decodeState) saveError(err error) { + if d.savedError == nil { + d.savedError = err + } +} + +// next cuts off and returns the next full JSON value in d.data[d.off:]. +// The next value is known to be an object or array, not a literal. +func (d *decodeState) next() []byte { + c := d.data[d.off] + item, rest, err := nextValue(d.data[d.off:], &d.nextscan) + if err != nil { + d.error(err) + } + d.off = len(d.data) - len(rest) + + // Our scanner has seen the opening brace/bracket + // and thinks we're still in the middle of the object. + // invent a closing brace/bracket to get it out. + if c == '{' { + d.scan.step(&d.scan, '}') + } else { + d.scan.step(&d.scan, ']') + } + + return item +} + +// scanWhile processes bytes in d.data[d.off:] until it +// receives a scan code not equal to op. +// It updates d.off and returns the new scan code. +func (d *decodeState) scanWhile(op int) int { + var newOp int + for { + if d.off >= len(d.data) { + newOp = d.scan.eof() + d.off = len(d.data) + 1 // mark processed EOF with len+1 + } else { + c := d.data[d.off] + d.off++ + newOp = d.scan.step(&d.scan, c) + } + if newOp != op { + break + } + } + return newOp +} + +// value decodes a JSON value from d.data[d.off:] into the value. +// it updates d.off to point past the decoded value. +func (d *decodeState) value(v reflect.Value) { + if !v.IsValid() { + _, rest, err := nextValue(d.data[d.off:], &d.nextscan) + if err != nil { + d.error(err) + } + d.off = len(d.data) - len(rest) + + // d.scan thinks we're still at the beginning of the item. + // Feed in an empty string - the shortest, simplest value - + // so that it knows we got to the end of the value. + if d.scan.redo { + // rewind. + d.scan.redo = false + d.scan.step = stateBeginValue + } + d.scan.step(&d.scan, '"') + d.scan.step(&d.scan, '"') + + n := len(d.scan.parseState) + if n > 0 && d.scan.parseState[n-1] == parseObjectKey { + // d.scan thinks we just read an object key; finish the object + d.scan.step(&d.scan, ':') + d.scan.step(&d.scan, '"') + d.scan.step(&d.scan, '"') + d.scan.step(&d.scan, '}') + } + + return + } + + switch op := d.scanWhile(scanSkipSpace); op { + default: + d.error(errPhase) + + case scanBeginArray: + d.array(v) + + case scanBeginObject: + d.object(v) + + case scanBeginLiteral: + d.literal(v) + } +} + +type unquotedValue struct{} + +// valueQuoted is like value but decodes a +// quoted string literal or literal null into an interface value. +// If it finds anything other than a quoted string literal or null, +// valueQuoted returns unquotedValue{}. +func (d *decodeState) valueQuoted() interface{} { + switch op := d.scanWhile(scanSkipSpace); op { + default: + d.error(errPhase) + + case scanBeginArray: + d.array(reflect.Value{}) + + case scanBeginObject: + d.object(reflect.Value{}) + + case scanBeginLiteral: + switch v := d.literalInterface().(type) { + case nil, string: + return v + } + } + return unquotedValue{} +} + +// indirect walks down v allocating pointers as needed, +// until it gets to a non-pointer. +// if it encounters an Unmarshaler, indirect stops and returns that. +// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. 
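+// For example (hypothetical type T implementing Unmarshaler): decoding into
+// a **T allocates the intermediate *T as needed and returns the innermost
+// value's Unmarshaler.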
+func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { + // If v is a named type and is addressable, + // start with its address, so that if the type has pointer methods, + // we find them. + if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { + v = v.Addr() + } + for { + // Load value from interface, but only if the result will be + // usefully addressable. + if v.Kind() == reflect.Interface && !v.IsNil() { + e := v.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { + v = e + continue + } + } + + if v.Kind() != reflect.Ptr { + break + } + + if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { + break + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + if v.Type().NumMethod() > 0 { + if u, ok := v.Interface().(Unmarshaler); ok { + return u, nil, reflect.Value{} + } + if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { + return nil, u, reflect.Value{} + } + } + v = v.Elem() + } + return nil, nil, v +} + +// array consumes an array from d.data[d.off-1:], decoding into the value v. +// the first byte of the array ('[') has been read already. +func (d *decodeState) array(v reflect.Value) { + // Check for unmarshaler. + u, ut, pv := d.indirect(v, false) + if u != nil { + d.off-- + err := u.UnmarshalJSON(d.next()) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) + d.off-- + d.next() + return + } + + v = pv + + // Check type of target. + switch v.Kind() { + case reflect.Interface: + if v.NumMethod() == 0 { + // Decoding into nil interface? Switch to non-reflect code. + v.Set(reflect.ValueOf(d.arrayInterface())) + return + } + // Otherwise it's invalid. + fallthrough + default: + d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) + d.off-- + d.next() + return + case reflect.Array: + case reflect.Slice: + break + } + + i := 0 + for { + // Look ahead for ] - can only happen on first iteration. + op := d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + + // Back up so d.value can have the byte we just read. + d.off-- + d.scan.undo(op) + + // Get element of array, growing if necessary. + if v.Kind() == reflect.Slice { + // Grow slice if necessary + if i >= v.Cap() { + newcap := v.Cap() + v.Cap()/2 + if newcap < 4 { + newcap = 4 + } + newv := reflect.MakeSlice(v.Type(), v.Len(), newcap) + reflect.Copy(newv, v) + v.Set(newv) + } + if i >= v.Len() { + v.SetLen(i + 1) + } + } + + if i < v.Len() { + // Decode into element. + d.value(v.Index(i)) + } else { + // Ran out of fixed array: skip. + d.value(reflect.Value{}) + } + i++ + + // Next token must be , or ]. + op = d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + if op != scanArrayValue { + d.error(errPhase) + } + } + + if i < v.Len() { + if v.Kind() == reflect.Array { + // Array. Zero the rest. + z := reflect.Zero(v.Type().Elem()) + for ; i < v.Len(); i++ { + v.Index(i).Set(z) + } + } else { + v.SetLen(i) + } + } + if i == 0 && v.Kind() == reflect.Slice { + v.Set(reflect.MakeSlice(v.Type(), 0, 0)) + } +} + +var nullLiteral = []byte("null") + +// object consumes an object from d.data[d.off-1:], decoding into the value v. +// the first byte ('{') of the object has been read already. +func (d *decodeState) object(v reflect.Value) { + // Check for unmarshaler. 
+ u, ut, pv := d.indirect(v, false) + if u != nil { + d.off-- + err := u.UnmarshalJSON(d.next()) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over { } in input + return + } + v = pv + + // Decoding into nil interface? Switch to non-reflect code. + if v.Kind() == reflect.Interface && v.NumMethod() == 0 { + v.Set(reflect.ValueOf(d.objectInterface())) + return + } + + // Check type of target: struct or map[string]T + switch v.Kind() { + case reflect.Map: + // map must have string kind + t := v.Type() + if t.Key().Kind() != reflect.String { + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over { } in input + return + } + if v.IsNil() { + v.Set(reflect.MakeMap(t)) + } + case reflect.Struct: + + default: + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over { } in input + return + } + + var mapElem reflect.Value + keys := map[string]bool{} + + for { + // Read opening " of string key or closing }. + op := d.scanWhile(scanSkipSpace) + if op == scanEndObject { + // closing } - can only happen on first iteration. + break + } + if op != scanBeginLiteral { + d.error(errPhase) + } + + // Read key. + start := d.off - 1 + op = d.scanWhile(scanContinue) + item := d.data[start : d.off-1] + key, ok := unquote(item) + if !ok { + d.error(errPhase) + } + + // Check for duplicate keys. + _, ok = keys[key] + if !ok { + keys[key] = true + } else { + d.error(fmt.Errorf("json: duplicate key '%s' in object", key)) + } + + // Figure out field corresponding to key. + var subv reflect.Value + destring := false // whether the value is wrapped in a string to be decoded first + + if v.Kind() == reflect.Map { + elemType := v.Type().Elem() + if !mapElem.IsValid() { + mapElem = reflect.New(elemType).Elem() + } else { + mapElem.Set(reflect.Zero(elemType)) + } + subv = mapElem + } else { + var f *field + fields := cachedTypeFields(v.Type()) + for i := range fields { + ff := &fields[i] + if bytes.Equal(ff.nameBytes, []byte(key)) { + f = ff + break + } + } + if f != nil { + subv = v + destring = f.quoted + for _, i := range f.index { + if subv.Kind() == reflect.Ptr { + if subv.IsNil() { + subv.Set(reflect.New(subv.Type().Elem())) + } + subv = subv.Elem() + } + subv = subv.Field(i) + } + } + } + + // Read : before value. + if op == scanSkipSpace { + op = d.scanWhile(scanSkipSpace) + } + if op != scanObjectKey { + d.error(errPhase) + } + + // Read value. + if destring { + switch qv := d.valueQuoted().(type) { + case nil: + d.literalStore(nullLiteral, subv, false) + case string: + d.literalStore([]byte(qv), subv, true) + default: + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type())) + } + } else { + d.value(subv) + } + + // Write value back to map; + // if using struct, subv points into struct already. + if v.Kind() == reflect.Map { + kv := reflect.ValueOf(key).Convert(v.Type().Key()) + v.SetMapIndex(kv, subv) + } + + // Next token must be , or }. + op = d.scanWhile(scanSkipSpace) + if op == scanEndObject { + break + } + if op != scanObjectValue { + d.error(errPhase) + } + } +} + +// literal consumes a literal from d.data[d.off-1:], decoding into the value v. +// The first byte of the literal has been read already +// (that's how the caller knows it's a literal). 
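+// Literals here are null, true, false, numbers, and quoted strings.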
+func (d *decodeState) literal(v reflect.Value) {
+ // All bytes inside literal return scanContinue op code.
+ start := d.off - 1
+ op := d.scanWhile(scanContinue)
+
+ // Scan read one byte too far; back up.
+ d.off--
+ d.scan.undo(op)
+
+ d.literalStore(d.data[start:d.off], v, false)
+}
+
+// convertNumber converts the number literal s to a float64, int64 or a Number
+// depending on d.numberType.
+func (d *decodeState) convertNumber(s string) (interface{}, error) {
+ switch d.numberType {
+
+ case UnmarshalJSONNumber:
+ return Number(s), nil
+ case UnmarshalIntOrFloat:
+ v, err := strconv.ParseInt(s, 10, 64)
+ if err == nil {
+ return v, nil
+ }
+
+ // fall back to float parsing (this also handles integers
+ // written in scientific notation)
+ f, err := strconv.ParseFloat(s, 64)
+ if err != nil {
+ return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)}
+ }
+
+ // if it has no fractional part, report it as an int64
+ if fi, fd := math.Modf(f); fd == 0.0 {
+ return int64(fi), nil
+ }
+ return f, nil
+ default:
+ f, err := strconv.ParseFloat(s, 64)
+ if err != nil {
+ return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)}
+ }
+ return f, nil
+ }
+}
+
+var numberType = reflect.TypeOf(Number(""))
+
+// literalStore decodes a literal stored in item into v.
+//
+// fromQuoted indicates whether this literal came from unwrapping a
+// string from the ",string" struct tag option. This is used only to
+// produce more helpful error messages.
+func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) {
+ // Check for unmarshaler.
+ if len(item) == 0 {
+ // Empty string given.
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ return
+ }
+ wantptr := item[0] == 'n' // null
+ u, ut, pv := d.indirect(v, wantptr)
+ if u != nil {
+ err := u.UnmarshalJSON(item)
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+ if ut != nil {
+ if item[0] != '"' {
+ if fromQuoted {
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+ }
+ return
+ }
+ s, ok := unquoteBytes(item)
+ if !ok {
+ if fromQuoted {
+ d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.error(errPhase)
+ }
+ }
+ err := ut.UnmarshalText(s)
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+
+ v = pv
+
+ switch c := item[0]; c {
+ case 'n': // null
+ switch v.Kind() {
+ case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
+ v.Set(reflect.Zero(v.Type()))
+ // otherwise, ignore null for primitives/string
+ }
+ case 't', 'f': // true, false
+ value := c == 't'
+ switch v.Kind() {
+ default:
+ if fromQuoted {
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)})
+ }
+ case reflect.Bool:
+ v.SetBool(value)
+ case reflect.Interface:
+ if v.NumMethod() == 0 {
+ v.Set(reflect.ValueOf(value))
+ } else {
+ d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)})
+ }
+ }
+
+ case '"': // string
+ s, ok := unquoteBytes(item)
+ if !ok {
+ if fromQuoted {
+ d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.error(errPhase)
+ }
+ }
+ switch v.Kind() {
+ default:
+ d.saveError(&UnmarshalTypeError{"string", v.Type(),
int64(d.off)}) + case reflect.Slice: + if v.Type().Elem().Kind() != reflect.Uint8 { + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + break + } + b := make([]byte, base64.StdEncoding.DecodedLen(len(s))) + n, err := base64.StdEncoding.Decode(b, s) + if err != nil { + d.saveError(err) + break + } + v.SetBytes(b[:n]) + case reflect.String: + v.SetString(string(s)) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(string(s))) + } else { + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + } + } + + default: // number + if c != '-' && (c < '0' || c > '9') { + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(errPhase) + } + } + s := string(item) + switch v.Kind() { + default: + if v.Kind() == reflect.String && v.Type() == numberType { + v.SetString(s) + if !isValidNumber(s) { + d.error(fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item)) + } + break + } + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) + } + case reflect.Interface: + n, err := d.convertNumber(s) + if err != nil { + d.saveError(err) + break + } + if v.NumMethod() != 0 { + d.saveError(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) + break + } + v.Set(reflect.ValueOf(n)) + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n, err := strconv.ParseInt(s, 10, 64) + if err != nil || v.OverflowInt(n) { + d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) + break + } + v.SetInt(n) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + n, err := strconv.ParseUint(s, 10, 64) + if err != nil || v.OverflowUint(n) { + d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) + break + } + v.SetUint(n) + + case reflect.Float32, reflect.Float64: + n, err := strconv.ParseFloat(s, v.Type().Bits()) + if err != nil || v.OverflowFloat(n) { + d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) + break + } + v.SetFloat(n) + } + } +} + +// The xxxInterface routines build up a value to be stored +// in an empty interface. They are not strictly necessary, +// but they avoid the weight of reflection in this common case. + +// valueInterface is like value but returns interface{} +func (d *decodeState) valueInterface() interface{} { + switch d.scanWhile(scanSkipSpace) { + default: + d.error(errPhase) + panic("unreachable") + case scanBeginArray: + return d.arrayInterface() + case scanBeginObject: + return d.objectInterface() + case scanBeginLiteral: + return d.literalInterface() + } +} + +// arrayInterface is like array but returns []interface{}. +func (d *decodeState) arrayInterface() []interface{} { + var v = make([]interface{}, 0) + for { + // Look ahead for ] - can only happen on first iteration. + op := d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + + // Back up so d.value can have the byte we just read. + d.off-- + d.scan.undo(op) + + v = append(v, d.valueInterface()) + + // Next token must be , or ]. + op = d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + if op != scanArrayValue { + d.error(errPhase) + } + } + return v +} + +// objectInterface is like object but returns map[string]interface{}. 
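+// Unlike the standard library's encoding/json, this package treats duplicate
+// object keys as an error rather than silently keeping the last value.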
+func (d *decodeState) objectInterface() map[string]interface{} { + m := make(map[string]interface{}) + keys := map[string]bool{} + + for { + // Read opening " of string key or closing }. + op := d.scanWhile(scanSkipSpace) + if op == scanEndObject { + // closing } - can only happen on first iteration. + break + } + if op != scanBeginLiteral { + d.error(errPhase) + } + + // Read string key. + start := d.off - 1 + op = d.scanWhile(scanContinue) + item := d.data[start : d.off-1] + key, ok := unquote(item) + if !ok { + d.error(errPhase) + } + + // Check for duplicate keys. + _, ok = keys[key] + if !ok { + keys[key] = true + } else { + d.error(fmt.Errorf("json: duplicate key '%s' in object", key)) + } + + // Read : before value. + if op == scanSkipSpace { + op = d.scanWhile(scanSkipSpace) + } + if op != scanObjectKey { + d.error(errPhase) + } + + // Read value. + m[key] = d.valueInterface() + + // Next token must be , or }. + op = d.scanWhile(scanSkipSpace) + if op == scanEndObject { + break + } + if op != scanObjectValue { + d.error(errPhase) + } + } + return m +} + +// literalInterface is like literal but returns an interface value. +func (d *decodeState) literalInterface() interface{} { + // All bytes inside literal return scanContinue op code. + start := d.off - 1 + op := d.scanWhile(scanContinue) + + // Scan read one byte too far; back up. + d.off-- + d.scan.undo(op) + item := d.data[start:d.off] + + switch c := item[0]; c { + case 'n': // null + return nil + + case 't', 'f': // true, false + return c == 't' + + case '"': // string + s, ok := unquote(item) + if !ok { + d.error(errPhase) + } + return s + + default: // number + if c != '-' && (c < '0' || c > '9') { + d.error(errPhase) + } + n, err := d.convertNumber(string(item)) + if err != nil { + d.saveError(err) + } + return n + } +} + +// getu4 decodes \uXXXX from the beginning of s, returning the hex value, +// or it returns -1. +func getu4(s []byte) rune { + if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { + return -1 + } + r, err := strconv.ParseUint(string(s[2:6]), 16, 64) + if err != nil { + return -1 + } + return rune(r) +} + +// unquote converts a quoted JSON string literal s into an actual string t. +// The rules are different than for Go, so cannot use strconv.Unquote. +func unquote(s []byte) (t string, ok bool) { + s, ok = unquoteBytes(s) + t = string(s) + return +} + +func unquoteBytes(s []byte) (t []byte, ok bool) { + if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' { + return + } + s = s[1 : len(s)-1] + + // Check for unusual characters. If there are none, + // then no unquoting is needed, so return a slice of the + // original bytes. + r := 0 + for r < len(s) { + c := s[r] + if c == '\\' || c == '"' || c < ' ' { + break + } + if c < utf8.RuneSelf { + r++ + continue + } + rr, size := utf8.DecodeRune(s[r:]) + if rr == utf8.RuneError && size == 1 { + break + } + r += size + } + if r == len(s) { + return s, true + } + + b := make([]byte, len(s)+2*utf8.UTFMax) + w := copy(b, s[0:r]) + for r < len(s) { + // Out of room? Can only happen if s is full of + // malformed UTF-8 and we're replacing each + // byte with RuneError. 
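+ // Each iteration below writes at most utf8.UTFMax bytes, so
+ // keeping 2*utf8.UTFMax bytes of headroom is a conservative bound.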
+ if w >= len(b)-2*utf8.UTFMax { + nb := make([]byte, (len(b)+utf8.UTFMax)*2) + copy(nb, b[0:w]) + b = nb + } + switch c := s[r]; { + case c == '\\': + r++ + if r >= len(s) { + return + } + switch s[r] { + default: + return + case '"', '\\', '/', '\'': + b[w] = s[r] + r++ + w++ + case 'b': + b[w] = '\b' + r++ + w++ + case 'f': + b[w] = '\f' + r++ + w++ + case 'n': + b[w] = '\n' + r++ + w++ + case 'r': + b[w] = '\r' + r++ + w++ + case 't': + b[w] = '\t' + r++ + w++ + case 'u': + r-- + rr := getu4(s[r:]) + if rr < 0 { + return + } + r += 6 + if utf16.IsSurrogate(rr) { + rr1 := getu4(s[r:]) + if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { + // A valid pair; consume. + r += 6 + w += utf8.EncodeRune(b[w:], dec) + break + } + // Invalid surrogate; fall back to replacement rune. + rr = unicode.ReplacementChar + } + w += utf8.EncodeRune(b[w:], rr) + } + + // Quote, control characters are invalid. + case c == '"', c < ' ': + return + + // ASCII + case c < utf8.RuneSelf: + b[w] = c + r++ + w++ + + // Coerce to well-formed UTF-8. + default: + rr, size := utf8.DecodeRune(s[r:]) + r += size + w += utf8.EncodeRune(b[w:], rr) + } + } + return b[0:w], true +} diff --git a/vendor/github.com/go-jose/go-jose/v3/json/encode.go b/vendor/github.com/go-jose/go-jose/v3/json/encode.go new file mode 100644 index 00000000000..ea0a1361987 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/json/encode.go @@ -0,0 +1,1197 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package json implements encoding and decoding of JSON objects as defined in +// RFC 4627. The mapping between JSON objects and Go values is described +// in the documentation for the Marshal and Unmarshal functions. +// +// See "JSON and Go" for an introduction to this package: +// https://golang.org/doc/articles/json_and_go.html +package json + +import ( + "bytes" + "encoding" + "encoding/base64" + "fmt" + "math" + "reflect" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +// Marshal returns the JSON encoding of v. +// +// Marshal traverses the value v recursively. +// If an encountered value implements the Marshaler interface +// and is not a nil pointer, Marshal calls its MarshalJSON method +// to produce JSON. If no MarshalJSON method is present but the +// value implements encoding.TextMarshaler instead, Marshal calls +// its MarshalText method. +// The nil pointer exception is not strictly necessary +// but mimics a similar, necessary exception in the behavior of +// UnmarshalJSON. +// +// Otherwise, Marshal uses the following type-dependent default encodings: +// +// Boolean values encode as JSON booleans. +// +// Floating point, integer, and Number values encode as JSON numbers. +// +// String values encode as JSON strings coerced to valid UTF-8, +// replacing invalid bytes with the Unicode replacement rune. +// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e" +// to keep some browsers from misinterpreting JSON output as HTML. +// Ampersand "&" is also escaped to "\u0026" for the same reason. +// +// Array and slice values encode as JSON arrays, except that +// []byte encodes as a base64-encoded string, and a nil slice +// encodes as the null JSON object. +// +// Struct values encode as JSON objects. 
Each exported struct field +// becomes a member of the object unless +// - the field's tag is "-", or +// - the field is empty and its tag specifies the "omitempty" option. +// The empty values are false, 0, any +// nil pointer or interface value, and any array, slice, map, or string of +// length zero. The object's default key string is the struct field name +// but can be specified in the struct field's tag value. The "json" key in +// the struct field's tag value is the key name, followed by an optional comma +// and options. Examples: +// +// // Field is ignored by this package. +// Field int `json:"-"` +// +// // Field appears in JSON as key "myName". +// Field int `json:"myName"` +// +// // Field appears in JSON as key "myName" and +// // the field is omitted from the object if its value is empty, +// // as defined above. +// Field int `json:"myName,omitempty"` +// +// // Field appears in JSON as key "Field" (the default), but +// // the field is skipped if empty. +// // Note the leading comma. +// Field int `json:",omitempty"` +// +// The "string" option signals that a field is stored as JSON inside a +// JSON-encoded string. It applies only to fields of string, floating point, +// integer, or boolean types. This extra level of encoding is sometimes used +// when communicating with JavaScript programs: +// +// Int64String int64 `json:",string"` +// +// The key name will be used if it's a non-empty string consisting of +// only Unicode letters, digits, dollar signs, percent signs, hyphens, +// underscores and slashes. +// +// Anonymous struct fields are usually marshaled as if their inner exported fields +// were fields in the outer struct, subject to the usual Go visibility rules amended +// as described in the next paragraph. +// An anonymous struct field with a name given in its JSON tag is treated as +// having that name, rather than being anonymous. +// An anonymous struct field of interface type is treated the same as having +// that type as its name, rather than being anonymous. +// +// The Go visibility rules for struct fields are amended for JSON when +// deciding which field to marshal or unmarshal. If there are +// multiple fields at the same level, and that level is the least +// nested (and would therefore be the nesting level selected by the +// usual Go rules), the following extra rules apply: +// +// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered, +// even if there are multiple untagged fields that would otherwise conflict. +// 2) If there is exactly one field (tagged or not according to the first rule), that is selected. +// 3) Otherwise there are multiple fields, and all are ignored; no error occurs. +// +// Handling of anonymous struct fields is new in Go 1.1. +// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of +// an anonymous struct field in both current and earlier versions, give the field +// a JSON tag of "-". +// +// Map values encode as JSON objects. +// The map's key type must be string; the map keys are used as JSON object +// keys, subject to the UTF-8 coercion described for string values above. +// +// Pointer values encode as the value pointed to. +// A nil pointer encodes as the null JSON object. +// +// Interface values encode as the value contained in the interface. +// A nil interface value encodes as the null JSON object. +// +// Channel, complex, and function values cannot be encoded in JSON. +// Attempting to encode such a value causes Marshal to return +// an UnsupportedTypeError. 
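+// For example, Marshal(make(chan int)) fails with such an error.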
+// +// JSON cannot represent cyclic data structures and Marshal does not +// handle them. Passing cyclic structures to Marshal will result in +// an infinite recursion. +// +func Marshal(v interface{}) ([]byte, error) { + e := &encodeState{} + err := e.marshal(v) + if err != nil { + return nil, err + } + return e.Bytes(), nil +} + +// MarshalIndent is like Marshal but applies Indent to format the output. +func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + b, err := Marshal(v) + if err != nil { + return nil, err + } + var buf bytes.Buffer + err = Indent(&buf, b, prefix, indent) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029 +// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029 +// so that the JSON will be safe to embed inside HTML `) + browserInfoPrefix = []byte(`window.NREUM||(NREUM={});NREUM.info=`) +) + +// browserInfo contains the fields that are marshalled into the Browser agent's +// info hash. +// +// https://newrelic.atlassian.net/wiki/spaces/eng/pages/50299103/BAM+Agent+Auto-Instrumentation +type browserInfo struct { + Beacon string `json:"beacon"` + LicenseKey string `json:"licenseKey"` + ApplicationID string `json:"applicationID"` + TransactionName string `json:"transactionName"` + QueueTimeMillis int64 `json:"queueTime"` + ApplicationTimeMillis int64 `json:"applicationTime"` + ObfuscatedAttributes string `json:"atts"` + ErrorBeacon string `json:"errorBeacon"` + Agent string `json:"agent"` +} + +// BrowserTimingHeader encapsulates the JavaScript required to enable New +// Relic's Browser product. +type BrowserTimingHeader struct { + agentLoader string + info browserInfo +} + +func appendSlices(slices ...[]byte) []byte { + length := 0 + for _, s := range slices { + length += len(s) + } + combined := make([]byte, 0, length) + for _, s := range slices { + combined = append(combined, s...) + } + return combined +} + +// WithTags returns the browser timing JavaScript which includes the enclosing +// tags. This method returns nil if the receiver is +// nil, the feature is disabled, the application is not yet connected, or an +// error occurs. The byte slice returned is in UTF-8 format. +func (h *BrowserTimingHeader) WithTags() []byte { + withoutTags := h.WithoutTags() + if nil == withoutTags { + return nil + } + return appendSlices(browserStartTag, withoutTags, browserEndTag) +} + +// WithoutTags returns the browser timing JavaScript without any enclosing tags, +// which may then be embedded within any JavaScript code. This method returns +// nil if the receiver is nil, the feature is disabled, the application is not +// yet connected, or an error occurs. The byte slice returned is in UTF-8 +// format. +func (h *BrowserTimingHeader) WithoutTags() []byte { + if nil == h { + return nil + } + + // We could memoise this, but it seems unnecessary, since most users are + // going to call this zero or one times. + info, err := json.Marshal(h.info) + if err != nil { + // There's no way to log from here, but this also should be unreachable in + // practice. 
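+ // (browserInfo contains only string and int64 fields, which
+ // json.Marshal always encodes successfully.)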
+ return nil + } + + return appendSlices([]byte(h.agentLoader), browserInfoPrefix, info) +} diff --git a/vendor/github.com/newrelic/go-agent/build-script.sh b/vendor/github.com/newrelic/go-agent/build-script.sh new file mode 100644 index 00000000000..71fd37826d1 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/build-script.sh @@ -0,0 +1,26 @@ +set -x +set -e + +LATEST_VERSION="go1.13" + +if [ -n "$INTEGRATION" ]; then + cd $INTEGRATION + go get -t ./... +fi + +go test -race -benchtime=1ms -bench=. ./... +go vet ./... + +if [[ -n "$(go version | grep $LATEST_VERSION)" ]]; then + # golint requires a supported version of Go, which in practice is currently 1.9+. + # See: https://github.com/golang/lint#installation + # For simplicity, run it on a single Go version. + go get -u golang.org/x/lint/golint + golint -set_exit_status ./... + + # only run gofmt on a single version as the format changed from 1.10 to + # 1.11. + if [ -n "$(gofmt -s -l .)" ]; then + exit 1 + fi +fi diff --git a/vendor/github.com/newrelic/go-agent/config.go b/vendor/github.com/newrelic/go-agent/config.go new file mode 100644 index 00000000000..170d75daf45 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/config.go @@ -0,0 +1,397 @@ +package newrelic + +import ( + "errors" + "fmt" + "net/http" + "strings" + "time" +) + +// Config contains Application and Transaction behavior settings. +// Use NewConfig to create a Config with proper defaults. +type Config struct { + // AppName is used by New Relic to link data across servers. + // + // https://docs.newrelic.com/docs/apm/new-relic-apm/installation-configuration/naming-your-application + AppName string + + // License is your New Relic license key. + // + // https://docs.newrelic.com/docs/accounts/install-new-relic/account-setup/license-key + License string + + // Logger controls go-agent logging. For info level logging to stdout: + // + // cfg.Logger = newrelic.NewLogger(os.Stdout) + // + // For debug level logging to stdout: + // + // cfg.Logger = newrelic.NewDebugLogger(os.Stdout) + // + // See https://github.com/newrelic/go-agent/blob/master/GUIDE.md#logging + // for more examples and logging integrations. + Logger Logger + + // Enabled controls whether the agent will communicate with the New Relic + // servers and spawn goroutines. Setting this to be false is useful in + // testing and staging situations. + Enabled bool + + // Labels are key value pairs used to roll up applications into specific + // categories. + // + // https://docs.newrelic.com/docs/using-new-relic/user-interface-functions/organize-your-data/labels-categories-organize-apps-monitors + Labels map[string]string + + // HighSecurity guarantees that certain agent settings can not be made + // more permissive. This setting must match the corresponding account + // setting in the New Relic UI. + // + // https://docs.newrelic.com/docs/agents/manage-apm-agents/configuration/high-security-mode + HighSecurity bool + + // SecurityPoliciesToken enables security policies if set to a non-empty + // string. Only set this if security policies have been enabled on your + // account. This cannot be used in conjunction with HighSecurity. + // + // https://docs.newrelic.com/docs/agents/manage-apm-agents/configuration/enable-configurable-security-policies + SecurityPoliciesToken string + + // CustomInsightsEvents controls the behavior of + // Application.RecordCustomEvent. 
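+ //
+ // For example (illustrative event name and attributes):
+ //
+ //     app.RecordCustomEvent("purchase", map[string]interface{}{
+ //         "amount": 25.00,
+ //     })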
+ // + // https://docs.newrelic.com/docs/insights/new-relic-insights/adding-querying-data/inserting-custom-events-new-relic-apm-agents + CustomInsightsEvents struct { + // Enabled controls whether RecordCustomEvent will collect + // custom analytics events. High security mode overrides this + // setting. + Enabled bool + } + + // TransactionEvents controls the behavior of transaction analytics + // events. + TransactionEvents struct { + // Enabled controls whether transaction events are captured. + Enabled bool + // Attributes controls the attributes included with transaction + // events. + Attributes AttributeDestinationConfig + } + + // ErrorCollector controls the capture of errors. + ErrorCollector struct { + // Enabled controls whether errors are captured. This setting + // affects both traced errors and error analytics events. + Enabled bool + // CaptureEvents controls whether error analytics events are + // captured. + CaptureEvents bool + // IgnoreStatusCodes controls which http response codes are + // automatically turned into errors. By default, response codes + // greater than or equal to 400, with the exception of 404, are + // turned into errors. + IgnoreStatusCodes []int + // Attributes controls the attributes included with errors. + Attributes AttributeDestinationConfig + } + + // TransactionTracer controls the capture of transaction traces. + TransactionTracer struct { + // Enabled controls whether transaction traces are captured. + Enabled bool + // Threshold controls whether a transaction trace will be + // considered for capture. Of the traces exceeding the + // threshold, the slowest trace every minute is captured. + Threshold struct { + // If IsApdexFailing is true then the trace threshold is + // four times the apdex threshold. + IsApdexFailing bool + // If IsApdexFailing is false then this field is the + // threshold, otherwise it is ignored. + Duration time.Duration + } + // SegmentThreshold is the threshold at which segments will be + // added to the trace. Lowering this setting may increase + // overhead. Decrease this duration if your Transaction Traces are + // missing segments. + SegmentThreshold time.Duration + // StackTraceThreshold is the threshold at which segments will + // be given a stack trace in the transaction trace. Lowering + // this setting will increase overhead. + StackTraceThreshold time.Duration + // Attributes controls the attributes included with transaction + // traces. + Attributes AttributeDestinationConfig + // Segments.Attributes controls the attributes included with + // each trace segment. + Segments struct { + Attributes AttributeDestinationConfig + } + } + + // BrowserMonitoring contains settings which control the behavior of + // Transaction.BrowserTimingHeader. + BrowserMonitoring struct { + // Enabled controls whether or not the Browser monitoring feature is + // enabled. + Enabled bool + // Attributes controls the attributes included with Browser monitoring. + // BrowserMonitoring.Attributes.Enabled is false by default, to include + // attributes in the Browser timing Javascript: + // + // cfg.BrowserMonitoring.Attributes.Enabled = true + Attributes AttributeDestinationConfig + } + + // HostDisplayName gives this server a recognizable name in the New + // Relic UI. This is an optional setting. + HostDisplayName string + + // Transport customizes communication with the New Relic servers. This may + // be used to configure a proxy. 
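+ // For example (hypothetical proxyURL):
+ //
+ //     cfg.Transport = &http.Transport{Proxy: http.ProxyURL(proxyURL)}
+ //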
+ Transport http.RoundTripper + + // Utilization controls the detection and gathering of system + // information. + Utilization struct { + // DetectAWS controls whether the Application attempts to detect + // AWS. + DetectAWS bool + // DetectAzure controls whether the Application attempts to detect + // Azure. + DetectAzure bool + // DetectPCF controls whether the Application attempts to detect + // PCF. + DetectPCF bool + // DetectGCP controls whether the Application attempts to detect + // GCP. + DetectGCP bool + // DetectDocker controls whether the Application attempts to + // detect Docker. + DetectDocker bool + // DetectKubernetes controls whether the Application attempts to + // detect Kubernetes. + DetectKubernetes bool + + // These settings provide system information when custom values + // are required. + LogicalProcessors int + TotalRAMMIB int + BillingHostname string + } + + // CrossApplicationTracer controls behaviour relating to cross application + // tracing (CAT), available since Go Agent v0.11. The + // CrossApplicationTracer and the DistributedTracer cannot be + // simultaneously enabled. + // + // https://docs.newrelic.com/docs/apm/transactions/cross-application-traces/introduction-cross-application-traces + CrossApplicationTracer struct { + Enabled bool + } + + // DistributedTracer controls behaviour relating to Distributed Tracing, + // available since Go Agent v2.1. The DistributedTracer and the + // CrossApplicationTracer cannot be simultaneously enabled. + // + // https://docs.newrelic.com/docs/apm/distributed-tracing/getting-started/introduction-distributed-tracing + DistributedTracer struct { + Enabled bool + } + + // SpanEvents controls behavior relating to Span Events. Span Events + // require that DistributedTracer is enabled. + SpanEvents struct { + Enabled bool + Attributes AttributeDestinationConfig + } + + // DatastoreTracer controls behavior relating to datastore segments. + DatastoreTracer struct { + // InstanceReporting controls whether the host and port are collected + // for datastore segments. + InstanceReporting struct { + Enabled bool + } + // DatabaseNameReporting controls whether the database name is + // collected for datastore segments. + DatabaseNameReporting struct { + Enabled bool + } + QueryParameters struct { + Enabled bool + } + // SlowQuery controls the capture of slow query traces. Slow + // query traces show you instances of your slowest datastore + // segments. + SlowQuery struct { + Enabled bool + Threshold time.Duration + } + } + + // Attributes controls which attributes are enabled and disabled globally. + // This setting affects all attribute destinations: Transaction Events, + // Error Events, Transaction Traces and segments, Traced Errors, Span + // Events, and Browser timing header. + Attributes AttributeDestinationConfig + + // RuntimeSampler controls the collection of runtime statistics like + // CPU/Memory usage, goroutine count, and GC pauses. + RuntimeSampler struct { + // Enabled controls whether runtime statistics are captured. + Enabled bool + } + + // ServerlessMode contains fields which control behavior when running in + // AWS Lambda. + // + // https://docs.newrelic.com/docs/serverless-function-monitoring/aws-lambda-monitoring/get-started/introduction-new-relic-monitoring-aws-lambda + ServerlessMode struct { + // Enabling ServerlessMode will print each transaction's data to + // stdout. No agent goroutines will be spawned in serverless mode, and + // no data will be sent directly to the New Relic backend. 
+ // nrlambda.NewConfig sets Enabled to true. + Enabled bool + // ApdexThreshold sets the Apdex threshold when in ServerlessMode. The + // default is 500 milliseconds. nrlambda.NewConfig populates this + // field using the NEW_RELIC_APDEX_T environment variable. + // + // https://docs.newrelic.com/docs/apm/new-relic-apm/apdex/apdex-measure-user-satisfaction + ApdexThreshold time.Duration + // AccountID, TrustedAccountKey, and PrimaryAppID are used for + // distributed tracing in ServerlessMode. AccountID and + // TrustedAccountKey must be populated for distributed tracing to be + // enabled. nrlambda.NewConfig populates these fields using the + // NEW_RELIC_ACCOUNT_ID, NEW_RELIC_TRUSTED_ACCOUNT_KEY, and + // NEW_RELIC_PRIMARY_APPLICATION_ID environment variables. + AccountID string + TrustedAccountKey string + PrimaryAppID string + } +} + +// AttributeDestinationConfig controls the attributes sent to each destination. +// For more information, see: +// https://docs.newrelic.com/docs/agents/manage-apm-agents/agent-data/agent-attributes +type AttributeDestinationConfig struct { + // Enabled controls whether or not this destination will get any + // attributes at all. For example, to prevent any attributes from being + // added to errors, set: + // + // cfg.ErrorCollector.Attributes.Enabled = false + // + Enabled bool + Include []string + // Exclude allows you to prevent the capture of certain attributes. For + // example, to prevent the capture of the request URL attribute + // "request.uri", set: + // + // cfg.Attributes.Exclude = append(cfg.Attributes.Exclude, newrelic.AttributeRequestURI) + // + // The '*' character acts as a wildcard. For example, to prevent the + // capture of all request related attributes, set: + // + // cfg.Attributes.Exclude = append(cfg.Attributes.Exclude, "request.*") + // + Exclude []string +} + +// NewConfig creates a Config populated with default settings and the given +// appname and license. 
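+//
+// For example (license key read from the environment for illustration):
+//
+//     cfg := newrelic.NewConfig("My Application", os.Getenv("NEW_RELIC_LICENSE_KEY"))
+//     app, err := newrelic.NewApplication(cfg)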
+func NewConfig(appname, license string) Config { + c := Config{} + + c.AppName = appname + c.License = license + c.Enabled = true + c.Labels = make(map[string]string) + c.CustomInsightsEvents.Enabled = true + c.TransactionEvents.Enabled = true + c.TransactionEvents.Attributes.Enabled = true + c.HighSecurity = false + c.ErrorCollector.Enabled = true + c.ErrorCollector.CaptureEvents = true + c.ErrorCollector.IgnoreStatusCodes = []int{ + // https://github.com/grpc/grpc/blob/master/doc/statuscodes.md + 0, // gRPC OK + 5, // gRPC NOT_FOUND + http.StatusNotFound, // 404 + } + c.ErrorCollector.Attributes.Enabled = true + c.Utilization.DetectAWS = true + c.Utilization.DetectAzure = true + c.Utilization.DetectPCF = true + c.Utilization.DetectGCP = true + c.Utilization.DetectDocker = true + c.Utilization.DetectKubernetes = true + c.Attributes.Enabled = true + c.RuntimeSampler.Enabled = true + + c.TransactionTracer.Enabled = true + c.TransactionTracer.Threshold.IsApdexFailing = true + c.TransactionTracer.Threshold.Duration = 500 * time.Millisecond + c.TransactionTracer.SegmentThreshold = 2 * time.Millisecond + c.TransactionTracer.StackTraceThreshold = 500 * time.Millisecond + c.TransactionTracer.Attributes.Enabled = true + c.TransactionTracer.Segments.Attributes.Enabled = true + + c.BrowserMonitoring.Enabled = true + // browser monitoring attributes are disabled by default + c.BrowserMonitoring.Attributes.Enabled = false + + c.CrossApplicationTracer.Enabled = true + c.DistributedTracer.Enabled = false + c.SpanEvents.Enabled = true + c.SpanEvents.Attributes.Enabled = true + + c.DatastoreTracer.InstanceReporting.Enabled = true + c.DatastoreTracer.DatabaseNameReporting.Enabled = true + c.DatastoreTracer.QueryParameters.Enabled = true + c.DatastoreTracer.SlowQuery.Enabled = true + c.DatastoreTracer.SlowQuery.Threshold = 10 * time.Millisecond + + c.ServerlessMode.ApdexThreshold = 500 * time.Millisecond + c.ServerlessMode.Enabled = false + + return c +} + +const ( + licenseLength = 40 + appNameLimit = 3 +) + +// The following errors will be returned if your Config fails to validate. +var ( + errLicenseLen = fmt.Errorf("license length is not %d", licenseLength) + errAppNameMissing = errors.New("string AppName required") + errAppNameLimit = fmt.Errorf("max of %d rollup application names", appNameLimit) + errHighSecurityWithSecurityPolicies = errors.New("SecurityPoliciesToken and HighSecurity are incompatible; please ensure HighSecurity is set to false if SecurityPoliciesToken is a non-empty string and a security policy has been set for your account") +) + +// Validate checks the config for improper fields. If the config is invalid, +// newrelic.NewApplication returns an error. +func (c Config) Validate() error { + if c.Enabled && !c.ServerlessMode.Enabled { + if len(c.License) != licenseLength { + return errLicenseLen + } + } else { + // The License may be empty when the agent is not enabled. 
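+ // A malformed non-empty license is still rejected, so configuration
+ // typos surface even when the agent is disabled.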
+ if len(c.License) != licenseLength && len(c.License) != 0 { + return errLicenseLen + } + } + if "" == c.AppName && c.Enabled && !c.ServerlessMode.Enabled { + return errAppNameMissing + } + if c.HighSecurity && "" != c.SecurityPoliciesToken { + return errHighSecurityWithSecurityPolicies + } + if strings.Count(c.AppName, ";") >= appNameLimit { + return errAppNameLimit + } + return nil +} diff --git a/vendor/github.com/newrelic/go-agent/context.go b/vendor/github.com/newrelic/go-agent/context.go new file mode 100644 index 00000000000..d0063135e4a --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/context.go @@ -0,0 +1,49 @@ +// +build go1.7 + +package newrelic + +import ( + "context" + "net/http" + + "github.com/newrelic/go-agent/internal" +) + +// NewContext returns a new Context that carries the provided transaction. +func NewContext(ctx context.Context, txn Transaction) context.Context { + return context.WithValue(ctx, internal.TransactionContextKey, txn) +} + +// FromContext returns the Transaction from the context if present, and nil +// otherwise. +func FromContext(ctx context.Context) Transaction { + h, _ := ctx.Value(internal.TransactionContextKey).(Transaction) + if nil != h { + return h + } + // If we couldn't find a transaction using + // internal.TransactionContextKey, try with + // internal.GinTransactionContextKey. Unfortunately, gin.Context.Set + // requires a string key, so we cannot use + // internal.TransactionContextKey in nrgin.Middleware. We check for two + // keys (rather than turning internal.TransactionContextKey into a + // string key) because context.WithValue will cause golint to complain + // if used with a string key. + h, _ = ctx.Value(internal.GinTransactionContextKey).(Transaction) + return h +} + +// RequestWithTransactionContext adds the transaction to the request's context. +func RequestWithTransactionContext(req *http.Request, txn Transaction) *http.Request { + ctx := req.Context() + ctx = NewContext(ctx, txn) + return req.WithContext(ctx) +} + +func transactionFromRequestContext(req *http.Request) Transaction { + var txn Transaction + if nil != req { + txn = FromContext(req.Context()) + } + return txn +} diff --git a/vendor/github.com/newrelic/go-agent/context_stub.go b/vendor/github.com/newrelic/go-agent/context_stub.go new file mode 100644 index 00000000000..03ea58da35d --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/context_stub.go @@ -0,0 +1,14 @@ +// +build !go1.7 + +package newrelic + +import "net/http" + +// RequestWithTransactionContext adds the transaction to the request's context. +func RequestWithTransactionContext(req *http.Request, txn Transaction) *http.Request { + return req +} + +func transactionFromRequestContext(req *http.Request) Transaction { + return nil +} diff --git a/vendor/github.com/newrelic/go-agent/datastore.go b/vendor/github.com/newrelic/go-agent/datastore.go new file mode 100644 index 00000000000..9dff98f35d5 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/datastore.go @@ -0,0 +1,31 @@ +package newrelic + +// DatastoreProduct is used to identify your datastore type in New Relic. It +// is used in the DatastoreSegment Product field. See +// https://github.com/newrelic/go-agent/blob/master/datastore.go for the full +// list of available DatastoreProducts. 
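+//
+// For example (illustrative datastore segment; txn is an existing
+// Transaction):
+//
+//     s := newrelic.DatastoreSegment{
+//         StartTime:  newrelic.StartSegmentNow(txn),
+//         Product:    newrelic.DatastoreMySQL,
+//         Collection: "users",
+//         Operation:  "SELECT",
+//     }
+//     s.End()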
+type DatastoreProduct string + +// Datastore names used across New Relic agents: +const ( + DatastoreCassandra DatastoreProduct = "Cassandra" + DatastoreDerby = "Derby" + DatastoreElasticsearch = "Elasticsearch" + DatastoreFirebird = "Firebird" + DatastoreIBMDB2 = "IBMDB2" + DatastoreInformix = "Informix" + DatastoreMemcached = "Memcached" + DatastoreMongoDB = "MongoDB" + DatastoreMySQL = "MySQL" + DatastoreMSSQL = "MSSQL" + DatastoreNeptune = "Neptune" + DatastoreOracle = "Oracle" + DatastorePostgres = "Postgres" + DatastoreRedis = "Redis" + DatastoreSolr = "Solr" + DatastoreSQLite = "SQLite" + DatastoreCouchDB = "CouchDB" + DatastoreRiak = "Riak" + DatastoreVoltDB = "VoltDB" + DatastoreDynamoDB = "DynamoDB" +) diff --git a/vendor/github.com/newrelic/go-agent/errors.go b/vendor/github.com/newrelic/go-agent/errors.go new file mode 100644 index 00000000000..c2020716612 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/errors.go @@ -0,0 +1,42 @@ +package newrelic + +// StackTracer can be implemented by errors to provide a stack trace when using +// Transaction.NoticeError. +type StackTracer interface { + StackTrace() []uintptr +} + +// ErrorClasser can be implemented by errors to provide a custom class when +// using Transaction.NoticeError. +type ErrorClasser interface { + ErrorClass() string +} + +// ErrorAttributer can be implemented by errors to provide extra context when +// using Transaction.NoticeError. +type ErrorAttributer interface { + ErrorAttributes() map[string]interface{} +} + +// Error is an error that implements ErrorClasser and ErrorAttributer. Use it +// with Transaction.NoticeError to directly control error message, class, and +// attributes. +type Error struct { + // Message is the error message which will be returned by the Error() + // method. + Message string + // Class indicates how the error may be aggregated. + Class string + // Attributes are attached to traced errors and error events for + // additional context. These attributes are validated just like those + // added to `Transaction.AddAttribute`. + Attributes map[string]interface{} +} + +func (e Error) Error() string { return e.Message } + +// ErrorClass implements the ErrorClasser interface. +func (e Error) ErrorClass() string { return e.Class } + +// ErrorAttributes implements the ErrorAttributes interface. +func (e Error) ErrorAttributes() map[string]interface{} { return e.Attributes } diff --git a/vendor/github.com/newrelic/go-agent/instrumentation.go b/vendor/github.com/newrelic/go-agent/instrumentation.go new file mode 100644 index 00000000000..a5c50c2d3ca --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/instrumentation.go @@ -0,0 +1,113 @@ +package newrelic + +import ( + "net/http" +) + +// instrumentation.go contains helpers built on the lower level api. + +// WrapHandle instruments http.Handler handlers with transactions. To +// instrument this code: +// +// http.Handle("/foo", myHandler) +// +// Perform this replacement: +// +// http.Handle(newrelic.WrapHandle(app, "/foo", myHandler)) +// +// WrapHandle adds the Transaction to the request's context. Access it using +// FromContext to add attributes, create segments, or notice errors: +// +// func myHandler(rw ResponseWriter, req *Request) { +// if txn := newrelic.FromContext(req.Context()); nil != txn { +// txn.AddAttribute("customerLevel", "gold") +// } +// } +// +// This function is safe to call if app is nil. 
+func WrapHandle(app Application, pattern string, handler http.Handler) (string, http.Handler) { + if app == nil { + return pattern, handler + } + return pattern, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + txn := app.StartTransaction(pattern, w, r) + defer txn.End() + + r = RequestWithTransactionContext(r, txn) + + handler.ServeHTTP(txn, r) + }) +} + +// WrapHandleFunc instruments handler functions using transactions. To +// instrument this code: +// +// http.HandleFunc("/users", func(w http.ResponseWriter, req *http.Request) { +// io.WriteString(w, "users page") +// }) +// +// Perform this replacement: +// +// http.HandleFunc(WrapHandleFunc(app, "/users", func(w http.ResponseWriter, req *http.Request) { +// io.WriteString(w, "users page") +// })) +// +// WrapHandleFunc adds the Transaction to the request's context. Access it using +// FromContext to add attributes, create segments, or notice errors: +// +// http.HandleFunc(WrapHandleFunc(app, "/users", func(w http.ResponseWriter, req *http.Request) { +// if txn := newrelic.FromContext(req.Context()); nil != txn { +// txn.AddAttribute("customerLevel", "gold") +// } +// io.WriteString(w, "users page") +// })) +// +// This function is safe to call if app is nil. +func WrapHandleFunc(app Application, pattern string, handler func(http.ResponseWriter, *http.Request)) (string, func(http.ResponseWriter, *http.Request)) { + p, h := WrapHandle(app, pattern, http.HandlerFunc(handler)) + return p, func(w http.ResponseWriter, r *http.Request) { h.ServeHTTP(w, r) } +} + +// NewRoundTripper creates an http.RoundTripper to instrument external requests +// and add distributed tracing headers. The RoundTripper returned creates an +// external segment before delegating to the original RoundTripper provided (or +// http.DefaultTransport if none is provided). If the Transaction parameter is +// nil then the RoundTripper will look for a Transaction in the request's +// context (using FromContext). Using a nil Transaction is STRONGLY recommended +// because it allows the same RoundTripper (and client) to be reused for +// multiple transactions. +func NewRoundTripper(txn Transaction, original http.RoundTripper) http.RoundTripper { + return roundTripperFunc(func(request *http.Request) (*http.Response, error) { + // The specification of http.RoundTripper requires that the request is never modified. + request = cloneRequest(request) + segment := StartExternalSegment(txn, request) + + if nil == original { + original = http.DefaultTransport + } + response, err := original.RoundTrip(request) + + segment.Response = response + segment.End() + + return response, err + }) +} + +// cloneRequest mimics implementation of +// https://godoc.org/github.com/google/go-github/github#BasicAuthTransport.RoundTrip +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header, len(r.Header)) + for k, s := range r.Header { + r2.Header[k] = append([]string(nil), s...) 
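+ // (Appending to a nil slice copies s into fresh backing storage,
+ // so later mutation of r2's header values cannot affect r.)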
+ } + return r2 +} + +type roundTripperFunc func(*http.Request) (*http.Response, error) + +func (f roundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) { return f(r) } diff --git a/vendor/github.com/newrelic/go-agent/internal/adaptive_sampler.go b/vendor/github.com/newrelic/go-agent/internal/adaptive_sampler.go new file mode 100644 index 00000000000..f965d85343a --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/adaptive_sampler.go @@ -0,0 +1,99 @@ +package internal + +import ( + "math" + "sync" + "time" +) + +// AdaptiveSampler calculates which transactions should be sampled. An interface +// is used in the connect reply to facilitate testing. +type AdaptiveSampler interface { + ComputeSampled(priority float32, now time.Time) bool +} + +// SampleEverything is used for testing. +type SampleEverything struct{} + +// SampleNothing is used when the application is not yet connected. +type SampleNothing struct{} + +// ComputeSampled implements AdaptiveSampler. +func (s SampleEverything) ComputeSampled(priority float32, now time.Time) bool { return true } + +// ComputeSampled implements AdaptiveSampler. +func (s SampleNothing) ComputeSampled(priority float32, now time.Time) bool { return false } + +type adaptiveSampler struct { + sync.Mutex + period time.Duration + target uint64 + + // Transactions with priority higher than this are sampled. + // This is 1 - sampleRatio. + priorityMin float32 + + currentPeriod struct { + numSampled uint64 + numSeen uint64 + end time.Time + } +} + +// NewAdaptiveSampler creates an AdaptiveSampler. +func NewAdaptiveSampler(period time.Duration, target uint64, now time.Time) AdaptiveSampler { + as := &adaptiveSampler{} + as.period = period + as.target = target + as.currentPeriod.end = now.Add(period) + + // Sample the first transactions in the first period. + as.priorityMin = 0.0 + return as +} + +// ComputeSampled calculates if the transaction should be sampled. +func (as *adaptiveSampler) ComputeSampled(priority float32, now time.Time) bool { + as.Lock() + defer as.Unlock() + + // If the current time is after the end of the "currentPeriod". This is in + // a `for`/`while` loop in case there's a harvest where no sampling happened. + // i.e. 
for situations where a single call to + // as.currentPeriod.end = as.currentPeriod.end.Add(as.period) + // might not catch us up to the current period + for now.After(as.currentPeriod.end) { + as.priorityMin = 0.0 + if as.currentPeriod.numSeen > 0 { + sampledRatio := float32(as.target) / float32(as.currentPeriod.numSeen) + as.priorityMin = 1.0 - sampledRatio + } + as.currentPeriod.numSampled = 0 + as.currentPeriod.numSeen = 0 + as.currentPeriod.end = as.currentPeriod.end.Add(as.period) + } + + as.currentPeriod.numSeen++ + + // exponential backoff -- if the number of sampled items is greater than our + // target, we need to apply the exponential backoff + if as.currentPeriod.numSampled > as.target { + if as.computeSampledBackoff(as.target, as.currentPeriod.numSeen, as.currentPeriod.numSampled) { + as.currentPeriod.numSampled++ + return true + } + return false + } + + if priority >= as.priorityMin { + as.currentPeriod.numSampled++ + return true + } + + return false +} + +func (as *adaptiveSampler) computeSampledBackoff(target uint64, decidedCount uint64, sampledTrueCount uint64) bool { + return float64(RandUint64N(decidedCount)) < + math.Pow(float64(target), (float64(target)/float64(sampledTrueCount)))-math.Pow(float64(target), 0.5) +} diff --git a/vendor/github.com/newrelic/go-agent/internal/analytics_events.go b/vendor/github.com/newrelic/go-agent/internal/analytics_events.go new file mode 100644 index 00000000000..cb1b94f722b --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/analytics_events.go @@ -0,0 +1,145 @@ +package internal + +import ( + "bytes" + "container/heap" + + "github.com/newrelic/go-agent/internal/jsonx" +) + +type analyticsEvent struct { + priority Priority + jsonWriter +} + +type analyticsEventHeap []analyticsEvent + +type analyticsEvents struct { + numSeen int + events analyticsEventHeap + failedHarvests int +} + +func (events *analyticsEvents) NumSeen() float64 { return float64(events.numSeen) } +func (events *analyticsEvents) NumSaved() float64 { return float64(len(events.events)) } + +func (h analyticsEventHeap) Len() int { return len(h) } +func (h analyticsEventHeap) Less(i, j int) bool { return h[i].priority.isLowerPriority(h[j].priority) } +func (h analyticsEventHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } + +// Push and Pop are unused: only heap.Init and heap.Fix are used. +func (h analyticsEventHeap) Push(x interface{}) {} +func (h analyticsEventHeap) Pop() interface{} { return nil } + +func newAnalyticsEvents(max int) *analyticsEvents { + return &analyticsEvents{ + numSeen: 0, + events: make(analyticsEventHeap, 0, max), + failedHarvests: 0, + } +} + +func (events *analyticsEvents) capacity() int { + return cap(events.events) +} + +func (events *analyticsEvents) addEvent(e analyticsEvent) { + events.numSeen++ + + if events.capacity() == 0 { + // Configurable event harvest limits may be zero. + return + } + + if len(events.events) < cap(events.events) { + events.events = append(events.events, e) + if len(events.events) == cap(events.events) { + // Delay heap initialization so that we can have + // deterministic ordering for integration tests (the max + // is not being reached). 
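+			// From here on events is a min-heap keyed on
+			// priority: events[0] is the lowest-priority saved
+			// event, and is the replacement candidate below once
+			// the reservoir is full.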
+ heap.Init(events.events) + } + return + } + + if e.priority.isLowerPriority((events.events)[0].priority) { + return + } + + events.events[0] = e + heap.Fix(events.events, 0) +} + +func (events *analyticsEvents) mergeFailed(other *analyticsEvents) { + fails := other.failedHarvests + 1 + if fails >= failedEventsAttemptsLimit { + return + } + events.failedHarvests = fails + events.Merge(other) +} + +func (events *analyticsEvents) Merge(other *analyticsEvents) { + allSeen := events.numSeen + other.numSeen + + for _, e := range other.events { + events.addEvent(e) + } + events.numSeen = allSeen +} + +func (events *analyticsEvents) CollectorJSON(agentRunID string) ([]byte, error) { + if 0 == len(events.events) { + return nil, nil + } + + estimate := 256 * len(events.events) + buf := bytes.NewBuffer(make([]byte, 0, estimate)) + + buf.WriteByte('[') + jsonx.AppendString(buf, agentRunID) + buf.WriteByte(',') + buf.WriteByte('{') + buf.WriteString(`"reservoir_size":`) + jsonx.AppendUint(buf, uint64(cap(events.events))) + buf.WriteByte(',') + buf.WriteString(`"events_seen":`) + jsonx.AppendUint(buf, uint64(events.numSeen)) + buf.WriteByte('}') + buf.WriteByte(',') + buf.WriteByte('[') + for i, e := range events.events { + if i > 0 { + buf.WriteByte(',') + } + e.WriteJSON(buf) + } + buf.WriteByte(']') + buf.WriteByte(']') + + return buf.Bytes(), nil + +} + +// split splits the events into two. NOTE! The two event pools are not valid +// priority queues, and should only be used to create JSON, not for adding any +// events. +func (events *analyticsEvents) split() (*analyticsEvents, *analyticsEvents) { + // numSeen is conserved: e1.numSeen + e2.numSeen == events.numSeen. + e1 := &analyticsEvents{ + numSeen: len(events.events) / 2, + events: make([]analyticsEvent, len(events.events)/2), + failedHarvests: events.failedHarvests, + } + e2 := &analyticsEvents{ + numSeen: events.numSeen - e1.numSeen, + events: make([]analyticsEvent, len(events.events)-len(e1.events)), + failedHarvests: events.failedHarvests, + } + // Note that slicing is not used to ensure that length == capacity for + // e1.events and e2.events. + copy(e1.events, events.events) + copy(e2.events, events.events[len(events.events)/2:]) + + return e1, e2 +} diff --git a/vendor/github.com/newrelic/go-agent/internal/apdex.go b/vendor/github.com/newrelic/go-agent/internal/apdex.go new file mode 100644 index 00000000000..28225f7d017 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/apdex.go @@ -0,0 +1,48 @@ +package internal + +import "time" + +// ApdexZone is a transaction classification. +type ApdexZone int + +// https://en.wikipedia.org/wiki/Apdex +const ( + ApdexNone ApdexZone = iota + ApdexSatisfying + ApdexTolerating + ApdexFailing +) + +// ApdexFailingThreshold calculates the threshold at which the transaction is +// considered a failure. +func ApdexFailingThreshold(threshold time.Duration) time.Duration { + return 4 * threshold +} + +// CalculateApdexZone calculates the apdex based on the transaction duration and +// threshold. +// +// Note that this does not take into account whether or not the transaction +// had an error. That is expected to be done by the caller. 
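+//
+// For example, with a 500ms threshold: a 300ms transaction is satisfying, a
+// 1s transaction is tolerating, and anything over the 2s failing threshold
+// (4 * 500ms, per ApdexFailingThreshold) is failing.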
+func CalculateApdexZone(threshold, duration time.Duration) ApdexZone { + if duration <= threshold { + return ApdexSatisfying + } + if duration <= ApdexFailingThreshold(threshold) { + return ApdexTolerating + } + return ApdexFailing +} + +func (zone ApdexZone) label() string { + switch zone { + case ApdexSatisfying: + return "S" + case ApdexTolerating: + return "T" + case ApdexFailing: + return "F" + default: + return "" + } +} diff --git a/vendor/github.com/newrelic/go-agent/internal/attributes.go b/vendor/github.com/newrelic/go-agent/internal/attributes.go new file mode 100644 index 00000000000..96cb3ecbe8a --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/attributes.go @@ -0,0 +1,601 @@ +package internal + +import ( + "bytes" + "fmt" + "net/http" + "net/url" + "sort" + "strconv" + "strings" +) + +// AgentAttributeID uniquely identifies each agent attribute. +type AgentAttributeID int + +// New agent attributes must be added in the following places: +// * Constants here. +// * Top level attributes.go file. +// * agentAttributeInfo +const ( + AttributeHostDisplayName AgentAttributeID = iota + attributeRequestMethod + attributeRequestAcceptHeader + attributeRequestContentType + attributeRequestContentLength + attributeRequestHeadersHost + attributeRequestHeadersUserAgent + attributeRequestHeadersReferer + attributeRequestURI + attributeResponseHeadersContentType + attributeResponseHeadersContentLength + attributeResponseCode + AttributeAWSRequestID + AttributeAWSLambdaARN + AttributeAWSLambdaColdStart + AttributeAWSLambdaEventSourceARN +) + +// SpanAttribute is an attribute put in span events. +type SpanAttribute string + +// AddAgentSpanAttributer should be implemented by the Transaction. +type AddAgentSpanAttributer interface { + AddAgentSpanAttribute(key SpanAttribute, val string) +} + +// AddAgentSpanAttribute allows instrumentation packages to add span attributes. +func AddAgentSpanAttribute(txn interface{}, key SpanAttribute, val string) { + if aa, ok := txn.(AddAgentSpanAttributer); ok { + aa.AddAgentSpanAttribute(key, val) + } +} + +// These span event string constants must match the contents of the top level +// attributes.go file. +const ( + spanAttributeDBStatement SpanAttribute = "db.statement" + spanAttributeDBInstance SpanAttribute = "db.instance" + spanAttributeDBCollection SpanAttribute = "db.collection" + spanAttributePeerAddress SpanAttribute = "peer.address" + spanAttributePeerHostname SpanAttribute = "peer.hostname" + spanAttributeHTTPURL SpanAttribute = "http.url" + spanAttributeHTTPMethod SpanAttribute = "http.method" + // query parameters only appear in segments, not span events, but is + // listed as span attributes to simplify code. + spanAttributeQueryParameters SpanAttribute = "query_parameters" + // These span attributes are added by aws sdk instrumentation. 
+ // https://source.datanerd.us/agents/agent-specs/blob/master/implementation_guides/aws-sdk.md#span-and-segment-attributes + SpanAttributeAWSOperation SpanAttribute = "aws.operation" + SpanAttributeAWSRequestID SpanAttribute = "aws.requestId" + SpanAttributeAWSRegion SpanAttribute = "aws.region" +) + +func (sa SpanAttribute) String() string { return string(sa) } + +var ( + usualDests = DestAll &^ destBrowser + tracesDests = destTxnTrace | destError + agentAttributeInfo = map[AgentAttributeID]struct { + name string + defaultDests destinationSet + }{ + AttributeHostDisplayName: {name: "host.displayName", defaultDests: usualDests}, + attributeRequestMethod: {name: "request.method", defaultDests: usualDests}, + attributeRequestAcceptHeader: {name: "request.headers.accept", defaultDests: usualDests}, + attributeRequestContentType: {name: "request.headers.contentType", defaultDests: usualDests}, + attributeRequestContentLength: {name: "request.headers.contentLength", defaultDests: usualDests}, + attributeRequestHeadersHost: {name: "request.headers.host", defaultDests: usualDests}, + attributeRequestHeadersUserAgent: {name: "request.headers.User-Agent", defaultDests: tracesDests}, + attributeRequestHeadersReferer: {name: "request.headers.referer", defaultDests: tracesDests}, + attributeRequestURI: {name: "request.uri", defaultDests: usualDests}, + attributeResponseHeadersContentType: {name: "response.headers.contentType", defaultDests: usualDests}, + attributeResponseHeadersContentLength: {name: "response.headers.contentLength", defaultDests: usualDests}, + attributeResponseCode: {name: "httpResponseCode", defaultDests: usualDests}, + AttributeAWSRequestID: {name: "aws.requestId", defaultDests: usualDests}, + AttributeAWSLambdaARN: {name: "aws.lambda.arn", defaultDests: usualDests}, + AttributeAWSLambdaColdStart: {name: "aws.lambda.coldStart", defaultDests: usualDests}, + AttributeAWSLambdaEventSourceARN: {name: "aws.lambda.eventSource.arn", defaultDests: usualDests}, + } + spanAttributes = []SpanAttribute{ + spanAttributeDBStatement, + spanAttributeDBInstance, + spanAttributeDBCollection, + spanAttributePeerAddress, + spanAttributePeerHostname, + spanAttributeHTTPURL, + spanAttributeHTTPMethod, + spanAttributeQueryParameters, + SpanAttributeAWSOperation, + SpanAttributeAWSRequestID, + SpanAttributeAWSRegion, + } +) + +func (id AgentAttributeID) name() string { return agentAttributeInfo[id].name } + +// https://source.datanerd.us/agents/agent-specs/blob/master/Agent-Attributes-PORTED.md + +// AttributeDestinationConfig matches newrelic.AttributeDestinationConfig to +// avoid circular dependency issues. +type AttributeDestinationConfig struct { + Enabled bool + Include []string + Exclude []string +} + +type destinationSet int + +const ( + destTxnEvent destinationSet = 1 << iota + destError + destTxnTrace + destBrowser + destSpan + destSegment +) + +const ( + destNone destinationSet = 0 + // DestAll contains all destinations. + DestAll destinationSet = destTxnEvent | destTxnTrace | destError | destBrowser | destSpan | destSegment +) + +const ( + attributeWildcardSuffix = '*' +) + +type attributeModifier struct { + match string // This will not contain a trailing '*'. + includeExclude +} + +type byMatch []*attributeModifier + +func (m byMatch) Len() int { return len(m) } +func (m byMatch) Swap(i, j int) { m[i], m[j] = m[j], m[i] } +func (m byMatch) Less(i, j int) bool { return m[i].match < m[j].match } + +// AttributeConfig is created at connect and shared between all transactions. 
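+// Agent and span attribute destinations are precomputed into the agentDests
+// and spanDests maps, so per-transaction filtering is a map lookup rather
+// than a scan over the modifiers.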
+type AttributeConfig struct { + disabledDestinations destinationSet + exactMatchModifiers map[string]*attributeModifier + // Once attributeConfig is constructed, wildcardModifiers is sorted in + // lexicographical order. Modifiers appearing later have precedence + // over modifiers appearing earlier. + wildcardModifiers []*attributeModifier + agentDests map[AgentAttributeID]destinationSet + spanDests map[SpanAttribute]destinationSet +} + +type includeExclude struct { + include destinationSet + exclude destinationSet +} + +func modifierApply(m *attributeModifier, d destinationSet) destinationSet { + // Include before exclude, since exclude has priority. + d |= m.include + d &^= m.exclude + return d +} + +func applyAttributeConfig(c *AttributeConfig, key string, d destinationSet) destinationSet { + // Important: The wildcard modifiers must be applied before the exact + // match modifiers, and the slice must be iterated in a forward + // direction. + for _, m := range c.wildcardModifiers { + if strings.HasPrefix(key, m.match) { + d = modifierApply(m, d) + } + } + + if m, ok := c.exactMatchModifiers[key]; ok { + d = modifierApply(m, d) + } + + d &^= c.disabledDestinations + + return d +} + +func addModifier(c *AttributeConfig, match string, d includeExclude) { + if "" == match { + return + } + exactMatch := true + if attributeWildcardSuffix == match[len(match)-1] { + exactMatch = false + match = match[0 : len(match)-1] + } + mod := &attributeModifier{ + match: match, + includeExclude: d, + } + + if exactMatch { + if m, ok := c.exactMatchModifiers[mod.match]; ok { + m.include |= mod.include + m.exclude |= mod.exclude + } else { + c.exactMatchModifiers[mod.match] = mod + } + } else { + for _, m := range c.wildcardModifiers { + // Important: Duplicate entries for the same match + // string would not work because exclude needs + // precedence over include. + if m.match == mod.match { + m.include |= mod.include + m.exclude |= mod.exclude + return + } + } + c.wildcardModifiers = append(c.wildcardModifiers, mod) + } +} + +func processDest(c *AttributeConfig, includeEnabled bool, dc *AttributeDestinationConfig, d destinationSet) { + if !dc.Enabled { + c.disabledDestinations |= d + } + if includeEnabled { + for _, match := range dc.Include { + addModifier(c, match, includeExclude{include: d}) + } + } + for _, match := range dc.Exclude { + addModifier(c, match, includeExclude{exclude: d}) + } +} + +// AttributeConfigInput is used as the input to CreateAttributeConfig: it +// transforms newrelic.Config settings into an AttributeConfig. +type AttributeConfigInput struct { + Attributes AttributeDestinationConfig + ErrorCollector AttributeDestinationConfig + TransactionEvents AttributeDestinationConfig + BrowserMonitoring AttributeDestinationConfig + TransactionTracer AttributeDestinationConfig + SpanEvents AttributeDestinationConfig + TraceSegments AttributeDestinationConfig +} + +var ( + sampleAttributeConfigInput = AttributeConfigInput{ + Attributes: AttributeDestinationConfig{Enabled: true}, + ErrorCollector: AttributeDestinationConfig{Enabled: true}, + TransactionEvents: AttributeDestinationConfig{Enabled: true}, + TransactionTracer: AttributeDestinationConfig{Enabled: true}, + BrowserMonitoring: AttributeDestinationConfig{Enabled: true}, + SpanEvents: AttributeDestinationConfig{Enabled: true}, + TraceSegments: AttributeDestinationConfig{Enabled: true}, + } +) + +// CreateAttributeConfig creates a new AttributeConfig. 
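+//
+// As an illustration (a hypothetical configuration, not a default): an
+// Exclude entry of "request.headers.*" installs a wildcard modifier that
+// strips every key with that prefix, while an Include entry of
+// "request.headers.host" installs an exact-match modifier that restores that
+// single key, since exact-match modifiers are applied after wildcard
+// modifiers. Within a single modifier, exclude wins over include.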
+func CreateAttributeConfig(input AttributeConfigInput, includeEnabled bool) *AttributeConfig { + c := &AttributeConfig{ + exactMatchModifiers: make(map[string]*attributeModifier), + wildcardModifiers: make([]*attributeModifier, 0, 64), + } + + processDest(c, includeEnabled, &input.Attributes, DestAll) + processDest(c, includeEnabled, &input.ErrorCollector, destError) + processDest(c, includeEnabled, &input.TransactionEvents, destTxnEvent) + processDest(c, includeEnabled, &input.TransactionTracer, destTxnTrace) + processDest(c, includeEnabled, &input.BrowserMonitoring, destBrowser) + processDest(c, includeEnabled, &input.SpanEvents, destSpan) + processDest(c, includeEnabled, &input.TraceSegments, destSegment) + + sort.Sort(byMatch(c.wildcardModifiers)) + + c.agentDests = make(map[AgentAttributeID]destinationSet) + for id, info := range agentAttributeInfo { + c.agentDests[id] = applyAttributeConfig(c, info.name, info.defaultDests) + } + c.spanDests = make(map[SpanAttribute]destinationSet, len(spanAttributes)) + for _, id := range spanAttributes { + c.spanDests[id] = applyAttributeConfig(c, id.String(), destSpan|destSegment) + } + + return c +} + +type userAttribute struct { + value interface{} + dests destinationSet +} + +type agentAttributeValue struct { + stringVal string + otherVal interface{} +} + +type agentAttributes map[AgentAttributeID]agentAttributeValue + +func (a *Attributes) filterSpanAttributes(s map[SpanAttribute]jsonWriter, d destinationSet) map[SpanAttribute]jsonWriter { + if nil != a { + for key := range s { + if a.config.spanDests[key]&d == 0 { + delete(s, key) + } + } + } + return s +} + +// GetAgentValue is used to access agent attributes. This function returns ("", +// nil) if the attribute doesn't exist or it doesn't match the destinations +// provided. +func (a *Attributes) GetAgentValue(id AgentAttributeID, d destinationSet) (string, interface{}) { + if nil == a || 0 == a.config.agentDests[id]&d { + return "", nil + } + v, _ := a.Agent[id] + return v.stringVal, v.otherVal +} + +// AddAgentAttributer allows instrumentation to add agent attributes without +// exposing a Transaction method. +type AddAgentAttributer interface { + AddAgentAttribute(id AgentAttributeID, stringVal string, otherVal interface{}) +} + +// Add is used to add agent attributes. Only one of stringVal and +// otherVal should be populated. Since most agent attribute values are strings, +// stringVal exists to avoid allocations. +func (attr agentAttributes) Add(id AgentAttributeID, stringVal string, otherVal interface{}) { + if "" != stringVal || otherVal != nil { + attr[id] = agentAttributeValue{ + stringVal: truncateStringValueIfLong(stringVal), + otherVal: otherVal, + } + } +} + +// Attributes are key value pairs attached to the various collected data types. +type Attributes struct { + config *AttributeConfig + user map[string]userAttribute + Agent agentAttributes +} + +// NewAttributes creates a new Attributes. +func NewAttributes(config *AttributeConfig) *Attributes { + return &Attributes{ + config: config, + Agent: make(agentAttributes), + } +} + +// ErrInvalidAttributeType is returned when the value is not valid. +type ErrInvalidAttributeType struct { + key string + val interface{} +} + +func (e ErrInvalidAttributeType) Error() string { + return fmt.Sprintf("attribute '%s' value of type %T is invalid", e.key, e.val) +} + +type invalidAttributeKeyErr struct{ key string } + +func (e invalidAttributeKeyErr) Error() string { + return fmt.Sprintf("attribute key '%.32s...' 
exceeds length limit %d", + e.key, attributeKeyLengthLimit) +} + +type userAttributeLimitErr struct{ key string } + +func (e userAttributeLimitErr) Error() string { + return fmt.Sprintf("attribute '%s' discarded: limit of %d reached", e.key, + attributeUserLimit) +} + +func truncateStringValueIfLong(val string) string { + if len(val) > attributeValueLengthLimit { + return StringLengthByteLimit(val, attributeValueLengthLimit) + } + return val +} + +// ValidateUserAttribute validates a user attribute. +func ValidateUserAttribute(key string, val interface{}) (interface{}, error) { + if str, ok := val.(string); ok { + val = interface{}(truncateStringValueIfLong(str)) + } + + switch val.(type) { + case string, bool, + uint8, uint16, uint32, uint64, int8, int16, int32, int64, + float32, float64, uint, int, uintptr: + default: + return nil, ErrInvalidAttributeType{ + key: key, + val: val, + } + } + + // Attributes whose keys are excessively long are dropped rather than + // truncated to avoid worrying about the application of configuration to + // truncated values or performing the truncation after configuration. + if len(key) > attributeKeyLengthLimit { + return nil, invalidAttributeKeyErr{key: key} + } + return val, nil +} + +// AddUserAttribute adds a user attribute. +func AddUserAttribute(a *Attributes, key string, val interface{}, d destinationSet) error { + val, err := ValidateUserAttribute(key, val) + if nil != err { + return err + } + dests := applyAttributeConfig(a.config, key, d) + if destNone == dests { + return nil + } + if nil == a.user { + a.user = make(map[string]userAttribute) + } + + if _, exists := a.user[key]; !exists && len(a.user) >= attributeUserLimit { + return userAttributeLimitErr{key} + } + + // Note: Duplicates are overridden: last attribute in wins. 
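+	// Replacing an existing key does not count against
+	// attributeUserLimit; the limit is only enforced when a new key
+	// would be added.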
+ a.user[key] = userAttribute{ + value: val, + dests: dests, + } + return nil +} + +func writeAttributeValueJSON(w *jsonFieldsWriter, key string, val interface{}) { + switch v := val.(type) { + case string: + w.stringField(key, v) + case bool: + if v { + w.rawField(key, `true`) + } else { + w.rawField(key, `false`) + } + case uint8: + w.intField(key, int64(v)) + case uint16: + w.intField(key, int64(v)) + case uint32: + w.intField(key, int64(v)) + case uint64: + w.intField(key, int64(v)) + case uint: + w.intField(key, int64(v)) + case uintptr: + w.intField(key, int64(v)) + case int8: + w.intField(key, int64(v)) + case int16: + w.intField(key, int64(v)) + case int32: + w.intField(key, int64(v)) + case int64: + w.intField(key, v) + case int: + w.intField(key, int64(v)) + case float32: + w.floatField(key, float64(v)) + case float64: + w.floatField(key, v) + default: + w.stringField(key, fmt.Sprintf("%T", v)) + } +} + +func agentAttributesJSON(a *Attributes, buf *bytes.Buffer, d destinationSet) { + if nil == a { + buf.WriteString("{}") + return + } + w := jsonFieldsWriter{buf: buf} + buf.WriteByte('{') + for id, val := range a.Agent { + if 0 != a.config.agentDests[id]&d { + if val.stringVal != "" { + w.stringField(id.name(), val.stringVal) + } else { + writeAttributeValueJSON(&w, id.name(), val.otherVal) + } + } + } + buf.WriteByte('}') + +} + +func userAttributesJSON(a *Attributes, buf *bytes.Buffer, d destinationSet, extraAttributes map[string]interface{}) { + buf.WriteByte('{') + if nil != a { + w := jsonFieldsWriter{buf: buf} + for key, val := range extraAttributes { + outputDest := applyAttributeConfig(a.config, key, d) + if 0 != outputDest&d { + writeAttributeValueJSON(&w, key, val) + } + } + for name, atr := range a.user { + if 0 != atr.dests&d { + if _, found := extraAttributes[name]; found { + continue + } + writeAttributeValueJSON(&w, name, atr.value) + } + } + } + buf.WriteByte('}') +} + +// userAttributesStringJSON is only used for testing. +func userAttributesStringJSON(a *Attributes, d destinationSet, extraAttributes map[string]interface{}) string { + estimate := len(a.user) * 128 + buf := bytes.NewBuffer(make([]byte, 0, estimate)) + userAttributesJSON(a, buf, d, extraAttributes) + return buf.String() +} + +// RequestAgentAttributes gathers agent attributes out of the request. +func RequestAgentAttributes(a *Attributes, method string, h http.Header, u *url.URL) { + a.Agent.Add(attributeRequestMethod, method, nil) + + if nil != u { + a.Agent.Add(attributeRequestURI, SafeURL(u), nil) + } + + if nil == h { + return + } + a.Agent.Add(attributeRequestAcceptHeader, h.Get("Accept"), nil) + a.Agent.Add(attributeRequestContentType, h.Get("Content-Type"), nil) + a.Agent.Add(attributeRequestHeadersHost, h.Get("Host"), nil) + a.Agent.Add(attributeRequestHeadersUserAgent, h.Get("User-Agent"), nil) + a.Agent.Add(attributeRequestHeadersReferer, SafeURLFromString(h.Get("Referer")), nil) + + if l := GetContentLengthFromHeader(h); l >= 0 { + a.Agent.Add(attributeRequestContentLength, "", l) + } +} + +// ResponseHeaderAttributes gather agent attributes from the response headers. +func ResponseHeaderAttributes(a *Attributes, h http.Header) { + if nil == h { + return + } + a.Agent.Add(attributeResponseHeadersContentType, h.Get("Content-Type"), nil) + + if l := GetContentLengthFromHeader(h); l >= 0 { + a.Agent.Add(attributeResponseHeadersContentLength, "", l) + } +} + +var ( + // statusCodeLookup avoids a strconv.Itoa call. 
+ statusCodeLookup = map[int]string{ + 100: "100", 101: "101", + 200: "200", 201: "201", 202: "202", 203: "203", 204: "204", 205: "205", 206: "206", + 300: "300", 301: "301", 302: "302", 303: "303", 304: "304", 305: "305", 307: "307", + 400: "400", 401: "401", 402: "402", 403: "403", 404: "404", 405: "405", 406: "406", + 407: "407", 408: "408", 409: "409", 410: "410", 411: "411", 412: "412", 413: "413", + 414: "414", 415: "415", 416: "416", 417: "417", 418: "418", 428: "428", 429: "429", + 431: "431", 451: "451", + 500: "500", 501: "501", 502: "502", 503: "503", 504: "504", 505: "505", 511: "511", + } +) + +// ResponseCodeAttribute sets the response code agent attribute. +func ResponseCodeAttribute(a *Attributes, code int) { + rc := statusCodeLookup[code] + if rc == "" { + rc = strconv.Itoa(code) + } + a.Agent.Add(attributeResponseCode, rc, nil) +} diff --git a/vendor/github.com/newrelic/go-agent/internal/browser.go b/vendor/github.com/newrelic/go-agent/internal/browser.go new file mode 100644 index 00000000000..a55456ad321 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/browser.go @@ -0,0 +1,18 @@ +package internal + +import "bytes" + +// BrowserAttributes returns a string with the attributes that are attached to +// the browser destination encoded in the JSON format expected by the Browser +// agent. +func BrowserAttributes(a *Attributes) []byte { + buf := &bytes.Buffer{} + + buf.WriteString(`{"u":`) + userAttributesJSON(a, buf, destBrowser, nil) + buf.WriteString(`,"a":`) + agentAttributesJSON(a, buf, destBrowser) + buf.WriteByte('}') + + return buf.Bytes() +} diff --git a/vendor/github.com/newrelic/go-agent/internal/cat/appdata.go b/vendor/github.com/newrelic/go-agent/internal/cat/appdata.go new file mode 100644 index 00000000000..d62b71f2b47 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/cat/appdata.go @@ -0,0 +1,111 @@ +package cat + +import ( + "bytes" + "encoding/json" + "errors" + + "github.com/newrelic/go-agent/internal/jsonx" +) + +// AppDataHeader represents a decoded AppData header. +type AppDataHeader struct { + CrossProcessID string + TransactionName string + QueueTimeInSeconds float64 + ResponseTimeInSeconds float64 + ContentLength int64 + TransactionGUID string +} + +var ( + errInvalidAppDataJSON = errors.New("invalid transaction data JSON") + errInvalidAppDataCrossProcessID = errors.New("cross process ID is not a string") + errInvalidAppDataTransactionName = errors.New("transaction name is not a string") + errInvalidAppDataQueueTimeInSeconds = errors.New("queue time is not a float64") + errInvalidAppDataResponseTimeInSeconds = errors.New("response time is not a float64") + errInvalidAppDataContentLength = errors.New("content length is not a float64") + errInvalidAppDataTransactionGUID = errors.New("transaction GUID is not a string") +) + +// MarshalJSON marshalls an AppDataHeader as raw JSON. +func (appData *AppDataHeader) MarshalJSON() ([]byte, error) { + buf := bytes.NewBufferString("[") + + jsonx.AppendString(buf, appData.CrossProcessID) + + buf.WriteString(",") + jsonx.AppendString(buf, appData.TransactionName) + + buf.WriteString(",") + jsonx.AppendFloat(buf, appData.QueueTimeInSeconds) + + buf.WriteString(",") + jsonx.AppendFloat(buf, appData.ResponseTimeInSeconds) + + buf.WriteString(",") + jsonx.AppendInt(buf, appData.ContentLength) + + buf.WriteString(",") + jsonx.AppendString(buf, appData.TransactionGUID) + + // The mysterious unused field. We don't need to round trip this, so we'll + // just hardcode it to false. 
+ buf.WriteString(",false]") + return buf.Bytes(), nil +} + +// UnmarshalJSON unmarshalls an AppDataHeader from raw JSON. +func (appData *AppDataHeader) UnmarshalJSON(data []byte) error { + var ok bool + var v interface{} + + if err := json.Unmarshal(data, &v); err != nil { + return err + } + + arr, ok := v.([]interface{}) + if !ok { + return errInvalidAppDataJSON + } + if len(arr) < 7 { + return errUnexpectedArraySize{ + label: "unexpected number of application data elements", + expected: 7, + actual: len(arr), + } + } + + if appData.CrossProcessID, ok = arr[0].(string); !ok { + return errInvalidAppDataCrossProcessID + } + + if appData.TransactionName, ok = arr[1].(string); !ok { + return errInvalidAppDataTransactionName + } + + if appData.QueueTimeInSeconds, ok = arr[2].(float64); !ok { + return errInvalidAppDataQueueTimeInSeconds + } + + if appData.ResponseTimeInSeconds, ok = arr[3].(float64); !ok { + return errInvalidAppDataResponseTimeInSeconds + } + + cl, ok := arr[4].(float64) + if !ok { + return errInvalidAppDataContentLength + } + // Content length is specced as int32, but not all agents are consistent on + // this in practice. Let's handle it as int64 to maximise compatibility. + appData.ContentLength = int64(cl) + + if appData.TransactionGUID, ok = arr[5].(string); !ok { + return errInvalidAppDataTransactionGUID + } + + // As above, we don't bother decoding the unused field here. It just has to + // be present (which was checked earlier with the length check). + + return nil +} diff --git a/vendor/github.com/newrelic/go-agent/internal/cat/errors.go b/vendor/github.com/newrelic/go-agent/internal/cat/errors.go new file mode 100644 index 00000000000..d19ce5183fb --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/cat/errors.go @@ -0,0 +1,15 @@ +package cat + +import ( + "fmt" +) + +type errUnexpectedArraySize struct { + label string + expected int + actual int +} + +func (e errUnexpectedArraySize) Error() string { + return fmt.Sprintf("%s: expected %d; got %d", e.label, e.expected, e.actual) +} diff --git a/vendor/github.com/newrelic/go-agent/internal/cat/headers.go b/vendor/github.com/newrelic/go-agent/internal/cat/headers.go new file mode 100644 index 00000000000..52586ed9337 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/cat/headers.go @@ -0,0 +1,13 @@ +// Package cat provides functionality related to the wire format of CAT +// headers. +package cat + +// These header names don't match the spec in terms of their casing, but does +// match what Go will give us from http.CanonicalHeaderKey(). Besides, HTTP +// headers are case insensitive anyway. Rejoice! +const ( + NewRelicIDName = "X-Newrelic-Id" + NewRelicTxnName = "X-Newrelic-Transaction" + NewRelicAppDataName = "X-Newrelic-App-Data" + NewRelicSyntheticsName = "X-Newrelic-Synthetics" +) diff --git a/vendor/github.com/newrelic/go-agent/internal/cat/id.go b/vendor/github.com/newrelic/go-agent/internal/cat/id.go new file mode 100644 index 00000000000..f8d3928ac19 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/cat/id.go @@ -0,0 +1,41 @@ +package cat + +import ( + "errors" + "strconv" + "strings" +) + +// IDHeader represents a decoded cross process ID header (generally encoded as +// a string in the form ACCOUNT#BLOB). +type IDHeader struct { + AccountID int + Blob string +} + +var ( + errInvalidAccountID = errors.New("invalid account ID") +) + +// NewIDHeader parses the given decoded ID header and creates an IDHeader +// representing it. 
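+//
+// For example, a hypothetical header value of "12345#67890" decodes to
+// AccountID 12345 with Blob "67890"; input without exactly one '#'
+// separator, or with a non-numeric account ID, is rejected.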
+func NewIDHeader(in []byte) (*IDHeader, error) { + parts := strings.Split(string(in), "#") + if len(parts) != 2 { + return nil, errUnexpectedArraySize{ + label: "unexpected number of ID elements", + expected: 2, + actual: len(parts), + } + } + + account, err := strconv.Atoi(parts[0]) + if err != nil { + return nil, errInvalidAccountID + } + + return &IDHeader{ + AccountID: account, + Blob: parts[1], + }, nil +} diff --git a/vendor/github.com/newrelic/go-agent/internal/cat/path_hash.go b/vendor/github.com/newrelic/go-agent/internal/cat/path_hash.go new file mode 100644 index 00000000000..34014464f11 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/cat/path_hash.go @@ -0,0 +1,35 @@ +package cat + +import ( + "crypto/md5" + "encoding/binary" + "fmt" + "regexp" +) + +var pathHashValidator = regexp.MustCompile("^[0-9a-f]{8}$") + +// GeneratePathHash generates a path hash given a referring path hash, +// transaction name, and application name. referringPathHash can be an empty +// string if there was no referring path hash. +func GeneratePathHash(referringPathHash, txnName, appName string) (string, error) { + var rph uint32 + if referringPathHash != "" { + if !pathHashValidator.MatchString(referringPathHash) { + // Per the spec, invalid referring path hashes should be treated as "0". + referringPathHash = "0" + } + + if _, err := fmt.Sscanf(referringPathHash, "%x", &rph); err != nil { + fmt.Println(rph) + return "", err + } + rph = (rph << 1) | (rph >> 31) + } + + hashInput := fmt.Sprintf("%s;%s", appName, txnName) + hash := md5.Sum([]byte(hashInput)) + low32 := binary.BigEndian.Uint32(hash[12:]) + + return fmt.Sprintf("%08x", rph^low32), nil +} diff --git a/vendor/github.com/newrelic/go-agent/internal/cat/synthetics.go b/vendor/github.com/newrelic/go-agent/internal/cat/synthetics.go new file mode 100644 index 00000000000..3836f625b01 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/cat/synthetics.go @@ -0,0 +1,82 @@ +package cat + +import ( + "encoding/json" + "errors" + "fmt" +) + +// SyntheticsHeader represents a decoded Synthetics header. +type SyntheticsHeader struct { + Version int + AccountID int + ResourceID string + JobID string + MonitorID string +} + +var ( + errInvalidSyntheticsJSON = errors.New("invalid synthetics JSON") + errInvalidSyntheticsVersion = errors.New("version is not a float64") + errInvalidSyntheticsAccountID = errors.New("account ID is not a float64") + errInvalidSyntheticsResourceID = errors.New("synthetics resource ID is not a string") + errInvalidSyntheticsJobID = errors.New("synthetics job ID is not a string") + errInvalidSyntheticsMonitorID = errors.New("synthetics monitor ID is not a string") +) + +type errUnexpectedSyntheticsVersion int + +func (e errUnexpectedSyntheticsVersion) Error() string { + return fmt.Sprintf("unexpected synthetics header version: %d", e) +} + +// UnmarshalJSON unmarshalls a SyntheticsHeader from raw JSON. 
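+//
+// The expected payload is a five element JSON array, e.g. the hypothetical
+// value [1,12345,"resource","job","monitor"]; only version 1 is accepted.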
+func (s *SyntheticsHeader) UnmarshalJSON(data []byte) error { + var ok bool + var v interface{} + + if err := json.Unmarshal(data, &v); err != nil { + return err + } + + arr, ok := v.([]interface{}) + if !ok { + return errInvalidSyntheticsJSON + } + if len(arr) != 5 { + return errUnexpectedArraySize{ + label: "unexpected number of application data elements", + expected: 5, + actual: len(arr), + } + } + + version, ok := arr[0].(float64) + if !ok { + return errInvalidSyntheticsVersion + } + s.Version = int(version) + if s.Version != 1 { + return errUnexpectedSyntheticsVersion(s.Version) + } + + accountID, ok := arr[1].(float64) + if !ok { + return errInvalidSyntheticsAccountID + } + s.AccountID = int(accountID) + + if s.ResourceID, ok = arr[2].(string); !ok { + return errInvalidSyntheticsResourceID + } + + if s.JobID, ok = arr[3].(string); !ok { + return errInvalidSyntheticsJobID + } + + if s.MonitorID, ok = arr[4].(string); !ok { + return errInvalidSyntheticsMonitorID + } + + return nil +} diff --git a/vendor/github.com/newrelic/go-agent/internal/cat/txndata.go b/vendor/github.com/newrelic/go-agent/internal/cat/txndata.go new file mode 100644 index 00000000000..a766926a820 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/cat/txndata.go @@ -0,0 +1,96 @@ +package cat + +import ( + "bytes" + "encoding/json" + "errors" + + "github.com/newrelic/go-agent/internal/jsonx" +) + +// TxnDataHeader represents a decoded TxnData header. +type TxnDataHeader struct { + GUID string + TripID string + PathHash string +} + +var ( + errInvalidTxnDataJSON = errors.New("invalid transaction data JSON") + errInvalidTxnDataGUID = errors.New("GUID is not a string") + errInvalidTxnDataTripID = errors.New("trip ID is not a string or null") + errInvalidTxnDataPathHash = errors.New("path hash is not a string or null") +) + +// MarshalJSON marshalls a TxnDataHeader as raw JSON. +func (txnData *TxnDataHeader) MarshalJSON() ([]byte, error) { + // Note that, although there are two and four element versions of this header + // in the wild, we will only ever generate the four element version. + + buf := bytes.NewBufferString("[") + + jsonx.AppendString(buf, txnData.GUID) + + // Write the unused second field. + buf.WriteString(",false,") + jsonx.AppendString(buf, txnData.TripID) + + buf.WriteString(",") + jsonx.AppendString(buf, txnData.PathHash) + + buf.WriteString("]") + + return buf.Bytes(), nil +} + +// UnmarshalJSON unmarshalls a TxnDataHeader from raw JSON. +func (txnData *TxnDataHeader) UnmarshalJSON(data []byte) error { + var ok bool + var v interface{} + + if err := json.Unmarshal(data, &v); err != nil { + return err + } + + arr, ok := v.([]interface{}) + if !ok { + return errInvalidTxnDataJSON + } + if len(arr) < 2 { + return errUnexpectedArraySize{ + label: "unexpected number of transaction data elements", + expected: 2, + actual: len(arr), + } + } + + if txnData.GUID, ok = arr[0].(string); !ok { + return errInvalidTxnDataGUID + } + + // Ignore the unused second field. + + // Set up defaults for the optional values. + txnData.TripID = "" + txnData.PathHash = "" + + if len(arr) >= 3 { + // Per the cross agent tests, an explicit null is valid here. + if nil != arr[2] { + if txnData.TripID, ok = arr[2].(string); !ok { + return errInvalidTxnDataTripID + } + } + + if len(arr) >= 4 { + // Per the cross agent tests, an explicit null is also valid here. 
+ if nil != arr[3] { + if txnData.PathHash, ok = arr[3].(string); !ok { + return errInvalidTxnDataPathHash + } + } + } + } + + return nil +} diff --git a/vendor/github.com/newrelic/go-agent/internal/collector.go b/vendor/github.com/newrelic/go-agent/internal/collector.go new file mode 100644 index 00000000000..e2460a683eb --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/collector.go @@ -0,0 +1,333 @@ +package internal + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "os" + "regexp" + "strconv" + "time" + + "github.com/newrelic/go-agent/internal/logger" +) + +const ( + // ProcotolVersion is the protocol version used to communicate with NR + // backend. + ProcotolVersion = 17 + userAgentPrefix = "NewRelic-Go-Agent/" + + // Methods used in collector communication. + cmdPreconnect = "preconnect" + cmdConnect = "connect" + cmdMetrics = "metric_data" + cmdCustomEvents = "custom_event_data" + cmdTxnEvents = "analytic_event_data" + cmdErrorEvents = "error_event_data" + cmdErrorData = "error_data" + cmdTxnTraces = "transaction_sample_data" + cmdSlowSQLs = "sql_trace_data" + cmdSpanEvents = "span_event_data" +) + +// RpmCmd contains fields specific to an individual call made to RPM. +type RpmCmd struct { + Name string + Collector string + RunID string + Data []byte + RequestHeadersMap map[string]string + MaxPayloadSize int +} + +// RpmControls contains fields which will be the same for all calls made +// by the same application. +type RpmControls struct { + License string + Client *http.Client + Logger logger.Logger + AgentVersion string +} + +// RPMResponse contains a NR endpoint response. +// +// Agent Behavior Summary: +// +// on connect/preconnect: +// 410 means shutdown +// 200, 202 mean success (start run) +// all other response codes and errors mean try after backoff +// +// on harvest: +// 410 means shutdown +// 401, 409 mean restart run +// 408, 429, 500, 503 mean save data for next harvest +// all other response codes and errors discard the data and continue the current harvest +type RPMResponse struct { + statusCode int + body []byte + // Err indicates whether or not the call was successful: newRPMResponse + // should be used to avoid mismatch between statusCode and Err. + Err error + disconnectSecurityPolicy bool +} + +func newRPMResponse(statusCode int) RPMResponse { + var err error + if statusCode != 200 && statusCode != 202 { + err = fmt.Errorf("response code: %d", statusCode) + } + return RPMResponse{statusCode: statusCode, Err: err} +} + +// IsDisconnect indicates that the agent should disconnect. +func (resp RPMResponse) IsDisconnect() bool { + return resp.statusCode == 410 || resp.disconnectSecurityPolicy +} + +// IsRestartException indicates that the agent should restart. +func (resp RPMResponse) IsRestartException() bool { + return resp.statusCode == 401 || + resp.statusCode == 409 +} + +// ShouldSaveHarvestData indicates that the agent should save the data and try +// to send it in the next harvest. 
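+// The 408, 429, 500, and 503 cases below match the harvest behavior
+// described in the summary on RPMResponse.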
+func (resp RPMResponse) ShouldSaveHarvestData() bool { + switch resp.statusCode { + case 408, 429, 500, 503: + return true + default: + return false + } +} + +func rpmURL(cmd RpmCmd, cs RpmControls) string { + var u url.URL + + u.Host = cmd.Collector + u.Path = "agent_listener/invoke_raw_method" + u.Scheme = "https" + + query := url.Values{} + query.Set("marshal_format", "json") + query.Set("protocol_version", strconv.Itoa(ProcotolVersion)) + query.Set("method", cmd.Name) + query.Set("license_key", cs.License) + + if len(cmd.RunID) > 0 { + query.Set("run_id", cmd.RunID) + } + + u.RawQuery = query.Encode() + return u.String() +} + +func collectorRequestInternal(url string, cmd RpmCmd, cs RpmControls) RPMResponse { + compressed, err := compress(cmd.Data) + if nil != err { + return RPMResponse{Err: err} + } + + if l := compressed.Len(); l > cmd.MaxPayloadSize { + return RPMResponse{Err: fmt.Errorf("Payload size for %s too large: %d greater than %d", cmd.Name, l, cmd.MaxPayloadSize)} + } + + req, err := http.NewRequest("POST", url, compressed) + if nil != err { + return RPMResponse{Err: err} + } + + req.Header.Add("Accept-Encoding", "identity, deflate") + req.Header.Add("Content-Type", "application/octet-stream") + req.Header.Add("User-Agent", userAgentPrefix+cs.AgentVersion) + req.Header.Add("Content-Encoding", "gzip") + for k, v := range cmd.RequestHeadersMap { + req.Header.Add(k, v) + } + + resp, err := cs.Client.Do(req) + if err != nil { + return RPMResponse{Err: err} + } + + defer resp.Body.Close() + + r := newRPMResponse(resp.StatusCode) + + // Read the entire response, rather than using resp.Body as input to json.NewDecoder to + // avoid the issue described here: + // https://github.com/google/go-github/pull/317 + // https://ahmetalpbalkan.com/blog/golang-json-decoder-pitfalls/ + // Also, collector JSON responses are expected to be quite small. + body, err := ioutil.ReadAll(resp.Body) + if nil == r.Err { + r.Err = err + } + r.body = body + + return r +} + +// CollectorRequest makes a request to New Relic. +func CollectorRequest(cmd RpmCmd, cs RpmControls) RPMResponse { + url := rpmURL(cmd, cs) + + if cs.Logger.DebugEnabled() { + cs.Logger.Debug("rpm request", map[string]interface{}{ + "command": cmd.Name, + "url": url, + "payload": JSONString(cmd.Data), + }) + } + + resp := collectorRequestInternal(url, cmd, cs) + + if cs.Logger.DebugEnabled() { + if err := resp.Err; err != nil { + cs.Logger.Debug("rpm failure", map[string]interface{}{ + "command": cmd.Name, + "url": url, + "response": string(resp.body), // Body might not be JSON on failure. + "error": err.Error(), + }) + } else { + cs.Logger.Debug("rpm response", map[string]interface{}{ + "command": cmd.Name, + "url": url, + "response": JSONString(resp.body), + }) + } + } + + return resp +} + +const ( + // NEW_RELIC_HOST can be used to override the New Relic endpoint. This + // is useful for testing. + envHost = "NEW_RELIC_HOST" +) + +var ( + preconnectHostOverride = os.Getenv(envHost) + preconnectHostDefault = "collector.newrelic.com" + preconnectRegionLicenseRegex = regexp.MustCompile(`(^.+?)x`) +) + +func calculatePreconnectHost(license, overrideHost string) string { + if "" != overrideHost { + return overrideHost + } + m := preconnectRegionLicenseRegex.FindStringSubmatch(license) + if len(m) > 1 { + return "collector." + m[1] + ".nr-data.net" + } + return preconnectHostDefault +} + +// ConnectJSONCreator allows the creation of the connect payload JSON to be +// deferred until the SecurityPolicies are acquired and vetted. 
+type ConnectJSONCreator interface { + CreateConnectJSON(*SecurityPolicies) ([]byte, error) +} + +type preconnectRequest struct { + SecurityPoliciesToken string `json:"security_policies_token,omitempty"` + HighSecurity bool `json:"high_security"` +} + +var ( + errMissingAgentRunID = errors.New("connect reply missing agent run id") +) + +// ConnectAttempt tries to connect an application. +func ConnectAttempt(config ConnectJSONCreator, securityPoliciesToken string, highSecurity bool, cs RpmControls) (*ConnectReply, RPMResponse) { + preconnectData, err := json.Marshal([]preconnectRequest{{ + SecurityPoliciesToken: securityPoliciesToken, + HighSecurity: highSecurity, + }}) + if nil != err { + return nil, RPMResponse{Err: fmt.Errorf("unable to marshal preconnect data: %v", err)} + } + + call := RpmCmd{ + Name: cmdPreconnect, + Collector: calculatePreconnectHost(cs.License, preconnectHostOverride), + Data: preconnectData, + MaxPayloadSize: maxPayloadSizeInBytes, + } + + resp := CollectorRequest(call, cs) + if nil != resp.Err { + return nil, resp + } + + var preconnect struct { + Preconnect PreconnectReply `json:"return_value"` + } + err = json.Unmarshal(resp.body, &preconnect) + if nil != err { + // Certain security policy errors must be treated as a disconnect. + return nil, RPMResponse{ + Err: fmt.Errorf("unable to process preconnect reply: %v", err), + disconnectSecurityPolicy: isDisconnectSecurityPolicyError(err), + } + } + + js, err := config.CreateConnectJSON(preconnect.Preconnect.SecurityPolicies.PointerIfPopulated()) + if nil != err { + return nil, RPMResponse{Err: fmt.Errorf("unable to create connect data: %v", err)} + } + + call.Collector = preconnect.Preconnect.Collector + call.Data = js + call.Name = cmdConnect + + resp = CollectorRequest(call, cs) + if nil != resp.Err { + return nil, resp + } + + reply, err := constructConnectReply(resp.body, preconnect.Preconnect) + if nil != err { + return nil, RPMResponse{Err: err} + } + + // Note: This should never happen. It would mean the collector + // response is malformed. This exists merely as extra defensiveness. 
+ if "" == reply.RunID { + return nil, RPMResponse{Err: errMissingAgentRunID} + } + + return reply, resp +} + +func constructConnectReply(body []byte, preconnect PreconnectReply) (*ConnectReply, error) { + var reply struct { + Reply *ConnectReply `json:"return_value"` + } + reply.Reply = ConnectReplyDefaults() + err := json.Unmarshal(body, &reply) + if nil != err { + return nil, fmt.Errorf("unable to parse connect reply: %v", err) + } + + reply.Reply.PreconnectReply = preconnect + + reply.Reply.AdaptiveSampler = NewAdaptiveSampler( + time.Duration(reply.Reply.SamplingTargetPeriodInSeconds)*time.Second, + reply.Reply.SamplingTarget, + time.Now()) + reply.Reply.rulesCache = newRulesCache(txnNameCacheLimit) + + if reply.Reply.EventData.EventReportPeriodMs <= 0 { + reply.Reply.EventData.EventReportPeriodMs = defaultConfigurableEventHarvestMs + } + + return reply.Reply, nil +} diff --git a/vendor/github.com/newrelic/go-agent/internal/compress.go b/vendor/github.com/newrelic/go-agent/internal/compress.go new file mode 100644 index 00000000000..b20b9600f5e --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/compress.go @@ -0,0 +1,19 @@ +package internal + +import ( + "bytes" + "compress/gzip" +) + +func compress(b []byte) (*bytes.Buffer, error) { + var buf bytes.Buffer + w := gzip.NewWriter(&buf) + _, err := w.Write(b) + w.Close() + + if nil != err { + return nil, err + } + + return &buf, nil +} diff --git a/vendor/github.com/newrelic/go-agent/internal/connect_reply.go b/vendor/github.com/newrelic/go-agent/internal/connect_reply.go new file mode 100644 index 00000000000..315f5e0f2e5 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/connect_reply.go @@ -0,0 +1,237 @@ +package internal + +import ( + "encoding/json" + "strings" + "time" +) + +// AgentRunID identifies the current connection with the collector. +type AgentRunID string + +func (id AgentRunID) String() string { + return string(id) +} + +// PreconnectReply contains settings from the preconnect endpoint. +type PreconnectReply struct { + Collector string `json:"redirect_host"` + SecurityPolicies SecurityPolicies `json:"security_policies"` +} + +// ConnectReply contains all of the settings and state send down from the +// collector. It should not be modified after creation. 
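+//
+// Most fields are unmarshaled directly from the connect response JSON;
+// constructConnectReply additionally installs the AdaptiveSampler and
+// rulesCache, which are not part of the wire format.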
+type ConnectReply struct { + RunID AgentRunID `json:"agent_run_id"` + RequestHeadersMap map[string]string `json:"request_headers_map"` + MaxPayloadSizeInBytes int `json:"max_payload_size_in_bytes"` + EntityGUID string `json:"entity_guid"` + + // Transaction Name Modifiers + SegmentTerms segmentRules `json:"transaction_segment_terms"` + TxnNameRules metricRules `json:"transaction_name_rules"` + URLRules metricRules `json:"url_rules"` + MetricRules metricRules `json:"metric_name_rules"` + + // Cross Process + EncodingKey string `json:"encoding_key"` + CrossProcessID string `json:"cross_process_id"` + TrustedAccounts trustedAccountSet `json:"trusted_account_ids"` + + // Settings + KeyTxnApdex map[string]float64 `json:"web_transactions_apdex"` + ApdexThresholdSeconds float64 `json:"apdex_t"` + CollectAnalyticsEvents bool `json:"collect_analytics_events"` + CollectCustomEvents bool `json:"collect_custom_events"` + CollectTraces bool `json:"collect_traces"` + CollectErrors bool `json:"collect_errors"` + CollectErrorEvents bool `json:"collect_error_events"` + CollectSpanEvents bool `json:"collect_span_events"` + + // RUM + AgentLoader string `json:"js_agent_loader"` + Beacon string `json:"beacon"` + BrowserKey string `json:"browser_key"` + AppID string `json:"application_id"` + ErrorBeacon string `json:"error_beacon"` + JSAgentFile string `json:"js_agent_file"` + + // PreconnectReply fields are not in the connect reply, this embedding + // is done to simplify code. + PreconnectReply `json:"-"` + + Messages []struct { + Message string `json:"message"` + Level string `json:"level"` + } `json:"messages"` + + AdaptiveSampler AdaptiveSampler + // TraceIDGenerator creates random IDs for distributed tracing. It + // exists here in the connect reply so it can be modified to create + // deterministic identifiers in tests. + TraceIDGenerator *TraceIDGenerator `json:"-"` + + // BetterCAT/Distributed Tracing + AccountID string `json:"account_id"` + TrustedAccountKey string `json:"trusted_account_key"` + PrimaryAppID string `json:"primary_application_id"` + SamplingTarget uint64 `json:"sampling_target"` + SamplingTargetPeriodInSeconds int `json:"sampling_target_period_in_seconds"` + + // rulesCache caches the results of calling CreateFullTxnName. It + // exists here in ConnectReply since it is specific to a set of rules + // and is shared between transactions. + rulesCache *rulesCache + + ServerSideConfig struct { + TransactionTracerEnabled *bool `json:"transaction_tracer.enabled"` + // TransactionTracerThreshold should contain either a number or + // "apdex_f" if it is non-nil. + TransactionTracerThreshold interface{} `json:"transaction_tracer.transaction_threshold"` + TransactionTracerStackTraceThreshold *float64 `json:"transaction_tracer.stack_trace_threshold"` + ErrorCollectorEnabled *bool `json:"error_collector.enabled"` + ErrorCollectorIgnoreStatusCodes []int `json:"error_collector.ignore_status_codes"` + CrossApplicationTracerEnabled *bool `json:"cross_application_tracer.enabled"` + } `json:"agent_config"` + + // Faster Event Harvest + EventData EventHarvestConfig `json:"event_harvest_config"` +} + +// EventHarvestConfig contains fields relating to faster event harvest. +// This structure is used in the connect request (to send up defaults) +// and in the connect response (to get the server values). 
+// +// https://source.datanerd.us/agents/agent-specs/blob/master/Connect-LEGACY.md#event_harvest_config-hash +// https://source.datanerd.us/agents/agent-specs/blob/master/Connect-LEGACY.md#event-harvest-config +type EventHarvestConfig struct { + EventReportPeriodMs int `json:"report_period_ms"` + HarvestLimits struct { + TxnEvents uint `json:"analytic_event_data"` + CustomEvents uint `json:"custom_event_data"` + ErrorEvents uint `json:"error_event_data"` + } `json:"harvest_limits"` +} + +func (r *ConnectReply) getHarvestData() EventHarvestConfig { + if nil != r { + return r.EventData + } + return DefaultEventHarvestConfig() +} + +// DefaultEventHarvestConfig provides faster event harvest defaults. +func DefaultEventHarvestConfig() EventHarvestConfig { + cfg := EventHarvestConfig{} + cfg.EventReportPeriodMs = defaultConfigurableEventHarvestMs + cfg.HarvestLimits.TxnEvents = maxTxnEvents + cfg.HarvestLimits.CustomEvents = maxCustomEvents + cfg.HarvestLimits.ErrorEvents = maxErrorEvents + return cfg +} + +func (h EventHarvestConfig) eventReportPeriod() time.Duration { + return time.Duration(h.EventReportPeriodMs) * time.Millisecond +} + +type trustedAccountSet map[int]struct{} + +func (t *trustedAccountSet) IsTrusted(account int) bool { + _, exists := (*t)[account] + return exists +} + +func (t *trustedAccountSet) UnmarshalJSON(data []byte) error { + accounts := make([]int, 0) + if err := json.Unmarshal(data, &accounts); err != nil { + return err + } + + *t = make(trustedAccountSet) + for _, account := range accounts { + (*t)[account] = struct{}{} + } + + return nil +} + +// ConnectReplyDefaults returns a newly allocated ConnectReply with the proper +// default settings. A pointer to a global is not used to prevent consumers +// from changing the default settings. +func ConnectReplyDefaults() *ConnectReply { + return &ConnectReply{ + ApdexThresholdSeconds: 0.5, + CollectAnalyticsEvents: true, + CollectCustomEvents: true, + CollectTraces: true, + CollectErrors: true, + CollectErrorEvents: true, + CollectSpanEvents: true, + MaxPayloadSizeInBytes: maxPayloadSizeInBytes, + // No transactions should be sampled before the application is + // connected. + AdaptiveSampler: SampleNothing{}, + + SamplingTarget: 10, + SamplingTargetPeriodInSeconds: 60, + + EventData: DefaultEventHarvestConfig(), + + TraceIDGenerator: NewTraceIDGenerator(int64(time.Now().UnixNano())), + } +} + +// CalculateApdexThreshold calculates the apdex threshold. +func CalculateApdexThreshold(c *ConnectReply, txnName string) time.Duration { + if t, ok := c.KeyTxnApdex[txnName]; ok { + return FloatSecondsToDuration(t) + } + return FloatSecondsToDuration(c.ApdexThresholdSeconds) +} + +// CreateFullTxnName uses collector rules and the appropriate metric prefix to +// construct the full transaction metric name from the name given by the +// consumer. +func CreateFullTxnName(input string, reply *ConnectReply, isWeb bool) string { + if name := reply.rulesCache.find(input, isWeb); "" != name { + return name + } + name := constructFullTxnName(input, reply, isWeb) + if "" != name { + // Note that we don't cache situations where the rules say + // ignore. It would increase complication (we would need to + // disambiguate not-found vs ignore). Also, the ignore code + // path is probably extremely uncommon. 
+ reply.rulesCache.set(input, isWeb, name) + } + return name +} + +func constructFullTxnName(input string, reply *ConnectReply, isWeb bool) string { + var afterURLRules string + if "" != input { + afterURLRules = reply.URLRules.Apply(input) + if "" == afterURLRules { + return "" + } + } + + prefix := backgroundMetricPrefix + if isWeb { + prefix = webMetricPrefix + } + + var beforeNameRules string + if strings.HasPrefix(afterURLRules, "/") { + beforeNameRules = prefix + afterURLRules + } else { + beforeNameRules = prefix + "/" + afterURLRules + } + + afterNameRules := reply.TxnNameRules.Apply(beforeNameRules) + if "" == afterNameRules { + return "" + } + + return reply.SegmentTerms.apply(afterNameRules) +} diff --git a/vendor/github.com/newrelic/go-agent/internal/context.go b/vendor/github.com/newrelic/go-agent/internal/context.go new file mode 100644 index 00000000000..2b3cab688c6 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/context.go @@ -0,0 +1,17 @@ +package internal + +type contextKeyType struct{} + +var ( + // TransactionContextKey is the key used for newrelic.FromContext and + // newrelic.NewContext. + TransactionContextKey = contextKeyType(struct{}{}) + + // GinTransactionContextKey is used as the context key in + // nrgin.Middleware and nrgin.Transaction. Unfortunately, Gin requires + // a string context key. We use two different context keys (and check + // both in nrgin.Transaction and newrelic.FromContext) rather than use a + // single string key because context.WithValue will fail golint if used + // with a string key. + GinTransactionContextKey = "newRelicTransaction" +) diff --git a/vendor/github.com/newrelic/go-agent/internal/cross_process_http.go b/vendor/github.com/newrelic/go-agent/internal/cross_process_http.go new file mode 100644 index 00000000000..73145c62c03 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/cross_process_http.go @@ -0,0 +1,67 @@ +package internal + +import ( + "net/http" + + "github.com/newrelic/go-agent/internal/cat" +) + +// InboundHTTPRequest adds the inbound request metadata to the TxnCrossProcess. +func (txp *TxnCrossProcess) InboundHTTPRequest(hdr http.Header) error { + return txp.handleInboundRequestHeaders(HTTPHeaderToMetadata(hdr)) +} + +// AppDataToHTTPHeader encapsulates the given appData value in the correct HTTP +// header. +func AppDataToHTTPHeader(appData string) http.Header { + header := http.Header{} + + if appData != "" { + header.Add(cat.NewRelicAppDataName, appData) + } + + return header +} + +// HTTPHeaderToAppData gets the appData value from the correct HTTP header. +func HTTPHeaderToAppData(header http.Header) string { + if header == nil { + return "" + } + + return header.Get(cat.NewRelicAppDataName) +} + +// HTTPHeaderToMetadata gets the cross process metadata from the relevant HTTP +// headers. +func HTTPHeaderToMetadata(header http.Header) CrossProcessMetadata { + if header == nil { + return CrossProcessMetadata{} + } + + return CrossProcessMetadata{ + ID: header.Get(cat.NewRelicIDName), + TxnData: header.Get(cat.NewRelicTxnName), + Synthetics: header.Get(cat.NewRelicSyntheticsName), + } +} + +// MetadataToHTTPHeader creates a set of HTTP headers to represent the given +// cross process metadata. 
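+// Empty metadata fields are omitted, so a zero value CrossProcessMetadata
+// yields an empty http.Header.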
+func MetadataToHTTPHeader(metadata CrossProcessMetadata) http.Header { + header := http.Header{} + + if metadata.ID != "" { + header.Add(cat.NewRelicIDName, metadata.ID) + } + + if metadata.TxnData != "" { + header.Add(cat.NewRelicTxnName, metadata.TxnData) + } + + if metadata.Synthetics != "" { + header.Add(cat.NewRelicSyntheticsName, metadata.Synthetics) + } + + return header +} diff --git a/vendor/github.com/newrelic/go-agent/internal/custom_event.go b/vendor/github.com/newrelic/go-agent/internal/custom_event.go new file mode 100644 index 00000000000..20cf5918ae6 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/custom_event.go @@ -0,0 +1,103 @@ +package internal + +import ( + "bytes" + "fmt" + "regexp" + "time" +) + +// https://newrelic.atlassian.net/wiki/display/eng/Custom+Events+in+New+Relic+Agents + +var ( + eventTypeRegexRaw = `^[a-zA-Z0-9:_ ]+$` + eventTypeRegex = regexp.MustCompile(eventTypeRegexRaw) + + errEventTypeLength = fmt.Errorf("event type exceeds length limit of %d", + attributeKeyLengthLimit) + // ErrEventTypeRegex will be returned to caller of app.RecordCustomEvent + // if the event type is not valid. + ErrEventTypeRegex = fmt.Errorf("event type must match %s", eventTypeRegexRaw) + errNumAttributes = fmt.Errorf("maximum of %d attributes exceeded", + customEventAttributeLimit) +) + +// CustomEvent is a custom event. +type CustomEvent struct { + eventType string + timestamp time.Time + truncatedParams map[string]interface{} +} + +// WriteJSON prepares JSON in the format expected by the collector. +func (e *CustomEvent) WriteJSON(buf *bytes.Buffer) { + w := jsonFieldsWriter{buf: buf} + buf.WriteByte('[') + buf.WriteByte('{') + w.stringField("type", e.eventType) + w.floatField("timestamp", timeToFloatSeconds(e.timestamp)) + buf.WriteByte('}') + + buf.WriteByte(',') + buf.WriteByte('{') + w = jsonFieldsWriter{buf: buf} + for key, val := range e.truncatedParams { + writeAttributeValueJSON(&w, key, val) + } + buf.WriteByte('}') + + buf.WriteByte(',') + buf.WriteByte('{') + buf.WriteByte('}') + buf.WriteByte(']') +} + +// MarshalJSON is used for testing. +func (e *CustomEvent) MarshalJSON() ([]byte, error) { + buf := bytes.NewBuffer(make([]byte, 0, 256)) + + e.WriteJSON(buf) + + return buf.Bytes(), nil +} + +func eventTypeValidate(eventType string) error { + if len(eventType) > attributeKeyLengthLimit { + return errEventTypeLength + } + if !eventTypeRegex.MatchString(eventType) { + return ErrEventTypeRegex + } + return nil +} + +// CreateCustomEvent creates a custom event. +func CreateCustomEvent(eventType string, params map[string]interface{}, now time.Time) (*CustomEvent, error) { + if err := eventTypeValidate(eventType); nil != err { + return nil, err + } + + if len(params) > customEventAttributeLimit { + return nil, errNumAttributes + } + + truncatedParams := make(map[string]interface{}) + for key, val := range params { + val, err := ValidateUserAttribute(key, val) + if nil != err { + return nil, err + } + truncatedParams[key] = val + } + + return &CustomEvent{ + eventType: eventType, + timestamp: now, + truncatedParams: truncatedParams, + }, nil +} + +// MergeIntoHarvest implements Harvestable. 
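+// The event is buffered in the application's custom event pool and sent with
+// the next configurable (event) harvest.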
+func (e *CustomEvent) MergeIntoHarvest(h *Harvest) { + h.CustomEvents.Add(e) +} diff --git a/vendor/github.com/newrelic/go-agent/internal/custom_events.go b/vendor/github.com/newrelic/go-agent/internal/custom_events.go new file mode 100644 index 00000000000..d58121aeb6f --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/custom_events.go @@ -0,0 +1,35 @@ +package internal + +import ( + "time" +) + +type customEvents struct { + *analyticsEvents +} + +func newCustomEvents(max int) *customEvents { + return &customEvents{ + analyticsEvents: newAnalyticsEvents(max), + } +} + +func (cs *customEvents) Add(e *CustomEvent) { + // For the Go Agent, customEvents are added to the application, not the transaction. + // As a result, customEvents do not inherit their priority from the transaction, though + // they are still sampled according to priority sampling. + priority := NewPriority() + cs.addEvent(analyticsEvent{priority, e}) +} + +func (cs *customEvents) MergeIntoHarvest(h *Harvest) { + h.CustomEvents.mergeFailed(cs.analyticsEvents) +} + +func (cs *customEvents) Data(agentRunID string, harvestStart time.Time) ([]byte, error) { + return cs.CollectorJSON(agentRunID) +} + +func (cs *customEvents) EndpointMethod() string { + return cmdCustomEvents +} diff --git a/vendor/github.com/newrelic/go-agent/internal/custom_metric.go b/vendor/github.com/newrelic/go-agent/internal/custom_metric.go new file mode 100644 index 00000000000..61600aea2b5 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/custom_metric.go @@ -0,0 +1,12 @@ +package internal + +// CustomMetric is a custom metric. +type CustomMetric struct { + RawInputName string + Value float64 +} + +// MergeIntoHarvest implements Harvestable. +func (m CustomMetric) MergeIntoHarvest(h *Harvest) { + h.Metrics.addValue(customMetric(m.RawInputName), "", m.Value, unforced) +} diff --git a/vendor/github.com/newrelic/go-agent/internal/distributed_tracing.go b/vendor/github.com/newrelic/go-agent/internal/distributed_tracing.go new file mode 100644 index 00000000000..82af9b6674d --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/distributed_tracing.go @@ -0,0 +1,203 @@ +package internal + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "time" +) + +type distTraceVersion [2]int + +func (v distTraceVersion) major() int { return v[0] } +func (v distTraceVersion) minor() int { return v[1] } + +const ( + // CallerType is the Type field's value for outbound payloads. + CallerType = "App" +) + +var ( + currentDistTraceVersion = distTraceVersion([2]int{0 /* Major */, 1 /* Minor */}) + callerUnknown = payloadCaller{Type: "Unknown", App: "Unknown", Account: "Unknown", TransportType: "Unknown"} +) + +// timestampMillis allows raw payloads to use exact times, and marshalled +// payloads to use times in millis. +type timestampMillis time.Time + +func (tm *timestampMillis) UnmarshalJSON(data []byte) error { + var millis uint64 + if err := json.Unmarshal(data, &millis); nil != err { + return err + } + *tm = timestampMillis(timeFromUnixMilliseconds(millis)) + return nil +} + +func (tm timestampMillis) MarshalJSON() ([]byte, error) { + return json.Marshal(TimeToUnixMilliseconds(tm.Time())) +} + +func (tm timestampMillis) Time() time.Time { return time.Time(tm) } +func (tm *timestampMillis) Set(t time.Time) { *tm = timestampMillis(t) } + +// Payload is the distributed tracing payload. 
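+//
+// On the wire the payload is wrapped in a versioned envelope (see text
+// below), so a serialized payload looks roughly like this, with illustrative
+// values:
+//
+//	{"v":[0,1],"d":{"ty":"App","ap":"app","ac":"123","tr":"traceID","pr":0.5,"sa":true,"ti":1577830800000}}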
+type Payload struct {
+	payloadCaller
+	TransactionID     string          `json:"tx,omitempty"`
+	ID                string          `json:"id,omitempty"`
+	TracedID          string          `json:"tr"`
+	Priority          Priority        `json:"pr"`
+	Sampled           *bool           `json:"sa"`
+	Timestamp         timestampMillis `json:"ti"`
+	TransportDuration time.Duration   `json:"-"`
+}
+
+type payloadCaller struct {
+	TransportType     string `json:"-"`
+	Type              string `json:"ty"`
+	App               string `json:"ap"`
+	Account           string `json:"ac"`
+	TrustedAccountKey string `json:"tk,omitempty"`
+}
+
+// IsValid validates the payload data by looking for missing fields.
+// It returns an error if a required field is missing, or nil if the payload
+// is valid.
+func (p Payload) IsValid() error {
+
+	// If a payload missing both `guid` and `transactionId` is received,
+	// a ParseException supportability metric should be generated.
+	if "" == p.TransactionID && "" == p.ID {
+		return ErrPayloadMissingField{message: "missing both guid/id and TransactionId/tx"}
+	}
+
+	if "" == p.Type {
+		return ErrPayloadMissingField{message: "missing Type/ty"}
+	}
+
+	if "" == p.Account {
+		return ErrPayloadMissingField{message: "missing Account/ac"}
+	}
+
+	if "" == p.App {
+		return ErrPayloadMissingField{message: "missing App/ap"}
+	}
+
+	if "" == p.TracedID {
+		return ErrPayloadMissingField{message: "missing TracedID/tr"}
+	}
+
+	if p.Timestamp.Time().IsZero() || 0 == p.Timestamp.Time().Unix() {
+		return ErrPayloadMissingField{message: "missing Timestamp/ti"}
+	}
+
+	return nil
+}
+
+func (p Payload) text(v distTraceVersion) []byte {
+	js, _ := json.Marshal(struct {
+		Version distTraceVersion `json:"v"`
+		Data    Payload          `json:"d"`
+	}{
+		Version: v,
+		Data:    p,
+	})
+	return js
+}
+
+// Text implements newrelic.DistributedTracePayload.
+func (p Payload) Text() string {
+	t := p.text(currentDistTraceVersion)
+	return string(t)
+}
+
+// HTTPSafe implements newrelic.DistributedTracePayload.
+func (p Payload) HTTPSafe() string {
+	t := p.text(currentDistTraceVersion)
+	return base64.StdEncoding.EncodeToString(t)
+}
+
+// SetSampled lets us set a value for our *bool,
+// which we can't do directly since a pointer
+// needs something to point at.
+func (p *Payload) SetSampled(sampled bool) {
+	p.Sampled = &sampled
+}
+
+// ErrPayloadParse indicates that the payload was malformed.
+type ErrPayloadParse struct{ err error }
+
+func (e ErrPayloadParse) Error() string {
+	return fmt.Sprintf("unable to parse inbound payload: %s", e.err.Error())
+}
+
+// ErrPayloadMissingField indicates that a required field is missing.
+type ErrPayloadMissingField struct{ message string }
+
+func (e ErrPayloadMissingField) Error() string {
+	return fmt.Sprintf("payload is missing required fields: %s", e.message)
+}
+
+// ErrUnsupportedPayloadVersion indicates that the major version number is
+// unknown.
+type ErrUnsupportedPayloadVersion struct{ version int }
+
+func (e ErrUnsupportedPayloadVersion) Error() string {
+	return fmt.Sprintf("unsupported major version number %d", e.version)
+}
+
+// AcceptPayload parses the inbound distributed tracing payload.
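+// It accepts any of the forms produced above: a raw JSON string (detected by
+// a leading '{'), a base64 encoded string from HTTPSafe, a []byte of either,
+// or a Payload value. For example, assuming p is a valid Payload, both of
+// these succeed:
+//
+//	AcceptPayload(p.Text())     // raw JSON envelope
+//	AcceptPayload(p.HTTPSafe()) // base64 encoded envelope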
+func AcceptPayload(p interface{}) (*Payload, error) { + var payload Payload + if byteSlice, ok := p.([]byte); ok { + p = string(byteSlice) + } + switch v := p.(type) { + case string: + if "" == v { + return nil, nil + } + var decoded []byte + if '{' == v[0] { + decoded = []byte(v) + } else { + var err error + decoded, err = base64.StdEncoding.DecodeString(v) + if nil != err { + return nil, ErrPayloadParse{err: err} + } + } + envelope := struct { + Version distTraceVersion `json:"v"` + Data json.RawMessage `json:"d"` + }{} + if err := json.Unmarshal(decoded, &envelope); nil != err { + return nil, ErrPayloadParse{err: err} + } + + if 0 == envelope.Version.major() && 0 == envelope.Version.minor() { + return nil, ErrPayloadMissingField{message: "missing v"} + } + + if envelope.Version.major() > currentDistTraceVersion.major() { + return nil, ErrUnsupportedPayloadVersion{ + version: envelope.Version.major(), + } + } + if err := json.Unmarshal(envelope.Data, &payload); nil != err { + return nil, ErrPayloadParse{err: err} + } + case Payload: + payload = v + default: + // Could be a shim payload (if the app is not yet connected). + return nil, nil + } + // Ensure that we don't have a reference to the input payload: we don't + // want to change it, it could be used multiple times. + alloc := new(Payload) + *alloc = payload + + return alloc, nil +} diff --git a/vendor/github.com/newrelic/go-agent/internal/environment.go b/vendor/github.com/newrelic/go-agent/internal/environment.go new file mode 100644 index 00000000000..f7f27801226 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/environment.go @@ -0,0 +1,61 @@ +package internal + +import ( + "encoding/json" + "reflect" + "runtime" +) + +// Environment describes the application's environment. +type Environment struct { + Compiler string `env:"runtime.Compiler"` + GOARCH string `env:"runtime.GOARCH"` + GOOS string `env:"runtime.GOOS"` + Version string `env:"runtime.Version"` + NumCPU int `env:"runtime.NumCPU"` +} + +var ( + // SampleEnvironment is useful for testing. + SampleEnvironment = Environment{ + Compiler: "comp", + GOARCH: "arch", + GOOS: "goos", + Version: "vers", + NumCPU: 8, + } +) + +// NewEnvironment returns a new Environment. +func NewEnvironment() Environment { + return Environment{ + Compiler: runtime.Compiler, + GOARCH: runtime.GOARCH, + GOOS: runtime.GOOS, + Version: runtime.Version(), + NumCPU: runtime.NumCPU(), + } +} + +// MarshalJSON prepares Environment JSON in the format expected by the collector +// during the connect command. +func (e Environment) MarshalJSON() ([]byte, error) { + var arr [][]interface{} + + val := reflect.ValueOf(e) + numFields := val.NumField() + + arr = make([][]interface{}, numFields) + + for i := 0; i < numFields; i++ { + v := val.Field(i) + t := val.Type().Field(i).Tag.Get("env") + + arr[i] = []interface{}{ + t, + v.Interface(), + } + } + + return json.Marshal(arr) +} diff --git a/vendor/github.com/newrelic/go-agent/internal/error_events.go b/vendor/github.com/newrelic/go-agent/internal/error_events.go new file mode 100644 index 00000000000..08f607dbef9 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/error_events.go @@ -0,0 +1,64 @@ +package internal + +import ( + "bytes" + "time" +) + +// MarshalJSON is used for testing. +func (e *ErrorEvent) MarshalJSON() ([]byte, error) { + buf := bytes.NewBuffer(make([]byte, 0, 256)) + + e.WriteJSON(buf) + + return buf.Bytes(), nil +} + +// WriteJSON prepares JSON in the format expected by the collector. 
+// https://source.datanerd.us/agents/agent-specs/blob/master/Error-Events.md +func (e *ErrorEvent) WriteJSON(buf *bytes.Buffer) { + w := jsonFieldsWriter{buf: buf} + buf.WriteByte('[') + buf.WriteByte('{') + w.stringField("type", "TransactionError") + w.stringField("error.class", e.Klass) + w.stringField("error.message", e.Msg) + w.floatField("timestamp", timeToFloatSeconds(e.When)) + w.stringField("transactionName", e.FinalName) + + sharedTransactionIntrinsics(&e.TxnEvent, &w) + sharedBetterCATIntrinsics(&e.TxnEvent, &w) + + buf.WriteByte('}') + buf.WriteByte(',') + userAttributesJSON(e.Attrs, buf, destError, e.ErrorData.ExtraAttributes) + buf.WriteByte(',') + agentAttributesJSON(e.Attrs, buf, destError) + buf.WriteByte(']') +} + +type errorEvents struct { + *analyticsEvents +} + +func newErrorEvents(max int) *errorEvents { + return &errorEvents{ + analyticsEvents: newAnalyticsEvents(max), + } +} + +func (events *errorEvents) Add(e *ErrorEvent, priority Priority) { + events.addEvent(analyticsEvent{priority, e}) +} + +func (events *errorEvents) MergeIntoHarvest(h *Harvest) { + h.ErrorEvents.mergeFailed(events.analyticsEvents) +} + +func (events *errorEvents) Data(agentRunID string, harvestStart time.Time) ([]byte, error) { + return events.CollectorJSON(agentRunID) +} + +func (events *errorEvents) EndpointMethod() string { + return cmdErrorEvents +} diff --git a/vendor/github.com/newrelic/go-agent/internal/errors.go b/vendor/github.com/newrelic/go-agent/internal/errors.go new file mode 100644 index 00000000000..23dec702991 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/errors.go @@ -0,0 +1,175 @@ +package internal + +import ( + "bytes" + "fmt" + "net/http" + "strconv" + "time" + + "github.com/newrelic/go-agent/internal/jsonx" +) + +const ( + // PanicErrorKlass is the error klass used for errors generated by + // recovering panics in txn.End. + PanicErrorKlass = "panic" +) + +func panicValueMsg(v interface{}) string { + switch val := v.(type) { + case error: + return val.Error() + default: + return fmt.Sprintf("%v", v) + } +} + +// TxnErrorFromPanic creates a new TxnError from a panic. +func TxnErrorFromPanic(now time.Time, v interface{}) ErrorData { + return ErrorData{ + When: now, + Msg: panicValueMsg(v), + Klass: PanicErrorKlass, + } +} + +// TxnErrorFromResponseCode creates a new TxnError from an http response code. +func TxnErrorFromResponseCode(now time.Time, code int) ErrorData { + codeStr := strconv.Itoa(code) + msg := http.StatusText(code) + if msg == "" { + // Use a generic message if the code was not an http code + // to support gRPC. + msg = "response code " + codeStr + } + return ErrorData{ + When: now, + Msg: msg, + Klass: codeStr, + } +} + +// ErrorData contains the information about a recorded error. +type ErrorData struct { + When time.Time + Stack StackTrace + ExtraAttributes map[string]interface{} + Msg string + Klass string +} + +// TxnError combines error data with information about a transaction. TxnError is used for +// both error events and traced errors. +type TxnError struct { + ErrorData + TxnEvent +} + +// ErrorEvent and tracedError are separate types so that error events and traced errors can have +// different WriteJSON methods. +type ErrorEvent TxnError + +type tracedError TxnError + +// TxnErrors is a set of errors captured in a Transaction. +type TxnErrors []*ErrorData + +// NewTxnErrors returns a new empty TxnErrors. +func NewTxnErrors(max int) TxnErrors { + return make([]*ErrorData, 0, max) +} + +// Add adds a TxnError. 
+func (errors *TxnErrors) Add(e ErrorData) { + if len(*errors) < cap(*errors) { + *errors = append(*errors, &e) + } +} + +func (h *tracedError) WriteJSON(buf *bytes.Buffer) { + buf.WriteByte('[') + jsonx.AppendFloat(buf, timeToFloatMilliseconds(h.When)) + buf.WriteByte(',') + jsonx.AppendString(buf, h.FinalName) + buf.WriteByte(',') + jsonx.AppendString(buf, h.Msg) + buf.WriteByte(',') + jsonx.AppendString(buf, h.Klass) + buf.WriteByte(',') + + buf.WriteByte('{') + buf.WriteString(`"agentAttributes"`) + buf.WriteByte(':') + agentAttributesJSON(h.Attrs, buf, destError) + buf.WriteByte(',') + buf.WriteString(`"userAttributes"`) + buf.WriteByte(':') + userAttributesJSON(h.Attrs, buf, destError, h.ErrorData.ExtraAttributes) + buf.WriteByte(',') + buf.WriteString(`"intrinsics"`) + buf.WriteByte(':') + intrinsicsJSON(&h.TxnEvent, buf) + if nil != h.Stack { + buf.WriteByte(',') + buf.WriteString(`"stack_trace"`) + buf.WriteByte(':') + h.Stack.WriteJSON(buf) + } + buf.WriteByte('}') + + buf.WriteByte(']') +} + +// MarshalJSON is used for testing. +func (h *tracedError) MarshalJSON() ([]byte, error) { + buf := &bytes.Buffer{} + h.WriteJSON(buf) + return buf.Bytes(), nil +} + +type harvestErrors []*tracedError + +func newHarvestErrors(max int) harvestErrors { + return make([]*tracedError, 0, max) +} + +// MergeTxnErrors merges a transaction's errors into the harvest's errors. +func MergeTxnErrors(errors *harvestErrors, errs TxnErrors, txnEvent TxnEvent) { + for _, e := range errs { + if len(*errors) == cap(*errors) { + return + } + *errors = append(*errors, &tracedError{ + TxnEvent: txnEvent, + ErrorData: *e, + }) + } +} + +func (errors harvestErrors) Data(agentRunID string, harvestStart time.Time) ([]byte, error) { + if 0 == len(errors) { + return nil, nil + } + estimate := 1024 * len(errors) + buf := bytes.NewBuffer(make([]byte, 0, estimate)) + buf.WriteByte('[') + jsonx.AppendString(buf, agentRunID) + buf.WriteByte(',') + buf.WriteByte('[') + for i, e := range errors { + if i > 0 { + buf.WriteByte(',') + } + e.WriteJSON(buf) + } + buf.WriteByte(']') + buf.WriteByte(']') + return buf.Bytes(), nil +} + +func (errors harvestErrors) MergeIntoHarvest(h *Harvest) {} + +func (errors harvestErrors) EndpointMethod() string { + return cmdErrorData +} diff --git a/vendor/github.com/newrelic/go-agent/internal/expect.go b/vendor/github.com/newrelic/go-agent/internal/expect.go new file mode 100644 index 00000000000..deff8f23833 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/expect.go @@ -0,0 +1,591 @@ +package internal + +import ( + "encoding/json" + "fmt" + "runtime" + + "time" +) + +var ( + // Unfortunately, the resolution of time.Now() on Windows is coarse: Two + // sequential calls to time.Now() may return the same value, and tests + // which expect non-zero durations may fail. To avoid adding sleep + // statements or mocking time.Now(), those tests are skipped on Windows. + doDurationTests = runtime.GOOS != `windows` +) + +// Validator is used for testing. +type Validator interface { + Error(...interface{}) +} + +func validateStringField(v Validator, fieldName, v1, v2 string) { + if v1 != v2 { + v.Error(fieldName, v1, v2) + } +} + +type addValidatorField struct { + field interface{} + original Validator +} + +func (a addValidatorField) Error(fields ...interface{}) { + fields = append([]interface{}{a.field}, fields...) + a.original.Error(fields...) +} + +// ExtendValidator is used to add more context to a validator. 
+func ExtendValidator(v Validator, field interface{}) Validator { + return addValidatorField{ + field: field, + original: v, + } +} + +// WantMetric is a metric expectation. If Data is nil, then any data values are +// acceptable. If Data has len 1, then only the metric count is validated. +type WantMetric struct { + Name string + Scope string + Forced interface{} // true, false, or nil + Data []float64 +} + +// WantError is a traced error expectation. +type WantError struct { + TxnName string + Msg string + Klass string + UserAttributes map[string]interface{} + AgentAttributes map[string]interface{} +} + +func uniquePointer() *struct{} { + s := struct{}{} + return &s +} + +var ( + // MatchAnything is for use when matching attributes. + MatchAnything = uniquePointer() +) + +// WantEvent is a transaction or error event expectation. +type WantEvent struct { + Intrinsics map[string]interface{} + UserAttributes map[string]interface{} + AgentAttributes map[string]interface{} +} + +// WantTxnTrace is a transaction trace expectation. +type WantTxnTrace struct { + MetricName string + NumSegments int + UserAttributes map[string]interface{} + AgentAttributes map[string]interface{} + Intrinsics map[string]interface{} + // If the Root's SegmentName is populated then the segments will be + // tested, otherwise NumSegments will be tested. + Root WantTraceSegment +} + +// WantTraceSegment is a transaction trace segment expectation. +type WantTraceSegment struct { + SegmentName string + // RelativeStartMillis and RelativeStopMillis will be tested if they are + // provided: This makes it easy for top level tests which cannot + // control duration. + RelativeStartMillis interface{} + RelativeStopMillis interface{} + Attributes map[string]interface{} + Children []WantTraceSegment +} + +// WantSlowQuery is a slowQuery expectation. +type WantSlowQuery struct { + Count int32 + MetricName string + Query string + TxnName string + TxnURL string + DatabaseName string + Host string + PortPathOrID string + Params map[string]interface{} +} + +// HarvestTestinger is implemented by the app. It sets an empty test harvest +// and modifies the connect reply if a callback is provided. +type HarvestTestinger interface { + HarvestTesting(replyfn func(*ConnectReply)) +} + +// HarvestTesting allows integration packages to test instrumentation. +func HarvestTesting(app interface{}, replyfn func(*ConnectReply)) { + ta, ok := app.(HarvestTestinger) + if !ok { + panic("HarvestTesting type assertion failure") + } + ta.HarvestTesting(replyfn) +} + +// WantTxn provides the expectation parameters to ExpectTxnMetrics. +type WantTxn struct { + Name string + IsWeb bool + NumErrors int +} + +// ExpectTxnMetrics tests that the app contains metrics for a transaction. 
+func ExpectTxnMetrics(t Validator, mt *metricTable, want WantTxn) { + var metrics []WantMetric + var scope string + var allWebOther string + if want.IsWeb { + scope = "WebTransaction/Go/" + want.Name + allWebOther = "allWeb" + metrics = []WantMetric{ + {Name: "WebTransaction/Go/" + want.Name, Scope: "", Forced: true, Data: nil}, + {Name: "WebTransaction", Scope: "", Forced: true, Data: nil}, + {Name: "WebTransactionTotalTime/Go/" + want.Name, Scope: "", Forced: false, Data: nil}, + {Name: "WebTransactionTotalTime", Scope: "", Forced: true, Data: nil}, + {Name: "HttpDispatcher", Scope: "", Forced: true, Data: nil}, + {Name: "Apdex", Scope: "", Forced: true, Data: nil}, + {Name: "Apdex/Go/" + want.Name, Scope: "", Forced: false, Data: nil}, + } + } else { + scope = "OtherTransaction/Go/" + want.Name + allWebOther = "allOther" + metrics = []WantMetric{ + {Name: "OtherTransaction/Go/" + want.Name, Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransaction/all", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransactionTotalTime/Go/" + want.Name, Scope: "", Forced: false, Data: nil}, + {Name: "OtherTransactionTotalTime", Scope: "", Forced: true, Data: nil}, + } + } + if want.NumErrors > 0 { + data := []float64{float64(want.NumErrors), 0, 0, 0, 0, 0} + metrics = append(metrics, []WantMetric{ + {Name: "Errors/all", Scope: "", Forced: true, Data: data}, + {Name: "Errors/" + allWebOther, Scope: "", Forced: true, Data: data}, + {Name: "Errors/" + scope, Scope: "", Forced: true, Data: data}, + }...) + } + ExpectMetrics(t, mt, metrics) +} + +// Expect exposes methods that allow for testing whether the correct data was +// captured. +type Expect interface { + ExpectCustomEvents(t Validator, want []WantEvent) + ExpectErrors(t Validator, want []WantError) + ExpectErrorEvents(t Validator, want []WantEvent) + + ExpectTxnEvents(t Validator, want []WantEvent) + + ExpectMetrics(t Validator, want []WantMetric) + ExpectMetricsPresent(t Validator, want []WantMetric) + ExpectTxnMetrics(t Validator, want WantTxn) + + ExpectTxnTraces(t Validator, want []WantTxnTrace) + ExpectSlowQueries(t Validator, want []WantSlowQuery) + + ExpectSpanEvents(t Validator, want []WantEvent) +} + +func expectMetricField(t Validator, id metricID, v1, v2 float64, fieldName string) { + if v1 != v2 { + t.Error("metric fields do not match", id, v1, v2, fieldName) + } +} + +// ExpectMetricsPresent allows testing of metrics without requiring an exact match +func ExpectMetricsPresent(t Validator, mt *metricTable, expect []WantMetric) { + expectMetrics(t, mt, expect, false) +} + +// ExpectMetrics allows testing of metrics. It passes if mt exactly matches expect. 
+func ExpectMetrics(t Validator, mt *metricTable, expect []WantMetric) { + expectMetrics(t, mt, expect, true) +} + +func expectMetrics(t Validator, mt *metricTable, expect []WantMetric, exactMatch bool) { + if exactMatch { + if len(mt.metrics) != len(expect) { + t.Error("metric counts do not match expectations", len(mt.metrics), len(expect)) + } + } + expectedIds := make(map[metricID]struct{}) + for _, e := range expect { + id := metricID{Name: e.Name, Scope: e.Scope} + expectedIds[id] = struct{}{} + m := mt.metrics[id] + if nil == m { + t.Error("unable to find metric", id) + continue + } + + if b, ok := e.Forced.(bool); ok { + if b != (forced == m.forced) { + t.Error("metric forced incorrect", b, m.forced, id) + } + } + + if nil != e.Data { + expectMetricField(t, id, e.Data[0], m.data.countSatisfied, "countSatisfied") + + if len(e.Data) > 1 { + expectMetricField(t, id, e.Data[1], m.data.totalTolerated, "totalTolerated") + expectMetricField(t, id, e.Data[2], m.data.exclusiveFailed, "exclusiveFailed") + expectMetricField(t, id, e.Data[3], m.data.min, "min") + expectMetricField(t, id, e.Data[4], m.data.max, "max") + expectMetricField(t, id, e.Data[5], m.data.sumSquares, "sumSquares") + } + } + } + if exactMatch { + for id := range mt.metrics { + if _, ok := expectedIds[id]; !ok { + t.Error("expected metrics does not contain", id.Name, id.Scope) + } + } + } +} + +func expectAttributes(v Validator, exists map[string]interface{}, expect map[string]interface{}) { + // TODO: This params comparison can be made smarter: Alert differences + // based on sub/super set behavior. + if len(exists) != len(expect) { + v.Error("attributes length difference", len(exists), len(expect)) + } + for key, val := range expect { + found, ok := exists[key] + if !ok { + v.Error("expected attribute not found: ", key) + continue + } + if val == MatchAnything { + continue + } + v1 := fmt.Sprint(found) + v2 := fmt.Sprint(val) + if v1 != v2 { + v.Error("value difference", fmt.Sprintf("key=%s", key), v1, v2) + } + } + for key, val := range exists { + _, ok := expect[key] + if !ok { + v.Error("unexpected attribute present: ", key, val) + continue + } + } +} + +// ExpectCustomEvents allows testing of custom events. It passes if cs exactly matches expect. 
+func ExpectCustomEvents(v Validator, cs *customEvents, expect []WantEvent) { + expectEvents(v, cs.analyticsEvents, expect, nil) +} + +func expectEvent(v Validator, e json.Marshaler, expect WantEvent) { + js, err := e.MarshalJSON() + if nil != err { + v.Error("unable to marshal event", err) + return + } + var event []map[string]interface{} + err = json.Unmarshal(js, &event) + if nil != err { + v.Error("unable to parse event json", err) + return + } + intrinsics := event[0] + userAttributes := event[1] + agentAttributes := event[2] + + if nil != expect.Intrinsics { + expectAttributes(v, intrinsics, expect.Intrinsics) + } + if nil != expect.UserAttributes { + expectAttributes(v, userAttributes, expect.UserAttributes) + } + if nil != expect.AgentAttributes { + expectAttributes(v, agentAttributes, expect.AgentAttributes) + } +} + +func expectEvents(v Validator, events *analyticsEvents, expect []WantEvent, extraAttributes map[string]interface{}) { + if len(events.events) != len(expect) { + v.Error("number of events does not match", len(events.events), len(expect)) + return + } + for i, e := range expect { + event, ok := events.events[i].jsonWriter.(json.Marshaler) + if !ok { + v.Error("event does not implement json.Marshaler") + continue + } + if nil != e.Intrinsics { + e.Intrinsics = mergeAttributes(extraAttributes, e.Intrinsics) + } + expectEvent(v, event, e) + } +} + +// Second attributes have priority. +func mergeAttributes(a1, a2 map[string]interface{}) map[string]interface{} { + a := make(map[string]interface{}) + for k, v := range a1 { + a[k] = v + } + for k, v := range a2 { + a[k] = v + } + return a +} + +// ExpectErrorEvents allows testing of error events. It passes if events exactly matches expect. +func ExpectErrorEvents(v Validator, events *errorEvents, expect []WantEvent) { + expectEvents(v, events.analyticsEvents, expect, map[string]interface{}{ + // The following intrinsics should always be present in + // error events: + "type": "TransactionError", + "timestamp": MatchAnything, + "duration": MatchAnything, + }) +} + +// ExpectSpanEvents allows testing of span events. It passes if events exactly matches expect. +func ExpectSpanEvents(v Validator, events *spanEvents, expect []WantEvent) { + expectEvents(v, events.analyticsEvents, expect, map[string]interface{}{ + // The following intrinsics should always be present in + // span events: + "type": "Span", + "timestamp": MatchAnything, + "duration": MatchAnything, + "traceId": MatchAnything, + "guid": MatchAnything, + "transactionId": MatchAnything, + // All span events are currently sampled. + "sampled": true, + "priority": MatchAnything, + }) +} + +// ExpectTxnEvents allows testing of txn events. 
+func ExpectTxnEvents(v Validator, events *txnEvents, expect []WantEvent) { + expectEvents(v, events.analyticsEvents, expect, map[string]interface{}{ + // The following intrinsics should always be present in + // txn events: + "type": "Transaction", + "timestamp": MatchAnything, + "duration": MatchAnything, + "totalTime": MatchAnything, + "error": MatchAnything, + }) +} + +func expectError(v Validator, err *tracedError, expect WantError) { + validateStringField(v, "txnName", expect.TxnName, err.FinalName) + validateStringField(v, "klass", expect.Klass, err.Klass) + validateStringField(v, "msg", expect.Msg, err.Msg) + js, errr := err.MarshalJSON() + if nil != errr { + v.Error("unable to marshal error json", errr) + return + } + var unmarshalled []interface{} + errr = json.Unmarshal(js, &unmarshalled) + if nil != errr { + v.Error("unable to unmarshal error json", errr) + return + } + attributes := unmarshalled[4].(map[string]interface{}) + agentAttributes := attributes["agentAttributes"].(map[string]interface{}) + userAttributes := attributes["userAttributes"].(map[string]interface{}) + + if nil != expect.UserAttributes { + expectAttributes(v, userAttributes, expect.UserAttributes) + } + if nil != expect.AgentAttributes { + expectAttributes(v, agentAttributes, expect.AgentAttributes) + } + if stack := attributes["stack_trace"]; nil == stack { + v.Error("missing error stack trace") + } +} + +// ExpectErrors allows testing of errors. +func ExpectErrors(v Validator, errors harvestErrors, expect []WantError) { + if len(errors) != len(expect) { + v.Error("number of errors mismatch", len(errors), len(expect)) + return + } + for i, e := range expect { + expectError(v, errors[i], e) + } +} + +func countSegments(node []interface{}) int { + count := 1 + children := node[4].([]interface{}) + for _, c := range children { + node := c.([]interface{}) + count += countSegments(node) + } + return count +} + +func expectTraceSegment(v Validator, nodeObj interface{}, expect WantTraceSegment) { + node := nodeObj.([]interface{}) + start := int(node[0].(float64)) + stop := int(node[1].(float64)) + name := node[2].(string) + attributes := node[3].(map[string]interface{}) + children := node[4].([]interface{}) + + validateStringField(v, "segmentName", expect.SegmentName, name) + if nil != expect.RelativeStartMillis { + expectStart, ok := expect.RelativeStartMillis.(int) + if !ok { + v.Error("invalid expect.RelativeStartMillis", expect.RelativeStartMillis) + } else if expectStart != start { + v.Error("segmentStartTime", expect.SegmentName, start, expectStart) + } + } + if nil != expect.RelativeStopMillis { + expectStop, ok := expect.RelativeStopMillis.(int) + if !ok { + v.Error("invalid expect.RelativeStopMillis", expect.RelativeStopMillis) + } else if expectStop != stop { + v.Error("segmentStopTime", expect.SegmentName, stop, expectStop) + } + } + if nil != expect.Attributes { + expectAttributes(v, attributes, expect.Attributes) + } + if len(children) != len(expect.Children) { + v.Error("segmentChildrenCount", expect.SegmentName, len(children), len(expect.Children)) + } else { + for idx, child := range children { + expectTraceSegment(v, child, expect.Children[idx]) + } + } +} + +func expectTxnTrace(v Validator, got interface{}, expect WantTxnTrace) { + unmarshalled := got.([]interface{}) + duration := unmarshalled[1].(float64) + name := unmarshalled[2].(string) + var arrayURL string + if nil != unmarshalled[3] { + arrayURL = unmarshalled[3].(string) + } + traceData := unmarshalled[4].([]interface{}) + + rootNode := 
traceData[3].([]interface{})
+	attributes := traceData[4].(map[string]interface{})
+	userAttributes := attributes["userAttributes"].(map[string]interface{})
+	agentAttributes := attributes["agentAttributes"].(map[string]interface{})
+	intrinsics := attributes["intrinsics"].(map[string]interface{})
+
+	validateStringField(v, "metric name", expect.MetricName, name)
+
+	if doDurationTests && 0 == duration {
+		v.Error("zero trace duration")
+	}
+
+	if nil != expect.UserAttributes {
+		expectAttributes(v, userAttributes, expect.UserAttributes)
+	}
+	if nil != expect.AgentAttributes {
+		expectAttributes(v, agentAttributes, expect.AgentAttributes)
+		expectURL, _ := expect.AgentAttributes["request.uri"].(string)
+		if "" != expectURL {
+			validateStringField(v, "request url in array", expectURL, arrayURL)
+		}
+	}
+	if nil != expect.Intrinsics {
+		expectAttributes(v, intrinsics, expect.Intrinsics)
+	}
+	if expect.Root.SegmentName != "" {
+		expectTraceSegment(v, rootNode, expect.Root)
+	} else {
+		numSegments := countSegments(rootNode)
+		// The expectation segment count does not include the two root nodes.
+		numSegments -= 2
+		if expect.NumSegments != numSegments {
+			v.Error("wrong number of segments", expect.NumSegments, numSegments)
+		}
+	}
+}
+
+// ExpectTxnTraces allows testing of transaction traces.
+func ExpectTxnTraces(v Validator, traces *harvestTraces, want []WantTxnTrace) {
+	if len(want) != traces.Len() {
+		v.Error("number of traces does not match", len(want), traces.Len())
+		return
+	}
+	if len(want) == 0 {
+		return
+	}
+	js, err := traces.Data("agentRunID", time.Now())
+	if nil != err {
+		v.Error("error creating harvest traces data", err)
+		return
+	}
+
+	var unmarshalled []interface{}
+	err = json.Unmarshal(js, &unmarshalled)
+	if nil != err {
+		v.Error("unable to unmarshal traces json", err)
+		return
+	}
+	if "agentRunID" != unmarshalled[0].(string) {
+		v.Error("traces agent run id wrong", unmarshalled[0])
+		return
+	}
+	gotTraces := unmarshalled[1].([]interface{})
+	if len(gotTraces) != len(want) {
+		v.Error("number of traces in json does not match", len(gotTraces), len(want))
+		return
+	}
+	for i, expected := range want {
+		expectTxnTrace(v, gotTraces[i], expected)
+	}
+}
+
+func expectSlowQuery(t Validator, slowQuery *slowQuery, want WantSlowQuery) {
+	if slowQuery.Count != want.Count {
+		t.Error("wrong Count field", slowQuery.Count, want.Count)
+	}
+	uri, _ := slowQuery.TxnEvent.Attrs.GetAgentValue(attributeRequestURI, destTxnTrace)
+	validateStringField(t, "MetricName", slowQuery.DatastoreMetric, want.MetricName)
+	validateStringField(t, "Query", slowQuery.ParameterizedQuery, want.Query)
+	validateStringField(t, "TxnEvent.FinalName", slowQuery.TxnEvent.FinalName, want.TxnName)
+	validateStringField(t, "request.uri", uri, want.TxnURL)
+	validateStringField(t, "DatabaseName", slowQuery.DatabaseName, want.DatabaseName)
+	validateStringField(t, "Host", slowQuery.Host, want.Host)
+	validateStringField(t, "PortPathOrID", slowQuery.PortPathOrID, want.PortPathOrID)
+	expectAttributes(t, map[string]interface{}(slowQuery.QueryParameters), want.Params)
+}
+
+// ExpectSlowQueries allows testing of slow queries.
+func ExpectSlowQueries(t Validator, slowQueries *slowQueries, want []WantSlowQuery) { + if len(want) != len(slowQueries.priorityQueue) { + t.Error("wrong number of slow queries", + "expected", len(want), "got", len(slowQueries.priorityQueue)) + return + } + for _, s := range want { + idx, ok := slowQueries.lookup[s.Query] + if !ok { + t.Error("unable to find slow query", s.Query) + continue + } + expectSlowQuery(t, slowQueries.priorityQueue[idx], s) + } +} diff --git a/vendor/github.com/newrelic/go-agent/internal/harvest.go b/vendor/github.com/newrelic/go-agent/internal/harvest.go new file mode 100644 index 00000000000..35f9849fd5b --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/harvest.go @@ -0,0 +1,317 @@ +package internal + +import ( + "strings" + "sync" + "time" +) + +// Harvestable is something that can be merged into a Harvest. +type Harvestable interface { + MergeIntoHarvest(h *Harvest) +} + +type harvestTimer struct { + lastHarvest time.Time + period time.Duration +} + +func newHarvestTimer(now time.Time, period time.Duration) harvestTimer { + return harvestTimer{ + lastHarvest: now, + period: period, + } +} + +func (timer *harvestTimer) ready(now time.Time) bool { + deadline := timer.lastHarvest.Add(timer.period) + if now.After(deadline) { + timer.lastHarvest = deadline + return true + } + return false +} + +// Harvest contains collected data. +type Harvest struct { + configurableHarvestTimer harvestTimer + fixedHarvestTimer harvestTimer + + // fixedHarvest and configurableHarvest are non-nil in the main Harvest + // used in app.process(), but may be nil in the Harvest returned by + // Harvest.Ready(). + *fixedHarvest + *configurableHarvest +} + +type fixedHarvest struct { + Metrics *metricTable + ErrorTraces harvestErrors + TxnTraces *harvestTraces + SlowSQLs *slowQueries + SpanEvents *spanEvents +} + +type configurableHarvest struct { + CustomEvents *customEvents + TxnEvents *txnEvents + ErrorEvents *errorEvents +} + +const ( + // txnEventPayloadlimit is the maximum number of events that should be + // sent up in one post. + txnEventPayloadlimit = 5000 +) + +// Ready returns a new Harvest which contains the data types ready for harvest, +// or nil if no data is ready for harvest. +func (h *Harvest) Ready(now time.Time, reply *ConnectReply) *Harvest { + ready := &Harvest{} + + if h.configurableHarvestTimer.ready(now) { + h.Metrics.addCount(customEventsSeen, h.CustomEvents.NumSeen(), forced) + h.Metrics.addCount(customEventsSent, h.CustomEvents.NumSaved(), forced) + + h.Metrics.addCount(txnEventsSeen, h.TxnEvents.NumSeen(), forced) + h.Metrics.addCount(txnEventsSent, h.TxnEvents.NumSaved(), forced) + + h.Metrics.addCount(errorEventsSeen, h.ErrorEvents.NumSeen(), forced) + h.Metrics.addCount(errorEventsSent, h.ErrorEvents.NumSaved(), forced) + + ready.configurableHarvest = h.configurableHarvest + h.configurableHarvest = newConfigurableHarvest(now, reply) + } + + // NOTE! This must happen after the configurable harvest conditional to + // ensure that the metrics contain the event supportability metrics. 
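+	// The configurable branch above records its event counts into
+	// h.Metrics, and h.Metrics itself is only replaced below, when the
+	// fixed harvest fires.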
+	if h.fixedHarvestTimer.ready(now) {
+		h.Metrics.addCount(spanEventsSeen, h.SpanEvents.NumSeen(), forced)
+		h.Metrics.addCount(spanEventsSent, h.SpanEvents.NumSaved(), forced)
+
+		ready.fixedHarvest = h.fixedHarvest
+		h.fixedHarvest = newFixedHarvest(now)
+	}
+
+	if nil == ready.fixedHarvest && nil == ready.configurableHarvest {
+		return nil
+	}
+	return ready
+}
+
+func (h *configurableHarvest) payloads(splitLargeTxnEvents bool) []PayloadCreator {
+	if nil == h {
+		return nil
+	}
+	ps := []PayloadCreator{
+		h.CustomEvents,
+		h.ErrorEvents,
+	}
+	if splitLargeTxnEvents {
+		ps = append(ps, h.TxnEvents.payloads(txnEventPayloadlimit)...)
+	} else {
+		ps = append(ps, h.TxnEvents)
+	}
+	return ps
+}
+
+func (h *fixedHarvest) payloads() []PayloadCreator {
+	if nil == h {
+		return nil
+	}
+	return []PayloadCreator{
+		h.Metrics,
+		h.ErrorTraces,
+		h.TxnTraces,
+		h.SlowSQLs,
+		h.SpanEvents,
+	}
+}
+
+// Payloads returns the PayloadCreators for all of the data types in the
+// harvest.
+func (h *Harvest) Payloads(splitLargeTxnEvents bool) []PayloadCreator {
+	if nil == h {
+		return nil
+	}
+	var ps []PayloadCreator
+	ps = append(ps, h.configurableHarvest.payloads(splitLargeTxnEvents)...)
+	ps = append(ps, h.fixedHarvest.payloads()...)
+	return ps
+}
+
+func newFixedHarvest(now time.Time) *fixedHarvest {
+	return &fixedHarvest{
+		Metrics:     newMetricTable(maxMetrics, now),
+		ErrorTraces: newHarvestErrors(maxHarvestErrors),
+		TxnTraces:   newHarvestTraces(),
+		SlowSQLs:    newSlowQueries(maxHarvestSlowSQLs),
+		SpanEvents:  newSpanEvents(maxSpanEvents),
+	}
+}
+
+func newConfigurableHarvest(now time.Time, reply *ConnectReply) *configurableHarvest {
+	harvestData := reply.getHarvestData()
+	return &configurableHarvest{
+		CustomEvents: newCustomEvents(int(harvestData.HarvestLimits.CustomEvents)),
+		TxnEvents:    newTxnEvents(int(harvestData.HarvestLimits.TxnEvents)),
+		ErrorEvents:  newErrorEvents(int(harvestData.HarvestLimits.ErrorEvents)),
+	}
+}
+
+// NewHarvest returns a new Harvest.
+func NewHarvest(now time.Time, reply *ConnectReply) *Harvest {
+	harvestData := reply.getHarvestData()
+	return &Harvest{
+		configurableHarvestTimer: newHarvestTimer(now, harvestData.eventReportPeriod()),
+		fixedHarvestTimer:        newHarvestTimer(now, fixedHarvestPeriod),
+
+		configurableHarvest: newConfigurableHarvest(now, reply),
+		fixedHarvest:        newFixedHarvest(now),
+	}
+}
+
+var (
+	trackMutex   sync.Mutex
+	trackMetrics []string
+)
+
+// TrackUsage helps track which integration packages are used.
+func TrackUsage(s ...string) {
+	trackMutex.Lock()
+	defer trackMutex.Unlock()
+
+	m := "Supportability/" + strings.Join(s, "/")
+	trackMetrics = append(trackMetrics, m)
+}
+
+func createTrackUsageMetrics(metrics *metricTable) {
+	trackMutex.Lock()
+	defer trackMutex.Unlock()
+
+	for _, m := range trackMetrics {
+		metrics.addSingleCount(m, forced)
+	}
+}
+
+// CreateFinalMetrics creates extra metrics at harvest time.
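+// The reply's metric rules are applied last, so renaming rules also affect
+// the supportability metrics created here.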
+func (h *fixedHarvest) CreateFinalMetrics(reply *ConnectReply) { + if nil == h { + return + } + + h.Metrics.addSingleCount(instanceReporting, forced) + + // Configurable event harvest supportability metrics: + // https://source.datanerd.us/agents/agent-specs/blob/master/Connect-LEGACY.md#event-harvest-config + hd := reply.getHarvestData() + period := hd.eventReportPeriod() + h.Metrics.addDuration(supportReportPeriod, "", period, period, forced) + h.Metrics.addValue(supportTxnEventLimit, "", float64(hd.HarvestLimits.TxnEvents), forced) + h.Metrics.addValue(supportCustomEventLimit, "", float64(hd.HarvestLimits.CustomEvents), forced) + h.Metrics.addValue(supportErrorEventLimit, "", float64(hd.HarvestLimits.ErrorEvents), forced) + + createTrackUsageMetrics(h.Metrics) + + h.Metrics = h.Metrics.ApplyRules(reply.MetricRules) +} + +// PayloadCreator is a data type in the harvest. +type PayloadCreator interface { + // In the event of a rpm request failure (hopefully simply an + // intermittent collector issue) the payload may be merged into the next + // time period's harvest. + Harvestable + // Data prepares JSON in the format expected by the collector endpoint. + // This method should return (nil, nil) if the payload is empty and no + // rpm request is necessary. + Data(agentRunID string, harvestStart time.Time) ([]byte, error) + // EndpointMethod is used for the "method" query parameter when posting + // the data. + EndpointMethod() string +} + +func supportMetric(metrics *metricTable, b bool, metricName string) { + if b { + metrics.addSingleCount(metricName, forced) + } +} + +// CreateTxnMetrics creates metrics for a transaction. +func CreateTxnMetrics(args *TxnData, metrics *metricTable) { + withoutFirstSegment := removeFirstSegment(args.FinalName) + + // Duration Metrics + var durationRollup string + var totalTimeRollup string + if args.IsWeb { + durationRollup = webRollup + totalTimeRollup = totalTimeWeb + metrics.addDuration(dispatcherMetric, "", args.Duration, 0, forced) + } else { + durationRollup = backgroundRollup + totalTimeRollup = totalTimeBackground + } + + metrics.addDuration(args.FinalName, "", args.Duration, 0, forced) + metrics.addDuration(durationRollup, "", args.Duration, 0, forced) + + metrics.addDuration(totalTimeRollup, "", args.TotalTime, args.TotalTime, forced) + metrics.addDuration(totalTimeRollup+"/"+withoutFirstSegment, "", args.TotalTime, args.TotalTime, unforced) + + // Better CAT Metrics + if cat := args.BetterCAT; cat.Enabled { + caller := callerUnknown + if nil != cat.Inbound { + caller = cat.Inbound.payloadCaller + } + m := durationByCallerMetric(caller) + metrics.addDuration(m.all, "", args.Duration, args.Duration, unforced) + metrics.addDuration(m.webOrOther(args.IsWeb), "", args.Duration, args.Duration, unforced) + + // Transport Duration Metric + if nil != cat.Inbound { + d := cat.Inbound.TransportDuration + m = transportDurationMetric(caller) + metrics.addDuration(m.all, "", d, d, unforced) + metrics.addDuration(m.webOrOther(args.IsWeb), "", d, d, unforced) + } + + // CAT Error Metrics + if args.HasErrors() { + m = errorsByCallerMetric(caller) + metrics.addSingleCount(m.all, unforced) + metrics.addSingleCount(m.webOrOther(args.IsWeb), unforced) + } + + supportMetric(metrics, args.AcceptPayloadSuccess, supportTracingAcceptSuccess) + supportMetric(metrics, args.AcceptPayloadException, supportTracingAcceptException) + supportMetric(metrics, args.AcceptPayloadParseException, supportTracingAcceptParseException) + supportMetric(metrics, 
args.AcceptPayloadCreateBeforeAccept, supportTracingCreateBeforeAccept) + supportMetric(metrics, args.AcceptPayloadIgnoredMultiple, supportTracingIgnoredMultiple) + supportMetric(metrics, args.AcceptPayloadIgnoredVersion, supportTracingIgnoredVersion) + supportMetric(metrics, args.AcceptPayloadUntrustedAccount, supportTracingAcceptUntrustedAccount) + supportMetric(metrics, args.AcceptPayloadNullPayload, supportTracingAcceptNull) + supportMetric(metrics, args.CreatePayloadSuccess, supportTracingCreatePayloadSuccess) + supportMetric(metrics, args.CreatePayloadException, supportTracingCreatePayloadException) + } + + // Apdex Metrics + if args.Zone != ApdexNone { + metrics.addApdex(apdexRollup, "", args.ApdexThreshold, args.Zone, forced) + + mname := apdexPrefix + withoutFirstSegment + metrics.addApdex(mname, "", args.ApdexThreshold, args.Zone, unforced) + } + + // Error Metrics + if args.HasErrors() { + metrics.addSingleCount(errorsRollupMetric.all, forced) + metrics.addSingleCount(errorsRollupMetric.webOrOther(args.IsWeb), forced) + metrics.addSingleCount(errorsPrefix+args.FinalName, forced) + } + + // Queueing Metrics + if args.Queuing > 0 { + metrics.addDuration(queueMetric, "", args.Queuing, args.Queuing, forced) + } +} diff --git a/vendor/github.com/newrelic/go-agent/internal/intrinsics.go b/vendor/github.com/newrelic/go-agent/internal/intrinsics.go new file mode 100644 index 00000000000..6925e49ff70 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/intrinsics.go @@ -0,0 +1,41 @@ +package internal + +import ( + "bytes" +) + +func addOptionalStringField(w *jsonFieldsWriter, key, value string) { + if value != "" { + w.stringField(key, value) + } +} + +func intrinsicsJSON(e *TxnEvent, buf *bytes.Buffer) { + w := jsonFieldsWriter{buf: buf} + + buf.WriteByte('{') + + w.floatField("totalTime", e.TotalTime.Seconds()) + + if e.BetterCAT.Enabled { + w.stringField("guid", e.BetterCAT.ID) + w.stringField("traceId", e.BetterCAT.TraceID()) + w.writerField("priority", e.BetterCAT.Priority) + w.boolField("sampled", e.BetterCAT.Sampled) + } + + if e.CrossProcess.Used() { + addOptionalStringField(&w, "client_cross_process_id", e.CrossProcess.ClientID) + addOptionalStringField(&w, "trip_id", e.CrossProcess.TripID) + addOptionalStringField(&w, "path_hash", e.CrossProcess.PathHash) + addOptionalStringField(&w, "referring_transaction_guid", e.CrossProcess.ReferringTxnGUID) + } + + if e.CrossProcess.IsSynthetics() { + addOptionalStringField(&w, "synthetics_resource_id", e.CrossProcess.Synthetics.ResourceID) + addOptionalStringField(&w, "synthetics_job_id", e.CrossProcess.Synthetics.JobID) + addOptionalStringField(&w, "synthetics_monitor_id", e.CrossProcess.Synthetics.MonitorID) + } + + buf.WriteByte('}') +} diff --git a/vendor/github.com/newrelic/go-agent/internal/json_object_writer.go b/vendor/github.com/newrelic/go-agent/internal/json_object_writer.go new file mode 100644 index 00000000000..e9533f14126 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/json_object_writer.go @@ -0,0 +1,61 @@ +package internal + +import ( + "bytes" + + "github.com/newrelic/go-agent/internal/jsonx" +) + +type jsonWriter interface { + WriteJSON(buf *bytes.Buffer) +} + +type jsonFieldsWriter struct { + buf *bytes.Buffer + needsComma bool +} + +func (w *jsonFieldsWriter) addKey(key string) { + if w.needsComma { + w.buf.WriteByte(',') + } else { + w.needsComma = true + } + // defensively assume that the key needs escaping: + jsonx.AppendString(w.buf, key) + w.buf.WriteByte(':') +} + +func (w 
*jsonFieldsWriter) stringField(key string, val string) {
+	w.addKey(key)
+	jsonx.AppendString(w.buf, val)
+}
+
+func (w *jsonFieldsWriter) intField(key string, val int64) {
+	w.addKey(key)
+	jsonx.AppendInt(w.buf, val)
+}
+
+func (w *jsonFieldsWriter) floatField(key string, val float64) {
+	w.addKey(key)
+	jsonx.AppendFloat(w.buf, val)
+}
+
+func (w *jsonFieldsWriter) boolField(key string, val bool) {
+	w.addKey(key)
+	if val {
+		w.buf.WriteString("true")
+	} else {
+		w.buf.WriteString("false")
+	}
+}
+
+func (w *jsonFieldsWriter) rawField(key string, val JSONString) {
+	w.addKey(key)
+	w.buf.WriteString(string(val))
+}
+
+func (w *jsonFieldsWriter) writerField(key string, val jsonWriter) {
+	w.addKey(key)
+	val.WriteJSON(w.buf)
+}
diff --git a/vendor/github.com/newrelic/go-agent/internal/jsonx/encode.go b/vendor/github.com/newrelic/go-agent/internal/jsonx/encode.go
new file mode 100644
index 00000000000..6495829f784
--- /dev/null
+++ b/vendor/github.com/newrelic/go-agent/internal/jsonx/encode.go
@@ -0,0 +1,174 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package jsonx extends the encoding/json package to encode JSON
+// incrementally and without requiring reflection.
+package jsonx
+
+import (
+	"bytes"
+	"encoding/json"
+	"math"
+	"reflect"
+	"strconv"
+	"unicode/utf8"
+)
+
+var hex = "0123456789abcdef"
+
+// AppendString escapes s and appends it to buf.
+func AppendString(buf *bytes.Buffer, s string) {
+	buf.WriteByte('"')
+	start := 0
+	for i := 0; i < len(s); {
+		if b := s[i]; b < utf8.RuneSelf {
+			if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' {
+				i++
+				continue
+			}
+			if start < i {
+				buf.WriteString(s[start:i])
+			}
+			switch b {
+			case '\\', '"':
+				buf.WriteByte('\\')
+				buf.WriteByte(b)
+			case '\n':
+				buf.WriteByte('\\')
+				buf.WriteByte('n')
+			case '\r':
+				buf.WriteByte('\\')
+				buf.WriteByte('r')
+			case '\t':
+				buf.WriteByte('\\')
+				buf.WriteByte('t')
+			default:
+				// This encodes bytes < 0x20 except for \n and \r,
+				// as well as <, > and &. The latter are escaped because they
+				// can lead to security holes when user-controlled strings
+				// are rendered into JSON and served to some browsers.
+				buf.WriteString(`\u00`)
+				buf.WriteByte(hex[b>>4])
+				buf.WriteByte(hex[b&0xF])
+			}
+			i++
+			start = i
+			continue
+		}
+		c, size := utf8.DecodeRuneInString(s[i:])
+		if c == utf8.RuneError && size == 1 {
+			if start < i {
+				buf.WriteString(s[start:i])
+			}
+			buf.WriteString(`\ufffd`)
+			i += size
+			start = i
+			continue
+		}
+		// U+2028 is LINE SEPARATOR.
+		// U+2029 is PARAGRAPH SEPARATOR.
+		// They are both technically valid characters in JSON strings,
+		// but don't work in JSONP, which has to be evaluated as JavaScript,
+		// and can lead to security holes there. It is valid JSON to
+		// escape them, so we do so unconditionally.
+		// See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
+		if c == '\u2028' || c == '\u2029' {
+			if start < i {
+				buf.WriteString(s[start:i])
+			}
+			buf.WriteString(`\u202`)
+			buf.WriteByte(hex[c&0xF])
+			i += size
+			start = i
+			continue
+		}
+		i += size
+	}
+	if start < len(s) {
+		buf.WriteString(s[start:])
+	}
+	buf.WriteByte('"')
+}
+
+// AppendStringArray appends an array of string literals to buf.
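+// For example, AppendStringArray(buf, "a", `b"c`) writes ["a","b\"c"].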
+func AppendStringArray(buf *bytes.Buffer, a ...string) {
+	buf.WriteByte('[')
+	for i, s := range a {
+		if i > 0 {
+			buf.WriteByte(',')
+		}
+		AppendString(buf, s)
+	}
+	buf.WriteByte(']')
+}
+
+// AppendFloat appends a numeric literal representing the value to buf.
+func AppendFloat(buf *bytes.Buffer, x float64) error {
+	var scratch [64]byte
+
+	if math.IsInf(x, 0) || math.IsNaN(x) {
+		return &json.UnsupportedValueError{
+			Value: reflect.ValueOf(x),
+			Str:   strconv.FormatFloat(x, 'g', -1, 64),
+		}
+	}
+
+	buf.Write(strconv.AppendFloat(scratch[:0], x, 'g', -1, 64))
+	return nil
+}
+
+// AppendFloatArray appends an array of numeric literals to buf.
+func AppendFloatArray(buf *bytes.Buffer, a ...float64) error {
+	buf.WriteByte('[')
+	for i, x := range a {
+		if i > 0 {
+			buf.WriteByte(',')
+		}
+		if err := AppendFloat(buf, x); err != nil {
+			return err
+		}
+	}
+	buf.WriteByte(']')
+	return nil
+}
+
+// AppendInt appends a numeric literal representing the value to buf.
+func AppendInt(buf *bytes.Buffer, x int64) {
+	var scratch [64]byte
+	buf.Write(strconv.AppendInt(scratch[:0], x, 10))
+}
+
+// AppendIntArray appends an array of numeric literals to buf.
+func AppendIntArray(buf *bytes.Buffer, a ...int64) {
+	var scratch [64]byte
+
+	buf.WriteByte('[')
+	for i, x := range a {
+		if i > 0 {
+			buf.WriteByte(',')
+		}
+		buf.Write(strconv.AppendInt(scratch[:0], x, 10))
+	}
+	buf.WriteByte(']')
+}
+
+// AppendUint appends a numeric literal representing the value to buf.
+func AppendUint(buf *bytes.Buffer, x uint64) {
+	var scratch [64]byte
+	buf.Write(strconv.AppendUint(scratch[:0], x, 10))
+}
+
+// AppendUintArray appends an array of numeric literals to buf.
+func AppendUintArray(buf *bytes.Buffer, a ...uint64) {
+	var scratch [64]byte
+
+	buf.WriteByte('[')
+	for i, x := range a {
+		if i > 0 {
+			buf.WriteByte(',')
+		}
+		buf.Write(strconv.AppendUint(scratch[:0], x, 10))
+	}
+	buf.WriteByte(']')
+}
diff --git a/vendor/github.com/newrelic/go-agent/internal/labels.go b/vendor/github.com/newrelic/go-agent/internal/labels.go
new file mode 100644
index 00000000000..b3671c65c90
--- /dev/null
+++ b/vendor/github.com/newrelic/go-agent/internal/labels.go
@@ -0,0 +1,23 @@
+package internal
+
+import "encoding/json"
+
+// Labels is used for connect JSON formatting.
+type Labels map[string]string
+
+// MarshalJSON marshals the labels as an array of label_type/label_value
+// pairs, the format expected by the collector's connect command.
+func (l Labels) MarshalJSON() ([]byte, error) {
+	ls := make([]struct {
+		Key   string `json:"label_type"`
+		Value string `json:"label_value"`
+	}, len(l))
+
+	i := 0
+	for key, val := range l {
+		ls[i].Key = key
+		ls[i].Value = val
+		i++
+	}
+
+	return json.Marshal(ls)
+}
diff --git a/vendor/github.com/newrelic/go-agent/internal/limits.go b/vendor/github.com/newrelic/go-agent/internal/limits.go
new file mode 100644
index 00000000000..04cf98fdeff
--- /dev/null
+++ b/vendor/github.com/newrelic/go-agent/internal/limits.go
@@ -0,0 +1,68 @@
+package internal
+
+import "time"
+
+const (
+	// app behavior
+
+	// fixedHarvestPeriod is the period at which fixed period data (metrics,
+	// traces, and span events) is sent to New Relic.
+	fixedHarvestPeriod = 60 * time.Second
+	// defaultConfigurableEventHarvestMs is the period for custom, error,
+	// and transaction events if the connect response's
+	// "event_harvest_config.report_period_ms" is missing or invalid.
+	defaultConfigurableEventHarvestMs = 60 * 1000
+	// CollectorTimeout is the timeout used in the client for communication
+	// with New Relic's servers.
+	CollectorTimeout = 20 * time.Second
+	// AppDataChanSize is the size of the channel that contains data sent
+	// to the app processor.
+	AppDataChanSize           = 200
+	failedMetricAttemptsLimit = 5
+	failedEventsAttemptsLimit = 10
+	// maxPayloadSizeInBytes specifies the maximum payload size in bytes that
+	// should be sent to any endpoint.
+	maxPayloadSizeInBytes = 1000 * 1000
+
+	// transaction behavior
+	maxStackTraceFrames = 100
+	// MaxTxnErrors is the maximum number of errors captured per
+	// transaction.
+	MaxTxnErrors      = 5
+	maxTxnSlowQueries = 10
+
+	startingTxnTraceNodes = 16
+	maxTxnTraceNodes      = 256
+
+	// harvest data
+	maxMetrics          = 2 * 1000
+	maxCustomEvents     = 10 * 1000
+	maxTxnEvents        = 10 * 1000
+	maxRegularTraces    = 1
+	maxSyntheticsTraces = 20
+	maxErrorEvents      = 100
+	maxHarvestErrors    = 20
+	maxHarvestSlowSQLs  = 10
+	maxSpanEvents       = 1000
+
+	// attributes
+	attributeKeyLengthLimit   = 255
+	attributeValueLengthLimit = 255
+	attributeUserLimit        = 64
+	// AttributeErrorLimit limits the number of extra attributes that can be
+	// provided when noticing an error.
+	AttributeErrorLimit       = 32
+	attributeAgentLimit       = 255 - (attributeUserLimit + AttributeErrorLimit)
+	customEventAttributeLimit = 64
+
+	// Limits affecting Config validation are found in the config package.
+
+	// RuntimeSamplerPeriod is the period of the runtime sampler. Runtime
+	// metrics should not depend on the sampler period, but the period must
+	// be the same across instances. For that reason, this value should not
+	// be changed without notifying customers that they must update all
+	// instances simultaneously for valid runtime metrics.
+	RuntimeSamplerPeriod = 60 * time.Second
+
+	txnNameCacheLimit = 40
+)
diff --git a/vendor/github.com/newrelic/go-agent/internal/logger/logger.go b/vendor/github.com/newrelic/go-agent/internal/logger/logger.go
new file mode 100644
index 00000000000..9fda99da54c
--- /dev/null
+++ b/vendor/github.com/newrelic/go-agent/internal/logger/logger.go
@@ -0,0 +1,93 @@
+package logger
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"log"
+	"os"
+)
+
+// Logger matches newrelic.Logger to allow implementations to be passed to
+// internal packages.
+type Logger interface {
+	Error(msg string, context map[string]interface{})
+	Warn(msg string, context map[string]interface{})
+	Info(msg string, context map[string]interface{})
+	Debug(msg string, context map[string]interface{})
+	DebugEnabled() bool
+}
+
+// ShimLogger implements Logger and does nothing.
+type ShimLogger struct {
+	// IsDebugEnabled is useful as it allows DebugEnabled code paths to be
+	// tested.
+	IsDebugEnabled bool
+}
+
+// Error allows ShimLogger to implement Logger.
+func (s ShimLogger) Error(string, map[string]interface{}) {}
+
+// Warn allows ShimLogger to implement Logger.
+func (s ShimLogger) Warn(string, map[string]interface{}) {}
+
+// Info allows ShimLogger to implement Logger.
+func (s ShimLogger) Info(string, map[string]interface{}) {}
+
+// Debug allows ShimLogger to implement Logger.
+func (s ShimLogger) Debug(string, map[string]interface{}) {}
+
+// DebugEnabled allows ShimLogger to implement Logger.
+func (s ShimLogger) DebugEnabled() bool { return s.IsDebugEnabled }
+
+type logFile struct {
+	l       *log.Logger
+	doDebug bool
+}
+
+// New creates a basic Logger.
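+// A minimal (illustrative) use emits one JSON object per entry, prefixed
+// with the process id and timestamp by the standard log package:
+//
+//	lg := New(os.Stdout, false)
+//	lg.Info("connected", map[string]interface{}{"run_id": "12345"})
+//	// {"level":"info","msg":"connected","context":{"run_id":"12345"}}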
+func New(w io.Writer, doDebug bool) Logger { + return &logFile{ + l: log.New(w, logPid, logFlags), + doDebug: doDebug, + } +} + +const logFlags = log.Ldate | log.Ltime | log.Lmicroseconds + +var ( + logPid = fmt.Sprintf("(%d) ", os.Getpid()) +) + +func (f *logFile) fire(level, msg string, ctx map[string]interface{}) { + js, err := json.Marshal(struct { + Level string `json:"level"` + Event string `json:"msg"` + Context map[string]interface{} `json:"context"` + }{ + level, + msg, + ctx, + }) + if nil == err { + f.l.Print(string(js)) + } else { + f.l.Printf("unable to marshal log entry: %v", err) + } +} + +func (f *logFile) Error(msg string, ctx map[string]interface{}) { + f.fire("error", msg, ctx) +} +func (f *logFile) Warn(msg string, ctx map[string]interface{}) { + f.fire("warn", msg, ctx) +} +func (f *logFile) Info(msg string, ctx map[string]interface{}) { + f.fire("info", msg, ctx) +} +func (f *logFile) Debug(msg string, ctx map[string]interface{}) { + if f.doDebug { + f.fire("debug", msg, ctx) + } +} +func (f *logFile) DebugEnabled() bool { return f.doDebug } diff --git a/vendor/github.com/newrelic/go-agent/internal/metric_names.go b/vendor/github.com/newrelic/go-agent/internal/metric_names.go new file mode 100644 index 00000000000..cd9321e07b4 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/metric_names.go @@ -0,0 +1,262 @@ +package internal + +const ( + apdexRollup = "Apdex" + apdexPrefix = "Apdex/" + + webRollup = "WebTransaction" + backgroundRollup = "OtherTransaction/all" + + // https://source.datanerd.us/agents/agent-specs/blob/master/Total-Time-Async.md + totalTimeWeb = "WebTransactionTotalTime" + totalTimeBackground = "OtherTransactionTotalTime" + + errorsPrefix = "Errors/" + + // "HttpDispatcher" metric is used for the overview graph, and + // therefore should only be made for web transactions. 
+ dispatcherMetric = "HttpDispatcher" + + queueMetric = "WebFrontend/QueueTime" + + webMetricPrefix = "WebTransaction/Go" + backgroundMetricPrefix = "OtherTransaction/Go" + + instanceReporting = "Instance/Reporting" + + // https://newrelic.atlassian.net/wiki/display/eng/Custom+Events+in+New+Relic+Agents + customEventsSeen = "Supportability/Events/Customer/Seen" + customEventsSent = "Supportability/Events/Customer/Sent" + + // https://source.datanerd.us/agents/agent-specs/blob/master/Transaction-Events-PORTED.md + txnEventsSeen = "Supportability/AnalyticsEvents/TotalEventsSeen" + txnEventsSent = "Supportability/AnalyticsEvents/TotalEventsSent" + + // https://source.datanerd.us/agents/agent-specs/blob/master/Error-Events.md + errorEventsSeen = "Supportability/Events/TransactionError/Seen" + errorEventsSent = "Supportability/Events/TransactionError/Sent" + + // https://source.datanerd.us/agents/agent-specs/blob/master/Span-Events.md + spanEventsSeen = "Supportability/SpanEvent/TotalEventsSeen" + spanEventsSent = "Supportability/SpanEvent/TotalEventsSent" + + supportabilityDropped = "Supportability/MetricsDropped" + + // Runtime/System Metrics + memoryPhysical = "Memory/Physical" + heapObjectsAllocated = "Memory/Heap/AllocatedObjects" + cpuUserUtilization = "CPU/User/Utilization" + cpuSystemUtilization = "CPU/System/Utilization" + cpuUserTime = "CPU/User Time" + cpuSystemTime = "CPU/System Time" + runGoroutine = "Go/Runtime/Goroutines" + gcPauseFraction = "GC/System/Pause Fraction" + gcPauses = "GC/System/Pauses" + + // Distributed Tracing Supportability Metrics + supportTracingAcceptSuccess = "Supportability/DistributedTrace/AcceptPayload/Success" + supportTracingAcceptException = "Supportability/DistributedTrace/AcceptPayload/Exception" + supportTracingAcceptParseException = "Supportability/DistributedTrace/AcceptPayload/ParseException" + supportTracingCreateBeforeAccept = "Supportability/DistributedTrace/AcceptPayload/Ignored/CreateBeforeAccept" + supportTracingIgnoredMultiple = "Supportability/DistributedTrace/AcceptPayload/Ignored/Multiple" + supportTracingIgnoredVersion = "Supportability/DistributedTrace/AcceptPayload/Ignored/MajorVersion" + supportTracingAcceptUntrustedAccount = "Supportability/DistributedTrace/AcceptPayload/Ignored/UntrustedAccount" + supportTracingAcceptNull = "Supportability/DistributedTrace/AcceptPayload/Ignored/Null" + supportTracingCreatePayloadSuccess = "Supportability/DistributedTrace/CreatePayload/Success" + supportTracingCreatePayloadException = "Supportability/DistributedTrace/CreatePayload/Exception" + + // Configurable event harvest supportability metrics + supportReportPeriod = "Supportability/EventHarvest/ReportPeriod" + supportTxnEventLimit = "Supportability/EventHarvest/AnalyticEventData/HarvestLimit" + supportCustomEventLimit = "Supportability/EventHarvest/CustomEventData/HarvestLimit" + supportErrorEventLimit = "Supportability/EventHarvest/ErrorEventData/HarvestLimit" +) + +// DistributedTracingSupport is used to track distributed tracing activity for +// supportability. 
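+// Each field below corresponds to one of the supportability metric names
+// above; for example (illustrative), a payload accepted successfully is
+// counted on "Supportability/DistributedTrace/AcceptPayload/Success".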
+type DistributedTracingSupport struct { + AcceptPayloadSuccess bool // AcceptPayload was called successfully + AcceptPayloadException bool // AcceptPayload had a generic exception + AcceptPayloadParseException bool // AcceptPayload had a parsing exception + AcceptPayloadCreateBeforeAccept bool // AcceptPayload was ignored because CreatePayload had already been called + AcceptPayloadIgnoredMultiple bool // AcceptPayload was ignored because AcceptPayload had already been called + AcceptPayloadIgnoredVersion bool // AcceptPayload was ignored because the payload's major version was greater than the agent's + AcceptPayloadUntrustedAccount bool // AcceptPayload was ignored because the payload was untrusted + AcceptPayloadNullPayload bool // AcceptPayload was ignored because the payload was nil + CreatePayloadSuccess bool // CreatePayload was called successfully + CreatePayloadException bool // CreatePayload had a generic exception +} + +type rollupMetric struct { + all string + allWeb string + allOther string +} + +func newRollupMetric(s string) rollupMetric { + return rollupMetric{ + all: s + "all", + allWeb: s + "allWeb", + allOther: s + "allOther", + } +} + +func (r rollupMetric) webOrOther(isWeb bool) string { + if isWeb { + return r.allWeb + } + return r.allOther +} + +var ( + errorsRollupMetric = newRollupMetric("Errors/") + + // source.datanerd.us/agents/agent-specs/blob/master/APIs/external_segment.md + // source.datanerd.us/agents/agent-specs/blob/master/APIs/external_cat.md + // source.datanerd.us/agents/agent-specs/blob/master/Cross-Application-Tracing-PORTED.md + externalRollupMetric = newRollupMetric("External/") + + // source.datanerd.us/agents/agent-specs/blob/master/Datastore-Metrics-PORTED.md + datastoreRollupMetric = newRollupMetric("Datastore/") + + datastoreProductMetricsCache = map[string]rollupMetric{ + "Cassandra": newRollupMetric("Datastore/Cassandra/"), + "Derby": newRollupMetric("Datastore/Derby/"), + "Elasticsearch": newRollupMetric("Datastore/Elasticsearch/"), + "Firebird": newRollupMetric("Datastore/Firebird/"), + "IBMDB2": newRollupMetric("Datastore/IBMDB2/"), + "Informix": newRollupMetric("Datastore/Informix/"), + "Memcached": newRollupMetric("Datastore/Memcached/"), + "MongoDB": newRollupMetric("Datastore/MongoDB/"), + "MySQL": newRollupMetric("Datastore/MySQL/"), + "MSSQL": newRollupMetric("Datastore/MSSQL/"), + "Oracle": newRollupMetric("Datastore/Oracle/"), + "Postgres": newRollupMetric("Datastore/Postgres/"), + "Redis": newRollupMetric("Datastore/Redis/"), + "Solr": newRollupMetric("Datastore/Solr/"), + "SQLite": newRollupMetric("Datastore/SQLite/"), + "CouchDB": newRollupMetric("Datastore/CouchDB/"), + "Riak": newRollupMetric("Datastore/Riak/"), + "VoltDB": newRollupMetric("Datastore/VoltDB/"), + } +) + +func customSegmentMetric(s string) string { + return "Custom/" + s +} + +// customMetric is used to construct custom metrics from the input given to +// Application.RecordCustomMetric. Note that the "Custom/" prefix helps prevent +// collision with other agent metrics, but does not eliminate the possibility +// since "Custom/" is also used for segments. +func customMetric(customerInput string) string { + return "Custom/" + customerInput +} + +// DatastoreMetricKey contains the fields by which datastore metrics are +// aggregated. 
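+// For example (illustrative), a key with Product "MySQL", Collection "users",
+// and Operation "select" yields the scoped metric
+// "Datastore/statement/MySQL/users/select"; with an empty Collection the
+// operation form "Datastore/operation/MySQL/select" is used instead (see
+// datastoreScopedMetric below).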
+type DatastoreMetricKey struct { + Product string + Collection string + Operation string + Host string + PortPathOrID string +} + +type externalMetricKey struct { + Host string + Library string + Method string + ExternalCrossProcessID string + ExternalTransactionName string +} + +func datastoreScopedMetric(key DatastoreMetricKey) string { + if "" != key.Collection { + return datastoreStatementMetric(key) + } + return datastoreOperationMetric(key) +} + +// Datastore/{datastore}/* +func datastoreProductMetric(key DatastoreMetricKey) rollupMetric { + d, ok := datastoreProductMetricsCache[key.Product] + if ok { + return d + } + return newRollupMetric("Datastore/" + key.Product + "/") +} + +// Datastore/operation/{datastore}/{operation} +func datastoreOperationMetric(key DatastoreMetricKey) string { + return "Datastore/operation/" + key.Product + + "/" + key.Operation +} + +// Datastore/statement/{datastore}/{table}/{operation} +func datastoreStatementMetric(key DatastoreMetricKey) string { + return "Datastore/statement/" + key.Product + + "/" + key.Collection + + "/" + key.Operation +} + +// Datastore/instance/{datastore}/{host}/{port_path_or_id} +func datastoreInstanceMetric(key DatastoreMetricKey) string { + return "Datastore/instance/" + key.Product + + "/" + key.Host + + "/" + key.PortPathOrID +} + +func (key externalMetricKey) scopedMetric() string { + if "" != key.ExternalCrossProcessID && "" != key.ExternalTransactionName { + return externalTransactionMetric(key) + } + + if key.Method == "" { + // External/{host}/{library} + return "External/" + key.Host + "/" + key.Library + } + // External/{host}/{library}/{method} + return "External/" + key.Host + "/" + key.Library + "/" + key.Method +} + +// External/{host}/all +func externalHostMetric(key externalMetricKey) string { + return "External/" + key.Host + "/all" +} + +// ExternalApp/{host}/{external_id}/all +func externalAppMetric(key externalMetricKey) string { + return "ExternalApp/" + key.Host + + "/" + key.ExternalCrossProcessID + "/all" +} + +// ExternalTransaction/{host}/{external_id}/{external_txnname} +func externalTransactionMetric(key externalMetricKey) string { + return "ExternalTransaction/" + key.Host + + "/" + key.ExternalCrossProcessID + + "/" + key.ExternalTransactionName +} + +func callerFields(c payloadCaller) string { + return "/" + c.Type + + "/" + c.Account + + "/" + c.App + + "/" + c.TransportType + + "/" +} + +// DurationByCaller/{type}/{account}/{app}/{transport}/* +func durationByCallerMetric(c payloadCaller) rollupMetric { + return newRollupMetric("DurationByCaller" + callerFields(c)) +} + +// ErrorsByCaller/{type}/{account}/{app}/{transport}/* +func errorsByCallerMetric(c payloadCaller) rollupMetric { + return newRollupMetric("ErrorsByCaller" + callerFields(c)) +} + +// TransportDuration/{type}/{account}/{app}/{transport}/* +func transportDurationMetric(c payloadCaller) rollupMetric { + return newRollupMetric("TransportDuration" + callerFields(c)) +} diff --git a/vendor/github.com/newrelic/go-agent/internal/metric_rules.go b/vendor/github.com/newrelic/go-agent/internal/metric_rules.go new file mode 100644 index 00000000000..b634a8b5b33 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/metric_rules.go @@ -0,0 +1,164 @@ +package internal + +import ( + "encoding/json" + "regexp" + "sort" + "strings" +) + +type ruleResult int + +const ( + ruleMatched ruleResult = iota + ruleUnmatched + ruleIgnore +) + +type metricRule struct { + // 'Ignore' indicates if the entire transaction should be discarded if 
+ // there is a match. This field is only used by "url_rules" and + // "transaction_name_rules", not "metric_name_rules". + Ignore bool `json:"ignore"` + EachSegment bool `json:"each_segment"` + ReplaceAll bool `json:"replace_all"` + Terminate bool `json:"terminate_chain"` + Order int `json:"eval_order"` + OriginalReplacement string `json:"replacement"` + RawExpr string `json:"match_expression"` + + // Go's regexp backreferences use '${1}' instead of the Perlish '\1', so + // we transform the replacement string into the Go syntax and store it + // here. + TransformedReplacement string + re *regexp.Regexp +} + +type metricRules []*metricRule + +// Go's regexp backreferences use `${1}` instead of the Perlish `\1`, so we must +// transform the replacement string. This is non-trivial: `\1` is a +// backreference but `\\1` is not. Rather than count the number of back slashes +// preceding the digit, we simply skip rules with tricky replacements. +var ( + transformReplacementAmbiguous = regexp.MustCompile(`\\\\([0-9]+)`) + transformReplacementRegex = regexp.MustCompile(`\\([0-9]+)`) + transformReplacementReplacement = "$${${1}}" +) + +func (rules *metricRules) UnmarshalJSON(data []byte) (err error) { + var raw []*metricRule + + if err := json.Unmarshal(data, &raw); nil != err { + return err + } + + valid := make(metricRules, 0, len(raw)) + + for _, r := range raw { + re, err := regexp.Compile("(?i)" + r.RawExpr) + if err != nil { + // TODO + // Warn("unable to compile rule", { + // "match_expression": r.RawExpr, + // "error": err.Error(), + // }) + continue + } + + if transformReplacementAmbiguous.MatchString(r.OriginalReplacement) { + // TODO + // Warn("unable to transform replacement", { + // "match_expression": r.RawExpr, + // "replacement": r.OriginalReplacement, + // }) + continue + } + + r.re = re + r.TransformedReplacement = transformReplacementRegex.ReplaceAllString(r.OriginalReplacement, + transformReplacementReplacement) + valid = append(valid, r) + } + + sort.Sort(valid) + + *rules = valid + return nil +} + +func (rules metricRules) Len() int { + return len(rules) +} + +// Rules should be applied in increasing order +func (rules metricRules) Less(i, j int) bool { + return rules[i].Order < rules[j].Order +} +func (rules metricRules) Swap(i, j int) { + rules[i], rules[j] = rules[j], rules[i] +} + +func replaceFirst(re *regexp.Regexp, s string, replacement string) (ruleResult, string) { + // Note that ReplaceAllStringFunc cannot be used here since it does + // not replace $1 placeholders. + loc := re.FindStringIndex(s) + if nil == loc { + return ruleUnmatched, s + } + firstMatch := s[loc[0]:loc[1]] + firstMatchReplaced := re.ReplaceAllString(firstMatch, replacement) + return ruleMatched, s[0:loc[0]] + firstMatchReplaced + s[loc[1]:] +} + +func (r *metricRule) apply(s string) (ruleResult, string) { + // Rules are strange, and there is no spec. + // This code attempts to duplicate the logic of the PHP agent. + // Ambiguity abounds. 
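+	//
+	// The checks below are ordered: Ignore rules short-circuit the whole
+	// name, ReplaceAll rewrites every occurrence of the match, EachSegment
+	// applies the expression to each '/'-delimited segment, and otherwise
+	// only the first match in the whole string is replaced.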
+ + if r.Ignore { + if r.re.MatchString(s) { + return ruleIgnore, "" + } + return ruleUnmatched, s + } + + if r.ReplaceAll { + if r.re.MatchString(s) { + return ruleMatched, r.re.ReplaceAllString(s, r.TransformedReplacement) + } + return ruleUnmatched, s + } else if r.EachSegment { + segments := strings.Split(s, "/") + applied := make([]string, len(segments)) + result := ruleUnmatched + for i, segment := range segments { + var segmentMatched ruleResult + segmentMatched, applied[i] = replaceFirst(r.re, segment, r.TransformedReplacement) + if segmentMatched == ruleMatched { + result = ruleMatched + } + } + return result, strings.Join(applied, "/") + } else { + return replaceFirst(r.re, s, r.TransformedReplacement) + } +} + +func (rules metricRules) Apply(input string) string { + var res ruleResult + s := input + + for _, rule := range rules { + res, s = rule.apply(s) + + if ruleIgnore == res { + return "" + } + if (ruleMatched == res) && rule.Terminate { + break + } + } + + return s +} diff --git a/vendor/github.com/newrelic/go-agent/internal/metrics.go b/vendor/github.com/newrelic/go-agent/internal/metrics.go new file mode 100644 index 00000000000..1cbc5fcf45d --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/metrics.go @@ -0,0 +1,261 @@ +package internal + +import ( + "bytes" + "time" + + "github.com/newrelic/go-agent/internal/jsonx" +) + +type metricForce int + +const ( + forced metricForce = iota + unforced +) + +type metricID struct { + Name string `json:"name"` + Scope string `json:"scope,omitempty"` +} + +type metricData struct { + // These values are in the units expected by the collector. + countSatisfied float64 // Seconds, or count for Apdex + totalTolerated float64 // Seconds, or count for Apdex + exclusiveFailed float64 // Seconds, or count for Apdex + min float64 // Seconds + max float64 // Seconds + sumSquares float64 // Seconds**2, or 0 for Apdex +} + +func metricDataFromDuration(duration, exclusive time.Duration) metricData { + ds := duration.Seconds() + return metricData{ + countSatisfied: 1, + totalTolerated: ds, + exclusiveFailed: exclusive.Seconds(), + min: ds, + max: ds, + sumSquares: ds * ds, + } +} + +type metric struct { + forced metricForce + data metricData +} + +type metricTable struct { + metricPeriodStart time.Time + failedHarvests int + maxTableSize int // After this max is reached, only forced metrics are added + metrics map[metricID]*metric +} + +func newMetricTable(maxTableSize int, now time.Time) *metricTable { + return &metricTable{ + metricPeriodStart: now, + metrics: make(map[metricID]*metric), + maxTableSize: maxTableSize, + failedHarvests: 0, + } +} + +func (mt *metricTable) full() bool { + return len(mt.metrics) >= mt.maxTableSize +} + +func (data *metricData) aggregate(src metricData) { + data.countSatisfied += src.countSatisfied + data.totalTolerated += src.totalTolerated + data.exclusiveFailed += src.exclusiveFailed + + if src.min < data.min { + data.min = src.min + } + if src.max > data.max { + data.max = src.max + } + + data.sumSquares += src.sumSquares +} + +func (mt *metricTable) mergeMetric(id metricID, m metric) { + if to := mt.metrics[id]; nil != to { + to.data.aggregate(m.data) + return + } + + if mt.full() && (unforced == m.forced) { + mt.addSingleCount(supportabilityDropped, forced) + return + } + // NOTE: `new` is used in place of `&m` since the latter will make `m` + // get heap allocated regardless of whether or not this line gets + // reached (running go version go1.5 darwin/amd64). See + // BenchmarkAddingSameMetrics. 
+ alloc := new(metric) + *alloc = m + mt.metrics[id] = alloc +} + +func (mt *metricTable) mergeFailed(from *metricTable) { + fails := from.failedHarvests + 1 + if fails >= failedMetricAttemptsLimit { + return + } + if from.metricPeriodStart.Before(mt.metricPeriodStart) { + mt.metricPeriodStart = from.metricPeriodStart + } + mt.failedHarvests = fails + mt.merge(from, "") +} + +func (mt *metricTable) merge(from *metricTable, newScope string) { + if "" == newScope { + for id, m := range from.metrics { + mt.mergeMetric(id, *m) + } + } else { + for id, m := range from.metrics { + mt.mergeMetric(metricID{Name: id.Name, Scope: newScope}, *m) + } + } +} + +func (mt *metricTable) add(name, scope string, data metricData, force metricForce) { + mt.mergeMetric(metricID{Name: name, Scope: scope}, metric{data: data, forced: force}) +} + +func (mt *metricTable) addCount(name string, count float64, force metricForce) { + mt.add(name, "", metricData{countSatisfied: count}, force) +} + +func (mt *metricTable) addSingleCount(name string, force metricForce) { + mt.addCount(name, float64(1), force) +} + +func (mt *metricTable) addDuration(name, scope string, duration, exclusive time.Duration, force metricForce) { + mt.add(name, scope, metricDataFromDuration(duration, exclusive), force) +} + +func (mt *metricTable) addValueExclusive(name, scope string, total, exclusive float64, force metricForce) { + data := metricData{ + countSatisfied: 1, + totalTolerated: total, + exclusiveFailed: exclusive, + min: total, + max: total, + sumSquares: total * total, + } + mt.add(name, scope, data, force) +} + +func (mt *metricTable) addValue(name, scope string, total float64, force metricForce) { + mt.addValueExclusive(name, scope, total, total, force) +} + +func (mt *metricTable) addApdex(name, scope string, apdexThreshold time.Duration, zone ApdexZone, force metricForce) { + apdexSeconds := apdexThreshold.Seconds() + data := metricData{min: apdexSeconds, max: apdexSeconds} + + switch zone { + case ApdexSatisfying: + data.countSatisfied = 1 + case ApdexTolerating: + data.totalTolerated = 1 + case ApdexFailing: + data.exclusiveFailed = 1 + } + + mt.add(name, scope, data, force) +} + +func (mt *metricTable) CollectorJSON(agentRunID string, now time.Time) ([]byte, error) { + if 0 == len(mt.metrics) { + return nil, nil + } + estimatedBytesPerMetric := 128 + estimatedLen := len(mt.metrics) * estimatedBytesPerMetric + buf := bytes.NewBuffer(make([]byte, 0, estimatedLen)) + buf.WriteByte('[') + + jsonx.AppendString(buf, agentRunID) + buf.WriteByte(',') + jsonx.AppendInt(buf, mt.metricPeriodStart.Unix()) + buf.WriteByte(',') + jsonx.AppendInt(buf, now.Unix()) + buf.WriteByte(',') + + buf.WriteByte('[') + first := true + for id, metric := range mt.metrics { + if first { + first = false + } else { + buf.WriteByte(',') + } + buf.WriteByte('[') + buf.WriteByte('{') + buf.WriteString(`"name":`) + jsonx.AppendString(buf, id.Name) + if id.Scope != "" { + buf.WriteString(`,"scope":`) + jsonx.AppendString(buf, id.Scope) + } + buf.WriteByte('}') + buf.WriteByte(',') + + jsonx.AppendFloatArray(buf, + metric.data.countSatisfied, + metric.data.totalTolerated, + metric.data.exclusiveFailed, + metric.data.min, + metric.data.max, + metric.data.sumSquares) + + buf.WriteByte(']') + } + buf.WriteByte(']') + + buf.WriteByte(']') + return buf.Bytes(), nil +} + +func (mt *metricTable) Data(agentRunID string, harvestStart time.Time) ([]byte, error) { + return mt.CollectorJSON(agentRunID, harvestStart) +} +func (mt *metricTable) MergeIntoHarvest(h *Harvest) 
{
+	h.Metrics.mergeFailed(mt)
+}
+
+func (mt *metricTable) ApplyRules(rules metricRules) *metricTable {
+	if nil == rules {
+		return mt
+	}
+	if len(rules) == 0 {
+		return mt
+	}
+
+	applied := newMetricTable(mt.maxTableSize, mt.metricPeriodStart)
+	cache := make(map[string]string)
+
+	for id, m := range mt.metrics {
+		out, ok := cache[id.Name]
+		if !ok {
+			out = rules.Apply(id.Name)
+			cache[id.Name] = out
+		}
+
+		if "" != out {
+			applied.mergeMetric(metricID{Name: out, Scope: id.Scope}, *m)
+		}
+	}
+
+	return applied
+}
+
+func (mt *metricTable) EndpointMethod() string {
+	return cmdMetrics
+}
diff --git a/vendor/github.com/newrelic/go-agent/internal/obfuscate.go b/vendor/github.com/newrelic/go-agent/internal/obfuscate.go
new file mode 100644
index 00000000000..0fcf859667e
--- /dev/null
+++ b/vendor/github.com/newrelic/go-agent/internal/obfuscate.go
@@ -0,0 +1,39 @@
+package internal
+
+import (
+	"encoding/base64"
+	"errors"
+)
+
+// Deobfuscate base64-decodes the input string and XORs it with the key,
+// reversing Obfuscate below.
+func Deobfuscate(in string, key []byte) ([]byte, error) {
+	if len(key) == 0 {
+		return nil, errors.New("key cannot be zero length")
+	}
+
+	decoded, err := base64.StdEncoding.DecodeString(in)
+	if err != nil {
+		return nil, err
+	}
+
+	out := make([]byte, len(decoded))
+	for i, c := range decoded {
+		out[i] = c ^ key[i%len(key)]
+	}
+
+	return out, nil
+}
+
+// Obfuscate obfuscates a byte array for transmission in CAT and RUM.
+func Obfuscate(in, key []byte) (string, error) {
+	if len(key) == 0 {
+		return "", errors.New("key cannot be zero length")
+	}
+
+	out := make([]byte, len(in))
+	for i, c := range in {
+		out[i] = c ^ key[i%len(key)]
+	}
+
+	return base64.StdEncoding.EncodeToString(out), nil
+}
diff --git a/vendor/github.com/newrelic/go-agent/internal/priority.go b/vendor/github.com/newrelic/go-agent/internal/priority.go
new file mode 100644
index 00000000000..e7aae796e8a
--- /dev/null
+++ b/vendor/github.com/newrelic/go-agent/internal/priority.go
@@ -0,0 +1,27 @@
+package internal
+
+// Priority allows for priority sampling of events. When an event
+// is created it is given a Priority. Whenever an event pool is
+// full and events need to be dropped, the events with the lowest priority
+// are dropped.
+type Priority float32
+
+// According to spec, Agents SHOULD truncate the value to at most 6
+// digits past the decimal point.
+const (
+	priorityFormat = "%.6f"
+)
+
+// NewPriority returns a new priority.
+func NewPriority() Priority {
+	return Priority(RandFloat32())
+}
+
+// Float32 returns the priority as a float32.
+func (p Priority) Float32() float32 {
+	return float32(p)
+}
+
+func (p Priority) isLowerPriority(y Priority) bool {
+	return p < y
+}
diff --git a/vendor/github.com/newrelic/go-agent/internal/queuing.go b/vendor/github.com/newrelic/go-agent/internal/queuing.go
new file mode 100644
index 00000000000..cc361f82088
--- /dev/null
+++ b/vendor/github.com/newrelic/go-agent/internal/queuing.go
@@ -0,0 +1,72 @@
+package internal
+
+import (
+	"net/http"
+	"strconv"
+	"strings"
+	"time"
+)
+
+const (
+	xRequestStart = "X-Request-Start"
+	xQueueStart   = "X-Queue-Start"
+)
+
+var (
+	earliestAcceptableSeconds = time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC).Unix()
+	latestAcceptableSeconds   = time.Date(2050, time.January, 1, 0, 0, 0, 0, time.UTC).Unix()
+)
+
+func checkQueueTimeSeconds(secondsFloat float64) time.Time {
+	seconds := int64(secondsFloat)
+	nanos := int64((secondsFloat - float64(seconds)) * (1000.0 * 1000.0 * 1000.0))
+	if seconds > earliestAcceptableSeconds && seconds < latestAcceptableSeconds {
+		return time.Unix(seconds, nanos)
+	}
+	return time.Time{}
+}
+
+func parseQueueTime(s string) time.Time {
+	f, err := strconv.ParseFloat(s, 64)
+	if nil != err {
+		return time.Time{}
+	}
+	if f <= 0 {
+		return time.Time{}
+	}
+
+	// try microseconds
+	if t := checkQueueTimeSeconds(f / (1000.0 * 1000.0)); !t.IsZero() {
+		return t
+	}
+	// try milliseconds
+	if t := checkQueueTimeSeconds(f / (1000.0)); !t.IsZero() {
+		return t
+	}
+	// try seconds
+	if t := checkQueueTimeSeconds(f); !t.IsZero() {
+		return t
+	}
+	return time.Time{}
+}
+
+// QueueDuration estimates how long the request waited in a front-end queue
+// before txnStart, using the X-Queue-Start or X-Request-Start header (with an
+// optional "t=" prefix). It returns 0 when no usable header is present.
+func QueueDuration(hdr http.Header, txnStart time.Time) time.Duration {
+	s := hdr.Get(xQueueStart)
+	if "" == s {
+		s = hdr.Get(xRequestStart)
+	}
+	if "" == s {
+		return 0
+	}
+
+	s = strings.TrimPrefix(s, "t=")
+	qt := parseQueueTime(s)
+	if qt.IsZero() {
+		return 0
+	}
+	if qt.After(txnStart) {
+		return 0
+	}
+	return txnStart.Sub(qt)
+}
diff --git a/vendor/github.com/newrelic/go-agent/internal/rand.go b/vendor/github.com/newrelic/go-agent/internal/rand.go
new file mode 100644
index 00000000000..7e76d7d585c
--- /dev/null
+++ b/vendor/github.com/newrelic/go-agent/internal/rand.go
@@ -0,0 +1,59 @@
+package internal
+
+import (
+	"math/rand"
+	"sync"
+	"time"
+)
+
+var (
+	seededRand = struct {
+		sync.Mutex
+		*rand.Rand
+	}{
+		Rand: rand.New(rand.NewSource(int64(time.Now().UnixNano()))),
+	}
+)
+
+// RandUint64 returns a random uint64.
+//
+// IMPORTANT! The default rand package functions are not used, since we want to
+// minimize the chance that different Go processes duplicate the same
+// transaction id. (Note that the rand top level functions "use a default
+// shared Source that produces a deterministic sequence of values each time a
+// program is run" (and we don't seed the shared Source to avoid changing
+// customer apps' behavior)).
+func RandUint64() uint64 {
+	seededRand.Lock()
+	defer seededRand.Unlock()
+
+	u1 := seededRand.Uint32()
+	u2 := seededRand.Uint32()
+	return (uint64(u1) << 32) | uint64(u2)
+}
+
+// RandUint32 returns a random uint32.
+func RandUint32() uint32 {
+	seededRand.Lock()
+	defer seededRand.Unlock()
+
+	return seededRand.Uint32()
+}
+
+// RandFloat32 returns a random float32 in the open interval (0.0, 1.0).
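+// The retry loop below discards a result of exactly zero, which is why the
+// interval is open at both ends.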
+func RandFloat32() float32 {
+	seededRand.Lock()
+	defer seededRand.Unlock()
+
+	for {
+		if r := seededRand.Float32(); 0.0 != r {
+			return r
+		}
+	}
+}
+
+// RandUint64N returns a random uint64 between 0 (inclusive) and the passed-in
+// max (exclusive).
+func RandUint64N(max uint64) uint64 {
+	return RandUint64() % max
+}
diff --git a/vendor/github.com/newrelic/go-agent/internal/rules_cache.go b/vendor/github.com/newrelic/go-agent/internal/rules_cache.go
new file mode 100644
index 00000000000..d8357075322
--- /dev/null
+++ b/vendor/github.com/newrelic/go-agent/internal/rules_cache.go
@@ -0,0 +1,52 @@
+package internal
+
+import "sync"
+
+// rulesCache is designed to avoid applying url-rules, txn-name-rules, and
+// segment-rules since regexes are expensive!
+type rulesCache struct {
+	sync.RWMutex
+	cache        map[rulesCacheKey]string
+	maxCacheSize int
+}
+
+type rulesCacheKey struct {
+	isWeb     bool
+	inputName string
+}
+
+func newRulesCache(maxCacheSize int) *rulesCache {
+	return &rulesCache{
+		cache:        make(map[rulesCacheKey]string, maxCacheSize),
+		maxCacheSize: maxCacheSize,
+	}
+}
+
+func (cache *rulesCache) find(inputName string, isWeb bool) string {
+	if nil == cache {
+		return ""
+	}
+	cache.RLock()
+	defer cache.RUnlock()
+
+	return cache.cache[rulesCacheKey{
+		inputName: inputName,
+		isWeb:     isWeb,
+	}]
+}
+
+func (cache *rulesCache) set(inputName string, isWeb bool, finalName string) {
+	if nil == cache {
+		return
+	}
+	cache.Lock()
+	defer cache.Unlock()
+
+	if len(cache.cache) >= cache.maxCacheSize {
+		return
+	}
+	cache.cache[rulesCacheKey{
+		inputName: inputName,
+		isWeb:     isWeb,
+	}] = finalName
+}
diff --git a/vendor/github.com/newrelic/go-agent/internal/sampler.go b/vendor/github.com/newrelic/go-agent/internal/sampler.go
new file mode 100644
index 00000000000..d78cdc64051
--- /dev/null
+++ b/vendor/github.com/newrelic/go-agent/internal/sampler.go
@@ -0,0 +1,145 @@
+package internal
+
+import (
+	"runtime"
+	"time"
+
+	"github.com/newrelic/go-agent/internal/logger"
+	"github.com/newrelic/go-agent/internal/sysinfo"
+)
+
+// Sample is a system/runtime snapshot.
+type Sample struct {
+	when         time.Time
+	memStats     runtime.MemStats
+	usage        sysinfo.Usage
+	numGoroutine int
+	numCPU       int
+}
+
+func bytesToMebibytesFloat(bts uint64) float64 {
+	return float64(bts) / (1024 * 1024)
+}
+
+// GetSample gathers a new Sample.
+func GetSample(now time.Time, lg logger.Logger) *Sample {
+	s := Sample{
+		when:         now,
+		numGoroutine: runtime.NumGoroutine(),
+		numCPU:       runtime.NumCPU(),
+	}
+
+	if usage, err := sysinfo.GetUsage(); err == nil {
+		s.usage = usage
+	} else {
+		lg.Warn("unable to gather process usage", map[string]interface{}{
+			"error": err.Error(),
+		})
+	}
+
+	runtime.ReadMemStats(&s.memStats)
+
+	return &s
+}
+
+type cpuStats struct {
+	used     time.Duration
+	fraction float64 // used / (elapsed * numCPU)
+}
+
+// Stats contains system information for a period of time.
+type Stats struct {
+	numGoroutine    int
+	allocBytes      uint64
+	heapObjects     uint64
+	user            cpuStats
+	system          cpuStats
+	gcPauseFraction float64
+	deltaNumGC      uint32
+	deltaPauseTotal time.Duration
+	minPause        time.Duration
+	maxPause        time.Duration
+}
+
+// Samples is used as the parameter to GetStats to avoid mixing up the previous
+// and current sample.
+type Samples struct {
+	Previous *Sample
+	Current  *Sample
+}
+
+// GetStats combines two Samples into a Stats.
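+// Gauges (goroutine count, heap bytes, heap objects) are taken from Current;
+// the CPU and GC figures are deltas between Previous and Current over the
+// elapsed time between the two samples.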
+func GetStats(ss Samples) Stats { + cur := ss.Current + prev := ss.Previous + elapsed := cur.when.Sub(prev.when) + + s := Stats{ + numGoroutine: cur.numGoroutine, + allocBytes: cur.memStats.Alloc, + heapObjects: cur.memStats.HeapObjects, + } + + // CPU Utilization + totalCPUSeconds := elapsed.Seconds() * float64(cur.numCPU) + if prev.usage.User != 0 && cur.usage.User > prev.usage.User { + s.user.used = cur.usage.User - prev.usage.User + s.user.fraction = s.user.used.Seconds() / totalCPUSeconds + } + if prev.usage.System != 0 && cur.usage.System > prev.usage.System { + s.system.used = cur.usage.System - prev.usage.System + s.system.fraction = s.system.used.Seconds() / totalCPUSeconds + } + + // GC Pause Fraction + deltaPauseTotalNs := cur.memStats.PauseTotalNs - prev.memStats.PauseTotalNs + frac := float64(deltaPauseTotalNs) / float64(elapsed.Nanoseconds()) + s.gcPauseFraction = frac + + // GC Pauses + if deltaNumGC := cur.memStats.NumGC - prev.memStats.NumGC; deltaNumGC > 0 { + // In case more than 256 pauses have happened between samples + // and we are examining a subset of the pauses, we ensure that + // the min and max are not on the same side of the average by + // using the average as the starting min and max. + maxPauseNs := deltaPauseTotalNs / uint64(deltaNumGC) + minPauseNs := deltaPauseTotalNs / uint64(deltaNumGC) + for i := prev.memStats.NumGC + 1; i <= cur.memStats.NumGC; i++ { + pause := cur.memStats.PauseNs[(i+255)%256] + if pause > maxPauseNs { + maxPauseNs = pause + } + if pause < minPauseNs { + minPauseNs = pause + } + } + s.deltaPauseTotal = time.Duration(deltaPauseTotalNs) * time.Nanosecond + s.deltaNumGC = deltaNumGC + s.minPause = time.Duration(minPauseNs) * time.Nanosecond + s.maxPause = time.Duration(maxPauseNs) * time.Nanosecond + } + + return s +} + +// MergeIntoHarvest implements Harvestable. +func (s Stats) MergeIntoHarvest(h *Harvest) { + h.Metrics.addValue(heapObjectsAllocated, "", float64(s.heapObjects), forced) + h.Metrics.addValue(runGoroutine, "", float64(s.numGoroutine), forced) + h.Metrics.addValueExclusive(memoryPhysical, "", bytesToMebibytesFloat(s.allocBytes), 0, forced) + h.Metrics.addValueExclusive(cpuUserUtilization, "", s.user.fraction, 0, forced) + h.Metrics.addValueExclusive(cpuSystemUtilization, "", s.system.fraction, 0, forced) + h.Metrics.addValue(cpuUserTime, "", s.user.used.Seconds(), forced) + h.Metrics.addValue(cpuSystemTime, "", s.system.used.Seconds(), forced) + h.Metrics.addValueExclusive(gcPauseFraction, "", s.gcPauseFraction, 0, forced) + if s.deltaNumGC > 0 { + h.Metrics.add(gcPauses, "", metricData{ + countSatisfied: float64(s.deltaNumGC), + totalTolerated: s.deltaPauseTotal.Seconds(), + exclusiveFailed: 0, + min: s.minPause.Seconds(), + max: s.maxPause.Seconds(), + sumSquares: s.deltaPauseTotal.Seconds() * s.deltaPauseTotal.Seconds(), + }, forced) + } +} diff --git a/vendor/github.com/newrelic/go-agent/internal/security_policies.go b/vendor/github.com/newrelic/go-agent/internal/security_policies.go new file mode 100644 index 00000000000..d8d119b7798 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/security_policies.go @@ -0,0 +1,111 @@ +package internal + +import ( + "encoding/json" + "fmt" + "reflect" +) + +// Security policies documentation: +// https://source.datanerd.us/agents/agent-specs/blob/master/Language-Agent-Security-Policies.md + +// SecurityPolicies contains the security policies. 
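+// Each field mirrors one policy name in the preconnect reply; UnmarshalJSON
+// below matches incoming policies to these fields by their json struct tags.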
+type SecurityPolicies struct { + RecordSQL securityPolicy `json:"record_sql"` + AttributesInclude securityPolicy `json:"attributes_include"` + AllowRawExceptionMessages securityPolicy `json:"allow_raw_exception_messages"` + CustomEvents securityPolicy `json:"custom_events"` + CustomParameters securityPolicy `json:"custom_parameters"` +} + +// PointerIfPopulated returns a reference to the security policies if they have +// been populated from JSON. +func (sp *SecurityPolicies) PointerIfPopulated() *SecurityPolicies { + emptyPolicies := SecurityPolicies{} + if nil != sp && *sp != emptyPolicies { + return sp + } + return nil +} + +type securityPolicy struct { + EnabledVal *bool `json:"enabled"` +} + +func (p *securityPolicy) Enabled() bool { return nil == p.EnabledVal || *p.EnabledVal } +func (p *securityPolicy) SetEnabled(enabled bool) { p.EnabledVal = &enabled } +func (p *securityPolicy) IsSet() bool { return nil != p.EnabledVal } + +type policyer interface { + SetEnabled(bool) + IsSet() bool +} + +// UnmarshalJSON decodes security policies sent from the preconnect endpoint. +func (sp *SecurityPolicies) UnmarshalJSON(data []byte) (er error) { + defer func() { + // Zero out all fields if there is an error to ensure that the + // populated check works. + if er != nil { + *sp = SecurityPolicies{} + } + }() + + var raw map[string]struct { + Enabled bool `json:"enabled"` + Required bool `json:"required"` + } + err := json.Unmarshal(data, &raw) + if err != nil { + return fmt.Errorf("unable to unmarshal security policies: %v", err) + } + + knownPolicies := make(map[string]policyer) + + spv := reflect.ValueOf(sp).Elem() + for i := 0; i < spv.NumField(); i++ { + fieldAddress := spv.Field(i).Addr() + field := fieldAddress.Interface().(policyer) + name := spv.Type().Field(i).Tag.Get("json") + knownPolicies[name] = field + } + + for name, policy := range raw { + p, ok := knownPolicies[name] + if !ok { + if policy.Required { + return errUnknownRequiredPolicy{name: name} + } + } else { + p.SetEnabled(policy.Enabled) + } + } + for name, policy := range knownPolicies { + if !policy.IsSet() { + return errUnsetPolicy{name: name} + } + } + return nil +} + +type errUnknownRequiredPolicy struct{ name string } + +func (err errUnknownRequiredPolicy) Error() string { + return fmt.Sprintf("policy '%s' is unrecognized, please check for a newer agent version or contact support", err.name) +} + +type errUnsetPolicy struct{ name string } + +func (err errUnsetPolicy) Error() string { + return fmt.Sprintf("policy '%s' not received, please contact support", err.name) +} + +func isDisconnectSecurityPolicyError(e error) bool { + if _, ok := e.(errUnknownRequiredPolicy); ok { + return true + } + if _, ok := e.(errUnsetPolicy); ok { + return true + } + return false +} diff --git a/vendor/github.com/newrelic/go-agent/internal/segment_terms.go b/vendor/github.com/newrelic/go-agent/internal/segment_terms.go new file mode 100644 index 00000000000..a0fd1f2e667 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/segment_terms.go @@ -0,0 +1,145 @@ +package internal + +// https://newrelic.atlassian.net/wiki/display/eng/Language+agent+transaction+segment+terms+rules + +import ( + "encoding/json" + "strings" +) + +const ( + placeholder = "*" + separator = "/" +) + +type segmentRule struct { + Prefix string `json:"prefix"` + Terms []string `json:"terms"` + TermsMap map[string]struct{} +} + +// segmentRules is keyed by each segmentRule's Prefix field with any trailing +// slash removed. 
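+// For example (illustrative), a rule with Prefix "WebTransaction/Uri" and
+// Terms ["login"] is stored under the key "WebTransaction/Uri" and rewrites
+// "WebTransaction/Uri/login/account/billing" to "WebTransaction/Uri/login/*",
+// since adjacent placeholders collapse (see apply below).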
+type segmentRules map[string]*segmentRule + +func buildTermsMap(terms []string) map[string]struct{} { + m := make(map[string]struct{}, len(terms)) + for _, t := range terms { + m[t] = struct{}{} + } + return m +} + +func (rules *segmentRules) UnmarshalJSON(b []byte) error { + var raw []*segmentRule + + if err := json.Unmarshal(b, &raw); nil != err { + return err + } + + rs := make(map[string]*segmentRule) + + for _, rule := range raw { + prefix := strings.TrimSuffix(rule.Prefix, "/") + if len(strings.Split(prefix, "/")) != 2 { + // TODO + // Warn("invalid segment term rule prefix", + // {"prefix": rule.Prefix}) + continue + } + + if nil == rule.Terms { + // TODO + // Warn("segment term rule has missing terms", + // {"prefix": rule.Prefix}) + continue + } + + rule.TermsMap = buildTermsMap(rule.Terms) + + rs[prefix] = rule + } + + *rules = rs + return nil +} + +func (rule *segmentRule) apply(name string) string { + if !strings.HasPrefix(name, rule.Prefix) { + return name + } + + s := strings.TrimPrefix(name, rule.Prefix) + + leadingSlash := "" + if strings.HasPrefix(s, separator) { + leadingSlash = separator + s = strings.TrimPrefix(s, separator) + } + + if "" != s { + segments := strings.Split(s, separator) + + for i, segment := range segments { + _, whitelisted := rule.TermsMap[segment] + if whitelisted { + segments[i] = segment + } else { + segments[i] = placeholder + } + } + + segments = collapsePlaceholders(segments) + s = strings.Join(segments, separator) + } + + return rule.Prefix + leadingSlash + s +} + +func (rules segmentRules) apply(name string) string { + if nil == rules { + return name + } + + rule, ok := rules[firstTwoSegments(name)] + if !ok { + return name + } + + return rule.apply(name) +} + +func firstTwoSegments(name string) string { + firstSlashIdx := strings.Index(name, separator) + if firstSlashIdx == -1 { + return name + } + + secondSlashIdx := strings.Index(name[firstSlashIdx+1:], separator) + if secondSlashIdx == -1 { + return name + } + + return name[0 : firstSlashIdx+secondSlashIdx+1] +} + +func collapsePlaceholders(segments []string) []string { + j := 0 + prevStar := false + for i := 0; i < len(segments); i++ { + segment := segments[i] + if placeholder == segment { + if prevStar { + continue + } + segments[j] = placeholder + j++ + prevStar = true + } else { + segments[j] = segment + j++ + prevStar = false + } + } + return segments[0:j] +} diff --git a/vendor/github.com/newrelic/go-agent/internal/serverless.go b/vendor/github.com/newrelic/go-agent/internal/serverless.go new file mode 100644 index 00000000000..414d0ebd05a --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/serverless.go @@ -0,0 +1,217 @@ +package internal + +import ( + "bytes" + "compress/gzip" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "strings" + "sync" + "time" + + "github.com/newrelic/go-agent/internal/logger" +) + +const ( + lambdaMetadataVersion = 2 + + // AgentLanguage is used in the connect JSON and the Lambda JSON. + AgentLanguage = "go" +) + +// ServerlessHarvest is used to store and log data when the agent is running in +// serverless mode. +type ServerlessHarvest struct { + logger logger.Logger + version string + awsExecutionEnv string + + // The Lambda handler could be using multiple goroutines so we use a + // mutex to prevent race conditions. + sync.Mutex + harvest *Harvest +} + +// NewServerlessHarvest creates a new ServerlessHarvest. 
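+// A minimal (illustrative) lifecycle:
+//
+//	sh := NewServerlessHarvest(logger.ShimLogger{}, "agent-version", os.Getenv)
+//	sh.Consume(data)         // any Harvestable
+//	sh.Write(arn, os.Stdout) // log the NR_LAMBDA_MONITORING payload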
+func NewServerlessHarvest(logger logger.Logger, version string, getEnv func(string) string) *ServerlessHarvest { + return &ServerlessHarvest{ + logger: logger, + version: version, + awsExecutionEnv: getEnv("AWS_EXECUTION_ENV"), + + // A ConnectReply parameter to NewHarvest isn't needed because + // serverless mode doesn't have a connect, and therefore won't + // have custom event limits from the server. + harvest: NewHarvest(time.Now(), nil), + } +} + +// Consume adds data to the harvest. +func (sh *ServerlessHarvest) Consume(data Harvestable) { + if nil == sh { + return + } + sh.Lock() + defer sh.Unlock() + + data.MergeIntoHarvest(sh.harvest) +} + +func (sh *ServerlessHarvest) swapHarvest() *Harvest { + sh.Lock() + defer sh.Unlock() + + h := sh.harvest + sh.harvest = NewHarvest(time.Now(), nil) + return h +} + +// Write logs the data in the format described by: +// https://source.datanerd.us/agents/agent-specs/blob/master/Lambda.md +func (sh *ServerlessHarvest) Write(arn string, writer io.Writer) { + if nil == sh { + return + } + harvest := sh.swapHarvest() + payloads := harvest.Payloads(false) + // Note that *json.RawMessage (instead of json.RawMessage) is used to + // support older Go versions: https://go-review.googlesource.com/c/go/+/21811/ + harvestPayloads := make(map[string]*json.RawMessage, len(payloads)) + for _, p := range payloads { + agentRunID := "" + cmd := p.EndpointMethod() + data, err := p.Data(agentRunID, time.Now()) + if err != nil { + sh.logger.Error("error creating payload json", map[string]interface{}{ + "command": cmd, + "error": err.Error(), + }) + continue + } + if nil == data { + continue + } + // NOTE! This code relies on the fact that each payload is + // using a different endpoint method. Sometimes the transaction + // events payload might be split, but since there is only one + // transaction event per serverless transaction, that's not an + // issue. Likewise, if we ever split normal transaction events + // apart from synthetics events, the transaction will either be + // normal or synthetic, so that won't be an issue. Log an error + // if this happens for future defensiveness. + if _, ok := harvestPayloads[cmd]; ok { + sh.logger.Error("data with duplicate command name lost", map[string]interface{}{ + "command": cmd, + }) + } + d := json.RawMessage(data) + harvestPayloads[cmd] = &d + } + + if len(harvestPayloads) == 0 { + // The harvest may not contain any data if the serverless + // transaction was ignored. 
+ return + } + + data, err := json.Marshal(harvestPayloads) + if nil != err { + sh.logger.Error("error creating serverless data json", map[string]interface{}{ + "error": err.Error(), + }) + return + } + + var dataBuf bytes.Buffer + gz := gzip.NewWriter(&dataBuf) + gz.Write(data) + gz.Flush() + gz.Close() + + js, err := json.Marshal([]interface{}{ + lambdaMetadataVersion, + "NR_LAMBDA_MONITORING", + struct { + MetadataVersion int `json:"metadata_version"` + ARN string `json:"arn,omitempty"` + ProtocolVersion int `json:"protocol_version"` + ExecutionEnvironment string `json:"execution_environment,omitempty"` + AgentVersion string `json:"agent_version"` + AgentLanguage string `json:"agent_language"` + }{ + MetadataVersion: lambdaMetadataVersion, + ProtocolVersion: ProcotolVersion, + AgentVersion: sh.version, + ExecutionEnvironment: sh.awsExecutionEnv, + ARN: arn, + AgentLanguage: AgentLanguage, + }, + base64.StdEncoding.EncodeToString(dataBuf.Bytes()), + }) + + if err != nil { + sh.logger.Error("error creating serverless json", map[string]interface{}{ + "error": err.Error(), + }) + return + } + + fmt.Fprintln(writer, string(js)) +} + +// ParseServerlessPayload exists for testing. +func ParseServerlessPayload(data []byte) (metadata, uncompressedData map[string]json.RawMessage, err error) { + var arr [4]json.RawMessage + if err = json.Unmarshal(data, &arr); nil != err { + err = fmt.Errorf("unable to unmarshal serverless data array: %v", err) + return + } + var dataJSON []byte + compressed := strings.Trim(string(arr[3]), `"`) + if dataJSON, err = decodeUncompress(compressed); nil != err { + err = fmt.Errorf("unable to uncompress serverless data: %v", err) + return + } + if err = json.Unmarshal(dataJSON, &uncompressedData); nil != err { + err = fmt.Errorf("unable to unmarshal uncompressed serverless data: %v", err) + return + } + if err = json.Unmarshal(arr[2], &metadata); nil != err { + err = fmt.Errorf("unable to unmarshal serverless metadata: %v", err) + return + } + return +} + +func decodeUncompress(input string) ([]byte, error) { + decoded, err := base64.StdEncoding.DecodeString(input) + if nil != err { + return nil, err + } + + buf := bytes.NewBuffer(decoded) + gz, err := gzip.NewReader(buf) + if nil != err { + return nil, err + } + var out bytes.Buffer + io.Copy(&out, gz) + gz.Close() + + return out.Bytes(), nil +} + +// ServerlessWriter is implemented by newrelic.Application. +type ServerlessWriter interface { + ServerlessWrite(arn string, writer io.Writer) +} + +// ServerlessWrite exists to avoid type assertion in the nrlambda integration +// package. +func ServerlessWrite(app interface{}, arn string, writer io.Writer) { + if s, ok := app.(ServerlessWriter); ok { + s.ServerlessWrite(arn, writer) + } +} diff --git a/vendor/github.com/newrelic/go-agent/internal/slow_queries.go b/vendor/github.com/newrelic/go-agent/internal/slow_queries.go new file mode 100644 index 00000000000..36f435fcd56 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/slow_queries.go @@ -0,0 +1,261 @@ +package internal + +import ( + "bytes" + "container/heap" + "hash/fnv" + "time" + + "github.com/newrelic/go-agent/internal/jsonx" +) + +type queryParameters map[string]interface{} + +func vetQueryParameters(params map[string]interface{}) (queryParameters, error) { + if nil == params { + return nil, nil + } + // Copying the parameters into a new map is safer than modifying the map + // from the customer. 
+	vetted := make(map[string]interface{})
+	var retErr error
+	for key, val := range params {
+		val, err := ValidateUserAttribute(key, val)
+		if nil != err {
+			retErr = err
+			continue
+		}
+		vetted[key] = val
+	}
+	return queryParameters(vetted), retErr
+}
+
+func (q queryParameters) WriteJSON(buf *bytes.Buffer) {
+	buf.WriteByte('{')
+	w := jsonFieldsWriter{buf: buf}
+	for key, val := range q {
+		writeAttributeValueJSON(&w, key, val)
+	}
+	buf.WriteByte('}')
+}
+
+// https://source.datanerd.us/agents/agent-specs/blob/master/Slow-SQLs-LEGACY.md
+
+// slowQueryInstance represents a single datastore call.
+type slowQueryInstance struct {
+	// Fields populated right after the datastore segment finishes:
+
+	Duration           time.Duration
+	DatastoreMetric    string
+	ParameterizedQuery string
+	QueryParameters    queryParameters
+	Host               string
+	PortPathOrID       string
+	DatabaseName       string
+	StackTrace         StackTrace
+
+	TxnEvent
}
+
+// Aggregation is performed to avoid reporting multiple slow queries with the
+// same query string. Since some datastore segments may be below the slow query
+// threshold, the aggregation fields Count, Total, and Min should be taken with
+// a grain of salt.
+type slowQuery struct {
+	Count int32         // number of times the query has been observed
+	Total time.Duration // cumulative duration
+	Min   time.Duration // minimum observed duration
+
+	// When Count > 1, slowQueryInstance contains values from the slowest
+	// observation.
+	slowQueryInstance
+}
+
+type slowQueries struct {
+	priorityQueue []*slowQuery
+	// lookup maps query strings to indices in the priorityQueue
+	lookup map[string]int
+}
+
+func (slows *slowQueries) Len() int {
+	return len(slows.priorityQueue)
+}
+func (slows *slowQueries) Less(i, j int) bool {
+	pq := slows.priorityQueue
+	return pq[i].Duration < pq[j].Duration
+}
+func (slows *slowQueries) Swap(i, j int) {
+	pq := slows.priorityQueue
+	si := pq[i]
+	sj := pq[j]
+	pq[i], pq[j] = pq[j], pq[i]
+	slows.lookup[si.ParameterizedQuery] = j
+	slows.lookup[sj.ParameterizedQuery] = i
+}
+
+// Push and Pop are unused: only heap.Init and heap.Fix are used.
+func (slows *slowQueries) Push(x interface{}) {}
+func (slows *slowQueries) Pop() interface{}   { return nil }
+
+func newSlowQueries(max int) *slowQueries {
+	return &slowQueries{
+		lookup:        make(map[string]int, max),
+		priorityQueue: make([]*slowQuery, 0, max),
+	}
+}
+
+// Merge is used to merge slow queries from the transaction into the harvest.
+func (slows *slowQueries) Merge(other *slowQueries, txnEvent TxnEvent) {
+	for _, s := range other.priorityQueue {
+		cp := *s
+		cp.TxnEvent = txnEvent
+		slows.observe(cp)
+	}
+}
+
+// merge aggregates the observations from two slow queries with the same Query.
+func (slow *slowQuery) merge(other slowQuery) {
+	slow.Count += other.Count
+	slow.Total += other.Total
+
+	if other.Min < slow.Min {
+		slow.Min = other.Min
+	}
+	if other.Duration > slow.Duration {
+		slow.slowQueryInstance = other.slowQueryInstance
+	}
+}
+
+func (slows *slowQueries) observeInstance(slow slowQueryInstance) {
+	slows.observe(slowQuery{
+		Count:             1,
+		Total:             slow.Duration,
+		Min:               slow.Duration,
+		slowQueryInstance: slow,
+	})
+}
+
+func (slows *slowQueries) insertAtIndex(slow slowQuery, idx int) {
+	cpy := new(slowQuery)
+	*cpy = slow
+	slows.priorityQueue[idx] = cpy
+	slows.lookup[slow.ParameterizedQuery] = idx
+	heap.Fix(slows, idx)
+}
+
+func (slows *slowQueries) observe(slow slowQuery) {
+	// Has the query previously been observed?
+	if idx, ok := slows.lookup[slow.ParameterizedQuery]; ok {
+		slows.priorityQueue[idx].merge(slow)
+		heap.Fix(slows, idx)
+		return
+	}
+	// Has the collection reached max capacity?
+	if len(slows.priorityQueue) < cap(slows.priorityQueue) {
+		idx := len(slows.priorityQueue)
+		slows.priorityQueue = slows.priorityQueue[0 : idx+1]
+		slows.insertAtIndex(slow, idx)
+		return
+	}
+	// Is this query slower than the existing fastest?
+	fastest := slows.priorityQueue[0]
+	if slow.Duration > fastest.Duration {
+		delete(slows.lookup, fastest.ParameterizedQuery)
+		slows.insertAtIndex(slow, 0)
+		return
+	}
+}
+
+// The third element of the slow query JSON should be a hash of the query
+// string. This hash may be used by backend services to aggregate queries which
+// have the same query string. It is unknown if this is actually used.
+func makeSlowQueryID(query string) uint32 {
+	h := fnv.New32a()
+	h.Write([]byte(query))
+	return h.Sum32()
+}
+
+func (slow *slowQuery) WriteJSON(buf *bytes.Buffer) {
+	buf.WriteByte('[')
+	jsonx.AppendString(buf, slow.TxnEvent.FinalName)
+	buf.WriteByte(',')
+	// Include request.uri if it is included in any destination.
+	// TODO: Change this to the transaction trace segment destination
+	// once transaction trace segment attribute configuration has been
+	// added.
+	uri, _ := slow.TxnEvent.Attrs.GetAgentValue(attributeRequestURI, DestAll)
+	jsonx.AppendString(buf, uri)
+	buf.WriteByte(',')
+	jsonx.AppendInt(buf, int64(makeSlowQueryID(slow.ParameterizedQuery)))
+	buf.WriteByte(',')
+	jsonx.AppendString(buf, slow.ParameterizedQuery)
+	buf.WriteByte(',')
+	jsonx.AppendString(buf, slow.DatastoreMetric)
+	buf.WriteByte(',')
+	jsonx.AppendInt(buf, int64(slow.Count))
+	buf.WriteByte(',')
+	jsonx.AppendFloat(buf, slow.Total.Seconds()*1000.0)
+	buf.WriteByte(',')
+	jsonx.AppendFloat(buf, slow.Min.Seconds()*1000.0)
+	buf.WriteByte(',')
+	jsonx.AppendFloat(buf, slow.Duration.Seconds()*1000.0)
+	buf.WriteByte(',')
+	w := jsonFieldsWriter{buf: buf}
+	buf.WriteByte('{')
+	if "" != slow.Host {
+		w.stringField("host", slow.Host)
+	}
+	if "" != slow.PortPathOrID {
+		w.stringField("port_path_or_id", slow.PortPathOrID)
+	}
+	if "" != slow.DatabaseName {
+		w.stringField("database_name", slow.DatabaseName)
+	}
+	if nil != slow.StackTrace {
+		w.writerField("backtrace", slow.StackTrace)
+	}
+	if nil != slow.QueryParameters {
+		w.writerField("query_parameters", slow.QueryParameters)
+	}
+
+	sharedBetterCATIntrinsics(&slow.TxnEvent, &w)
+
+	buf.WriteByte('}')
+	buf.WriteByte(']')
+}
+
+// WriteJSON marshals the collection of slow queries into JSON according to the
+// schema expected by the collector.
+//
+// Note: This JSON does not contain the agentRunID. This is for unknown
+// historical reasons. Since the agentRunID is included in the url,
+// its use in the other commands' JSON is redundant (although required).
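+// The resulting shape (illustrative) is a doubly-nested array of entries:
+// [[[txnName, uri, queryID, query, metric, count, totalMs, minMs, maxMs,
+// {params...}], ...]], with durations reported in milliseconds.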
+func (slows *slowQueries) WriteJSON(buf *bytes.Buffer) { + buf.WriteByte('[') + buf.WriteByte('[') + for idx, s := range slows.priorityQueue { + if idx > 0 { + buf.WriteByte(',') + } + s.WriteJSON(buf) + } + buf.WriteByte(']') + buf.WriteByte(']') +} + +func (slows *slowQueries) Data(agentRunID string, harvestStart time.Time) ([]byte, error) { + if 0 == len(slows.priorityQueue) { + return nil, nil + } + estimate := 1024 * len(slows.priorityQueue) + buf := bytes.NewBuffer(make([]byte, 0, estimate)) + slows.WriteJSON(buf) + return buf.Bytes(), nil +} + +func (slows *slowQueries) MergeIntoHarvest(newHarvest *Harvest) { +} + +func (slows *slowQueries) EndpointMethod() string { + return cmdSlowSQLs +} diff --git a/vendor/github.com/newrelic/go-agent/internal/span_events.go b/vendor/github.com/newrelic/go-agent/internal/span_events.go new file mode 100644 index 00000000000..6a670008b85 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/span_events.go @@ -0,0 +1,143 @@ +package internal + +import ( + "bytes" + "time" +) + +// https://source.datanerd.us/agents/agent-specs/blob/master/Span-Events.md + +type spanCategory string + +const ( + spanCategoryHTTP spanCategory = "http" + spanCategoryDatastore = "datastore" + spanCategoryGeneric = "generic" +) + +// SpanEvent represents a span event, necessary to support Distributed Tracing. +type SpanEvent struct { + TraceID string + GUID string + ParentID string + TransactionID string + Sampled bool + Priority Priority + Timestamp time.Time + Duration time.Duration + Name string + Category spanCategory + Component string + Kind string + IsEntrypoint bool + Attributes spanAttributeMap +} + +// WriteJSON prepares JSON in the format expected by the collector. +func (e *SpanEvent) WriteJSON(buf *bytes.Buffer) { + w := jsonFieldsWriter{buf: buf} + buf.WriteByte('[') + buf.WriteByte('{') + w.stringField("type", "Span") + w.stringField("traceId", e.TraceID) + w.stringField("guid", e.GUID) + if "" != e.ParentID { + w.stringField("parentId", e.ParentID) + } + w.stringField("transactionId", e.TransactionID) + w.boolField("sampled", e.Sampled) + w.writerField("priority", e.Priority) + w.intField("timestamp", e.Timestamp.UnixNano()/(1000*1000)) // in milliseconds + w.floatField("duration", e.Duration.Seconds()) + w.stringField("name", e.Name) + w.stringField("category", string(e.Category)) + if e.IsEntrypoint { + w.boolField("nr.entryPoint", true) + } + if e.Component != "" { + w.stringField("component", e.Component) + } + if e.Kind != "" { + w.stringField("span.kind", e.Kind) + } + buf.WriteByte('}') + buf.WriteByte(',') + buf.WriteByte('{') + // user attributes section is unused + buf.WriteByte('}') + buf.WriteByte(',') + buf.WriteByte('{') + + w = jsonFieldsWriter{buf: buf} + for key, val := range e.Attributes { + w.writerField(key.String(), val) + } + + buf.WriteByte('}') + buf.WriteByte(']') +} + +// MarshalJSON is used for testing. 
+func (e *SpanEvent) MarshalJSON() ([]byte, error) { + buf := bytes.NewBuffer(make([]byte, 0, 256)) + + e.WriteJSON(buf) + + return buf.Bytes(), nil +} + +type spanEvents struct { + *analyticsEvents +} + +func newSpanEvents(max int) *spanEvents { + return &spanEvents{ + analyticsEvents: newAnalyticsEvents(max), + } +} + +func (events *spanEvents) addEvent(e *SpanEvent, cat *BetterCAT) { + e.TraceID = cat.TraceID() + e.TransactionID = cat.ID + e.Sampled = cat.Sampled + e.Priority = cat.Priority + events.addEventPopulated(e) +} + +func (events *spanEvents) addEventPopulated(e *SpanEvent) { + events.analyticsEvents.addEvent(analyticsEvent{priority: e.Priority, jsonWriter: e}) +} + +// MergeFromTransaction merges the span events from a transaction into the +// harvest's span events. This should only be called if the transaction was +// sampled and span events are enabled. +func (events *spanEvents) MergeFromTransaction(txndata *TxnData) { + root := &SpanEvent{ + GUID: txndata.getRootSpanID(), + Timestamp: txndata.Start, + Duration: txndata.Duration, + Name: txndata.FinalName, + Category: spanCategoryGeneric, + IsEntrypoint: true, + } + if nil != txndata.BetterCAT.Inbound { + root.ParentID = txndata.BetterCAT.Inbound.ID + } + events.addEvent(root, &txndata.BetterCAT) + + for _, evt := range txndata.spanEvents { + events.addEvent(evt, &txndata.BetterCAT) + } +} + +func (events *spanEvents) MergeIntoHarvest(h *Harvest) { + h.SpanEvents.mergeFailed(events.analyticsEvents) +} + +func (events *spanEvents) Data(agentRunID string, harvestStart time.Time) ([]byte, error) { + return events.CollectorJSON(agentRunID) +} + +func (events *spanEvents) EndpointMethod() string { + return cmdSpanEvents +} diff --git a/vendor/github.com/newrelic/go-agent/internal/stack_frame.go b/vendor/github.com/newrelic/go-agent/internal/stack_frame.go new file mode 100644 index 00000000000..837bcb7077e --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/stack_frame.go @@ -0,0 +1,24 @@ +// +build go1.7 + +package internal + +import "runtime" + +func (st StackTrace) frames() []stacktraceFrame { + if len(st) == 0 { + return nil + } + frames := runtime.CallersFrames(st) // CallersFrames is only available in Go 1.7+ + fs := make([]stacktraceFrame, 0, maxStackTraceFrames) + var frame runtime.Frame + more := true + for more { + frame, more = frames.Next() + fs = append(fs, stacktraceFrame{ + Name: frame.Function, + File: frame.File, + Line: int64(frame.Line), + }) + } + return fs +} diff --git a/vendor/github.com/newrelic/go-agent/internal/stack_frame_pre_1_7.go b/vendor/github.com/newrelic/go-agent/internal/stack_frame_pre_1_7.go new file mode 100644 index 00000000000..b9d824788c3 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/stack_frame_pre_1_7.go @@ -0,0 +1,34 @@ +// +build !go1.7 + +package internal + +import "runtime" + +func (st StackTrace) frames() []stacktraceFrame { + fs := make([]stacktraceFrame, len(st)) + for idx, pc := range st { + fs[idx] = lookupFrame(pc) + } + return fs +} + +func lookupFrame(pc uintptr) stacktraceFrame { + // The Golang runtime package documentation says "To look up the file + // and line number of the call itself, use pc[i]-1. As an exception to + // this rule, if pc[i-1] corresponds to the function runtime.sigpanic, + // then pc[i] is the program counter of a faulting instruction and + // should be used without any subtraction." + // + // TODO: Fully understand when this subtraction is necessary. 
+	place := pc - 1
+	f := runtime.FuncForPC(place)
+	if nil == f {
+		return stacktraceFrame{}
+	}
+	file, line := f.FileLine(place)
+	return stacktraceFrame{
+		Name: f.Name(),
+		File: file,
+		Line: int64(line),
+	}
+}
diff --git a/vendor/github.com/newrelic/go-agent/internal/stacktrace.go b/vendor/github.com/newrelic/go-agent/internal/stacktrace.go
new file mode 100644
index 00000000000..0a66d2795e6
--- /dev/null
+++ b/vendor/github.com/newrelic/go-agent/internal/stacktrace.go
@@ -0,0 +1,95 @@
+package internal
+
+import (
+	"bytes"
+	"path"
+	"runtime"
+	"strings"
+)
+
+// StackTrace is a stack trace.
+type StackTrace []uintptr
+
+// GetStackTrace returns a new StackTrace.
+func GetStackTrace() StackTrace {
+	skip := 1 // skip runtime.Callers
+	callers := make([]uintptr, maxStackTraceFrames)
+	written := runtime.Callers(skip, callers)
+	return callers[:written]
+}
+
+type stacktraceFrame struct {
+	Name string
+	File string
+	Line int64
+}
+
+func (f stacktraceFrame) formattedName() string {
+	if strings.HasPrefix(f.Name, "go.") {
+		// This indicates an anonymous struct, e.g.
+		// "go.(*struct { github.com/newrelic/go-agent.threadWithExtras }).NoticeError"
+		return f.Name
+	}
+	return path.Base(f.Name)
+}
+
+func (f stacktraceFrame) isAgent() bool {
+	// Note this is a contains conditional rather than a prefix
+	// conditional to handle anonymous functions like:
+	// "go.(*struct { github.com/newrelic/go-agent.threadWithExtras }).NoticeError"
+	return strings.Contains(f.Name, "github.com/newrelic/go-agent/internal.") ||
+		strings.Contains(f.Name, "github.com/newrelic/go-agent.")
+}
+
+func (f stacktraceFrame) WriteJSON(buf *bytes.Buffer) {
+	buf.WriteByte('{')
+	w := jsonFieldsWriter{buf: buf}
+	if f.Name != "" {
+		w.stringField("name", f.formattedName())
+	}
+	if f.File != "" {
+		w.stringField("filepath", f.File)
+	}
+	if f.Line != 0 {
+		w.intField("line", f.Line)
+	}
+	buf.WriteByte('}')
+}
+
+func writeFrames(buf *bytes.Buffer, frames []stacktraceFrame) {
+	// Remove top agent frames.
+	for len(frames) > 0 && frames[0].isAgent() {
+		frames = frames[1:]
+	}
+	// Truncate excessively long stack traces (they may be provided by the
+	// customer).
+	if len(frames) > maxStackTraceFrames {
+		frames = frames[0:maxStackTraceFrames]
+	}
+
+	buf.WriteByte('[')
+	for idx, frame := range frames {
+		if idx > 0 {
+			buf.WriteByte(',')
+		}
+		frame.WriteJSON(buf)
+	}
+	buf.WriteByte(']')
+}
+
+// WriteJSON adds the stack trace to the buffer in the JSON form expected by the
+// collector.
+func (st StackTrace) WriteJSON(buf *bytes.Buffer) {
+	frames := st.frames()
+	writeFrames(buf, frames)
+}
+
+// MarshalJSON prepares JSON in the format expected by the collector.
+func (st StackTrace) MarshalJSON() ([]byte, error) {
+	estimate := 256 * len(st)
+	buf := bytes.NewBuffer(make([]byte, 0, estimate))
+
+	st.WriteJSON(buf)
+
+	return buf.Bytes(), nil
+}
diff --git a/vendor/github.com/newrelic/go-agent/internal/sysinfo/bootid.go b/vendor/github.com/newrelic/go-agent/internal/sysinfo/bootid.go
new file mode 100644
index 00000000000..780058d5ee8
--- /dev/null
+++ b/vendor/github.com/newrelic/go-agent/internal/sysinfo/bootid.go
@@ -0,0 +1,50 @@
+package sysinfo
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"runtime"
+)
+
+// BootID returns the boot ID of the executing kernel.
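+// On Linux the value is read from /proc/sys/kernel/random/boot_id and is
+// typically a UUID-formatted string; all other platforms report
+// ErrFeatureUnsupported.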
+func BootID() (string, error) {
+	if "linux" != runtime.GOOS {
+		return "", ErrFeatureUnsupported
+	}
+	data, err := ioutil.ReadFile("/proc/sys/kernel/random/boot_id")
+	if err != nil {
+		return "", err
+	}
+
+	return validateBootID(data)
+}
+
+type invalidBootID string
+
+func (e invalidBootID) Error() string {
+	return fmt.Sprintf("Boot id has unrecognized format, id=%q", string(e))
+}
+
+func isASCIIByte(b byte) bool {
+	return (b >= 0x20 && b <= 0x7f)
+}
+
+func validateBootID(data []byte) (string, error) {
+	// We're going to go for the permissive reading of
+	// https://source.datanerd.us/agents/agent-specs/blob/master/Utilization.md:
+	// any ASCII (excluding control characters, because I'm pretty sure that's
+	// not in the spirit of the spec) string will be sent, up to and including
+	// 128 bytes in length.
+	trunc := bytes.TrimSpace(data)
+	if len(trunc) > 128 {
+		trunc = trunc[:128]
+	}
+	for _, b := range trunc {
+		if !isASCIIByte(b) {
+			return "", invalidBootID(data)
+		}
+	}
+
+	return string(trunc), nil
+}
diff --git a/vendor/github.com/newrelic/go-agent/internal/sysinfo/docker.go b/vendor/github.com/newrelic/go-agent/internal/sysinfo/docker.go
new file mode 100644
index 00000000000..a4f7c004bc5
--- /dev/null
+++ b/vendor/github.com/newrelic/go-agent/internal/sysinfo/docker.go
@@ -0,0 +1,114 @@
+package sysinfo
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"regexp"
+	"runtime"
+)
+
+var (
+	// ErrDockerNotFound is returned if a Docker ID is not found in
+	// /proc/self/cgroup
+	ErrDockerNotFound = errors.New("Docker ID not found")
+)
+
+// DockerID attempts to detect Docker.
+func DockerID() (string, error) {
+	if "linux" != runtime.GOOS {
+		return "", ErrFeatureUnsupported
+	}
+
+	f, err := os.Open("/proc/self/cgroup")
+	if err != nil {
+		return "", err
+	}
+	defer f.Close()
+
+	return parseDockerID(f)
+}
+
+var (
+	// The DockerID must be a 64-character lowercase hex string. Be greedy
+	// and match anything 64 characters or longer to spot invalid IDs.
+	dockerIDLength   = 64
+	dockerIDRegexRaw = fmt.Sprintf("[0-9a-f]{%d,}", dockerIDLength)
+	dockerIDRegex    = regexp.MustCompile(dockerIDRegexRaw)
+)
+
+func parseDockerID(r io.Reader) (string, error) {
+	// Each line in the cgroup file consists of three colon delimited fields.
+	//   1. hierarchy ID  - we don't care about this
+	//   2. subsystems    - comma separated list of cgroup subsystem names
+	//   3. control group - control group to which the process belongs
+	//
+	// Example
+	//   5:cpuacct,cpu,cpuset:/daemons
+
+	var id string
+
+	for scanner := bufio.NewScanner(r); scanner.Scan(); {
+		line := scanner.Bytes()
+		cols := bytes.SplitN(line, []byte(":"), 3)
+
+		if len(cols) < 3 {
+			continue
+		}
+
+		// We're only interested in the cpu subsystem.
+		if !isCPUCol(cols[1]) {
+			continue
+		}
+
+		id = dockerIDRegex.FindString(string(cols[2]))
+
+		if err := validateDockerID(id); err != nil {
+			// We can stop searching at this point: the CPU
+			// subsystem should only occur once, and its cgroup is
+			// not docker or not a format we accept.
+ return "", err + } + return id, nil + } + + return "", ErrDockerNotFound +} + +func isCPUCol(col []byte) bool { + // Sometimes we have multiple subsystems in one line, as in this example + // from: + // https://source.datanerd.us/newrelic/cross_agent_tests/blob/master/docker_container_id/docker-1.1.2-native-driver-systemd.txt + // + // 3:cpuacct,cpu:/system.slice/docker-67f98c9e6188f9c1818672a15dbe46237b6ee7e77f834d40d41c5fb3c2f84a2f.scope + splitCSV := func(r rune) bool { return r == ',' } + subsysCPU := []byte("cpu") + + for _, subsys := range bytes.FieldsFunc(col, splitCSV) { + if bytes.Equal(subsysCPU, subsys) { + return true + } + } + return false +} + +func isHex(r rune) bool { + return ('0' <= r && r <= '9') || ('a' <= r && r <= 'f') +} + +func validateDockerID(id string) error { + if len(id) != 64 { + return fmt.Errorf("%s is not %d characters long", id, dockerIDLength) + } + + for _, c := range id { + if !isHex(c) { + return fmt.Errorf("Character: %c is not hex in string %s", c, id) + } + } + + return nil +} diff --git a/vendor/github.com/newrelic/go-agent/internal/sysinfo/errors.go b/vendor/github.com/newrelic/go-agent/internal/sysinfo/errors.go new file mode 100644 index 00000000000..d4b684b5faa --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/sysinfo/errors.go @@ -0,0 +1,10 @@ +package sysinfo + +import ( + "errors" +) + +var ( + // ErrFeatureUnsupported indicates unsupported platform. + ErrFeatureUnsupported = errors.New("That feature is not supported on this platform") +) diff --git a/vendor/github.com/newrelic/go-agent/internal/sysinfo/hostname_generic.go b/vendor/github.com/newrelic/go-agent/internal/sysinfo/hostname_generic.go new file mode 100644 index 00000000000..ccef4fcab57 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/sysinfo/hostname_generic.go @@ -0,0 +1,10 @@ +// +build !linux + +package sysinfo + +import "os" + +// Hostname returns the host name. +func Hostname() (string, error) { + return os.Hostname() +} diff --git a/vendor/github.com/newrelic/go-agent/internal/sysinfo/hostname_linux.go b/vendor/github.com/newrelic/go-agent/internal/sysinfo/hostname_linux.go new file mode 100644 index 00000000000..e2300854d07 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/sysinfo/hostname_linux.go @@ -0,0 +1,50 @@ +package sysinfo + +import ( + "os" + "syscall" +) + +// Hostname returns the host name. +func Hostname() (string, error) { + // Try the builtin API first, which is designed to match the output of + // /bin/hostname, and fallback to uname(2) if that fails to match the + // behavior of gethostname(2) as implemented by glibc. On Linux, all + // these method should result in the same value because sethostname(2) + // limits the hostname to 64 bytes, the same size of the nodename field + // returned by uname(2). Note that is correspondence is not true on + // other platforms. + // + // os.Hostname failures should be exceedingly rare, however some systems + // configure SELinux to deny read access to /proc/sys/kernel/hostname. + // Redhat's OpenShift platform for example. os.Hostname can also fail if + // some or all of /proc has been hidden via chroot(2) or manipulation of + // the current processes' filesystem namespace via the cgroups APIs. + // Docker is an example of a tool that can configure such an + // environment. 
+ name, err := os.Hostname() + if err == nil { + return name, nil + } + + var uts syscall.Utsname + if err2 := syscall.Uname(&uts); err2 != nil { + // The man page documents only one possible error for uname(2), + // suggesting that as long as the buffer given is valid, the + // call will never fail. Return the original error in the hope + // it provides more relevant information about why the hostname + // can't be retrieved. + return "", err + } + + // Convert Nodename to a Go string. + buf := make([]byte, 0, len(uts.Nodename)) + for _, c := range uts.Nodename { + if c == 0 { + break + } + buf = append(buf, byte(c)) + } + + return string(buf), nil +} diff --git a/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal.go b/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal.go new file mode 100644 index 00000000000..0763ee301a2 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal.go @@ -0,0 +1,40 @@ +package sysinfo + +import ( + "bufio" + "errors" + "io" + "regexp" + "strconv" +) + +// BytesToMebibytes converts bytes into mebibytes. +func BytesToMebibytes(bts uint64) uint64 { + return bts / ((uint64)(1024 * 1024)) +} + +var ( + meminfoRe = regexp.MustCompile(`^MemTotal:\s+([0-9]+)\s+[kK]B$`) + errMemTotalNotFound = errors.New("supported MemTotal not found in /proc/meminfo") +) + +// parseProcMeminfo is used to parse Linux's "/proc/meminfo". It is located +// here so that the relevant cross agent tests will be run on all platforms. +func parseProcMeminfo(f io.Reader) (uint64, error) { + scanner := bufio.NewScanner(f) + for scanner.Scan() { + if m := meminfoRe.FindSubmatch(scanner.Bytes()); m != nil { + kb, err := strconv.ParseUint(string(m[1]), 10, 64) + if err != nil { + return 0, err + } + return kb * 1024, nil + } + } + + err := scanner.Err() + if err == nil { + err = errMemTotalNotFound + } + return 0, err +} diff --git a/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_darwin.go b/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_darwin.go new file mode 100644 index 00000000000..3c40f42d5df --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_darwin.go @@ -0,0 +1,29 @@ +package sysinfo + +import ( + "syscall" + "unsafe" +) + +// PhysicalMemoryBytes returns the total amount of host memory. +func PhysicalMemoryBytes() (uint64, error) { + mib := []int32{6 /* CTL_HW */, 24 /* HW_MEMSIZE */} + + buf := make([]byte, 8) + bufLen := uintptr(8) + + _, _, e1 := syscall.Syscall6(syscall.SYS___SYSCTL, + uintptr(unsafe.Pointer(&mib[0])), uintptr(len(mib)), + uintptr(unsafe.Pointer(&buf[0])), uintptr(unsafe.Pointer(&bufLen)), + uintptr(0), uintptr(0)) + + if e1 != 0 { + return 0, e1 + } + + if bufLen != 8 { + return 0, syscall.EIO + } + + return *(*uint64)(unsafe.Pointer(&buf[0])), nil +} diff --git a/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_freebsd.go b/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_freebsd.go new file mode 100644 index 00000000000..2e82320ac72 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_freebsd.go @@ -0,0 +1,32 @@ +package sysinfo + +import ( + "syscall" + "unsafe" +) + +// PhysicalMemoryBytes returns the total amount of host memory. 
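+// On FreeBSD this issues the equivalent of `sysctl hw.physmem` through the
+// raw __sysctl syscall; the kernel may write back a 4-byte or 8-byte value,
+// hence the switch on bufLen below.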
+func PhysicalMemoryBytes() (uint64, error) {
+	mib := []int32{6 /* CTL_HW */, 5 /* HW_PHYSMEM */}
+
+	buf := make([]byte, 8)
+	bufLen := uintptr(8)
+
+	_, _, e1 := syscall.Syscall6(syscall.SYS___SYSCTL,
+		uintptr(unsafe.Pointer(&mib[0])), uintptr(len(mib)),
+		uintptr(unsafe.Pointer(&buf[0])), uintptr(unsafe.Pointer(&bufLen)),
+		uintptr(0), uintptr(0))
+
+	if e1 != 0 {
+		return 0, e1
+	}
+
+	switch bufLen {
+	case 4:
+		return uint64(*(*uint32)(unsafe.Pointer(&buf[0]))), nil
+	case 8:
+		return *(*uint64)(unsafe.Pointer(&buf[0])), nil
+	default:
+		return 0, syscall.EIO
+	}
+}
diff --git a/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_linux.go b/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_linux.go
new file mode 100644
index 00000000000..958e569937c
--- /dev/null
+++ b/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_linux.go
@@ -0,0 +1,14 @@
+package sysinfo
+
+import "os"
+
+// PhysicalMemoryBytes returns the total amount of host memory.
+func PhysicalMemoryBytes() (uint64, error) {
+	f, err := os.Open("/proc/meminfo")
+	if err != nil {
+		return 0, err
+	}
+	defer f.Close()
+
+	return parseProcMeminfo(f)
+}
diff --git a/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_solaris.go b/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_solaris.go
new file mode 100644
index 00000000000..4f1c818e552
--- /dev/null
+++ b/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_solaris.go
@@ -0,0 +1,26 @@
+package sysinfo
+
+/*
+#include <unistd.h>
+*/
+import "C"
+
+// PhysicalMemoryBytes returns the total amount of host memory.
+func PhysicalMemoryBytes() (uint64, error) {
+	// The function we're calling on Solaris is
+	// long sysconf(int name);
+	var pages C.long
+	var pagesizeBytes C.long
+	var err error
+
+	pagesizeBytes, err = C.sysconf(C._SC_PAGE_SIZE)
+	if pagesizeBytes < 1 {
+		return 0, err
+	}
+	pages, err = C.sysconf(C._SC_PHYS_PAGES)
+	if pages < 1 {
+		return 0, err
+	}
+
+	return uint64(pages) * uint64(pagesizeBytes), nil
+}
diff --git a/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_windows.go b/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_windows.go
new file mode 100644
index 00000000000..b211317e1f4
--- /dev/null
+++ b/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_windows.go
@@ -0,0 +1,23 @@
+package sysinfo
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+// PhysicalMemoryBytes returns the total amount of host memory.
+func PhysicalMemoryBytes() (uint64, error) {
+	// https://msdn.microsoft.com/en-us/library/windows/desktop/cc300158(v=vs.85).aspx
+	// http://stackoverflow.com/questions/30743070/query-total-physical-memory-in-windows-with-golang
+	mod := syscall.NewLazyDLL("kernel32.dll")
+	proc := mod.NewProc("GetPhysicallyInstalledSystemMemory")
+	var memkb uint64
+
+	ret, _, err := proc.Call(uintptr(unsafe.Pointer(&memkb)))
+	// A return value of TRUE (1) indicates success; FALSE (0) indicates failure.
+	if ret != 1 {
+		return 0, err
+	}
+
+	return memkb * 1024, nil
+}
diff --git a/vendor/github.com/newrelic/go-agent/internal/sysinfo/usage.go b/vendor/github.com/newrelic/go-agent/internal/sysinfo/usage.go
new file mode 100644
index 00000000000..071049edabf
--- /dev/null
+++ b/vendor/github.com/newrelic/go-agent/internal/sysinfo/usage.go
@@ -0,0 +1,11 @@
+package sysinfo
+
+import (
+	"time"
+)
+
+// Usage contains process times.
+type Usage struct { + System time.Duration + User time.Duration +} diff --git a/vendor/github.com/newrelic/go-agent/internal/sysinfo/usage_posix.go b/vendor/github.com/newrelic/go-agent/internal/sysinfo/usage_posix.go new file mode 100644 index 00000000000..3f7ab31f735 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/sysinfo/usage_posix.go @@ -0,0 +1,26 @@ +// +build !windows + +package sysinfo + +import ( + "syscall" + "time" +) + +func timevalToDuration(tv syscall.Timeval) time.Duration { + return time.Duration(tv.Nano()) * time.Nanosecond +} + +// GetUsage gathers process times. +func GetUsage() (Usage, error) { + ru := syscall.Rusage{} + err := syscall.Getrusage(syscall.RUSAGE_SELF, &ru) + if err != nil { + return Usage{}, err + } + + return Usage{ + System: timevalToDuration(ru.Stime), + User: timevalToDuration(ru.Utime), + }, nil +} diff --git a/vendor/github.com/newrelic/go-agent/internal/sysinfo/usage_windows.go b/vendor/github.com/newrelic/go-agent/internal/sysinfo/usage_windows.go new file mode 100644 index 00000000000..8a8677a35b4 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/sysinfo/usage_windows.go @@ -0,0 +1,34 @@ +package sysinfo + +import ( + "syscall" + "time" +) + +func filetimeToDuration(ft *syscall.Filetime) time.Duration { + ns := ft.Nanoseconds() + return time.Duration(ns) +} + +// GetUsage gathers process times. +func GetUsage() (Usage, error) { + var creationTime syscall.Filetime + var exitTime syscall.Filetime + var kernelTime syscall.Filetime + var userTime syscall.Filetime + + handle, err := syscall.GetCurrentProcess() + if err != nil { + return Usage{}, err + } + + err = syscall.GetProcessTimes(handle, &creationTime, &exitTime, &kernelTime, &userTime) + if err != nil { + return Usage{}, err + } + + return Usage{ + System: filetimeToDuration(&kernelTime), + User: filetimeToDuration(&userTime), + }, nil +} diff --git a/vendor/github.com/newrelic/go-agent/internal/trace_id_generator.go b/vendor/github.com/newrelic/go-agent/internal/trace_id_generator.go new file mode 100644 index 00000000000..3e6afd55758 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/trace_id_generator.go @@ -0,0 +1,31 @@ +package internal + +import ( + "fmt" + "math/rand" + "sync" +) + +// TraceIDGenerator creates identifiers for distributed tracing. +type TraceIDGenerator struct { + sync.Mutex + rnd *rand.Rand +} + +// NewTraceIDGenerator creates a new trace identifier generator. +func NewTraceIDGenerator(seed int64) *TraceIDGenerator { + return &TraceIDGenerator{ + rnd: rand.New(rand.NewSource(seed)), + } +} + +// GenerateTraceID creates a new trace identifier. +func (tg *TraceIDGenerator) GenerateTraceID() string { + tg.Lock() + defer tg.Unlock() + + u1 := tg.rnd.Uint32() + u2 := tg.rnd.Uint32() + bits := (uint64(u1) << 32) | uint64(u2) + return fmt.Sprintf("%016x", bits) +} diff --git a/vendor/github.com/newrelic/go-agent/internal/tracing.go b/vendor/github.com/newrelic/go-agent/internal/tracing.go new file mode 100644 index 00000000000..8cc236086a2 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/tracing.go @@ -0,0 +1,705 @@ +package internal + +import ( + "bytes" + "errors" + "fmt" + "net/http" + "net/url" + "time" + + "github.com/newrelic/go-agent/internal/cat" + "github.com/newrelic/go-agent/internal/jsonx" + "github.com/newrelic/go-agent/internal/logger" + "github.com/newrelic/go-agent/internal/sysinfo" +) + +// MarshalJSON limits the number of decimals. 
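+// priorityFormat is defined alongside the Priority type elsewhere in this
+// package; it caps the precision so that sampling priorities stay compact
+// in collector payloads.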
+func (p *Priority) MarshalJSON() ([]byte, error) {
+	return []byte(fmt.Sprintf(priorityFormat, *p)), nil
+}
+
+// WriteJSON limits the number of decimals.
+func (p Priority) WriteJSON(buf *bytes.Buffer) {
+	fmt.Fprintf(buf, priorityFormat, p)
+}
+
+// TxnEvent represents a transaction.
+// https://source.datanerd.us/agents/agent-specs/blob/master/Transaction-Events-PORTED.md
+// https://newrelic.atlassian.net/wiki/display/eng/Agent+Support+for+Synthetics%3A+Forced+Transaction+Traces+and+Analytic+Events
+type TxnEvent struct {
+	FinalName string
+	Start     time.Time
+	Duration  time.Duration
+	TotalTime time.Duration
+	Queuing   time.Duration
+	Zone      ApdexZone
+	Attrs     *Attributes
+	DatastoreExternalTotals
+	CrossProcess TxnCrossProcess
+	BetterCAT    BetterCAT
+	HasError     bool
+}
+
+// BetterCAT stores the transaction's priority and all fields related
+// to a DistributedTracer's Cross-Application Trace.
+type BetterCAT struct {
+	Enabled  bool
+	Priority Priority
+	Sampled  bool
+	Inbound  *Payload
+	ID       string
+}
+
+// TraceID returns the trace id.
+func (e BetterCAT) TraceID() string {
+	if nil != e.Inbound {
+		return e.Inbound.TracedID
+	}
+	return e.ID
+}
+
+// TxnData contains the recorded data of a transaction.
+type TxnData struct {
+	TxnEvent
+	IsWeb          bool
+	Name           string    // Work in progress name.
+	Errors         TxnErrors // Lazily initialized.
+	Stop           time.Time
+	ApdexThreshold time.Duration
+
+	stamp           segmentStamp
+	threadIDCounter uint64
+
+	TraceIDGenerator       *TraceIDGenerator
+	LazilyCalculateSampled func() bool
+	SpanEventsEnabled      bool
+	rootSpanID             string
+	spanEvents             []*SpanEvent
+
+	customSegments    map[string]*metricData
+	datastoreSegments map[DatastoreMetricKey]*metricData
+	externalSegments  map[externalMetricKey]*metricData
+
+	TxnTrace
+
+	SlowQueriesEnabled bool
+	SlowQueryThreshold time.Duration
+	SlowQueries        *slowQueries
+
+	// These better CAT supportability fields are left outside of
+	// TxnEvent.BetterCAT to minimize the size of transaction event memory.
+	DistributedTracingSupport
+}
+
+func (t *TxnData) saveTraceSegment(end segmentEnd, name string, attrs spanAttributeMap, externalGUID string) {
+	attrs = t.Attrs.filterSpanAttributes(attrs, destSegment)
+	t.TxnTrace.witnessNode(end, name, attrs, externalGUID)
+}
+
+// Thread contains a segment stack that is used to track segment parenting time
+// within a single goroutine.
+type Thread struct {
+	threadID uint64
+	stack    []segmentFrame
+	// start and end are used to track the TotalTime this Thread was active.
+	start time.Time
+	end   time.Time
+}
+
+// RecordActivity indicates that activity happened at this time on this
+// goroutine, which helps track total time.
+func (thread *Thread) RecordActivity(now time.Time) {
+	if thread.start.IsZero() || now.Before(thread.start) {
+		thread.start = now
+	}
+	if now.After(thread.end) {
+		thread.end = now
+	}
+}
+
+// TotalTime returns the amount of time that this thread contributes to the
+// total time.
+func (thread *Thread) TotalTime() time.Duration {
+	if thread.start.Before(thread.end) {
+		return thread.end.Sub(thread.start)
+	}
+	return 0
+}
+
+// NewThread returns a new Thread to track segments in a new goroutine.
+func NewThread(txndata *TxnData) *Thread {
+	// Each thread needs a unique ID.
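+	// The counter lives on TxnData, so thread identifiers are unique
+	// within a transaction rather than globally.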
+	txndata.threadIDCounter++
+	return &Thread{
+		threadID: txndata.threadIDCounter,
+	}
+}
+
+type segmentStamp uint64
+
+type segmentTime struct {
+	Stamp segmentStamp
+	Time  time.Time
+}
+
+// SegmentStartTime is embedded into the top level segments (rather than
+// segmentTime) to keep the structure sizes small and minimize allocations.
+type SegmentStartTime struct {
+	Stamp segmentStamp
+	Depth int
+}
+
+type stringJSONWriter string
+
+func (s stringJSONWriter) WriteJSON(buf *bytes.Buffer) {
+	jsonx.AppendString(buf, string(s))
+}
+
+// spanAttributeMap is used for span attributes and segment attributes. The
+// value is a jsonWriter to allow for segment query parameters.
+type spanAttributeMap map[SpanAttribute]jsonWriter
+
+func (m *spanAttributeMap) addString(key SpanAttribute, val string) {
+	if "" != val {
+		m.add(key, stringJSONWriter(val))
+	}
+}
+
+func (m *spanAttributeMap) add(key SpanAttribute, val jsonWriter) {
+	if *m == nil {
+		*m = make(spanAttributeMap)
+	}
+	(*m)[key] = val
+}
+
+func (m spanAttributeMap) copy() spanAttributeMap {
+	if len(m) == 0 {
+		return nil
+	}
+	cpy := make(spanAttributeMap, len(m))
+	for k, v := range m {
+		cpy[k] = v
+	}
+	return cpy
+}
+
+type segmentFrame struct {
+	segmentTime
+	children   time.Duration
+	spanID     string
+	attributes spanAttributeMap
+}
+
+type segmentEnd struct {
+	start      segmentTime
+	stop       segmentTime
+	duration   time.Duration
+	exclusive  time.Duration
+	SpanID     string
+	ParentID   string
+	threadID   uint64
+	attributes spanAttributeMap
+}
+
+func (end segmentEnd) spanEvent() *SpanEvent {
+	if "" == end.SpanID {
+		return nil
+	}
+	return &SpanEvent{
+		GUID:         end.SpanID,
+		ParentID:     end.ParentID,
+		Timestamp:    end.start.Time,
+		Duration:     end.duration,
+		Attributes:   end.attributes,
+		IsEntrypoint: false,
+	}
+}
+
+const (
+	datastoreProductUnknown   = "Unknown"
+	datastoreOperationUnknown = "other"
+)
+
+// HasErrors indicates whether the transaction had errors.
+func (t *TxnData) HasErrors() bool {
+	return len(t.Errors) > 0
+}
+
+func (t *TxnData) time(now time.Time) segmentTime {
+	// Update the stamp before using it so that a 0 stamp can be special.
+	t.stamp++
+	return segmentTime{
+		Time:  now,
+		Stamp: t.stamp,
+	}
+}
+
+// AddAgentSpanAttribute allows attributes to be added to spans.
+func (thread *Thread) AddAgentSpanAttribute(key SpanAttribute, val string) {
+	if len(thread.stack) > 0 {
+		thread.stack[len(thread.stack)-1].attributes.addString(key, val)
+	}
+}
+
+// StartSegment begins a segment.
+func StartSegment(t *TxnData, thread *Thread, now time.Time) SegmentStartTime {
+	tm := t.time(now)
+	thread.stack = append(thread.stack, segmentFrame{
+		segmentTime: tm,
+		children:    0,
+	})
+
+	return SegmentStartTime{
+		Stamp: tm.Stamp,
+		Depth: len(thread.stack) - 1,
+	}
+}
+
+func (t *TxnData) getRootSpanID() string {
+	if "" == t.rootSpanID {
+		t.rootSpanID = t.TraceIDGenerator.GenerateTraceID()
+	}
+	return t.rootSpanID
+}
+
+// CurrentSpanIdentifier returns the identifier of the span at the top of the
+// segment stack.
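+// When the stack is empty, the root span's identifier is returned, creating
+// it lazily on first use.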
+func (t *TxnData) CurrentSpanIdentifier(thread *Thread) string {
+	if 0 == len(thread.stack) {
+		return t.getRootSpanID()
+	}
+	if "" == thread.stack[len(thread.stack)-1].spanID {
+		thread.stack[len(thread.stack)-1].spanID = t.TraceIDGenerator.GenerateTraceID()
+	}
+	return thread.stack[len(thread.stack)-1].spanID
+}
+
+func (t *TxnData) saveSpanEvent(e *SpanEvent) {
+	e.Attributes = t.Attrs.filterSpanAttributes(e.Attributes, destSpan)
+	if len(t.spanEvents) < maxSpanEvents {
+		t.spanEvents = append(t.spanEvents, e)
+	}
+}
+
+var (
+	errMalformedSegment = errors.New("segment identifier malformed: perhaps unsafe code has modified it?")
+	errSegmentOrder     = errors.New(`improper segment use: the Transaction must be used ` +
+		`in a single goroutine and segments must be ended in "last started first ended" order: ` +
+		`see https://github.com/newrelic/go-agent/blob/master/GUIDE.md#segments`)
+)
+
+func endSegment(t *TxnData, thread *Thread, start SegmentStartTime, now time.Time) (segmentEnd, error) {
+	if 0 == start.Stamp {
+		return segmentEnd{}, errMalformedSegment
+	}
+	if start.Depth >= len(thread.stack) {
+		return segmentEnd{}, errSegmentOrder
+	}
+	if start.Depth < 0 {
+		return segmentEnd{}, errMalformedSegment
+	}
+	frame := thread.stack[start.Depth]
+	if start.Stamp != frame.Stamp {
+		return segmentEnd{}, errSegmentOrder
+	}
+
+	var children time.Duration
+	for i := start.Depth; i < len(thread.stack); i++ {
+		children += thread.stack[i].children
+	}
+	s := segmentEnd{
+		stop:       t.time(now),
+		start:      frame.segmentTime,
+		attributes: frame.attributes,
+	}
+	if s.stop.Time.After(s.start.Time) {
+		s.duration = s.stop.Time.Sub(s.start.Time)
+	}
+	if s.duration > children {
+		s.exclusive = s.duration - children
+	}
+
+	// Note that we expect (depth == (len(t.stack) - 1)). However, if
+	// (depth < (len(t.stack) - 1)), that's OK: a panic may have popped
+	// some stack frames (and the consumer was not using defer).
+
+	if start.Depth > 0 {
+		thread.stack[start.Depth-1].children += s.duration
+	}
+
+	thread.stack = thread.stack[0:start.Depth]
+
+	if t.SpanEventsEnabled && t.LazilyCalculateSampled() {
+		s.SpanID = frame.spanID
+		if "" == s.SpanID {
+			s.SpanID = t.TraceIDGenerator.GenerateTraceID()
+		}
+		// Note that the current span identifier is the parent's
+		// identifier because we've already popped the segment that's
+		// ending off of the stack.
+		s.ParentID = t.CurrentSpanIdentifier(thread)
+	}
+
+	s.threadID = thread.threadID
+
+	thread.RecordActivity(s.start.Time)
+	thread.RecordActivity(s.stop.Time)
+
+	return s, nil
+}
+
+// EndBasicSegment ends a basic segment.
+func EndBasicSegment(t *TxnData, thread *Thread, start SegmentStartTime, now time.Time, name string) error {
+	end, err := endSegment(t, thread, start, now)
+	if nil != err {
+		return err
+	}
+	if nil == t.customSegments {
+		t.customSegments = make(map[string]*metricData)
+	}
+	m := metricDataFromDuration(end.duration, end.exclusive)
+	if data, ok := t.customSegments[name]; ok {
+		data.aggregate(m)
+	} else {
+		// Use `new` in place of &m so that m is not
+		// automatically moved to the heap.
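+		// (Taking the address of the local would make it escape on
+		// every call; with `new`, only this cache-miss path allocates.)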
+ cpy := new(metricData) + *cpy = m + t.customSegments[name] = cpy + } + + if t.TxnTrace.considerNode(end) { + attributes := end.attributes.copy() + t.saveTraceSegment(end, customSegmentMetric(name), attributes, "") + } + + if evt := end.spanEvent(); evt != nil { + evt.Name = customSegmentMetric(name) + evt.Category = spanCategoryGeneric + t.saveSpanEvent(evt) + } + + return nil +} + +// EndExternalParams contains the parameters for EndExternalSegment. +type EndExternalParams struct { + TxnData *TxnData + Thread *Thread + Start SegmentStartTime + Now time.Time + Logger logger.Logger + Response *http.Response + URL *url.URL + Host string + Library string + Method string +} + +// EndExternalSegment ends an external segment. +func EndExternalSegment(p EndExternalParams) error { + t := p.TxnData + end, err := endSegment(t, p.Thread, p.Start, p.Now) + if nil != err { + return err + } + + // Use the Host field if present, otherwise use host in the URL. + if p.Host == "" && p.URL != nil { + p.Host = p.URL.Host + } + if p.Host == "" { + p.Host = "unknown" + } + if p.Library == "" { + p.Library = "http" + } + + var appData *cat.AppDataHeader + if p.Response != nil { + hdr := HTTPHeaderToAppData(p.Response.Header) + appData, err = t.CrossProcess.ParseAppData(hdr) + if err != nil { + if p.Logger.DebugEnabled() { + p.Logger.Debug("failure to parse cross application response header", map[string]interface{}{ + "err": err.Error(), + "header": hdr, + }) + } + } + } + + var crossProcessID string + var transactionName string + var transactionGUID string + if appData != nil { + crossProcessID = appData.CrossProcessID + transactionName = appData.TransactionName + transactionGUID = appData.TransactionGUID + } + + key := externalMetricKey{ + Host: p.Host, + Library: p.Library, + Method: p.Method, + ExternalCrossProcessID: crossProcessID, + ExternalTransactionName: transactionName, + } + if nil == t.externalSegments { + t.externalSegments = make(map[externalMetricKey]*metricData) + } + t.externalCallCount++ + t.externalDuration += end.duration + m := metricDataFromDuration(end.duration, end.exclusive) + if data, ok := t.externalSegments[key]; ok { + data.aggregate(m) + } else { + // Use `new` in place of &m so that m is not + // automatically moved to the heap. + cpy := new(metricData) + *cpy = m + t.externalSegments[key] = cpy + } + + if t.TxnTrace.considerNode(end) { + attributes := end.attributes.copy() + if p.Library == "http" { + attributes.addString(spanAttributeHTTPURL, SafeURL(p.URL)) + } + t.saveTraceSegment(end, key.scopedMetric(), attributes, transactionGUID) + } + + if evt := end.spanEvent(); evt != nil { + evt.Name = key.scopedMetric() + evt.Category = spanCategoryHTTP + evt.Kind = "client" + evt.Component = p.Library + if p.Library == "http" { + evt.Attributes.addString(spanAttributeHTTPURL, SafeURL(p.URL)) + evt.Attributes.addString(spanAttributeHTTPMethod, p.Method) + } + t.saveSpanEvent(evt) + } + + return nil +} + +// EndDatastoreParams contains the parameters for EndDatastoreSegment. +type EndDatastoreParams struct { + TxnData *TxnData + Thread *Thread + Start SegmentStartTime + Now time.Time + Product string + Collection string + Operation string + ParameterizedQuery string + QueryParameters map[string]interface{} + Host string + PortPathOrID string + Database string +} + +const ( + unknownDatastoreHost = "unknown" + unknownDatastorePortPathOrID = "unknown" +) + +var ( + // ThisHost is the system hostname. 
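+	// It is computed once at package initialization by the
+	// immediately-invoked function below.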
+ ThisHost = func() string { + if h, err := sysinfo.Hostname(); nil == err { + return h + } + return unknownDatastoreHost + }() + hostsToReplace = map[string]struct{}{ + "localhost": {}, + "127.0.0.1": {}, + "0.0.0.0": {}, + "0:0:0:0:0:0:0:1": {}, + "::1": {}, + "0:0:0:0:0:0:0:0": {}, + "::": {}, + } +) + +func (t TxnData) slowQueryWorthy(d time.Duration) bool { + return t.SlowQueriesEnabled && (d >= t.SlowQueryThreshold) +} + +func datastoreSpanAddress(host, portPathOrID string) string { + if "" != host && "" != portPathOrID { + return host + ":" + portPathOrID + } + if "" != host { + return host + } + return portPathOrID +} + +// EndDatastoreSegment ends a datastore segment. +func EndDatastoreSegment(p EndDatastoreParams) error { + end, err := endSegment(p.TxnData, p.Thread, p.Start, p.Now) + if nil != err { + return err + } + if p.Operation == "" { + p.Operation = datastoreOperationUnknown + } + if p.Product == "" { + p.Product = datastoreProductUnknown + } + if p.Host == "" && p.PortPathOrID != "" { + p.Host = unknownDatastoreHost + } + if p.PortPathOrID == "" && p.Host != "" { + p.PortPathOrID = unknownDatastorePortPathOrID + } + if _, ok := hostsToReplace[p.Host]; ok { + p.Host = ThisHost + } + + // We still want to create a slowQuery if the consumer has not provided + // a Query string (or it has been removed by LASP) since the stack trace + // has value. + if p.ParameterizedQuery == "" { + collection := p.Collection + if "" == collection { + collection = "unknown" + } + p.ParameterizedQuery = fmt.Sprintf(`'%s' on '%s' using '%s'`, + p.Operation, collection, p.Product) + } + + key := DatastoreMetricKey{ + Product: p.Product, + Collection: p.Collection, + Operation: p.Operation, + Host: p.Host, + PortPathOrID: p.PortPathOrID, + } + if nil == p.TxnData.datastoreSegments { + p.TxnData.datastoreSegments = make(map[DatastoreMetricKey]*metricData) + } + p.TxnData.datastoreCallCount++ + p.TxnData.datastoreDuration += end.duration + m := metricDataFromDuration(end.duration, end.exclusive) + if data, ok := p.TxnData.datastoreSegments[key]; ok { + data.aggregate(m) + } else { + // Use `new` in place of &m so that m is not + // automatically moved to the heap. 
+ cpy := new(metricData) + *cpy = m + p.TxnData.datastoreSegments[key] = cpy + } + + scopedMetric := datastoreScopedMetric(key) + // errors in QueryParameters must not stop the recording of the segment + queryParams, err := vetQueryParameters(p.QueryParameters) + + if p.TxnData.TxnTrace.considerNode(end) { + attributes := end.attributes.copy() + attributes.addString(spanAttributeDBStatement, p.ParameterizedQuery) + attributes.addString(spanAttributeDBInstance, p.Database) + attributes.addString(spanAttributePeerAddress, datastoreSpanAddress(p.Host, p.PortPathOrID)) + attributes.addString(spanAttributePeerHostname, p.Host) + if len(queryParams) > 0 { + attributes.add(spanAttributeQueryParameters, queryParams) + } + p.TxnData.saveTraceSegment(end, scopedMetric, attributes, "") + } + + if p.TxnData.slowQueryWorthy(end.duration) { + if nil == p.TxnData.SlowQueries { + p.TxnData.SlowQueries = newSlowQueries(maxTxnSlowQueries) + } + p.TxnData.SlowQueries.observeInstance(slowQueryInstance{ + Duration: end.duration, + DatastoreMetric: scopedMetric, + ParameterizedQuery: p.ParameterizedQuery, + QueryParameters: queryParams, + Host: p.Host, + PortPathOrID: p.PortPathOrID, + DatabaseName: p.Database, + StackTrace: GetStackTrace(), + }) + } + + if evt := end.spanEvent(); evt != nil { + evt.Name = scopedMetric + evt.Category = spanCategoryDatastore + evt.Kind = "client" + evt.Component = p.Product + evt.Attributes.addString(spanAttributeDBStatement, p.ParameterizedQuery) + evt.Attributes.addString(spanAttributeDBInstance, p.Database) + evt.Attributes.addString(spanAttributePeerAddress, datastoreSpanAddress(p.Host, p.PortPathOrID)) + evt.Attributes.addString(spanAttributePeerHostname, p.Host) + evt.Attributes.addString(spanAttributeDBCollection, p.Collection) + p.TxnData.saveSpanEvent(evt) + } + + return err +} + +// MergeBreakdownMetrics creates segment metrics. 
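+// For each segment type it records both unscoped rollup metrics and metrics
+// scoped to the transaction's final name.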
+func MergeBreakdownMetrics(t *TxnData, metrics *metricTable) { + scope := t.FinalName + isWeb := t.IsWeb + // Custom Segment Metrics + for key, data := range t.customSegments { + name := customSegmentMetric(key) + // Unscoped + metrics.add(name, "", *data, unforced) + // Scoped + metrics.add(name, scope, *data, unforced) + } + + // External Segment Metrics + for key, data := range t.externalSegments { + metrics.add(externalRollupMetric.all, "", *data, forced) + metrics.add(externalRollupMetric.webOrOther(isWeb), "", *data, forced) + + hostMetric := externalHostMetric(key) + metrics.add(hostMetric, "", *data, unforced) + if "" != key.ExternalCrossProcessID && "" != key.ExternalTransactionName { + txnMetric := externalTransactionMetric(key) + + // Unscoped CAT metrics + metrics.add(externalAppMetric(key), "", *data, unforced) + metrics.add(txnMetric, "", *data, unforced) + } + + // Scoped External Metric + metrics.add(key.scopedMetric(), scope, *data, unforced) + } + + // Datastore Segment Metrics + for key, data := range t.datastoreSegments { + metrics.add(datastoreRollupMetric.all, "", *data, forced) + metrics.add(datastoreRollupMetric.webOrOther(isWeb), "", *data, forced) + + product := datastoreProductMetric(key) + metrics.add(product.all, "", *data, forced) + metrics.add(product.webOrOther(isWeb), "", *data, forced) + + if key.Host != "" && key.PortPathOrID != "" { + instance := datastoreInstanceMetric(key) + metrics.add(instance, "", *data, unforced) + } + + operation := datastoreOperationMetric(key) + metrics.add(operation, "", *data, unforced) + + if "" != key.Collection { + statement := datastoreStatementMetric(key) + + metrics.add(statement, "", *data, unforced) + metrics.add(statement, scope, *data, unforced) + } else { + metrics.add(operation, scope, *data, unforced) + } + } +} diff --git a/vendor/github.com/newrelic/go-agent/internal/txn_cross_process.go b/vendor/github.com/newrelic/go-agent/internal/txn_cross_process.go new file mode 100644 index 00000000000..5b251a1b3da --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/txn_cross_process.go @@ -0,0 +1,417 @@ +package internal + +import ( + "encoding/json" + "errors" + "fmt" + "time" + + "github.com/newrelic/go-agent/internal/cat" +) + +// Bitfield values for the TxnCrossProcess.Type field. +const ( + txnCrossProcessSynthetics = (1 << 0) + txnCrossProcessInbound = (1 << 1) + txnCrossProcessOutbound = (1 << 2) +) + +var ( + // ErrAccountNotTrusted indicates that, while the inbound headers were valid, + // the account ID within them is not trusted by the user's application. + ErrAccountNotTrusted = errors.New("account not trusted") +) + +// TxnCrossProcess contains the metadata required for CAT and Synthetics +// headers, transaction events, and traces. +type TxnCrossProcess struct { + // The user side switch controlling whether CAT is enabled or not. + Enabled bool + + // The user side switch controlling whether Distributed Tracing is enabled or not + // This is required by synthetics support. If Distributed Tracing is enabled, + // any synthetics functionality that is triggered should not set nr.guid. + DistributedTracingEnabled bool + + // Rather than copying in the entire ConnectReply, here are the fields that + // we need to support CAT. + CrossProcessID []byte + EncodingKey []byte + TrustedAccounts trustedAccountSet + + // CAT state for a given transaction. 
+ Type uint8 + ClientID string + GUID string + TripID string + PathHash string + AlternatePathHashes map[string]bool + ReferringPathHash string + ReferringTxnGUID string + Synthetics *cat.SyntheticsHeader + + // The encoded synthetics header received as part of the request headers, if + // any. By storing this here, we avoid needing to marshal the invariant + // Synthetics struct above each time an external segment is created. + SyntheticsHeader string +} + +// CrossProcessMetadata represents the metadata that must be transmitted with +// an external request for CAT to work. +type CrossProcessMetadata struct { + ID string + TxnData string + Synthetics string +} + +// Init initialises a TxnCrossProcess based on the given application connect +// reply. +func (txp *TxnCrossProcess) Init(enabled bool, dt bool, reply *ConnectReply) { + txp.CrossProcessID = []byte(reply.CrossProcessID) + txp.EncodingKey = []byte(reply.EncodingKey) + txp.DistributedTracingEnabled = dt + txp.Enabled = enabled + txp.TrustedAccounts = reply.TrustedAccounts +} + +// CreateCrossProcessMetadata generates request metadata that enable CAT and +// Synthetics support for an external segment. +func (txp *TxnCrossProcess) CreateCrossProcessMetadata(txnName, appName string) (CrossProcessMetadata, error) { + metadata := CrossProcessMetadata{} + + // Regardless of the user's CAT settings, if there was a synthetics header in + // the inbound request, a synthetics header should always be included in the + // outbound request headers. + if txp.IsSynthetics() { + metadata.Synthetics = txp.SyntheticsHeader + } + + if txp.Enabled { + txp.SetOutbound(true) + txp.requireTripID() + + id, err := txp.outboundID() + if err != nil { + return metadata, err + } + + txnData, err := txp.outboundTxnData(txnName, appName) + if err != nil { + return metadata, err + } + + metadata.ID = id + metadata.TxnData = txnData + } + + return metadata, nil +} + +// Finalise handles any end-of-transaction tasks. In practice, this simply +// means ensuring the path hash is set if it hasn't already been. +func (txp *TxnCrossProcess) Finalise(txnName, appName string) error { + if txp.Enabled && txp.Used() { + _, err := txp.setPathHash(txnName, appName) + return err + } + + // If there was no CAT activity, then do nothing, successfully. + return nil +} + +// IsInbound returns true if the transaction had inbound CAT headers. +func (txp *TxnCrossProcess) IsInbound() bool { + return 0 != (txp.Type & txnCrossProcessInbound) +} + +// IsOutbound returns true if the transaction has generated outbound CAT +// headers. +func (txp *TxnCrossProcess) IsOutbound() bool { + // We don't actually use this anywhere today, but it feels weird not having + // it. + return 0 != (txp.Type & txnCrossProcessOutbound) +} + +// IsSynthetics returns true if the transaction had inbound Synthetics headers. +func (txp *TxnCrossProcess) IsSynthetics() bool { + // Technically, this is redundant: the presence of a non-nil Synthetics + // pointer should be sufficient to determine if this is a synthetics + // transaction. Nevertheless, it's convenient to have the Type field be + // non-zero if any CAT behaviour has occurred. + return 0 != (txp.Type&txnCrossProcessSynthetics) && nil != txp.Synthetics +} + +// ParseAppData decodes the given appData value. 
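+// It returns (nil, nil) when CAT is disabled or when no header is present.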
+func (txp *TxnCrossProcess) ParseAppData(encodedAppData string) (*cat.AppDataHeader, error) { + if !txp.Enabled { + return nil, nil + } + if encodedAppData != "" { + rawAppData, err := Deobfuscate(encodedAppData, txp.EncodingKey) + if err != nil { + return nil, err + } + + appData := &cat.AppDataHeader{} + if err := json.Unmarshal(rawAppData, appData); err != nil { + return nil, err + } + + return appData, nil + } + + return nil, nil +} + +// CreateAppData creates the appData value that should be sent with a response +// to ensure CAT operates as expected. +func (txp *TxnCrossProcess) CreateAppData(name string, queueTime, responseTime time.Duration, contentLength int64) (string, error) { + // If CAT is disabled, do nothing, successfully. + if !txp.Enabled { + return "", nil + } + + data, err := json.Marshal(&cat.AppDataHeader{ + CrossProcessID: string(txp.CrossProcessID), + TransactionName: name, + QueueTimeInSeconds: queueTime.Seconds(), + ResponseTimeInSeconds: responseTime.Seconds(), + ContentLength: contentLength, + TransactionGUID: txp.GUID, + }) + if err != nil { + return "", err + } + + obfuscated, err := Obfuscate(data, txp.EncodingKey) + if err != nil { + return "", err + } + + return obfuscated, nil +} + +// Used returns true if any CAT or Synthetics related functionality has been +// triggered on the transaction. +func (txp *TxnCrossProcess) Used() bool { + return 0 != txp.Type +} + +// SetInbound sets the inbound CAT flag. This function is provided only for +// internal and unit testing purposes, and should not be used outside of this +// package normally. +func (txp *TxnCrossProcess) SetInbound(inbound bool) { + if inbound { + txp.Type |= txnCrossProcessInbound + } else { + txp.Type &^= txnCrossProcessInbound + } +} + +// SetOutbound sets the outbound CAT flag. This function is provided only for +// internal and unit testing purposes, and should not be used outside of this +// package normally. +func (txp *TxnCrossProcess) SetOutbound(outbound bool) { + if outbound { + txp.Type |= txnCrossProcessOutbound + } else { + txp.Type &^= txnCrossProcessOutbound + } +} + +// SetSynthetics sets the Synthetics CAT flag. This function is provided only +// for internal and unit testing purposes, and should not be used outside of +// this package normally. +func (txp *TxnCrossProcess) SetSynthetics(synthetics bool) { + if synthetics { + txp.Type |= txnCrossProcessSynthetics + } else { + txp.Type &^= txnCrossProcessSynthetics + } +} + +// handleInboundRequestHeaders parses the CAT headers from the given metadata +// and updates the relevant fields on the provided TxnData. 
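+// CAT headers are processed only when CAT is enabled and both the ID and
+// TxnData values are present; Synthetics headers are handled even when CAT
+// is disabled.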
+func (txp *TxnCrossProcess) handleInboundRequestHeaders(metadata CrossProcessMetadata) error { + if txp.Enabled && metadata.ID != "" && metadata.TxnData != "" { + if err := txp.handleInboundRequestEncodedCAT(metadata.ID, metadata.TxnData); err != nil { + return err + } + } + + if metadata.Synthetics != "" { + if err := txp.handleInboundRequestEncodedSynthetics(metadata.Synthetics); err != nil { + return err + } + } + + return nil +} + +func (txp *TxnCrossProcess) handleInboundRequestEncodedCAT(encodedID, encodedTxnData string) error { + rawID, err := Deobfuscate(encodedID, txp.EncodingKey) + if err != nil { + return err + } + + rawTxnData, err := Deobfuscate(encodedTxnData, txp.EncodingKey) + if err != nil { + return err + } + + if err := txp.handleInboundRequestID(rawID); err != nil { + return err + } + + return txp.handleInboundRequestTxnData(rawTxnData) +} + +func (txp *TxnCrossProcess) handleInboundRequestID(raw []byte) error { + id, err := cat.NewIDHeader(raw) + if err != nil { + return err + } + + if !txp.TrustedAccounts.IsTrusted(id.AccountID) { + return ErrAccountNotTrusted + } + + txp.SetInbound(true) + txp.ClientID = string(raw) + txp.setRequireGUID() + + return nil +} + +func (txp *TxnCrossProcess) handleInboundRequestTxnData(raw []byte) error { + txnData := &cat.TxnDataHeader{} + if err := json.Unmarshal(raw, txnData); err != nil { + return err + } + + txp.SetInbound(true) + if txnData.TripID != "" { + txp.TripID = txnData.TripID + } else { + txp.setRequireGUID() + txp.TripID = txp.GUID + } + txp.ReferringTxnGUID = txnData.GUID + txp.ReferringPathHash = txnData.PathHash + + return nil +} + +func (txp *TxnCrossProcess) handleInboundRequestEncodedSynthetics(encoded string) error { + raw, err := Deobfuscate(encoded, txp.EncodingKey) + if err != nil { + return err + } + + if err := txp.handleInboundRequestSynthetics(raw); err != nil { + return err + } + + txp.SyntheticsHeader = encoded + return nil +} + +func (txp *TxnCrossProcess) handleInboundRequestSynthetics(raw []byte) error { + synthetics := &cat.SyntheticsHeader{} + if err := json.Unmarshal(raw, synthetics); err != nil { + return err + } + + // The specced behaviour here if the account isn't trusted is to disable the + // synthetics handling, but not CAT in general, so we won't return an error + // here. + if txp.TrustedAccounts.IsTrusted(synthetics.AccountID) { + txp.SetSynthetics(true) + txp.setRequireGUID() + txp.Synthetics = synthetics + } + + return nil +} + +func (txp *TxnCrossProcess) outboundID() (string, error) { + return Obfuscate(txp.CrossProcessID, txp.EncodingKey) +} + +func (txp *TxnCrossProcess) outboundTxnData(txnName, appName string) (string, error) { + pathHash, err := txp.setPathHash(txnName, appName) + if err != nil { + return "", err + } + + data, err := json.Marshal(&cat.TxnDataHeader{ + GUID: txp.GUID, + TripID: txp.TripID, + PathHash: pathHash, + }) + if err != nil { + return "", err + } + + return Obfuscate(data, txp.EncodingKey) +} + +// setRequireGUID ensures that the transaction has a valid GUID, and sets the +// nr.guid and trip ID if they are not already set. If the customer has enabled +// DistributedTracing, then the new style of guid will be set elsewhere. +func (txp *TxnCrossProcess) setRequireGUID() { + if txp.DistributedTracingEnabled { + return + } + + if txp.GUID != "" { + return + } + + txp.GUID = fmt.Sprintf("%x", RandUint64()) + + if txp.TripID == "" { + txp.requireTripID() + } +} + +// requireTripID ensures that the transaction has a valid trip ID. 
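+// It is a no-op when CAT is disabled or when a trip ID has already been set.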
+func (txp *TxnCrossProcess) requireTripID() { + if !txp.Enabled { + return + } + if txp.TripID != "" { + return + } + + txp.setRequireGUID() + txp.TripID = txp.GUID +} + +// setPathHash generates a path hash, sets the transaction's path hash to +// match, and returns it. This function will also ensure that the alternate +// path hashes are correctly updated. +func (txp *TxnCrossProcess) setPathHash(txnName, appName string) (string, error) { + pathHash, err := cat.GeneratePathHash(txp.ReferringPathHash, txnName, appName) + if err != nil { + return "", err + } + + if pathHash != txp.PathHash { + if txp.PathHash != "" { + // Lazily initialise the alternate path hashes if they haven't been + // already. + if txp.AlternatePathHashes == nil { + txp.AlternatePathHashes = make(map[string]bool) + } + + // The spec limits us to a maximum of 10 alternate path hashes. + if len(txp.AlternatePathHashes) < 10 { + txp.AlternatePathHashes[txp.PathHash] = true + } + } + txp.PathHash = pathHash + } + + return pathHash, nil +} diff --git a/vendor/github.com/newrelic/go-agent/internal/txn_events.go b/vendor/github.com/newrelic/go-agent/internal/txn_events.go new file mode 100644 index 00000000000..02477f669ad --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/txn_events.go @@ -0,0 +1,194 @@ +package internal + +import ( + "bytes" + "sort" + "strings" + "time" +) + +// DatastoreExternalTotals contains overview of external and datastore calls +// made during a transaction. +type DatastoreExternalTotals struct { + externalCallCount uint64 + externalDuration time.Duration + datastoreCallCount uint64 + datastoreDuration time.Duration +} + +// WriteJSON prepares JSON in the format expected by the collector. +func (e *TxnEvent) WriteJSON(buf *bytes.Buffer) { + w := jsonFieldsWriter{buf: buf} + buf.WriteByte('[') + buf.WriteByte('{') + w.stringField("type", "Transaction") + w.stringField("name", e.FinalName) + w.floatField("timestamp", timeToFloatSeconds(e.Start)) + if ApdexNone != e.Zone { + w.stringField("nr.apdexPerfZone", e.Zone.label()) + } + + w.boolField("error", e.HasError) + + sharedTransactionIntrinsics(e, &w) + + // totalTime gets put into transaction events but not error events: + // https://source.datanerd.us/agents/agent-specs/blob/master/Total-Time-Async.md#attributes + w.floatField("totalTime", e.TotalTime.Seconds()) + + // Write better CAT intrinsics if enabled + sharedBetterCATIntrinsics(e, &w) + + if e.BetterCAT.Enabled { + if p := e.BetterCAT.Inbound; nil != p { + if "" != p.TransactionID { + w.stringField("parentId", p.TransactionID) + } + + if "" != p.ID { + w.stringField("parentSpanId", p.ID) + } + } + } + + // Write old CAT intrinsics if enabled + oldCATIntrinsics(e, &w) + + buf.WriteByte('}') + buf.WriteByte(',') + userAttributesJSON(e.Attrs, buf, destTxnEvent, nil) + buf.WriteByte(',') + agentAttributesJSON(e.Attrs, buf, destTxnEvent) + buf.WriteByte(']') +} + +// oldCATIntrinsics reports old CAT intrinsics for Transaction +// if CrossProcess.Used() is true +func oldCATIntrinsics(e *TxnEvent, w *jsonFieldsWriter) { + if !e.CrossProcess.Used() { + return + } + + if e.CrossProcess.ClientID != "" { + w.stringField("client_cross_process_id", e.CrossProcess.ClientID) + } + if e.CrossProcess.TripID != "" { + w.stringField("nr.tripId", e.CrossProcess.TripID) + } + if e.CrossProcess.PathHash != "" { + w.stringField("nr.pathHash", e.CrossProcess.PathHash) + } + if e.CrossProcess.ReferringPathHash != "" { + w.stringField("nr.referringPathHash", e.CrossProcess.ReferringPathHash) + } + if 
e.CrossProcess.GUID != "" { + w.stringField("nr.guid", e.CrossProcess.GUID) + } + if e.CrossProcess.ReferringTxnGUID != "" { + w.stringField("nr.referringTransactionGuid", e.CrossProcess.ReferringTxnGUID) + } + if len(e.CrossProcess.AlternatePathHashes) > 0 { + hashes := make([]string, 0, len(e.CrossProcess.AlternatePathHashes)) + for hash := range e.CrossProcess.AlternatePathHashes { + hashes = append(hashes, hash) + } + sort.Strings(hashes) + w.stringField("nr.alternatePathHashes", strings.Join(hashes, ",")) + } +} + +// sharedTransactionIntrinsics reports intrinsics that are shared +// by Transaction and TransactionError +func sharedTransactionIntrinsics(e *TxnEvent, w *jsonFieldsWriter) { + w.floatField("duration", e.Duration.Seconds()) + if e.Queuing > 0 { + w.floatField("queueDuration", e.Queuing.Seconds()) + } + if e.externalCallCount > 0 { + w.intField("externalCallCount", int64(e.externalCallCount)) + w.floatField("externalDuration", e.externalDuration.Seconds()) + } + if e.datastoreCallCount > 0 { + // Note that "database" is used for the keys here instead of + // "datastore" for historical reasons. + w.intField("databaseCallCount", int64(e.datastoreCallCount)) + w.floatField("databaseDuration", e.datastoreDuration.Seconds()) + } + + if e.CrossProcess.IsSynthetics() { + w.stringField("nr.syntheticsResourceId", e.CrossProcess.Synthetics.ResourceID) + w.stringField("nr.syntheticsJobId", e.CrossProcess.Synthetics.JobID) + w.stringField("nr.syntheticsMonitorId", e.CrossProcess.Synthetics.MonitorID) + } +} + +// sharedBetterCATIntrinsics reports intrinsics that are shared +// by Transaction, TransactionError, and Slow SQL +func sharedBetterCATIntrinsics(e *TxnEvent, w *jsonFieldsWriter) { + if e.BetterCAT.Enabled { + if p := e.BetterCAT.Inbound; nil != p { + w.stringField("parent.type", p.Type) + w.stringField("parent.app", p.App) + w.stringField("parent.account", p.Account) + w.stringField("parent.transportType", p.TransportType) + w.floatField("parent.transportDuration", p.TransportDuration.Seconds()) + } + + w.stringField("guid", e.BetterCAT.ID) + w.stringField("traceId", e.BetterCAT.TraceID()) + w.writerField("priority", e.BetterCAT.Priority) + w.boolField("sampled", e.BetterCAT.Sampled) + } +} + +// MarshalJSON is used for testing. +func (e *TxnEvent) MarshalJSON() ([]byte, error) { + buf := bytes.NewBuffer(make([]byte, 0, 256)) + + e.WriteJSON(buf) + + return buf.Bytes(), nil +} + +type txnEvents struct { + *analyticsEvents +} + +func newTxnEvents(max int) *txnEvents { + return &txnEvents{ + analyticsEvents: newAnalyticsEvents(max), + } +} + +func (events *txnEvents) AddTxnEvent(e *TxnEvent, priority Priority) { + // Synthetics events always get priority: normal event priorities are in the + // range [0.0,1.99999], so adding 2 means that a Synthetics event will always + // win. 
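+ // For example (priorities assumed for illustration): a normal event
+ // sampled at priority 0.8 competes at 0.8, while a Synthetics event
+ // sampled at 0.1 competes at 2.1 and so displaces any normal event
+ // once the reservoir is full.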
+ if e.CrossProcess.IsSynthetics() { + priority += 2.0 + } + events.addEvent(analyticsEvent{priority: priority, jsonWriter: e}) +} + +func (events *txnEvents) MergeIntoHarvest(h *Harvest) { + h.TxnEvents.mergeFailed(events.analyticsEvents) +} + +func (events *txnEvents) Data(agentRunID string, harvestStart time.Time) ([]byte, error) { + return events.CollectorJSON(agentRunID) +} + +func (events *txnEvents) EndpointMethod() string { + return cmdTxnEvents +} + +func (events *txnEvents) payloads(limit int) []PayloadCreator { + if events.NumSaved() < float64(limit) { + return []PayloadCreator{events} + } + e1, e2 := events.split() + return []PayloadCreator{ + &txnEvents{analyticsEvents: e1}, + &txnEvents{analyticsEvents: e2}, + } +} diff --git a/vendor/github.com/newrelic/go-agent/internal/txn_trace.go b/vendor/github.com/newrelic/go-agent/internal/txn_trace.go new file mode 100644 index 00000000000..04b2f6e6d98 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/txn_trace.go @@ -0,0 +1,447 @@ +package internal + +import ( + "bytes" + "container/heap" + "sort" + "time" + + "github.com/newrelic/go-agent/internal/jsonx" +) + +// See https://source.datanerd.us/agents/agent-specs/blob/master/Transaction-Trace-LEGACY.md + +type traceNodeHeap []traceNode + +type traceNodeParams struct { + attributes map[SpanAttribute]jsonWriter + StackTrace StackTrace + TransactionGUID string + exclusiveDurationMillis *float64 +} + +type traceNode struct { + start segmentTime + stop segmentTime + threadID uint64 + duration time.Duration + traceNodeParams + name string +} + +func (h traceNodeHeap) Len() int { return len(h) } +func (h traceNodeHeap) Less(i, j int) bool { return h[i].duration < h[j].duration } +func (h traceNodeHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } + +// Push and Pop are unused: only heap.Init and heap.Fix are used. +func (h traceNodeHeap) Push(x interface{}) {} +func (h traceNodeHeap) Pop() interface{} { return nil } + +// TxnTrace contains the work in progress transaction trace. +type TxnTrace struct { + Enabled bool + SegmentThreshold time.Duration + StackTraceThreshold time.Duration + nodes traceNodeHeap + maxNodes int +} + +// getMaxNodes allows the maximum number of nodes to be overwritten for unit +// tests. +func (trace *TxnTrace) getMaxNodes() int { + if 0 != trace.maxNodes { + return trace.maxNodes + } + return maxTxnTraceNodes +} + +// considerNode exists to prevent unnecessary calls to witnessNode: constructing +// the metric name and params map requires allocations. 
+func (trace *TxnTrace) considerNode(end segmentEnd) bool { + return trace.Enabled && (end.duration >= trace.SegmentThreshold) +} + +func (trace *TxnTrace) witnessNode(end segmentEnd, name string, attrs spanAttributeMap, externalGUID string) { + node := traceNode{ + start: end.start, + stop: end.stop, + duration: end.duration, + threadID: end.threadID, + name: name, + } + node.attributes = attrs + node.TransactionGUID = externalGUID + if !trace.considerNode(end) { + return + } + if trace.nodes == nil { + trace.nodes = make(traceNodeHeap, 0, startingTxnTraceNodes) + } + if end.exclusive >= trace.StackTraceThreshold { + node.StackTrace = GetStackTrace() + } + if max := trace.getMaxNodes(); len(trace.nodes) < max { + trace.nodes = append(trace.nodes, node) + if len(trace.nodes) == max { + heap.Init(trace.nodes) + } + return + } + + if node.duration <= trace.nodes[0].duration { + return + } + trace.nodes[0] = node + heap.Fix(trace.nodes, 0) +} + +// HarvestTrace contains a finished transaction trace ready for serialization to +// the collector. +type HarvestTrace struct { + TxnEvent + Trace TxnTrace +} + +type nodeDetails struct { + name string + relativeStart time.Duration + relativeStop time.Duration + traceNodeParams +} + +func printNodeStart(buf *bytes.Buffer, n nodeDetails) { + // time.Seconds() is intentionally not used here. Millisecond + // precision is enough. + relativeStartMillis := n.relativeStart.Nanoseconds() / (1000 * 1000) + relativeStopMillis := n.relativeStop.Nanoseconds() / (1000 * 1000) + + buf.WriteByte('[') + jsonx.AppendInt(buf, relativeStartMillis) + buf.WriteByte(',') + jsonx.AppendInt(buf, relativeStopMillis) + buf.WriteByte(',') + jsonx.AppendString(buf, n.name) + buf.WriteByte(',') + + w := jsonFieldsWriter{buf: buf} + buf.WriteByte('{') + if nil != n.StackTrace { + w.writerField("backtrace", n.StackTrace) + } + if nil != n.exclusiveDurationMillis { + w.floatField("exclusive_duration_millis", *n.exclusiveDurationMillis) + } + if "" != n.TransactionGUID { + w.stringField("transaction_guid", n.TransactionGUID) + } + for k, v := range n.attributes { + w.writerField(k.String(), v) + } + buf.WriteByte('}') + + buf.WriteByte(',') + buf.WriteByte('[') +} + +func printChildren(buf *bytes.Buffer, traceStart time.Time, nodes sortedTraceNodes, next int, stop *segmentStamp, threadID uint64) int { + firstChild := true + for { + if next >= len(nodes) { + // No more children to print. + break + } + if nodes[next].threadID != threadID { + // The next node is not of the same thread. Due to the + // node sorting, all nodes of the same thread should be + // together. + break + } + if stop != nil && nodes[next].start.Stamp >= *stop { + // Make sure this node is a child of the parent that is + // being printed. + break + } + if firstChild { + firstChild = false + } else { + buf.WriteByte(',') + } + printNodeStart(buf, nodeDetails{ + name: nodes[next].name, + relativeStart: nodes[next].start.Time.Sub(traceStart), + relativeStop: nodes[next].stop.Time.Sub(traceStart), + traceNodeParams: nodes[next].traceNodeParams, + }) + next = printChildren(buf, traceStart, nodes, next+1, &nodes[next].stop.Stamp, threadID) + buf.WriteString("]]") + + } + return next +} + +type sortedTraceNodes []*traceNode + +func (s sortedTraceNodes) Len() int { return len(s) } +func (s sortedTraceNodes) Less(i, j int) bool { + // threadID is the first sort key and start.Stamp is the second key. 
+ if s[i].threadID == s[j].threadID { + return s[i].start.Stamp < s[j].start.Stamp + } + return s[i].threadID < s[j].threadID +} +func (s sortedTraceNodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// MarshalJSON is used for testing. +// +// TODO: Eliminate this entirely by using harvestTraces.Data(). +func (trace *HarvestTrace) MarshalJSON() ([]byte, error) { + buf := bytes.NewBuffer(make([]byte, 0, 100+100*trace.Trace.nodes.Len())) + + trace.writeJSON(buf) + + return buf.Bytes(), nil +} + +func (trace *HarvestTrace) writeJSON(buf *bytes.Buffer) { + nodes := make(sortedTraceNodes, len(trace.Trace.nodes)) + for i := 0; i < len(nodes); i++ { + nodes[i] = &trace.Trace.nodes[i] + } + sort.Sort(nodes) + + buf.WriteByte('[') // begin trace + + jsonx.AppendInt(buf, trace.Start.UnixNano()/1000) + buf.WriteByte(',') + jsonx.AppendFloat(buf, trace.Duration.Seconds()*1000.0) + buf.WriteByte(',') + jsonx.AppendString(buf, trace.FinalName) + buf.WriteByte(',') + if uri, _ := trace.Attrs.GetAgentValue(attributeRequestURI, destTxnTrace); "" != uri { + jsonx.AppendString(buf, uri) + } else { + buf.WriteString("null") + } + buf.WriteByte(',') + + buf.WriteByte('[') // begin trace data + + // If the trace string pool is used, insert another array here. + + jsonx.AppendFloat(buf, 0.0) // unused timestamp + buf.WriteByte(',') // + buf.WriteString("{}") // unused: formerly request parameters + buf.WriteByte(',') // + buf.WriteString("{}") // unused: formerly custom parameters + buf.WriteByte(',') // + + printNodeStart(buf, nodeDetails{ // begin outer root + name: "ROOT", + relativeStart: 0, + relativeStop: trace.Duration, + }) + + // exclusive_duration_millis field is added to fix the transaction trace + // summary tab. If exclusive_duration_millis is not provided, the UIs + // will calculate exclusive time, which doesn't work for this root node + // since all async goroutines are children of this root. + exclusiveDurationMillis := trace.Duration.Seconds() * 1000.0 + details := nodeDetails{ // begin inner root + name: trace.FinalName, + relativeStart: 0, + relativeStop: trace.Duration, + } + details.exclusiveDurationMillis = &exclusiveDurationMillis + printNodeStart(buf, details) + + for next := 0; next < len(nodes); { + if next > 0 { + buf.WriteByte(',') + } + // We put each thread's nodes into the root node instead of the + // node that spawned the thread. This approach is simple and + // works when the segment which spawned a thread has been pruned + // from the trace. Each call to printChildren prints one + // thread. + next = printChildren(buf, trace.Start, nodes, next, nil, nodes[next].threadID) + } + + buf.WriteString("]]") // end outer root + buf.WriteString("]]") // end inner root + + buf.WriteByte(',') + buf.WriteByte('{') + buf.WriteString(`"agentAttributes":`) + agentAttributesJSON(trace.Attrs, buf, destTxnTrace) + buf.WriteByte(',') + buf.WriteString(`"userAttributes":`) + userAttributesJSON(trace.Attrs, buf, destTxnTrace, nil) + buf.WriteByte(',') + buf.WriteString(`"intrinsics":`) + intrinsicsJSON(&trace.TxnEvent, buf) + buf.WriteByte('}') + + // If the trace string pool is used, end another array here. 
+ + buf.WriteByte(']') // end trace data + + buf.WriteByte(',') + if trace.CrossProcess.Used() && trace.CrossProcess.GUID != "" { + jsonx.AppendString(buf, trace.CrossProcess.GUID) + } else { + buf.WriteString(`""`) + } + buf.WriteByte(',') // + buf.WriteString(`null`) // reserved for future use + buf.WriteByte(',') // + buf.WriteString(`false`) // ForcePersist is not yet supported + buf.WriteByte(',') // + buf.WriteString(`null`) // X-Ray sessions not supported + buf.WriteByte(',') // + + // Synthetics are supported: + if trace.CrossProcess.IsSynthetics() { + jsonx.AppendString(buf, trace.CrossProcess.Synthetics.ResourceID) + } else { + buf.WriteString(`""`) + } + + buf.WriteByte(']') // end trace +} + +type txnTraceHeap []*HarvestTrace + +func (h *txnTraceHeap) isEmpty() bool { + return 0 == len(*h) +} + +func newTxnTraceHeap(max int) *txnTraceHeap { + h := make(txnTraceHeap, 0, max) + heap.Init(&h) + return &h +} + +// Implement sort.Interface. +func (h txnTraceHeap) Len() int { return len(h) } +func (h txnTraceHeap) Less(i, j int) bool { return h[i].Duration < h[j].Duration } +func (h txnTraceHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } + +// Implement heap.Interface. +func (h *txnTraceHeap) Push(x interface{}) { *h = append(*h, x.(*HarvestTrace)) } + +func (h *txnTraceHeap) Pop() interface{} { + old := *h + n := len(old) + x := old[n-1] + *h = old[0 : n-1] + return x +} + +func (h *txnTraceHeap) isKeeper(t *HarvestTrace) bool { + if len(*h) < cap(*h) { + return true + } + return t.Duration >= (*h)[0].Duration +} + +func (h *txnTraceHeap) addTxnTrace(t *HarvestTrace) { + if len(*h) < cap(*h) { + heap.Push(h, t) + return + } + + if t.Duration <= (*h)[0].Duration { + return + } + heap.Pop(h) + heap.Push(h, t) +} + +type harvestTraces struct { + regular *txnTraceHeap + synthetics *txnTraceHeap +} + +func newHarvestTraces() *harvestTraces { + return &harvestTraces{ + regular: newTxnTraceHeap(maxRegularTraces), + synthetics: newTxnTraceHeap(maxSyntheticsTraces), + } +} + +func (traces *harvestTraces) Len() int { + return traces.regular.Len() + traces.synthetics.Len() +} + +func (traces *harvestTraces) Witness(trace HarvestTrace) { + traceHeap := traces.regular + if trace.CrossProcess.IsSynthetics() { + traceHeap = traces.synthetics + } + + if traceHeap.isKeeper(&trace) { + cpy := new(HarvestTrace) + *cpy = trace + traceHeap.addTxnTrace(cpy) + } +} + +func (traces *harvestTraces) Data(agentRunID string, harvestStart time.Time) ([]byte, error) { + if traces.Len() == 0 { + return nil, nil + } + + // This estimate is used to guess the size of the buffer. No worries if + // the estimate is small since the buffer will be lengthened as + // necessary. This is just about minimizing reallocations. 
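+ // For example (sizes assumed): two regular traces of 40 nodes each start
+ // the buffer at 512 + 2*100*40 = 8512 bytes.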
+ estimate := 512 + for _, t := range *traces.regular { + estimate += 100 * t.Trace.nodes.Len() + } + for _, t := range *traces.synthetics { + estimate += 100 * t.Trace.nodes.Len() + } + + buf := bytes.NewBuffer(make([]byte, 0, estimate)) + buf.WriteByte('[') + jsonx.AppendString(buf, agentRunID) + buf.WriteByte(',') + buf.WriteByte('[') + + // use a function to add traces to the buffer to avoid duplicating comma + // logic in both loops + firstTrace := true + addTrace := func(trace *HarvestTrace) { + if firstTrace { + firstTrace = false + } else { + buf.WriteByte(',') + } + trace.writeJSON(buf) + } + + for _, trace := range *traces.regular { + addTrace(trace) + } + for _, trace := range *traces.synthetics { + addTrace(trace) + } + buf.WriteByte(']') + buf.WriteByte(']') + + return buf.Bytes(), nil +} + +func (traces *harvestTraces) slice() []*HarvestTrace { + out := make([]*HarvestTrace, 0, traces.Len()) + out = append(out, (*traces.regular)...) + out = append(out, (*traces.synthetics)...) + + return out +} + +func (traces *harvestTraces) MergeIntoHarvest(h *Harvest) {} + +func (traces *harvestTraces) EndpointMethod() string { + return cmdTxnTraces +} diff --git a/vendor/github.com/newrelic/go-agent/internal/url.go b/vendor/github.com/newrelic/go-agent/internal/url.go new file mode 100644 index 00000000000..21976ee4fc9 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/url.go @@ -0,0 +1,43 @@ +package internal + +import "net/url" + +// SafeURL removes sensitive information from a URL. +func SafeURL(u *url.URL) string { + if nil == u { + return "" + } + if "" != u.Opaque { + // If the URL is opaque, we cannot be sure if it contains + // sensitive information. + return "" + } + + // Omit user, query, and fragment information for security. + ur := url.URL{ + Scheme: u.Scheme, + Host: u.Host, + Path: u.Path, + } + return ur.String() +} + +// SafeURLFromString removes sensitive information from a URL. +func SafeURLFromString(rawurl string) string { + u, err := url.Parse(rawurl) + if nil != err { + return "" + } + return SafeURL(u) +} + +// HostFromURL returns the URL's host. +func HostFromURL(u *url.URL) string { + if nil == u { + return "" + } + if "" != u.Opaque { + return "opaque" + } + return u.Host +} diff --git a/vendor/github.com/newrelic/go-agent/internal/utilities.go b/vendor/github.com/newrelic/go-agent/internal/utilities.go new file mode 100644 index 00000000000..75566f0ea9c --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/utilities.go @@ -0,0 +1,106 @@ +package internal + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" + "strconv" + "strings" + "time" +) + +// JSONString assists in logging JSON: Based on the formatter used to log +// Context contents, the contents could be marshalled as JSON or just printed +// directly. +type JSONString string + +// MarshalJSON returns the JSONString unmodified without any escaping. +func (js JSONString) MarshalJSON() ([]byte, error) { + if "" == js { + return []byte("null"), nil + } + return []byte(js), nil +} + +func removeFirstSegment(name string) string { + idx := strings.Index(name, "/") + if -1 == idx { + return name + } + return name[idx+1:] +} + +func timeToFloatSeconds(t time.Time) float64 { + return float64(t.UnixNano()) / float64(1000*1000*1000) +} + +func timeToFloatMilliseconds(t time.Time) float64 { + return float64(t.UnixNano()) / float64(1000*1000) +} + +// FloatSecondsToDuration turns a float64 in seconds into a time.Duration. 
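+//
+// For example, FloatSecondsToDuration(0.25) yields 250 * time.Millisecond.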
+func FloatSecondsToDuration(seconds float64) time.Duration {
+ nanos := seconds * 1000 * 1000 * 1000
+ return time.Duration(nanos) * time.Nanosecond
+}
+
+func absTimeDiff(t1, t2 time.Time) time.Duration {
+ if t1.After(t2) {
+ return t1.Sub(t2)
+ }
+ return t2.Sub(t1)
+}
+
+// CompactJSONString removes the whitespace from a JSON string. This function
+// will panic if the string provided is not valid JSON. Thus it must only be
+// used in testing code!
+func CompactJSONString(js string) string {
+ buf := new(bytes.Buffer)
+ if err := json.Compact(buf, []byte(js)); err != nil {
+ panic(fmt.Errorf("unable to compact JSON: %v", err))
+ }
+ return buf.String()
+}
+
+// GetContentLengthFromHeader gets the content length from an HTTP header, or -1
+// if no content length is available.
+func GetContentLengthFromHeader(h http.Header) int64 {
+ if cl := h.Get("Content-Length"); cl != "" {
+ if contentLength, err := strconv.ParseInt(cl, 10, 64); err == nil {
+ return contentLength
+ }
+ }
+
+ return -1
+}
+
+// StringLengthByteLimit truncates strings using a byte-limit boundary and
+// avoids terminating in the middle of a multibyte character.
+func StringLengthByteLimit(str string, byteLimit int) string {
+ if len(str) <= byteLimit {
+ return str
+ }
+
+ limitIndex := 0
+ for pos := range str {
+ if pos > byteLimit {
+ break
+ }
+ limitIndex = pos
+ }
+ return str[0:limitIndex]
+}
+
+func timeFromUnixMilliseconds(millis uint64) time.Time {
+ secs := int64(millis) / 1000
+ msecsRemaining := int64(millis) % 1000
+ nsecsRemaining := msecsRemaining * (1000 * 1000)
+ return time.Unix(secs, nsecsRemaining)
+}
+
+// TimeToUnixMilliseconds converts a time into a Unix timestamp in millisecond
+// units.
+func TimeToUnixMilliseconds(tm time.Time) uint64 {
+ return uint64(tm.UnixNano()) / uint64(1000*1000)
+}
diff --git a/vendor/github.com/newrelic/go-agent/internal/utilization/addresses.go b/vendor/github.com/newrelic/go-agent/internal/utilization/addresses.go
new file mode 100644
index 00000000000..a6f64a1a2ef
--- /dev/null
+++ b/vendor/github.com/newrelic/go-agent/internal/utilization/addresses.go
@@ -0,0 +1,73 @@
+package utilization
+
+import (
+ "fmt"
+ "net"
+)
+
+func nonlocalIPAddressesByInterface() (map[string][]string, error) {
+ ifaces, err := net.Interfaces()
+ if err != nil {
+ return nil, err
+ }
+ ips := make(map[string][]string, len(ifaces))
+ for _, ifc := range ifaces {
+ addrs, err := ifc.Addrs()
+ if err != nil {
+ continue
+ }
+ for _, addr := range addrs {
+ var ip net.IP
+ switch iptype := addr.(type) {
+ case *net.IPAddr:
+ ip = iptype.IP
+ case *net.IPNet:
+ ip = iptype.IP
+ case *net.TCPAddr:
+ ip = iptype.IP
+ case *net.UDPAddr:
+ ip = iptype.IP
+ }
+ if nil != ip && !ip.IsLoopback() && !ip.IsUnspecified() {
+ ips[ifc.Name] = append(ips[ifc.Name], ip.String())
+ }
+ }
+ }
+ return ips, nil
+}
+
+// utilizationIPs gathers IP addresses which may help identify this entity. This
+// code chooses all IPs from the interface which contains the IP of a UDP
+// connection with NR. This approach has the following advantages:
+// * Matches the behavior of the Java agent.
+// * Reports fewer IPs to lower linking burden on infrastructure backend.
+// * The UDP connection interface is more likely to contain unique external IPs.
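+//
+// For example (addresses assumed): if the UDP socket binds to 10.0.0.5 on
+// eth0, every non-loopback, non-unspecified address assigned to eth0 is
+// reported.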
+func utilizationIPs() ([]string, error) { + // Port choice designed to match + // https://source.datanerd.us/java-agent/java_agent/blob/master/newrelic-agent/src/main/java/com/newrelic/agent/config/Hostname.java#L110 + conn, err := net.Dial("udp", "newrelic.com:10002") + if err != nil { + return nil, err + } + defer conn.Close() + + addr, ok := conn.LocalAddr().(*net.UDPAddr) + + if !ok || nil == addr || addr.IP.IsLoopback() || addr.IP.IsUnspecified() { + return nil, fmt.Errorf("unexpected connection address: %v", conn.LocalAddr()) + } + outboundIP := addr.IP.String() + + ipsByInterface, err := nonlocalIPAddressesByInterface() + if err != nil { + return nil, err + } + for _, ips := range ipsByInterface { + for _, ip := range ips { + if ip == outboundIP { + return ips, nil + } + } + } + return nil, nil +} diff --git a/vendor/github.com/newrelic/go-agent/internal/utilization/aws.go b/vendor/github.com/newrelic/go-agent/internal/utilization/aws.go new file mode 100644 index 00000000000..0651d259ad3 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/utilization/aws.go @@ -0,0 +1,89 @@ +package utilization + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" +) + +const ( + awsHostname = "169.254.169.254" + awsEndpointPath = "/2016-09-02/dynamic/instance-identity/document" + awsEndpoint = "http://" + awsHostname + awsEndpointPath +) + +type aws struct { + InstanceID string `json:"instanceId,omitempty"` + InstanceType string `json:"instanceType,omitempty"` + AvailabilityZone string `json:"availabilityZone,omitempty"` +} + +func gatherAWS(util *Data, client *http.Client) error { + aws, err := getAWS(client) + if err != nil { + // Only return the error here if it is unexpected to prevent + // warning customers who aren't running AWS about a timeout. + if _, ok := err.(unexpectedAWSErr); ok { + return err + } + return nil + } + util.Vendors.AWS = aws + + return nil +} + +type unexpectedAWSErr struct{ e error } + +func (e unexpectedAWSErr) Error() string { + return fmt.Sprintf("unexpected AWS error: %v", e.e) +} + +func getAWS(client *http.Client) (*aws, error) { + response, err := client.Get(awsEndpoint) + if err != nil { + // No unexpectedAWSErr here: A timeout is usually going to + // happen. 
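+ // (Hosts outside EC2 cannot reach the link-local metadata address
+ // 169.254.169.254, so the request simply times out there.)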
+ return nil, err
+ }
+ defer response.Body.Close()
+
+ if response.StatusCode != 200 {
+ return nil, unexpectedAWSErr{e: fmt.Errorf("response code %d", response.StatusCode)}
+ }
+
+ data, err := ioutil.ReadAll(response.Body)
+ if err != nil {
+ return nil, unexpectedAWSErr{e: err}
+ }
+ a := &aws{}
+ if err := json.Unmarshal(data, a); err != nil {
+ return nil, unexpectedAWSErr{e: err}
+ }
+
+ if err := a.validate(); err != nil {
+ return nil, unexpectedAWSErr{e: err}
+ }
+
+ return a, nil
+}
+
+func (a *aws) validate() (err error) {
+ a.InstanceID, err = normalizeValue(a.InstanceID)
+ if err != nil {
+ return fmt.Errorf("invalid instance ID: %v", err)
+ }
+
+ a.InstanceType, err = normalizeValue(a.InstanceType)
+ if err != nil {
+ return fmt.Errorf("invalid instance type: %v", err)
+ }
+
+ a.AvailabilityZone, err = normalizeValue(a.AvailabilityZone)
+ if err != nil {
+ return fmt.Errorf("invalid availability zone: %v", err)
+ }
+
+ return
+}
diff --git a/vendor/github.com/newrelic/go-agent/internal/utilization/azure.go b/vendor/github.com/newrelic/go-agent/internal/utilization/azure.go
new file mode 100644
index 00000000000..aea6a1b027a
--- /dev/null
+++ b/vendor/github.com/newrelic/go-agent/internal/utilization/azure.go
@@ -0,0 +1,102 @@
+package utilization
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+)
+
+const (
+ azureHostname = "169.254.169.254"
+ azureEndpointPath = "/metadata/instance/compute?api-version=2017-03-01"
+ azureEndpoint = "http://" + azureHostname + azureEndpointPath
+)
+
+type azure struct {
+ Location string `json:"location,omitempty"`
+ Name string `json:"name,omitempty"`
+ VMID string `json:"vmId,omitempty"`
+ VMSize string `json:"vmSize,omitempty"`
+}
+
+func gatherAzure(util *Data, client *http.Client) error {
+ az, err := getAzure(client)
+ if err != nil {
+ // Only return the error here if it is unexpected to prevent
+ // warning customers who aren't running Azure about a timeout.
+ if _, ok := err.(unexpectedAzureErr); ok {
+ return err
+ }
+ return nil
+ }
+ util.Vendors.Azure = az
+
+ return nil
+}
+
+type unexpectedAzureErr struct{ e error }
+
+func (e unexpectedAzureErr) Error() string {
+ return fmt.Sprintf("unexpected Azure error: %v", e.e)
+}
+
+func getAzure(client *http.Client) (*azure, error) {
+ req, err := http.NewRequest("GET", azureEndpoint, nil)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Add("Metadata", "true")
+
+ response, err := client.Do(req)
+ if err != nil {
+ // No unexpectedAzureErr here: a timeout is usually going to
+ // happen.
+ return nil, err + } + defer response.Body.Close() + + if response.StatusCode != 200 { + return nil, unexpectedAzureErr{e: fmt.Errorf("response code %d", response.StatusCode)} + } + + data, err := ioutil.ReadAll(response.Body) + if err != nil { + return nil, unexpectedAzureErr{e: err} + } + + az := &azure{} + if err := json.Unmarshal(data, az); err != nil { + return nil, unexpectedAzureErr{e: err} + } + + if err := az.validate(); err != nil { + return nil, unexpectedAzureErr{e: err} + } + + return az, nil +} + +func (az *azure) validate() (err error) { + az.Location, err = normalizeValue(az.Location) + if err != nil { + return fmt.Errorf("Invalid location: %v", err) + } + + az.Name, err = normalizeValue(az.Name) + if err != nil { + return fmt.Errorf("Invalid name: %v", err) + } + + az.VMID, err = normalizeValue(az.VMID) + if err != nil { + return fmt.Errorf("Invalid VM ID: %v", err) + } + + az.VMSize, err = normalizeValue(az.VMSize) + if err != nil { + return fmt.Errorf("Invalid VM size: %v", err) + } + + return +} diff --git a/vendor/github.com/newrelic/go-agent/internal/utilization/fqdn.go b/vendor/github.com/newrelic/go-agent/internal/utilization/fqdn.go new file mode 100644 index 00000000000..d44b1a202ff --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/utilization/fqdn.go @@ -0,0 +1,28 @@ +// +build go1.8 + +package utilization + +import ( + "context" + "net" + "strings" +) + +func lookupAddr(addr string) ([]string, error) { + ctx, cancel := context.WithTimeout(context.Background(), lookupAddrTimeout) + defer cancel() + + r := &net.Resolver{} + + return r.LookupAddr(ctx, addr) +} + +func getFQDN(candidateIPs []string) string { + for _, ip := range candidateIPs { + names, _ := lookupAddr(ip) + if len(names) > 0 { + return strings.TrimSuffix(names[0], ".") + } + } + return "" +} diff --git a/vendor/github.com/newrelic/go-agent/internal/utilization/fqdn_pre18.go b/vendor/github.com/newrelic/go-agent/internal/utilization/fqdn_pre18.go new file mode 100644 index 00000000000..0f549ed21c1 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/utilization/fqdn_pre18.go @@ -0,0 +1,11 @@ +// +build !go1.8 + +package utilization + +// net.Resolver.LookupAddr was added in Go 1.8, and net.LookupAddr does not have +// a controllable timeout, so we skip the optional full_hostname on pre 1.8 +// versions. + +func getFQDN(candidateIPs []string) string { + return "" +} diff --git a/vendor/github.com/newrelic/go-agent/internal/utilization/gcp.go b/vendor/github.com/newrelic/go-agent/internal/utilization/gcp.go new file mode 100644 index 00000000000..e79c8e35190 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/utilization/gcp.go @@ -0,0 +1,152 @@ +package utilization + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" +) + +const ( + gcpHostname = "metadata.google.internal" + gcpEndpointPath = "/computeMetadata/v1/instance/?recursive=true" + gcpEndpoint = "http://" + gcpHostname + gcpEndpointPath +) + +func gatherGCP(util *Data, client *http.Client) error { + gcp, err := getGCP(client) + if err != nil { + // Only return the error here if it is unexpected to prevent + // warning customers who aren't running GCP about a timeout. + if _, ok := err.(unexpectedGCPErr); ok { + return err + } + return nil + } + util.Vendors.GCP = gcp + + return nil +} + +// numericString is used rather than json.Number because we want the output when +// marshalled to be a string, rather than a number. 
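+//
+// For example, metadata fields of id:12345 (JSON number) and id:"12345"
+// (JSON string) both unmarshal to numericString("12345"), and each marshals
+// back out as the JSON string "12345".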
+type numericString string + +func (ns *numericString) MarshalJSON() ([]byte, error) { + return json.Marshal(ns.String()) +} + +func (ns *numericString) String() string { + return string(*ns) +} + +func (ns *numericString) UnmarshalJSON(data []byte) error { + var n int64 + + // Try to unmarshal as an integer first. + if err := json.Unmarshal(data, &n); err == nil { + *ns = numericString(fmt.Sprintf("%d", n)) + return nil + } + + // Otherwise, unmarshal as a string, and verify that it's numeric (for our + // definition of numeric, which is actually integral). + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + + for _, r := range s { + if r < '0' || r > '9' { + return fmt.Errorf("invalid numeric character: %c", r) + } + } + + *ns = numericString(s) + return nil +} + +type gcp struct { + ID numericString `json:"id"` + MachineType string `json:"machineType,omitempty"` + Name string `json:"name,omitempty"` + Zone string `json:"zone,omitempty"` +} + +type unexpectedGCPErr struct{ e error } + +func (e unexpectedGCPErr) Error() string { + return fmt.Sprintf("unexpected GCP error: %v", e.e) +} + +func getGCP(client *http.Client) (*gcp, error) { + // GCP's metadata service requires a Metadata-Flavor header because... hell, I + // don't know, maybe they really like Guy Fieri? + req, err := http.NewRequest("GET", gcpEndpoint, nil) + if err != nil { + return nil, err + } + req.Header.Add("Metadata-Flavor", "Google") + + response, err := client.Do(req) + if err != nil { + return nil, err + } + defer response.Body.Close() + + if response.StatusCode != 200 { + return nil, unexpectedGCPErr{e: fmt.Errorf("response code %d", response.StatusCode)} + } + + data, err := ioutil.ReadAll(response.Body) + if err != nil { + return nil, unexpectedGCPErr{e: err} + } + + g := &gcp{} + if err := json.Unmarshal(data, g); err != nil { + return nil, unexpectedGCPErr{e: err} + } + + if err := g.validate(); err != nil { + return nil, unexpectedGCPErr{e: err} + } + + return g, nil +} + +func (g *gcp) validate() (err error) { + id, err := normalizeValue(g.ID.String()) + if err != nil { + return fmt.Errorf("Invalid ID: %v", err) + } + g.ID = numericString(id) + + mt, err := normalizeValue(g.MachineType) + if err != nil { + return fmt.Errorf("Invalid machine type: %v", err) + } + g.MachineType = stripGCPPrefix(mt) + + g.Name, err = normalizeValue(g.Name) + if err != nil { + return fmt.Errorf("Invalid name: %v", err) + } + + zone, err := normalizeValue(g.Zone) + if err != nil { + return fmt.Errorf("Invalid zone: %v", err) + } + g.Zone = stripGCPPrefix(zone) + + return +} + +// We're only interested in the last element of slash separated paths for the +// machine type and zone values, so this function handles stripping the parts +// we don't need. 
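+//
+// For example (path shape assumed from GCP's metadata responses), a machine
+// type of "projects/123456/machineTypes/n1-standard-1" becomes "n1-standard-1".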
+func stripGCPPrefix(s string) string {
+ parts := strings.Split(s, "/")
+ return parts[len(parts)-1]
+}
diff --git a/vendor/github.com/newrelic/go-agent/internal/utilization/pcf.go b/vendor/github.com/newrelic/go-agent/internal/utilization/pcf.go
new file mode 100644
index 00000000000..de0250a1899
--- /dev/null
+++ b/vendor/github.com/newrelic/go-agent/internal/utilization/pcf.go
@@ -0,0 +1,80 @@
+package utilization
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+ "os"
+)
+
+type pcf struct {
+ InstanceGUID string `json:"cf_instance_guid,omitempty"`
+ InstanceIP string `json:"cf_instance_ip,omitempty"`
+ MemoryLimit string `json:"memory_limit,omitempty"`
+}
+
+func gatherPCF(util *Data, _ *http.Client) error {
+ pcf, err := getPCF(os.Getenv)
+ if err != nil {
+ // Only return the error here if it is unexpected to prevent
+ // warning customers who aren't running PCF about a timeout.
+ if _, ok := err.(unexpectedPCFErr); ok {
+ return err
+ }
+ return nil
+ }
+ util.Vendors.PCF = pcf
+
+ return nil
+}
+
+type unexpectedPCFErr struct{ e error }
+
+func (e unexpectedPCFErr) Error() string {
+ return fmt.Sprintf("unexpected PCF error: %v", e.e)
+}
+
+var (
+ errNoPCFVariables = errors.New("no PCF environment variables present")
+)
+
+func getPCF(initializer func(key string) string) (*pcf, error) {
+ p := &pcf{}
+
+ p.InstanceGUID = initializer("CF_INSTANCE_GUID")
+ p.InstanceIP = initializer("CF_INSTANCE_IP")
+ p.MemoryLimit = initializer("MEMORY_LIMIT")
+
+ if "" == p.InstanceGUID && "" == p.InstanceIP && "" == p.MemoryLimit {
+ return nil, errNoPCFVariables
+ }
+
+ if err := p.validate(); err != nil {
+ return nil, unexpectedPCFErr{e: err}
+ }
+
+ return p, nil
+}
+
+func (pcf *pcf) validate() (err error) {
+ pcf.InstanceGUID, err = normalizeValue(pcf.InstanceGUID)
+ if err != nil {
+ return fmt.Errorf("Invalid instance GUID: %v", err)
+ }
+
+ pcf.InstanceIP, err = normalizeValue(pcf.InstanceIP)
+ if err != nil {
+ return fmt.Errorf("Invalid instance IP: %v", err)
+ }
+
+ pcf.MemoryLimit, err = normalizeValue(pcf.MemoryLimit)
+ if err != nil {
+ return fmt.Errorf("Invalid memory limit: %v", err)
+ }
+
+ if pcf.InstanceGUID == "" || pcf.InstanceIP == "" || pcf.MemoryLimit == "" {
+ err = errors.New("One or more environment variables are unavailable")
+ }
+
+ return
+}
diff --git a/vendor/github.com/newrelic/go-agent/internal/utilization/provider.go b/vendor/github.com/newrelic/go-agent/internal/utilization/provider.go
new file mode 100644
index 00000000000..5d589368ada
--- /dev/null
+++ b/vendor/github.com/newrelic/go-agent/internal/utilization/provider.go
@@ -0,0 +1,60 @@
+package utilization
+
+import (
+ "fmt"
+ "strings"
+ "time"
+)
+
+// Helper constants, functions, and types common to multiple providers are
+// contained in this file.
+
+// Constants from the spec.
+const (
+ maxFieldValueSize = 255 // The maximum value size, in bytes.
+ providerTimeout = 1 * time.Second // The maximum time an HTTP provider may block.
+ lookupAddrTimeout = 500 * time.Millisecond
+)
+
+type validationError struct{ e error }
+
+func (a validationError) Error() string {
+ return a.e.Error()
+}
+
+func isValidationError(e error) bool {
+ _, is := e.(validationError)
+ return is
+}
+
+// This function normalises string values per the utilization spec.
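+//
+// For illustration (behaviour follows from the checks below):
+//
+// normalizeValue("  us-west-1  ")   // -> "us-west-1", nil
+// normalizeValue(over255ByteString) // -> "", "response is too long" error
+// normalizeValue("bad\tvalue")      // -> "", "bad character" error (tab)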
+func normalizeValue(s string) (string, error) { + out := strings.TrimSpace(s) + + bytes := []byte(out) + if len(bytes) > maxFieldValueSize { + return "", validationError{fmt.Errorf("response is too long: got %d; expected <=%d", len(bytes), maxFieldValueSize)} + } + + for i, r := range out { + if !isAcceptableRune(r) { + return "", validationError{fmt.Errorf("bad character %x at position %d in response", r, i)} + } + } + + return out, nil +} + +func isAcceptableRune(r rune) bool { + switch r { + case 0xFFFD: + return false // invalid UTF-8 + case '_', ' ', '/', '.', '-': + return true + default: + return r > 0x7f || // still allows some invalid UTF-8, but that's the spec. + ('0' <= r && r <= '9') || + ('a' <= r && r <= 'z') || + ('A' <= r && r <= 'Z') + } +} diff --git a/vendor/github.com/newrelic/go-agent/internal/utilization/utilization.go b/vendor/github.com/newrelic/go-agent/internal/utilization/utilization.go new file mode 100644 index 00000000000..5b338d2821d --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/utilization/utilization.go @@ -0,0 +1,239 @@ +// Package utilization implements the Utilization spec, available at +// https://source.datanerd.us/agents/agent-specs/blob/master/Utilization.md +// +package utilization + +import ( + "net/http" + "os" + "runtime" + "sync" + + "github.com/newrelic/go-agent/internal/logger" + "github.com/newrelic/go-agent/internal/sysinfo" +) + +const ( + metadataVersion = 5 +) + +// Config controls the behavior of utilization information capture. +type Config struct { + DetectAWS bool + DetectAzure bool + DetectGCP bool + DetectPCF bool + DetectDocker bool + DetectKubernetes bool + LogicalProcessors int + TotalRAMMIB int + BillingHostname string +} + +type override struct { + LogicalProcessors *int `json:"logical_processors,omitempty"` + TotalRAMMIB *int `json:"total_ram_mib,omitempty"` + BillingHostname string `json:"hostname,omitempty"` +} + +// Data contains utilization system information. +type Data struct { + MetadataVersion int `json:"metadata_version"` + // Although `runtime.NumCPU()` will never fail, this field is a pointer + // to facilitate the cross agent tests. + LogicalProcessors *int `json:"logical_processors"` + RAMMiB *uint64 `json:"total_ram_mib"` + Hostname string `json:"hostname"` + FullHostname string `json:"full_hostname,omitempty"` + Addresses []string `json:"ip_address,omitempty"` + BootID string `json:"boot_id,omitempty"` + Config *override `json:"config,omitempty"` + Vendors *vendors `json:"vendors,omitempty"` +} + +var ( + sampleRAMMib = uint64(1024) + sampleLogicProc = int(16) + // SampleData contains sample utilization data useful for testing. 
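+ // Marshalled, SampleData yields (omitempty fields dropped):
+ // {"metadata_version":5,"logical_processors":16,"total_ram_mib":1024,"hostname":"my-hostname"}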
+ SampleData = Data{
+ MetadataVersion: metadataVersion,
+ LogicalProcessors: &sampleLogicProc,
+ RAMMiB: &sampleRAMMib,
+ Hostname: "my-hostname",
+ }
+)
+
+type docker struct {
+ ID string `json:"id,omitempty"`
+}
+
+type kubernetes struct {
+ Host string `json:"kubernetes_service_host"`
+}
+
+type vendors struct {
+ AWS *aws `json:"aws,omitempty"`
+ Azure *azure `json:"azure,omitempty"`
+ GCP *gcp `json:"gcp,omitempty"`
+ PCF *pcf `json:"pcf,omitempty"`
+ Docker *docker `json:"docker,omitempty"`
+ Kubernetes *kubernetes `json:"kubernetes,omitempty"`
+}
+
+func (v *vendors) isEmpty() bool {
+ return nil == v || *v == vendors{}
+}
+
+func overrideFromConfig(config Config) *override {
+ ov := &override{}
+
+ if 0 != config.LogicalProcessors {
+ x := config.LogicalProcessors
+ ov.LogicalProcessors = &x
+ }
+ if 0 != config.TotalRAMMIB {
+ x := config.TotalRAMMIB
+ ov.TotalRAMMIB = &x
+ }
+ ov.BillingHostname = config.BillingHostname
+
+ if "" == ov.BillingHostname &&
+ nil == ov.LogicalProcessors &&
+ nil == ov.TotalRAMMIB {
+ ov = nil
+ }
+ return ov
+}
+
+// Gather gathers system utilization data.
+func Gather(config Config, lg logger.Logger) *Data {
+ client := &http.Client{
+ Timeout: providerTimeout,
+ }
+ return gatherWithClient(config, lg, client)
+}
+
+func gatherWithClient(config Config, lg logger.Logger, client *http.Client) *Data {
+ var wg sync.WaitGroup
+
+ cpu := runtime.NumCPU()
+ uDat := &Data{
+ MetadataVersion: metadataVersion,
+ LogicalProcessors: &cpu,
+ Vendors: &vendors{},
+ }
+
+ warnGatherError := func(datatype string, err error) {
+ lg.Debug("error gathering utilization data", map[string]interface{}{
+ "error": err.Error(),
+ "datatype": datatype,
+ })
+ }
+
+ // Gather IPs before spawning goroutines since the IPs are used in
+ // gathering full hostname.
+ if ips, err := utilizationIPs(); nil == err {
+ uDat.Addresses = ips
+ } else {
+ warnGatherError("addresses", err)
+ }
+
+ // This closure allows us to run each gather function in a separate goroutine
+ // and wait for them at the end by closing over the wg WaitGroup we
+ // instantiated at the start of the function.
+ goGather := func(datatype string, gather func(*Data, *http.Client) error) {
+ wg.Add(1)
+ go func() {
+ // Note that locking around util is not necessary since
+ // the WaitGroup acts as a memory barrier:
+ // https://groups.google.com/d/msg/golang-nuts/5oHzhzXCcmM/utEwIAApCQAJ
+ // Thus this code is fine as long as each routine is
+ // modifying a different field of util.
+ defer wg.Done()
+ if err := gather(uDat, client); err != nil {
+ warnGatherError(datatype, err)
+ }
+ }()
+ }
+
+ // Kick off gathering which requires network calls in goroutines.
+
+ if config.DetectAWS {
+ goGather("aws", gatherAWS)
+ }
+
+ if config.DetectAzure {
+ goGather("azure", gatherAzure)
+ }
+
+ if config.DetectPCF {
+ goGather("pcf", gatherPCF)
+ }
+
+ if config.DetectGCP {
+ goGather("gcp", gatherGCP)
+ }
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ uDat.FullHostname = getFQDN(uDat.Addresses)
+ }()
+
+ // Do non-network gathering sequentially since it is fast.
+ + if id, err := sysinfo.BootID(); err != nil { + if err != sysinfo.ErrFeatureUnsupported { + warnGatherError("bootid", err) + } + } else { + uDat.BootID = id + } + + if config.DetectKubernetes { + gatherKubernetes(uDat.Vendors, os.Getenv) + } + + if config.DetectDocker { + if id, err := sysinfo.DockerID(); err != nil { + if err != sysinfo.ErrFeatureUnsupported && + err != sysinfo.ErrDockerNotFound { + warnGatherError("docker", err) + } + } else { + uDat.Vendors.Docker = &docker{ID: id} + } + } + + if hostname, err := sysinfo.Hostname(); nil == err { + uDat.Hostname = hostname + } else { + warnGatherError("hostname", err) + } + + if bts, err := sysinfo.PhysicalMemoryBytes(); nil == err { + mib := sysinfo.BytesToMebibytes(bts) + uDat.RAMMiB = &mib + } else { + warnGatherError("memory", err) + } + + // Now we wait for everything! + wg.Wait() + + // Override whatever needs to be overridden. + uDat.Config = overrideFromConfig(config) + + if uDat.Vendors.isEmpty() { + // Per spec, we MUST NOT send any vendors hash if it's empty. + uDat.Vendors = nil + } + + return uDat +} + +func gatherKubernetes(v *vendors, getenv func(string) string) { + if host := getenv("KUBERNETES_SERVICE_HOST"); host != "" { + v.Kubernetes = &kubernetes{Host: host} + } +} diff --git a/vendor/github.com/newrelic/go-agent/internal_app.go b/vendor/github.com/newrelic/go-agent/internal_app.go new file mode 100644 index 00000000000..50ca494147b --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal_app.go @@ -0,0 +1,607 @@ +package newrelic + +import ( + "errors" + "fmt" + "io" + "math" + "net/http" + "os" + "strings" + "sync" + "time" + + "github.com/newrelic/go-agent/internal" + "github.com/newrelic/go-agent/internal/logger" +) + +var ( + // NEW_RELIC_DEBUG_LOGGING can be set to anything to enable additional + // debug logging: the agent will log every transaction's data at info + // level. + envDebugLogging = "NEW_RELIC_DEBUG_LOGGING" + debugLogging = os.Getenv(envDebugLogging) +) + +type dataConsumer interface { + Consume(internal.AgentRunID, internal.Harvestable) +} + +type appData struct { + id internal.AgentRunID + data internal.Harvestable +} + +type app struct { + Logger + config Config + rpmControls internal.RpmControls + testHarvest *internal.Harvest + + // placeholderRun is used when the application is not connected. + placeholderRun *appRun + + // initiateShutdown is used to tell the processor to shutdown. + initiateShutdown chan struct{} + + // shutdownStarted and shutdownComplete are closed by the processor + // goroutine to indicate the shutdown status. Two channels are used so + // that the call of app.Shutdown() can block until shutdown has + // completed but other goroutines can exit when shutdown has started. + // This is not just an optimization: This prevents a deadlock if + // harvesting data during the shutdown fails and an attempt is made to + // merge the data into the next harvest. + shutdownStarted chan struct{} + shutdownComplete chan struct{} + + // Sends to these channels should not occur without a <-shutdownStarted + // select option to prevent deadlock. + dataChan chan appData + collectorErrorChan chan internal.RPMResponse + connectChan chan *appRun + + // This mutex protects both `run` and `err`, both of which should only + // be accessed using getState and setState. + sync.RWMutex + // run is non-nil when the app is successfully connected. It is + // immutable. 
+ run *appRun + // err is non-nil if the application will never be connected again + // (disconnect, license exception, shutdown). + err error + + serverless *internal.ServerlessHarvest +} + +func (app *app) doHarvest(h *internal.Harvest, harvestStart time.Time, run *appRun) { + h.CreateFinalMetrics(run.Reply) + + payloads := h.Payloads(app.config.DistributedTracer.Enabled) + for _, p := range payloads { + cmd := p.EndpointMethod() + data, err := p.Data(run.Reply.RunID.String(), harvestStart) + + if nil != err { + app.Warn("unable to create harvest data", map[string]interface{}{ + "cmd": cmd, + "error": err.Error(), + }) + continue + } + if nil == data { + continue + } + + call := internal.RpmCmd{ + Collector: run.Reply.Collector, + RunID: run.Reply.RunID.String(), + Name: cmd, + Data: data, + RequestHeadersMap: run.Reply.RequestHeadersMap, + MaxPayloadSize: run.Reply.MaxPayloadSizeInBytes, + } + + resp := internal.CollectorRequest(call, app.rpmControls) + + if resp.IsDisconnect() || resp.IsRestartException() { + select { + case app.collectorErrorChan <- resp: + case <-app.shutdownStarted: + } + return + } + + if nil != resp.Err { + app.Warn("harvest failure", map[string]interface{}{ + "cmd": cmd, + "error": resp.Err.Error(), + "retain_data": resp.ShouldSaveHarvestData(), + }) + } + + if resp.ShouldSaveHarvestData() { + app.Consume(run.Reply.RunID, p) + } + } +} + +func (app *app) connectRoutine() { + connectAttempt := 0 + for { + reply, resp := internal.ConnectAttempt(config{app.config}, + app.config.SecurityPoliciesToken, app.config.HighSecurity, app.rpmControls) + + if reply != nil { + select { + case app.connectChan <- newAppRun(app.config, reply): + case <-app.shutdownStarted: + } + return + } + + if resp.IsDisconnect() { + select { + case app.collectorErrorChan <- resp: + case <-app.shutdownStarted: + } + return + } + + if nil != resp.Err { + app.Warn("application connect failure", map[string]interface{}{ + "error": resp.Err.Error(), + }) + } + + backoff := getConnectBackoffTime(connectAttempt) + time.Sleep(time.Duration(backoff) * time.Second) + connectAttempt++ + } +} + +// Connect backoff time follows the sequence defined at +// https://source.datanerd.us/agents/agent-specs/blob/master/Collector-Response-Handling.md#retries-and-backoffs +func getConnectBackoffTime(attempt int) int { + connectBackoffTimes := [...]int{15, 15, 30, 60, 120, 300} + l := len(connectBackoffTimes) + if (attempt < 0) || (attempt >= l) { + return connectBackoffTimes[l-1] + } + return connectBackoffTimes[attempt] +} + +func debug(data internal.Harvestable, lg Logger) { + now := time.Now() + h := internal.NewHarvest(now, nil) + data.MergeIntoHarvest(h) + ps := h.Payloads(false) + for _, p := range ps { + cmd := p.EndpointMethod() + d, err := p.Data("agent run id", now) + if nil == d && nil == err { + continue + } + if nil != err { + lg.Info("integration", map[string]interface{}{ + "cmd": cmd, + "error": err.Error(), + }) + continue + } + lg.Info("integration", map[string]interface{}{ + "cmd": cmd, + "data": internal.JSONString(d), + }) + } +} + +func processConnectMessages(run *appRun, lg Logger) { + for _, msg := range run.Reply.Messages { + event := "collector message" + cn := map[string]interface{}{"msg": msg.Message} + + switch strings.ToLower(msg.Level) { + case "error": + lg.Error(event, cn) + case "warn": + lg.Warn(event, cn) + case "info": + lg.Info(event, cn) + case "debug", "verbose": + lg.Debug(event, cn) + } + } +} + +func (app *app) process() { + // Both the harvest and the run are non-nil when 
the app is connected,
+ // and nil otherwise.
+ var h *internal.Harvest
+ var run *appRun
+
+ harvestTicker := time.NewTicker(time.Second)
+ defer harvestTicker.Stop()
+
+ for {
+ select {
+ case <-harvestTicker.C:
+ if nil != run {
+ now := time.Now()
+ if ready := h.Ready(now, run.Reply); nil != ready {
+ go app.doHarvest(ready, now, run)
+ }
+ }
+ case d := <-app.dataChan:
+ if nil != run && run.Reply.RunID == d.id {
+ d.data.MergeIntoHarvest(h)
+ }
+ case <-app.initiateShutdown:
+ close(app.shutdownStarted)
+
+ // Remove the run before merging any final data to
+ // ensure a bounded number of receives from dataChan.
+ app.setState(nil, errors.New("application shut down"))
+
+ if nil != run {
+ for done := false; !done; {
+ select {
+ case d := <-app.dataChan:
+ if run.Reply.RunID == d.id {
+ d.data.MergeIntoHarvest(h)
+ }
+ default:
+ done = true
+ }
+ }
+ app.doHarvest(h, time.Now(), run)
+ }
+
+ close(app.shutdownComplete)
+ return
+ case resp := <-app.collectorErrorChan:
+ run = nil
+ h = nil
+ app.setState(nil, nil)
+
+ if resp.IsDisconnect() {
+ app.setState(nil, resp.Err)
+ app.Error("application disconnected", map[string]interface{}{
+ "app": app.config.AppName,
+ })
+ } else if resp.IsRestartException() {
+ app.Info("application restarted", map[string]interface{}{
+ "app": app.config.AppName,
+ })
+ go app.connectRoutine()
+ }
+ case run = <-app.connectChan:
+ h = internal.NewHarvest(time.Now(), run.Reply)
+ app.setState(run, nil)
+
+ app.Info("application connected", map[string]interface{}{
+ "app": app.config.AppName,
+ "run": run.Reply.RunID.String(),
+ })
+ processConnectMessages(run, app)
+ }
+ }
+}
+
+func (app *app) Shutdown(timeout time.Duration) {
+ if !app.config.Enabled {
+ return
+ }
+ if app.config.ServerlessMode.Enabled {
+ return
+ }
+
+ select {
+ case app.initiateShutdown <- struct{}{}:
+ default:
+ }
+
+ // Block until shutdown is done or timeout occurs.
+ t := time.NewTimer(timeout)
+ select {
+ case <-app.shutdownComplete:
+ case <-t.C:
+ }
+ t.Stop()
+
+ app.Info("application shutdown", map[string]interface{}{
+ "app": app.config.AppName,
+ })
+}
+
+func runSampler(app *app, period time.Duration) {
+ previous := internal.GetSample(time.Now(), app)
+ t := time.NewTicker(period)
+ for {
+ select {
+ case now := <-t.C:
+ current := internal.GetSample(now, app)
+ run, _ := app.getState()
+ app.Consume(run.Reply.RunID, internal.GetStats(internal.Samples{
+ Previous: previous,
+ Current: current,
+ }))
+ previous = current
+ case <-app.shutdownStarted:
+ t.Stop()
+ return
+ }
+ }
+}
+
+func (app *app) WaitForConnection(timeout time.Duration) error {
+ if !app.config.Enabled {
+ return nil
+ }
+ if app.config.ServerlessMode.Enabled {
+ return nil
+ }
+ deadline := time.Now().Add(timeout)
+ pollPeriod := 50 * time.Millisecond
+
+ for {
+ run, err := app.getState()
+ if nil != err {
+ return err
+ }
+ if run.Reply.RunID != "" {
+ return nil
+ }
+ if time.Now().After(deadline) {
+ return fmt.Errorf("timed out after %s", timeout.String())
+ }
+ time.Sleep(pollPeriod)
+ }
+}
+
+func newApp(c Config) (Application, error) {
+ c = copyConfigReferenceFields(c)
+ if err := c.Validate(); nil != err {
+ return nil, err
+ }
+ if nil == c.Logger {
+ c.Logger = logger.ShimLogger{}
+ }
+ app := &app{
+ Logger: c.Logger,
+ config: c,
+ placeholderRun: newAppRun(c, internal.ConnectReplyDefaults()),
+
+ // This channel must be buffered since Shutdown makes a
+ // non-blocking send attempt.
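+ // (A second Shutdown while a signal is already pending falls
+ // through to the select's default case and is a no-op.)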
+ initiateShutdown: make(chan struct{}, 1), + + shutdownStarted: make(chan struct{}), + shutdownComplete: make(chan struct{}), + connectChan: make(chan *appRun, 1), + collectorErrorChan: make(chan internal.RPMResponse, 1), + dataChan: make(chan appData, internal.AppDataChanSize), + rpmControls: internal.RpmControls{ + License: c.License, + Client: &http.Client{ + Transport: c.Transport, + Timeout: internal.CollectorTimeout, + }, + Logger: c.Logger, + AgentVersion: Version, + }, + } + + app.Info("application created", map[string]interface{}{ + "app": app.config.AppName, + "version": Version, + "enabled": app.config.Enabled, + }) + + if app.config.Enabled { + if app.config.ServerlessMode.Enabled { + reply := newServerlessConnectReply(c) + app.run = newAppRun(c, reply) + app.serverless = internal.NewServerlessHarvest(c.Logger, Version, os.Getenv) + } else { + go app.process() + go app.connectRoutine() + if app.config.RuntimeSampler.Enabled { + go runSampler(app, internal.RuntimeSamplerPeriod) + } + } + } + + return app, nil +} + +var ( + _ internal.HarvestTestinger = &app{} + _ internal.Expect = &app{} +) + +func (app *app) HarvestTesting(replyfn func(*internal.ConnectReply)) { + if nil != replyfn { + reply := internal.ConnectReplyDefaults() + replyfn(reply) + app.placeholderRun = newAppRun(app.config, reply) + } + app.testHarvest = internal.NewHarvest(time.Now(), nil) +} + +func (app *app) getState() (*appRun, error) { + app.RLock() + defer app.RUnlock() + + run := app.run + if nil == run { + run = app.placeholderRun + } + return run, app.err +} + +func (app *app) setState(run *appRun, err error) { + app.Lock() + defer app.Unlock() + + app.run = run + app.err = err +} + +// StartTransaction implements newrelic.Application's StartTransaction. +func (app *app) StartTransaction(name string, w http.ResponseWriter, r *http.Request) Transaction { + run, _ := app.getState() + txn := upgradeTxn(newTxn(txnInput{ + app: app, + appRun: run, + writer: w, + Consumer: app, + }, name)) + + if nil != r { + txn.SetWebRequest(NewWebRequest(r)) + } + return txn +} + +var ( + errHighSecurityEnabled = errors.New("high security enabled") + errCustomEventsDisabled = errors.New("custom events disabled") + errCustomEventsRemoteDisabled = errors.New("custom events disabled by server") +) + +// RecordCustomEvent implements newrelic.Application's RecordCustomEvent. +func (app *app) RecordCustomEvent(eventType string, params map[string]interface{}) error { + if app.config.HighSecurity { + return errHighSecurityEnabled + } + + if !app.config.CustomInsightsEvents.Enabled { + return errCustomEventsDisabled + } + + event, e := internal.CreateCustomEvent(eventType, params, time.Now()) + if nil != e { + return e + } + + run, _ := app.getState() + if !run.Reply.CollectCustomEvents { + return errCustomEventsRemoteDisabled + } + + if !run.Reply.SecurityPolicies.CustomEvents.Enabled() { + return errSecurityPolicy + } + + app.Consume(run.Reply.RunID, event) + + return nil +} + +var ( + errMetricInf = errors.New("invalid metric value: inf") + errMetricNaN = errors.New("invalid metric value: NaN") + errMetricNameEmpty = errors.New("missing metric name") + errMetricServerless = errors.New("custom metrics are not currently supported in serverless mode") +) + +// RecordCustomMetric implements newrelic.Application's RecordCustomMetric. 
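+//
+// For example (metric name assumed): RecordCustomMetric("latency", math.Inf(1))
+// returns errMetricInf, while RecordCustomMetric("", 1.2) returns
+// errMetricNameEmpty; a valid call is consumed under the current agent run.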
+func (app *app) RecordCustomMetric(name string, value float64) error { + if app.config.ServerlessMode.Enabled { + return errMetricServerless + } + if math.IsNaN(value) { + return errMetricNaN + } + if math.IsInf(value, 0) { + return errMetricInf + } + if "" == name { + return errMetricNameEmpty + } + run, _ := app.getState() + app.Consume(run.Reply.RunID, internal.CustomMetric{ + RawInputName: name, + Value: value, + }) + return nil +} + +var ( + _ internal.ServerlessWriter = &app{} +) + +func (app *app) ServerlessWrite(arn string, writer io.Writer) { + app.serverless.Write(arn, writer) +} + +func (app *app) Consume(id internal.AgentRunID, data internal.Harvestable) { + if "" != debugLogging { + debug(data, app) + } + + app.serverless.Consume(data) + + if nil != app.testHarvest { + data.MergeIntoHarvest(app.testHarvest) + return + } + + if "" == id { + return + } + + select { + case app.dataChan <- appData{id, data}: + case <-app.shutdownStarted: + } +} + +func (app *app) ExpectCustomEvents(t internal.Validator, want []internal.WantEvent) { + internal.ExpectCustomEvents(internal.ExtendValidator(t, "custom events"), app.testHarvest.CustomEvents, want) +} + +func (app *app) ExpectErrors(t internal.Validator, want []internal.WantError) { + t = internal.ExtendValidator(t, "traced errors") + internal.ExpectErrors(t, app.testHarvest.ErrorTraces, want) +} + +func (app *app) ExpectErrorEvents(t internal.Validator, want []internal.WantEvent) { + t = internal.ExtendValidator(t, "error events") + internal.ExpectErrorEvents(t, app.testHarvest.ErrorEvents, want) +} + +func (app *app) ExpectSpanEvents(t internal.Validator, want []internal.WantEvent) { + t = internal.ExtendValidator(t, "spans events") + internal.ExpectSpanEvents(t, app.testHarvest.SpanEvents, want) +} + +func (app *app) ExpectTxnEvents(t internal.Validator, want []internal.WantEvent) { + t = internal.ExtendValidator(t, "txn events") + internal.ExpectTxnEvents(t, app.testHarvest.TxnEvents, want) +} + +func (app *app) ExpectMetrics(t internal.Validator, want []internal.WantMetric) { + t = internal.ExtendValidator(t, "metrics") + internal.ExpectMetrics(t, app.testHarvest.Metrics, want) +} + +func (app *app) ExpectMetricsPresent(t internal.Validator, want []internal.WantMetric) { + t = internal.ExtendValidator(t, "metrics") + internal.ExpectMetricsPresent(t, app.testHarvest.Metrics, want) +} + +func (app *app) ExpectTxnMetrics(t internal.Validator, want internal.WantTxn) { + t = internal.ExtendValidator(t, "metrics") + internal.ExpectTxnMetrics(t, app.testHarvest.Metrics, want) +} + +func (app *app) ExpectTxnTraces(t internal.Validator, want []internal.WantTxnTrace) { + t = internal.ExtendValidator(t, "txn traces") + internal.ExpectTxnTraces(t, app.testHarvest.TxnTraces, want) +} + +func (app *app) ExpectSlowQueries(t internal.Validator, want []internal.WantSlowQuery) { + t = internal.ExtendValidator(t, "slow queries") + internal.ExpectSlowQueries(t, app.testHarvest.SlowSQLs, want) +} diff --git a/vendor/github.com/newrelic/go-agent/internal_config.go b/vendor/github.com/newrelic/go-agent/internal_config.go new file mode 100644 index 00000000000..d8e6180957f --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal_config.go @@ -0,0 +1,190 @@ +package newrelic + +import ( + "encoding/json" + "fmt" + "net/http" + "os" + "strings" + + "github.com/newrelic/go-agent/internal" + "github.com/newrelic/go-agent/internal/logger" + "github.com/newrelic/go-agent/internal/utilization" +) + +func copyDestConfig(c AttributeDestinationConfig) 
AttributeDestinationConfig { + cp := c + if nil != c.Include { + cp.Include = make([]string, len(c.Include)) + copy(cp.Include, c.Include) + } + if nil != c.Exclude { + cp.Exclude = make([]string, len(c.Exclude)) + copy(cp.Exclude, c.Exclude) + } + return cp +} + +func copyConfigReferenceFields(cfg Config) Config { + cp := cfg + if nil != cfg.Labels { + cp.Labels = make(map[string]string, len(cfg.Labels)) + for key, val := range cfg.Labels { + cp.Labels[key] = val + } + } + if nil != cfg.ErrorCollector.IgnoreStatusCodes { + ignored := make([]int, len(cfg.ErrorCollector.IgnoreStatusCodes)) + copy(ignored, cfg.ErrorCollector.IgnoreStatusCodes) + cp.ErrorCollector.IgnoreStatusCodes = ignored + } + + cp.Attributes = copyDestConfig(cfg.Attributes) + cp.ErrorCollector.Attributes = copyDestConfig(cfg.ErrorCollector.Attributes) + cp.TransactionEvents.Attributes = copyDestConfig(cfg.TransactionEvents.Attributes) + cp.TransactionTracer.Attributes = copyDestConfig(cfg.TransactionTracer.Attributes) + cp.BrowserMonitoring.Attributes = copyDestConfig(cfg.BrowserMonitoring.Attributes) + cp.SpanEvents.Attributes = copyDestConfig(cfg.SpanEvents.Attributes) + cp.TransactionTracer.Segments.Attributes = copyDestConfig(cfg.TransactionTracer.Segments.Attributes) + + return cp +} + +func transportSetting(t http.RoundTripper) interface{} { + if nil == t { + return nil + } + return fmt.Sprintf("%T", t) +} + +func loggerSetting(lg Logger) interface{} { + if nil == lg { + return nil + } + if _, ok := lg.(logger.ShimLogger); ok { + return nil + } + return fmt.Sprintf("%T", lg) +} + +const ( + // https://source.datanerd.us/agents/agent-specs/blob/master/Custom-Host-Names.md + hostByteLimit = 255 +) + +type settings Config + +func (s settings) MarshalJSON() ([]byte, error) { + c := Config(s) + transport := c.Transport + c.Transport = nil + logger := c.Logger + c.Logger = nil + + js, err := json.Marshal(c) + if nil != err { + return nil, err + } + fields := make(map[string]interface{}) + err = json.Unmarshal(js, &fields) + if nil != err { + return nil, err + } + // The License field is not simply ignored by adding the `json:"-"` tag + // to it since we want to allow consumers to populate Config from JSON. + delete(fields, `License`) + fields[`Transport`] = transportSetting(transport) + fields[`Logger`] = loggerSetting(logger) + + // Browser monitoring support. 
+ if c.BrowserMonitoring.Enabled { + fields[`browser_monitoring.loader`] = "rum" + } + + return json.Marshal(fields) +} + +func configConnectJSONInternal(c Config, pid int, util *utilization.Data, e internal.Environment, version string, securityPolicies *internal.SecurityPolicies, metadata map[string]string) ([]byte, error) { + return json.Marshal([]interface{}{struct { + Pid int `json:"pid"` + Language string `json:"language"` + Version string `json:"agent_version"` + Host string `json:"host"` + HostDisplayName string `json:"display_host,omitempty"` + Settings interface{} `json:"settings"` + AppName []string `json:"app_name"` + HighSecurity bool `json:"high_security"` + Labels internal.Labels `json:"labels,omitempty"` + Environment internal.Environment `json:"environment"` + Identifier string `json:"identifier"` + Util *utilization.Data `json:"utilization"` + SecurityPolicies *internal.SecurityPolicies `json:"security_policies,omitempty"` + Metadata map[string]string `json:"metadata"` + EventData internal.EventHarvestConfig `json:"event_harvest_config"` + }{ + Pid: pid, + Language: internal.AgentLanguage, + Version: version, + Host: internal.StringLengthByteLimit(util.Hostname, hostByteLimit), + HostDisplayName: internal.StringLengthByteLimit(c.HostDisplayName, hostByteLimit), + Settings: (settings)(c), + AppName: strings.Split(c.AppName, ";"), + HighSecurity: c.HighSecurity, + Labels: internal.Labels(c.Labels), + Environment: e, + // This identifier field is provided to avoid: + // https://newrelic.atlassian.net/browse/DSCORE-778 + // + // This identifier is used by the collector to look up the real + // agent. If an identifier isn't provided, the collector will + // create its own based on the first appname, which prevents a + // single daemon from connecting "a;b" and "a;c" at the same + // time. + // + // Providing the identifier below works around this issue and + // allows users more flexibility in using application rollups. + Identifier: c.AppName, + Util: util, + SecurityPolicies: securityPolicies, + Metadata: metadata, + EventData: internal.DefaultEventHarvestConfig(), + }}) +} + +const ( + // https://source.datanerd.us/agents/agent-specs/blob/master/Connect-LEGACY.md#metadata-hash + metadataPrefix = "NEW_RELIC_METADATA_" +) + +func gatherMetadata(environ func() []string) map[string]string { + metadata := make(map[string]string) + env := environ() + for _, pair := range env { + if strings.HasPrefix(pair, metadataPrefix) { + idx := strings.Index(pair, "=") + if idx >= 0 { + metadata[pair[0:idx]] = pair[idx+1:] + } + } + } + return metadata +} + +// config allows CreateConnectJSON to be a method on a non-public type. 
+type config struct{ Config } + +func (c config) CreateConnectJSON(securityPolicies *internal.SecurityPolicies) ([]byte, error) { + env := internal.NewEnvironment() + util := utilization.Gather(utilization.Config{ + DetectAWS: c.Utilization.DetectAWS, + DetectAzure: c.Utilization.DetectAzure, + DetectPCF: c.Utilization.DetectPCF, + DetectGCP: c.Utilization.DetectGCP, + DetectDocker: c.Utilization.DetectDocker, + DetectKubernetes: c.Utilization.DetectKubernetes, + LogicalProcessors: c.Utilization.LogicalProcessors, + TotalRAMMIB: c.Utilization.TotalRAMMIB, + BillingHostname: c.Utilization.BillingHostname, + }, c.Logger) + return configConnectJSONInternal(c.Config, os.Getpid(), util, env, Version, securityPolicies, gatherMetadata(os.Environ)) +} diff --git a/vendor/github.com/newrelic/go-agent/internal_response_writer.go b/vendor/github.com/newrelic/go-agent/internal_response_writer.go new file mode 100644 index 00000000000..89cd8a86374 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal_response_writer.go @@ -0,0 +1,154 @@ +package newrelic + +import ( + "bufio" + "io" + "net" + "net/http" + + "github.com/newrelic/go-agent/internal" +) + +func (thd *thread) CloseNotify() <-chan bool { + return thd.txn.getWriter().(http.CloseNotifier).CloseNotify() +} +func (thd *thread) Flush() { + thd.txn.getWriter().(http.Flusher).Flush() +} +func (thd *thread) Hijack() (net.Conn, *bufio.ReadWriter, error) { + return thd.txn.getWriter().(http.Hijacker).Hijack() +} +func (thd *thread) ReadFrom(r io.Reader) (int64, error) { + return thd.txn.getWriter().(io.ReaderFrom).ReadFrom(r) +} + +type threadWithExtras interface { + Transaction + internal.AddAgentAttributer + internal.AddAgentSpanAttributer +} + +func upgradeTxn(thd *thread) Transaction { + // Note that thd.txn.getWriter() is not used here. The transaction is + // locked (or under construction) when this function is used. 
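+	//
+	// The generated switch below sets one bit per optional interface
+	// (http.CloseNotifier, http.Flusher, http.Hijacker, io.ReaderFrom)
+	// implemented by the underlying ResponseWriter and returns an anonymous
+	// struct embedding exactly that combination, so a caller's type
+	// assertion on the returned Transaction succeeds precisely when it
+	// would have succeeded on the original writer.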
+ + // GENERATED CODE DO NOT MODIFY + // This code generated by internal/tools/interface-wrapping + var ( + i0 int32 = 1 << 0 + i1 int32 = 1 << 1 + i2 int32 = 1 << 2 + i3 int32 = 1 << 3 + ) + var interfaceSet int32 + if _, ok := thd.txn.writer.(http.CloseNotifier); ok { + interfaceSet |= i0 + } + if _, ok := thd.txn.writer.(http.Flusher); ok { + interfaceSet |= i1 + } + if _, ok := thd.txn.writer.(http.Hijacker); ok { + interfaceSet |= i2 + } + if _, ok := thd.txn.writer.(io.ReaderFrom); ok { + interfaceSet |= i3 + } + switch interfaceSet { + default: // No optional interfaces implemented + return struct { + threadWithExtras + }{thd} + case i0: + return struct { + threadWithExtras + http.CloseNotifier + }{thd, thd} + case i1: + return struct { + threadWithExtras + http.Flusher + }{thd, thd} + case i0 | i1: + return struct { + threadWithExtras + http.CloseNotifier + http.Flusher + }{thd, thd, thd} + case i2: + return struct { + threadWithExtras + http.Hijacker + }{thd, thd} + case i0 | i2: + return struct { + threadWithExtras + http.CloseNotifier + http.Hijacker + }{thd, thd, thd} + case i1 | i2: + return struct { + threadWithExtras + http.Flusher + http.Hijacker + }{thd, thd, thd} + case i0 | i1 | i2: + return struct { + threadWithExtras + http.CloseNotifier + http.Flusher + http.Hijacker + }{thd, thd, thd, thd} + case i3: + return struct { + threadWithExtras + io.ReaderFrom + }{thd, thd} + case i0 | i3: + return struct { + threadWithExtras + http.CloseNotifier + io.ReaderFrom + }{thd, thd, thd} + case i1 | i3: + return struct { + threadWithExtras + http.Flusher + io.ReaderFrom + }{thd, thd, thd} + case i0 | i1 | i3: + return struct { + threadWithExtras + http.CloseNotifier + http.Flusher + io.ReaderFrom + }{thd, thd, thd, thd} + case i2 | i3: + return struct { + threadWithExtras + http.Hijacker + io.ReaderFrom + }{thd, thd, thd} + case i0 | i2 | i3: + return struct { + threadWithExtras + http.CloseNotifier + http.Hijacker + io.ReaderFrom + }{thd, thd, thd, thd} + case i1 | i2 | i3: + return struct { + threadWithExtras + http.Flusher + http.Hijacker + io.ReaderFrom + }{thd, thd, thd, thd} + case i0 | i1 | i2 | i3: + return struct { + threadWithExtras + http.CloseNotifier + http.Flusher + http.Hijacker + io.ReaderFrom + }{thd, thd, thd, thd, thd} + } +} diff --git a/vendor/github.com/newrelic/go-agent/internal_txn.go b/vendor/github.com/newrelic/go-agent/internal_txn.go new file mode 100644 index 00000000000..e6e9e46cbf9 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal_txn.go @@ -0,0 +1,1134 @@ +package newrelic + +import ( + "errors" + "fmt" + "net/http" + "net/url" + "reflect" + "strings" + "sync" + "time" + + "github.com/newrelic/go-agent/internal" +) + +type txnInput struct { + // This ResponseWriter should only be accessed using txn.getWriter() + writer http.ResponseWriter + app Application + Consumer dataConsumer + *appRun +} + +type txn struct { + txnInput + // This mutex is required since the consumer may call the public API + // interface functions from different routines. + sync.Mutex + // finished indicates whether or not End() has been called. After + // finished has been set to true, no recording should occur. + finished bool + numPayloadsCreated uint32 + sampledCalculated bool + + ignore bool + + // wroteHeader prevents capturing multiple response code errors if the + // user erroneously calls WriteHeader multiple times. 
+ wroteHeader bool + + internal.TxnData + + mainThread internal.Thread + asyncThreads []*internal.Thread +} + +type thread struct { + *txn + // thread does not have locking because it should only be accessed while + // the txn is locked. + thread *internal.Thread +} + +func (txn *txn) markStart(now time.Time) { + txn.Start = now + // The mainThread is considered active now. + txn.mainThread.RecordActivity(now) + +} + +func (txn *txn) markEnd(now time.Time, thread *internal.Thread) { + txn.Stop = now + // The thread on which End() was called is considered active now. + thread.RecordActivity(now) + txn.Duration = txn.Stop.Sub(txn.Start) + + // TotalTime is the sum of "active time" across all threads. A thread + // was active when it started the transaction, stopped the transaction, + // started a segment, or stopped a segment. + txn.TotalTime = txn.mainThread.TotalTime() + for _, thd := range txn.asyncThreads { + txn.TotalTime += thd.TotalTime() + } + // Ensure that TotalTime is at least as large as Duration so that the + // graphs look sensible. This can happen under the following situation: + // goroutine1: txn.start----|segment1| + // goroutine2: |segment2|----txn.end + if txn.Duration > txn.TotalTime { + txn.TotalTime = txn.Duration + } +} + +func newTxn(input txnInput, name string) *thread { + txn := &txn{ + txnInput: input, + } + txn.markStart(time.Now()) + + txn.Name = name + txn.Attrs = internal.NewAttributes(input.AttributeConfig) + + if input.Config.DistributedTracer.Enabled { + txn.BetterCAT.Enabled = true + txn.BetterCAT.Priority = internal.NewPriority() + txn.TraceIDGenerator = input.Reply.TraceIDGenerator + txn.BetterCAT.ID = txn.TraceIDGenerator.GenerateTraceID() + txn.SpanEventsEnabled = txn.Config.SpanEvents.Enabled + txn.LazilyCalculateSampled = txn.lazilyCalculateSampled + } + + txn.Attrs.Agent.Add(internal.AttributeHostDisplayName, txn.Config.HostDisplayName, nil) + txn.TxnTrace.Enabled = txn.Config.TransactionTracer.Enabled + txn.TxnTrace.SegmentThreshold = txn.Config.TransactionTracer.SegmentThreshold + txn.StackTraceThreshold = txn.Config.TransactionTracer.StackTraceThreshold + txn.SlowQueriesEnabled = txn.Config.DatastoreTracer.SlowQuery.Enabled + txn.SlowQueryThreshold = txn.Config.DatastoreTracer.SlowQuery.Threshold + + // Synthetics support is tied up with a transaction's Old CAT field, + // CrossProcess. To support Synthetics with either BetterCAT or Old CAT, + // Initialize the CrossProcess field of the transaction, passing in + // the top-level configuration. + doOldCAT := txn.Config.CrossApplicationTracer.Enabled + noGUID := txn.Config.DistributedTracer.Enabled + txn.CrossProcess.Init(doOldCAT, noGUID, input.Reply) + + return &thread{ + txn: txn, + thread: &txn.mainThread, + } +} + +// lazilyCalculateSampled calculates and returns whether or not the transaction +// should be sampled. Sampled is not computed at the beginning of the +// transaction because we want to calculate Sampled only for transactions that +// do not accept an inbound payload. 
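+// (An inbound payload that carries an explicit sampled flag pre-sets
+// BetterCAT.Sampled and sampledCalculated when it is accepted, so this
+// function then simply returns the inherited decision.)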
+func (txn *txn) lazilyCalculateSampled() bool { + if !txn.BetterCAT.Enabled { + return false + } + if txn.sampledCalculated { + return txn.BetterCAT.Sampled + } + txn.BetterCAT.Sampled = txn.Reply.AdaptiveSampler.ComputeSampled(txn.BetterCAT.Priority.Float32(), time.Now()) + if txn.BetterCAT.Sampled { + txn.BetterCAT.Priority += 1.0 + } + txn.sampledCalculated = true + return txn.BetterCAT.Sampled +} + +type requestWrap struct{ request *http.Request } + +func (r requestWrap) Header() http.Header { return r.request.Header } +func (r requestWrap) URL() *url.URL { return r.request.URL } +func (r requestWrap) Method() string { return r.request.Method } + +func (r requestWrap) Transport() TransportType { + if strings.HasPrefix(r.request.Proto, "HTTP") { + if r.request.TLS != nil { + return TransportHTTPS + } + return TransportHTTP + } + return TransportUnknown + +} + +type staticWebRequest struct { + header http.Header + url *url.URL + method string + transport TransportType +} + +func (r staticWebRequest) Header() http.Header { return r.header } +func (r staticWebRequest) URL() *url.URL { return r.url } +func (r staticWebRequest) Method() string { return r.method } +func (r staticWebRequest) Transport() TransportType { return TransportHTTP } + +func (txn *txn) SetWebRequest(r WebRequest) error { + txn.Lock() + defer txn.Unlock() + + if txn.finished { + return errAlreadyEnded + } + + // Any call to SetWebRequest should indicate a web transaction. + txn.IsWeb = true + + if nil == r { + return nil + } + h := r.Header() + if nil != h { + txn.Queuing = internal.QueueDuration(h, txn.Start) + + if p := h.Get(DistributedTracePayloadHeader); p != "" { + txn.acceptDistributedTracePayloadLocked(r.Transport(), p) + } + + txn.CrossProcess.InboundHTTPRequest(h) + } + + internal.RequestAgentAttributes(txn.Attrs, r.Method(), h, r.URL()) + + return nil +} + +func (thd *thread) SetWebResponse(w http.ResponseWriter) Transaction { + txn := thd.txn + txn.Lock() + defer txn.Unlock() + + // Replace the ResponseWriter even if the transaction has ended so that + // consumers calling ResponseWriter methods on the transactions see that + // data flowing through as expected. + txn.writer = w + + return upgradeTxn(&thread{ + thread: thd.thread, + txn: txn, + }) +} + +func (txn *txn) freezeName() { + if txn.ignore || ("" != txn.FinalName) { + return + } + + txn.FinalName = internal.CreateFullTxnName(txn.Name, txn.Reply, txn.IsWeb) + if "" == txn.FinalName { + txn.ignore = true + } +} + +func (txn *txn) getsApdex() bool { + return txn.IsWeb +} + +func (txn *txn) shouldSaveTrace() bool { + if !txn.Config.TransactionTracer.Enabled { + return false + } + if txn.CrossProcess.IsSynthetics() { + return true + } + return txn.Duration >= txn.txnTraceThreshold(txn.ApdexThreshold) +} + +func (txn *txn) MergeIntoHarvest(h *internal.Harvest) { + + var priority internal.Priority + if txn.BetterCAT.Enabled { + priority = txn.BetterCAT.Priority + } else { + priority = internal.NewPriority() + } + + internal.CreateTxnMetrics(&txn.TxnData, h.Metrics) + internal.MergeBreakdownMetrics(&txn.TxnData, h.Metrics) + + if txn.Config.TransactionEvents.Enabled { + // Allocate a new TxnEvent to prevent a reference to the large transaction. 
+ alloc := new(internal.TxnEvent) + *alloc = txn.TxnData.TxnEvent + h.TxnEvents.AddTxnEvent(alloc, priority) + } + + if txn.Reply.CollectErrors { + internal.MergeTxnErrors(&h.ErrorTraces, txn.Errors, txn.TxnEvent) + } + + if txn.Config.ErrorCollector.CaptureEvents { + for _, e := range txn.Errors { + errEvent := &internal.ErrorEvent{ + ErrorData: *e, + TxnEvent: txn.TxnEvent, + } + // Since the stack trace is not used in error events, remove the reference + // to minimize memory. + errEvent.Stack = nil + h.ErrorEvents.Add(errEvent, priority) + } + } + + if txn.shouldSaveTrace() { + h.TxnTraces.Witness(internal.HarvestTrace{ + TxnEvent: txn.TxnEvent, + Trace: txn.TxnTrace, + }) + } + + if nil != txn.SlowQueries { + h.SlowSQLs.Merge(txn.SlowQueries, txn.TxnEvent) + } + + if txn.BetterCAT.Sampled && txn.SpanEventsEnabled { + h.SpanEvents.MergeFromTransaction(&txn.TxnData) + } +} + +func headersJustWritten(txn *txn, code int, hdr http.Header) { + txn.Lock() + defer txn.Unlock() + + if txn.finished { + return + } + if txn.wroteHeader { + return + } + txn.wroteHeader = true + + internal.ResponseHeaderAttributes(txn.Attrs, hdr) + internal.ResponseCodeAttribute(txn.Attrs, code) + + if txn.appRun.responseCodeIsError(code) { + e := internal.TxnErrorFromResponseCode(time.Now(), code) + e.Stack = internal.GetStackTrace() + txn.noticeErrorInternal(e) + } +} + +func (txn *txn) responseHeader(hdr http.Header) http.Header { + txn.Lock() + defer txn.Unlock() + + if txn.finished { + return nil + } + if txn.wroteHeader { + return nil + } + if !txn.CrossProcess.Enabled { + return nil + } + if !txn.CrossProcess.IsInbound() { + return nil + } + txn.freezeName() + contentLength := internal.GetContentLengthFromHeader(hdr) + + appData, err := txn.CrossProcess.CreateAppData(txn.FinalName, txn.Queuing, time.Since(txn.Start), contentLength) + if err != nil { + txn.Config.Logger.Debug("error generating outbound response header", map[string]interface{}{ + "error": err, + }) + return nil + } + return internal.AppDataToHTTPHeader(appData) +} + +func addCrossProcessHeaders(txn *txn, hdr http.Header) { + // responseHeader() checks the wroteHeader field and returns a nil map if the + // header has been written, so we don't need a check here. + if nil != hdr { + for key, values := range txn.responseHeader(hdr) { + for _, value := range values { + hdr.Add(key, value) + } + } + } +} + +// getWriter is used to access the transaction's ResponseWriter. The +// ResponseWriter is mutex protected since it may be changed with +// txn.SetWebResponse, and we want changes to be visible across goroutines. The +// ResponseWriter is accessed using this getWriter() function rather than directly +// in mutex protected methods since we do NOT want the transaction to be locked +// while calling the ResponseWriter's methods. +func (txn *txn) getWriter() http.ResponseWriter { + txn.Lock() + rw := txn.writer + txn.Unlock() + return rw +} + +func nilSafeHeader(rw http.ResponseWriter) http.Header { + if nil == rw { + return nil + } + return rw.Header() +} + +func (txn *txn) Header() http.Header { + return nilSafeHeader(txn.getWriter()) +} + +func (txn *txn) Write(b []byte) (n int, err error) { + rw := txn.getWriter() + hdr := nilSafeHeader(rw) + + // This is safe to call unconditionally, even if Write() is called multiple + // times; see also the commentary in addCrossProcessHeaders(). 
+ addCrossProcessHeaders(txn, hdr) + + if rw != nil { + n, err = rw.Write(b) + } + + headersJustWritten(txn, http.StatusOK, hdr) + + return +} + +func (txn *txn) WriteHeader(code int) { + rw := txn.getWriter() + hdr := nilSafeHeader(rw) + + addCrossProcessHeaders(txn, hdr) + + if nil != rw { + rw.WriteHeader(code) + } + + headersJustWritten(txn, code, hdr) +} + +func (thd *thread) End() error { + txn := thd.txn + txn.Lock() + defer txn.Unlock() + + if txn.finished { + return errAlreadyEnded + } + + txn.finished = true + + r := recover() + if nil != r { + e := internal.TxnErrorFromPanic(time.Now(), r) + e.Stack = internal.GetStackTrace() + txn.noticeErrorInternal(e) + } + + txn.markEnd(time.Now(), thd.thread) + txn.freezeName() + // Make a sampling decision if there have been no segments or outbound + // payloads. + txn.lazilyCalculateSampled() + + // Finalise the CAT state. + if err := txn.CrossProcess.Finalise(txn.Name, txn.Config.AppName); err != nil { + txn.Config.Logger.Debug("error finalising the cross process state", map[string]interface{}{ + "error": err, + }) + } + + // Assign apdexThreshold regardless of whether or not the transaction + // gets apdex since it may be used to calculate the trace threshold. + txn.ApdexThreshold = internal.CalculateApdexThreshold(txn.Reply, txn.FinalName) + + if txn.getsApdex() { + if txn.HasErrors() { + txn.Zone = internal.ApdexFailing + } else { + txn.Zone = internal.CalculateApdexZone(txn.ApdexThreshold, txn.Duration) + } + } else { + txn.Zone = internal.ApdexNone + } + + if txn.Config.Logger.DebugEnabled() { + txn.Config.Logger.Debug("transaction ended", map[string]interface{}{ + "name": txn.FinalName, + "duration_ms": txn.Duration.Seconds() * 1000.0, + "ignored": txn.ignore, + "app_connected": "" != txn.Reply.RunID, + }) + } + + if !txn.ignore { + txn.Consumer.Consume(txn.Reply.RunID, txn) + } + + // Note that if a consumer uses `panic(nil)`, the panic will not + // propagate. 
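+	// Re-raise any panic captured by the recover() above so that a
+	// deferred End() records the error without swallowing the panic.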
+ if nil != r { + panic(r) + } + + return nil +} + +func (txn *txn) AddAttribute(name string, value interface{}) error { + txn.Lock() + defer txn.Unlock() + + if txn.Config.HighSecurity { + return errHighSecurityEnabled + } + + if !txn.Reply.SecurityPolicies.CustomParameters.Enabled() { + return errSecurityPolicy + } + + if txn.finished { + return errAlreadyEnded + } + + return internal.AddUserAttribute(txn.Attrs, name, value, internal.DestAll) +} + +var ( + errorsDisabled = errors.New("errors disabled") + errNilError = errors.New("nil error") + errAlreadyEnded = errors.New("transaction has already ended") + errSecurityPolicy = errors.New("disabled by security policy") + errTransactionIgnored = errors.New("transaction has been ignored") + errBrowserDisabled = errors.New("browser disabled by local configuration") +) + +const ( + highSecurityErrorMsg = "message removed by high security setting" + securityPolicyErrorMsg = "message removed by security policy" +) + +func (txn *txn) noticeErrorInternal(err internal.ErrorData) error { + if !txn.Config.ErrorCollector.Enabled { + return errorsDisabled + } + + if nil == txn.Errors { + txn.Errors = internal.NewTxnErrors(internal.MaxTxnErrors) + } + + if txn.Config.HighSecurity { + err.Msg = highSecurityErrorMsg + } + + if !txn.Reply.SecurityPolicies.AllowRawExceptionMessages.Enabled() { + err.Msg = securityPolicyErrorMsg + } + + txn.Errors.Add(err) + txn.TxnData.TxnEvent.HasError = true //mark transaction as having an error + return nil +} + +var ( + errTooManyErrorAttributes = fmt.Errorf("too many extra attributes: limit is %d", + internal.AttributeErrorLimit) +) + +func (txn *txn) NoticeError(err error) error { + txn.Lock() + defer txn.Unlock() + + if txn.finished { + return errAlreadyEnded + } + + if nil == err { + return errNilError + } + + e := internal.ErrorData{ + When: time.Now(), + Msg: err.Error(), + } + if ec, ok := err.(ErrorClasser); ok { + e.Klass = ec.ErrorClass() + } + if "" == e.Klass { + e.Klass = reflect.TypeOf(err).String() + } + if st, ok := err.(StackTracer); ok { + e.Stack = st.StackTrace() + // Note that if the provided stack trace is excessive in length, + // it will be truncated during JSON creation. + } + if nil == e.Stack { + e.Stack = internal.GetStackTrace() + } + + if ea, ok := err.(ErrorAttributer); ok && !txn.Config.HighSecurity && txn.Reply.SecurityPolicies.CustomParameters.Enabled() { + unvetted := ea.ErrorAttributes() + if len(unvetted) > internal.AttributeErrorLimit { + return errTooManyErrorAttributes + } + + e.ExtraAttributes = make(map[string]interface{}) + for key, val := range unvetted { + val, errr := internal.ValidateUserAttribute(key, val) + if nil != errr { + return errr + } + e.ExtraAttributes[key] = val + } + } + + return txn.noticeErrorInternal(e) +} + +func (txn *txn) SetName(name string) error { + txn.Lock() + defer txn.Unlock() + + if txn.finished { + return errAlreadyEnded + } + + txn.Name = name + return nil +} + +func (txn *txn) Ignore() error { + txn.Lock() + defer txn.Unlock() + + if txn.finished { + return errAlreadyEnded + } + txn.ignore = true + return nil +} + +func (thd *thread) StartSegmentNow() SegmentStartTime { + var s internal.SegmentStartTime + txn := thd.txn + txn.Lock() + if !txn.finished { + s = internal.StartSegment(&txn.TxnData, thd.thread, time.Now()) + } + txn.Unlock() + return SegmentStartTime{ + segment: segment{ + start: s, + thread: thd, + }, + } +} + +const ( + // Browser fields are encoded using the first digits of the license + // key. 
+ browserEncodingKeyLimit = 13 +) + +func browserEncodingKey(licenseKey string) []byte { + key := []byte(licenseKey) + if len(key) > browserEncodingKeyLimit { + key = key[0:browserEncodingKeyLimit] + } + return key +} + +func (txn *txn) BrowserTimingHeader() (*BrowserTimingHeader, error) { + txn.Lock() + defer txn.Unlock() + + if !txn.Config.BrowserMonitoring.Enabled { + return nil, errBrowserDisabled + } + + if txn.Reply.AgentLoader == "" { + // If the loader is empty, either browser has been disabled + // by the server or the application is not yet connected. + return nil, nil + } + + if txn.finished { + return nil, errAlreadyEnded + } + + txn.freezeName() + + // Freezing the name might cause the transaction to be ignored, so check + // this after txn.freezeName(). + if txn.ignore { + return nil, errTransactionIgnored + } + + encodingKey := browserEncodingKey(txn.Config.License) + + attrs, err := internal.Obfuscate(internal.BrowserAttributes(txn.Attrs), encodingKey) + if err != nil { + return nil, fmt.Errorf("error getting browser attributes: %v", err) + } + + name, err := internal.Obfuscate([]byte(txn.FinalName), encodingKey) + if err != nil { + return nil, fmt.Errorf("error obfuscating name: %v", err) + } + + return &BrowserTimingHeader{ + agentLoader: txn.Reply.AgentLoader, + info: browserInfo{ + Beacon: txn.Reply.Beacon, + LicenseKey: txn.Reply.BrowserKey, + ApplicationID: txn.Reply.AppID, + TransactionName: name, + QueueTimeMillis: txn.Queuing.Nanoseconds() / (1000 * 1000), + ApplicationTimeMillis: time.Now().Sub(txn.Start).Nanoseconds() / (1000 * 1000), + ObfuscatedAttributes: attrs, + ErrorBeacon: txn.Reply.ErrorBeacon, + Agent: txn.Reply.JSAgentFile, + }, + }, nil +} + +func createThread(txn *txn) *internal.Thread { + newThread := internal.NewThread(&txn.TxnData) + txn.asyncThreads = append(txn.asyncThreads, newThread) + return newThread +} + +func (thd *thread) NewGoroutine() Transaction { + txn := thd.txn + txn.Lock() + defer txn.Unlock() + + if txn.finished { + // If the transaction has finished, return the same thread. 
+ return upgradeTxn(thd) + } + return upgradeTxn(&thread{ + thread: createThread(txn), + txn: txn, + }) +} + +type segment struct { + start internal.SegmentStartTime + thread *thread +} + +func endSegment(s *Segment) error { + if nil == s { + return nil + } + thd := s.StartTime.thread + if nil == thd { + return nil + } + txn := thd.txn + var err error + txn.Lock() + if txn.finished { + err = errAlreadyEnded + } else { + err = internal.EndBasicSegment(&txn.TxnData, thd.thread, s.StartTime.start, time.Now(), s.Name) + } + txn.Unlock() + return err +} + +func endDatastore(s *DatastoreSegment) error { + if nil == s { + return nil + } + thd := s.StartTime.thread + if nil == thd { + return nil + } + txn := thd.txn + txn.Lock() + defer txn.Unlock() + + if txn.finished { + return errAlreadyEnded + } + if txn.Config.HighSecurity { + s.QueryParameters = nil + } + if !txn.Config.DatastoreTracer.QueryParameters.Enabled { + s.QueryParameters = nil + } + if txn.Reply.SecurityPolicies.RecordSQL.IsSet() { + s.QueryParameters = nil + if !txn.Reply.SecurityPolicies.RecordSQL.Enabled() { + s.ParameterizedQuery = "" + } + } + if !txn.Config.DatastoreTracer.DatabaseNameReporting.Enabled { + s.DatabaseName = "" + } + if !txn.Config.DatastoreTracer.InstanceReporting.Enabled { + s.Host = "" + s.PortPathOrID = "" + } + return internal.EndDatastoreSegment(internal.EndDatastoreParams{ + TxnData: &txn.TxnData, + Thread: thd.thread, + Start: s.StartTime.start, + Now: time.Now(), + Product: string(s.Product), + Collection: s.Collection, + Operation: s.Operation, + ParameterizedQuery: s.ParameterizedQuery, + QueryParameters: s.QueryParameters, + Host: s.Host, + PortPathOrID: s.PortPathOrID, + Database: s.DatabaseName, + }) +} + +func externalSegmentMethod(s *ExternalSegment) string { + if "" != s.Procedure { + return s.Procedure + } + r := s.Request + if nil != s.Response && nil != s.Response.Request { + r = s.Response.Request + } + + if nil != r { + if "" != r.Method { + return r.Method + } + // Golang's http package states that when a client's Request has + // an empty string for Method, the method is GET. + return "GET" + } + + return "" +} + +func externalSegmentURL(s *ExternalSegment) (*url.URL, error) { + if "" != s.URL { + return url.Parse(s.URL) + } + r := s.Request + if nil != s.Response && nil != s.Response.Request { + r = s.Response.Request + } + if r != nil { + return r.URL, nil + } + return nil, nil +} + +func endExternal(s *ExternalSegment) error { + if nil == s { + return nil + } + thd := s.StartTime.thread + if nil == thd { + return nil + } + txn := thd.txn + txn.Lock() + defer txn.Unlock() + + if txn.finished { + return errAlreadyEnded + } + u, err := externalSegmentURL(s) + if nil != err { + return err + } + return internal.EndExternalSegment(internal.EndExternalParams{ + TxnData: &txn.TxnData, + Thread: thd.thread, + Start: s.StartTime.start, + Now: time.Now(), + Logger: txn.Config.Logger, + Response: s.Response, + URL: u, + Host: s.Host, + Library: s.Library, + Method: externalSegmentMethod(s), + }) +} + +// oldCATOutboundHeaders generates the Old CAT and Synthetics headers, depending +// on whether Old CAT is enabled or any Synthetics functionality has been +// triggered in the agent. 
+func oldCATOutboundHeaders(txn *txn) http.Header { + txn.Lock() + defer txn.Unlock() + + if txn.finished { + return http.Header{} + } + + metadata, err := txn.CrossProcess.CreateCrossProcessMetadata(txn.Name, txn.Config.AppName) + if err != nil { + txn.Config.Logger.Debug("error generating outbound headers", map[string]interface{}{ + "error": err, + }) + + // It's possible for CreateCrossProcessMetadata() to error and still have a + // Synthetics header, so we'll still fall through to returning headers + // based on whatever metadata was returned. + } + + return internal.MetadataToHTTPHeader(metadata) +} + +func outboundHeaders(s *ExternalSegment) http.Header { + thd := s.StartTime.thread + + if nil == thd { + return http.Header{} + } + txn := thd.txn + hdr := oldCATOutboundHeaders(txn) + + // hdr may be empty, or it may contain headers. If DistributedTracer + // is enabled, add more to the existing hdr + if p := thd.CreateDistributedTracePayload().HTTPSafe(); "" != p { + hdr.Add(DistributedTracePayloadHeader, p) + return hdr + } + + return hdr +} + +const ( + maxSampledDistributedPayloads = 35 +) + +type shimPayload struct{} + +func (s shimPayload) Text() string { return "" } +func (s shimPayload) HTTPSafe() string { return "" } + +func (thd *thread) CreateDistributedTracePayload() (payload DistributedTracePayload) { + payload = shimPayload{} + + txn := thd.txn + txn.Lock() + defer txn.Unlock() + + if !txn.BetterCAT.Enabled { + return + } + + if txn.finished { + txn.CreatePayloadException = true + return + } + + if "" == txn.Reply.AccountID || "" == txn.Reply.TrustedAccountKey { + // We can't create a payload: The application is not yet + // connected or serverless distributed tracing configuration was + // not provided. + return + } + + txn.numPayloadsCreated++ + + var p internal.Payload + p.Type = internal.CallerType + p.Account = txn.Reply.AccountID + + p.App = txn.Reply.PrimaryAppID + p.TracedID = txn.BetterCAT.TraceID() + p.Priority = txn.BetterCAT.Priority + p.Timestamp.Set(time.Now()) + p.TransactionID = txn.BetterCAT.ID // Set the transaction ID to the transaction guid. + + if txn.Reply.AccountID != txn.Reply.TrustedAccountKey { + p.TrustedAccountKey = txn.Reply.TrustedAccountKey + } + + sampled := txn.lazilyCalculateSampled() + if sampled && txn.SpanEventsEnabled { + p.ID = txn.CurrentSpanIdentifier(thd.thread) + } + + // limit the number of outbound sampled=true payloads to prevent too + // many downstream sampled events. 
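+	// After maxSampledDistributedPayloads payloads have been created for
+	// this transaction, any further payloads are sent with sampled=false.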
+	p.SetSampled(false)
+	if txn.numPayloadsCreated < maxSampledDistributedPayloads {
+		p.SetSampled(sampled)
+	}
+
+	txn.CreatePayloadSuccess = true
+
+	payload = p
+	return
+}
+
+var (
+	errOutboundPayloadCreated   = errors.New("outbound payload already created")
+	errAlreadyAccepted          = errors.New("AcceptDistributedTracePayload has already been called")
+	errInboundPayloadDTDisabled = errors.New("DistributedTracer must be enabled to accept an inbound payload")
+	errTrustedAccountKey        = errors.New("trusted account key missing or does not match")
+)
+
+func (txn *txn) AcceptDistributedTracePayload(t TransportType, p interface{}) error {
+	txn.Lock()
+	defer txn.Unlock()
+
+	return txn.acceptDistributedTracePayloadLocked(t, p)
+}
+
+func (txn *txn) acceptDistributedTracePayloadLocked(t TransportType, p interface{}) error {
+
+	if !txn.BetterCAT.Enabled {
+		return errInboundPayloadDTDisabled
+	}
+
+	if txn.finished {
+		txn.AcceptPayloadException = true
+		return errAlreadyEnded
+	}
+
+	if txn.numPayloadsCreated > 0 {
+		txn.AcceptPayloadCreateBeforeAccept = true
+		return errOutboundPayloadCreated
+	}
+
+	if txn.BetterCAT.Inbound != nil {
+		txn.AcceptPayloadIgnoredMultiple = true
+		return errAlreadyAccepted
+	}
+
+	if nil == p {
+		txn.AcceptPayloadNullPayload = true
+		return nil
+	}
+
+	if "" == txn.Reply.AccountID || "" == txn.Reply.TrustedAccountKey {
+		// We can't accept a payload: The application is not yet
+		// connected or serverless distributed tracing configuration was
+		// not provided.
+		return nil
+	}
+
+	payload, err := internal.AcceptPayload(p)
+	if nil != err {
+		if _, ok := err.(internal.ErrPayloadParse); ok {
+			txn.AcceptPayloadParseException = true
+		} else if _, ok := err.(internal.ErrUnsupportedPayloadVersion); ok {
+			txn.AcceptPayloadIgnoredVersion = true
+		} else if _, ok := err.(internal.ErrPayloadMissingField); ok {
+			txn.AcceptPayloadParseException = true
+		} else {
+			txn.AcceptPayloadException = true
+		}
+		return err
+	}
+
+	if nil == payload {
+		return nil
+	}
+
+	// Now that we have a parsed and allocated payload, make sure it has
+	// the required fields.
+	if err := payload.IsValid(); nil != err {
+		txn.AcceptPayloadParseException = true
+		return err
+	}
+
+	// Check the trusted account key, falling back to the account ID when
+	// the payload does not carry an explicit trust key.
+	receivedTrustKey := payload.TrustedAccountKey
+	if "" == receivedTrustKey {
+		receivedTrustKey = payload.Account
+	}
+	if receivedTrustKey != txn.Reply.TrustedAccountKey {
+		txn.AcceptPayloadUntrustedAccount = true
+		return errTrustedAccountKey
+	}
+
+	if 0 != payload.Priority {
+		txn.BetterCAT.Priority = payload.Priority
+	}
+
+	// A nil payload.Sampled means the field wasn't provided.
+	if nil != payload.Sampled {
+		txn.BetterCAT.Sampled = *payload.Sampled
+		txn.sampledCalculated = true
+	}
+
+	txn.BetterCAT.Inbound = payload
+
+	// TransportType's name field is not mutable outside of its package,
+	// so the only check needed is whether the caller supplied an empty
+	// TransportType.
+	txn.BetterCAT.Inbound.TransportType = t.name
+	if t.name == "" {
+		txn.BetterCAT.Inbound.TransportType = TransportUnknown.name
+		txn.Config.Logger.Debug("Invalid transport type, defaulting to Unknown", map[string]interface{}{})
+	}
+
+	if tm := payload.Timestamp.Time(); txn.Start.After(tm) {
+		txn.BetterCAT.Inbound.TransportDuration = txn.Start.Sub(tm)
+	}
+
+	txn.AcceptPayloadSuccess = true
+
+	return nil
+}
+
+func (txn *txn) Application() Application {
+	return txn.app
+}
+
+func (thd *thread) AddAgentSpanAttribute(key internal.SpanAttribute, val string) {
+	thd.thread.AddAgentSpanAttribute(key,
val)
+}
+
+var (
+	// Ensure that txn implements AddAgentAttributer to avoid breaking
+	// integration package type assertions.
+	_ internal.AddAgentAttributer = &txn{}
+)
+
+func (txn *txn) AddAgentAttribute(id internal.AgentAttributeID, stringVal string, otherVal interface{}) {
+	txn.Lock()
+	defer txn.Unlock()
+
+	if txn.finished {
+		return
+	}
+	txn.Attrs.Agent.Add(id, stringVal, otherVal)
+}
+
+func (thd *thread) GetTraceMetadata() (metadata TraceMetadata) {
+	txn := thd.txn
+	txn.Lock()
+	defer txn.Unlock()
+
+	if txn.finished {
+		return
+	}
+
+	if txn.BetterCAT.Enabled {
+		metadata.TraceID = txn.BetterCAT.TraceID()
+		if txn.SpanEventsEnabled && txn.lazilyCalculateSampled() {
+			metadata.SpanID = txn.CurrentSpanIdentifier(thd.thread)
+		}
+	}
+
+	return
+}
+
+func (thd *thread) GetLinkingMetadata() (metadata LinkingMetadata) {
+	txn := thd.txn
+	metadata.EntityName = txn.appRun.firstAppName
+	metadata.EntityType = "SERVICE"
+	metadata.EntityGUID = txn.appRun.Reply.EntityGUID
+	metadata.Hostname = internal.ThisHost
+
+	md := thd.GetTraceMetadata()
+	metadata.TraceID = md.TraceID
+	metadata.SpanID = md.SpanID
+
+	return
+}
diff --git a/vendor/github.com/newrelic/go-agent/log.go b/vendor/github.com/newrelic/go-agent/log.go
new file mode 100644
index 00000000000..aa2daf62c0c
--- /dev/null
+++ b/vendor/github.com/newrelic/go-agent/log.go
@@ -0,0 +1,32 @@
+package newrelic
+
+import (
+	"io"
+
+	"github.com/newrelic/go-agent/internal/logger"
+)
+
+// Logger is the interface that is used for logging in the go-agent. Assign the
+// Config.Logger field to the Logger you wish to use. Loggers must be safe for
+// use in multiple goroutines. Two Logger implementations are included:
+// NewLogger, which logs at info level, and NewDebugLogger, which logs at debug
+// level. logrus and logxi are supported by the integration packages
+// https://godoc.org/github.com/newrelic/go-agent/_integrations/nrlogrus and
+// https://godoc.org/github.com/newrelic/go-agent/_integrations/nrlogxi/v1.
+type Logger interface {
+	Error(msg string, context map[string]interface{})
+	Warn(msg string, context map[string]interface{})
+	Info(msg string, context map[string]interface{})
+	Debug(msg string, context map[string]interface{})
+	DebugEnabled() bool
+}
+
+// NewLogger creates a basic Logger at info level.
+func NewLogger(w io.Writer) Logger {
+	return logger.New(w, false)
+}
+
+// NewDebugLogger creates a basic Logger at debug level.
+func NewDebugLogger(w io.Writer) Logger {
+	return logger.New(w, true)
+}
diff --git a/vendor/github.com/newrelic/go-agent/segments.go b/vendor/github.com/newrelic/go-agent/segments.go
new file mode 100644
index 00000000000..d1a1d2a3c99
--- /dev/null
+++ b/vendor/github.com/newrelic/go-agent/segments.go
@@ -0,0 +1,167 @@
+package newrelic
+
+import (
+	"net/http"
+)
+
+// SegmentStartTime is created by Transaction.StartSegmentNow and marks the
+// beginning of a segment. A segment with a zero-valued SegmentStartTime may
+// safely be ended.
+type SegmentStartTime struct{ segment }
+
+// Segment is used to instrument functions, methods, and blocks of code. The
+// easiest way to use Segment is the StartSegment function.
+type Segment struct {
+	StartTime SegmentStartTime
+	Name      string
+}
+
+// DatastoreSegment is used to instrument calls to databases and object stores.
+type DatastoreSegment struct {
+	// StartTime should be assigned using StartSegmentNow before each datastore
+	// call is made.
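+	//
+	// A minimal sketch (txn is an existing Transaction; the product,
+	// collection, and operation values are illustrative):
+	//
+	//	s := newrelic.DatastoreSegment{
+	//		StartTime:  newrelic.StartSegmentNow(txn),
+	//		Product:    newrelic.DatastoreMySQL,
+	//		Collection: "users",
+	//		Operation:  "SELECT",
+	//	}
+	//	// ... make the datastore call ...
+	//	s.End()
+	//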
+	StartTime SegmentStartTime
+
+	// Product, Collection, and Operation are highly recommended as they are
+	// used for aggregate metrics:
+	//
+	// Product is the datastore type. See the constants in
+	// https://github.com/newrelic/go-agent/blob/master/datastore.go. Product
+	// is one of the fields primarily responsible for the grouping of Datastore
+	// metrics.
+	Product DatastoreProduct
+	// Collection is the table or group being operated upon in the datastore,
+	// e.g. "users_table". This becomes the db.collection attribute on Span
+	// events and Transaction Trace segments. Collection is one of the fields
+	// primarily responsible for the grouping of Datastore metrics.
+	Collection string
+	// Operation is the relevant action, e.g. "SELECT" or "GET". Operation is
+	// one of the fields primarily responsible for the grouping of Datastore
+	// metrics.
+	Operation string
+
+	// The following fields are used for extra metrics and added to instance
+	// data:
+	//
+	// ParameterizedQuery may be set to the query being performed. It must
+	// not contain any raw parameters, only placeholders.
+	ParameterizedQuery string
+	// QueryParameters may be used to provide query parameters. Care should
+	// be taken to only provide parameters which are not sensitive.
+	// QueryParameters are ignored in high security mode. The keys must contain
+	// fewer than 255 bytes. The values must be numbers, strings, or
+	// booleans.
+	QueryParameters map[string]interface{}
+	// Host is the name of the server hosting the datastore.
+	Host string
+	// PortPathOrID can represent either the port, path, or id of the
+	// datastore being connected to.
+	PortPathOrID string
+	// DatabaseName is the name of the database instance where the current
+	// query is being executed. This becomes the db.instance attribute on
+	// Span events and Transaction Trace segments.
+	DatabaseName string
+}
+
+// ExternalSegment instruments external calls. StartExternalSegment is the
+// recommended way to create ExternalSegments.
+type ExternalSegment struct {
+	StartTime SegmentStartTime
+	Request   *http.Request
+	Response  *http.Response
+
+	// URL is an optional field which can be populated in lieu of Request if
+	// you don't have an http.Request. Either URL or Request must be
+	// populated. If both are populated then Request information takes
+	// priority. URL is parsed using url.Parse so it must include the
+	// protocol scheme (eg. "http://").
+	URL string
+	// Host is an optional field that is automatically populated from the
+	// Request or URL. It is used for external metrics, transaction trace
+	// segment names, and span event names. Use this field to override the
+	// host in the URL or Request. This field does not override the host in
+	// the "http.url" attribute.
+	Host string
+	// Procedure is an optional field that can be set to the remote
+	// procedure being called. If set, this value will be used in metrics,
+	// transaction trace segment names, and span event names. If unset, the
+	// request's http method is used.
+	Procedure string
+	// Library is an optional field that defaults to "http". It is used for
+	// external metrics and the "component" span attribute. It should be
+	// the framework making the external call.
+	Library string
+}
+
+// End finishes the segment.
+func (s *Segment) End() error { return endSegment(s) }
+
+// End finishes the datastore segment.
+func (s *DatastoreSegment) End() error { return endDatastore(s) }
+
+// End finishes the external segment.
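+//
+// A typical sequence is sketched below (txn and req are assumed to already
+// exist; any *http.Client works in place of http.DefaultClient):
+//
+//	s := newrelic.StartExternalSegment(txn, req)
+//	resp, err := http.DefaultClient.Do(req)
+//	s.Response = resp
+//	s.End()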
+func (s *ExternalSegment) End() error { return endExternal(s) } + +// OutboundHeaders returns the headers that should be attached to the external +// request. +func (s *ExternalSegment) OutboundHeaders() http.Header { + return outboundHeaders(s) +} + +// StartSegmentNow starts timing a segment. This function is recommended over +// Transaction.StartSegmentNow() because it is nil safe. +func StartSegmentNow(txn Transaction) SegmentStartTime { + if nil != txn { + return txn.StartSegmentNow() + } + return SegmentStartTime{} +} + +// StartSegment makes it easy to instrument segments. To time a function, do +// the following: +// +// func timeMe(txn newrelic.Transaction) { +// defer newrelic.StartSegment(txn, "timeMe").End() +// // ... function code here ... +// } +// +// To time a block of code, do the following: +// +// segment := StartSegment(txn, "myBlock") +// // ... code you want to time here ... +// segment.End() +// +func StartSegment(txn Transaction, name string) *Segment { + return &Segment{ + StartTime: StartSegmentNow(txn), + Name: name, + } +} + +// StartExternalSegment starts the instrumentation of an external call and adds +// distributed tracing headers to the request. If the Transaction parameter is +// nil then StartExternalSegment will look for a Transaction in the request's +// context using FromContext. +// +// Using the same http.Client for all of your external requests? Check out +// NewRoundTripper: You may not need to use StartExternalSegment at all! +// +func StartExternalSegment(txn Transaction, request *http.Request) *ExternalSegment { + if nil == txn { + txn = transactionFromRequestContext(request) + } + s := &ExternalSegment{ + StartTime: StartSegmentNow(txn), + Request: request, + } + + if request != nil && request.Header != nil { + for key, values := range s.OutboundHeaders() { + for _, value := range values { + request.Header.Add(key, value) + } + } + } + + return s +} diff --git a/vendor/github.com/newrelic/go-agent/sql_driver.go b/vendor/github.com/newrelic/go-agent/sql_driver.go new file mode 100644 index 00000000000..6a0312bb829 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/sql_driver.go @@ -0,0 +1,268 @@ +// +build go1.10 + +package newrelic + +import ( + "context" + "database/sql/driver" +) + +// SQLDriverSegmentBuilder populates DatastoreSegments for sql.Driver +// instrumentation. Use this to instrument a database that is not supported by +// an existing integration package (nrmysql, nrpq, and nrsqlite3). See +// https://github.com/newrelic/go-agent/blob/master/_integrations/nrmysql/nrmysql.go +// for example use. +type SQLDriverSegmentBuilder struct { + BaseSegment DatastoreSegment + ParseQuery func(segment *DatastoreSegment, query string) + ParseDSN func(segment *DatastoreSegment, dataSourceName string) +} + +// InstrumentSQLDriver wraps a driver.Driver, adding instrumentation for exec +// and query calls made with a transaction-containing context. Use this to +// instrument a database driver that is not supported by an existing integration +// package (nrmysql, nrpq, and nrsqlite3). See +// https://github.com/newrelic/go-agent/blob/master/_integrations/nrmysql/nrmysql.go +// for example use. +func InstrumentSQLDriver(d driver.Driver, bld SQLDriverSegmentBuilder) driver.Driver { + return optionalMethodsDriver(&wrapDriver{bld: bld, original: d}) +} + +// InstrumentSQLConnector wraps a driver.Connector, adding instrumentation for +// exec and query calls made with a transaction-containing context. 
Use this to +// instrument a database connector that is not supported by an existing +// integration package (nrmysql, nrpq, and nrsqlite3). See +// https://github.com/newrelic/go-agent/blob/master/_integrations/nrmysql/nrmysql.go +// for example use. +func InstrumentSQLConnector(connector driver.Connector, bld SQLDriverSegmentBuilder) driver.Connector { + return &wrapConnector{original: connector, bld: bld} +} + +func (bld SQLDriverSegmentBuilder) useDSN(dsn string) SQLDriverSegmentBuilder { + if f := bld.ParseDSN; nil != f { + f(&bld.BaseSegment, dsn) + } + return bld +} + +func (bld SQLDriverSegmentBuilder) useQuery(query string) SQLDriverSegmentBuilder { + if f := bld.ParseQuery; nil != f { + f(&bld.BaseSegment, query) + } + return bld +} + +func (bld SQLDriverSegmentBuilder) startSegment(ctx context.Context) DatastoreSegment { + segment := bld.BaseSegment + segment.StartTime = StartSegmentNow(FromContext(ctx)) + return segment +} + +type wrapDriver struct { + bld SQLDriverSegmentBuilder + original driver.Driver +} + +type wrapConnector struct { + bld SQLDriverSegmentBuilder + original driver.Connector +} + +type wrapConn struct { + bld SQLDriverSegmentBuilder + original driver.Conn +} + +type wrapStmt struct { + bld SQLDriverSegmentBuilder + original driver.Stmt +} + +func (w *wrapDriver) Open(name string) (driver.Conn, error) { + original, err := w.original.Open(name) + if err != nil { + return nil, err + } + return optionalMethodsConn(&wrapConn{ + original: original, + bld: w.bld.useDSN(name), + }), nil +} + +// OpenConnector implements DriverContext. +func (w *wrapDriver) OpenConnector(name string) (driver.Connector, error) { + original, err := w.original.(driver.DriverContext).OpenConnector(name) + if err != nil { + return nil, err + } + return &wrapConnector{ + original: original, + bld: w.bld.useDSN(name), + }, nil +} + +func (w *wrapConnector) Connect(ctx context.Context) (driver.Conn, error) { + original, err := w.original.Connect(ctx) + if nil != err { + return nil, err + } + return optionalMethodsConn(&wrapConn{ + bld: w.bld, + original: original, + }), nil +} + +func (w *wrapConnector) Driver() driver.Driver { + return optionalMethodsDriver(&wrapDriver{ + bld: w.bld, + original: w.original.Driver(), + }) +} + +func prepare(original driver.Stmt, err error, bld SQLDriverSegmentBuilder, query string) (driver.Stmt, error) { + if nil != err { + return nil, err + } + return optionalMethodsStmt(&wrapStmt{ + bld: bld.useQuery(query), + original: original, + }), nil +} + +func (w *wrapConn) Prepare(query string) (driver.Stmt, error) { + original, err := w.original.Prepare(query) + return prepare(original, err, w.bld, query) +} + +// PrepareContext implements ConnPrepareContext. +func (w *wrapConn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) { + original, err := w.original.(driver.ConnPrepareContext).PrepareContext(ctx, query) + return prepare(original, err, w.bld, query) +} + +func (w *wrapConn) Close() error { + return w.original.Close() +} + +func (w *wrapConn) Begin() (driver.Tx, error) { + return w.original.Begin() +} + +// BeginTx implements ConnBeginTx. +func (w *wrapConn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) { + return w.original.(driver.ConnBeginTx).BeginTx(ctx, opts) +} + +// Exec implements Execer. +func (w *wrapConn) Exec(query string, args []driver.Value) (driver.Result, error) { + return w.original.(driver.Execer).Exec(query, args) +} + +// ExecContext implements ExecerContext. 
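+// When the wrapped driver returns driver.ErrSkip, database/sql falls back to
+// the prepared-statement path, so the segment is not ended here (an un-ended
+// segment records nothing); that execution is timed by the wrapStmt methods
+// instead.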
+func (w *wrapConn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) { + segment := w.bld.useQuery(query).startSegment(ctx) + result, err := w.original.(driver.ExecerContext).ExecContext(ctx, query, args) + if err != driver.ErrSkip { + segment.End() + } + return result, err +} + +// CheckNamedValue implements NamedValueChecker. +func (w *wrapConn) CheckNamedValue(v *driver.NamedValue) error { + return w.original.(driver.NamedValueChecker).CheckNamedValue(v) +} + +// Ping implements Pinger. +func (w *wrapConn) Ping(ctx context.Context) error { + return w.original.(driver.Pinger).Ping(ctx) +} + +func (w *wrapConn) Query(query string, args []driver.Value) (driver.Rows, error) { + return w.original.(driver.Queryer).Query(query, args) +} + +// QueryContext implements QueryerContext. +func (w *wrapConn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) { + segment := w.bld.useQuery(query).startSegment(ctx) + rows, err := w.original.(driver.QueryerContext).QueryContext(ctx, query, args) + if err != driver.ErrSkip { + segment.End() + } + return rows, err +} + +// ResetSession implements SessionResetter. +func (w *wrapConn) ResetSession(ctx context.Context) error { + return w.original.(driver.SessionResetter).ResetSession(ctx) +} + +func (w *wrapStmt) Close() error { + return w.original.Close() +} + +func (w *wrapStmt) NumInput() int { + return w.original.NumInput() +} + +func (w *wrapStmt) Exec(args []driver.Value) (driver.Result, error) { + return w.original.Exec(args) +} + +func (w *wrapStmt) Query(args []driver.Value) (driver.Rows, error) { + return w.original.Query(args) +} + +// ColumnConverter implements ColumnConverter. +func (w *wrapStmt) ColumnConverter(idx int) driver.ValueConverter { + return w.original.(driver.ColumnConverter).ColumnConverter(idx) +} + +// CheckNamedValue implements NamedValueChecker. +func (w *wrapStmt) CheckNamedValue(v *driver.NamedValue) error { + return w.original.(driver.NamedValueChecker).CheckNamedValue(v) +} + +// ExecContext implements StmtExecContext. +func (w *wrapStmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) { + segment := w.bld.startSegment(ctx) + result, err := w.original.(driver.StmtExecContext).ExecContext(ctx, args) + segment.End() + return result, err +} + +// QueryContext implements StmtQueryContext. 
+func (w *wrapStmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) { + segment := w.bld.startSegment(ctx) + rows, err := w.original.(driver.StmtQueryContext).QueryContext(ctx, args) + segment.End() + return rows, err +} + +var ( + _ interface { + driver.Driver + driver.DriverContext + } = &wrapDriver{} + _ interface { + driver.Connector + } = &wrapConnector{} + _ interface { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.Execer + driver.ExecerContext + driver.NamedValueChecker + driver.Pinger + driver.Queryer + driver.QueryerContext + } = &wrapConn{} + _ interface { + driver.Stmt + driver.ColumnConverter + driver.NamedValueChecker + driver.StmtExecContext + driver.StmtQueryContext + } = &wrapStmt{} +) diff --git a/vendor/github.com/newrelic/go-agent/sql_driver_optional_methods.go b/vendor/github.com/newrelic/go-agent/sql_driver_optional_methods.go new file mode 100644 index 00000000000..6663835a96f --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/sql_driver_optional_methods.go @@ -0,0 +1,2240 @@ +// +build go1.10 + +package newrelic + +import "database/sql/driver" + +func optionalMethodsDriver(dv *wrapDriver) driver.Driver { + // GENERATED CODE DO NOT MODIFY + // This code generated by internal/tools/interface-wrapping + var ( + i0 int32 = 1 << 0 + ) + var interfaceSet int32 + if _, ok := dv.original.(driver.DriverContext); ok { + interfaceSet |= i0 + } + switch interfaceSet { + default: // No optional interfaces implemented + return struct { + driver.Driver + }{dv} + case i0: + return struct { + driver.Driver + driver.DriverContext + }{dv, dv} + } +} + +func optionalMethodsStmt(stmt *wrapStmt) driver.Stmt { + // GENERATED CODE DO NOT MODIFY + // This code generated by internal/tools/interface-wrapping + var ( + i0 int32 = 1 << 0 + i1 int32 = 1 << 1 + i2 int32 = 1 << 2 + i3 int32 = 1 << 3 + ) + var interfaceSet int32 + if _, ok := stmt.original.(driver.ColumnConverter); ok { + interfaceSet |= i0 + } + if _, ok := stmt.original.(driver.NamedValueChecker); ok { + interfaceSet |= i1 + } + if _, ok := stmt.original.(driver.StmtExecContext); ok { + interfaceSet |= i2 + } + if _, ok := stmt.original.(driver.StmtQueryContext); ok { + interfaceSet |= i3 + } + switch interfaceSet { + default: // No optional interfaces implemented + return struct { + driver.Stmt + }{stmt} + case i0: + return struct { + driver.Stmt + driver.ColumnConverter + }{stmt, stmt} + case i1: + return struct { + driver.Stmt + driver.NamedValueChecker + }{stmt, stmt} + case i0 | i1: + return struct { + driver.Stmt + driver.ColumnConverter + driver.NamedValueChecker + }{stmt, stmt, stmt} + case i2: + return struct { + driver.Stmt + driver.StmtExecContext + }{stmt, stmt} + case i0 | i2: + return struct { + driver.Stmt + driver.ColumnConverter + driver.StmtExecContext + }{stmt, stmt, stmt} + case i1 | i2: + return struct { + driver.Stmt + driver.NamedValueChecker + driver.StmtExecContext + }{stmt, stmt, stmt} + case i0 | i1 | i2: + return struct { + driver.Stmt + driver.ColumnConverter + driver.NamedValueChecker + driver.StmtExecContext + }{stmt, stmt, stmt, stmt} + case i3: + return struct { + driver.Stmt + driver.StmtQueryContext + }{stmt, stmt} + case i0 | i3: + return struct { + driver.Stmt + driver.ColumnConverter + driver.StmtQueryContext + }{stmt, stmt, stmt} + case i1 | i3: + return struct { + driver.Stmt + driver.NamedValueChecker + driver.StmtQueryContext + }{stmt, stmt, stmt} + case i0 | i1 | i3: + return struct { + driver.Stmt + driver.ColumnConverter + 
driver.NamedValueChecker + driver.StmtQueryContext + }{stmt, stmt, stmt, stmt} + case i2 | i3: + return struct { + driver.Stmt + driver.StmtExecContext + driver.StmtQueryContext + }{stmt, stmt, stmt} + case i0 | i2 | i3: + return struct { + driver.Stmt + driver.ColumnConverter + driver.StmtExecContext + driver.StmtQueryContext + }{stmt, stmt, stmt, stmt} + case i1 | i2 | i3: + return struct { + driver.Stmt + driver.NamedValueChecker + driver.StmtExecContext + driver.StmtQueryContext + }{stmt, stmt, stmt, stmt} + case i0 | i1 | i2 | i3: + return struct { + driver.Stmt + driver.ColumnConverter + driver.NamedValueChecker + driver.StmtExecContext + driver.StmtQueryContext + }{stmt, stmt, stmt, stmt, stmt} + } +} + +func optionalMethodsConn(conn *wrapConn) driver.Conn { + // GENERATED CODE DO NOT MODIFY + // This code generated by internal/tools/interface-wrapping + var ( + i0 int32 = 1 << 0 + i1 int32 = 1 << 1 + i2 int32 = 1 << 2 + i3 int32 = 1 << 3 + i4 int32 = 1 << 4 + i5 int32 = 1 << 5 + i6 int32 = 1 << 6 + i7 int32 = 1 << 7 + ) + var interfaceSet int32 + if _, ok := conn.original.(driver.ConnBeginTx); ok { + interfaceSet |= i0 + } + if _, ok := conn.original.(driver.ConnPrepareContext); ok { + interfaceSet |= i1 + } + if _, ok := conn.original.(driver.Execer); ok { + interfaceSet |= i2 + } + if _, ok := conn.original.(driver.ExecerContext); ok { + interfaceSet |= i3 + } + if _, ok := conn.original.(driver.NamedValueChecker); ok { + interfaceSet |= i4 + } + if _, ok := conn.original.(driver.Pinger); ok { + interfaceSet |= i5 + } + if _, ok := conn.original.(driver.Queryer); ok { + interfaceSet |= i6 + } + if _, ok := conn.original.(driver.QueryerContext); ok { + interfaceSet |= i7 + } + switch interfaceSet { + default: // No optional interfaces implemented + return struct { + driver.Conn + }{conn} + case i0: + return struct { + driver.Conn + driver.ConnBeginTx + }{conn, conn} + case i1: + return struct { + driver.Conn + driver.ConnPrepareContext + }{conn, conn} + case i0 | i1: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + }{conn, conn, conn} + case i2: + return struct { + driver.Conn + driver.Execer + }{conn, conn} + case i0 | i2: + return struct { + driver.Conn + driver.ConnBeginTx + driver.Execer + }{conn, conn, conn} + case i1 | i2: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.Execer + }{conn, conn, conn} + case i0 | i1 | i2: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.Execer + }{conn, conn, conn, conn} + case i3: + return struct { + driver.Conn + driver.ExecerContext + }{conn, conn} + case i0 | i3: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ExecerContext + }{conn, conn, conn} + case i1 | i3: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.ExecerContext + }{conn, conn, conn} + case i0 | i1 | i3: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.ExecerContext + }{conn, conn, conn, conn} + case i2 | i3: + return struct { + driver.Conn + driver.Execer + driver.ExecerContext + }{conn, conn, conn} + case i0 | i2 | i3: + return struct { + driver.Conn + driver.ConnBeginTx + driver.Execer + driver.ExecerContext + }{conn, conn, conn, conn} + case i1 | i2 | i3: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.Execer + driver.ExecerContext + }{conn, conn, conn, conn} + case i0 | i1 | i2 | i3: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.Execer + 
driver.ExecerContext + }{conn, conn, conn, conn, conn} + case i4: + return struct { + driver.Conn + driver.NamedValueChecker + }{conn, conn} + case i0 | i4: + return struct { + driver.Conn + driver.ConnBeginTx + driver.NamedValueChecker + }{conn, conn, conn} + case i1 | i4: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.NamedValueChecker + }{conn, conn, conn} + case i0 | i1 | i4: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.NamedValueChecker + }{conn, conn, conn, conn} + case i2 | i4: + return struct { + driver.Conn + driver.Execer + driver.NamedValueChecker + }{conn, conn, conn} + case i0 | i2 | i4: + return struct { + driver.Conn + driver.ConnBeginTx + driver.Execer + driver.NamedValueChecker + }{conn, conn, conn, conn} + case i1 | i2 | i4: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.Execer + driver.NamedValueChecker + }{conn, conn, conn, conn} + case i0 | i1 | i2 | i4: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.Execer + driver.NamedValueChecker + }{conn, conn, conn, conn, conn} + case i3 | i4: + return struct { + driver.Conn + driver.ExecerContext + driver.NamedValueChecker + }{conn, conn, conn} + case i0 | i3 | i4: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ExecerContext + driver.NamedValueChecker + }{conn, conn, conn, conn} + case i1 | i3 | i4: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.ExecerContext + driver.NamedValueChecker + }{conn, conn, conn, conn} + case i0 | i1 | i3 | i4: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.ExecerContext + driver.NamedValueChecker + }{conn, conn, conn, conn, conn} + case i2 | i3 | i4: + return struct { + driver.Conn + driver.Execer + driver.ExecerContext + driver.NamedValueChecker + }{conn, conn, conn, conn} + case i0 | i2 | i3 | i4: + return struct { + driver.Conn + driver.ConnBeginTx + driver.Execer + driver.ExecerContext + driver.NamedValueChecker + }{conn, conn, conn, conn, conn} + case i1 | i2 | i3 | i4: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.Execer + driver.ExecerContext + driver.NamedValueChecker + }{conn, conn, conn, conn, conn} + case i0 | i1 | i2 | i3 | i4: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.Execer + driver.ExecerContext + driver.NamedValueChecker + }{conn, conn, conn, conn, conn, conn} + case i5: + return struct { + driver.Conn + driver.Pinger + }{conn, conn} + case i0 | i5: + return struct { + driver.Conn + driver.ConnBeginTx + driver.Pinger + }{conn, conn, conn} + case i1 | i5: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.Pinger + }{conn, conn, conn} + case i0 | i1 | i5: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.Pinger + }{conn, conn, conn, conn} + case i2 | i5: + return struct { + driver.Conn + driver.Execer + driver.Pinger + }{conn, conn, conn} + case i0 | i2 | i5: + return struct { + driver.Conn + driver.ConnBeginTx + driver.Execer + driver.Pinger + }{conn, conn, conn, conn} + case i1 | i2 | i5: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.Execer + driver.Pinger + }{conn, conn, conn, conn} + case i0 | i1 | i2 | i5: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.Execer + driver.Pinger + }{conn, conn, conn, conn, conn} + case i3 | i5: + return struct { + driver.Conn + driver.ExecerContext + 
driver.Pinger + }{conn, conn, conn} + case i0 | i3 | i5: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ExecerContext + driver.Pinger + }{conn, conn, conn, conn} + case i1 | i3 | i5: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.ExecerContext + driver.Pinger + }{conn, conn, conn, conn} + case i0 | i1 | i3 | i5: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.ExecerContext + driver.Pinger + }{conn, conn, conn, conn, conn} + case i2 | i3 | i5: + return struct { + driver.Conn + driver.Execer + driver.ExecerContext + driver.Pinger + }{conn, conn, conn, conn} + case i0 | i2 | i3 | i5: + return struct { + driver.Conn + driver.ConnBeginTx + driver.Execer + driver.ExecerContext + driver.Pinger + }{conn, conn, conn, conn, conn} + case i1 | i2 | i3 | i5: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.Execer + driver.ExecerContext + driver.Pinger + }{conn, conn, conn, conn, conn} + case i0 | i1 | i2 | i3 | i5: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.Execer + driver.ExecerContext + driver.Pinger + }{conn, conn, conn, conn, conn, conn} + case i4 | i5: + return struct { + driver.Conn + driver.NamedValueChecker + driver.Pinger + }{conn, conn, conn} + case i0 | i4 | i5: + return struct { + driver.Conn + driver.ConnBeginTx + driver.NamedValueChecker + driver.Pinger + }{conn, conn, conn, conn} + case i1 | i4 | i5: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.NamedValueChecker + driver.Pinger + }{conn, conn, conn, conn} + case i0 | i1 | i4 | i5: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.NamedValueChecker + driver.Pinger + }{conn, conn, conn, conn, conn} + case i2 | i4 | i5: + return struct { + driver.Conn + driver.Execer + driver.NamedValueChecker + driver.Pinger + }{conn, conn, conn, conn} + case i0 | i2 | i4 | i5: + return struct { + driver.Conn + driver.ConnBeginTx + driver.Execer + driver.NamedValueChecker + driver.Pinger + }{conn, conn, conn, conn, conn} + case i1 | i2 | i4 | i5: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.Execer + driver.NamedValueChecker + driver.Pinger + }{conn, conn, conn, conn, conn} + case i0 | i1 | i2 | i4 | i5: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.Execer + driver.NamedValueChecker + driver.Pinger + }{conn, conn, conn, conn, conn, conn} + case i3 | i4 | i5: + return struct { + driver.Conn + driver.ExecerContext + driver.NamedValueChecker + driver.Pinger + }{conn, conn, conn, conn} + case i0 | i3 | i4 | i5: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ExecerContext + driver.NamedValueChecker + driver.Pinger + }{conn, conn, conn, conn, conn} + case i1 | i3 | i4 | i5: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.ExecerContext + driver.NamedValueChecker + driver.Pinger + }{conn, conn, conn, conn, conn} + case i0 | i1 | i3 | i4 | i5: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.ExecerContext + driver.NamedValueChecker + driver.Pinger + }{conn, conn, conn, conn, conn, conn} + case i2 | i3 | i4 | i5: + return struct { + driver.Conn + driver.Execer + driver.ExecerContext + driver.NamedValueChecker + driver.Pinger + }{conn, conn, conn, conn, conn} + case i0 | i2 | i3 | i4 | i5: + return struct { + driver.Conn + driver.ConnBeginTx + driver.Execer + driver.ExecerContext + driver.NamedValueChecker + 
driver.Pinger + }{conn, conn, conn, conn, conn, conn} + case i1 | i2 | i3 | i4 | i5: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.Execer + driver.ExecerContext + driver.NamedValueChecker + driver.Pinger + }{conn, conn, conn, conn, conn, conn} + case i0 | i1 | i2 | i3 | i4 | i5: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.Execer + driver.ExecerContext + driver.NamedValueChecker + driver.Pinger + }{conn, conn, conn, conn, conn, conn, conn} + case i6: + return struct { + driver.Conn + driver.Queryer + }{conn, conn} + case i0 | i6: + return struct { + driver.Conn + driver.ConnBeginTx + driver.Queryer + }{conn, conn, conn} + case i1 | i6: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.Queryer + }{conn, conn, conn} + case i0 | i1 | i6: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.Queryer + }{conn, conn, conn, conn} + case i2 | i6: + return struct { + driver.Conn + driver.Execer + driver.Queryer + }{conn, conn, conn} + case i0 | i2 | i6: + return struct { + driver.Conn + driver.ConnBeginTx + driver.Execer + driver.Queryer + }{conn, conn, conn, conn} + case i1 | i2 | i6: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.Execer + driver.Queryer + }{conn, conn, conn, conn} + case i0 | i1 | i2 | i6: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.Execer + driver.Queryer + }{conn, conn, conn, conn, conn} + case i3 | i6: + return struct { + driver.Conn + driver.ExecerContext + driver.Queryer + }{conn, conn, conn} + case i0 | i3 | i6: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ExecerContext + driver.Queryer + }{conn, conn, conn, conn} + case i1 | i3 | i6: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.ExecerContext + driver.Queryer + }{conn, conn, conn, conn} + case i0 | i1 | i3 | i6: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.ExecerContext + driver.Queryer + }{conn, conn, conn, conn, conn} + case i2 | i3 | i6: + return struct { + driver.Conn + driver.Execer + driver.ExecerContext + driver.Queryer + }{conn, conn, conn, conn} + case i0 | i2 | i3 | i6: + return struct { + driver.Conn + driver.ConnBeginTx + driver.Execer + driver.ExecerContext + driver.Queryer + }{conn, conn, conn, conn, conn} + case i1 | i2 | i3 | i6: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.Execer + driver.ExecerContext + driver.Queryer + }{conn, conn, conn, conn, conn} + case i0 | i1 | i2 | i3 | i6: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.Execer + driver.ExecerContext + driver.Queryer + }{conn, conn, conn, conn, conn, conn} + case i4 | i6: + return struct { + driver.Conn + driver.NamedValueChecker + driver.Queryer + }{conn, conn, conn} + case i0 | i4 | i6: + return struct { + driver.Conn + driver.ConnBeginTx + driver.NamedValueChecker + driver.Queryer + }{conn, conn, conn, conn} + case i1 | i4 | i6: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.NamedValueChecker + driver.Queryer + }{conn, conn, conn, conn} + case i0 | i1 | i4 | i6: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.NamedValueChecker + driver.Queryer + }{conn, conn, conn, conn, conn} + case i2 | i4 | i6: + return struct { + driver.Conn + driver.Execer + driver.NamedValueChecker + driver.Queryer + }{conn, conn, conn, conn} + case i0 | i2 | i4 | i6: + 
return struct { + driver.Conn + driver.ConnBeginTx + driver.Execer + driver.NamedValueChecker + driver.Queryer + }{conn, conn, conn, conn, conn} + case i1 | i2 | i4 | i6: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.Execer + driver.NamedValueChecker + driver.Queryer + }{conn, conn, conn, conn, conn} + case i0 | i1 | i2 | i4 | i6: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.Execer + driver.NamedValueChecker + driver.Queryer + }{conn, conn, conn, conn, conn, conn} + case i3 | i4 | i6: + return struct { + driver.Conn + driver.ExecerContext + driver.NamedValueChecker + driver.Queryer + }{conn, conn, conn, conn} + case i0 | i3 | i4 | i6: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ExecerContext + driver.NamedValueChecker + driver.Queryer + }{conn, conn, conn, conn, conn} + case i1 | i3 | i4 | i6: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.ExecerContext + driver.NamedValueChecker + driver.Queryer + }{conn, conn, conn, conn, conn} + case i0 | i1 | i3 | i4 | i6: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.ExecerContext + driver.NamedValueChecker + driver.Queryer + }{conn, conn, conn, conn, conn, conn} + case i2 | i3 | i4 | i6: + return struct { + driver.Conn + driver.Execer + driver.ExecerContext + driver.NamedValueChecker + driver.Queryer + }{conn, conn, conn, conn, conn} + case i0 | i2 | i3 | i4 | i6: + return struct { + driver.Conn + driver.ConnBeginTx + driver.Execer + driver.ExecerContext + driver.NamedValueChecker + driver.Queryer + }{conn, conn, conn, conn, conn, conn} + case i1 | i2 | i3 | i4 | i6: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.Execer + driver.ExecerContext + driver.NamedValueChecker + driver.Queryer + }{conn, conn, conn, conn, conn, conn} + case i0 | i1 | i2 | i3 | i4 | i6: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.Execer + driver.ExecerContext + driver.NamedValueChecker + driver.Queryer + }{conn, conn, conn, conn, conn, conn, conn} + case i5 | i6: + return struct { + driver.Conn + driver.Pinger + driver.Queryer + }{conn, conn, conn} + case i0 | i5 | i6: + return struct { + driver.Conn + driver.ConnBeginTx + driver.Pinger + driver.Queryer + }{conn, conn, conn, conn} + case i1 | i5 | i6: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.Pinger + driver.Queryer + }{conn, conn, conn, conn} + case i0 | i1 | i5 | i6: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.Pinger + driver.Queryer + }{conn, conn, conn, conn, conn} + case i2 | i5 | i6: + return struct { + driver.Conn + driver.Execer + driver.Pinger + driver.Queryer + }{conn, conn, conn, conn} + case i0 | i2 | i5 | i6: + return struct { + driver.Conn + driver.ConnBeginTx + driver.Execer + driver.Pinger + driver.Queryer + }{conn, conn, conn, conn, conn} + case i1 | i2 | i5 | i6: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.Execer + driver.Pinger + driver.Queryer + }{conn, conn, conn, conn, conn} + case i0 | i1 | i2 | i5 | i6: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.Execer + driver.Pinger + driver.Queryer + }{conn, conn, conn, conn, conn, conn} + case i3 | i5 | i6: + return struct { + driver.Conn + driver.ExecerContext + driver.Pinger + driver.Queryer + }{conn, conn, conn, conn} + case i0 | i3 | i5 | i6: + return struct { + driver.Conn + driver.ConnBeginTx + 
driver.ExecerContext + driver.Pinger + driver.Queryer + }{conn, conn, conn, conn, conn} + case i1 | i3 | i5 | i6: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.ExecerContext + driver.Pinger + driver.Queryer + }{conn, conn, conn, conn, conn} + case i0 | i1 | i3 | i5 | i6: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.ExecerContext + driver.Pinger + driver.Queryer + }{conn, conn, conn, conn, conn, conn} + case i2 | i3 | i5 | i6: + return struct { + driver.Conn + driver.Execer + driver.ExecerContext + driver.Pinger + driver.Queryer + }{conn, conn, conn, conn, conn} + case i0 | i2 | i3 | i5 | i6: + return struct { + driver.Conn + driver.ConnBeginTx + driver.Execer + driver.ExecerContext + driver.Pinger + driver.Queryer + }{conn, conn, conn, conn, conn, conn} + case i1 | i2 | i3 | i5 | i6: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.Execer + driver.ExecerContext + driver.Pinger + driver.Queryer + }{conn, conn, conn, conn, conn, conn} + case i0 | i1 | i2 | i3 | i5 | i6: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.Execer + driver.ExecerContext + driver.Pinger + driver.Queryer + }{conn, conn, conn, conn, conn, conn, conn} + case i4 | i5 | i6: + return struct { + driver.Conn + driver.NamedValueChecker + driver.Pinger + driver.Queryer + }{conn, conn, conn, conn} + case i0 | i4 | i5 | i6: + return struct { + driver.Conn + driver.ConnBeginTx + driver.NamedValueChecker + driver.Pinger + driver.Queryer + }{conn, conn, conn, conn, conn} + case i1 | i4 | i5 | i6: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.NamedValueChecker + driver.Pinger + driver.Queryer + }{conn, conn, conn, conn, conn} + case i0 | i1 | i4 | i5 | i6: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.NamedValueChecker + driver.Pinger + driver.Queryer + }{conn, conn, conn, conn, conn, conn} + case i2 | i4 | i5 | i6: + return struct { + driver.Conn + driver.Execer + driver.NamedValueChecker + driver.Pinger + driver.Queryer + }{conn, conn, conn, conn, conn} + case i0 | i2 | i4 | i5 | i6: + return struct { + driver.Conn + driver.ConnBeginTx + driver.Execer + driver.NamedValueChecker + driver.Pinger + driver.Queryer + }{conn, conn, conn, conn, conn, conn} + case i1 | i2 | i4 | i5 | i6: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.Execer + driver.NamedValueChecker + driver.Pinger + driver.Queryer + }{conn, conn, conn, conn, conn, conn} + case i0 | i1 | i2 | i4 | i5 | i6: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.Execer + driver.NamedValueChecker + driver.Pinger + driver.Queryer + }{conn, conn, conn, conn, conn, conn, conn} + case i3 | i4 | i5 | i6: + return struct { + driver.Conn + driver.ExecerContext + driver.NamedValueChecker + driver.Pinger + driver.Queryer + }{conn, conn, conn, conn, conn} + case i0 | i3 | i4 | i5 | i6: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ExecerContext + driver.NamedValueChecker + driver.Pinger + driver.Queryer + }{conn, conn, conn, conn, conn, conn} + case i1 | i3 | i4 | i5 | i6: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.ExecerContext + driver.NamedValueChecker + driver.Pinger + driver.Queryer + }{conn, conn, conn, conn, conn, conn} + case i0 | i1 | i3 | i4 | i5 | i6: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.ExecerContext + driver.NamedValueChecker + 
driver.Pinger + driver.Queryer + }{conn, conn, conn, conn, conn, conn, conn} + case i2 | i3 | i4 | i5 | i6: + return struct { + driver.Conn + driver.Execer + driver.ExecerContext + driver.NamedValueChecker + driver.Pinger + driver.Queryer + }{conn, conn, conn, conn, conn, conn} + case i0 | i2 | i3 | i4 | i5 | i6: + return struct { + driver.Conn + driver.ConnBeginTx + driver.Execer + driver.ExecerContext + driver.NamedValueChecker + driver.Pinger + driver.Queryer + }{conn, conn, conn, conn, conn, conn, conn} + case i1 | i2 | i3 | i4 | i5 | i6: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.Execer + driver.ExecerContext + driver.NamedValueChecker + driver.Pinger + driver.Queryer + }{conn, conn, conn, conn, conn, conn, conn} + case i0 | i1 | i2 | i3 | i4 | i5 | i6: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.Execer + driver.ExecerContext + driver.NamedValueChecker + driver.Pinger + driver.Queryer + }{conn, conn, conn, conn, conn, conn, conn, conn} + case i7: + return struct { + driver.Conn + driver.QueryerContext + }{conn, conn} + case i0 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.QueryerContext + }{conn, conn, conn} + case i1 | i7: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.QueryerContext + }{conn, conn, conn} + case i0 | i1 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.QueryerContext + }{conn, conn, conn, conn} + case i2 | i7: + return struct { + driver.Conn + driver.Execer + driver.QueryerContext + }{conn, conn, conn} + case i0 | i2 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.Execer + driver.QueryerContext + }{conn, conn, conn, conn} + case i1 | i2 | i7: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.Execer + driver.QueryerContext + }{conn, conn, conn, conn} + case i0 | i1 | i2 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.Execer + driver.QueryerContext + }{conn, conn, conn, conn, conn} + case i3 | i7: + return struct { + driver.Conn + driver.ExecerContext + driver.QueryerContext + }{conn, conn, conn} + case i0 | i3 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ExecerContext + driver.QueryerContext + }{conn, conn, conn, conn} + case i1 | i3 | i7: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.ExecerContext + driver.QueryerContext + }{conn, conn, conn, conn} + case i0 | i1 | i3 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.ExecerContext + driver.QueryerContext + }{conn, conn, conn, conn, conn} + case i2 | i3 | i7: + return struct { + driver.Conn + driver.Execer + driver.ExecerContext + driver.QueryerContext + }{conn, conn, conn, conn} + case i0 | i2 | i3 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.Execer + driver.ExecerContext + driver.QueryerContext + }{conn, conn, conn, conn, conn} + case i1 | i2 | i3 | i7: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.Execer + driver.ExecerContext + driver.QueryerContext + }{conn, conn, conn, conn, conn} + case i0 | i1 | i2 | i3 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.Execer + driver.ExecerContext + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn} + case i4 | i7: + return struct { + driver.Conn + driver.NamedValueChecker + driver.QueryerContext + }{conn, conn, conn} + case i0 | i4 | i7: + 
return struct { + driver.Conn + driver.ConnBeginTx + driver.NamedValueChecker + driver.QueryerContext + }{conn, conn, conn, conn} + case i1 | i4 | i7: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.NamedValueChecker + driver.QueryerContext + }{conn, conn, conn, conn} + case i0 | i1 | i4 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.NamedValueChecker + driver.QueryerContext + }{conn, conn, conn, conn, conn} + case i2 | i4 | i7: + return struct { + driver.Conn + driver.Execer + driver.NamedValueChecker + driver.QueryerContext + }{conn, conn, conn, conn} + case i0 | i2 | i4 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.Execer + driver.NamedValueChecker + driver.QueryerContext + }{conn, conn, conn, conn, conn} + case i1 | i2 | i4 | i7: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.Execer + driver.NamedValueChecker + driver.QueryerContext + }{conn, conn, conn, conn, conn} + case i0 | i1 | i2 | i4 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.Execer + driver.NamedValueChecker + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn} + case i3 | i4 | i7: + return struct { + driver.Conn + driver.ExecerContext + driver.NamedValueChecker + driver.QueryerContext + }{conn, conn, conn, conn} + case i0 | i3 | i4 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ExecerContext + driver.NamedValueChecker + driver.QueryerContext + }{conn, conn, conn, conn, conn} + case i1 | i3 | i4 | i7: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.ExecerContext + driver.NamedValueChecker + driver.QueryerContext + }{conn, conn, conn, conn, conn} + case i0 | i1 | i3 | i4 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.ExecerContext + driver.NamedValueChecker + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn} + case i2 | i3 | i4 | i7: + return struct { + driver.Conn + driver.Execer + driver.ExecerContext + driver.NamedValueChecker + driver.QueryerContext + }{conn, conn, conn, conn, conn} + case i0 | i2 | i3 | i4 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.Execer + driver.ExecerContext + driver.NamedValueChecker + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn} + case i1 | i2 | i3 | i4 | i7: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.Execer + driver.ExecerContext + driver.NamedValueChecker + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn} + case i0 | i1 | i2 | i3 | i4 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.Execer + driver.ExecerContext + driver.NamedValueChecker + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn, conn} + case i5 | i7: + return struct { + driver.Conn + driver.Pinger + driver.QueryerContext + }{conn, conn, conn} + case i0 | i5 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.Pinger + driver.QueryerContext + }{conn, conn, conn, conn} + case i1 | i5 | i7: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.Pinger + driver.QueryerContext + }{conn, conn, conn, conn} + case i0 | i1 | i5 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.Pinger + driver.QueryerContext + }{conn, conn, conn, conn, conn} + case i2 | i5 | i7: + return struct { + driver.Conn + driver.Execer + driver.Pinger + driver.QueryerContext + }{conn, conn, 
conn, conn} + case i0 | i2 | i5 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.Execer + driver.Pinger + driver.QueryerContext + }{conn, conn, conn, conn, conn} + case i1 | i2 | i5 | i7: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.Execer + driver.Pinger + driver.QueryerContext + }{conn, conn, conn, conn, conn} + case i0 | i1 | i2 | i5 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.Execer + driver.Pinger + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn} + case i3 | i5 | i7: + return struct { + driver.Conn + driver.ExecerContext + driver.Pinger + driver.QueryerContext + }{conn, conn, conn, conn} + case i0 | i3 | i5 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ExecerContext + driver.Pinger + driver.QueryerContext + }{conn, conn, conn, conn, conn} + case i1 | i3 | i5 | i7: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.ExecerContext + driver.Pinger + driver.QueryerContext + }{conn, conn, conn, conn, conn} + case i0 | i1 | i3 | i5 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.ExecerContext + driver.Pinger + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn} + case i2 | i3 | i5 | i7: + return struct { + driver.Conn + driver.Execer + driver.ExecerContext + driver.Pinger + driver.QueryerContext + }{conn, conn, conn, conn, conn} + case i0 | i2 | i3 | i5 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.Execer + driver.ExecerContext + driver.Pinger + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn} + case i1 | i2 | i3 | i5 | i7: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.Execer + driver.ExecerContext + driver.Pinger + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn} + case i0 | i1 | i2 | i3 | i5 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.Execer + driver.ExecerContext + driver.Pinger + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn, conn} + case i4 | i5 | i7: + return struct { + driver.Conn + driver.NamedValueChecker + driver.Pinger + driver.QueryerContext + }{conn, conn, conn, conn} + case i0 | i4 | i5 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.NamedValueChecker + driver.Pinger + driver.QueryerContext + }{conn, conn, conn, conn, conn} + case i1 | i4 | i5 | i7: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.NamedValueChecker + driver.Pinger + driver.QueryerContext + }{conn, conn, conn, conn, conn} + case i0 | i1 | i4 | i5 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.NamedValueChecker + driver.Pinger + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn} + case i2 | i4 | i5 | i7: + return struct { + driver.Conn + driver.Execer + driver.NamedValueChecker + driver.Pinger + driver.QueryerContext + }{conn, conn, conn, conn, conn} + case i0 | i2 | i4 | i5 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.Execer + driver.NamedValueChecker + driver.Pinger + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn} + case i1 | i2 | i4 | i5 | i7: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.Execer + driver.NamedValueChecker + driver.Pinger + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn} + case i0 | i1 | i2 | i4 | i5 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + 
driver.ConnPrepareContext + driver.Execer + driver.NamedValueChecker + driver.Pinger + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn, conn} + case i3 | i4 | i5 | i7: + return struct { + driver.Conn + driver.ExecerContext + driver.NamedValueChecker + driver.Pinger + driver.QueryerContext + }{conn, conn, conn, conn, conn} + case i0 | i3 | i4 | i5 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ExecerContext + driver.NamedValueChecker + driver.Pinger + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn} + case i1 | i3 | i4 | i5 | i7: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.ExecerContext + driver.NamedValueChecker + driver.Pinger + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn} + case i0 | i1 | i3 | i4 | i5 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.ExecerContext + driver.NamedValueChecker + driver.Pinger + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn, conn} + case i2 | i3 | i4 | i5 | i7: + return struct { + driver.Conn + driver.Execer + driver.ExecerContext + driver.NamedValueChecker + driver.Pinger + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn} + case i0 | i2 | i3 | i4 | i5 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.Execer + driver.ExecerContext + driver.NamedValueChecker + driver.Pinger + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn, conn} + case i1 | i2 | i3 | i4 | i5 | i7: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.Execer + driver.ExecerContext + driver.NamedValueChecker + driver.Pinger + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn, conn} + case i0 | i1 | i2 | i3 | i4 | i5 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.Execer + driver.ExecerContext + driver.NamedValueChecker + driver.Pinger + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn, conn, conn} + case i6 | i7: + return struct { + driver.Conn + driver.Queryer + driver.QueryerContext + }{conn, conn, conn} + case i0 | i6 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn} + case i1 | i6 | i7: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn} + case i0 | i1 | i6 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn, conn} + case i2 | i6 | i7: + return struct { + driver.Conn + driver.Execer + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn} + case i0 | i2 | i6 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.Execer + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn, conn} + case i1 | i2 | i6 | i7: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.Execer + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn, conn} + case i0 | i1 | i2 | i6 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.Execer + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn} + case i3 | i6 | i7: + return struct { + driver.Conn + driver.ExecerContext + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn} + case i0 | i3 | i6 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ExecerContext + driver.Queryer + 
driver.QueryerContext + }{conn, conn, conn, conn, conn} + case i1 | i3 | i6 | i7: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.ExecerContext + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn, conn} + case i0 | i1 | i3 | i6 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.ExecerContext + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn} + case i2 | i3 | i6 | i7: + return struct { + driver.Conn + driver.Execer + driver.ExecerContext + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn, conn} + case i0 | i2 | i3 | i6 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.Execer + driver.ExecerContext + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn} + case i1 | i2 | i3 | i6 | i7: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.Execer + driver.ExecerContext + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn} + case i0 | i1 | i2 | i3 | i6 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.Execer + driver.ExecerContext + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn, conn} + case i4 | i6 | i7: + return struct { + driver.Conn + driver.NamedValueChecker + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn} + case i0 | i4 | i6 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.NamedValueChecker + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn, conn} + case i1 | i4 | i6 | i7: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.NamedValueChecker + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn, conn} + case i0 | i1 | i4 | i6 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.NamedValueChecker + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn} + case i2 | i4 | i6 | i7: + return struct { + driver.Conn + driver.Execer + driver.NamedValueChecker + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn, conn} + case i0 | i2 | i4 | i6 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.Execer + driver.NamedValueChecker + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn} + case i1 | i2 | i4 | i6 | i7: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.Execer + driver.NamedValueChecker + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn} + case i0 | i1 | i2 | i4 | i6 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.Execer + driver.NamedValueChecker + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn, conn} + case i3 | i4 | i6 | i7: + return struct { + driver.Conn + driver.ExecerContext + driver.NamedValueChecker + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn, conn} + case i0 | i3 | i4 | i6 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ExecerContext + driver.NamedValueChecker + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn} + case i1 | i3 | i4 | i6 | i7: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.ExecerContext + driver.NamedValueChecker + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn} + case i0 | i1 | i3 | i4 | i6 | i7: + return struct { + 
driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.ExecerContext + driver.NamedValueChecker + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn, conn} + case i2 | i3 | i4 | i6 | i7: + return struct { + driver.Conn + driver.Execer + driver.ExecerContext + driver.NamedValueChecker + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn} + case i0 | i2 | i3 | i4 | i6 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.Execer + driver.ExecerContext + driver.NamedValueChecker + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn, conn} + case i1 | i2 | i3 | i4 | i6 | i7: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.Execer + driver.ExecerContext + driver.NamedValueChecker + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn, conn} + case i0 | i1 | i2 | i3 | i4 | i6 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.Execer + driver.ExecerContext + driver.NamedValueChecker + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn, conn, conn} + case i5 | i6 | i7: + return struct { + driver.Conn + driver.Pinger + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn} + case i0 | i5 | i6 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.Pinger + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn, conn} + case i1 | i5 | i6 | i7: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.Pinger + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn, conn} + case i0 | i1 | i5 | i6 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.Pinger + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn} + case i2 | i5 | i6 | i7: + return struct { + driver.Conn + driver.Execer + driver.Pinger + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn, conn} + case i0 | i2 | i5 | i6 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.Execer + driver.Pinger + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn} + case i1 | i2 | i5 | i6 | i7: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.Execer + driver.Pinger + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn} + case i0 | i1 | i2 | i5 | i6 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.Execer + driver.Pinger + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn, conn} + case i3 | i5 | i6 | i7: + return struct { + driver.Conn + driver.ExecerContext + driver.Pinger + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn, conn} + case i0 | i3 | i5 | i6 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ExecerContext + driver.Pinger + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn} + case i1 | i3 | i5 | i6 | i7: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.ExecerContext + driver.Pinger + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn} + case i0 | i1 | i3 | i5 | i6 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.ExecerContext + driver.Pinger + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn, conn} + case i2 | i3 | i5 | i6 | i7: + return struct { 
+ driver.Conn + driver.Execer + driver.ExecerContext + driver.Pinger + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn} + case i0 | i2 | i3 | i5 | i6 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.Execer + driver.ExecerContext + driver.Pinger + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn, conn} + case i1 | i2 | i3 | i5 | i6 | i7: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.Execer + driver.ExecerContext + driver.Pinger + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn, conn} + case i0 | i1 | i2 | i3 | i5 | i6 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.Execer + driver.ExecerContext + driver.Pinger + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn, conn, conn} + case i4 | i5 | i6 | i7: + return struct { + driver.Conn + driver.NamedValueChecker + driver.Pinger + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn, conn} + case i0 | i4 | i5 | i6 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.NamedValueChecker + driver.Pinger + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn} + case i1 | i4 | i5 | i6 | i7: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.NamedValueChecker + driver.Pinger + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn} + case i0 | i1 | i4 | i5 | i6 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.NamedValueChecker + driver.Pinger + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn, conn} + case i2 | i4 | i5 | i6 | i7: + return struct { + driver.Conn + driver.Execer + driver.NamedValueChecker + driver.Pinger + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn} + case i0 | i2 | i4 | i5 | i6 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.Execer + driver.NamedValueChecker + driver.Pinger + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn, conn} + case i1 | i2 | i4 | i5 | i6 | i7: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.Execer + driver.NamedValueChecker + driver.Pinger + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn, conn} + case i0 | i1 | i2 | i4 | i5 | i6 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.Execer + driver.NamedValueChecker + driver.Pinger + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn, conn, conn} + case i3 | i4 | i5 | i6 | i7: + return struct { + driver.Conn + driver.ExecerContext + driver.NamedValueChecker + driver.Pinger + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn} + case i0 | i3 | i4 | i5 | i6 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ExecerContext + driver.NamedValueChecker + driver.Pinger + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn, conn} + case i1 | i3 | i4 | i5 | i6 | i7: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.ExecerContext + driver.NamedValueChecker + driver.Pinger + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn, conn} + case i0 | i1 | i3 | i4 | i5 | i6 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.ExecerContext + 
driver.NamedValueChecker + driver.Pinger + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn, conn, conn} + case i2 | i3 | i4 | i5 | i6 | i7: + return struct { + driver.Conn + driver.Execer + driver.ExecerContext + driver.NamedValueChecker + driver.Pinger + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn, conn} + case i0 | i2 | i3 | i4 | i5 | i6 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.Execer + driver.ExecerContext + driver.NamedValueChecker + driver.Pinger + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn, conn, conn} + case i1 | i2 | i3 | i4 | i5 | i6 | i7: + return struct { + driver.Conn + driver.ConnPrepareContext + driver.Execer + driver.ExecerContext + driver.NamedValueChecker + driver.Pinger + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn, conn, conn} + case i0 | i1 | i2 | i3 | i4 | i5 | i6 | i7: + return struct { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.Execer + driver.ExecerContext + driver.NamedValueChecker + driver.Pinger + driver.Queryer + driver.QueryerContext + }{conn, conn, conn, conn, conn, conn, conn, conn, conn} + } +} diff --git a/vendor/github.com/newrelic/go-agent/transaction.go b/vendor/github.com/newrelic/go-agent/transaction.go new file mode 100644 index 00000000000..5f344cc84e0 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/transaction.go @@ -0,0 +1,262 @@ +package newrelic + +import ( + "net/http" + "net/url" +) + +// Transaction instruments one logical unit of work: either an inbound web +// request or background task. Start a new Transaction with the +// Application.StartTransaction() method. +type Transaction interface { + // The transaction's http.ResponseWriter methods delegate to the + // http.ResponseWriter provided as a parameter to + // Application.StartTransaction or Transaction.SetWebResponse. This + // allows instrumentation of the response code and response headers. + // These methods may be called safely if the transaction does not have a + // http.ResponseWriter. + http.ResponseWriter + + // End finishes the Transaction. After that, subsequent calls to End or + // other Transaction methods have no effect. All segments and + // instrumentation must be completed before End is called. + End() error + + // Ignore prevents this transaction's data from being recorded. + Ignore() error + + // SetName names the transaction. Use a limited set of unique names to + // ensure that Transactions are grouped usefully. + SetName(name string) error + + // NoticeError records an error. The Transaction saves the first five + // errors. For more control over the recorded error fields, see the + // newrelic.Error type. In certain situations, using this method may + // result in an error being recorded twice: Errors are automatically + // recorded when Transaction.WriteHeader receives a status code above + // 400 or below 100 that is not in the IgnoreStatusCodes configuration + // list. This method is unaffected by the IgnoreStatusCodes + // configuration list. + NoticeError(err error) error + + // AddAttribute adds a key value pair to the transaction event, errors, + // and traces. + // + // The key must contain fewer than 255 bytes. The value must be a + // number, string, or boolean.
+ // + // For more information, see: + // https://docs.newrelic.com/docs/agents/manage-apm-agents/agent-metrics/collect-custom-attributes + AddAttribute(key string, value interface{}) error + + // SetWebRequest marks the transaction as a web transaction. If + // WebRequest is non-nil, SetWebRequest will additionally collect + // details on request attributes, url, and method. If headers are + // present, the agent will look for a distributed tracing header. Use + // NewWebRequest to transform a *http.Request into a WebRequest. + SetWebRequest(WebRequest) error + + // SetWebResponse sets transaction's http.ResponseWriter. After calling + // this method, the transaction may be used in place of the + // ResponseWriter to intercept the response code. This method is useful + // when the ResponseWriter is not available at the beginning of the + // transaction (if so, it can be given as a parameter to + // Application.StartTransaction). This method will return a reference + // to the transaction which implements the combination of + // http.CloseNotifier, http.Flusher, http.Hijacker, and io.ReaderFrom + // implemented by the ResponseWriter. + SetWebResponse(http.ResponseWriter) Transaction + + // StartSegmentNow starts timing a segment. The SegmentStartTime + // returned can be used as the StartTime field in Segment, + // DatastoreSegment, or ExternalSegment. We recommend using the + // StartSegmentNow function instead of this method since it checks if + // the Transaction is nil. + StartSegmentNow() SegmentStartTime + + // CreateDistributedTracePayload creates a payload used to link + // transactions. CreateDistributedTracePayload should be called every + // time an outbound call is made since the payload contains a timestamp. + // + // StartExternalSegment calls CreateDistributedTracePayload, so you + // don't need to use it for outbound HTTP calls: Just use + // StartExternalSegment! + // + // This method never returns nil. If the application is disabled or not + // yet connected then this method returns a shim implementation whose + // methods return empty strings. + CreateDistributedTracePayload() DistributedTracePayload + + // AcceptDistributedTracePayload links transactions by accepting a + // distributed trace payload from another transaction. + // + // Application.StartTransaction calls this method automatically if a + // payload is present in the request headers. Therefore, this method + // does not need to be used for typical HTTP transactions. + // + // AcceptDistributedTracePayload should be used as early in the + // transaction as possible. It may not be called after a call to + // CreateDistributedTracePayload. + // + // The payload parameter may be a DistributedTracePayload, a string, or + // a []byte. + AcceptDistributedTracePayload(t TransportType, payload interface{}) error + + // Application returns the Application which started the transaction. + Application() Application + + // BrowserTimingHeader generates the JavaScript required to enable New + // Relic's Browser product. This code should be placed into your pages + // as close to the top of the <head> element as possible, but after any + // position-sensitive <meta> tags (for example, X-UA-Compatible or + // charset information). + // + // This function freezes the transaction name: any calls to SetName() + // after BrowserTimingHeader() will be ignored. + // + // The *BrowserTimingHeader return value will be nil if browser + // monitoring is disabled, the application is not connected, or an error + // occurred.
It is safe to call the pointer's methods if it is nil. + BrowserTimingHeader() (*BrowserTimingHeader, error) + + // NewGoroutine allows you to use the Transaction in multiple + // goroutines. + // + // Each goroutine must have its own Transaction reference returned by + // NewGoroutine. You must call NewGoroutine to get a new Transaction + // reference every time you wish to pass the Transaction to another + // goroutine. It does not matter if you call this before or after the + // other goroutine has started. + // + // All Transaction methods can be used in any Transaction reference. + // The Transaction will end when End() is called in any goroutine. + // + // Example passing a new Transaction reference directly to another + // goroutine: + // + // go func(txn newrelic.Transaction) { + // defer newrelic.StartSegment(txn, "async").End() + // time.Sleep(100 * time.Millisecond) + // }(txn.NewGoroutine()) + // + // Example passing a new Transaction reference on a channel to another + // goroutine: + // + // ch := make(chan newrelic.Transaction) + // go func() { + // txn := <-ch + // defer newrelic.StartSegment(txn, "async").End() + // time.Sleep(100 * time.Millisecond) + // }() + // ch <- txn.NewGoroutine() + // + NewGoroutine() Transaction + + // GetTraceMetadata returns distributed tracing identifiers. Empty + // string identifiers are returned if the transaction has finished. + GetTraceMetadata() TraceMetadata + + // GetLinkingMetadata returns the fields needed to link data to a trace or + // entity. + GetLinkingMetadata() LinkingMetadata +} + +// DistributedTracePayload traces requests between applications or processes. +// DistributedTracePayloads are automatically added to HTTP requests by +// StartExternalSegment, so you only need to use this if you are tracing through +// a message queue or another non-HTTP communication library. The +// DistributedTracePayload may be marshalled in one of two formats: HTTPSafe or +// Text. All New Relic agents can accept payloads in either format. +type DistributedTracePayload interface { + // HTTPSafe serializes the payload into a string containing http safe + // characters. + HTTPSafe() string + // Text serializes the payload into a string. The format is slightly + // more compact than HTTPSafe. + Text() string +} + +const ( + // DistributedTracePayloadHeader is the header used by New Relic agents + // for automatic trace payload instrumentation. + DistributedTracePayloadHeader = "Newrelic" +) + +// TransportType is used in Transaction.AcceptDistributedTracePayload() to +// represent the type of connection that the trace payload was transported over. +type TransportType struct{ name string } + +// TransportType names used across New Relic agents: +var ( + TransportUnknown = TransportType{name: "Unknown"} + TransportHTTP = TransportType{name: "HTTP"} + TransportHTTPS = TransportType{name: "HTTPS"} + TransportKafka = TransportType{name: "Kafka"} + TransportJMS = TransportType{name: "JMS"} + TransportIronMQ = TransportType{name: "IronMQ"} + TransportAMQP = TransportType{name: "AMQP"} + TransportQueue = TransportType{name: "Queue"} + TransportOther = TransportType{name: "Other"} +) + +// WebRequest may be implemented to provide request information to +// Transaction.SetWebRequest. +type WebRequest interface { + // Header may return nil if you don't have any headers or don't want to + // transform them to http.Header format. + Header() http.Header + // URL may return nil if you don't have a URL or don't want to transform + // it to *url.URL. 
+ URL() *url.URL + Method() string + // If a distributed tracing header is found in the headers returned by + // Header(), this TransportType will be used in the distributed tracing + // metrics. + Transport() TransportType +} + +// NewWebRequest turns a *http.Request into a WebRequest for input into +// Transaction.SetWebRequest. +func NewWebRequest(request *http.Request) WebRequest { + if nil == request { + return nil + } + return requestWrap{request: request} +} + +// NewStaticWebRequest takes the minimum necessary information and creates a static WebRequest out of it. +func NewStaticWebRequest(hdrs http.Header, url *url.URL, method string, transport TransportType) WebRequest { + return staticWebRequest{hdrs, url, method, transport} +} + +// LinkingMetadata is returned by Transaction.GetLinkingMetadata. It contains +// identifiers needed to link data to a trace or entity. +type LinkingMetadata struct { + // TraceID identifies the entire distributed trace. This field is empty + // if distributed tracing is disabled. + TraceID string + // SpanID identifies the currently active segment. This field is empty + // if distributed tracing is disabled or the transaction is not sampled. + SpanID string + // EntityName is the Application name as set on the newrelic.Config. If + // multiple application names are specified, only the first is returned. + EntityName string + // EntityType is the type of this entity and is always the string + // "SERVICE". + EntityType string + // EntityGUID is the unique identifier for this entity. + EntityGUID string + // Hostname is the hostname this entity is running on. + Hostname string +} + +// TraceMetadata is returned by Transaction.GetTraceMetadata. It contains +// distributed tracing identifiers. +type TraceMetadata struct { + // TraceID identifies the entire distributed trace. This field is empty + // if distributed tracing is disabled. + TraceID string + // SpanID identifies the currently active segment. This field is empty + // if distributed tracing is disabled or the transaction is not sampled. + SpanID string +} diff --git a/vendor/github.com/newrelic/go-agent/version.go b/vendor/github.com/newrelic/go-agent/version.go new file mode 100644 index 00000000000..ab5d20dd68d --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/version.go @@ -0,0 +1,14 @@ +package newrelic + +import "github.com/newrelic/go-agent/internal" + +const ( + major = "2" + minor = "13" + patch = "0" + + // Version is the full string version of this Go Agent. + Version = major + "." + minor + "."
+ patch +) + +func init() { internal.TrackUsage("Go", "Version", Version) } diff --git a/vendor/github.com/nsf/jsondiff/.atom-build.json b/vendor/github.com/nsf/jsondiff/.atom-build.json new file mode 100644 index 00000000000..5de8573d26c --- /dev/null +++ b/vendor/github.com/nsf/jsondiff/.atom-build.json @@ -0,0 +1,18 @@ +{ + "cmd": "go", + "name": "go install", + "sh": false, + "args": ["install"], + "cwd": "{PROJECT_PATH}", + "errorMatch": [ + "(?<file>[\\/0-9a-zA-Z\\._]+):(?<line>\\d+)" + ], + "targets": { + "go test": { + "cmd": "go", + "sh": false, + "args": ["test"], + "cwd": "{PROJECT_PATH}" + } + } +} diff --git a/vendor/github.com/nsf/jsondiff/.gitignore b/vendor/github.com/nsf/jsondiff/.gitignore new file mode 100644 index 00000000000..54970529083 --- /dev/null +++ b/vendor/github.com/nsf/jsondiff/.gitignore @@ -0,0 +1,2 @@ +/webdemo/webdemo.js +/webdemo/webdemo.js.map diff --git a/vendor/github.com/nsf/jsondiff/LICENSE b/vendor/github.com/nsf/jsondiff/LICENSE new file mode 100644 index 00000000000..90d59f5e3e7 --- /dev/null +++ b/vendor/github.com/nsf/jsondiff/LICENSE @@ -0,0 +1,19 @@ +Copyright (C) 2019 nsf <no.smile.face@gmail.com> + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/nsf/jsondiff/README.md b/vendor/github.com/nsf/jsondiff/README.md new file mode 100644 index 00000000000..e4503d43ebd --- /dev/null +++ b/vendor/github.com/nsf/jsondiff/README.md @@ -0,0 +1,29 @@ +# JsonDiff library + +The main purpose of the library is integration into tests which use json and providing human-readable output of test results. + +The lib can compare two json items and return a detailed report of the comparison. + +At the moment it can detect a couple of types of differences: + + - FullMatch - means items are identical. + - SupersetMatch - means first item is a superset of a second item. + - NoMatch - means objects are different. + +Being a superset means that every object and array which don't match completely in a second item must be a subset of a first item.
For example: + +```json +{"a": 1, "b": 2, "c": 3} +``` + +Is a superset of (or second item is a subset of a first one): + +```json +{"a": 1, "c": 3} +``` + +Library API documentation can be found on godoc.org: https://godoc.org/github.com/nsf/jsondiff + +You can try **LIVE** version here (thanks to [gopherjs](https://github.com/gopherjs/gopherjs)): https://nosmileface.dev/jsondiff + +The library is inspired by http://tlrobinson.net/projects/javascript-fun/jsondiff/ diff --git a/vendor/github.com/nsf/jsondiff/jsondiff.go b/vendor/github.com/nsf/jsondiff/jsondiff.go new file mode 100644 index 00000000000..988c29ae06e --- /dev/null +++ b/vendor/github.com/nsf/jsondiff/jsondiff.go @@ -0,0 +1,430 @@ +package jsondiff + +import ( + "bytes" + "encoding/json" + "reflect" + "sort" + "strconv" +) + +type Difference int + +const ( + FullMatch Difference = iota + SupersetMatch + NoMatch + FirstArgIsInvalidJson + SecondArgIsInvalidJson + BothArgsAreInvalidJson +) + +func (d Difference) String() string { + switch d { + case FullMatch: + return "FullMatch" + case SupersetMatch: + return "SupersetMatch" + case NoMatch: + return "NoMatch" + case FirstArgIsInvalidJson: + return "FirstArgIsInvalidJson" + case SecondArgIsInvalidJson: + return "SecondArgIsInvalidJson" + case BothArgsAreInvalidJson: + return "BothArgsAreInvalidJson" + } + return "Invalid" +} + +type Tag struct { + Begin string + End string +} + +type Options struct { + Normal Tag + Added Tag + Removed Tag + Changed Tag + Prefix string + Indent string + PrintTypes bool + ChangedSeparator string + // When provided, this function will be used to compare two numbers. By default numbers are compared using their + // literal representation byte by byte. + CompareNumbers func(a, b json.Number) bool +} + +// Provides a set of options in JSON format that are fully parseable. +func DefaultJSONOptions() Options { + return Options{ + Added: Tag{Begin: "\"prop-added\":{", End: "}"}, + Removed: Tag{Begin: "\"prop-removed\":{", End: "}"}, + Changed: Tag{Begin: "{\"changed\":[", End: "]}"}, + ChangedSeparator: ", ", + Indent: " ", + } +} + +// Provides a set of options that are well suited for console output. Options +// use ANSI foreground color escape sequences to highlight changes. +func DefaultConsoleOptions() Options { + return Options{ + Added: Tag{Begin: "\033[0;32m", End: "\033[0m"}, + Removed: Tag{Begin: "\033[0;31m", End: "\033[0m"}, + Changed: Tag{Begin: "\033[0;33m", End: "\033[0m"}, + ChangedSeparator: " => ", + Indent: " ", + } +} + +// Provides a set of options that are well suited for HTML output. Works best +// inside
+// inside a <pre> tag.
+func DefaultHTMLOptions() Options {
+	return Options{
+		Added:            Tag{Begin: `<span style="background-color: #8bff7f">`, End: `</span>`},
+		Removed:          Tag{Begin: `<span style="background-color: #fd7f7f">`, End: `</span>`},
+		Changed:          Tag{Begin: `<span style="background-color: #fcff7f">`, End: `</span>`},
+		ChangedSeparator: " => ",
+		Indent:           "    ",
+	}
+}
+
+type context struct {
+	opts    *Options
+	buf     bytes.Buffer
+	level   int
+	lastTag *Tag
+	diff    Difference
+}
+
+func (ctx *context) compareNumbers(a, b json.Number) bool {
+	if ctx.opts.CompareNumbers != nil {
+		return ctx.opts.CompareNumbers(a, b)
+	} else {
+		return a == b
+	}
+}
+
+func (ctx *context) newline(s string) {
+	ctx.buf.WriteString(s)
+	if ctx.lastTag != nil {
+		ctx.buf.WriteString(ctx.lastTag.End)
+	}
+	ctx.buf.WriteString("\n")
+	ctx.buf.WriteString(ctx.opts.Prefix)
+	for i := 0; i < ctx.level; i++ {
+		ctx.buf.WriteString(ctx.opts.Indent)
+	}
+	if ctx.lastTag != nil {
+		ctx.buf.WriteString(ctx.lastTag.Begin)
+	}
+}
+
+func (ctx *context) key(k string) {
+	ctx.buf.WriteString(strconv.Quote(k))
+	ctx.buf.WriteString(": ")
+}
+
+func (ctx *context) writeValue(v interface{}, full bool) {
+	switch vv := v.(type) {
+	case bool:
+		ctx.buf.WriteString(strconv.FormatBool(vv))
+	case json.Number:
+		ctx.buf.WriteString(string(vv))
+	case string:
+		ctx.buf.WriteString(strconv.Quote(vv))
+	case []interface{}:
+		if full {
+			if len(vv) == 0 {
+				ctx.buf.WriteString("[")
+			} else {
+				ctx.level++
+				ctx.newline("[")
+			}
+			for i, v := range vv {
+				ctx.writeValue(v, true)
+				if i != len(vv)-1 {
+					ctx.newline(",")
+				} else {
+					ctx.level--
+					ctx.newline("")
+				}
+			}
+			ctx.buf.WriteString("]")
+		} else {
+			ctx.buf.WriteString("[]")
+		}
+	case map[string]interface{}:
+		if full {
+			if len(vv) == 0 {
+				ctx.buf.WriteString("{")
+			} else {
+				ctx.level++
+				ctx.newline("{")
+			}
+			i := 0
+			for k, v := range vv {
+				ctx.key(k)
+				ctx.writeValue(v, true)
+				if i != len(vv)-1 {
+					ctx.newline(",")
+				} else {
+					ctx.level--
+					ctx.newline("")
+				}
+				i++
+			}
+			ctx.buf.WriteString("}")
+		} else {
+			ctx.buf.WriteString("{}")
+		}
+	default:
+		ctx.buf.WriteString("null")
+	}
+
+	ctx.writeTypeMaybe(v)
+}
+
+func (ctx *context) writeTypeMaybe(v interface{}) {
+	if ctx.opts.PrintTypes {
+		ctx.buf.WriteString(" ")
+		ctx.writeType(v)
+	}
+}
+
+func (ctx *context) writeType(v interface{}) {
+	switch v.(type) {
+	case bool:
+		ctx.buf.WriteString("(boolean)")
+	case json.Number:
+		ctx.buf.WriteString("(number)")
+	case string:
+		ctx.buf.WriteString("(string)")
+	case []interface{}:
+		ctx.buf.WriteString("(array)")
+	case map[string]interface{}:
+		ctx.buf.WriteString("(object)")
+	default:
+		ctx.buf.WriteString("(null)")
+	}
+}
+
+func (ctx *context) writeMismatch(a, b interface{}) {
+	ctx.writeValue(a, false)
+	ctx.buf.WriteString(ctx.opts.ChangedSeparator)
+	ctx.writeValue(b, false)
+}
+
+func (ctx *context) tag(tag *Tag) {
+	if ctx.lastTag == tag {
+		return
+	} else if ctx.lastTag != nil {
+		ctx.buf.WriteString(ctx.lastTag.End)
+	}
+	ctx.buf.WriteString(tag.Begin)
+	ctx.lastTag = tag
+}
+
+func (ctx *context) result(d Difference) {
+	if d == NoMatch {
+		ctx.diff = NoMatch
+	} else if d == SupersetMatch && ctx.diff != NoMatch {
+		ctx.diff = SupersetMatch
+	} else if ctx.diff != NoMatch && ctx.diff != SupersetMatch {
+		ctx.diff = FullMatch
+	}
+}
+
+func (ctx *context) printMismatch(a, b interface{}) {
+	ctx.tag(&ctx.opts.Changed)
+	ctx.writeMismatch(a, b)
+}
+
+func (ctx *context) printDiff(a, b interface{}) {
+	if a == nil || b == nil {
+		if a == nil && b == nil {
+			ctx.tag(&ctx.opts.Normal)
+			ctx.writeValue(a, false)
+			ctx.result(FullMatch)
+		} else {
+			ctx.printMismatch(a, b)
+			ctx.result(NoMatch)
+		}
+		return
+	}
+
+	ka := reflect.TypeOf(a).Kind()
+	kb := reflect.TypeOf(b).Kind()
+	if ka != kb {
+		ctx.printMismatch(a, b)
+		ctx.result(NoMatch)
+		return
+	}
+	switch ka {
+	case reflect.Bool:
+		if a.(bool) != b.(bool) {
+			ctx.printMismatch(a, b)
+			ctx.result(NoMatch)
+			return
+		}
+	case reflect.String:
+		switch aa := a.(type) {
+		case json.Number:
+			bb, ok := b.(json.Number)
+			if !ok || !ctx.compareNumbers(aa, bb) {
+				ctx.printMismatch(a, b)
+				ctx.result(NoMatch)
+				return
+			}
+		case string:
+			bb, ok := b.(string)
+			if !ok || aa != bb {
+				ctx.printMismatch(a, b)
+				ctx.result(NoMatch)
+				return
+			}
+		}
+	case reflect.Slice:
+		sa, sb := a.([]interface{}), b.([]interface{})
+		salen, sblen := len(sa), len(sb)
+		max := salen
+		if sblen > max {
+			max = sblen
+		}
+		ctx.tag(&ctx.opts.Normal)
+		if max == 0 {
+			ctx.buf.WriteString("[")
+		} else {
+			ctx.level++
+			ctx.newline("[")
+		}
+		for i := 0; i < max; i++ {
+			if i < salen && i < sblen {
+				ctx.printDiff(sa[i], sb[i])
+			} else if i < salen {
+				ctx.tag(&ctx.opts.Removed)
+				ctx.writeValue(sa[i], true)
+				ctx.result(SupersetMatch)
+			} else if i < sblen {
+				ctx.tag(&ctx.opts.Added)
+				ctx.writeValue(sb[i], true)
+				ctx.result(NoMatch)
+			}
+			ctx.tag(&ctx.opts.Normal)
+			if i != max-1 {
+				ctx.newline(",")
+			} else {
+				ctx.level--
+				ctx.newline("")
+			}
+		}
+		ctx.buf.WriteString("]")
+		ctx.writeTypeMaybe(a)
+		return
+	case reflect.Map:
+		ma, mb := a.(map[string]interface{}), b.(map[string]interface{})
+		keysMap := make(map[string]bool)
+		for k := range ma {
+			keysMap[k] = true
+		}
+		for k := range mb {
+			keysMap[k] = true
+		}
+		keys := make([]string, 0, len(keysMap))
+		for k := range keysMap {
+			keys = append(keys, k)
+		}
+		sort.Strings(keys)
+		ctx.tag(&ctx.opts.Normal)
+		if len(keys) == 0 {
+			ctx.buf.WriteString("{")
+		} else {
+			ctx.level++
+			ctx.newline("{")
+		}
+		for i, k := range keys {
+			va, aok := ma[k]
+			vb, bok := mb[k]
+			if aok && bok {
+				ctx.key(k)
+				ctx.printDiff(va, vb)
+			} else if aok {
+				ctx.tag(&ctx.opts.Removed)
+				ctx.key(k)
+				ctx.writeValue(va, true)
+				ctx.result(SupersetMatch)
+			} else if bok {
+				ctx.tag(&ctx.opts.Added)
+				ctx.key(k)
+				ctx.writeValue(vb, true)
+				ctx.result(NoMatch)
+			}
+			ctx.tag(&ctx.opts.Normal)
+			if i != len(keys)-1 {
+				ctx.newline(",")
+			} else {
+				ctx.level--
+				ctx.newline("")
+			}
+		}
+		ctx.buf.WriteString("}")
+		ctx.writeTypeMaybe(a)
+		return
+	}
+	ctx.tag(&ctx.opts.Normal)
+	ctx.writeValue(a, true)
+	ctx.result(FullMatch)
+}
+
+// Compare compares two JSON documents using the given options. It returns the
+// difference type and a string describing the differences.
+//
+// FullMatch means the provided arguments are deeply equal.
+//
+// SupersetMatch means the first argument is a superset of the second: every
+// object or array in the hierarchy that doesn't match exactly must be a
+// superset of its counterpart. For example:
+//
+//     {"a": 123, "b": 456, "c": [7, 8, 9]}
+//
+// Is a superset of:
+//
+//     {"a": 123, "c": [7, 8]}
+//
+// NoMatch means there is no match.
+//
+// The remaining difference types mean that one or both JSON documents are
+// invalid JSON.
+//
+// The returned string uses a format similar to pretty-printed JSON to show
+// the human-readable difference between the provided documents. Note that
+// this format is not valid JSON and is not meant to be machine readable.
+func Compare(a, b []byte, opts *Options) (Difference, string) {
+	var av, bv interface{}
+	da := json.NewDecoder(bytes.NewReader(a))
+	da.UseNumber()
+	db := json.NewDecoder(bytes.NewReader(b))
+	db.UseNumber()
+	errA := da.Decode(&av)
+	errB := db.Decode(&bv)
+	if errA != nil && errB != nil {
+		return BothArgsAreInvalidJson, "both arguments are invalid json"
+	}
+	if errA != nil {
+		return FirstArgIsInvalidJson, "first argument is invalid json"
+	}
+	if errB != nil {
+		return SecondArgIsInvalidJson, "second argument is invalid json"
+	}
+
+	ctx := context{opts: opts}
+	ctx.printDiff(av, bv)
+	if ctx.lastTag != nil {
+		ctx.buf.WriteString(ctx.lastTag.End)
+	}
+	return ctx.diff, ctx.buf.String()
+}
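For reviewers unfamiliar with this newly vendored package, here is a minimal sketch of driving `Compare`; the inputs are hypothetical and `DefaultConsoleOptions` is the function defined above:

```go
package main

import (
	"fmt"

	"github.com/nsf/jsondiff"
)

func main() {
	a := []byte(`{"a": 1, "b": 2, "c": 3}`)
	b := []byte(`{"a": 1, "c": 3}`)

	opts := jsondiff.DefaultConsoleOptions()
	diff, report := jsondiff.Compare(a, b, &opts)

	fmt.Println(diff)   // SupersetMatch: the first document has the extra key "b"
	fmt.Println(report) // annotated, pretty-printed difference (not valid JSON)
}
```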
diff --git a/vendor/github.com/opentracing/opentracing-go/.gitignore b/vendor/github.com/opentracing/opentracing-go/.gitignore
new file mode 100644
index 00000000000..c57100a595c
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/.gitignore
@@ -0,0 +1 @@
+coverage.txt
diff --git a/vendor/github.com/opentracing/opentracing-go/.travis.yml b/vendor/github.com/opentracing/opentracing-go/.travis.yml
new file mode 100644
index 00000000000..b950e42965f
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/.travis.yml
@@ -0,0 +1,20 @@
+language: go
+
+matrix:
+  include:
+  - go: "1.13.x"
+  - go: "1.14.x"
+  - go: "tip"
+    env:
+    - LINT=true
+    - COVERAGE=true
+
+install:
+  - if [ "$LINT" == true ]; then go get -u golang.org/x/lint/golint/... ; else echo 'skipping lint'; fi
+  - go get -u github.com/stretchr/testify/...
+
+script:
+  - make test
+  - go build ./...
+  - if [ "$LINT" == true ]; then make lint ; else echo 'skipping lint'; fi
+  - if [ "$COVERAGE" == true ]; then make cover && bash <(curl -s https://codecov.io/bash) ; else echo 'skipping coverage'; fi
diff --git a/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md b/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md
new file mode 100644
index 00000000000..d3bfcf62359
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md
@@ -0,0 +1,63 @@
+Changes by Version
+==================
+
+
+1.2.0 (2020-07-01)
+-------------------
+
+* Restore the ability to reset the current span in context to nil (#231) -- Yuri Shkuro
+* Use error.object per OpenTracing Semantic Conventions (#179) -- Rahman Syed
+* Convert nil pointer log field value to string "nil" (#230) -- Cyril Tovena
+* Add Go module support (#215) -- Zaba505
+* Make SetTag helper types in ext public (#229) -- Blake Edwards
+* Add log/fields helpers for keys from specification (#226) -- Dmitry Monakhov
+* Improve noop implementation (#223) -- chanxuehong
+* Add an extension to Tracer interface for custom go context creation (#220) -- Krzesimir Nowak
+* Fix typo in comments (#222) -- meteorlxy
+* Improve documentation for log.Object() to emphasize the requirement to pass immutable arguments (#219) -- 疯狂的小企鹅
+* [mock] Return ErrInvalidSpanContext if span context is not MockSpanContext (#216) -- Milad Irannejad
+
+
+1.1.0 (2019-03-23)
+-------------------
+
+Notable changes:
+- The library is now released under Apache 2.0 license
+- Using Set() instead of Add() in HTTPHeadersCarrier is functionally a breaking change (fixes issue [#159](https://github.com/opentracing/opentracing-go/issues/159))
+- 'golang.org/x/net/context' is replaced with 'context' from the standard library
+
+List of all changes:
+
+- Export StartSpanFromContextWithTracer (#214) 
+- Add IsGlobalTracerRegistered() to indicate if a tracer has been registered (#201) 
+- Use Set() instead of Add() in HTTPHeadersCarrier (#191) 
+- Update license to Apache 2.0 (#181) 
+- Replace 'golang.org/x/net/context' with 'context' (#176) 
+- Port of Python opentracing/harness/api_check.py to Go (#146) 
+- Fix race condition in MockSpan.Context() (#170) 
+- Add PeerHostIPv4.SetString() (#155)  
+- Add a Noop log field type to log to allow for optional fields (#150)  
+
+
+1.0.2 (2017-04-26)
+-------------------
+
+- Add more semantic tags (#139) 
+
+
+1.0.1 (2017-02-06)
+-------------------
+
+- Correct spelling in comments 
+- Address race in nextMockID() (#123) 
+- log: avoid panic marshaling nil error (#131) 
+- Deprecate InitGlobalTracer in favor of SetGlobalTracer (#128) 
+- Drop Go 1.5 that fails in Travis (#129) 
+- Add convenience methods Key() and Value() to log.Field 
+
+1.0.0 (2016-09-26)
+-------------------
+
+- This release implements OpenTracing Specification 1.0 (https://opentracing.io/spec)
+
diff --git a/vendor/github.com/opentracing/opentracing-go/LICENSE b/vendor/github.com/opentracing/opentracing-go/LICENSE
new file mode 100644
index 00000000000..f0027349e83
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright 2016 The OpenTracing Authors
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/opentracing/opentracing-go/Makefile b/vendor/github.com/opentracing/opentracing-go/Makefile
new file mode 100644
index 00000000000..62abb63f58d
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/Makefile
@@ -0,0 +1,20 @@
+.DEFAULT_GOAL := test-and-lint
+
+.PHONY: test-and-lint
+test-and-lint: test lint
+
+.PHONY: test
+test:
+	go test -v -cover -race ./...
+
+.PHONY: cover
+cover:
+	go test -v -coverprofile=coverage.txt -covermode=atomic -race ./...
+
+.PHONY: lint
+lint:
+	go fmt ./...
+	golint ./...
+	@# Run again with magic to exit non-zero if golint outputs anything.
+	@! (golint ./... | read dummy)
+	go vet ./...
diff --git a/vendor/github.com/opentracing/opentracing-go/README.md b/vendor/github.com/opentracing/opentracing-go/README.md
new file mode 100644
index 00000000000..6ef1d7c9d27
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/README.md
@@ -0,0 +1,171 @@
+[![Gitter chat](http://img.shields.io/badge/gitter-join%20chat%20%E2%86%92-brightgreen.svg)](https://gitter.im/opentracing/public) [![Build Status](https://travis-ci.org/opentracing/opentracing-go.svg?branch=master)](https://travis-ci.org/opentracing/opentracing-go) [![GoDoc](https://godoc.org/github.com/opentracing/opentracing-go?status.svg)](http://godoc.org/github.com/opentracing/opentracing-go)
+[![Sourcegraph Badge](https://sourcegraph.com/github.com/opentracing/opentracing-go/-/badge.svg)](https://sourcegraph.com/github.com/opentracing/opentracing-go?badge)
+
+# OpenTracing API for Go
+
+This package is a Go platform API for OpenTracing.
+
+## Required Reading
+
+In order to understand the Go platform API, one must first be familiar with the
+[OpenTracing project](https://opentracing.io) and, more specifically, its
+[terminology](https://opentracing.io/specification/).
+
+## API overview for those adding instrumentation
+
+Everyday consumers of this `opentracing` package really only need to worry
+about a couple of key abstractions: the `StartSpan` function, the `Span`
+interface, and binding a `Tracer` at `main()`-time. Here are code snippets
+demonstrating some important use cases.
+
+#### Singleton initialization
+
+The simplest starting point is `./default_tracer.go`. As early as possible, call
+
+```go
+    import "github.com/opentracing/opentracing-go"
+    import ".../some_tracing_impl"
+
+    func main() {
+        opentracing.SetGlobalTracer(
+            // tracing impl specific:
+            some_tracing_impl.New(...),
+        )
+        ...
+    }
+```
+
+#### Non-Singleton initialization
+
+If you prefer direct control to singletons, manage ownership of the
+`opentracing.Tracer` implementation explicitly.
+
+#### Creating a Span given an existing Go `context.Context`
+
+If you use `context.Context` in your application, OpenTracing's Go library will
+happily rely on it for `Span` propagation. To start a new (blocking child)
+`Span`, you can use `StartSpanFromContext`.
+
+```go
+    func xyz(ctx context.Context, ...) {
+        ...
+        span, ctx := opentracing.StartSpanFromContext(ctx, "operation_name")
+        defer span.Finish()
+        span.LogFields(
+            log.String("event", "soft error"),
+            log.String("type", "cache timeout"),
+            log.Int("waited.millis", 1500))
+        ...
+    }
+```
+
+#### Starting an empty trace by creating a "root span"
+
+It's always possible to create a "root" `Span` with no parent or other causal
+reference.
+
+```go
+    func xyz() {
+        ...
+        sp := opentracing.StartSpan("operation_name")
+        defer sp.Finish()
+        ...
+    }
+```
+
+#### Creating a (child) Span given an existing (parent) Span
+
+```go
+    func xyz(parentSpan opentracing.Span, ...) {
+        ...
+        sp := opentracing.StartSpan(
+            "operation_name",
+            opentracing.ChildOf(parentSpan.Context()))
+        defer sp.Finish()
+        ...
+    }
+```
+
+#### Serializing to the wire
+
+```go
+    func makeSomeRequest(ctx context.Context) ... {
+        if span := opentracing.SpanFromContext(ctx); span != nil {
+            httpClient := &http.Client{}
+            httpReq, _ := http.NewRequest("GET", "http://myservice/", nil)
+
+            // Transmit the span's TraceContext as HTTP headers on our
+            // outbound request.
+            opentracing.GlobalTracer().Inject(
+                span.Context(),
+                opentracing.HTTPHeaders,
+                opentracing.HTTPHeadersCarrier(httpReq.Header))
+
+            resp, err := httpClient.Do(httpReq)
+            ...
+        }
+        ...
+    }
+```
+
+#### Deserializing from the wire
+
+```go
+    http.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
+        var serverSpan opentracing.Span
+        appSpecificOperationName := ...
+        wireContext, err := opentracing.GlobalTracer().Extract(
+            opentracing.HTTPHeaders,
+            opentracing.HTTPHeadersCarrier(req.Header))
+        if err != nil {
+            // Optionally record something about err here
+        }
+
+        // Create the span referring to the RPC client if available.
+        // If wireContext == nil, a root span will be created.
+        serverSpan = opentracing.StartSpan(
+            appSpecificOperationName,
+            ext.RPCServerOption(wireContext))
+
+        defer serverSpan.Finish()
+
+        ctx := opentracing.ContextWithSpan(context.Background(), serverSpan)
+        ...
+    }
+```
+
+#### Conditionally capture a field using `log.Noop`
+
+In some situations, you may want to dynamically decide whether or not
+to log a field.  For example, you may want to capture additional data,
+such as a customer ID, in non-production environments:
+
+```go
+    func Customer(order *Order) log.Field {
+        if os.Getenv("ENVIRONMENT") == "dev" {
+            return log.String("customer", order.Customer.ID)
+        }
+        return log.Noop()
+    }
+```
+
+#### Goroutine-safety
+
+The entire public API is goroutine-safe and does not require external
+synchronization.
+
+## API pointers for those implementing a tracing system
+
+Tracing system implementors may be able to reuse or copy-paste-modify the `basictracer` package, found [here](https://github.com/opentracing/basictracer-go). In particular, see `basictracer.New(...)`.
+
+## API compatibility
+
+For the time being, "mild" backwards-incompatible changes may be made without changing the major version number. As OpenTracing and `opentracing-go` mature, backwards compatibility will become more of a priority.
+
+## Tracer test suite
+
+A test suite is available in the [harness](https://godoc.org/github.com/opentracing/opentracing-go/harness) package that can assist Tracer implementors to assert that their Tracer is working correctly.
+
+## Licensing
+
+[Apache 2.0 License](./LICENSE).
diff --git a/vendor/github.com/opentracing/opentracing-go/ext.go b/vendor/github.com/opentracing/opentracing-go/ext.go
new file mode 100644
index 00000000000..e11977ebe85
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/ext.go
@@ -0,0 +1,24 @@
+package opentracing
+
+import (
+	"context"
+)
+
+// TracerContextWithSpanExtension is an extension interface that a Tracer
+// implementation may want to implement. It gives the tracer some control
+// over the Go context when ContextWithSpan is invoked.
+//
+// The primary purpose of this extension is to support adapters from the
+// opentracing API to some other tracing API.
+type TracerContextWithSpanExtension interface {
+	// ContextWithSpanHook is called by the ContextWithSpan
+	// function when the Tracer implementation also implements
+	// this interface. It allows the tracer to put extra information
+	// into the context and make it available to the callers of
+	// ContextWithSpan.
+	//
+	// This hook is invoked before the ContextWithSpan function
+	// actually puts the span into the context.
+	ContextWithSpanHook(ctx context.Context, span Span) context.Context
+}
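A hypothetical sketch of an adapter using this extension point; `hookedTracer` and `ctxKey` are illustrative names, not part of this package, and note that the hook only fires when `span.Tracer()` reports the implementing tracer:

```go
package main

import (
	"context"

	opentracing "github.com/opentracing/opentracing-go"
)

type ctxKey struct{}

// hookedTracer wraps another Tracer; for the hook to fire, spans must
// report this wrapper from their Tracer() method.
type hookedTracer struct {
	opentracing.Tracer
}

// ContextWithSpanHook runs before ContextWithSpan stores the span, so the
// extra value is visible to every caller holding the returned context.
func (t hookedTracer) ContextWithSpanHook(ctx context.Context, span opentracing.Span) context.Context {
	return context.WithValue(ctx, ctxKey{}, "adapter-specific correlation data")
}

func main() {
	opentracing.SetGlobalTracer(hookedTracer{opentracing.NoopTracer{}})
}
```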
diff --git a/vendor/github.com/opentracing/opentracing-go/ext/field.go b/vendor/github.com/opentracing/opentracing-go/ext/field.go
new file mode 100644
index 00000000000..8282bd75846
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/ext/field.go
@@ -0,0 +1,17 @@
+package ext
+
+import (
+	"github.com/opentracing/opentracing-go"
+	"github.com/opentracing/opentracing-go/log"
+)
+
+// LogError sets the error=true tag on the Span and logs err as an "error" event.
+func LogError(span opentracing.Span, err error, fields ...log.Field) {
+	Error.Set(span, true)
+	ef := []log.Field{
+		log.Event("error"),
+		log.Error(err),
+	}
+	ef = append(ef, fields...)
+	span.LogFields(ef...)
+}
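A quick illustrative use of `LogError` in instrumented code; `fetchUser`, `lookup`, and the field names are hypothetical:

```go
package main

import (
	"context"
	"errors"

	opentracing "github.com/opentracing/opentracing-go"
	"github.com/opentracing/opentracing-go/ext"
	"github.com/opentracing/opentracing-go/log"
)

// lookup stands in for the real work being traced.
func lookup(ctx context.Context, id string) error { return errors.New("not found") }

func fetchUser(ctx context.Context, id string) error {
	span, ctx := opentracing.StartSpanFromContext(ctx, "fetchUser")
	defer span.Finish()

	if err := lookup(ctx, id); err != nil {
		// Sets error=true on the span and logs err as an "error" event,
		// appending the extra field after the standard ones.
		ext.LogError(span, err, log.String("user.id", id))
		return err
	}
	return nil
}

func main() {
	_ = fetchUser(context.Background(), "42")
}
```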
diff --git a/vendor/github.com/opentracing/opentracing-go/ext/tags.go b/vendor/github.com/opentracing/opentracing-go/ext/tags.go
new file mode 100644
index 00000000000..a414b5951f0
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/ext/tags.go
@@ -0,0 +1,215 @@
+package ext
+
+import "github.com/opentracing/opentracing-go"
+
+// These constants define common tag names recommended for better portability across
+// tracing systems and languages/platforms.
+//
+// The tag names are defined as typed strings, so that in addition to the usual use
+//
+//     span.setTag(TagName, value)
+//
+// they also support value type validation via this additional syntax:
+//
+//    TagName.Set(span, value)
+//
+var (
+	//////////////////////////////////////////////////////////////////////
+	// SpanKind (client/server or producer/consumer)
+	//////////////////////////////////////////////////////////////////////
+
+	// SpanKind hints at relationship between spans, e.g. client/server
+	SpanKind = spanKindTagName("span.kind")
+
+	// SpanKindRPCClient marks a span representing the client-side of an RPC
+	// or other remote call
+	SpanKindRPCClientEnum = SpanKindEnum("client")
+	SpanKindRPCClient     = opentracing.Tag{Key: string(SpanKind), Value: SpanKindRPCClientEnum}
+
+	// SpanKindRPCServer marks a span representing the server-side of an RPC
+	// or other remote call
+	SpanKindRPCServerEnum = SpanKindEnum("server")
+	SpanKindRPCServer     = opentracing.Tag{Key: string(SpanKind), Value: SpanKindRPCServerEnum}
+
+	// SpanKindProducer marks a span representing the producer-side of a
+	// message bus
+	SpanKindProducerEnum = SpanKindEnum("producer")
+	SpanKindProducer     = opentracing.Tag{Key: string(SpanKind), Value: SpanKindProducerEnum}
+
+	// SpanKindConsumer marks a span representing the consumer-side of a
+	// message bus
+	SpanKindConsumerEnum = SpanKindEnum("consumer")
+	SpanKindConsumer     = opentracing.Tag{Key: string(SpanKind), Value: SpanKindConsumerEnum}
+
+	//////////////////////////////////////////////////////////////////////
+	// Component name
+	//////////////////////////////////////////////////////////////////////
+
+	// Component is a low-cardinality identifier of the module, library,
+	// or package that is generating a span.
+	Component = StringTagName("component")
+
+	//////////////////////////////////////////////////////////////////////
+	// Sampling hint
+	//////////////////////////////////////////////////////////////////////
+
+	// SamplingPriority determines the priority of sampling this Span.
+	SamplingPriority = Uint16TagName("sampling.priority")
+
+	//////////////////////////////////////////////////////////////////////
+	// Peer tags. These tags can be emitted by either client-side or
+	// server-side to describe the other side/service in a peer-to-peer
+	// communications, like an RPC call.
+	//////////////////////////////////////////////////////////////////////
+
+	// PeerService records the service name of the peer.
+	PeerService = StringTagName("peer.service")
+
+	// PeerAddress records the address of the peer. This may be an "ip:port",
+	// a bare hostname, an FQDN, or even a database DSN substring
+	// like "mysql://username@127.0.0.1:3306/dbname"
+	PeerAddress = StringTagName("peer.address")
+
+	// PeerHostname records the host name of the peer
+	PeerHostname = StringTagName("peer.hostname")
+
+	// PeerHostIPv4 records IP v4 host address of the peer
+	PeerHostIPv4 = IPv4TagName("peer.ipv4")
+
+	// PeerHostIPv6 records IP v6 host address of the peer
+	PeerHostIPv6 = StringTagName("peer.ipv6")
+
+	// PeerPort records port number of the peer
+	PeerPort = Uint16TagName("peer.port")
+
+	//////////////////////////////////////////////////////////////////////
+	// HTTP Tags
+	//////////////////////////////////////////////////////////////////////
+
+	// HTTPUrl should be the URL of the request being handled in this segment
+	// of the trace, in standard URI format. The protocol is optional.
+	HTTPUrl = StringTagName("http.url")
+
+	// HTTPMethod is the HTTP method of the request, and is case-insensitive.
+	HTTPMethod = StringTagName("http.method")
+
+	// HTTPStatusCode is the numeric HTTP status code (200, 404, etc) of the
+	// HTTP response.
+	HTTPStatusCode = Uint16TagName("http.status_code")
+
+	//////////////////////////////////////////////////////////////////////
+	// DB Tags
+	//////////////////////////////////////////////////////////////////////
+
+	// DBInstance is database instance name.
+	DBInstance = StringTagName("db.instance")
+
+	// DBStatement is a database statement for the given database type.
+	// It can be a query or a prepared statement (i.e., before substitution).
+	DBStatement = StringTagName("db.statement")
+
+	// DBType is a database type. For any SQL database, "sql".
+	// For others, the lower-case database category, e.g. "redis"
+	DBType = StringTagName("db.type")
+
+	// DBUser is a username for accessing database.
+	DBUser = StringTagName("db.user")
+
+	//////////////////////////////////////////////////////////////////////
+	// Message Bus Tag
+	//////////////////////////////////////////////////////////////////////
+
+	// MessageBusDestination is an address at which messages can be exchanged
+	MessageBusDestination = StringTagName("message_bus.destination")
+
+	//////////////////////////////////////////////////////////////////////
+	// Error Tag
+	//////////////////////////////////////////////////////////////////////
+
+	// Error indicates that operation represented by the span resulted in an error.
+	Error = BoolTagName("error")
+)
+
+// ---
+
+// SpanKindEnum represents common span types
+type SpanKindEnum string
+
+type spanKindTagName string
+
+// Set adds a string tag to the `span`
+func (tag spanKindTagName) Set(span opentracing.Span, value SpanKindEnum) {
+	span.SetTag(string(tag), value)
+}
+
+type rpcServerOption struct {
+	clientContext opentracing.SpanContext
+}
+
+func (r rpcServerOption) Apply(o *opentracing.StartSpanOptions) {
+	if r.clientContext != nil {
+		opentracing.ChildOf(r.clientContext).Apply(o)
+	}
+	SpanKindRPCServer.Apply(o)
+}
+
+// RPCServerOption returns a StartSpanOption appropriate for an RPC server span
+// with `client` representing the metadata for the remote peer Span if available.
+// If client == nil (for example, because the client is not instrumented),
+// this RPC server span will be a root span.
+func RPCServerOption(client opentracing.SpanContext) opentracing.StartSpanOption {
+	return rpcServerOption{client}
+}
+
+// ---
+
+// StringTagName is a common tag name to be set to a string value
+type StringTagName string
+
+// Set adds a string tag to the `span`
+func (tag StringTagName) Set(span opentracing.Span, value string) {
+	span.SetTag(string(tag), value)
+}
+
+// ---
+
+// Uint32TagName is a common tag name to be set to a uint32 value
+type Uint32TagName string
+
+// Set adds a uint32 tag to the `span`
+func (tag Uint32TagName) Set(span opentracing.Span, value uint32) {
+	span.SetTag(string(tag), value)
+}
+
+// ---
+
+// Uint16TagName is a common tag name to be set to a uint16 value
+type Uint16TagName string
+
+// Set adds a uint16 tag to the `span`
+func (tag Uint16TagName) Set(span opentracing.Span, value uint16) {
+	span.SetTag(string(tag), value)
+}
+
+// ---
+
+// BoolTagName is a common tag name to be set to a bool value
+type BoolTagName string
+
+// Set adds a bool tag to the `span`
+func (tag BoolTagName) Set(span opentracing.Span, value bool) {
+	span.SetTag(string(tag), value)
+}
+
+// IPv4TagName is a common tag name to be set to an ipv4 value
+type IPv4TagName string
+
+// Set adds the IPv4 host address of the peer to the `span` as a uint32 value; kept for backward and Zipkin compatibility
+func (tag IPv4TagName) Set(span opentracing.Span, value uint32) {
+	span.SetTag(string(tag), value)
+}
+
+// SetString records the IPv4 host address of the peer on the `span` as a dot-separated string, e.g. "127.0.0.1"
+func (tag IPv4TagName) SetString(span opentracing.Span, value string) {
+	span.SetTag(string(tag), value)
+}
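A short usage sketch for the typed tag names above (assumes a tracer has been registered; the operation name is illustrative):

```go
package main

import (
	opentracing "github.com/opentracing/opentracing-go"
	"github.com/opentracing/opentracing-go/ext"
)

func main() {
	span := opentracing.StartSpan("http.request")
	defer span.Finish()

	// The typed setters catch value-type mistakes at compile time.
	ext.SpanKindRPCClient.Set(span)
	ext.HTTPMethod.Set(span, "GET")
	ext.HTTPUrl.Set(span, "http://myservice/")
	ext.HTTPStatusCode.Set(span, 200)
	ext.PeerHostIPv4.SetString(span, "127.0.0.1")
}
```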
diff --git a/vendor/github.com/opentracing/opentracing-go/globaltracer.go b/vendor/github.com/opentracing/opentracing-go/globaltracer.go
new file mode 100644
index 00000000000..4f7066a925c
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/globaltracer.go
@@ -0,0 +1,42 @@
+package opentracing
+
+type registeredTracer struct {
+	tracer       Tracer
+	isRegistered bool
+}
+
+var (
+	globalTracer = registeredTracer{NoopTracer{}, false}
+)
+
+// SetGlobalTracer sets the [singleton] opentracing.Tracer returned by
+// GlobalTracer(). Those who use GlobalTracer (rather than directly manage an
+// opentracing.Tracer instance) should call SetGlobalTracer as early as
+// possible in main(), prior to calling the `StartSpan` global func below.
+// Prior to calling `SetGlobalTracer`, any Spans started via the `StartSpan`
+// (etc) globals are noops.
+func SetGlobalTracer(tracer Tracer) {
+	globalTracer = registeredTracer{tracer, true}
+}
+
+// GlobalTracer returns the global singleton `Tracer` implementation.
+// Before `SetGlobalTracer()` is called, the `GlobalTracer()` is a noop
+// implementation that drops all data handed to it.
+func GlobalTracer() Tracer {
+	return globalTracer.tracer
+}
+
+// StartSpan defers to `Tracer.StartSpan`. See `GlobalTracer()`.
+func StartSpan(operationName string, opts ...StartSpanOption) Span {
+	return globalTracer.tracer.StartSpan(operationName, opts...)
+}
+
+// InitGlobalTracer is deprecated. Please use SetGlobalTracer.
+func InitGlobalTracer(tracer Tracer) {
+	SetGlobalTracer(tracer)
+}
+
+// IsGlobalTracerRegistered returns a `bool` to indicate if a tracer has been globally registered
+func IsGlobalTracerRegistered() bool {
+	return globalTracer.isRegistered
+}
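A behavior sketch for the default no-op tracer and the registration flag (NoopTracer is defined in noop.go below):

```go
package main

import (
	"fmt"

	opentracing "github.com/opentracing/opentracing-go"
)

func main() {
	fmt.Println(opentracing.IsGlobalTracerRegistered()) // false

	// Safe before registration: the default tracer is a no-op that
	// drops all data handed to it.
	opentracing.StartSpan("early").Finish()

	opentracing.SetGlobalTracer(opentracing.NoopTracer{})
	fmt.Println(opentracing.IsGlobalTracerRegistered()) // true
}
```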
diff --git a/vendor/github.com/opentracing/opentracing-go/go.mod b/vendor/github.com/opentracing/opentracing-go/go.mod
new file mode 100644
index 00000000000..bf48bb5d73f
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/go.mod
@@ -0,0 +1,5 @@
+module github.com/opentracing/opentracing-go
+
+go 1.14
+
+require github.com/stretchr/testify v1.3.0
diff --git a/vendor/github.com/opentracing/opentracing-go/go.sum b/vendor/github.com/opentracing/opentracing-go/go.sum
new file mode 100644
index 00000000000..4347755afe8
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/go.sum
@@ -0,0 +1,7 @@
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
diff --git a/vendor/github.com/opentracing/opentracing-go/gocontext.go b/vendor/github.com/opentracing/opentracing-go/gocontext.go
new file mode 100644
index 00000000000..1831bc9b263
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/gocontext.go
@@ -0,0 +1,65 @@
+package opentracing
+
+import "context"
+
+type contextKey struct{}
+
+var activeSpanKey = contextKey{}
+
+// ContextWithSpan returns a new `context.Context` that holds a reference to
+// the span. If span is nil, a new context without an active span is returned.
+func ContextWithSpan(ctx context.Context, span Span) context.Context {
+	if span != nil {
+		if tracerWithHook, ok := span.Tracer().(TracerContextWithSpanExtension); ok {
+			ctx = tracerWithHook.ContextWithSpanHook(ctx, span)
+		}
+	}
+	return context.WithValue(ctx, activeSpanKey, span)
+}
+
+// SpanFromContext returns the `Span` previously associated with `ctx`, or
+// `nil` if no such `Span` could be found.
+//
+// NOTE: context.Context != SpanContext: the former is Go's intra-process
+// context propagation mechanism, and the latter houses OpenTracing's per-Span
+// identity and baggage information.
+func SpanFromContext(ctx context.Context) Span {
+	val := ctx.Value(activeSpanKey)
+	if sp, ok := val.(Span); ok {
+		return sp
+	}
+	return nil
+}
+
+// StartSpanFromContext starts and returns a Span with `operationName`, using
+// any Span found within `ctx` as a ChildOfRef. If no such parent could be
+// found, StartSpanFromContext creates a root (parentless) Span.
+//
+// The second return value is a context.Context object built around the
+// returned Span.
+//
+// Example usage:
+//
+//    SomeFunction(ctx context.Context, ...) {
+//        sp, ctx := opentracing.StartSpanFromContext(ctx, "SomeFunction")
+//        defer sp.Finish()
+//        ...
+//    }
+func StartSpanFromContext(ctx context.Context, operationName string, opts ...StartSpanOption) (Span, context.Context) {
+	return StartSpanFromContextWithTracer(ctx, GlobalTracer(), operationName, opts...)
+}
+
+// StartSpanFromContextWithTracer starts and returns a span with `operationName`
+// using a span found within the context as a ChildOfRef. If none exists,
+// it creates a root span. It also returns a context.Context object built
+// around the returned span.
+//
+// Its behavior is identical to StartSpanFromContext except that it takes an
+// explicit tracer rather than using the global tracer.
+func StartSpanFromContextWithTracer(ctx context.Context, tracer Tracer, operationName string, opts ...StartSpanOption) (Span, context.Context) {
+	if parentSpan := SpanFromContext(ctx); parentSpan != nil {
+		opts = append(opts, ChildOf(parentSpan.Context()))
+	}
+	span := tracer.StartSpan(operationName, opts...)
+	return span, ContextWithSpan(ctx, span)
+}
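A sketch of the explicit-tracer variant for callers that avoid the global singleton; `handle` and `doWork` are illustrative names:

```go
package main

import (
	"context"

	opentracing "github.com/opentracing/opentracing-go"
)

func doWork(ctx context.Context) { /* traced work goes here */ }

func handle(ctx context.Context, tracer opentracing.Tracer) {
	// Identical to StartSpanFromContext, but scoped to the given tracer
	// instead of the global one.
	span, ctx := opentracing.StartSpanFromContextWithTracer(ctx, tracer, "handle")
	defer span.Finish()

	doWork(ctx)
}

func main() {
	handle(context.Background(), opentracing.NoopTracer{})
}
```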
diff --git a/vendor/github.com/opentracing/opentracing-go/log/field.go b/vendor/github.com/opentracing/opentracing-go/log/field.go
new file mode 100644
index 00000000000..f222ded797c
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/log/field.go
@@ -0,0 +1,282 @@
+package log
+
+import (
+	"fmt"
+	"math"
+)
+
+type fieldType int
+
+const (
+	stringType fieldType = iota
+	boolType
+	intType
+	int32Type
+	uint32Type
+	int64Type
+	uint64Type
+	float32Type
+	float64Type
+	errorType
+	objectType
+	lazyLoggerType
+	noopType
+)
+
+// Field instances are constructed via LogBool, LogString, and so on.
+// Tracing implementations may then handle them via the Field.Marshal
+// method.
+//
+// "heavily influenced by" (i.e., partially stolen from)
+// https://github.com/uber-go/zap
+type Field struct {
+	key          string
+	fieldType    fieldType
+	numericVal   int64
+	stringVal    string
+	interfaceVal interface{}
+}
+
+// String adds a string-valued key:value pair to a Span.LogFields() record
+func String(key, val string) Field {
+	return Field{
+		key:       key,
+		fieldType: stringType,
+		stringVal: val,
+	}
+}
+
+// Bool adds a bool-valued key:value pair to a Span.LogFields() record
+func Bool(key string, val bool) Field {
+	var numericVal int64
+	if val {
+		numericVal = 1
+	}
+	return Field{
+		key:        key,
+		fieldType:  boolType,
+		numericVal: numericVal,
+	}
+}
+
+// Int adds an int-valued key:value pair to a Span.LogFields() record
+func Int(key string, val int) Field {
+	return Field{
+		key:        key,
+		fieldType:  intType,
+		numericVal: int64(val),
+	}
+}
+
+// Int32 adds an int32-valued key:value pair to a Span.LogFields() record
+func Int32(key string, val int32) Field {
+	return Field{
+		key:        key,
+		fieldType:  int32Type,
+		numericVal: int64(val),
+	}
+}
+
+// Int64 adds an int64-valued key:value pair to a Span.LogFields() record
+func Int64(key string, val int64) Field {
+	return Field{
+		key:        key,
+		fieldType:  int64Type,
+		numericVal: val,
+	}
+}
+
+// Uint32 adds a uint32-valued key:value pair to a Span.LogFields() record
+func Uint32(key string, val uint32) Field {
+	return Field{
+		key:        key,
+		fieldType:  uint32Type,
+		numericVal: int64(val),
+	}
+}
+
+// Uint64 adds a uint64-valued key:value pair to a Span.LogFields() record
+func Uint64(key string, val uint64) Field {
+	return Field{
+		key:        key,
+		fieldType:  uint64Type,
+		numericVal: int64(val),
+	}
+}
+
+// Float32 adds a float32-valued key:value pair to a Span.LogFields() record
+func Float32(key string, val float32) Field {
+	return Field{
+		key:        key,
+		fieldType:  float32Type,
+		numericVal: int64(math.Float32bits(val)),
+	}
+}
+
+// Float64 adds a float64-valued key:value pair to a Span.LogFields() record
+func Float64(key string, val float64) Field {
+	return Field{
+		key:        key,
+		fieldType:  float64Type,
+		numericVal: int64(math.Float64bits(val)),
+	}
+}
+
+// Error adds an error with the key "error.object" to a Span.LogFields() record
+func Error(err error) Field {
+	return Field{
+		key:          "error.object",
+		fieldType:    errorType,
+		interfaceVal: err,
+	}
+}
+
+// Object adds an object-valued key:value pair to a Span.LogFields() record.
+// Please pass in an immutable object, otherwise there may be concurrency issues:
+// because the span is sent asynchronously, a mutable value such as a map may be
+// modified while it is being marshaled, resulting in e.g. "fatal error:
+// concurrent map iteration and map write".
+func Object(key string, obj interface{}) Field {
+	return Field{
+		key:          key,
+		fieldType:    objectType,
+		interfaceVal: obj,
+	}
+}
+
+// Event creates a string-valued Field for span logs with key="event" and value=val.
+func Event(val string) Field {
+	return String("event", val)
+}
+
+// Message creates a string-valued Field for span logs with key="message" and value=val.
+func Message(val string) Field {
+	return String("message", val)
+}
+
+// LazyLogger allows for user-defined, late-bound logging of arbitrary data
+type LazyLogger func(fv Encoder)
+
+// Lazy adds a LazyLogger to a Span.LogFields() record; the tracing
+// implementation will call the LazyLogger function at an indefinite time in
+// the future (after Lazy() returns).
+func Lazy(ll LazyLogger) Field {
+	return Field{
+		fieldType:    lazyLoggerType,
+		interfaceVal: ll,
+	}
+}
+
+// Noop creates a no-op log field that should be ignored by the tracer.
+// It can be used to capture optional fields, for example those that should
+// only be logged in non-production environment:
+//
+//     func customerField(order *Order) log.Field {
+//          if os.Getenv("ENVIRONMENT") == "dev" {
+//              return log.String("customer", order.Customer.ID)
+//          }
+//          return log.Noop()
+//     }
+//
+//     span.LogFields(log.String("event", "purchase"), customerField(order))
+//
+func Noop() Field {
+	return Field{
+		fieldType: noopType,
+	}
+}
+
+// Encoder allows access to the contents of a Field (via a call to
+// Field.Marshal).
+//
+// Tracer implementations typically provide an implementation of Encoder;
+// OpenTracing callers typically do not need to concern themselves with it.
+type Encoder interface {
+	EmitString(key, value string)
+	EmitBool(key string, value bool)
+	EmitInt(key string, value int)
+	EmitInt32(key string, value int32)
+	EmitInt64(key string, value int64)
+	EmitUint32(key string, value uint32)
+	EmitUint64(key string, value uint64)
+	EmitFloat32(key string, value float32)
+	EmitFloat64(key string, value float64)
+	EmitObject(key string, value interface{})
+	EmitLazyLogger(value LazyLogger)
+}
+
+// Marshal passes a Field instance through to the appropriate
+// field-type-specific method of an Encoder.
+func (lf Field) Marshal(visitor Encoder) {
+	switch lf.fieldType {
+	case stringType:
+		visitor.EmitString(lf.key, lf.stringVal)
+	case boolType:
+		visitor.EmitBool(lf.key, lf.numericVal != 0)
+	case intType:
+		visitor.EmitInt(lf.key, int(lf.numericVal))
+	case int32Type:
+		visitor.EmitInt32(lf.key, int32(lf.numericVal))
+	case int64Type:
+		visitor.EmitInt64(lf.key, int64(lf.numericVal))
+	case uint32Type:
+		visitor.EmitUint32(lf.key, uint32(lf.numericVal))
+	case uint64Type:
+		visitor.EmitUint64(lf.key, uint64(lf.numericVal))
+	case float32Type:
+		visitor.EmitFloat32(lf.key, math.Float32frombits(uint32(lf.numericVal)))
+	case float64Type:
+		visitor.EmitFloat64(lf.key, math.Float64frombits(uint64(lf.numericVal)))
+	case errorType:
+		if err, ok := lf.interfaceVal.(error); ok {
+			visitor.EmitString(lf.key, err.Error())
+		} else {
+			visitor.EmitString(lf.key, "")
+		}
+	case objectType:
+		visitor.EmitObject(lf.key, lf.interfaceVal)
+	case lazyLoggerType:
+		visitor.EmitLazyLogger(lf.interfaceVal.(LazyLogger))
+	case noopType:
+		// intentionally left blank
+	}
+}
+
+// Key returns the field's key.
+func (lf Field) Key() string {
+	return lf.key
+}
+
+// Value returns the field's value as interface{}.
+func (lf Field) Value() interface{} {
+	switch lf.fieldType {
+	case stringType:
+		return lf.stringVal
+	case boolType:
+		return lf.numericVal != 0
+	case intType:
+		return int(lf.numericVal)
+	case int32Type:
+		return int32(lf.numericVal)
+	case int64Type:
+		return int64(lf.numericVal)
+	case uint32Type:
+		return uint32(lf.numericVal)
+	case uint64Type:
+		return uint64(lf.numericVal)
+	case float32Type:
+		return math.Float32frombits(uint32(lf.numericVal))
+	case float64Type:
+		return math.Float64frombits(uint64(lf.numericVal))
+	case errorType, objectType, lazyLoggerType:
+		return lf.interfaceVal
+	case noopType:
+		return nil
+	default:
+		return nil
+	}
+}
+
+// String returns a string representation of the key and value.
+func (lf Field) String() string {
+	return fmt.Sprint(lf.key, ":", lf.Value())
+}
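A minimal sketch of how a tracer implementation might consume Fields via Marshal; `printEncoder` is illustrative only:

```go
package main

import (
	"fmt"

	"github.com/opentracing/opentracing-go/log"
)

// printEncoder dumps each field to stdout; a real tracer would serialize
// into its wire format instead.
type printEncoder struct{}

func (printEncoder) EmitString(key, value string)             { fmt.Println(key, value) }
func (printEncoder) EmitBool(key string, value bool)          { fmt.Println(key, value) }
func (printEncoder) EmitInt(key string, value int)            { fmt.Println(key, value) }
func (printEncoder) EmitInt32(key string, value int32)        { fmt.Println(key, value) }
func (printEncoder) EmitInt64(key string, value int64)        { fmt.Println(key, value) }
func (printEncoder) EmitUint32(key string, value uint32)      { fmt.Println(key, value) }
func (printEncoder) EmitUint64(key string, value uint64)      { fmt.Println(key, value) }
func (printEncoder) EmitFloat32(key string, value float32)    { fmt.Println(key, value) }
func (printEncoder) EmitFloat64(key string, value float64)    { fmt.Println(key, value) }
func (printEncoder) EmitObject(key string, value interface{}) { fmt.Println(key, value) }
func (printEncoder) EmitLazyLogger(value log.LazyLogger)      { value(printEncoder{}) }

func main() {
	fields := []log.Field{log.String("event", "retry"), log.Int("attempt", 3)}
	for _, f := range fields {
		f.Marshal(printEncoder{})
	}
}
```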
diff --git a/vendor/github.com/opentracing/opentracing-go/log/util.go b/vendor/github.com/opentracing/opentracing-go/log/util.go
new file mode 100644
index 00000000000..d57e28aa57f
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/log/util.go
@@ -0,0 +1,61 @@
+package log
+
+import (
+	"fmt"
+	"reflect"
+)
+
+// InterleavedKVToFields converts keyValues a la Span.LogKV() to a Field slice
+// a la Span.LogFields().
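+//
+// For example (illustrative):
+//
+//     fields, err := InterleavedKVToFields("event", "retry", "attempt", 3)
+//     if err == nil {
+//         span.LogFields(fields...)
+//     }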
+func InterleavedKVToFields(keyValues ...interface{}) ([]Field, error) {
+	if len(keyValues)%2 != 0 {
+		return nil, fmt.Errorf("non-even keyValues len: %d", len(keyValues))
+	}
+	fields := make([]Field, len(keyValues)/2)
+	for i := 0; i*2 < len(keyValues); i++ {
+		key, ok := keyValues[i*2].(string)
+		if !ok {
+			return nil, fmt.Errorf(
+				"non-string key (pair #%d): %T",
+				i, keyValues[i*2])
+		}
+		switch typedVal := keyValues[i*2+1].(type) {
+		case bool:
+			fields[i] = Bool(key, typedVal)
+		case string:
+			fields[i] = String(key, typedVal)
+		case int:
+			fields[i] = Int(key, typedVal)
+		case int8:
+			fields[i] = Int32(key, int32(typedVal))
+		case int16:
+			fields[i] = Int32(key, int32(typedVal))
+		case int32:
+			fields[i] = Int32(key, typedVal)
+		case int64:
+			fields[i] = Int64(key, typedVal)
+		case uint:
+			fields[i] = Uint64(key, uint64(typedVal))
+		case uint64:
+			fields[i] = Uint64(key, typedVal)
+		case uint8:
+			fields[i] = Uint32(key, uint32(typedVal))
+		case uint16:
+			fields[i] = Uint32(key, uint32(typedVal))
+		case uint32:
+			fields[i] = Uint32(key, typedVal)
+		case float32:
+			fields[i] = Float32(key, typedVal)
+		case float64:
+			fields[i] = Float64(key, typedVal)
+		default:
+			if typedVal == nil || (reflect.ValueOf(typedVal).Kind() == reflect.Ptr && reflect.ValueOf(typedVal).IsNil()) {
+				fields[i] = String(key, "nil")
+				continue
+			}
+			// When in doubt, coerce to a string
+			fields[i] = String(key, fmt.Sprint(typedVal))
+		}
+	}
+	return fields, nil
+}
diff --git a/vendor/github.com/opentracing/opentracing-go/noop.go b/vendor/github.com/opentracing/opentracing-go/noop.go
new file mode 100644
index 00000000000..f9b680a213d
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/noop.go
@@ -0,0 +1,64 @@
+package opentracing
+
+import "github.com/opentracing/opentracing-go/log"
+
+// A NoopTracer is a trivial, minimum overhead implementation of Tracer
+// for which all operations are no-ops.
+//
+// The primary use of this implementation is in libraries, such as RPC
+// frameworks, that make tracing an optional feature controlled by the
+// end user. A no-op implementation allows said libraries to use it
+// as the default Tracer and to write instrumentation that does
+// not need to keep checking if the tracer instance is nil.
+//
+// For the same reason, the NoopTracer is the default "global" tracer
+// (see GlobalTracer and SetGlobalTracer functions).
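+//
+// For example, a library might default to it (illustrative):
+//
+//     var tracer opentracing.Tracer = opentracing.NoopTracer{}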
+//
+// WARNING: NoopTracer does not support baggage propagation.
+type NoopTracer struct{}
+
+type noopSpan struct{}
+type noopSpanContext struct{}
+
+var (
+	defaultNoopSpanContext SpanContext = noopSpanContext{}
+	defaultNoopSpan        Span        = noopSpan{}
+	defaultNoopTracer      Tracer      = NoopTracer{}
+)
+
+const (
+	emptyString = ""
+)
+
+// noopSpanContext:
+func (n noopSpanContext) ForeachBaggageItem(handler func(k, v string) bool) {}
+
+// noopSpan:
+func (n noopSpan) Context() SpanContext                                  { return defaultNoopSpanContext }
+func (n noopSpan) SetBaggageItem(key, val string) Span                   { return n }
+func (n noopSpan) BaggageItem(key string) string                         { return emptyString }
+func (n noopSpan) SetTag(key string, value interface{}) Span             { return n }
+func (n noopSpan) LogFields(fields ...log.Field)                         {}
+func (n noopSpan) LogKV(keyVals ...interface{})                          {}
+func (n noopSpan) Finish()                                               {}
+func (n noopSpan) FinishWithOptions(opts FinishOptions)                  {}
+func (n noopSpan) SetOperationName(operationName string) Span            { return n }
+func (n noopSpan) Tracer() Tracer                                        { return defaultNoopTracer }
+func (n noopSpan) LogEvent(event string)                                 {}
+func (n noopSpan) LogEventWithPayload(event string, payload interface{}) {}
+func (n noopSpan) Log(data LogData)                                      {}
+
+// StartSpan belongs to the Tracer interface.
+func (n NoopTracer) StartSpan(operationName string, opts ...StartSpanOption) Span {
+	return defaultNoopSpan
+}
+
+// Inject belongs to the Tracer interface.
+func (n NoopTracer) Inject(sp SpanContext, format interface{}, carrier interface{}) error {
+	return nil
+}
+
+// Extract belongs to the Tracer interface.
+func (n NoopTracer) Extract(format interface{}, carrier interface{}) (SpanContext, error) {
+	return nil, ErrSpanContextNotFound
+}
diff --git a/vendor/github.com/opentracing/opentracing-go/propagation.go b/vendor/github.com/opentracing/opentracing-go/propagation.go
new file mode 100644
index 00000000000..b0c275eb05e
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/propagation.go
@@ -0,0 +1,176 @@
+package opentracing
+
+import (
+	"errors"
+	"net/http"
+)
+
+///////////////////////////////////////////////////////////////////////////////
+// CORE PROPAGATION INTERFACES:
+///////////////////////////////////////////////////////////////////////////////
+
+var (
+	// ErrUnsupportedFormat occurs when the `format` passed to Tracer.Inject() or
+	// Tracer.Extract() is not recognized by the Tracer implementation.
+	ErrUnsupportedFormat = errors.New("opentracing: Unknown or unsupported Inject/Extract format")
+
+	// ErrSpanContextNotFound occurs when the `carrier` passed to
+	// Tracer.Extract() is valid and uncorrupted but has insufficient
+	// information to extract a SpanContext.
+	ErrSpanContextNotFound = errors.New("opentracing: SpanContext not found in Extract carrier")
+
+	// ErrInvalidSpanContext errors occur when Tracer.Inject() is asked to
+	// operate on a SpanContext which it is not prepared to handle (for
+	// example, since it was created by a different tracer implementation).
+	ErrInvalidSpanContext = errors.New("opentracing: SpanContext type incompatible with tracer")
+
+	// ErrInvalidCarrier errors occur when Tracer.Inject() or Tracer.Extract()
+	// implementations expect a different type of `carrier` than they are
+	// given.
+	ErrInvalidCarrier = errors.New("opentracing: Invalid Inject/Extract carrier")
+
+	// ErrSpanContextCorrupted occurs when the `carrier` passed to
+	// Tracer.Extract() is of the expected type but is corrupted.
+	ErrSpanContextCorrupted = errors.New("opentracing: SpanContext data corrupted in Extract carrier")
+)
+
+///////////////////////////////////////////////////////////////////////////////
+// BUILTIN PROPAGATION FORMATS:
+///////////////////////////////////////////////////////////////////////////////
+
+// BuiltinFormat is used to demarcate the values within package `opentracing`
+// that are intended for use with the Tracer.Inject() and Tracer.Extract()
+// methods.
+type BuiltinFormat byte
+
+const (
+	// Binary represents SpanContexts as opaque binary data.
+	//
+	// For Tracer.Inject(): the carrier must be an `io.Writer`.
+	//
+	// For Tracer.Extract(): the carrier must be an `io.Reader`.
+	Binary BuiltinFormat = iota
+
+	// TextMap represents SpanContexts as key:value string pairs.
+	//
+	// Unlike HTTPHeaders, the TextMap format does not restrict the key or
+	// value character sets in any way.
+	//
+	// For Tracer.Inject(): the carrier must be a `TextMapWriter`.
+	//
+	// For Tracer.Extract(): the carrier must be a `TextMapReader`.
+	TextMap
+
+	// HTTPHeaders represents SpanContexts as HTTP header string pairs.
+	//
+	// Unlike TextMap, the HTTPHeaders format requires that the keys and values
+	// be valid as HTTP headers as-is (i.e., character casing may be unstable
+	// and special characters are disallowed in keys, values should be
+	// URL-escaped, etc).
+	//
+	// For Tracer.Inject(): the carrier must be a `TextMapWriter`.
+	//
+	// For Tracer.Extract(): the carrier must be a `TextMapReader`.
+	//
+	// See HTTPHeadersCarrier for an implementation of both TextMapWriter
+	// and TextMapReader that defers to an http.Header instance for storage.
+	// For example, Inject():
+	//
+	//    carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
+	//    err := span.Tracer().Inject(
+	//        span.Context(), opentracing.HTTPHeaders, carrier)
+	//
+	// Or Extract():
+	//
+	//    carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
+	//    clientContext, err := tracer.Extract(
+	//        opentracing.HTTPHeaders, carrier)
+	//
+	HTTPHeaders
+)
+
+// TextMapWriter is the Inject() carrier for the TextMap builtin format. With
+// it, the caller can encode a SpanContext for propagation as entries in a map
+// of unicode strings.
+type TextMapWriter interface {
+	// Set a key:value pair to the carrier. Multiple calls to Set() for the
+	// same key leads to undefined behavior.
+	//
+	// NOTE: The backing store for the TextMapWriter may contain data unrelated
+	// to SpanContext. As such, Inject() and Extract() implementations that
+	// call the TextMapWriter and TextMapReader interfaces must agree on a
+	// prefix or other convention to distinguish their own key:value pairs.
+	Set(key, val string)
+}
+
+// TextMapReader is the Extract() carrier for the TextMap builtin format. With it,
+// the caller can decode a propagated SpanContext as entries in a map of
+// unicode strings.
+type TextMapReader interface {
+	// ForeachKey returns TextMap contents via repeated calls to the `handler`
+	// function. If any call to `handler` returns a non-nil error, ForeachKey
+	// terminates and returns that error.
+	//
+	// NOTE: The backing store for the TextMapReader may contain data unrelated
+	// to SpanContext. As such, Inject() and Extract() implementations that
+	// call the TextMapWriter and TextMapReader interfaces must agree on a
+	// prefix or other convention to distinguish their own key:value pairs.
+	//
+	// The "foreach" callback pattern reduces unnecessary copying in some cases
+	// and also allows implementations to hold locks while the map is read.
+	ForeachKey(handler func(key, val string) error) error
+}
+
+// TextMapCarrier allows the use of regular map[string]string
+// as both TextMapWriter and TextMapReader.
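+//
+// For example (illustrative):
+//
+//     carrier := opentracing.TextMapCarrier{}
+//     if err := tracer.Inject(span.Context(), opentracing.TextMap, carrier); err != nil {
+//         // handle injection error
+//     }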
+type TextMapCarrier map[string]string
+
+// ForeachKey conforms to the TextMapReader interface.
+func (c TextMapCarrier) ForeachKey(handler func(key, val string) error) error {
+	for k, v := range c {
+		if err := handler(k, v); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Set implements Set() of opentracing.TextMapWriter
+func (c TextMapCarrier) Set(key, val string) {
+	c[key] = val
+}
+
+// HTTPHeadersCarrier satisfies both TextMapWriter and TextMapReader.
+//
+// Example usage for server side:
+//
+//     carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
+//     clientContext, err := tracer.Extract(opentracing.HTTPHeaders, carrier)
+//
+// Example usage for client side:
+//
+//     carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
+//     err := tracer.Inject(
+//         span.Context(),
+//         opentracing.HTTPHeaders,
+//         carrier)
+//
+type HTTPHeadersCarrier http.Header
+
+// Set conforms to the TextMapWriter interface.
+func (c HTTPHeadersCarrier) Set(key, val string) {
+	h := http.Header(c)
+	h.Set(key, val)
+}
+
+// ForeachKey conforms to the TextMapReader interface.
+func (c HTTPHeadersCarrier) ForeachKey(handler func(key, val string) error) error {
+	for k, vals := range c {
+		for _, v := range vals {
+			if err := handler(k, v); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/opentracing/opentracing-go/span.go b/vendor/github.com/opentracing/opentracing-go/span.go
new file mode 100644
index 00000000000..0d3fb534183
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/span.go
@@ -0,0 +1,189 @@
+package opentracing
+
+import (
+	"time"
+
+	"github.com/opentracing/opentracing-go/log"
+)
+
+// SpanContext represents Span state that must propagate to descendant Spans and across process
+// boundaries (e.g., a <trace_id, span_id, sampled> tuple).
+type SpanContext interface {
+	// ForeachBaggageItem grants access to all baggage items stored in the
+	// SpanContext.
+	// The handler function will be called for each baggage key/value pair.
+	// The ordering of items is not guaranteed.
+	//
+	// The bool return value indicates if the handler wants to continue iterating
+	// through the rest of the baggage items; for example if the handler is trying to
+	// find some baggage item by pattern matching the name, it can return false
+	// as soon as the item is found to stop further iterations.
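+	//
+	// For example (illustrative):
+	//
+	//     var userID string
+	//     sc.ForeachBaggageItem(func(k, v string) bool {
+	//         if k == "user_id" {
+	//             userID = v
+	//             return false // stop iterating once found
+	//         }
+	//         return true
+	//     })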
+	ForeachBaggageItem(handler func(k, v string) bool)
+}
+
+// Span represents an active, un-finished span in the OpenTracing system.
+//
+// Spans are created by the Tracer interface.
+type Span interface {
+	// Sets the end timestamp and finalizes Span state.
+	//
+	// With the exception of calls to Context() (which are always allowed),
+	// Finish() must be the last call made to any span instance, and to do
+	// otherwise leads to undefined behavior.
+	Finish()
+	// FinishWithOptions is like Finish() but with explicit control over
+	// timestamps and log data.
+	FinishWithOptions(opts FinishOptions)
+
+	// Context() yields the SpanContext for this Span. Note that the return
+	// value of Context() is still valid after a call to Span.Finish(), as is
+	// a call to Span.Context() after a call to Span.Finish().
+	Context() SpanContext
+
+	// Sets or changes the operation name.
+	//
+	// Returns a reference to this Span for chaining.
+	SetOperationName(operationName string) Span
+
+	// Adds a tag to the span.
+	//
+	// If there is a pre-existing tag set for `key`, it is overwritten.
+	//
+	// Tag values can be numeric types, strings, or bools. The behavior of
+	// other tag value types is undefined at the OpenTracing level. If a
+	// tracing system does not know how to handle a particular value type, it
+	// may ignore the tag, but shall not panic.
+	//
+	// Returns a reference to this Span for chaining.
+	SetTag(key string, value interface{}) Span
+
+	// LogFields is an efficient and type-checked way to record key:value
+	// logging data about a Span, though the programming interface is a little
+	// more verbose than LogKV(). Here's an example:
+	//
+	//    span.LogFields(
+	//        log.String("event", "soft error"),
+	//        log.String("type", "cache timeout"),
+	//        log.Int("waited.millis", 1500))
+	//
+	// Also see Span.FinishWithOptions() and FinishOptions.BulkLogData.
+	LogFields(fields ...log.Field)
+
+	// LogKV is a concise, readable way to record key:value logging data about
+	// a Span, though unfortunately this also makes it less efficient and less
+	// type-safe than LogFields(). Here's an example:
+	//
+	//    span.LogKV(
+	//        "event", "soft error",
+	//        "type", "cache timeout",
+	//        "waited.millis", 1500)
+	//
+	// For LogKV (as opposed to LogFields()), the parameters must appear as
+	// key-value pairs, like
+	//
+	//    span.LogKV(key1, val1, key2, val2, key3, val3, ...)
+	//
+	// The keys must all be strings. The values may be strings, numeric types,
+	// bools, Go error instances, or arbitrary structs.
+	//
+	// (Note to implementors: consider the log.InterleavedKVToFields() helper)
+	LogKV(alternatingKeyValues ...interface{})
+
+	// SetBaggageItem sets a key:value pair on this Span and its SpanContext
+	// that also propagates to descendants of this Span.
+	//
+	// SetBaggageItem() enables powerful functionality given a full-stack
+	// opentracing integration (e.g., arbitrary application data from a mobile
+	// app can make it, transparently, all the way into the depths of a storage
+	// system), and with it some powerful costs: use this feature with care.
+	//
+	// IMPORTANT NOTE #1: SetBaggageItem() will only propagate baggage items to
+	// *future* causal descendants of the associated Span.
+	//
+	// IMPORTANT NOTE #2: Use this thoughtfully and with care. Every key and
+	// value is copied into every local *and remote* child of the associated
+	// Span, and that can add up to a lot of network and cpu overhead.
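+	//
+	// For example (illustrative):
+	//
+	//     span.SetBaggageItem("user_id", "42")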
+	//
+	// Returns a reference to this Span for chaining.
+	SetBaggageItem(restrictedKey, value string) Span
+
+	// Gets the value for a baggage item given its key. Returns the empty string
+	// if the value isn't found in this Span.
+	BaggageItem(restrictedKey string) string
+
+	// Provides access to the Tracer that created this Span.
+	Tracer() Tracer
+
+	// Deprecated: use LogFields or LogKV
+	LogEvent(event string)
+	// Deprecated: use LogFields or LogKV
+	LogEventWithPayload(event string, payload interface{})
+	// Deprecated: use LogFields or LogKV
+	Log(data LogData)
+}
+
+// LogRecord is data associated with a single Span log. Every LogRecord
+// instance must specify at least one Field.
+type LogRecord struct {
+	Timestamp time.Time
+	Fields    []log.Field
+}
+
+// FinishOptions allows Span.FinishWithOptions callers to override the finish
+// timestamp and provide log data via a bulk interface.
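+//
+// For example (illustrative):
+//
+//     span.FinishWithOptions(opentracing.FinishOptions{
+//         FinishTime: endTime,
+//     })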
+type FinishOptions struct {
+	// FinishTime overrides the Span's finish time, or implicitly becomes
+	// time.Now() if FinishTime.IsZero().
+	//
+	// FinishTime must resolve to a timestamp that's >= the Span's StartTime
+	// (per StartSpanOptions).
+	FinishTime time.Time
+
+	// LogRecords allows the caller to specify the contents of many LogFields()
+	// calls with a single slice. May be nil.
+	//
+	// None of the LogRecord.Timestamp values may be .IsZero() (i.e., they must
+	// be set explicitly). Also, they must be >= the Span's start timestamp and
+	// <= the FinishTime (or time.Now() if FinishTime.IsZero()). Otherwise the
+	// behavior of FinishWithOptions() is undefined.
+	//
+	// If specified, the caller hands off ownership of LogRecords at
+	// FinishWithOptions() invocation time.
+	//
+	// If specified, the (deprecated) BulkLogData must be nil or empty.
+	LogRecords []LogRecord
+
+	// BulkLogData is DEPRECATED.
+	BulkLogData []LogData
+}
+
+// LogData is DEPRECATED
+type LogData struct {
+	Timestamp time.Time
+	Event     string
+	Payload   interface{}
+}
+
+// ToLogRecord converts a deprecated LogData to a non-deprecated LogRecord
+func (ld *LogData) ToLogRecord() LogRecord {
+	var literalTimestamp time.Time
+	if ld.Timestamp.IsZero() {
+		literalTimestamp = time.Now()
+	} else {
+		literalTimestamp = ld.Timestamp
+	}
+	rval := LogRecord{
+		Timestamp: literalTimestamp,
+	}
+	if ld.Payload == nil {
+		rval.Fields = []log.Field{
+			log.String("event", ld.Event),
+		}
+	} else {
+		rval.Fields = []log.Field{
+			log.String("event", ld.Event),
+			log.Object("payload", ld.Payload),
+		}
+	}
+	return rval
+}
diff --git a/vendor/github.com/opentracing/opentracing-go/tracer.go b/vendor/github.com/opentracing/opentracing-go/tracer.go
new file mode 100644
index 00000000000..715f0cedfb6
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/tracer.go
@@ -0,0 +1,304 @@
+package opentracing
+
+import "time"
+
+// Tracer is a simple, thin interface for Span creation and SpanContext
+// propagation.
+type Tracer interface {
+
+	// Create, start, and return a new Span with the given `operationName` and
+	// incorporate the given StartSpanOption `opts`. (Note that `opts` borrows
+	// from the "functional options" pattern, per
+	// http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis)
+	//
+	// A Span with no SpanReference options (e.g., opentracing.ChildOf() or
+	// opentracing.FollowsFrom()) becomes the root of its own trace.
+	//
+	// Examples:
+	//
+	//     var tracer opentracing.Tracer = ...
+	//
+	//     // The root-span case:
+	//     sp := tracer.StartSpan("GetFeed")
+	//
+	//     // The vanilla child span case:
+	//     sp := tracer.StartSpan(
+	//         "GetFeed",
+	//         opentracing.ChildOf(parentSpan.Context()))
+	//
+	//     // All the bells and whistles:
+	//     sp := tracer.StartSpan(
+	//         "GetFeed",
+	//         opentracing.ChildOf(parentSpan.Context()),
+	//         opentracing.Tag{"user_agent", loggedReq.UserAgent},
+	//         opentracing.StartTime(loggedReq.Timestamp),
+	//     )
+	//
+	StartSpan(operationName string, opts ...StartSpanOption) Span
+
+	// Inject() takes the `sm` SpanContext instance and injects it for
+	// propagation within `carrier`. The actual type of `carrier` depends on
+	// the value of `format`.
+	//
+	// OpenTracing defines a common set of `format` values (see BuiltinFormat),
+	// and each has an expected carrier type.
+	//
+	// Other packages may declare their own `format` values, much like the keys
+	// used by `context.Context` (see https://godoc.org/context#WithValue).
+	//
+	// Example usage (sans error handling):
+	//
+	//     carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
+	//     err := tracer.Inject(
+	//         span.Context(),
+	//         opentracing.HTTPHeaders,
+	//         carrier)
+	//
+	// NOTE: All opentracing.Tracer implementations MUST support all
+	// BuiltinFormats.
+	//
+	// Implementations may return opentracing.ErrUnsupportedFormat if `format`
+	// is not supported by (or not known by) the implementation.
+	//
+	// Implementations may return opentracing.ErrInvalidCarrier or any other
+	// implementation-specific error if the format is supported but injection
+	// fails anyway.
+	//
+	// See Tracer.Extract().
+	Inject(sm SpanContext, format interface{}, carrier interface{}) error
+
+	// Extract() returns a SpanContext instance given `format` and `carrier`.
+	//
+	// OpenTracing defines a common set of `format` values (see BuiltinFormat),
+	// and each has an expected carrier type.
+	//
+	// Other packages may declare their own `format` values, much like the keys
+	// used by `context.Context` (see
+	// https://godoc.org/golang.org/x/net/context#WithValue).
+	//
+	// Example usage (with StartSpan):
+	//
+	//     carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
+	//     clientContext, err := tracer.Extract(opentracing.HTTPHeaders, carrier)
+	//
+	//     // ... assuming the ultimate goal here is to resume the trace with a
+	//     // server-side Span:
+	//     var serverSpan opentracing.Span
+	//     if err == nil {
+	//         serverSpan = tracer.StartSpan(
+	//             rpcMethodName, ext.RPCServerOption(clientContext))
+	//     } else {
+	//         serverSpan = tracer.StartSpan(rpcMethodName)
+	//     }
+	//
+	// NOTE: All opentracing.Tracer implementations MUST support all
+	// BuiltinFormats.
+	//
+	// Return values:
+	//  - A successful Extract returns a SpanContext instance and a nil error
+	//  - If there was simply no SpanContext to extract in `carrier`, Extract()
+	//    returns (nil, opentracing.ErrSpanContextNotFound)
+	//  - If `format` is unsupported or unrecognized, Extract() returns (nil,
+	//    opentracing.ErrUnsupportedFormat)
+	//  - If there are more fundamental problems with the `carrier` object,
+	//    Extract() may return opentracing.ErrInvalidCarrier,
+	//    opentracing.ErrSpanContextCorrupted, or implementation-specific
+	//    errors.
+	//
+	// See Tracer.Inject().
+	Extract(format interface{}, carrier interface{}) (SpanContext, error)
+}
+
+// StartSpanOptions allows Tracer.StartSpan() callers and implementors a
+// mechanism to override the start timestamp, specify Span References, and make
+// a single Tag or multiple Tags available at Span start time.
+//
+// StartSpan() callers should look at the StartSpanOption interface and
+// implementations available in this package.
+//
+// Tracer implementations can convert a slice of `StartSpanOption` instances
+// into a `StartSpanOptions` struct like so:
+//
+//     func StartSpan(opName string, opts ...opentracing.StartSpanOption) {
+//         sso := opentracing.StartSpanOptions{}
+//         for _, o := range opts {
+//             o.Apply(&sso)
+//         }
+//         ...
+//     }
+//
+type StartSpanOptions struct {
+	// Zero or more causal references to other Spans (via their SpanContext).
+	// If empty, start a "root" Span (i.e., start a new trace).
+	References []SpanReference
+
+	// StartTime overrides the Span's start time, or implicitly becomes
+	// time.Now() if StartTime.IsZero().
+	StartTime time.Time
+
+	// Tags may have zero or more entries; the restrictions on map values are
+	// identical to those for Span.SetTag(). May be nil.
+	//
+	// If specified, the caller hands off ownership of Tags at
+	// StartSpan() invocation time.
+	Tags map[string]interface{}
+}
+
+// StartSpanOption instances (zero or more) may be passed to Tracer.StartSpan.
+//
+// StartSpanOption borrows from the "functional options" pattern, per
+// http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis
+type StartSpanOption interface {
+	Apply(*StartSpanOptions)
+}
+
+// SpanReferenceType is an enum type describing different categories of
+// relationships between two Spans. If Span-2 refers to Span-1, the
+// SpanReferenceType describes Span-1 from Span-2's perspective. For example,
+// ChildOfRef means that Span-1 created Span-2.
+//
+// NOTE: Span-1 and Span-2 do *not* necessarily depend on each other for
+// completion; e.g., Span-2 may be part of a background job enqueued by Span-1,
+// or Span-2 may be sitting in a distributed queue behind Span-1.
+type SpanReferenceType int
+
+const (
+	// ChildOfRef refers to a parent Span that caused *and* somehow depends
+	// upon the new child Span. Often (but not always), the parent Span cannot
+	// finish until the child Span does.
+	//
+	// A timing diagram for a ChildOfRef that's blocked on the new Span:
+	//
+	//     [-Parent Span---------]
+	//          [-Child Span----]
+	//
+	// See http://opentracing.io/spec/
+	//
+	// See opentracing.ChildOf()
+	ChildOfRef SpanReferenceType = iota
+
+	// FollowsFromRef refers to a parent Span that does not depend in any way
+	// on the result of the new child Span. For instance, one might use
+	// FollowsFromRefs to describe pipeline stages separated by queues,
+	// or a fire-and-forget cache insert at the tail end of a web request.
+	//
+	// A FollowsFromRef Span is part of the same logical trace as the new Span:
+	// i.e., the new Span is somehow caused by the work of its FollowsFromRef.
+	//
+	// All of the following could be valid timing diagrams for children that
+	// "FollowFrom" a parent.
+	//
+	//     [-Parent Span-]  [-Child Span-]
+	//
+	//
+	//     [-Parent Span--]
+	//      [-Child Span-]
+	//
+	//
+	//     [-Parent Span-]
+	//                 [-Child Span-]
+	//
+	// See http://opentracing.io/spec/
+	//
+	// See opentracing.FollowsFrom()
+	FollowsFromRef
+)
+
+// SpanReference is a StartSpanOption that pairs a SpanReferenceType and a
+// referenced SpanContext. See the SpanReferenceType documentation for
+// supported relationships.  If SpanReference is created with
+// ReferencedContext==nil, it has no effect. Thus it allows for a more concise
+// syntax for starting spans:
+//
+//     sc, _ := tracer.Extract(someFormat, someCarrier)
+//     span := tracer.StartSpan("operation", opentracing.ChildOf(sc))
+//
+// The `ChildOf(sc)` option above will not panic if sc == nil, it will just
+// not add the parent span reference to the options.
+type SpanReference struct {
+	Type              SpanReferenceType
+	ReferencedContext SpanContext
+}
+
+// Apply satisfies the StartSpanOption interface.
+func (r SpanReference) Apply(o *StartSpanOptions) {
+	if r.ReferencedContext != nil {
+		o.References = append(o.References, r)
+	}
+}
+
+// ChildOf returns a StartSpanOption pointing to a dependent parent span.
+// If sc == nil, the option has no effect.
+//
+// See ChildOfRef, SpanReference
+func ChildOf(sc SpanContext) SpanReference {
+	return SpanReference{
+		Type:              ChildOfRef,
+		ReferencedContext: sc,
+	}
+}
+
+// FollowsFrom returns a StartSpanOption pointing to a parent Span that caused
+// the child Span but does not directly depend on its result in any way.
+// If sc == nil, the option has no effect.
+//
+// See FollowsFromRef, SpanReference
+func FollowsFrom(sc SpanContext) SpanReference {
+	return SpanReference{
+		Type:              FollowsFromRef,
+		ReferencedContext: sc,
+	}
+}
+
+// StartTime is a StartSpanOption that sets an explicit start timestamp for the
+// new Span.
+type StartTime time.Time
+
+// Apply satisfies the StartSpanOption interface.
+func (t StartTime) Apply(o *StartSpanOptions) {
+	o.StartTime = time.Time(t)
+}
+
+// Tags are a generic map from an arbitrary string key to an opaque value type.
+// The underlying tracing system is responsible for interpreting and
+// serializing the values.
+type Tags map[string]interface{}
+
+// Apply satisfies the StartSpanOption interface.
+func (t Tags) Apply(o *StartSpanOptions) {
+	if o.Tags == nil {
+		o.Tags = make(map[string]interface{})
+	}
+	for k, v := range t {
+		o.Tags[k] = v
+	}
+}
+
+// Tag may be passed as a StartSpanOption to add a tag to new spans,
+// or its Set method may be used to apply the tag to an existing Span,
+// for example:
+//
+//     tracer.StartSpan("opName", Tag{"key", value})
+//
+// or
+//
+//     Tag{"key", value}.Set(span)
+type Tag struct {
+	Key   string
+	Value interface{}
+}
+
+// Apply satisfies the StartSpanOption interface.
+func (t Tag) Apply(o *StartSpanOptions) {
+	if o.Tags == nil {
+		o.Tags = make(map[string]interface{})
+	}
+	o.Tags[t.Key] = t.Value
+}
+
+// Set applies the tag to an existing Span.
+func (t Tag) Set(s Span) {
+	s.SetTag(t.Key, t.Value)
+}
diff --git a/vendor/github.com/openzipkin/zipkin-go/.gitattributes b/vendor/github.com/openzipkin/zipkin-go/.gitattributes
new file mode 100644
index 00000000000..fcadb2cf979
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go/.gitattributes
@@ -0,0 +1 @@
+* text eol=lf
diff --git a/vendor/github.com/openzipkin/zipkin-go/.gitignore b/vendor/github.com/openzipkin/zipkin-go/.gitignore
new file mode 100644
index 00000000000..11b90db8d96
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go/.gitignore
@@ -0,0 +1,26 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+
+.idea
diff --git a/vendor/github.com/openzipkin/zipkin-go/.golangci.yml b/vendor/github.com/openzipkin/zipkin-go/.golangci.yml
new file mode 100644
index 00000000000..8b1cfd0e189
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go/.golangci.yml
@@ -0,0 +1,30 @@
+run:
+  deadline: 5m
+
+linters:
+  disable-all: true
+  enable:
+    - dupl
+    - goconst
+    - gocyclo
+    - gofmt
+    - golint
+    - govet
+    - ineffassign
+    - interfacer
+    - lll
+    - misspell
+    - nakedret
+    - structcheck
+    - unparam
+    - varcheck
+
+linters-settings:
+  dupl:
+    threshold: 400
+  lll:
+    line-length: 170
+  gocyclo:
+    min-complexity: 15
+  golint:
+    min-confidence: 0.85
\ No newline at end of file
diff --git a/vendor/github.com/openzipkin/zipkin-go/.travis.yml b/vendor/github.com/openzipkin/zipkin-go/.travis.yml
new file mode 100644
index 00000000000..d440ed07f5d
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go/.travis.yml
@@ -0,0 +1,41 @@
+language: go
+dist: trusty
+
+services:
+  - rabbitmq
+
+sudo: false
+
+matrix:
+  include:
+    - go: "1.9.x"
+    - go: "1.10.x"
+    - go: "1.11.x"
+      env:
+        - GO111MODULE=on
+    - go: "1.12.x"
+      env:
+        - GO111MODULE=on
+    - go: "tip"
+      env:
+        - GO111MODULE=on
+
+before_install:
+  - go get golang.org/x/tools/cmd/cover
+  - go get github.com/mattn/goveralls
+
+install:
+  - go get -d -t ./...
+  - go get -u golang.org/x/lint/golint
+
+script:
+  - make test vet lint bench
+  - $GOPATH/bin/goveralls -service=travis-ci
+
+notifications:
+  webhooks:
+    urls:
+      - https://webhooks.gitter.im/e/ead3c37d57527214e9f2
+      - https://webhooks.gitter.im/e/e57478303f87ecd7bffc
+    on_success: change
+    on_failure: always
diff --git a/vendor/github.com/openzipkin/zipkin-go/LICENSE b/vendor/github.com/openzipkin/zipkin-go/LICENSE
new file mode 100644
index 00000000000..2ff7224635f
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go/LICENSE
@@ -0,0 +1,201 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction,
+and distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by
+the copyright owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all
+other entities that control, are controlled by, or are under common
+control with that entity. For the purposes of this definition,
+"control" means (i) the power, direct or indirect, to cause the
+direction or management of such entity, whether by contract or
+otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity
+exercising permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications,
+including but not limited to software source code, documentation
+source, and configuration files.
+
+"Object" form shall mean any form resulting from mechanical
+transformation or translation of a Source form, including but
+not limited to compiled object code, generated documentation,
+and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or
+Object form, made available under the License, as indicated by a
+copyright notice that is included in or attached to the work
+(an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object
+form, that is based on (or derived from) the Work and for which the
+editorial revisions, annotations, elaborations, or other modifications
+represent, as a whole, an original work of authorship. For the purposes
+of this License, Derivative Works shall not include works that remain
+separable from, or merely link (or bind by name) to the interfaces of,
+the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including
+the original version of the Work and any modifications or additions
+to that Work or Derivative Works thereof, that is intentionally
+submitted to Licensor for inclusion in the Work by the copyright owner
+or by an individual or Legal Entity authorized to submit on behalf of
+the copyright owner. For the purposes of this definition, "submitted"
+means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems,
+and issue tracking systems that are managed by, or on behalf of, the
+Licensor for the purpose of discussing and improving the Work, but
+excluding communication that is conspicuously marked or otherwise
+designated in writing by the copyright owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity
+on behalf of whom a Contribution has been received by Licensor and
+subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+this License, each Contributor hereby grants to You a perpetual,
+worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the
+Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+this License, each Contributor hereby grants to You a perpetual,
+worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+(except as stated in this section) patent license to make, have made,
+use, offer to sell, sell, import, and otherwise transfer the Work,
+where such license applies only to those patent claims licensable
+by such Contributor that are necessarily infringed by their
+Contribution(s) alone or by combination of their Contribution(s)
+with the Work to which such Contribution(s) was submitted. If You
+institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work
+or a Contribution incorporated within the Work constitutes direct
+or contributory patent infringement, then any patent licenses
+granted to You under this License for that Work shall terminate
+as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+Work or Derivative Works thereof in any medium, with or without
+modifications, and in Source or Object form, provided that You
+meet the following conditions:
+
+(a) You must give any other recipients of the Work or
+Derivative Works a copy of this License; and
+
+(b) You must cause any modified files to carry prominent notices
+stating that You changed the files; and
+
+(c) You must retain, in the Source form of any Derivative Works
+that You distribute, all copyright, patent, trademark, and
+attribution notices from the Source form of the Work,
+excluding those notices that do not pertain to any part of
+the Derivative Works; and
+
+(d) If the Work includes a "NOTICE" text file as part of its
+distribution, then any Derivative Works that You distribute must
+include a readable copy of the attribution notices contained
+within such NOTICE file, excluding those notices that do not
+pertain to any part of the Derivative Works, in at least one
+of the following places: within a NOTICE text file distributed
+as part of the Derivative Works; within the Source form or
+documentation, if provided along with the Derivative Works; or,
+within a display generated by the Derivative Works, if and
+wherever such third-party notices normally appear. The contents
+of the NOTICE file are for informational purposes only and
+do not modify the License. You may add Your own attribution
+notices within Derivative Works that You distribute, alongside
+or as an addendum to the NOTICE text from the Work, provided
+that such additional attribution notices cannot be construed
+as modifying the License.
+
+You may add Your own copyright statement to Your modifications and
+may provide additional or different license terms and conditions
+for use, reproduction, or distribution of Your modifications, or
+for any such Derivative Works as a whole, provided Your use,
+reproduction, and distribution of the Work otherwise complies with
+the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+any Contribution intentionally submitted for inclusion in the Work
+by You to the Licensor shall be under the terms and conditions of
+this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify
+the terms of any separate license agreement you may have executed
+with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+names, trademarks, service marks, or product names of the Licensor,
+except as required for reasonable and customary use in describing the
+origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+agreed to in writing, Licensor provides the Work (and each
+Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+implied, including, without limitation, any warranties or conditions
+of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+PARTICULAR PURPOSE. You are solely responsible for determining the
+appropriateness of using or redistributing the Work and assume any
+risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+whether in tort (including negligence), contract, or otherwise,
+unless required by applicable law (such as deliberate and grossly
+negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special,
+incidental, or consequential damages of any character arising as a
+result of this License or out of the use or inability to use the
+Work (including but not limited to damages for loss of goodwill,
+work stoppage, computer failure or malfunction, or any and all
+other commercial damages or losses), even if such Contributor
+has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+the Work or Derivative Works thereof, You may choose to offer,
+and charge a fee for, acceptance of support, warranty, indemnity,
+or other liability obligations and/or rights consistent with this
+License. However, in accepting such obligations, You may act only
+on Your own behalf and on Your sole responsibility, not on behalf
+of any other Contributor, and only if You agree to indemnify,
+defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason
+of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+To apply the Apache License to your work, attach the following
+boilerplate notice, with the fields enclosed by brackets "{}"
+replaced with your own identifying information. (Don't include
+the brackets!)  The text should be enclosed in the appropriate
+comment syntax for the file format. We also recommend that a
+file or class name and description of purpose be included on the
+same "printed page" as the copyright notice for easier
+identification within third-party archives.
+
+Copyright 2017 The OpenZipkin Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/github.com/openzipkin/zipkin-go/Makefile b/vendor/github.com/openzipkin/zipkin-go/Makefile
new file mode 100644
index 00000000000..b4271c61404
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go/Makefile
@@ -0,0 +1,29 @@
+
+.DEFAULT_GOAL := test
+
+.PHONY: test
+test:
+	go test -v -race -cover ./...
+
+.PHONY: bench
+bench:
+	go test -v -run - -bench . -benchmem ./...
+
+.PHONY: protoc
+protoc:
+	protoc --go_out=. proto/v2/zipkin.proto
+	protoc --go_out=plugins=grpc:. proto/testing/service.proto
+
+.PHONY: lint
+lint:
+	# Ignore grep's exit code since no match returns 1.
+	echo 'linting...' ; golint ./...
+
+.PHONY: vet
+vet:
+	go vet ./...
+
+.PHONY: all
+all: vet lint test bench
+
+.PHONY: example
diff --git a/vendor/github.com/openzipkin/zipkin-go/README.md b/vendor/github.com/openzipkin/zipkin-go/README.md
new file mode 100644
index 00000000000..a36416f8abf
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go/README.md
@@ -0,0 +1,102 @@
+# Zipkin Library for Go
+
+[![Travis CI](https://travis-ci.org/openzipkin/zipkin-go.svg?branch=master)](https://travis-ci.org/openzipkin/zipkin-go)
+[![CircleCI](https://circleci.com/gh/openzipkin/zipkin-go.svg?style=shield)](https://circleci.com/gh/openzipkin/zipkin-go)
+[![Appveyor CI](https://ci.appveyor.com/api/projects/status/1d0e5k96g10ajl63/branch/master?svg=true)](https://ci.appveyor.com/project/basvanbeek/zipkin-go)
+[![Coverage Status](https://img.shields.io/coveralls/github/openzipkin/zipkin-go.svg)](https://coveralls.io/github/openzipkin/zipkin-go?branch=master)
+[![Go Report Card](https://goreportcard.com/badge/github.com/openzipkin/zipkin-go)](https://goreportcard.com/report/github.com/openzipkin/zipkin-go)
+[![GoDoc](https://godoc.org/github.com/openzipkin/zipkin-go?status.svg)](https://godoc.org/github.com/openzipkin/zipkin-go)
+[![Gitter chat](https://badges.gitter.im/openzipkin/zipkin.svg)](https://gitter.im/openzipkin/zipkin?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+[![Sourcegraph](https://sourcegraph.com/github.com/openzipkin/zipkin-go/-/badge.svg)](https://sourcegraph.com/github.com/openzipkin/zipkin-go?badge)
+
+Zipkin Go is the official Go Tracer implementation for Zipkin, supported by the
+OpenZipkin community.
+
+## package organization
+`zipkin-go` is built with interoperability in mind within the OpenZipkin
+community and even with 3rd parties; the library consists of several packages.
+
+The main tracing implementation can be found in the root folder of this
+repository. Reusable parts not considered core implementation or deemed
+beneficiary for usage by others are placed in their own packages within this
+repository.
+
+### model
+This library implements the Zipkin V2 Span Model which is available in the model
+package. It contains a Go data model compatible with the Zipkin V2 API and can
+automatically sanitize, parse and (de)serialize to and from the required JSON
+representation as used by the official Zipkin V2 Collectors.
+
+### propagation
+The propagation package and B3 subpackage hold the logic for propagating
+SpanContext (span identifiers and sampling flags) between services participating
+in traces. Currently Zipkin B3 Propagation is supported for HTTP and GRPC.
+
+### middleware
+The middleware subpackages contain officially supported middleware handlers and
+tracing wrappers.
+
+#### http
+An easy-to-use http.Handler middleware for tracing server-side requests is
+provided. This allows one to use this middleware in applications using
+standard library servers as well as most available higher-level frameworks.
+Some frameworks will have their own instrumentation and middleware that map
+better to their ecosystem.
+
+For HTTP client operations, `NewTransport` can return an `http.RoundTripper`
+implementation that wraps either the standard http.Client's Transport or a
+custom-provided one, adding per-request tracing. Since HTTP requests can
+involve one or multiple redirects, it is advisable to always enclose HTTP
+client calls in a `Span`, either at the `*http.Client` call level or at the
+parent function level.
+
+For convenience, `NewClient` is provided; it returns an HTTP client that
+embeds `*http.Client` and wraps HTTP calls in an `application span` when the
+`DoWithAppSpan()` method is used.
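+
+As a minimal illustrative sketch (assuming a configured `tracer` and the
+import `zipkinhttp "github.com/openzipkin/zipkin-go/middleware/http"`):
+
+```go
+client, err := zipkinhttp.NewClient(tracer)
+if err != nil {
+	log.Fatalf("unable to create client: %+v", err)
+}
+// DoWithAppSpan wraps the call in an application span named "fetch_feed".
+res, err := client.DoWithAppSpan(req, "fetch_feed")
+```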
+
+#### grpc
+Easy-to-use grpc.StatsHandler middleware is provided for tracing gRPC server
+and client requests.
+
+For a server, pass `NewServerHandler` when calling `NewServer`, e.g.,
+
+```go
+import (
+	"google.golang.org/grpc"
+	zipkingrpc "github.com/openzipkin/zipkin-go/middleware/grpc"
+)
+
+server = grpc.NewServer(grpc.StatsHandler(zipkingrpc.NewServerHandler(tracer)))
+```
+
+For a client, pass `NewClientHandler` when calling `Dial`, e.g.,
+
+```go
+import (
+	"google.golang.org/grpc"
+	zipkingrpc "github.com/openzipkin/zipkin-go/middleware/grpc"
+)
+
+conn, err = grpc.Dial(addr, grpc.WithStatsHandler(zipkingrpc.NewClientHandler(tracer)))
+```
+
+### reporter
+The reporter package holds the interface which the various Reporter
+implementations use. It is exported into its own package as it can be used by
+3rd parties to use these Reporter packages in their own libraries for exporting
+to the Zipkin ecosystem. The `zipkin-go` tracer also uses the interface to
+accept 3rd party Reporter implementations.
+
+#### HTTP Reporter
+The most common Reporter type used by Zipkin users, transporting Spans to the
+Zipkin server as JSON over HTTP. The reporter holds a buffer and reports to
+the backend asynchronously.
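+
+An illustrative sketch (assuming a Zipkin collector on localhost):
+
+```go
+import reporterhttp "github.com/openzipkin/zipkin-go/reporter/http"
+
+// NewReporter buffers spans and sends them to the collector asynchronously.
+reporter := reporterhttp.NewReporter("http://localhost:9411/api/v2/spans")
+defer reporter.Close()
+```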
+
+#### Kafka Reporter
+A high-performance Reporter transporting Spans to the Zipkin server using a
+Kafka Producer that digests JSON V2 Spans. The reporter uses the
+[Sarama async producer](https://godoc.org/github.com/Shopify/sarama#AsyncProducer)
+underneath.
+
+## usage and examples
+[HTTP Server Example](example_httpserver_test.go)
diff --git a/vendor/github.com/openzipkin/zipkin-go/appveyor.yml b/vendor/github.com/openzipkin/zipkin-go/appveyor.yml
new file mode 100644
index 00000000000..daaee030ada
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go/appveyor.yml
@@ -0,0 +1,20 @@
+version: v1.0.0.{build}
+
+platform: x64
+
+clone_folder: c:\gopath\src\github.com\openzipkin\zipkin-go
+
+environment:
+  GOPATH: c:\gopath
+  GO111MODULE: on
+  GOFLAGS: -mod=readonly
+
+install:
+  - set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
+  - go version
+  - go env
+
+build_script:
+  - go vet ./...
+  - go test -v -race -cover ./...
+  - go test -v -run - -bench . -benchmem ./...
diff --git a/vendor/github.com/openzipkin/zipkin-go/circle.yml b/vendor/github.com/openzipkin/zipkin-go/circle.yml
new file mode 100644
index 00000000000..4e8b623d473
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go/circle.yml
@@ -0,0 +1,16 @@
+version: 2
+jobs:
+  build:
+    working_directory: /go/src/github.com/openzipkin/zipkin-go
+    parallelism: 1
+    docker:
+      - image: circleci/golang
+    steps:
+      - checkout
+      - run: echo 'deb http://www.rabbitmq.com/debian/ testing main' | sudo tee /etc/apt/sources.list.d/rabbitmq.list
+      - run: wget -O- https://www.rabbitmq.com/rabbitmq-release-signing-key.asc | sudo apt-key add -
+      - run: sudo apt-get update
+      - run: sudo apt-get install rabbitmq-server
+      - run: sudo service rabbitmq-server start
+      - run: go get -t -v -d ./...
+      - run: make vet test bench
diff --git a/vendor/github.com/openzipkin/zipkin-go/context.go b/vendor/github.com/openzipkin/zipkin-go/context.go
new file mode 100644
index 00000000000..bd25ddcb37b
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go/context.go
@@ -0,0 +1,52 @@
+// Copyright 2019 The OpenZipkin Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package zipkin
+
+import (
+	"context"
+)
+
+var defaultNoopSpan = &noopSpan{}
+
+// SpanFromContext retrieves a Zipkin Span from Go's context propagation
+// mechanism if found. If not found, returns nil.
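+//
+// Typical usage (illustrative):
+//
+//     if span := zipkin.SpanFromContext(ctx); span != nil {
+//         span.Tag("key", "value")
+//     }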
+func SpanFromContext(ctx context.Context) Span {
+	if s, ok := ctx.Value(spanKey).(Span); ok {
+		return s
+	}
+	return nil
+}
+
+// SpanOrNoopFromContext retrieves a Zipkin Span from Go's context propagation
+// mechanism if found. If not found, returns a noopSpan.
+// This function is typically used by modules that want to enrich existing
+// Zipkin spans with additional data but can't guarantee that spans were
+// properly propagated. It is preferred to use SpanFromContext() and test for
+// nil instead of using this function.
+func SpanOrNoopFromContext(ctx context.Context) Span {
+	if s, ok := ctx.Value(spanKey).(Span); ok {
+		return s
+	}
+	return defaultNoopSpan
+}
+
+// NewContext stores a Zipkin Span into Go's context propagation mechanism.
+func NewContext(ctx context.Context, s Span) context.Context {
+	return context.WithValue(ctx, spanKey, s)
+}
+
+type ctxKey struct{}
+
+var spanKey = ctxKey{}
diff --git a/vendor/github.com/openzipkin/zipkin-go/doc.go b/vendor/github.com/openzipkin/zipkin-go/doc.go
new file mode 100644
index 00000000000..18cc62f824b
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go/doc.go
@@ -0,0 +1,20 @@
+// Copyright 2019 The OpenZipkin Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package zipkin implements a native Zipkin instrumentation library for Go.
+
+See https://zipkin.io for more information about Zipkin.
+*/
+package zipkin
diff --git a/vendor/github.com/openzipkin/zipkin-go/endpoint.go b/vendor/github.com/openzipkin/zipkin-go/endpoint.go
new file mode 100644
index 00000000000..4a1a6c70519
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go/endpoint.go
@@ -0,0 +1,80 @@
+// Copyright 2019 The OpenZipkin Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package zipkin
+
+import (
+	"net"
+	"strconv"
+	"strings"
+
+	"github.com/openzipkin/zipkin-go/model"
+)
+
+// NewEndpoint creates a new endpoint given the provided serviceName and
+// hostPort.
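+//
+// For example (illustrative):
+//
+//     ep, err := zipkin.NewEndpoint("orderService", "localhost:8080")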
+func NewEndpoint(serviceName string, hostPort string) (*model.Endpoint, error) {
+	e := &model.Endpoint{
+		ServiceName: serviceName,
+	}
+
+	if hostPort == "" || hostPort == ":0" {
+		if serviceName == "" {
+			// if all properties are empty we should not have an Endpoint object.
+			return nil, nil
+		}
+		return e, nil
+	}
+
+	if strings.IndexByte(hostPort, ':') < 0 {
+		hostPort += ":0"
+	}
+
+	host, port, err := net.SplitHostPort(hostPort)
+	if err != nil {
+		return nil, err
+	}
+
+	p, err := strconv.ParseUint(port, 10, 16)
+	if err != nil {
+		return nil, err
+	}
+	e.Port = uint16(p)
+
+	addrs, err := net.LookupIP(host)
+	if err != nil {
+		return nil, err
+	}
+
+	for i := range addrs {
+		addr := addrs[i].To4()
+		if addr == nil {
+			// IPv6 - 16 bytes
+			if e.IPv6 == nil {
+				e.IPv6 = addrs[i].To16()
+			}
+		} else {
+			// IPv4 - 4 bytes
+			if e.IPv4 == nil {
+				e.IPv4 = addr
+			}
+		}
+		if e.IPv4 != nil && e.IPv6 != nil {
+			// Both IPv4 & IPv6 have been set, done...
+			break
+		}
+	}
+
+	return e, nil
+}
diff --git a/vendor/github.com/openzipkin/zipkin-go/go.mod b/vendor/github.com/openzipkin/zipkin-go/go.mod
new file mode 100644
index 00000000000..ed37c1c2cba
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go/go.mod
@@ -0,0 +1,25 @@
+module github.com/openzipkin/zipkin-go
+
+require (
+	github.com/Shopify/sarama v1.19.0
+	github.com/Shopify/toxiproxy v2.1.4+incompatible // indirect
+	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/eapache/go-resiliency v1.1.0 // indirect
+	github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 // indirect
+	github.com/eapache/queue v1.1.0 // indirect
+	github.com/gogo/protobuf v1.2.0
+	github.com/golang/protobuf v1.2.0
+	github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db // indirect
+	github.com/gorilla/context v1.1.1 // indirect
+	github.com/gorilla/mux v1.6.2
+	github.com/onsi/ginkgo v1.7.0
+	github.com/onsi/gomega v1.4.3
+	github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1 // indirect
+	github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a // indirect
+	github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94
+	golang.org/x/net v0.0.0-20190311183353-d8887717615a
+	golang.org/x/sync v0.0.0-20181108010431-42b317875d0f // indirect
+	google.golang.org/grpc v1.20.0
+)
+
+go 1.12
diff --git a/vendor/github.com/openzipkin/zipkin-go/go.sum b/vendor/github.com/openzipkin/zipkin-go/go.sum
new file mode 100644
index 00000000000..5274bc9f400
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go/go.sum
@@ -0,0 +1,76 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/Shopify/sarama v1.19.0 h1:9oksLxC6uxVPHPVYUmq6xhr1BOF/hHobWH2UzO67z1s=
+github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
+github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc=
+github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/eapache/go-resiliency v1.1.0 h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU=
+github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
+github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw=
+github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
+github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=
+github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
+github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g=
+github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
+github.com/gogo/protobuf v1.2.0 h1:xU6/SpYbvkNYiptHJYEDRseDLvYE7wSqhYYNy0QSUzI=
+github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w=
+github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8=
+github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
+github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk=
+github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs=
+github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU=
+github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1 h1:VGcrWe3yk6o+t7BdVNy5UDPWa4OZuDWtE1W1ZbS7Kyw=
+github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
+github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
+github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ=
+github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94 h1:0ngsPmuP6XIjiFRNFYlvKwSr5zff2v+uPHaffZ6/M4k=
+github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/grpc v1.20.0 h1:DlsSIrgEBuZAUFJcta2B5i/lzeHHbnfkNFAfFXLVFYQ=
+google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/vendor/github.com/openzipkin/zipkin-go/idgenerator/idgenerator.go b/vendor/github.com/openzipkin/zipkin-go/idgenerator/idgenerator.go
new file mode 100644
index 00000000000..3e857010fc9
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go/idgenerator/idgenerator.go
@@ -0,0 +1,130 @@
+// Copyright 2019 The OpenZipkin Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package idgenerator contains several Span and Trace ID generators which can be
+used by the Zipkin tracer. Additional third party generators can be plugged in
+if they adhere to the IDGenerator interface.
+*/
+package idgenerator
+
+import (
+	"math/rand"
+	"sync"
+	"time"
+
+	"github.com/openzipkin/zipkin-go/model"
+)
+
+var (
+	seededIDGen = rand.New(rand.NewSource(time.Now().UnixNano()))
+	// NewSource returns a new pseudo-random Source seeded with the given value.
+	// Unlike the default Source used by top-level functions, this source is not
+	// safe for concurrent use by multiple goroutines. Hence the need for a mutex.
+	seededIDLock sync.Mutex
+)
+
+// IDGenerator interface can be used to provide the Zipkin Tracer with custom
+// implementations to generate Span and Trace IDs.
+type IDGenerator interface {
+	SpanID(traceID model.TraceID) model.ID // Generates a new Span ID
+	TraceID() model.TraceID                // Generates a new Trace ID
+}
+
+// NewRandom64 returns an ID Generator which can generate 64-bit trace and
+// span IDs.
+func NewRandom64() IDGenerator {
+	return &randomID64{}
+}
+
+// NewRandom128 returns an ID Generator which can generate 128-bit trace and
+// 64-bit span IDs.
+func NewRandom128() IDGenerator {
+	return &randomID128{}
+}
+
+// NewRandomTimestamped generates 128-bit time-sortable trace IDs and 64-bit
+// span IDs.
+func NewRandomTimestamped() IDGenerator {
+	return &randomTimestamped{}
+}
+
+// randomID64 can generate 64-bit trace IDs and 64-bit span IDs.
+type randomID64 struct{}
+
+func (r *randomID64) TraceID() (id model.TraceID) {
+	seededIDLock.Lock()
+	id = model.TraceID{
+		Low: uint64(seededIDGen.Int63()),
+	}
+	seededIDLock.Unlock()
+	return
+}
+
+func (r *randomID64) SpanID(traceID model.TraceID) (id model.ID) {
+	if !traceID.Empty() {
+		return model.ID(traceID.Low)
+	}
+	seededIDLock.Lock()
+	id = model.ID(seededIDGen.Int63())
+	seededIDLock.Unlock()
+	return
+}
+
+// randomID128 can generate 128-bit trace IDs and 64-bit span IDs.
+type randomID128 struct{}
+
+func (r *randomID128) TraceID() (id model.TraceID) {
+	seededIDLock.Lock()
+	id = model.TraceID{
+		High: uint64(seededIDGen.Int63()),
+		Low:  uint64(seededIDGen.Int63()),
+	}
+	seededIDLock.Unlock()
+	return
+}
+
+func (r *randomID128) SpanID(traceID model.TraceID) (id model.ID) {
+	if !traceID.Empty() {
+		return model.ID(traceID.Low)
+	}
+	seededIDLock.Lock()
+	id = model.ID(seededIDGen.Int63())
+	seededIDLock.Unlock()
+	return
+}
+
+// randomTimestamped can generate 128-bit time-sortable trace IDs compatible
+// with AWS X-Ray, plus 64-bit span IDs.
+type randomTimestamped struct{}
+
+func (t *randomTimestamped) TraceID() (id model.TraceID) {
+	seededIDLock.Lock()
+	id = model.TraceID{
+		High: uint64(time.Now().Unix()<<32) + uint64(seededIDGen.Int31()),
+		Low:  uint64(seededIDGen.Int63()),
+	}
+	seededIDLock.Unlock()
+	return
+}
+
+func (t *randomTimestamped) SpanID(traceID model.TraceID) (id model.ID) {
+	if !traceID.Empty() {
+		return model.ID(traceID.Low)
+	}
+	seededIDLock.Lock()
+	id = model.ID(seededIDGen.Int63())
+	seededIDLock.Unlock()
+	return
+}
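
A sketch of plugging one of these generators into the tracer; `zipkin.NewTracer` and `zipkin.WithIDGenerator` come from the library's tracer.go and tracer_options.go, which sit outside this hunk:

```go
package main

import (
	"log"

	zipkin "github.com/openzipkin/zipkin-go"
	"github.com/openzipkin/zipkin-go/idgenerator"
	"github.com/openzipkin/zipkin-go/reporter"
)

func main() {
	// Swap the default generator for 128-bit, time-sortable (X-Ray
	// compatible) trace IDs; a no-op reporter keeps the sketch offline.
	tracer, err := zipkin.NewTracer(
		reporter.NewNoopReporter(),
		zipkin.WithIDGenerator(idgenerator.NewRandomTimestamped()),
	)
	if err != nil {
		log.Fatal(err)
	}

	span := tracer.StartSpan("example-operation")
	defer span.Finish()
	log.Println(span.Context().TraceID) // high 64 bits carry a timestamp
}
```
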
diff --git a/vendor/github.com/openzipkin/zipkin-go/model/annotation.go b/vendor/github.com/openzipkin/zipkin-go/model/annotation.go
new file mode 100644
index 00000000000..795bc4143f2
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go/model/annotation.go
@@ -0,0 +1,60 @@
+// Copyright 2019 The OpenZipkin Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"encoding/json"
+	"errors"
+	"time"
+)
+
+// ErrValidTimestampRequired error
+var ErrValidTimestampRequired = errors.New("valid annotation timestamp required")
+
+// Annotation associates an event that explains latency with a timestamp.
+type Annotation struct {
+	Timestamp time.Time
+	Value     string
+}
+
+// MarshalJSON implements custom JSON encoding
+func (a *Annotation) MarshalJSON() ([]byte, error) {
+	return json.Marshal(&struct {
+		Timestamp int64  `json:"timestamp"`
+		Value     string `json:"value"`
+	}{
+		Timestamp: a.Timestamp.Round(time.Microsecond).UnixNano() / 1e3,
+		Value:     a.Value,
+	})
+}
+
+// UnmarshalJSON implements custom JSON decoding
+func (a *Annotation) UnmarshalJSON(b []byte) error {
+	type Alias Annotation
+	annotation := &struct {
+		TimeStamp uint64 `json:"timestamp"`
+		*Alias
+	}{
+		Alias: (*Alias)(a),
+	}
+	if err := json.Unmarshal(b, &annotation); err != nil {
+		return err
+	}
+	if annotation.TimeStamp < 1 {
+		return ErrValidTimestampRequired
+	}
+	a.Timestamp = time.Unix(0, int64(annotation.TimeStamp)*1e3)
+	return nil
+}
diff --git a/vendor/github.com/openzipkin/zipkin-go/model/doc.go b/vendor/github.com/openzipkin/zipkin-go/model/doc.go
new file mode 100644
index 00000000000..1b11b4df795
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go/model/doc.go
@@ -0,0 +1,23 @@
+// Copyright 2019 The OpenZipkin Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package model contains the Zipkin V2 model which is used by the Zipkin Go
+tracer implementation.
+
+Third party instrumentation libraries can use the model and transport packages
+found in this Zipkin Go library to directly interface with the Zipkin Server or
+Zipkin Collectors without the need to use the tracer implementation itself.
+*/
+package model
diff --git a/vendor/github.com/openzipkin/zipkin-go/model/endpoint.go b/vendor/github.com/openzipkin/zipkin-go/model/endpoint.go
new file mode 100644
index 00000000000..58880bd1577
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go/model/endpoint.go
@@ -0,0 +1,31 @@
+// Copyright 2019 The OpenZipkin Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import "net"
+
+// Endpoint holds the network context of a node in the service graph.
+type Endpoint struct {
+	ServiceName string `json:"serviceName,omitempty"`
+	IPv4        net.IP `json:"ipv4,omitempty"`
+	IPv6        net.IP `json:"ipv6,omitempty"`
+	Port        uint16 `json:"port,omitempty"`
+}
+
+// Empty returns true if all Endpoint properties are empty / unspecified.
+func (e *Endpoint) Empty() bool {
+	return e == nil ||
+		(e.ServiceName == "" && e.Port == 0 && len(e.IPv4) == 0 && len(e.IPv6) == 0)
+}
diff --git a/vendor/github.com/openzipkin/zipkin-go/model/kind.go b/vendor/github.com/openzipkin/zipkin-go/model/kind.go
new file mode 100644
index 00000000000..5d512ad90f2
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go/model/kind.go
@@ -0,0 +1,27 @@
+// Copyright 2019 The OpenZipkin Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+// Kind clarifies context of timestamp, duration and remoteEndpoint in a span.
+type Kind string
+
+// Available Kind values
+const (
+	Undetermined Kind = ""
+	Client       Kind = "CLIENT"
+	Server       Kind = "SERVER"
+	Producer     Kind = "PRODUCER"
+	Consumer     Kind = "CONSUMER"
+)
diff --git a/vendor/github.com/openzipkin/zipkin-go/model/span.go b/vendor/github.com/openzipkin/zipkin-go/model/span.go
new file mode 100644
index 00000000000..f428413f1a5
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go/model/span.go
@@ -0,0 +1,138 @@
+// Copyright 2019 The OpenZipkin Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"encoding/json"
+	"errors"
+	"time"
+)
+
+// unmarshal errors
+var (
+	ErrValidTraceIDRequired  = errors.New("valid traceId required")
+	ErrValidIDRequired       = errors.New("valid span id required")
+	ErrValidDurationRequired = errors.New("valid duration required")
+)
+
+// SpanContext holds the context of a Span.
+type SpanContext struct {
+	TraceID  TraceID `json:"traceId"`
+	ID       ID      `json:"id"`
+	ParentID *ID     `json:"parentId,omitempty"`
+	Debug    bool    `json:"debug,omitempty"`
+	Sampled  *bool   `json:"-"`
+	Err      error   `json:"-"`
+}
+
+// SpanModel structure.
+//
+// If using this library to instrument your application you will not need to
+// directly access or modify this representation. The SpanModel is exported for
+// use cases involving 3rd party Go instrumentation libraries desiring to
+// export data to a Zipkin server using the Zipkin V2 Span model.
+type SpanModel struct {
+	SpanContext
+	Name           string            `json:"name,omitempty"`
+	Kind           Kind              `json:"kind,omitempty"`
+	Timestamp      time.Time         `json:"-"`
+	Duration       time.Duration     `json:"-"`
+	Shared         bool              `json:"shared,omitempty"`
+	LocalEndpoint  *Endpoint         `json:"localEndpoint,omitempty"`
+	RemoteEndpoint *Endpoint         `json:"remoteEndpoint,omitempty"`
+	Annotations    []Annotation      `json:"annotations,omitempty"`
+	Tags           map[string]string `json:"tags,omitempty"`
+}
+
+// MarshalJSON exports our Model into the correct format for the Zipkin V2 API.
+func (s SpanModel) MarshalJSON() ([]byte, error) {
+	type Alias SpanModel
+
+	var timestamp int64
+	if !s.Timestamp.IsZero() {
+		if s.Timestamp.Unix() < 1 {
+			// Zipkin does not allow Timestamps before Unix epoch
+			return nil, ErrValidTimestampRequired
+		}
+		timestamp = s.Timestamp.Round(time.Microsecond).UnixNano() / 1e3
+	}
+
+	if s.Duration < time.Microsecond {
+		if s.Duration < 0 {
+			// negative duration is not allowed and signals a timing logic error
+			return nil, ErrValidDurationRequired
+		} else if s.Duration > 0 {
+			// sub microsecond durations are reported as 1 microsecond
+			s.Duration = 1 * time.Microsecond
+		}
+	} else {
+		// Duration will be rounded to nearest microsecond representation.
+		//
+		// NOTE: Duration.Round() is not available in Go 1.8 which we still support.
+		// To handle microsecond resolution rounding we'll add 500 nanoseconds to
+		// the duration. When truncated to microseconds in the call to marshal, it
+		// will be naturally rounded. See TestSpanDurationRounding in span_test.go
+		s.Duration += 500 * time.Nanosecond
+	}
+
+	if s.LocalEndpoint.Empty() {
+		s.LocalEndpoint = nil
+	}
+
+	if s.RemoteEndpoint.Empty() {
+		s.RemoteEndpoint = nil
+	}
+
+	return json.Marshal(&struct {
+		T int64 `json:"timestamp,omitempty"`
+		D int64 `json:"duration,omitempty"`
+		Alias
+	}{
+		T:     timestamp,
+		D:     s.Duration.Nanoseconds() / 1e3,
+		Alias: (Alias)(s),
+	})
+}
+
+// UnmarshalJSON imports our Model from a Zipkin V2 API compatible span
+// representation.
+func (s *SpanModel) UnmarshalJSON(b []byte) error {
+	type Alias SpanModel
+	span := &struct {
+		T uint64 `json:"timestamp,omitempty"`
+		D uint64 `json:"duration,omitempty"`
+		*Alias
+	}{
+		Alias: (*Alias)(s),
+	}
+	if err := json.Unmarshal(b, &span); err != nil {
+		return err
+	}
+	if s.ID < 1 {
+		return ErrValidIDRequired
+	}
+	if span.T > 0 {
+		s.Timestamp = time.Unix(0, int64(span.T)*1e3)
+	}
+	s.Duration = time.Duration(span.D*1e3) * time.Nanosecond
+	if s.LocalEndpoint.Empty() {
+		s.LocalEndpoint = nil
+	}
+
+	if s.RemoteEndpoint.Empty() {
+		s.RemoteEndpoint = nil
+	}
+	return nil
+}
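
The custom (un)marshalers above mean a `SpanModel` serializes straight into the Zipkin V2 wire format; a small sketch (field values are arbitrary):

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/openzipkin/zipkin-go/model"
)

func main() {
	span := model.SpanModel{
		SpanContext: model.SpanContext{
			TraceID: model.TraceID{Low: 0x12345678},
			ID:      model.ID(0x1),
		},
		Name:      "get /items",
		Kind:      model.Server,
		Timestamp: time.Now(),
		// 2.5µs is reported as 3µs: 500ns is added before the integer
		// division down to microseconds.
		Duration: 2500 * time.Nanosecond,
	}

	b, err := json.Marshal(span)
	if err != nil {
		panic(err)
	}
	// Timestamp and duration appear as microsecond integers, IDs as
	// zero-padded hex strings.
	fmt.Println(string(b))
}
```
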
diff --git a/vendor/github.com/openzipkin/zipkin-go/model/span_id.go b/vendor/github.com/openzipkin/zipkin-go/model/span_id.go
new file mode 100644
index 00000000000..452dc871b20
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go/model/span_id.go
@@ -0,0 +1,44 @@
+// Copyright 2019 The OpenZipkin Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"fmt"
+	"strconv"
+)
+
+// ID type
+type ID uint64
+
+// String outputs the 64-bit ID as hex string.
+func (i ID) String() string {
+	return fmt.Sprintf("%016x", uint64(i))
+}
+
+// MarshalJSON serializes an ID type (SpanID, ParentSpanID) to HEX.
+func (i ID) MarshalJSON() ([]byte, error) {
+	return []byte(fmt.Sprintf("%q", i.String())), nil
+}
+
+// UnmarshalJSON deserializes an ID type (SpanID, ParentSpanID) from HEX.
+func (i *ID) UnmarshalJSON(b []byte) (err error) {
+	var id uint64
+	if len(b) < 3 {
+		return nil
+	}
+	id, err = strconv.ParseUint(string(b[1:len(b)-1]), 16, 64)
+	*i = ID(id)
+	return err
+}
diff --git a/vendor/github.com/openzipkin/zipkin-go/model/traceid.go b/vendor/github.com/openzipkin/zipkin-go/model/traceid.go
new file mode 100644
index 00000000000..68d12d386c1
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go/model/traceid.go
@@ -0,0 +1,75 @@
+// Copyright 2019 The OpenZipkin Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"fmt"
+	"strconv"
+)
+
+// TraceID is a 128 bit number internally stored as 2x uint64 (high & low).
+// In case of 64 bit traceIDs, the value can be found in Low.
+type TraceID struct {
+	High uint64
+	Low  uint64
+}
+
+// Empty returns true if the TraceID has a zero value.
+func (t TraceID) Empty() bool {
+	return t.Low == 0 && t.High == 0
+}
+
+// String outputs the 128-bit traceID as hex string.
+func (t TraceID) String() string {
+	if t.High == 0 {
+		return fmt.Sprintf("%016x", t.Low)
+	}
+	return fmt.Sprintf("%016x%016x", t.High, t.Low)
+}
+
+// TraceIDFromHex returns the TraceID from a hex string.
+func TraceIDFromHex(h string) (t TraceID, err error) {
+	if len(h) > 16 {
+		if t.High, err = strconv.ParseUint(h[0:len(h)-16], 16, 64); err != nil {
+			return
+		}
+		t.Low, err = strconv.ParseUint(h[len(h)-16:], 16, 64)
+		return
+	}
+	t.Low, err = strconv.ParseUint(h, 16, 64)
+	return
+}
+
+// MarshalJSON custom JSON serializer to export the TraceID in the required
+// zero padded hex representation.
+func (t TraceID) MarshalJSON() ([]byte, error) {
+	return []byte(fmt.Sprintf("%q", t.String())), nil
+}
+
+// UnmarshalJSON custom JSON deserializer to retrieve the traceID from the hex
+// encoded representation.
+func (t *TraceID) UnmarshalJSON(traceID []byte) error {
+	if len(traceID) < 3 {
+		return ErrValidTraceIDRequired
+	}
+	// A valid JSON string is encoded wrapped in double quotes. We need to trim
+	// these before converting the hex payload.
+	tID, err := TraceIDFromHex(string(traceID[1 : len(traceID)-1]))
+	if err != nil {
+		return err
+	}
+	*t = tID
+	return nil
+}
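
A quick round trip through `TraceIDFromHex`, showing how a 128-bit hex string splits across `High` and `Low`:

```go
package main

import (
	"fmt"
	"log"

	"github.com/openzipkin/zipkin-go/model"
)

func main() {
	// 32 hex chars: the first 16 parse into High, the last 16 into Low.
	t, err := model.TraceIDFromHex("463ac35c9f6413ad48485a3953bb6124")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("high=%016x low=%016x\n", t.High, t.Low)
	fmt.Println(t) // 463ac35c9f6413ad48485a3953bb6124
}
```
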
diff --git a/vendor/github.com/openzipkin/zipkin-go/noop.go b/vendor/github.com/openzipkin/zipkin-go/noop.go
new file mode 100644
index 00000000000..1368b9e77aa
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go/noop.go
@@ -0,0 +1,41 @@
+// Copyright 2019 The OpenZipkin Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package zipkin
+
+import (
+	"time"
+
+	"github.com/openzipkin/zipkin-go/model"
+)
+
+type noopSpan struct {
+	model.SpanContext
+}
+
+func (n *noopSpan) Context() model.SpanContext { return n.SpanContext }
+
+func (n *noopSpan) SetName(string) {}
+
+func (*noopSpan) SetRemoteEndpoint(*model.Endpoint) {}
+
+func (*noopSpan) Annotate(time.Time, string) {}
+
+func (*noopSpan) Tag(string, string) {}
+
+func (*noopSpan) Finish() {}
+
+func (*noopSpan) FinishedWithDuration(duration time.Duration) {}
+
+func (*noopSpan) Flush() {}
diff --git a/vendor/github.com/openzipkin/zipkin-go/propagation/b3/doc.go b/vendor/github.com/openzipkin/zipkin-go/propagation/b3/doc.go
new file mode 100644
index 00000000000..27ce5e040e8
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go/propagation/b3/doc.go
@@ -0,0 +1,19 @@
+// Copyright 2019 The OpenZipkin Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package b3 implements serialization and deserialization logic for Zipkin
+B3 Headers.
+*/
+package b3
diff --git a/vendor/github.com/openzipkin/zipkin-go/propagation/b3/grpc.go b/vendor/github.com/openzipkin/zipkin-go/propagation/b3/grpc.go
new file mode 100644
index 00000000000..a1b30fa41f2
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go/propagation/b3/grpc.go
@@ -0,0 +1,87 @@
+// Copyright 2019 The OpenZipkin Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package b3
+
+import (
+	"google.golang.org/grpc/metadata"
+
+	"github.com/openzipkin/zipkin-go/model"
+	"github.com/openzipkin/zipkin-go/propagation"
+)
+
+// ExtractGRPC will extract a span.Context from the gRPC Request metadata if
+// found in B3 header format.
+func ExtractGRPC(md *metadata.MD) propagation.Extractor {
+	return func() (*model.SpanContext, error) {
+		var (
+			traceIDHeader      = GetGRPCHeader(md, TraceID)
+			spanIDHeader       = GetGRPCHeader(md, SpanID)
+			parentSpanIDHeader = GetGRPCHeader(md, ParentSpanID)
+			sampledHeader      = GetGRPCHeader(md, Sampled)
+			flagsHeader        = GetGRPCHeader(md, Flags)
+		)
+
+		return ParseHeaders(
+			traceIDHeader, spanIDHeader, parentSpanIDHeader, sampledHeader,
+			flagsHeader,
+		)
+	}
+}
+
+// InjectGRPC will inject a span.Context into gRPC metadata.
+func InjectGRPC(md *metadata.MD) propagation.Injector {
+	return func(sc model.SpanContext) error {
+		if (model.SpanContext{}) == sc {
+			return ErrEmptyContext
+		}
+
+		if sc.Debug {
+			setGRPCHeader(md, Flags, "1")
+		} else if sc.Sampled != nil {
+			// Debug is encoded as X-B3-Flags: 1. Since Debug implies Sampled,
+			// we don't send "X-B3-Sampled" if Debug is set.
+			if *sc.Sampled {
+				setGRPCHeader(md, Sampled, "1")
+			} else {
+				setGRPCHeader(md, Sampled, "0")
+			}
+		}
+
+		if !sc.TraceID.Empty() && sc.ID > 0 {
+			// set identifiers
+			setGRPCHeader(md, TraceID, sc.TraceID.String())
+			setGRPCHeader(md, SpanID, sc.ID.String())
+			if sc.ParentID != nil {
+				setGRPCHeader(md, ParentSpanID, sc.ParentID.String())
+			}
+		}
+
+		return nil
+	}
+}
+
+// GetGRPCHeader retrieves the last value found for a particular key. If the
+// key is not found it returns an empty string.
+func GetGRPCHeader(md *metadata.MD, key string) string {
+	v := (*md)[key]
+	if len(v) < 1 {
+		return ""
+	}
+	return v[len(v)-1]
+}
+
+func setGRPCHeader(md *metadata.MD, key, value string) {
+	(*md)[key] = append((*md)[key], value)
+}
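
A sketch of carrying a span context across a gRPC hop with the helpers above; the tracer setup uses constructors from elsewhere in the library and a no-op reporter for brevity:

```go
package main

import (
	"context"
	"log"

	zipkin "github.com/openzipkin/zipkin-go"
	"github.com/openzipkin/zipkin-go/propagation/b3"
	"github.com/openzipkin/zipkin-go/reporter"
	"google.golang.org/grpc/metadata"
)

func main() {
	tracer, _ := zipkin.NewTracer(reporter.NewNoopReporter())
	span := tracer.StartSpan("rpc-client")
	defer span.Finish()

	// Client side: inject the context into outgoing metadata.
	md := metadata.New(nil)
	if err := b3.InjectGRPC(&md)(span.Context()); err != nil {
		log.Fatal(err)
	}
	ctx := metadata.NewOutgoingContext(context.Background(), md)
	_ = ctx // hand ctx to the gRPC call

	// Server side: recover the context from (here: the same) metadata.
	sc, err := b3.ExtractGRPC(&md)()
	if err != nil {
		log.Fatal(err)
	}
	log.Println(sc.TraceID, sc.ID)
}
```
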
diff --git a/vendor/github.com/openzipkin/zipkin-go/propagation/b3/http.go b/vendor/github.com/openzipkin/zipkin-go/propagation/b3/http.go
new file mode 100644
index 00000000000..d987f94115f
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go/propagation/b3/http.go
@@ -0,0 +1,129 @@
+// Copyright 2019 The OpenZipkin Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package b3
+
+import (
+	"net/http"
+
+	"github.com/openzipkin/zipkin-go/model"
+	"github.com/openzipkin/zipkin-go/propagation"
+)
+
+// InjectOption allows for functional options when injecting B3 context.
+type InjectOption func(opts *InjectOptions)
+
+// InjectOptions holds the state set by InjectOption functions.
+type InjectOptions struct {
+	shouldInjectSingleHeader bool
+	shouldInjectMultiHeader  bool
+}
+
+// WithSingleAndMultiHeader includes both the single "b3" header and the
+// multiple X-B3-* headers in the context injection.
+func WithSingleAndMultiHeader() InjectOption {
+	return func(opts *InjectOptions) {
+		opts.shouldInjectSingleHeader = true
+		opts.shouldInjectMultiHeader = true
+	}
+}
+
+// WithSingleHeaderOnly includes only the single "b3" header in the context
+// injection.
+func WithSingleHeaderOnly() InjectOption {
+	return func(opts *InjectOptions) {
+		opts.shouldInjectSingleHeader = true
+		opts.shouldInjectMultiHeader = false
+	}
+}
+
+// ExtractHTTP will extract a span.Context from the HTTP Request if found in
+// B3 header format.
+func ExtractHTTP(r *http.Request) propagation.Extractor {
+	return func() (*model.SpanContext, error) {
+		var (
+			traceIDHeader      = r.Header.Get(TraceID)
+			spanIDHeader       = r.Header.Get(SpanID)
+			parentSpanIDHeader = r.Header.Get(ParentSpanID)
+			sampledHeader      = r.Header.Get(Sampled)
+			flagsHeader        = r.Header.Get(Flags)
+			singleHeader       = r.Header.Get(Context)
+		)
+
+		var (
+			sc   *model.SpanContext
+			sErr error
+			mErr error
+		)
+		if singleHeader != "" {
+			sc, sErr = ParseSingleHeader(singleHeader)
+			if sErr == nil {
+				return sc, nil
+			}
+		}
+
+		sc, mErr = ParseHeaders(
+			traceIDHeader, spanIDHeader, parentSpanIDHeader,
+			sampledHeader, flagsHeader,
+		)
+
+		if mErr != nil && sErr != nil {
+			return nil, sErr
+		}
+
+		return sc, mErr
+	}
+}
+
+// InjectHTTP will inject a span.Context into an HTTP Request
+func InjectHTTP(r *http.Request, opts ...InjectOption) propagation.Injector {
+	options := InjectOptions{shouldInjectMultiHeader: true}
+	for _, opt := range opts {
+		opt(&options)
+	}
+
+	return func(sc model.SpanContext) error {
+		if (model.SpanContext{}) == sc {
+			return ErrEmptyContext
+		}
+
+		if options.shouldInjectMultiHeader {
+			if sc.Debug {
+				r.Header.Set(Flags, "1")
+			} else if sc.Sampled != nil {
+				// Debug is encoded as X-B3-Flags: 1. Since Debug implies
+				// Sampled, we don't also send "X-B3-Sampled".
+				if *sc.Sampled {
+					r.Header.Set(Sampled, "1")
+				} else {
+					r.Header.Set(Sampled, "0")
+				}
+			}
+
+			if !sc.TraceID.Empty() && sc.ID > 0 {
+				r.Header.Set(TraceID, sc.TraceID.String())
+				r.Header.Set(SpanID, sc.ID.String())
+				if sc.ParentID != nil {
+					r.Header.Set(ParentSpanID, sc.ParentID.String())
+				}
+			}
+		}
+
+		if options.shouldInjectSingleHeader {
+			r.Header.Set(Context, BuildSingleHeader(sc))
+		}
+
+		return nil
+	}
+}
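
The same dance over HTTP; this sketch opts into the compact single `b3` header, then extracts it back on the receiving side:

```go
package main

import (
	"log"
	"net/http"

	zipkin "github.com/openzipkin/zipkin-go"
	"github.com/openzipkin/zipkin-go/propagation/b3"
	"github.com/openzipkin/zipkin-go/reporter"
)

func main() {
	tracer, _ := zipkin.NewTracer(reporter.NewNoopReporter())
	span := tracer.StartSpan("client-call")
	defer span.Finish()

	req, _ := http.NewRequest("GET", "http://example.com/items", nil)

	// Multi-header (X-B3-*) is the default; WithSingleHeaderOnly switches
	// to the compact "b3" header.
	if err := b3.InjectHTTP(req, b3.WithSingleHeaderOnly())(span.Context()); err != nil {
		log.Fatal(err)
	}
	log.Println(req.Header.Get("b3")) // {traceid}-{spanid}-1

	// The receiving side extracts the same context.
	sc, err := b3.ExtractHTTP(req)()
	if err != nil {
		log.Fatal(err)
	}
	log.Println(sc.TraceID, sc.ID)
}
```
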
diff --git a/vendor/github.com/openzipkin/zipkin-go/propagation/b3/map.go b/vendor/github.com/openzipkin/zipkin-go/propagation/b3/map.go
new file mode 100644
index 00000000000..0bb6f6bb7be
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go/propagation/b3/map.go
@@ -0,0 +1,101 @@
+// Copyright 2019 The OpenZipkin Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package b3
+
+import (
+	"github.com/openzipkin/zipkin-go/model"
+	"github.com/openzipkin/zipkin-go/propagation"
+)
+
+// Map allows serialization and deserialization of SpanContext into a standard Go map.
+type Map map[string]string
+
+// Extract implements Extractor
+func (m *Map) Extract() (*model.SpanContext, error) {
+	var (
+		traceIDHeader      = (*m)[TraceID]
+		spanIDHeader       = (*m)[SpanID]
+		parentSpanIDHeader = (*m)[ParentSpanID]
+		sampledHeader      = (*m)[Sampled]
+		flagsHeader        = (*m)[Flags]
+		singleHeader       = (*m)[Context]
+	)
+
+	var (
+		sc   *model.SpanContext
+		sErr error
+		mErr error
+	)
+	if singleHeader != "" {
+		sc, sErr = ParseSingleHeader(singleHeader)
+		if sErr == nil {
+			return sc, nil
+		}
+	}
+
+	sc, mErr = ParseHeaders(
+		traceIDHeader, spanIDHeader, parentSpanIDHeader,
+		sampledHeader, flagsHeader,
+	)
+
+	if mErr != nil && sErr != nil {
+		return nil, sErr
+	}
+
+	return sc, mErr
+
+}
+
+// Inject implements Injector
+func (m *Map) Inject(opts ...InjectOption) propagation.Injector {
+	options := InjectOptions{shouldInjectMultiHeader: true}
+	for _, opt := range opts {
+		opt(&options)
+	}
+
+	return func(sc model.SpanContext) error {
+		if (model.SpanContext{}) == sc {
+			return ErrEmptyContext
+		}
+
+		if options.shouldInjectMultiHeader {
+			if sc.Debug {
+				(*m)[Flags] = "1"
+			} else if sc.Sampled != nil {
+				// Debug is encoded as X-B3-Flags: 1. Since Debug implies
+				// Sampled, we don't also send "X-B3-Sampled".
+				if *sc.Sampled {
+					(*m)[Sampled] = "1"
+				} else {
+					(*m)[Sampled] = "0"
+				}
+			}
+
+			if !sc.TraceID.Empty() && sc.ID > 0 {
+				(*m)[TraceID] = sc.TraceID.String()
+				(*m)[SpanID] = sc.ID.String()
+				if sc.ParentID != nil {
+					(*m)[ParentSpanID] = sc.ParentID.String()
+				}
+			}
+		}
+
+		if options.shouldInjectSingleHeader {
+			(*m)[Context] = BuildSingleHeader(sc)
+		}
+
+		return nil
+	}
+}
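
`Map` makes the same propagation work for any string-keyed carrier, e.g. message-queue headers; a sketch with a hand-built context:

```go
package main

import (
	"fmt"
	"log"

	"github.com/openzipkin/zipkin-go/model"
	"github.com/openzipkin/zipkin-go/propagation/b3"
)

func main() {
	sampled := true
	parentID := model.ID(0x1)
	sc := model.SpanContext{
		TraceID:  model.TraceID{Low: 0x2},
		ID:       model.ID(0x3),
		ParentID: &parentID,
		Sampled:  &sampled,
	}

	// Inject into a plain map, e.g. to ship alongside a queue message.
	carrier := b3.Map{}
	if err := carrier.Inject()(sc); err != nil {
		log.Fatal(err)
	}
	fmt.Println(carrier)

	// The consumer rebuilds the context from the same map.
	got, err := carrier.Extract()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(got.TraceID, got.ID, *got.Sampled)
}
```
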
diff --git a/vendor/github.com/openzipkin/zipkin-go/propagation/b3/shared.go b/vendor/github.com/openzipkin/zipkin-go/propagation/b3/shared.go
new file mode 100644
index 00000000000..04bcae832df
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go/propagation/b3/shared.go
@@ -0,0 +1,44 @@
+// Copyright 2019 The OpenZipkin Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package b3
+
+import "errors"
+
+// Common Header Extraction / Injection errors
+var (
+	ErrInvalidSampledByte        = errors.New("invalid B3 Sampled found")
+	ErrInvalidSampledHeader      = errors.New("invalid B3 Sampled header found")
+	ErrInvalidFlagsHeader        = errors.New("invalid B3 Flags header found")
+	ErrInvalidTraceIDHeader      = errors.New("invalid B3 TraceID header found")
+	ErrInvalidSpanIDHeader       = errors.New("invalid B3 SpanID header found")
+	ErrInvalidParentSpanIDHeader = errors.New("invalid B3 ParentSpanID header found")
+	ErrInvalidScope              = errors.New("require either both TraceID and SpanID or none")
+	ErrInvalidScopeParent        = errors.New("ParentSpanID requires both TraceID and SpanID to be available")
+	ErrInvalidScopeParentSingle  = errors.New("ParentSpanID requires TraceID, SpanID and Sampled to be available")
+	ErrEmptyContext              = errors.New("empty request context")
+	ErrInvalidTraceIDValue       = errors.New("invalid B3 TraceID value found")
+	ErrInvalidSpanIDValue        = errors.New("invalid B3 SpanID value found")
+	ErrInvalidParentSpanIDValue  = errors.New("invalid B3 ParentSpanID value found")
+)
+
+// Default B3 Header keys
+const (
+	TraceID      = "x-b3-traceid"
+	SpanID       = "x-b3-spanid"
+	ParentSpanID = "x-b3-parentspanid"
+	Sampled      = "x-b3-sampled"
+	Flags        = "x-b3-flags"
+	Context      = "b3"
+)
diff --git a/vendor/github.com/openzipkin/zipkin-go/propagation/b3/spancontext.go b/vendor/github.com/openzipkin/zipkin-go/propagation/b3/spancontext.go
new file mode 100644
index 00000000000..e3569e0d2ce
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go/propagation/b3/spancontext.go
@@ -0,0 +1,202 @@
+// Copyright 2019 The OpenZipkin Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package b3
+
+import (
+	"strconv"
+	"strings"
+
+	"github.com/openzipkin/zipkin-go/model"
+)
+
+// ParseHeaders takes values found in B3 Headers and tries to reconstruct a
+// SpanContext.
+func ParseHeaders(
+	hdrTraceID, hdrSpanID, hdrParentSpanID, hdrSampled, hdrFlags string,
+) (*model.SpanContext, error) {
+	var (
+		err           error
+		spanID        uint64
+		requiredCount int
+		sc            = &model.SpanContext{}
+	)
+
+	// Correct values for an existing Sampled header are "0" and "1". For
+	// legacy support, and to be lenient toward other tracing implementations,
+	// we accept "true" and "false" as inputs for interop purposes.
+	switch strings.ToLower(hdrSampled) {
+	case "0", "false":
+		sampled := false
+		sc.Sampled = &sampled
+	case "1", "true":
+		sampled := true
+		sc.Sampled = &sampled
+	case "":
+		// sc.Sampled = nil
+	default:
+		return nil, ErrInvalidSampledHeader
+	}
+
+	// The only accepted value for Flags is "1". This will set Debug to true. All
+	// other values and omission of header will be ignored.
+	if hdrFlags == "1" {
+		sc.Debug = true
+		sc.Sampled = nil
+	}
+
+	if hdrTraceID != "" {
+		requiredCount++
+		if sc.TraceID, err = model.TraceIDFromHex(hdrTraceID); err != nil {
+			return nil, ErrInvalidTraceIDHeader
+		}
+	}
+
+	if hdrSpanID != "" {
+		requiredCount++
+		if spanID, err = strconv.ParseUint(hdrSpanID, 16, 64); err != nil {
+			return nil, ErrInvalidSpanIDHeader
+		}
+		sc.ID = model.ID(spanID)
+	}
+
+	if requiredCount != 0 && requiredCount != 2 {
+		return nil, ErrInvalidScope
+	}
+
+	if hdrParentSpanID != "" {
+		if requiredCount == 0 {
+			return nil, ErrInvalidScopeParent
+		}
+		if spanID, err = strconv.ParseUint(hdrParentSpanID, 16, 64); err != nil {
+			return nil, ErrInvalidParentSpanIDHeader
+		}
+		parentSpanID := model.ID(spanID)
+		sc.ParentID = &parentSpanID
+	}
+
+	return sc, nil
+}
+
+// ParseSingleHeader takes values found in the B3 Single Header and tries to
+// reconstruct a SpanContext.
+func ParseSingleHeader(contextHeader string) (*model.SpanContext, error) {
+	if contextHeader == "" {
+		return nil, ErrEmptyContext
+	}
+
+	var (
+		sc       = model.SpanContext{}
+		sampling string
+	)
+
+	headerLen := len(contextHeader)
+
+	if headerLen == 1 {
+		sampling = contextHeader
+	} else if headerLen == 16 || headerLen == 32 {
+		return nil, ErrInvalidScope
+	} else if headerLen >= 16+16+1 {
+		var high, low uint64
+		pos := 0
+		if string(contextHeader[16]) != "-" {
+			// traceID must be 128 bits
+			var err error
+			high, err = strconv.ParseUint(contextHeader[0:16], 16, 64)
+			if err != nil {
+				return nil, ErrInvalidTraceIDValue
+			}
+			pos = 16
+		}
+
+		low, err := strconv.ParseUint(contextHeader[pos:pos+16], 16, 64)
+		if err != nil {
+			return nil, ErrInvalidTraceIDValue
+		}
+
+		sc.TraceID = model.TraceID{High: high, Low: low}
+
+		rawID, err := strconv.ParseUint(contextHeader[pos+16+1:pos+16+1+16], 16, 64)
+		if err != nil {
+			return nil, ErrInvalidSpanIDValue
+		}
+
+		sc.ID = model.ID(rawID)
+
+		if headerLen > pos+16+1+16 {
+			if headerLen == pos+16+1+16+1 {
+				return nil, ErrInvalidSampledByte
+			}
+
+			if headerLen == pos+16+1+16+1+1 {
+				sampling = string(contextHeader[pos+16+1+16+1])
+			} else if headerLen == pos+16+1+16+1+16 {
+				return nil, ErrInvalidScopeParentSingle
+			} else if headerLen == pos+16+1+16+1+1+1+16 {
+				sampling = string(contextHeader[pos+16+1+16+1])
+
+				rawParentID, err := strconv.ParseUint(contextHeader[pos+16+1+16+1+1+1:], 16, 64)
+				if err != nil {
+					return nil, ErrInvalidParentSpanIDValue
+				}
+
+				parentID := model.ID(rawParentID)
+				sc.ParentID = &parentID
+			} else {
+				return nil, ErrInvalidParentSpanIDValue
+			}
+		}
+	} else {
+		return nil, ErrInvalidTraceIDValue
+	}
+	switch sampling {
+	case "d":
+		sc.Debug = true
+	case "1":
+		trueVal := true
+		sc.Sampled = &trueVal
+	case "0":
+		falseVal := false
+		sc.Sampled = &falseVal
+	case "":
+	default:
+		return nil, ErrInvalidSampledByte
+	}
+
+	return &sc, nil
+}
+
+// BuildSingleHeader takes the values from the SpanContext and builds the B3 header
+func BuildSingleHeader(sc model.SpanContext) string {
+	header := []string{}
+	if !sc.TraceID.Empty() && sc.ID > 0 {
+		header = append(header, sc.TraceID.String(), sc.ID.String())
+	}
+
+	if sc.Debug {
+		header = append(header, "d")
+	} else if sc.Sampled != nil {
+		if *sc.Sampled {
+			header = append(header, "1")
+		} else {
+			header = append(header, "0")
+		}
+	}
+
+	if sc.ParentID != nil {
+		header = append(header, sc.ParentID.String())
+	}
+
+	return strings.Join(header, "-")
+}
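
A round trip through the single-header codec above; the IDs are arbitrary 16-hex-char values and `d` is the debug sampling byte:

```go
package main

import (
	"fmt"
	"log"

	"github.com/openzipkin/zipkin-go/propagation/b3"
)

func main() {
	// {64-bit trace id}-{span id}-{sampling}-{parent span id}
	sc, err := b3.ParseSingleHeader("80f198ee56343ba8-e457b5a2e4d86bd1-d-05e3ac9a4f6e3b90")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(sc.TraceID, sc.ID, sc.Debug, *sc.ParentID)

	// BuildSingleHeader reproduces the original header.
	fmt.Println(b3.BuildSingleHeader(*sc))
}
```
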
diff --git a/vendor/github.com/openzipkin/zipkin-go/propagation/propagation.go b/vendor/github.com/openzipkin/zipkin-go/propagation/propagation.go
new file mode 100644
index 00000000000..067b28e8d8f
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go/propagation/propagation.go
@@ -0,0 +1,30 @@
+// Copyright 2019 The OpenZipkin Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package propagation holds the required function signatures for Injection and
+Extraction as used by the Zipkin Tracer.
+
+Subpackages of this package contain officially supported standard propagation
+implementations.
+*/
+package propagation
+
+import "github.com/openzipkin/zipkin-go/model"
+
+// Extractor function signature
+type Extractor func() (*model.SpanContext, error)
+
+// Injector function signature
+type Injector func(model.SpanContext) error
diff --git a/vendor/github.com/openzipkin/zipkin-go/reporter/http/http.go b/vendor/github.com/openzipkin/zipkin-go/reporter/http/http.go
new file mode 100644
index 00000000000..fe3df454d5b
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go/reporter/http/http.go
@@ -0,0 +1,257 @@
+// Copyright 2019 The OpenZipkin Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package http implements an HTTP reporter to send spans to Zipkin V2 collectors.
+*/
+package http
+
+import (
+	"bytes"
+	"log"
+	"net/http"
+	"os"
+	"sync"
+	"time"
+
+	"github.com/openzipkin/zipkin-go/model"
+	"github.com/openzipkin/zipkin-go/reporter"
+)
+
+// defaults
+const (
+	defaultTimeout       = time.Second * 5 // timeout for the http request
+	defaultBatchInterval = time.Second * 1 // interval at which batches are sent
+	defaultBatchSize     = 100
+	defaultMaxBacklog    = 1000
+)
+
+// httpReporter will send spans to a Zipkin HTTP Collector using the Zipkin V2 API.
+type httpReporter struct {
+	url           string
+	client        *http.Client
+	logger        *log.Logger
+	batchInterval time.Duration
+	batchSize     int
+	maxBacklog    int
+	batchMtx      *sync.Mutex
+	batch         []*model.SpanModel
+	spanC         chan *model.SpanModel
+	sendC         chan struct{}
+	quit          chan struct{}
+	shutdown      chan error
+	reqCallback   RequestCallbackFn
+	serializer    reporter.SpanSerializer
+}
+
+// Send implements reporter
+func (r *httpReporter) Send(s model.SpanModel) {
+	r.spanC <- &s
+}
+
+// Close implements reporter
+func (r *httpReporter) Close() error {
+	close(r.quit)
+	return <-r.shutdown
+}
+
+func (r *httpReporter) loop() {
+	var (
+		nextSend   = time.Now().Add(r.batchInterval)
+		ticker     = time.NewTicker(r.batchInterval / 10)
+		tickerChan = ticker.C
+	)
+	defer ticker.Stop()
+
+	for {
+		select {
+		case span := <-r.spanC:
+			currentBatchSize := r.append(span)
+			if currentBatchSize >= r.batchSize {
+				nextSend = time.Now().Add(r.batchInterval)
+				r.enqueueSend()
+			}
+		case <-tickerChan:
+			if time.Now().After(nextSend) {
+				nextSend = time.Now().Add(r.batchInterval)
+				r.enqueueSend()
+			}
+		case <-r.quit:
+			close(r.sendC)
+			return
+		}
+	}
+}
+
+func (r *httpReporter) sendLoop() {
+	for range r.sendC {
+		_ = r.sendBatch()
+	}
+	r.shutdown <- r.sendBatch()
+}
+
+func (r *httpReporter) enqueueSend() {
+	select {
+	case r.sendC <- struct{}{}:
+	default:
+		// Do nothing if there's a pending send request already
+	}
+}
+
+func (r *httpReporter) append(span *model.SpanModel) (newBatchSize int) {
+	r.batchMtx.Lock()
+
+	r.batch = append(r.batch, span)
+	if len(r.batch) > r.maxBacklog {
+		dispose := len(r.batch) - r.maxBacklog
+		r.logger.Printf("backlog too long, disposing %d spans", dispose)
+		r.batch = r.batch[dispose:]
+	}
+	newBatchSize = len(r.batch)
+
+	r.batchMtx.Unlock()
+	return
+}
+
+func (r *httpReporter) sendBatch() error {
+	// Select all current spans in the batch to be sent
+	r.batchMtx.Lock()
+	sendBatch := r.batch[:]
+	r.batchMtx.Unlock()
+
+	if len(sendBatch) == 0 {
+		return nil
+	}
+
+	body, err := r.serializer.Serialize(sendBatch)
+	if err != nil {
+		r.logger.Printf("failed when marshalling the spans batch: %s\n", err.Error())
+		return err
+	}
+
+	req, err := http.NewRequest("POST", r.url, bytes.NewReader(body))
+	if err != nil {
+		r.logger.Printf("failed when creating the request: %s\n", err.Error())
+		return err
+	}
+	req.Header.Set("Content-Type", r.serializer.ContentType())
+	if r.reqCallback != nil {
+		r.reqCallback(req)
+	}
+
+	resp, err := r.client.Do(req)
+	if err != nil {
+		r.logger.Printf("failed to send the request: %s\n", err.Error())
+		return err
+	}
+	_ = resp.Body.Close()
+	if resp.StatusCode < 200 || resp.StatusCode > 299 {
+		r.logger.Printf("failed the request with status code %d\n", resp.StatusCode)
+	}
+
+	// Remove sent spans from the batch even if they were not saved
+	r.batchMtx.Lock()
+	r.batch = r.batch[len(sendBatch):]
+	r.batchMtx.Unlock()
+
+	return nil
+}
+
+// RequestCallbackFn receives the initialized request from the Collector before
+// sending it over the wire. This allows one to plug in additional headers or
+// do other customization.
+type RequestCallbackFn func(*http.Request)
+
+// ReporterOption sets a parameter for the HTTP Reporter
+type ReporterOption func(r *httpReporter)
+
+// Timeout sets maximum timeout for http request.
+func Timeout(duration time.Duration) ReporterOption {
+	return func(r *httpReporter) { r.client.Timeout = duration }
+}
+
+// BatchSize sets the maximum batch size, after which a collect will be
+// triggered. The default batch size is 100 traces.
+func BatchSize(n int) ReporterOption {
+	return func(r *httpReporter) { r.batchSize = n }
+}
+
+// MaxBacklog sets the maximum backlog size. When the batch size reaches this
+// threshold, spans from the beginning of the batch will be disposed of.
+func MaxBacklog(n int) ReporterOption {
+	return func(r *httpReporter) { r.maxBacklog = n }
+}
+
+// BatchInterval sets the maximum duration we will buffer traces before
+// emitting them to the collector. The default batch interval is 1 second.
+func BatchInterval(d time.Duration) ReporterOption {
+	return func(r *httpReporter) { r.batchInterval = d }
+}
+
+// Client sets a custom http client to use.
+func Client(client *http.Client) ReporterOption {
+	return func(r *httpReporter) { r.client = client }
+}
+
+// RequestCallback registers a callback function to adjust the reporter
+// *http.Request before it sends the request to Zipkin.
+func RequestCallback(rc RequestCallbackFn) ReporterOption {
+	return func(r *httpReporter) { r.reqCallback = rc }
+}
+
+// Logger sets the logger used to report errors in the collection
+// process.
+func Logger(l *log.Logger) ReporterOption {
+	return func(r *httpReporter) { r.logger = l }
+}
+
+// Serializer sets the serialization function to use for sending span data to
+// Zipkin.
+func Serializer(serializer reporter.SpanSerializer) ReporterOption {
+	return func(r *httpReporter) {
+		if serializer != nil {
+			r.serializer = serializer
+		}
+	}
+}
+
+// NewReporter returns a new HTTP Reporter.
+// url should be the endpoint to send the spans to, e.g.
+// http://localhost:9411/api/v2/spans
+func NewReporter(url string, opts ...ReporterOption) reporter.Reporter {
+	r := httpReporter{
+		url:           url,
+		logger:        log.New(os.Stderr, "", log.LstdFlags),
+		client:        &http.Client{Timeout: defaultTimeout},
+		batchInterval: defaultBatchInterval,
+		batchSize:     defaultBatchSize,
+		maxBacklog:    defaultMaxBacklog,
+		batch:         []*model.SpanModel{},
+		spanC:         make(chan *model.SpanModel),
+		sendC:         make(chan struct{}, 1),
+		quit:          make(chan struct{}, 1),
+		shutdown:      make(chan error, 1),
+		batchMtx:      &sync.Mutex{},
+		serializer:    reporter.JSONSerializer{},
+	}
+
+	for _, opt := range opts {
+		opt(&r)
+	}
+
+	go r.loop()
+	go r.sendLoop()
+
+	return &r
+}
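
Putting the reporter to work; a sketch that overrides a few of the batching defaults and wires the reporter into a tracer (`zipkin.NewTracer` and `WithLocalEndpoint` come from the library's top-level package, outside this hunk):

```go
package main

import (
	"log"
	"time"

	zipkin "github.com/openzipkin/zipkin-go"
	zipkinhttp "github.com/openzipkin/zipkin-go/reporter/http"
)

func main() {
	// Spans are flushed once 50 are buffered or every 2s, whichever comes
	// first; each POST to the collector times out after 3s.
	rep := zipkinhttp.NewReporter(
		"http://localhost:9411/api/v2/spans",
		zipkinhttp.BatchSize(50),
		zipkinhttp.BatchInterval(2*time.Second),
		zipkinhttp.Timeout(3*time.Second),
	)
	defer rep.Close() // sends any remaining batch before returning

	ep, _ := zipkin.NewEndpoint("checkout", "localhost:8080")
	tracer, err := zipkin.NewTracer(rep, zipkin.WithLocalEndpoint(ep))
	if err != nil {
		log.Fatal(err)
	}

	span := tracer.StartSpan("charge-card")
	span.Tag("amount", "42.00")
	span.Finish()
}
```
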
diff --git a/vendor/github.com/openzipkin/zipkin-go/reporter/reporter.go b/vendor/github.com/openzipkin/zipkin-go/reporter/reporter.go
new file mode 100644
index 00000000000..921aff57560
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go/reporter/reporter.go
@@ -0,0 +1,41 @@
+// Copyright 2019 The OpenZipkin Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package reporter holds the Reporter interface which is used by the Zipkin
+Tracer to send finished spans.
+
+Subpackages of package reporter contain officially supported standard
+reporter implementations.
+*/
+package reporter
+
+import "github.com/openzipkin/zipkin-go/model"
+
+// Reporter interface can be used to provide the Zipkin Tracer with custom
+// implementations to publish Zipkin Span data.
+type Reporter interface {
+	Send(model.SpanModel) // Send Span data to the reporter
+	Close() error         // Close the reporter
+}
+
+type noopReporter struct{}
+
+func (r *noopReporter) Send(model.SpanModel) {}
+func (r *noopReporter) Close() error         { return nil }
+
+// NewNoopReporter returns a no-op Reporter implementation.
+func NewNoopReporter() Reporter {
+	return &noopReporter{}
+}
diff --git a/vendor/github.com/openzipkin/zipkin-go/reporter/serializer.go b/vendor/github.com/openzipkin/zipkin-go/reporter/serializer.go
new file mode 100644
index 00000000000..6647e2b9f2c
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go/reporter/serializer.go
@@ -0,0 +1,42 @@
+// Copyright 2019 The OpenZipkin Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package reporter
+
+import (
+	"encoding/json"
+
+	"github.com/openzipkin/zipkin-go/model"
+)
+
+// SpanSerializer describes the methods needed to set the Span encoding
+// type used by the various Zipkin transports.
+type SpanSerializer interface {
+	Serialize([]*model.SpanModel) ([]byte, error)
+	ContentType() string
+}
+
+// JSONSerializer implements the default JSON encoding SpanSerializer.
+type JSONSerializer struct{}
+
+// Serialize takes an array of Zipkin SpanModel objects and returns a JSON
+// encoding of it.
+func (JSONSerializer) Serialize(spans []*model.SpanModel) ([]byte, error) {
+	return json.Marshal(spans)
+}
+
+// ContentType returns the ContentType needed for this encoding.
+func (JSONSerializer) ContentType() string {
+	return "application/json"
+}
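A custom SpanSerializer follows the same two-method shape. This sketch (a hypothetical variant, not part of the package) emits indented JSON for easier manual inspection:

```go
package example

import (
	"encoding/json"

	"github.com/openzipkin/zipkin-go/model"
)

// prettyJSONSerializer is a hypothetical SpanSerializer that produces
// indented JSON, e.g. when dumping spans to a file while debugging.
type prettyJSONSerializer struct{}

func (prettyJSONSerializer) Serialize(spans []*model.SpanModel) ([]byte, error) {
	return json.MarshalIndent(spans, "", "  ")
}

// ContentType is unchanged from the default JSON serializer.
func (prettyJSONSerializer) ContentType() string { return "application/json" }
```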
diff --git a/vendor/github.com/openzipkin/zipkin-go/sample.go b/vendor/github.com/openzipkin/zipkin-go/sample.go
new file mode 100644
index 00000000000..6103c138464
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go/sample.go
@@ -0,0 +1,127 @@
+// Copyright 2019 The OpenZipkin Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package zipkin
+
+import (
+	"fmt"
+	"math"
+	"math/rand"
+	"sync"
+	"time"
+)
+
+// Sampler functions return whether a Zipkin span should be sampled, based on
+// its trace ID.
+type Sampler func(id uint64) bool
+
+// NeverSample will always return false. If used by a service it will not allow
+// the service to start traces but will still allow the service to participate
+// in traces started upstream.
+func NeverSample(_ uint64) bool { return false }
+
+// AlwaysSample will always return true. If used by a service it will always start
+// traces if no upstream trace has been propagated. If an incoming upstream trace
+// is not sampled the service will adhere to this and only propagate the context.
+func AlwaysSample(_ uint64) bool { return true }
+
+// NewModuloSampler provides a generic modulo-based Sampler: it samples a
+// trace when its ID is divisible by mod.
+func NewModuloSampler(mod uint64) Sampler {
+	if mod < 2 {
+		return AlwaysSample
+	}
+	return func(id uint64) bool {
+		return (id % mod) == 0
+	}
+}
+
+// NewBoundarySampler is appropriate for high-traffic instrumentation that
+// provisions random trace IDs and makes the sampling decision only once.
+// It defends against nodes in the cluster selecting exactly the same IDs.
+func NewBoundarySampler(rate float64, salt int64) (Sampler, error) {
+	if rate == 0.0 {
+		return NeverSample, nil
+	}
+	if rate == 1.0 {
+		return AlwaysSample, nil
+	}
+	if rate < 0.0001 || rate > 1 {
+		return nil, fmt.Errorf("rate should be 0.0 or between 0.0001 and 1: was %f", rate)
+	}
+
+	var (
+		boundary = int64(rate * 10000)
+		usalt    = uint64(salt)
+	)
+	return func(id uint64) bool {
+		return int64(math.Abs(float64(id^usalt)))%10000 < boundary
+	}, nil
+}
+
+// NewCountingSampler is appropriate for low-traffic instrumentation or
+// those who do not provision random trace ids. It is not appropriate for
+// collectors as the sampling decision isn't idempotent (consistent based
+// on trace id).
+func NewCountingSampler(rate float64) (Sampler, error) {
+	if rate == 0.0 {
+		return NeverSample, nil
+	}
+	if rate == 1.0 {
+		return AlwaysSample, nil
+	}
+	if rate < 0.01 || rate > 1 {
+		return nil, fmt.Errorf("rate should be 0.0 or between 0.01 and 1: was %f", rate)
+	}
+	var (
+		i         = 0
+		outOf100  = int(rate*100 + math.Copysign(0.5, rate*100)) // for rounding float to int conversion instead of truncation
+		decisions = randomBitSet(100, outOf100, rand.New(rand.NewSource(time.Now().UnixNano())))
+		mtx       = &sync.Mutex{}
+	)
+
+	return func(_ uint64) bool {
+		mtx.Lock()
+		result := decisions[i]
+		i++
+		if i == 100 {
+			i = 0
+		}
+		mtx.Unlock()
+		return result
+	}, nil
+}
+
+// randomBitSet generates a bit set of the given size with exactly
+// cardinality bits set, using a reservoir-sampling approach borrowed from
+// Stack Overflow:
+// http://stackoverflow.com/questions/12817946/generate-a-random-bitset-with-n-1s
+func randomBitSet(size int, cardinality int, rnd *rand.Rand) []bool {
+	result := make([]bool, size)
+	chosen := make([]int, cardinality)
+	var i int
+	for i = 0; i < cardinality; i++ {
+		chosen[i] = i
+		result[i] = true
+	}
+	for ; i < size; i++ {
+		j := rnd.Intn(i + 1)
+		if j < cardinality {
+			result[chosen[j]] = false
+			result[i] = true
+			chosen[j] = i
+		}
+	}
+	return result
+}
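To illustrate the sampler constructors above, a short sketch; the rate and salt values are arbitrary:

```go
package main

import (
	"fmt"
	"log"

	zipkin "github.com/openzipkin/zipkin-go"
)

func main() {
	// Keep roughly 10% of traces; the salt de-correlates nodes that would
	// otherwise make identical decisions for the same trace IDs.
	boundary, err := zipkin.NewBoundarySampler(0.1, 1234)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(boundary(42)) // deterministic for a given ID and salt

	// Keep every 100th trace ID instead.
	modulo := zipkin.NewModuloSampler(100)
	fmt.Println(modulo(200)) // true: 200 % 100 == 0
}
```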
diff --git a/vendor/github.com/openzipkin/zipkin-go/span.go b/vendor/github.com/openzipkin/zipkin-go/span.go
new file mode 100644
index 00000000000..cc91568195d
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go/span.go
@@ -0,0 +1,58 @@
+// Copyright 2019 The OpenZipkin Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package zipkin
+
+import (
+	"time"
+
+	"github.com/openzipkin/zipkin-go/model"
+)
+
+// Span interface as returned by Tracer.StartSpan()
+type Span interface {
+	// Context returns the Span's SpanContext.
+	Context() model.SpanContext
+
+	// SetName updates the Span's name.
+	SetName(string)
+
+	// SetRemoteEndpoint updates the Span's Remote Endpoint.
+	SetRemoteEndpoint(*model.Endpoint)
+
+	// Annotate adds a timed event to the Span.
+	Annotate(time.Time, string)
+
+	// Tag sets a tag with the given key and value on the Span. If the key
+	// already exists on the Span its value is overridden, except for error
+	// tags, where the first value is persisted.
+	Tag(string, string)
+
+	// Finish the Span and send it to the Reporter. If the FlushOnFinish(false)
+	// option was used at Span creation time, Finish will not send the Span to
+	// the Reporter; it then becomes the user's responsibility to get the Span
+	// reported (by using span.Flush).
+	Finish()
+
+	// FinishedWithDuration finishes the Span with the given duration and sends
+	// it to the Reporter. If the FlushOnFinish(false) option was used at Span
+	// creation time, it will not send the Span to the Reporter; it then becomes
+	// the user's responsibility to get the Span reported (by using span.Flush).
+	FinishedWithDuration(duration time.Duration)
+
+	// Flush the Span to the Reporter (regardless of being finished or not).
+	// This can be used if the FlushOnFinish(false) SpanOption was set or when
+	// dealing with one-way RPC tracing where duration might not be measured.
+	Flush()
+}
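As a usage sketch against this interface, the typical span lifecycle looks like the following; the span itself would come from tracer.StartSpan in real code, and the names and tag values are illustrative:

```go
package example

import (
	"time"

	zipkin "github.com/openzipkin/zipkin-go"
)

// annotateCall sketches the usual Span lifecycle: name it, tag it, add a
// timed event, then report it via Finish.
func annotateCall(span zipkin.Span) {
	defer span.Finish() // sends to the Reporter unless FlushOnFinish(false) was set

	span.SetName("fetch-user")
	span.Tag("user.id", "42") // illustrative key/value
	span.Annotate(time.Now(), "cache miss")
}
```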
diff --git a/vendor/github.com/openzipkin/zipkin-go/span_implementation.go b/vendor/github.com/openzipkin/zipkin-go/span_implementation.go
new file mode 100644
index 00000000000..72904a84f38
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go/span_implementation.go
@@ -0,0 +1,101 @@
+// Copyright 2019 The OpenZipkin Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package zipkin
+
+import (
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/openzipkin/zipkin-go/model"
+)
+
+type spanImpl struct {
+	mtx sync.RWMutex
+	model.SpanModel
+	tracer        *Tracer
+	mustCollect   int32 // used as atomic bool (1 = true, 0 = false)
+	flushOnFinish bool
+}
+
+func (s *spanImpl) Context() model.SpanContext {
+	return s.SpanContext
+}
+
+func (s *spanImpl) SetName(name string) {
+	s.mtx.Lock()
+	s.Name = name
+	s.mtx.Unlock()
+}
+
+func (s *spanImpl) SetRemoteEndpoint(e *model.Endpoint) {
+	s.mtx.Lock()
+	if e == nil {
+		s.RemoteEndpoint = nil
+	} else {
+		s.RemoteEndpoint = &model.Endpoint{}
+		*s.RemoteEndpoint = *e
+	}
+	s.mtx.Unlock()
+}
+
+func (s *spanImpl) Annotate(t time.Time, value string) {
+	a := model.Annotation{
+		Timestamp: t,
+		Value:     value,
+	}
+
+	s.mtx.Lock()
+	s.Annotations = append(s.Annotations, a)
+	s.mtx.Unlock()
+}
+
+func (s *spanImpl) Tag(key, value string) {
+	s.mtx.Lock()
+
+	if key == string(TagError) {
+		if _, found := s.Tags[key]; found {
+			s.mtx.Unlock()
+			return
+		}
+	}
+
+	s.Tags[key] = value
+	s.mtx.Unlock()
+}
+
+func (s *spanImpl) Finish() {
+	if atomic.CompareAndSwapInt32(&s.mustCollect, 1, 0) {
+		s.Duration = time.Since(s.Timestamp)
+		if s.flushOnFinish {
+			s.tracer.reporter.Send(s.SpanModel)
+		}
+	}
+}
+
+func (s *spanImpl) FinishedWithDuration(d time.Duration) {
+	if atomic.CompareAndSwapInt32(&s.mustCollect, 1, 0) {
+		s.Duration = d
+		if s.flushOnFinish {
+			s.tracer.reporter.Send(s.SpanModel)
+		}
+	}
+}
+
+func (s *spanImpl) Flush() {
+	if s.SpanModel.Debug || (s.SpanModel.Sampled != nil && *s.SpanModel.Sampled) {
+		s.tracer.reporter.Send(s.SpanModel)
+	}
+}
diff --git a/vendor/github.com/openzipkin/zipkin-go/span_options.go b/vendor/github.com/openzipkin/zipkin-go/span_options.go
new file mode 100644
index 00000000000..5ac60bf35b4
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go/span_options.go
@@ -0,0 +1,88 @@
+// Copyright 2019 The OpenZipkin Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package zipkin
+
+import (
+	"time"
+
+	"github.com/openzipkin/zipkin-go/model"
+)
+
+// SpanOption allows for functional options to adjust behavior and payload of
+// the Span to be created with tracer.StartSpan().
+type SpanOption func(t *Tracer, s *spanImpl)
+
+// Kind sets the kind of the span being created.
+func Kind(kind model.Kind) SpanOption {
+	return func(t *Tracer, s *spanImpl) {
+		s.Kind = kind
+	}
+}
+
+// Parent will use provided SpanContext as parent to the span being created.
+func Parent(sc model.SpanContext) SpanOption {
+	return func(t *Tracer, s *spanImpl) {
+		if sc.Err != nil {
+			// encountered an extraction error
+			switch t.extractFailurePolicy {
+			case ExtractFailurePolicyRestart:
+			case ExtractFailurePolicyError:
+				// panic with the extraction error itself; s.SpanContext has
+				// not been populated at this point, so sc.Err is the value
+				// that carries the failure.
+				panic(sc.Err)
+			case ExtractFailurePolicyTagAndRestart:
+				s.Tags["error.extract"] = sc.Err.Error()
+			default:
+				panic(ErrInvalidExtractFailurePolicy)
+			}
+			/* don't use provided SpanContext, but restart trace */
+			return
+		}
+		s.SpanContext = sc
+	}
+}
+
+// StartTime uses a given start time for the span being created.
+func StartTime(start time.Time) SpanOption {
+	return func(t *Tracer, s *spanImpl) {
+		s.Timestamp = start
+	}
+}
+
+// RemoteEndpoint sets the remote endpoint of the span being created.
+func RemoteEndpoint(e *model.Endpoint) SpanOption {
+	return func(t *Tracer, s *spanImpl) {
+		s.RemoteEndpoint = e
+	}
+}
+
+// Tags sets initial tags for the span being created. If default tracer tags
+// are present they will be overwritten on key collisions.
+func Tags(tags map[string]string) SpanOption {
+	return func(t *Tracer, s *spanImpl) {
+		for k, v := range tags {
+			s.Tags[k] = v
+		}
+	}
+}
+
+// FlushOnFinish when set to false will prevent span.Finish() from sending
+// the Span to the Reporter automatically (sending is the default behavior).
+// If set to false, reporting the Span becomes the responsibility of the user.
+// This is useful if late tag data is expected to become available only after
+// the required finish time of the Span.
+func FlushOnFinish(b bool) SpanOption {
+	return func(t *Tracer, s *spanImpl) {
+		s.flushOnFinish = b
+	}
+}
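Putting these options together, a sketch of starting a client-side span; the span name and tag keys are illustrative, and the tracer is assumed to be configured elsewhere:

```go
package example

import (
	"time"

	zipkin "github.com/openzipkin/zipkin-go"
	"github.com/openzipkin/zipkin-go/model"
)

// startClientSpan shows how the functional SpanOptions compose at creation
// time.
func startClientSpan(tracer *zipkin.Tracer) zipkin.Span {
	return tracer.StartSpan(
		"http/get",
		zipkin.Kind(model.Client),
		zipkin.StartTime(time.Now()),
		zipkin.Tags(map[string]string{"peer.service": "billing"}),
		zipkin.FlushOnFinish(true), // the default, shown for clarity
	)
}
```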
diff --git a/vendor/github.com/openzipkin/zipkin-go/tags.go b/vendor/github.com/openzipkin/zipkin-go/tags.go
new file mode 100644
index 00000000000..650913c9ba6
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go/tags.go
@@ -0,0 +1,37 @@
+// Copyright 2019 The OpenZipkin Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package zipkin
+
+// Tag is a typed string holding the well-known span tag keys.
+type Tag string
+
+// Common Tag values
+const (
+	TagHTTPMethod       Tag = "http.method"
+	TagHTTPPath         Tag = "http.path"
+	TagHTTPUrl          Tag = "http.url"
+	TagHTTPRoute        Tag = "http.route"
+	TagHTTPStatusCode   Tag = "http.status_code"
+	TagHTTPRequestSize  Tag = "http.request.size"
+	TagHTTPResponseSize Tag = "http.response.size"
+	TagGRPCStatusCode   Tag = "grpc.status_code"
+	TagSQLQuery         Tag = "sql.query"
+	TagError            Tag = "error"
+)
+
+// Set a standard Tag with a payload on provided Span.
+func (t Tag) Set(s Span, value string) {
+	s.Tag(string(t), value)
+}
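The typed constants avoid scattering raw string keys through instrumentation code; a small sketch:

```go
package example

import zipkin "github.com/openzipkin/zipkin-go"

// setHTTPTags uses the typed Tag helpers rather than raw span.Tag calls
// with string literals.
func setHTTPTags(span zipkin.Span, method, status string) {
	zipkin.TagHTTPMethod.Set(span, method)
	zipkin.TagHTTPStatusCode.Set(span, status)
}
```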
diff --git a/vendor/github.com/openzipkin/zipkin-go/tracer.go b/vendor/github.com/openzipkin/zipkin-go/tracer.go
new file mode 100644
index 00000000000..0f294cf2774
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go/tracer.go
@@ -0,0 +1,187 @@
+// Copyright 2019 The OpenZipkin Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package zipkin
+
+import (
+	"context"
+	"sync/atomic"
+	"time"
+
+	"github.com/openzipkin/zipkin-go/idgenerator"
+	"github.com/openzipkin/zipkin-go/model"
+	"github.com/openzipkin/zipkin-go/propagation"
+	"github.com/openzipkin/zipkin-go/reporter"
+)
+
+// Tracer is our Zipkin tracer implementation. It should be initialized using
+// the NewTracer method.
+type Tracer struct {
+	defaultTags          map[string]string
+	extractFailurePolicy ExtractFailurePolicy
+	sampler              Sampler
+	generate             idgenerator.IDGenerator
+	reporter             reporter.Reporter
+	localEndpoint        *model.Endpoint
+	noop                 int32 // used as atomic bool (1 = true, 0 = false)
+	sharedSpans          bool
+	unsampledNoop        bool
+}
+
+// NewTracer returns a new Zipkin Tracer.
+func NewTracer(rep reporter.Reporter, opts ...TracerOption) (*Tracer, error) {
+	// set default tracer options
+	t := &Tracer{
+		defaultTags:          make(map[string]string),
+		extractFailurePolicy: ExtractFailurePolicyRestart,
+		sampler:              AlwaysSample,
+		generate:             idgenerator.NewRandom64(),
+		reporter:             rep,
+		localEndpoint:        nil,
+		noop:                 0,
+		sharedSpans:          true,
+		unsampledNoop:        false,
+	}
+
+	// if no reporter was provided we default to noop implementation.
+	if t.reporter == nil {
+		t.reporter = reporter.NewNoopReporter()
+		t.noop = 1
+	}
+
+	// process functional options
+	for _, opt := range opts {
+		if err := opt(t); err != nil {
+			return nil, err
+		}
+	}
+
+	return t, nil
+}
+
+// StartSpanFromContext creates and starts a span using the span found in
+// context as parent. If no parent span is found a root span is created.
+func (t *Tracer) StartSpanFromContext(ctx context.Context, name string, options ...SpanOption) (Span, context.Context) {
+	if parentSpan := SpanFromContext(ctx); parentSpan != nil {
+		options = append(options, Parent(parentSpan.Context()))
+	}
+	span := t.StartSpan(name, options...)
+	return span, NewContext(ctx, span)
+}
+
+// StartSpan creates and starts a span.
+func (t *Tracer) StartSpan(name string, options ...SpanOption) Span {
+	if atomic.LoadInt32(&t.noop) == 1 {
+		return &noopSpan{}
+	}
+	s := &spanImpl{
+		SpanModel: model.SpanModel{
+			Kind:          model.Undetermined,
+			Name:          name,
+			LocalEndpoint: t.localEndpoint,
+			Annotations:   make([]model.Annotation, 0),
+			Tags:          make(map[string]string),
+		},
+		flushOnFinish: true,
+		tracer:        t,
+	}
+
+	// add default tracer tags to span
+	for k, v := range t.defaultTags {
+		s.Tag(k, v)
+	}
+
+	// handle provided functional options
+	for _, option := range options {
+		option(t, s)
+	}
+
+	if s.TraceID.Empty() {
+		// create root span
+		s.SpanContext.TraceID = t.generate.TraceID()
+		s.SpanContext.ID = t.generate.SpanID(s.SpanContext.TraceID)
+	} else {
+		// valid parent context found
+		if t.sharedSpans && s.Kind == model.Server {
+			// join span
+			s.Shared = true
+		} else {
+			// regular child span
+			parentID := s.SpanContext.ID
+			s.SpanContext.ParentID = &parentID
+			s.SpanContext.ID = t.generate.SpanID(model.TraceID{})
+		}
+	}
+
+	if !s.SpanContext.Debug && s.Sampled == nil {
+		// deferred sampled context found, invoke sampler
+		sampled := t.sampler(s.SpanContext.TraceID.Low)
+		s.SpanContext.Sampled = &sampled
+		if sampled {
+			s.mustCollect = 1
+		}
+	} else {
+		if s.SpanContext.Debug || *s.Sampled {
+			s.mustCollect = 1
+		}
+	}
+
+	if t.unsampledNoop && s.mustCollect == 0 {
+		// trace not being sampled and noop requested
+		return &noopSpan{
+			SpanContext: s.SpanContext,
+		}
+	}
+
+	// add start time
+	if s.Timestamp.IsZero() {
+		s.Timestamp = time.Now()
+	}
+
+	return s
+}
+
+// Extract extracts a SpanContext using the provided Extractor function.
+func (t *Tracer) Extract(extractor propagation.Extractor) (sc model.SpanContext) {
+	if atomic.LoadInt32(&t.noop) == 1 {
+		return
+	}
+	psc, err := extractor()
+	if psc != nil {
+		sc = *psc
+	}
+	sc.Err = err
+	return
+}
+
+// SetNoop allows for killswitch behavior. If set to true the tracer will return
+// noopSpans and all data is dropped. This allows operators to stop tracing in
+// risk scenarios. Set back to false to resume tracing.
+func (t *Tracer) SetNoop(noop bool) {
+	if noop {
+		atomic.CompareAndSwapInt32(&t.noop, 0, 1)
+	} else {
+		atomic.CompareAndSwapInt32(&t.noop, 1, 0)
+	}
+}
+
+// LocalEndpoint returns a copy of the currently set local endpoint of the
+// tracer instance.
+func (t *Tracer) LocalEndpoint() *model.Endpoint {
+	if t.localEndpoint == nil {
+		return nil
+	}
+	ep := *t.localEndpoint
+	return &ep
+}
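End to end, tracer construction plus context propagation looks roughly like this sketch; the noop reporter and service name keep it self-contained:

```go
package main

import (
	"context"
	"log"

	zipkin "github.com/openzipkin/zipkin-go"
	"github.com/openzipkin/zipkin-go/model"
	"github.com/openzipkin/zipkin-go/reporter"
)

func main() {
	// A noop reporter keeps the example dependency-free; swap in the HTTP
	// reporter to actually ship spans.
	tracer, err := zipkin.NewTracer(
		reporter.NewNoopReporter(),
		zipkin.WithLocalEndpoint(&model.Endpoint{ServiceName: "demo"}),
	)
	if err != nil {
		log.Fatal(err)
	}

	root, ctx := tracer.StartSpanFromContext(context.Background(), "parent")
	child, _ := tracer.StartSpanFromContext(ctx, "child") // parent found in ctx
	child.Finish()
	root.Finish()
}
```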
diff --git a/vendor/github.com/openzipkin/zipkin-go/tracer_options.go b/vendor/github.com/openzipkin/zipkin-go/tracer_options.go
new file mode 100644
index 00000000000..533c5e478ef
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go/tracer_options.go
@@ -0,0 +1,138 @@
+// Copyright 2019 The OpenZipkin Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package zipkin
+
+import (
+	"errors"
+
+	"github.com/openzipkin/zipkin-go/idgenerator"
+	"github.com/openzipkin/zipkin-go/model"
+)
+
+// Tracer Option Errors
+var (
+	ErrInvalidEndpoint             = errors.New("requires valid local endpoint")
+	ErrInvalidExtractFailurePolicy = errors.New("invalid extract failure policy provided")
+)
+
+// ExtractFailurePolicy deals with Extraction errors
+type ExtractFailurePolicy int
+
+// ExtractFailurePolicyOptions
+const (
+	ExtractFailurePolicyRestart ExtractFailurePolicy = iota
+	ExtractFailurePolicyError
+	ExtractFailurePolicyTagAndRestart
+)
+
+// TracerOption allows for functional options to adjust behavior of the Tracer
+// to be created with NewTracer().
+type TracerOption func(o *Tracer) error
+
+// WithLocalEndpoint sets the local endpoint of the tracer.
+func WithLocalEndpoint(e *model.Endpoint) TracerOption {
+	return func(o *Tracer) error {
+		if e == nil {
+			o.localEndpoint = nil
+			return nil
+		}
+		ep := *e
+		o.localEndpoint = &ep
+		return nil
+	}
+}
+
+// WithExtractFailurePolicy allows one to set the ExtractFailurePolicy.
+func WithExtractFailurePolicy(p ExtractFailurePolicy) TracerOption {
+	return func(o *Tracer) error {
+		if p < 0 || p > ExtractFailurePolicyTagAndRestart {
+			return ErrInvalidExtractFailurePolicy
+		}
+		o.extractFailurePolicy = p
+		return nil
+	}
+}
+
+// WithNoopSpan, if set to true, will switch to a noop Span implementation
+// when the trace is not sampled.
+func WithNoopSpan(unsampledNoop bool) TracerOption {
+	return func(o *Tracer) error {
+		o.unsampledNoop = unsampledNoop
+		return nil
+	}
+}
+
+// WithSharedSpans allows one to place client-side and server-side annotations
+// for an RPC call in the same span (Zipkin V1 behavior) or in different spans
+// (more in line with other tracing solutions). By default this Tracer uses
+// shared host spans (client-side and server-side in the same span).
+func WithSharedSpans(val bool) TracerOption {
+	return func(o *Tracer) error {
+		o.sharedSpans = val
+		return nil
+	}
+}
+
+// WithSampler allows one to set a Sampler function
+func WithSampler(sampler Sampler) TracerOption {
+	return func(o *Tracer) error {
+		o.sampler = sampler
+		return nil
+	}
+}
+
+// WithTraceID128Bit if set to true will instruct the Tracer to start traces
+// with 128-bit trace IDs. If set to false the Tracer will start traces with
+// 64-bit trace IDs.
+func WithTraceID128Bit(val bool) TracerOption {
+	return func(o *Tracer) error {
+		if val {
+			o.generate = idgenerator.NewRandom128()
+		} else {
+			o.generate = idgenerator.NewRandom64()
+		}
+		return nil
+	}
+}
+
+// WithIDGenerator allows one to set a custom ID Generator
+func WithIDGenerator(generator idgenerator.IDGenerator) TracerOption {
+	return func(o *Tracer) error {
+		o.generate = generator
+		return nil
+	}
+}
+
+// WithTags allows one to set default tags to be added to each created span
+func WithTags(tags map[string]string) TracerOption {
+	return func(o *Tracer) error {
+		for k, v := range tags {
+			o.defaultTags[k] = v
+		}
+		return nil
+	}
+}
+
+// WithNoopTracer allows one to start the Tracer as a noop implementation.
+func WithNoopTracer(tracerNoop bool) TracerOption {
+	return func(o *Tracer) error {
+		if tracerNoop {
+			o.noop = 1
+		} else {
+			o.noop = 0
+		}
+		return nil
+	}
+}
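Combined, the options read naturally at construction time; the sampler rate, tag values, and toggles below are illustrative:

```go
package example

import (
	zipkin "github.com/openzipkin/zipkin-go"
	"github.com/openzipkin/zipkin-go/reporter"
)

// newTracer sketches a tracer configured with several of the options above.
func newTracer() (*zipkin.Tracer, error) {
	sampler, err := zipkin.NewCountingSampler(0.25) // keep ~1 in 4 traces
	if err != nil {
		return nil, err
	}
	return zipkin.NewTracer(
		reporter.NewNoopReporter(), // placeholder reporter
		zipkin.WithSampler(sampler),
		zipkin.WithTraceID128Bit(true),
		zipkin.WithTags(map[string]string{"service.version": "1.0.0"}),
		zipkin.WithSharedSpans(false), // separate client and server spans
	)
}
```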
diff --git a/vendor/github.com/oschwald/maxminddb-golang/.gitignore b/vendor/github.com/oschwald/maxminddb-golang/.gitignore
new file mode 100644
index 00000000000..fe3fa4ab9b5
--- /dev/null
+++ b/vendor/github.com/oschwald/maxminddb-golang/.gitignore
@@ -0,0 +1,4 @@
+.vscode
+*.out
+*.sw?
+*.test
diff --git a/vendor/github.com/oschwald/maxminddb-golang/.gitmodules b/vendor/github.com/oschwald/maxminddb-golang/.gitmodules
new file mode 100644
index 00000000000..400b2ab62c0
--- /dev/null
+++ b/vendor/github.com/oschwald/maxminddb-golang/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "test-data"]
+	path = test-data
+	url = https://github.com/maxmind/MaxMind-DB.git
diff --git a/vendor/github.com/oschwald/maxminddb-golang/.golangci.toml b/vendor/github.com/oschwald/maxminddb-golang/.golangci.toml
new file mode 100644
index 00000000000..835e8cccc1e
--- /dev/null
+++ b/vendor/github.com/oschwald/maxminddb-golang/.golangci.toml
@@ -0,0 +1,30 @@
+[run]
+  deadline = "10m"
+  tests = true
+
+[linters]
+  disable-all = true
+  enable = [
+    "deadcode",
+    "depguard",
+    "errcheck",
+    "goconst",
+    "gocyclo",
+    "gocritic",
+    "gofmt",
+    "golint",
+    "gosec",
+    "gosimple",
+    "ineffassign",
+    "maligned",
+    "misspell",
+    "nakedret",
+    "staticcheck",
+    "structcheck",
+    "typecheck",
+    "unconvert",
+    "unparam",
+    "varcheck",
+    "vet",
+    "vetshadow",
+  ]
diff --git a/vendor/github.com/oschwald/maxminddb-golang/.travis.yml b/vendor/github.com/oschwald/maxminddb-golang/.travis.yml
new file mode 100644
index 00000000000..f6639576817
--- /dev/null
+++ b/vendor/github.com/oschwald/maxminddb-golang/.travis.yml
@@ -0,0 +1,45 @@
+language: go
+
+go:
+  - 1.9
+  - "1.10"
+  - 1.11
+  - 1.12
+  - tip
+
+os:
+  - linux
+  - linux-ppc64le
+
+matrix:
+  allow_failures:
+    - go: tip
+
+install:
+  - go get -v -t ./...
+
+before_script:
+  - |
+    if [[ $TRAVIS_GO_VERSION == 1.12 && $(arch) != 'ppc64le' ]]; then
+      curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin
+    fi
+
+script:
+  - |
+    if [ $(arch) == "ppc64le" ]; then
+      go test -cpu 1,4 -v
+    else
+      go test -race -cpu 1,4 -v
+    fi
+  - |
+    if [ $(arch) == "ppc64le" ]; then
+      go test -v -tags appengine
+    else
+      go test -race -v -tags appengine
+    fi
+  - |
+    if [[ $TRAVIS_GO_VERSION == 1.12 && $(arch) != 'ppc64le' ]]; then
+      golangci-lint run
+    fi
+
+sudo: false
diff --git a/vendor/github.com/oschwald/maxminddb-golang/LICENSE b/vendor/github.com/oschwald/maxminddb-golang/LICENSE
new file mode 100644
index 00000000000..2969677f159
--- /dev/null
+++ b/vendor/github.com/oschwald/maxminddb-golang/LICENSE
@@ -0,0 +1,15 @@
+ISC License
+
+Copyright (c) 2015, Gregory J. Oschwald <oschwald@gmail.com>
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
+REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
+INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
diff --git a/vendor/github.com/oschwald/maxminddb-golang/README.md b/vendor/github.com/oschwald/maxminddb-golang/README.md
new file mode 100644
index 00000000000..126d868a7c8
--- /dev/null
+++ b/vendor/github.com/oschwald/maxminddb-golang/README.md
@@ -0,0 +1,38 @@
+# MaxMind DB Reader for Go #
+
+[![Build Status](https://travis-ci.org/oschwald/maxminddb-golang.svg?branch=master)](https://travis-ci.org/oschwald/maxminddb-golang)
+[![Windows Build Status](https://ci.appveyor.com/api/projects/status/4j2f9oep8nnfrmov/branch/master?svg=true)](https://ci.appveyor.com/project/oschwald/maxminddb-golang/branch/master)
+[![GoDoc](https://godoc.org/github.com/oschwald/maxminddb-golang?status.svg)](https://godoc.org/github.com/oschwald/maxminddb-golang)
+
+This is a Go reader for the MaxMind DB format. Although this can be used to
+read [GeoLite2](http://dev.maxmind.com/geoip/geoip2/geolite2/) and
+[GeoIP2](https://www.maxmind.com/en/geoip2-databases) databases,
+[geoip2](https://github.com/oschwald/geoip2-golang) provides a higher-level
+API for doing so.
+
+This is not an official MaxMind API.
+
+## Installation ##
+
+```
+go get github.com/oschwald/maxminddb-golang
+```
+
+## Usage ##
+
+[See GoDoc](http://godoc.org/github.com/oschwald/maxminddb-golang) for
+documentation and examples.
+
+## Examples ##
+
+See [GoDoc](http://godoc.org/github.com/oschwald/maxminddb-golang) or
+`example_test.go` for examples.
+
+## Contributing ##
+
+Contributions welcome! Please fork the repository and open a pull request
+with your changes.
+
+## License ##
+
+This is free software, licensed under the ISC License.
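For a quick orientation on how this vendored reader is used, a sketch of a country lookup; the database path, record layout, and IP address are placeholders that depend on the .mmdb file at hand, and the Open/Close entry points are defined elsewhere in this package:

```go
package main

import (
	"fmt"
	"log"
	"net"

	"github.com/oschwald/maxminddb-golang"
)

func main() {
	// Placeholder path; any GeoLite2/GeoIP2 City database will do.
	db, err := maxminddb.Open("GeoLite2-City.mmdb")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Declare only the fields you need; unknown keys are skipped.
	var record struct {
		Country struct {
			ISOCode string `maxminddb:"iso_code"`
		} `maxminddb:"country"`
	}
	if err := db.Lookup(net.ParseIP("81.2.69.142"), &record); err != nil {
		log.Fatal(err)
	}
	fmt.Println(record.Country.ISOCode)
}
```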
diff --git a/vendor/github.com/oschwald/maxminddb-golang/appveyor.yml b/vendor/github.com/oschwald/maxminddb-golang/appveyor.yml
new file mode 100644
index 00000000000..e2bb9dd2375
--- /dev/null
+++ b/vendor/github.com/oschwald/maxminddb-golang/appveyor.yml
@@ -0,0 +1,19 @@
+version: "{build}"
+
+os: Windows Server 2012 R2
+
+clone_folder: c:\gopath\src\github.com\oschwald\maxminddb-golang
+
+environment:
+  GOPATH: c:\gopath
+
+install:
+  - echo %PATH%
+  - echo %GOPATH%
+  - git submodule update --init --recursive
+  - go version
+  - go env
+  - go get -v -t ./...
+
+build_script:
+  - go test -v ./...
diff --git a/vendor/github.com/oschwald/maxminddb-golang/decoder.go b/vendor/github.com/oschwald/maxminddb-golang/decoder.go
new file mode 100644
index 00000000000..2617e8b55d1
--- /dev/null
+++ b/vendor/github.com/oschwald/maxminddb-golang/decoder.go
@@ -0,0 +1,711 @@
+package maxminddb
+
+import (
+	"encoding/binary"
+	"math"
+	"math/big"
+	"reflect"
+	"sync"
+)
+
+type decoder struct {
+	buffer []byte
+}
+
+type dataType int
+
+const (
+	_Extended dataType = iota
+	_Pointer
+	_String
+	_Float64
+	_Bytes
+	_Uint16
+	_Uint32
+	_Map
+	_Int32
+	_Uint64
+	_Uint128
+	_Slice
+	// We don't use the next two. They are placeholders. See the spec
+	// for more details.
+	_Container // nolint: deadcode, varcheck
+	_Marker    // nolint: deadcode, varcheck
+	_Bool
+	_Float32
+)
+
+const (
+	// This is the value used in libmaxminddb
+	maximumDataStructureDepth = 512
+)
+
+func (d *decoder) decode(offset uint, result reflect.Value, depth int) (uint, error) {
+	if depth > maximumDataStructureDepth {
+		return 0, newInvalidDatabaseError("exceeded maximum data structure depth; database is likely corrupt")
+	}
+	typeNum, size, newOffset, err := d.decodeCtrlData(offset)
+	if err != nil {
+		return 0, err
+	}
+
+	if typeNum != _Pointer && result.Kind() == reflect.Uintptr {
+		result.Set(reflect.ValueOf(uintptr(offset)))
+		return d.nextValueOffset(offset, 1)
+	}
+	return d.decodeFromType(typeNum, size, newOffset, result, depth+1)
+}
+
+func (d *decoder) decodeCtrlData(offset uint) (dataType, uint, uint, error) {
+	newOffset := offset + 1
+	if offset >= uint(len(d.buffer)) {
+		return 0, 0, 0, newOffsetError()
+	}
+	ctrlByte := d.buffer[offset]
+
+	typeNum := dataType(ctrlByte >> 5)
+	if typeNum == _Extended {
+		if newOffset >= uint(len(d.buffer)) {
+			return 0, 0, 0, newOffsetError()
+		}
+		typeNum = dataType(d.buffer[newOffset] + 7)
+		newOffset++
+	}
+
+	var size uint
+	size, newOffset, err := d.sizeFromCtrlByte(ctrlByte, newOffset, typeNum)
+	return typeNum, size, newOffset, err
+}
+
+func (d *decoder) sizeFromCtrlByte(ctrlByte byte, offset uint, typeNum dataType) (uint, uint, error) {
+	size := uint(ctrlByte & 0x1f)
+	if typeNum == _Extended {
+		return size, offset, nil
+	}
+
+	var bytesToRead uint
+	if size < 29 {
+		return size, offset, nil
+	}
+
+	bytesToRead = size - 28
+	newOffset := offset + bytesToRead
+	if newOffset > uint(len(d.buffer)) {
+		return 0, 0, newOffsetError()
+	}
+	if size == 29 {
+		return 29 + uint(d.buffer[offset]), offset + 1, nil
+	}
+
+	sizeBytes := d.buffer[offset:newOffset]
+
+	switch {
+	case size == 30:
+		size = 285 + uintFromBytes(0, sizeBytes)
+	case size > 30:
+		size = uintFromBytes(0, sizeBytes) + 65821
+	}
+	return size, newOffset, nil
+}
+
+func (d *decoder) decodeFromType(
+	dtype dataType,
+	size uint,
+	offset uint,
+	result reflect.Value,
+	depth int,
+) (uint, error) {
+	result = d.indirect(result)
+
+	// For these types, size has a special meaning
+	switch dtype {
+	case _Bool:
+		return d.unmarshalBool(size, offset, result)
+	case _Map:
+		return d.unmarshalMap(size, offset, result, depth)
+	case _Pointer:
+		return d.unmarshalPointer(size, offset, result, depth)
+	case _Slice:
+		return d.unmarshalSlice(size, offset, result, depth)
+	}
+
+	// For the remaining types, size is the byte size
+	if offset+size > uint(len(d.buffer)) {
+		return 0, newOffsetError()
+	}
+	switch dtype {
+	case _Bytes:
+		return d.unmarshalBytes(size, offset, result)
+	case _Float32:
+		return d.unmarshalFloat32(size, offset, result)
+	case _Float64:
+		return d.unmarshalFloat64(size, offset, result)
+	case _Int32:
+		return d.unmarshalInt32(size, offset, result)
+	case _String:
+		return d.unmarshalString(size, offset, result)
+	case _Uint16:
+		return d.unmarshalUint(size, offset, result, 16)
+	case _Uint32:
+		return d.unmarshalUint(size, offset, result, 32)
+	case _Uint64:
+		return d.unmarshalUint(size, offset, result, 64)
+	case _Uint128:
+		return d.unmarshalUint128(size, offset, result)
+	default:
+		return 0, newInvalidDatabaseError("unknown type: %d", dtype)
+	}
+}
+
+func (d *decoder) unmarshalBool(size uint, offset uint, result reflect.Value) (uint, error) {
+	if size > 1 {
+		return 0, newInvalidDatabaseError("the MaxMind DB file's data section contains bad data (bool size of %v)", size)
+	}
+	value, newOffset := d.decodeBool(size, offset)
+
+	switch result.Kind() {
+	case reflect.Bool:
+		result.SetBool(value)
+		return newOffset, nil
+	case reflect.Interface:
+		if result.NumMethod() == 0 {
+			result.Set(reflect.ValueOf(value))
+			return newOffset, nil
+		}
+	}
+	return newOffset, newUnmarshalTypeError(value, result.Type())
+}
+
+// indirect follows pointers and creates values as necessary. This is
+// heavily based on encoding/json as my original version had a subtle
+// bug. This method should be considered to be licensed under
+// https://golang.org/LICENSE
+func (d *decoder) indirect(result reflect.Value) reflect.Value {
+	for {
+		// Load value from interface, but only if the result will be
+		// usefully addressable.
+		if result.Kind() == reflect.Interface && !result.IsNil() {
+			e := result.Elem()
+			if e.Kind() == reflect.Ptr && !e.IsNil() {
+				result = e
+				continue
+			}
+		}
+
+		if result.Kind() != reflect.Ptr {
+			break
+		}
+
+		if result.IsNil() {
+			result.Set(reflect.New(result.Type().Elem()))
+		}
+		result = result.Elem()
+	}
+	return result
+}
+
+var sliceType = reflect.TypeOf([]byte{})
+
+func (d *decoder) unmarshalBytes(size uint, offset uint, result reflect.Value) (uint, error) {
+	value, newOffset := d.decodeBytes(size, offset)
+
+	switch result.Kind() {
+	case reflect.Slice:
+		if result.Type() == sliceType {
+			result.SetBytes(value)
+			return newOffset, nil
+		}
+	case reflect.Interface:
+		if result.NumMethod() == 0 {
+			result.Set(reflect.ValueOf(value))
+			return newOffset, nil
+		}
+	}
+	return newOffset, newUnmarshalTypeError(value, result.Type())
+}
+
+func (d *decoder) unmarshalFloat32(size uint, offset uint, result reflect.Value) (uint, error) {
+	if size != 4 {
+		return 0, newInvalidDatabaseError("the MaxMind DB file's data section contains bad data (float32 size of %v)", size)
+	}
+	value, newOffset := d.decodeFloat32(size, offset)
+
+	switch result.Kind() {
+	case reflect.Float32, reflect.Float64:
+		result.SetFloat(float64(value))
+		return newOffset, nil
+	case reflect.Interface:
+		if result.NumMethod() == 0 {
+			result.Set(reflect.ValueOf(value))
+			return newOffset, nil
+		}
+	}
+	return newOffset, newUnmarshalTypeError(value, result.Type())
+}
+
+func (d *decoder) unmarshalFloat64(size uint, offset uint, result reflect.Value) (uint, error) {
+	if size != 8 {
+		return 0, newInvalidDatabaseError("the MaxMind DB file's data section contains bad data (float 64 size of %v)", size)
+	}
+	value, newOffset := d.decodeFloat64(size, offset)
+
+	switch result.Kind() {
+	case reflect.Float32, reflect.Float64:
+		if result.OverflowFloat(value) {
+			return 0, newUnmarshalTypeError(value, result.Type())
+		}
+		result.SetFloat(value)
+		return newOffset, nil
+	case reflect.Interface:
+		if result.NumMethod() == 0 {
+			result.Set(reflect.ValueOf(value))
+			return newOffset, nil
+		}
+	}
+	return newOffset, newUnmarshalTypeError(value, result.Type())
+}
+
+func (d *decoder) unmarshalInt32(size uint, offset uint, result reflect.Value) (uint, error) {
+	if size > 4 {
+		return 0, newInvalidDatabaseError("the MaxMind DB file's data section contains bad data (int32 size of %v)", size)
+	}
+	value, newOffset := d.decodeInt(size, offset)
+
+	switch result.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		n := int64(value)
+		if !result.OverflowInt(n) {
+			result.SetInt(n)
+			return newOffset, nil
+		}
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		n := uint64(value)
+		if !result.OverflowUint(n) {
+			result.SetUint(n)
+			return newOffset, nil
+		}
+	case reflect.Interface:
+		if result.NumMethod() == 0 {
+			result.Set(reflect.ValueOf(value))
+			return newOffset, nil
+		}
+	}
+	return newOffset, newUnmarshalTypeError(value, result.Type())
+}
+
+func (d *decoder) unmarshalMap(
+	size uint,
+	offset uint,
+	result reflect.Value,
+	depth int,
+) (uint, error) {
+	result = d.indirect(result)
+	switch result.Kind() {
+	default:
+		return 0, newUnmarshalTypeError("map", result.Type())
+	case reflect.Struct:
+		return d.decodeStruct(size, offset, result, depth)
+	case reflect.Map:
+		return d.decodeMap(size, offset, result, depth)
+	case reflect.Interface:
+		if result.NumMethod() == 0 {
+			rv := reflect.ValueOf(make(map[string]interface{}, size))
+			newOffset, err := d.decodeMap(size, offset, rv, depth)
+			result.Set(rv)
+			return newOffset, err
+		}
+		return 0, newUnmarshalTypeError("map", result.Type())
+	}
+}
+
+func (d *decoder) unmarshalPointer(size uint, offset uint, result reflect.Value, depth int) (uint, error) {
+	pointer, newOffset, err := d.decodePointer(size, offset)
+	if err != nil {
+		return 0, err
+	}
+	_, err = d.decode(pointer, result, depth)
+	return newOffset, err
+}
+
+func (d *decoder) unmarshalSlice(
+	size uint,
+	offset uint,
+	result reflect.Value,
+	depth int,
+) (uint, error) {
+	switch result.Kind() {
+	case reflect.Slice:
+		return d.decodeSlice(size, offset, result, depth)
+	case reflect.Interface:
+		if result.NumMethod() == 0 {
+			a := []interface{}{}
+			rv := reflect.ValueOf(&a).Elem()
+			newOffset, err := d.decodeSlice(size, offset, rv, depth)
+			result.Set(rv)
+			return newOffset, err
+		}
+	}
+	return 0, newUnmarshalTypeError("array", result.Type())
+}
+
+func (d *decoder) unmarshalString(size uint, offset uint, result reflect.Value) (uint, error) {
+	value, newOffset := d.decodeString(size, offset)
+
+	switch result.Kind() {
+	case reflect.String:
+		result.SetString(value)
+		return newOffset, nil
+	case reflect.Interface:
+		if result.NumMethod() == 0 {
+			result.Set(reflect.ValueOf(value))
+			return newOffset, nil
+		}
+	}
+	return newOffset, newUnmarshalTypeError(value, result.Type())
+}
+
+func (d *decoder) unmarshalUint(size uint, offset uint, result reflect.Value, uintType uint) (uint, error) {
+	if size > uintType/8 {
+		return 0, newInvalidDatabaseError("the MaxMind DB file's data section contains bad data (uint%v size of %v)", uintType, size)
+	}
+
+	value, newOffset := d.decodeUint(size, offset)
+
+	switch result.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		n := int64(value)
+		if !result.OverflowInt(n) {
+			result.SetInt(n)
+			return newOffset, nil
+		}
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		if !result.OverflowUint(value) {
+			result.SetUint(value)
+			return newOffset, nil
+		}
+	case reflect.Interface:
+		if result.NumMethod() == 0 {
+			result.Set(reflect.ValueOf(value))
+			return newOffset, nil
+		}
+	}
+	return newOffset, newUnmarshalTypeError(value, result.Type())
+}
+
+var bigIntType = reflect.TypeOf(big.Int{})
+
+func (d *decoder) unmarshalUint128(size uint, offset uint, result reflect.Value) (uint, error) {
+	if size > 16 {
+		return 0, newInvalidDatabaseError("the MaxMind DB file's data section contains bad data (uint128 size of %v)", size)
+	}
+	value, newOffset := d.decodeUint128(size, offset)
+
+	switch result.Kind() {
+	case reflect.Struct:
+		if result.Type() == bigIntType {
+			result.Set(reflect.ValueOf(*value))
+			return newOffset, nil
+		}
+	case reflect.Interface:
+		if result.NumMethod() == 0 {
+			result.Set(reflect.ValueOf(value))
+			return newOffset, nil
+		}
+	}
+	return newOffset, newUnmarshalTypeError(value, result.Type())
+}
+
+func (d *decoder) decodeBool(size uint, offset uint) (bool, uint) {
+	return size != 0, offset
+}
+
+func (d *decoder) decodeBytes(size uint, offset uint) ([]byte, uint) {
+	newOffset := offset + size
+	bytes := make([]byte, size)
+	copy(bytes, d.buffer[offset:newOffset])
+	return bytes, newOffset
+}
+
+func (d *decoder) decodeFloat64(size uint, offset uint) (float64, uint) {
+	newOffset := offset + size
+	bits := binary.BigEndian.Uint64(d.buffer[offset:newOffset])
+	return math.Float64frombits(bits), newOffset
+}
+
+func (d *decoder) decodeFloat32(size uint, offset uint) (float32, uint) {
+	newOffset := offset + size
+	bits := binary.BigEndian.Uint32(d.buffer[offset:newOffset])
+	return math.Float32frombits(bits), newOffset
+}
+
+func (d *decoder) decodeInt(size uint, offset uint) (int, uint) {
+	newOffset := offset + size
+	var val int32
+	for _, b := range d.buffer[offset:newOffset] {
+		val = (val << 8) | int32(b)
+	}
+	return int(val), newOffset
+}
+
+func (d *decoder) decodeMap(
+	size uint,
+	offset uint,
+	result reflect.Value,
+	depth int,
+) (uint, error) {
+	if result.IsNil() {
+		result.Set(reflect.MakeMapWithSize(result.Type(), int(size)))
+	}
+
+	mapType := result.Type()
+	keyValue := reflect.New(mapType.Key()).Elem()
+	elemType := mapType.Elem()
+	elemKind := elemType.Kind()
+	var elemValue reflect.Value
+	for i := uint(0); i < size; i++ {
+		var key []byte
+		var err error
+		key, offset, err = d.decodeKey(offset)
+
+		if err != nil {
+			return 0, err
+		}
+
+		if !elemValue.IsValid() || elemKind == reflect.Interface {
+			elemValue = reflect.New(elemType).Elem()
+		}
+
+		offset, err = d.decode(offset, elemValue, depth)
+		if err != nil {
+			return 0, err
+		}
+
+		keyValue.SetString(string(key))
+		result.SetMapIndex(keyValue, elemValue)
+	}
+	return offset, nil
+}
+
+func (d *decoder) decodePointer(
+	size uint,
+	offset uint,
+) (uint, uint, error) {
+	pointerSize := ((size >> 3) & 0x3) + 1
+	newOffset := offset + pointerSize
+	if newOffset > uint(len(d.buffer)) {
+		return 0, 0, newOffsetError()
+	}
+	pointerBytes := d.buffer[offset:newOffset]
+	var prefix uint
+	if pointerSize == 4 {
+		prefix = 0
+	} else {
+		prefix = size & 0x7
+	}
+	unpacked := uintFromBytes(prefix, pointerBytes)
+
+	var pointerValueOffset uint
+	switch pointerSize {
+	case 1:
+		pointerValueOffset = 0
+	case 2:
+		pointerValueOffset = 2048
+	case 3:
+		pointerValueOffset = 526336
+	case 4:
+		pointerValueOffset = 0
+	}
+
+	pointer := unpacked + pointerValueOffset
+
+	return pointer, newOffset, nil
+}
+
+func (d *decoder) decodeSlice(
+	size uint,
+	offset uint,
+	result reflect.Value,
+	depth int,
+) (uint, error) {
+	result.Set(reflect.MakeSlice(result.Type(), int(size), int(size)))
+	for i := 0; i < int(size); i++ {
+		var err error
+		offset, err = d.decode(offset, result.Index(i), depth)
+		if err != nil {
+			return 0, err
+		}
+	}
+	return offset, nil
+}
+
+func (d *decoder) decodeString(size uint, offset uint) (string, uint) {
+	newOffset := offset + size
+	return string(d.buffer[offset:newOffset]), newOffset
+}
+
+func (d *decoder) decodeStruct(
+	size uint,
+	offset uint,
+	result reflect.Value,
+	depth int,
+) (uint, error) {
+	fields := cachedFields(result)
+
+	// This fills in embedded structs
+	for _, i := range fields.anonymousFields {
+		_, err := d.unmarshalMap(size, offset, result.Field(i), depth)
+		if err != nil {
+			return 0, err
+		}
+	}
+
+	// This handles named fields
+	for i := uint(0); i < size; i++ {
+		var (
+			err error
+			key []byte
+		)
+		key, offset, err = d.decodeKey(offset)
+		if err != nil {
+			return 0, err
+		}
+		// The string() does not create a copy due to this compiler
+		// optimization: https://github.com/golang/go/issues/3512
+		j, ok := fields.namedFields[string(key)]
+		if !ok {
+			offset, err = d.nextValueOffset(offset, 1)
+			if err != nil {
+				return 0, err
+			}
+			continue
+		}
+
+		offset, err = d.decode(offset, result.Field(j), depth)
+		if err != nil {
+			return 0, err
+		}
+	}
+	return offset, nil
+}
+
+type fieldsType struct {
+	namedFields     map[string]int
+	anonymousFields []int
+}
+
+var fieldsMap sync.Map
+
+func cachedFields(result reflect.Value) *fieldsType {
+	resultType := result.Type()
+
+	if fields, ok := fieldsMap.Load(resultType); ok {
+		return fields.(*fieldsType)
+	}
+	numFields := resultType.NumField()
+	namedFields := make(map[string]int, numFields)
+	var anonymous []int
+	for i := 0; i < numFields; i++ {
+		field := resultType.Field(i)
+
+		fieldName := field.Name
+		if tag := field.Tag.Get("maxminddb"); tag != "" {
+			if tag == "-" {
+				continue
+			}
+			fieldName = tag
+		}
+		if field.Anonymous {
+			anonymous = append(anonymous, i)
+			continue
+		}
+		namedFields[fieldName] = i
+	}
+	fields := &fieldsType{namedFields, anonymous}
+	fieldsMap.Store(resultType, fields)
+
+	return fields
+}
+
+func (d *decoder) decodeUint(size uint, offset uint) (uint64, uint) {
+	newOffset := offset + size
+	bytes := d.buffer[offset:newOffset]
+
+	var val uint64
+	for _, b := range bytes {
+		val = (val << 8) | uint64(b)
+	}
+	return val, newOffset
+}
+
+func (d *decoder) decodeUint128(size uint, offset uint) (*big.Int, uint) {
+	newOffset := offset + size
+	val := new(big.Int)
+	val.SetBytes(d.buffer[offset:newOffset])
+
+	return val, newOffset
+}
+
+func uintFromBytes(prefix uint, uintBytes []byte) uint {
+	val := prefix
+	for _, b := range uintBytes {
+		val = (val << 8) | uint(b)
+	}
+	return val
+}
+
+// decodeKey decodes a map key into a []byte slice. We use a []byte so that we
+// can take advantage of https://github.com/golang/go/issues/3512 to avoid
+// copying the bytes when decoding a struct. Previously, we achieved this by
+// using unsafe.
+func (d *decoder) decodeKey(offset uint) ([]byte, uint, error) {
+	typeNum, size, dataOffset, err := d.decodeCtrlData(offset)
+	if err != nil {
+		return nil, 0, err
+	}
+	if typeNum == _Pointer {
+		pointer, ptrOffset, err := d.decodePointer(size, dataOffset)
+		if err != nil {
+			return nil, 0, err
+		}
+		key, _, err := d.decodeKey(pointer)
+		return key, ptrOffset, err
+	}
+	if typeNum != _String {
+		return nil, 0, newInvalidDatabaseError("unexpected type when decoding string: %v", typeNum)
+	}
+	newOffset := dataOffset + size
+	if newOffset > uint(len(d.buffer)) {
+		return nil, 0, newOffsetError()
+	}
+	return d.buffer[dataOffset:newOffset], newOffset, nil
+}
+
+// This function is used to skip ahead to the next value without decoding
+// the one at the offset passed in. The size bits have different meanings for
+// different data types.
+func (d *decoder) nextValueOffset(offset uint, numberToSkip uint) (uint, error) {
+	if numberToSkip == 0 {
+		return offset, nil
+	}
+	typeNum, size, offset, err := d.decodeCtrlData(offset)
+	if err != nil {
+		return 0, err
+	}
+	switch typeNum {
+	case _Pointer:
+		_, offset, err = d.decodePointer(size, offset)
+		if err != nil {
+			return 0, err
+		}
+	case _Map:
+		numberToSkip += 2 * size
+	case _Slice:
+		numberToSkip += size
+	case _Bool:
+	default:
+		offset += size
+	}
+	return d.nextValueOffset(offset, numberToSkip-1)
+}
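The struct handling above (decodeStruct and cachedFields) drives the `maxminddb` tag convention; a sketch of a caller-side struct that maps onto it, with illustrative field names:

```go
package example

// city shows the tag rules the decoder applies: a tag renames the expected
// map key, "-" excludes a field entirely, and untagged exported fields match
// on their Go name.
type city struct {
	Names     map[string]string `maxminddb:"names"`
	GeoNameID uint              `maxminddb:"geoname_id"`
	Scratch   string            `maxminddb:"-"` // never populated by decode
}
```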
diff --git a/vendor/github.com/oschwald/maxminddb-golang/errors.go b/vendor/github.com/oschwald/maxminddb-golang/errors.go
new file mode 100644
index 00000000000..132780019bb
--- /dev/null
+++ b/vendor/github.com/oschwald/maxminddb-golang/errors.go
@@ -0,0 +1,42 @@
+package maxminddb
+
+import (
+	"fmt"
+	"reflect"
+)
+
+// InvalidDatabaseError is returned when the database contains invalid data
+// and cannot be parsed.
+type InvalidDatabaseError struct {
+	message string
+}
+
+func newOffsetError() InvalidDatabaseError {
+	return InvalidDatabaseError{"unexpected end of database"}
+}
+
+func newInvalidDatabaseError(format string, args ...interface{}) InvalidDatabaseError {
+	return InvalidDatabaseError{fmt.Sprintf(format, args...)}
+}
+
+func (e InvalidDatabaseError) Error() string {
+	return e.message
+}
+
+// UnmarshalTypeError is returned when the value in the database cannot be
+// assigned to the specified data type.
+type UnmarshalTypeError struct {
+	Value string       // stringified copy of the database value that caused the error
+	Type  reflect.Type // type of the value that could not be assigned to
+}
+
+func newUnmarshalTypeError(value interface{}, rType reflect.Type) UnmarshalTypeError {
+	return UnmarshalTypeError{
+		Value: fmt.Sprintf("%v", value),
+		Type:  rType,
+	}
+}
+
+func (e UnmarshalTypeError) Error() string {
+	return fmt.Sprintf("maxminddb: cannot unmarshal %s into type %s", e.Value, e.Type.String())
+}
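Since both error types are plain value types, callers on Go 1.13+ can branch on them with errors.As; a sketch:

```go
package example

import (
	"errors"
	"fmt"

	"github.com/oschwald/maxminddb-golang"
)

// classify distinguishes the two error kinds this package defines.
func classify(err error) {
	var typeErr maxminddb.UnmarshalTypeError
	var dbErr maxminddb.InvalidDatabaseError
	switch {
	case errors.As(err, &typeErr):
		fmt.Println("type mismatch:", typeErr.Value, "->", typeErr.Type)
	case errors.As(err, &dbErr):
		fmt.Println("invalid database:", dbErr)
	case err != nil:
		fmt.Println("other error:", err)
	}
}
```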
diff --git a/vendor/github.com/oschwald/maxminddb-golang/mmap_unix.go b/vendor/github.com/oschwald/maxminddb-golang/mmap_unix.go
new file mode 100644
index 00000000000..d898d25704e
--- /dev/null
+++ b/vendor/github.com/oschwald/maxminddb-golang/mmap_unix.go
@@ -0,0 +1,15 @@
+// +build !windows,!appengine
+
+package maxminddb
+
+import (
+	"golang.org/x/sys/unix"
+)
+
+func mmap(fd int, length int) (data []byte, err error) {
+	return unix.Mmap(fd, 0, length, unix.PROT_READ, unix.MAP_SHARED)
+}
+
+func munmap(b []byte) (err error) {
+	return unix.Munmap(b)
+}
diff --git a/vendor/github.com/oschwald/maxminddb-golang/mmap_windows.go b/vendor/github.com/oschwald/maxminddb-golang/mmap_windows.go
new file mode 100644
index 00000000000..661250eca00
--- /dev/null
+++ b/vendor/github.com/oschwald/maxminddb-golang/mmap_windows.go
@@ -0,0 +1,85 @@
+// +build windows,!appengine
+
+package maxminddb
+
+// Windows support largely borrowed from mmap-go.
+//
+// Copyright 2011 Evan Shaw. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+import (
+	"errors"
+	"os"
+	"reflect"
+	"sync"
+	"unsafe"
+
+	"golang.org/x/sys/windows"
+)
+
+type memoryMap []byte
+
+// Windows
+var handleLock sync.Mutex
+var handleMap = map[uintptr]windows.Handle{}
+
+func mmap(fd int, length int) (data []byte, err error) {
+	h, errno := windows.CreateFileMapping(windows.Handle(fd), nil,
+		uint32(windows.PAGE_READONLY), 0, uint32(length), nil)
+	if h == 0 {
+		return nil, os.NewSyscallError("CreateFileMapping", errno)
+	}
+
+	addr, errno := windows.MapViewOfFile(h, uint32(windows.FILE_MAP_READ), 0,
+		0, uintptr(length))
+	if addr == 0 {
+		return nil, os.NewSyscallError("MapViewOfFile", errno)
+	}
+	handleLock.Lock()
+	handleMap[addr] = h
+	handleLock.Unlock()
+
+	m := memoryMap{}
+	dh := m.header()
+	dh.Data = addr
+	dh.Len = length
+	dh.Cap = dh.Len
+
+	return m, nil
+}
+
+func (m *memoryMap) header() *reflect.SliceHeader {
+	return (*reflect.SliceHeader)(unsafe.Pointer(m))
+}
+
+func flush(addr, len uintptr) error {
+	errno := windows.FlushViewOfFile(addr, len)
+	return os.NewSyscallError("FlushViewOfFile", errno)
+}
+
+func munmap(b []byte) (err error) {
+	m := memoryMap(b)
+	dh := m.header()
+
+	addr := dh.Data
+	length := uintptr(dh.Len)
+
+	flush(addr, length)
+	err = windows.UnmapViewOfFile(addr)
+	if err != nil {
+		return err
+	}
+
+	handleLock.Lock()
+	defer handleLock.Unlock()
+	handle, ok := handleMap[addr]
+	if !ok {
+		// should be impossible; we would've errored above
+		return errors.New("unknown base address")
+	}
+	delete(handleMap, addr)
+
+	e := windows.CloseHandle(windows.Handle(handle))
+	return os.NewSyscallError("CloseHandle", e)
+}
diff --git a/vendor/github.com/oschwald/maxminddb-golang/node.go b/vendor/github.com/oschwald/maxminddb-golang/node.go
new file mode 100644
index 00000000000..68990dbf143
--- /dev/null
+++ b/vendor/github.com/oschwald/maxminddb-golang/node.go
@@ -0,0 +1,42 @@
+package maxminddb
+
+type nodeReader interface {
+	readLeft(uint) uint
+	readRight(uint) uint
+}
+
+type nodeReader24 struct {
+	buffer []byte
+}
+
+func (n nodeReader24) readLeft(nodeNumber uint) uint {
+	return (uint(n.buffer[nodeNumber]) << 16) | (uint(n.buffer[nodeNumber+1]) << 8) | uint(n.buffer[nodeNumber+2])
+}
+
+func (n nodeReader24) readRight(nodeNumber uint) uint {
+	return (uint(n.buffer[nodeNumber+3]) << 16) | (uint(n.buffer[nodeNumber+4]) << 8) | uint(n.buffer[nodeNumber+5])
+}
+
+type nodeReader28 struct {
+	buffer []byte
+}
+
+func (n nodeReader28) readLeft(nodeNumber uint) uint {
+	return ((uint(n.buffer[nodeNumber+3]) & 0xF0) << 20) | (uint(n.buffer[nodeNumber]) << 16) | (uint(n.buffer[nodeNumber+1]) << 8) | uint(n.buffer[nodeNumber+2])
+}
+
+func (n nodeReader28) readRight(nodeNumber uint) uint {
+	return ((uint(n.buffer[nodeNumber+3]) & 0x0F) << 24) | (uint(n.buffer[nodeNumber+4]) << 16) | (uint(n.buffer[nodeNumber+5]) << 8) | uint(n.buffer[nodeNumber+6])
+}
+
+type nodeReader32 struct {
+	buffer []byte
+}
+
+func (n nodeReader32) readLeft(nodeNumber uint) uint {
+	return (uint(n.buffer[nodeNumber]) << 24) | (uint(n.buffer[nodeNumber+1]) << 16) | (uint(n.buffer[nodeNumber+2]) << 8) | uint(n.buffer[nodeNumber+3])
+}
+
+func (n nodeReader32) readRight(nodeNumber uint) uint {
+	return (uint(n.buffer[nodeNumber+4]) << 24) | (uint(n.buffer[nodeNumber+5]) << 16) | (uint(n.buffer[nodeNumber+6]) << 8) | uint(n.buffer[nodeNumber+7])
+}
diff --git a/vendor/github.com/oschwald/maxminddb-golang/reader.go b/vendor/github.com/oschwald/maxminddb-golang/reader.go
new file mode 100644
index 00000000000..a4c2cec5f53
--- /dev/null
+++ b/vendor/github.com/oschwald/maxminddb-golang/reader.go
@@ -0,0 +1,288 @@
+package maxminddb
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"net"
+	"reflect"
+)
+
+const (
+	// NotFound is returned by LookupOffset when a matched root record offset
+	// cannot be found.
+	NotFound = ^uintptr(0)
+
+	dataSectionSeparatorSize = 16
+)
+
+var metadataStartMarker = []byte("\xAB\xCD\xEFMaxMind.com")
+
+// Reader holds the data corresponding to the MaxMind DB file. Its only public
+// field is Metadata, which contains the metadata from the MaxMind DB file.
+type Reader struct {
+	hasMappedFile     bool
+	buffer            []byte
+	nodeReader        nodeReader
+	decoder           decoder
+	Metadata          Metadata
+	ipv4Start         uint
+	ipv4StartBitDepth int
+	nodeOffsetMult    uint
+}
+
+// Metadata holds the metadata decoded from the MaxMind DB file. In particular
+// it has the format version, the build time as Unix epoch time, the database
+// type and description, the IP version supported, and a slice of the natural
+// languages included.
+type Metadata struct {
+	BinaryFormatMajorVersion uint              `maxminddb:"binary_format_major_version"`
+	BinaryFormatMinorVersion uint              `maxminddb:"binary_format_minor_version"`
+	BuildEpoch               uint              `maxminddb:"build_epoch"`
+	DatabaseType             string            `maxminddb:"database_type"`
+	Description              map[string]string `maxminddb:"description"`
+	IPVersion                uint              `maxminddb:"ip_version"`
+	Languages                []string          `maxminddb:"languages"`
+	NodeCount                uint              `maxminddb:"node_count"`
+	RecordSize               uint              `maxminddb:"record_size"`
+}
+
+// FromBytes takes a byte slice corresponding to a MaxMind DB file and returns
+// a Reader structure or an error.
+func FromBytes(buffer []byte) (*Reader, error) {
+	metadataStart := bytes.LastIndex(buffer, metadataStartMarker)
+
+	if metadataStart == -1 {
+		return nil, newInvalidDatabaseError("error opening database: invalid MaxMind DB file")
+	}
+
+	metadataStart += len(metadataStartMarker)
+	metadataDecoder := decoder{buffer[metadataStart:]}
+
+	var metadata Metadata
+
+	rvMetadata := reflect.ValueOf(&metadata)
+	_, err := metadataDecoder.decode(0, rvMetadata, 0)
+	if err != nil {
+		return nil, err
+	}
+
+	searchTreeSize := metadata.NodeCount * metadata.RecordSize / 4
+	dataSectionStart := searchTreeSize + dataSectionSeparatorSize
+	dataSectionEnd := uint(metadataStart - len(metadataStartMarker))
+	if dataSectionStart > dataSectionEnd {
+		return nil, newInvalidDatabaseError("the MaxMind DB contains invalid metadata")
+	}
+	d := decoder{
+		buffer[searchTreeSize+dataSectionSeparatorSize : metadataStart-len(metadataStartMarker)],
+	}
+
+	nodeBuffer := buffer[:searchTreeSize]
+	var nodeReader nodeReader
+	switch metadata.RecordSize {
+	case 24:
+		nodeReader = nodeReader24{buffer: nodeBuffer}
+	case 28:
+		nodeReader = nodeReader28{buffer: nodeBuffer}
+	case 32:
+		nodeReader = nodeReader32{buffer: nodeBuffer}
+	default:
+		return nil, newInvalidDatabaseError("unknown record size: %d", metadata.RecordSize)
+	}
+
+	reader := &Reader{
+		buffer:         buffer,
+		nodeReader:     nodeReader,
+		decoder:        d,
+		Metadata:       metadata,
+		ipv4Start:      0,
+		nodeOffsetMult: metadata.RecordSize / 4,
+	}
+
+	reader.setIPv4Start()
+
+	return reader, err
+}
+
+func (r *Reader) setIPv4Start() {
+	if r.Metadata.IPVersion != 6 {
+		return
+	}
+
+	nodeCount := r.Metadata.NodeCount
+
+	node := uint(0)
+	i := 0
+	for ; i < 96 && node < nodeCount; i++ {
+		node = r.nodeReader.readLeft(node * r.nodeOffsetMult)
+	}
+	r.ipv4Start = node
+	r.ipv4StartBitDepth = i
+}
+
+// Lookup retrieves the database record for ip and stores it in the value
+// pointed to by result. If result is nil or not a pointer, an error is
+// returned. If the data in the database record cannot be stored in result
+// because of type differences, an UnmarshalTypeError is returned. If the
+// database is invalid or otherwise cannot be read, an InvalidDatabaseError
+// is returned.
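+//
+// A minimal sketch (the struct shape and tag values are hypothetical; the
+// fields available depend on the database in use):
+//
+//	var record struct {
+//		Country struct {
+//			ISOCode string `maxminddb:"iso_code"`
+//		} `maxminddb:"country"`
+//	}
+//	err := db.Lookup(net.ParseIP("81.2.69.142"), &record)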
+func (r *Reader) Lookup(ip net.IP, result interface{}) error {
+	if r.buffer == nil {
+		return errors.New("cannot call Lookup on a closed database")
+	}
+	pointer, _, _, err := r.lookupPointer(ip)
+	if pointer == 0 || err != nil {
+		return err
+	}
+	return r.retrieveData(pointer, result)
+}
+
+// LookupNetwork retrieves the database record for ip and stores it in the
+// value pointed to by result. The network returned is the network associated
+// with the data record in the database. The ok return value indicates whether
+// the database contained a record for the ip.
+//
+// If result is nil or not a pointer, an error is returned. If the data in the
+// database record cannot be stored in result because of type differences, an
+// UnmarshalTypeError is returned. If the database is invalid or otherwise
+// cannot be read, an InvalidDatabaseError is returned.
+func (r *Reader) LookupNetwork(ip net.IP, result interface{}) (network *net.IPNet, ok bool, err error) {
+	if r.buffer == nil {
+		return nil, false, errors.New("cannot call Lookup on a closed database")
+	}
+	pointer, prefixLength, ip, err := r.lookupPointer(ip)
+
+	network = r.cidr(ip, prefixLength)
+	if pointer == 0 || err != nil {
+		return network, false, err
+	}
+
+	return network, true, r.retrieveData(pointer, result)
+}
+
+// LookupOffset maps an argument net.IP to a corresponding record offset in the
+// database. NotFound is returned if no such record is found, and a record may
+// otherwise be extracted by passing the returned offset to Decode. LookupOffset
+// is an advanced API, which exists to provide clients with a means to cache
+// previously-decoded records.
+func (r *Reader) LookupOffset(ip net.IP) (uintptr, error) {
+	if r.buffer == nil {
+		return 0, errors.New("cannot call LookupOffset on a closed database")
+	}
+	pointer, _, _, err := r.lookupPointer(ip)
+	if pointer == 0 || err != nil {
+		return NotFound, err
+	}
+	return r.resolveDataPointer(pointer)
+}
+
+func (r *Reader) cidr(ip net.IP, prefixLength int) *net.IPNet {
+	// This is necessary as the node that the IPv4 start is at may
+	// be at a bit depth that is less than 96, i.e., ipv4Start points
+	// to a leaf node. For instance, if a record was inserted at ::/8,
+	// the ipv4Start would point directly at the leaf node for the
+	// record and would have a bit depth of 8. This would not happen
+	// with databases currently distributed by MaxMind as all of them
+	// have an IPv4 subtree that is greater than a single node.
+	if r.Metadata.IPVersion == 6 &&
+		len(ip) == net.IPv4len &&
+		r.ipv4StartBitDepth != 96 {
+		return &net.IPNet{IP: net.ParseIP("::"), Mask: net.CIDRMask(r.ipv4StartBitDepth, 128)}
+	}
+
+	mask := net.CIDRMask(prefixLength, len(ip)*8)
+	return &net.IPNet{IP: ip.Mask(mask), Mask: mask}
+}
+
+// Decode the record at |offset| into |result|. The result value pointed to
+// must be a data value that corresponds to a record in the database. This may
+// include a struct representation of the data, a map capable of holding the
+// data or an empty interface{} value.
+//
+// If result is a pointer to a struct, the struct need not include a field
+// for every value that may be in the database. If a field is not present in
+// the structure, the decoder will not decode that field, reducing the time
+// required to decode the record.
+//
+// As a special case, a struct field of type uintptr will be used to capture
+// the offset of the value. Decode may later be used to extract the stored
+// value from the offset. MaxMind DBs are highly normalized: for example in
+// the City database, all records of the same country will reference a
+// single representative record for that country. This uintptr behavior allows
+// clients to leverage this normalization in their own sub-record caching.
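+//
+// A minimal sketch of that caching pattern (the struct shape and field
+// names are hypothetical):
+//
+//	var record struct {
+//		CountryOffset uintptr `maxminddb:"country"`
+//	}
+//	_ = db.Lookup(ip, &record)
+//
+//	var country interface{}
+//	_ = db.Decode(record.CountryOffset, &country)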
+func (r *Reader) Decode(offset uintptr, result interface{}) error {
+	if r.buffer == nil {
+		return errors.New("cannot call Decode on a closed database")
+	}
+	return r.decode(offset, result)
+}
+
+func (r *Reader) decode(offset uintptr, result interface{}) error {
+	rv := reflect.ValueOf(result)
+	if rv.Kind() != reflect.Ptr || rv.IsNil() {
+		return errors.New("result param must be a pointer")
+	}
+
+	_, err := r.decoder.decode(uint(offset), rv, 0)
+	return err
+}
+
+func (r *Reader) lookupPointer(ip net.IP) (uint, int, net.IP, error) {
+	if ip == nil {
+		return 0, 0, ip, errors.New("IP passed to Lookup cannot be nil")
+	}
+
+	ipV4Address := ip.To4()
+	if ipV4Address != nil {
+		ip = ipV4Address
+	}
+	if len(ip) == 16 && r.Metadata.IPVersion == 4 {
+		return 0, 0, ip, fmt.Errorf("error looking up '%s': you attempted to look up an IPv6 address in an IPv4-only database", ip.String())
+	}
+
+	bitCount := uint(len(ip) * 8)
+
+	var node uint
+	if bitCount == 32 {
+		node = r.ipv4Start
+	}
+
+	nodeCount := r.Metadata.NodeCount
+
+	i := uint(0)
+	for ; i < bitCount && node < nodeCount; i++ {
+		bit := uint(1) & (uint(ip[i>>3]) >> (7 - (i % 8)))
+
+		offset := node * r.nodeOffsetMult
+		if bit == 0 {
+			node = r.nodeReader.readLeft(offset)
+		} else {
+			node = r.nodeReader.readRight(offset)
+		}
+	}
+	if node == nodeCount {
+		// Record is empty
+		return 0, int(i), ip, nil
+	} else if node > nodeCount {
+		return node, int(i), ip, nil
+	}
+
+	return 0, int(i), ip, newInvalidDatabaseError("invalid node in search tree")
+}
+
+func (r *Reader) retrieveData(pointer uint, result interface{}) error {
+	offset, err := r.resolveDataPointer(pointer)
+	if err != nil {
+		return err
+	}
+	return r.decode(offset, result)
+}
+
+func (r *Reader) resolveDataPointer(pointer uint) (uintptr, error) {
+	var resolved = uintptr(pointer - r.Metadata.NodeCount - dataSectionSeparatorSize)
+
+	if resolved > uintptr(len(r.buffer)) {
+		return 0, newInvalidDatabaseError("the MaxMind DB file's search tree is corrupt")
+	}
+	return resolved, nil
+}
diff --git a/vendor/github.com/oschwald/maxminddb-golang/reader_appengine.go b/vendor/github.com/oschwald/maxminddb-golang/reader_appengine.go
new file mode 100644
index 00000000000..d200f9fe05e
--- /dev/null
+++ b/vendor/github.com/oschwald/maxminddb-golang/reader_appengine.go
@@ -0,0 +1,28 @@
+// +build appengine
+
+package maxminddb
+
+import "io/ioutil"
+
+// Open takes a string path to a MaxMind DB file and returns a Reader
+// structure or an error. The database file is opened using a memory map,
+// except on Google App Engine where mmap is not supported; there the database
+// is loaded into memory. Use the Close method on the Reader object to return
+// the resources to the system.
+func Open(file string) (*Reader, error) {
+	bytes, err := ioutil.ReadFile(file)
+	if err != nil {
+		return nil, err
+	}
+
+	return FromBytes(bytes)
+}
+
+// Close unmaps the database file from virtual memory and returns the
+// resources to the system. If called on a Reader opened using FromBytes
+// or Open on Google App Engine, this method sets the underlying buffer
+// to nil, returning the resources to the system.
+func (r *Reader) Close() error {
+	r.buffer = nil
+	return nil
+}
diff --git a/vendor/github.com/oschwald/maxminddb-golang/reader_other.go b/vendor/github.com/oschwald/maxminddb-golang/reader_other.go
new file mode 100644
index 00000000000..2a89fa676e8
--- /dev/null
+++ b/vendor/github.com/oschwald/maxminddb-golang/reader_other.go
@@ -0,0 +1,63 @@
+// +build !appengine
+
+package maxminddb
+
+import (
+	"os"
+	"runtime"
+)
+
+// Open takes a string path to a MaxMind DB file and returns a Reader
+// structure or an error. The database file is opened using a memory map,
+// except on Google App Engine where mmap is not supported; there the database
+// is loaded into memory. Use the Close method on the Reader object to return
+// the resources to the system.
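+//
+// A minimal sketch (the database path is hypothetical):
+//
+//	db, err := maxminddb.Open("GeoLite2-City.mmdb")
+//	if err != nil {
+//		// handle the error
+//	}
+//	defer db.Close()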
+func Open(file string) (reader *Reader, err error) {
+	mapFile, err := os.Open(file)
+	if err != nil {
+		return nil, err
+	}
+	defer func() {
+		// err is a named result, so a Close failure here propagates to
+		// the caller instead of being silently dropped.
+		if rerr := mapFile.Close(); rerr != nil {
+			err = rerr
+		}
+	}()
+
+	stats, err := mapFile.Stat()
+	if err != nil {
+		return nil, err
+	}
+
+	fileSize := int(stats.Size())
+	mmap, err := mmap(int(mapFile.Fd()), fileSize)
+	if err != nil {
+		return nil, err
+	}
+
+	reader, err = FromBytes(mmap)
+	if err != nil {
+		if err2 := munmap(mmap); err2 != nil {
+			// failing to unmap the file is probably the more severe error
+			return nil, err2
+		}
+		return nil, err
+	}
+
+	reader.hasMappedFile = true
+	runtime.SetFinalizer(reader, (*Reader).Close)
+	return reader, err
+}
+
+// Close unmaps the database file from virtual memory and returns the
+// resources to the system. If called on a Reader opened using FromBytes
+// or Open on Google App Engine, this method does nothing.
+func (r *Reader) Close() error {
+	var err error
+	if r.hasMappedFile {
+		runtime.SetFinalizer(r, nil)
+		r.hasMappedFile = false
+		err = munmap(r.buffer)
+	}
+	r.buffer = nil
+	return err
+}
diff --git a/vendor/github.com/oschwald/maxminddb-golang/traverse.go b/vendor/github.com/oschwald/maxminddb-golang/traverse.go
new file mode 100644
index 00000000000..a1eb75c4b86
--- /dev/null
+++ b/vendor/github.com/oschwald/maxminddb-golang/traverse.go
@@ -0,0 +1,97 @@
+package maxminddb
+
+import "net"
+
+// Internal structure used to keep track of nodes we still need to visit.
+type netNode struct {
+	ip      net.IP
+	bit     uint
+	pointer uint
+}
+
+// Networks represents a set of subnets that we are iterating over.
+type Networks struct {
+	reader   *Reader
+	nodes    []netNode // Nodes we still have to visit.
+	lastNode netNode
+	err      error
+}
+
+// Networks returns an iterator that can be used to traverse all networks in
+// the database.
+//
+// Please note that a MaxMind DB may map IPv4 networks into several locations
+// in an IPv6 database. This iterator will iterate over all of these
+// locations separately.
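+//
+// A minimal iteration sketch:
+//
+//	networks := db.Networks()
+//	for networks.Next() {
+//		var record interface{}
+//		subnet, err := networks.Network(&record)
+//		if err != nil {
+//			// handle the error
+//		}
+//		_ = subnet
+//	}
+//	if err := networks.Err(); err != nil {
+//		// handle the error
+//	}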
+func (r *Reader) Networks() *Networks {
+	s := 4
+	if r.Metadata.IPVersion == 6 {
+		s = 16
+	}
+	return &Networks{
+		reader: r,
+		nodes: []netNode{
+			{
+				ip: make(net.IP, s),
+			},
+		},
+	}
+}
+
+// Next prepares the next network for reading with the Network method. It
+// returns true if there is another network to be processed and false if there
+// are no more networks or if there is an error.
+func (n *Networks) Next() bool {
+	for len(n.nodes) > 0 {
+		node := n.nodes[len(n.nodes)-1]
+		n.nodes = n.nodes[:len(n.nodes)-1]
+
+		for node.pointer != n.reader.Metadata.NodeCount {
+			if node.pointer > n.reader.Metadata.NodeCount {
+				n.lastNode = node
+				return true
+			}
+			ipRight := make(net.IP, len(node.ip))
+			copy(ipRight, node.ip)
+			if len(ipRight) <= int(node.bit>>3) {
+				n.err = newInvalidDatabaseError(
+					"invalid search tree at %v/%v", ipRight, node.bit)
+				return false
+			}
+			ipRight[node.bit>>3] |= 1 << (7 - (node.bit % 8))
+
+			offset := node.pointer * n.reader.nodeOffsetMult
+			rightPointer := n.reader.nodeReader.readRight(offset)
+
+			node.bit++
+			n.nodes = append(n.nodes, netNode{
+				pointer: rightPointer,
+				ip:      ipRight,
+				bit:     node.bit,
+			})
+
+			node.pointer = n.reader.nodeReader.readLeft(offset)
+		}
+	}
+
+	return false
+}
+
+// Network returns the current network or an error if there is a problem
+// decoding the data for the network. It takes a pointer to a result value to
+// decode the network's data into.
+func (n *Networks) Network(result interface{}) (*net.IPNet, error) {
+	if err := n.reader.retrieveData(n.lastNode.pointer, result); err != nil {
+		return nil, err
+	}
+
+	return &net.IPNet{
+		IP:   n.lastNode.ip,
+		Mask: net.CIDRMask(int(n.lastNode.bit), len(n.lastNode.ip)*8),
+	}, nil
+}
+
+// Err returns an error, if any, that was encountered during iteration.
+func (n *Networks) Err() error {
+	return n.err
+}
diff --git a/vendor/github.com/oschwald/maxminddb-golang/verifier.go b/vendor/github.com/oschwald/maxminddb-golang/verifier.go
new file mode 100644
index 00000000000..45d25c801ca
--- /dev/null
+++ b/vendor/github.com/oschwald/maxminddb-golang/verifier.go
@@ -0,0 +1,190 @@
+package maxminddb
+
+import (
+	"reflect"
+	"runtime"
+)
+
+type verifier struct {
+	reader *Reader
+}
+
+// Verify checks that the database is valid. It validates the search tree,
+// the data section, and the metadata section. This verifier is stricter than
+// the specification and may return errors on databases that are readable.
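+//
+// A minimal sketch:
+//
+//	if err := db.Verify(); err != nil {
+//		// the database failed strict validation
+//	}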
+func (r *Reader) Verify() error {
+	v := verifier{r}
+	if err := v.verifyMetadata(); err != nil {
+		return err
+	}
+
+	err := v.verifyDatabase()
+	runtime.KeepAlive(v.reader)
+	return err
+}
+
+func (v *verifier) verifyMetadata() error {
+	metadata := v.reader.Metadata
+
+	if metadata.BinaryFormatMajorVersion != 2 {
+		return testError(
+			"binary_format_major_version",
+			2,
+			metadata.BinaryFormatMajorVersion,
+		)
+	}
+
+	if metadata.BinaryFormatMinorVersion != 0 {
+		return testError(
+			"binary_format_minor_version",
+			0,
+			metadata.BinaryFormatMinorVersion,
+		)
+	}
+
+	if metadata.DatabaseType == "" {
+		return testError(
+			"database_type",
+			"non-empty string",
+			metadata.DatabaseType,
+		)
+	}
+
+	if len(metadata.Description) == 0 {
+		return testError(
+			"description",
+			"non-empty slice",
+			metadata.Description,
+		)
+	}
+
+	if metadata.IPVersion != 4 && metadata.IPVersion != 6 {
+		return testError(
+			"ip_version",
+			"4 or 6",
+			metadata.IPVersion,
+		)
+	}
+
+	if metadata.RecordSize != 24 &&
+		metadata.RecordSize != 28 &&
+		metadata.RecordSize != 32 {
+		return testError(
+			"record_size",
+			"24, 28, or 32",
+			metadata.RecordSize,
+		)
+	}
+
+	if metadata.NodeCount == 0 {
+		return testError(
+			"node_count",
+			"positive integer",
+			metadata.NodeCount,
+		)
+	}
+	return nil
+}
+
+func (v *verifier) verifyDatabase() error {
+	offsets, err := v.verifySearchTree()
+	if err != nil {
+		return err
+	}
+
+	if err := v.verifyDataSectionSeparator(); err != nil {
+		return err
+	}
+
+	return v.verifyDataSection(offsets)
+}
+
+func (v *verifier) verifySearchTree() (map[uint]bool, error) {
+	offsets := make(map[uint]bool)
+
+	it := v.reader.Networks()
+	for it.Next() {
+		offset, err := v.reader.resolveDataPointer(it.lastNode.pointer)
+		if err != nil {
+			return nil, err
+		}
+		offsets[uint(offset)] = true
+	}
+	if err := it.Err(); err != nil {
+		return nil, err
+	}
+	return offsets, nil
+}
+
+func (v *verifier) verifyDataSectionSeparator() error {
+	separatorStart := v.reader.Metadata.NodeCount * v.reader.Metadata.RecordSize / 4
+
+	separator := v.reader.buffer[separatorStart : separatorStart+dataSectionSeparatorSize]
+
+	for _, b := range separator {
+		if b != 0 {
+			return newInvalidDatabaseError("unexpected byte in data separator: %v", separator)
+		}
+	}
+	return nil
+}
+
+func (v *verifier) verifyDataSection(offsets map[uint]bool) error {
+	pointerCount := len(offsets)
+
+	decoder := v.reader.decoder
+
+	var offset uint
+	bufferLen := uint(len(decoder.buffer))
+	for offset < bufferLen {
+		var data interface{}
+		rv := reflect.ValueOf(&data)
+		newOffset, err := decoder.decode(offset, rv, 0)
+		if err != nil {
+			return newInvalidDatabaseError("received decoding error (%v) at offset of %v", err, offset)
+		}
+		if newOffset <= offset {
+			return newInvalidDatabaseError("data section offset unexpectedly went from %v to %v", offset, newOffset)
+		}
+
+		pointer := offset
+
+		if _, ok := offsets[pointer]; ok {
+			delete(offsets, pointer)
+		} else {
+			return newInvalidDatabaseError("found data (%v) at %v that the search tree does not point to", data, pointer)
+		}
+
+		offset = newOffset
+	}
+
+	if offset != bufferLen {
+		return newInvalidDatabaseError(
+			"unexpected data at the end of the data section (last offset: %v, end: %v)",
+			offset,
+			bufferLen,
+		)
+	}
+
+	if len(offsets) != 0 {
+		return newInvalidDatabaseError(
+			"found %v pointers (of %v) in the search tree that we did not see in the data section",
+			len(offsets),
+			pointerCount,
+		)
+	}
+	return nil
+}
+
+func testError(
+	field string,
+	expected interface{},
+	actual interface{},
+) error {
+	return newInvalidDatabaseError(
+		"%v - Expected: %v Actual: %v",
+		field,
+		expected,
+		actual,
+	)
+}
diff --git a/vendor/github.com/paulbellamy/ratecounter/.gitignore b/vendor/github.com/paulbellamy/ratecounter/.gitignore
new file mode 100644
index 00000000000..beaf7fcca0c
--- /dev/null
+++ b/vendor/github.com/paulbellamy/ratecounter/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+
+*.sw?
diff --git a/vendor/github.com/paulbellamy/ratecounter/CONTRIBUTORS.md b/vendor/github.com/paulbellamy/ratecounter/CONTRIBUTORS.md
new file mode 100644
index 00000000000..b7c07ce02bc
--- /dev/null
+++ b/vendor/github.com/paulbellamy/ratecounter/CONTRIBUTORS.md
@@ -0,0 +1,14 @@
+RateCounter Contributors (sorted alphabetically)
+============================================
+
+- **[cheshir](https://github.com/cheshir)**
+
+  - Added average rate counter
+
+- **[paulbellamy](https://github.com/paulbellamy)**
+
+  -  Original implementation and general housekeeping
+
+- **[sheerun](https://github.com/sheerun)**
+
+  - Improved memory efficiency
diff --git a/vendor/github.com/paulbellamy/ratecounter/LICENSE b/vendor/github.com/paulbellamy/ratecounter/LICENSE
new file mode 100644
index 00000000000..6cbddede746
--- /dev/null
+++ b/vendor/github.com/paulbellamy/ratecounter/LICENSE
@@ -0,0 +1,19 @@
+Copyright (C) 2012 by Paul Bellamy
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/paulbellamy/ratecounter/README.md b/vendor/github.com/paulbellamy/ratecounter/README.md
new file mode 100644
index 00000000000..d3f8fd209eb
--- /dev/null
+++ b/vendor/github.com/paulbellamy/ratecounter/README.md
@@ -0,0 +1,61 @@
+# ratecounter
+
+[![CircleCI](https://circleci.com/gh/paulbellamy/ratecounter.svg?style=svg)](https://circleci.com/gh/paulbellamy/ratecounter)
+[![Go Report Card](https://goreportcard.com/badge/github.com/paulbellamy/ratecounter)](https://goreportcard.com/report/github.com/paulbellamy/ratecounter)
+[![GoDoc](https://godoc.org/github.com/paulbellamy/ratecounter?status.svg)](https://godoc.org/github.com/paulbellamy/ratecounter)
+[![codecov](https://codecov.io/gh/paulbellamy/ratecounter/branch/master/graph/badge.svg)](https://codecov.io/gh/paulbellamy/ratecounter)
+
+A Thread-Safe RateCounter implementation in Golang
+
+## Usage
+
+```
+import "github.com/paulbellamy/ratecounter"
+```
+
+Package ratecounter provides a thread-safe rate-counter, for tracking
+counts in an interval
+
+Useful for implementing counters and stats of 'requests-per-second' (for
+example):
+
+```go
+// We're recording marks-per-1second
+counter := ratecounter.NewRateCounter(1 * time.Second)
+// Record an event happening
+counter.Incr(1)
+// get the current requests-per-second
+counter.Rate()
+```
+
+To record an average over a longer period, you can:
+
+```go
+// Record requests-per-minute
+counter := ratecounter.NewRateCounter(60 * time.Second)
+// Calculate the average requests-per-second for the last minute
+counter.Rate() / 60
+```
+
+You can also track the average value of some metric over an interval.
+
+Useful for implementing counters and stats of 'average-execution-time' (for
+example):
+
+```go
+// We're recording average execution time of some heavy operation in the last minute.
+counter := ratecounter.NewAvgRateCounter(60 * time.Second)
+// Start timer.
+startTime := time.Now()
+// Execute heavy operation.
+heavyOperation()
+// Record elapsed time.
+counter.Incr(time.Since(startTime).Nanoseconds())
+// Get the current average execution time.
+counter.Rate()
+```
+
+## Documentation
+
+Check the latest documentation on [go doc](https://godoc.org/github.com/paulbellamy/ratecounter).
+
diff --git a/vendor/github.com/paulbellamy/ratecounter/avgratecounter.go b/vendor/github.com/paulbellamy/ratecounter/avgratecounter.go
new file mode 100644
index 00000000000..a559b43a3e2
--- /dev/null
+++ b/vendor/github.com/paulbellamy/ratecounter/avgratecounter.go
@@ -0,0 +1,62 @@
+package ratecounter
+
+import (
+	"strconv"
+	"time"
+)
+
+// An AvgRateCounter is a thread-safe counter which returns the ratio between
+// the accumulated value and the number of 'Incr' calls in the last interval
+type AvgRateCounter struct {
+	hits     *RateCounter
+	counter  *RateCounter
+	interval time.Duration
+}
+
+// NewAvgRateCounter constructs a new AvgRateCounter, for the interval provided
+func NewAvgRateCounter(intrvl time.Duration) *AvgRateCounter {
+	return &AvgRateCounter{
+		hits:     NewRateCounter(intrvl),
+		counter:  NewRateCounter(intrvl),
+		interval: intrvl,
+	}
+}
+
+// WithResolution determines the minimum resolution of this counter
+func (a *AvgRateCounter) WithResolution(resolution int) *AvgRateCounter {
+	if resolution < 1 {
+		panic("AvgRateCounter resolution cannot be less than 1")
+	}
+
+	a.hits = a.hits.WithResolution(resolution)
+	a.counter = a.counter.WithResolution(resolution)
+
+	return a
+}
+
+// Incr adds an event into the AvgRateCounter
+func (a *AvgRateCounter) Incr(val int64) {
+	a.hits.Incr(1)
+	a.counter.Incr(val)
+}
+
+// Rate returns the current ratio between the accumulated value and the number of events during the last interval
+func (a *AvgRateCounter) Rate() float64 {
+	hits, value := a.hits.Rate(), a.counter.Rate()
+
+	if hits == 0 {
+		return 0 // Avoid division by zero
+	}
+
+	return float64(value) / float64(hits)
+}
+
+// Hits returns the number of Incr calls during the specified interval
+func (a *AvgRateCounter) Hits() int64 {
+	return a.hits.Rate()
+}
+
+// String returns the counter's rate formatted as a string
+func (a *AvgRateCounter) String() string {
+	return strconv.FormatFloat(a.Rate(), 'e', 5, 64)
+}
diff --git a/vendor/github.com/paulbellamy/ratecounter/circle.yml b/vendor/github.com/paulbellamy/ratecounter/circle.yml
new file mode 100644
index 00000000000..832df3bd364
--- /dev/null
+++ b/vendor/github.com/paulbellamy/ratecounter/circle.yml
@@ -0,0 +1,21 @@
+dependencies:
+  post:
+    - go get -u github.com/alecthomas/gometalinter
+    - gometalinter --install
+
+test:
+  override:
+    - go test -v -race -coverprofile=coverage.txt -covermode=atomic ./...
+    - |
+      gometalinter \
+        --disable-all \
+        --enable=deadcode \
+        --enable=errcheck \
+        --enable=golint \
+        --enable=gosimple \
+        --enable=unconvert \
+        --enable=vet \
+        --enable=vetshadow \
+        ./...
+  post:
+    - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/paulbellamy/ratecounter/counter.go b/vendor/github.com/paulbellamy/ratecounter/counter.go
new file mode 100644
index 00000000000..4c98a96c14a
--- /dev/null
+++ b/vendor/github.com/paulbellamy/ratecounter/counter.go
@@ -0,0 +1,21 @@
+package ratecounter
+
+import "sync/atomic"
+
+// A Counter is a thread-safe counter implementation
+type Counter int64
+
+// Incr method increments the counter by some value
+func (c *Counter) Incr(val int64) {
+	atomic.AddInt64((*int64)(c), val)
+}
+
+// Reset method resets the counter's value to zero
+func (c *Counter) Reset() {
+	atomic.StoreInt64((*int64)(c), 0)
+}
+
+// Value method returns the counter's current value
+func (c *Counter) Value() int64 {
+	return atomic.LoadInt64((*int64)(c))
+}
diff --git a/vendor/github.com/paulbellamy/ratecounter/doc.go b/vendor/github.com/paulbellamy/ratecounter/doc.go
new file mode 100644
index 00000000000..11d71d018c6
--- /dev/null
+++ b/vendor/github.com/paulbellamy/ratecounter/doc.go
@@ -0,0 +1,24 @@
+/*
+Package ratecounter provides a thread-safe rate-counter, for tracking counts
+in an interval
+
+Useful for implementing counters and stats of 'requests-per-second' (for example).
+
+  // We're recording marks-per-1second
+  counter := ratecounter.NewRateCounter(1 * time.Second)
+
+  // Record an event happening
+  counter.Incr(1)
+
+  // get the current requests-per-second
+  counter.Rate()
+
+To record an average over a longer period, you can:
+
+  // Record requests-per-minute
+  counter := ratecounter.NewRateCounter(60 * time.Second)
+
+  // Calculate the average requests-per-second for the last minute
+  counter.Rate() / 60
+*/
+package ratecounter
diff --git a/vendor/github.com/paulbellamy/ratecounter/ratecounter.go b/vendor/github.com/paulbellamy/ratecounter/ratecounter.go
new file mode 100644
index 00000000000..23472c73c98
--- /dev/null
+++ b/vendor/github.com/paulbellamy/ratecounter/ratecounter.go
@@ -0,0 +1,81 @@
+package ratecounter
+
+import (
+	"strconv"
+	"sync/atomic"
+	"time"
+)
+
+// A RateCounter is a thread-safe counter which returns the number of times
+// 'Incr' has been called in the last interval
+type RateCounter struct {
+	counter    Counter
+	interval   time.Duration
+	resolution int
+	partials   []Counter
+	current    int32
+	running    int32
+}
+
+// NewRateCounter constructs a new RateCounter for the interval provided
+func NewRateCounter(intrvl time.Duration) *RateCounter {
+	ratecounter := &RateCounter{
+		interval: intrvl,
+		running:  0,
+	}
+
+	return ratecounter.WithResolution(20)
+}
+
+// WithResolution determines the minimum resolution (the number of sub-interval buckets) of this counter; the default is 20
+func (r *RateCounter) WithResolution(resolution int) *RateCounter {
+	if resolution < 1 {
+		panic("RateCounter resolution cannot be less than 1")
+	}
+
+	r.resolution = resolution
+	r.partials = make([]Counter, resolution)
+	r.current = 0
+
+	return r
+}
+
+func (r *RateCounter) run() {
+	if ok := atomic.CompareAndSwapInt32(&r.running, 0, 1); !ok {
+		return
+	}
+
+	go func() {
+		ticker := time.NewTicker(time.Duration(float64(r.interval) / float64(r.resolution)))
+
+		for range ticker.C {
+			current := atomic.LoadInt32(&r.current)
+			next := (int(current) + 1) % r.resolution
+			r.counter.Incr(-1 * r.partials[next].Value())
+			r.partials[next].Reset()
+			atomic.CompareAndSwapInt32(&r.current, current, int32(next))
+			if r.counter.Value() == 0 {
+				atomic.StoreInt32(&r.running, 0)
+				ticker.Stop()
+
+				return
+			}
+		}
+	}()
+}
+
+// Incr adds an event into the RateCounter
+func (r *RateCounter) Incr(val int64) {
+	r.counter.Incr(val)
+	r.partials[atomic.LoadInt32(&r.current)].Incr(val)
+	r.run()
+}
+
+// Rate returns the current number of events in the last interval
+func (r *RateCounter) Rate() int64 {
+	return r.counter.Value()
+}
+
+func (r *RateCounter) String() string {
+	return strconv.FormatInt(r.counter.Value(), 10)
+}
diff --git a/vendor/github.com/pierrec/lz4/.gitignore b/vendor/github.com/pierrec/lz4/.gitignore
new file mode 100644
index 00000000000..5e987350471
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/.gitignore
@@ -0,0 +1,34 @@
+# Created by https://www.gitignore.io/api/macos
+
+### macOS ###
+*.DS_Store
+.AppleDouble
+.LSOverride
+
+# Icon must end with two \r
+Icon
+
+
+# Thumbnails
+._*
+
+# Files that might appear in the root of a volume
+.DocumentRevisions-V100
+.fseventsd
+.Spotlight-V100
+.TemporaryItems
+.Trashes
+.VolumeIcon.icns
+.com.apple.timemachine.donotpresent
+
+# Directories potentially created on remote AFP share
+.AppleDB
+.AppleDesktop
+Network Trash Folder
+Temporary Items
+.apdisk
+
+# End of https://www.gitignore.io/api/macos
+
+cmd/*/*exe
+.idea
\ No newline at end of file
diff --git a/vendor/github.com/pierrec/lz4/.travis.yml b/vendor/github.com/pierrec/lz4/.travis.yml
new file mode 100644
index 00000000000..fd6c6db713d
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/.travis.yml
@@ -0,0 +1,24 @@
+language: go
+
+env:
+  - GO111MODULE=off
+
+go:
+  - 1.9.x
+  - 1.10.x
+  - 1.11.x
+  - 1.12.x
+  - master
+
+matrix:
+ fast_finish: true
+ allow_failures:
+   - go: master
+
+sudo: false
+
+script: 
+ - go test -v -cpu=2
+ - go test -v -cpu=2 -race
+ - go test -v -cpu=2 -tags noasm
+ - go test -v -cpu=2 -race -tags noasm
diff --git a/vendor/github.com/pierrec/lz4/LICENSE b/vendor/github.com/pierrec/lz4/LICENSE
new file mode 100644
index 00000000000..bd899d8353d
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2015, Pierre Curto
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+  list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+  this list of conditions and the following disclaimer in the documentation
+  and/or other materials provided with the distribution.
+
+* Neither the name of xxHash nor the names of its
+  contributors may be used to endorse or promote products derived from
+  this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/pierrec/lz4/README.md b/vendor/github.com/pierrec/lz4/README.md
new file mode 100644
index 00000000000..4ee388e81bf
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/README.md
@@ -0,0 +1,90 @@
+# lz4 : LZ4 compression in pure Go
+
+[![GoDoc](https://godoc.org/github.com/pierrec/lz4?status.svg)](https://godoc.org/github.com/pierrec/lz4)
+[![Build Status](https://travis-ci.org/pierrec/lz4.svg?branch=master)](https://travis-ci.org/pierrec/lz4)
+[![Go Report Card](https://goreportcard.com/badge/github.com/pierrec/lz4)](https://goreportcard.com/report/github.com/pierrec/lz4)
+[![GitHub tag (latest SemVer)](https://img.shields.io/github/tag/pierrec/lz4.svg?style=social)](https://github.com/pierrec/lz4/tags)
+
+## Overview
+
+This package provides a streaming interface to [LZ4 data streams](http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html) as well as low level compress and uncompress functions for LZ4 data blocks.
+The implementation is based on the reference C [one](https://github.com/lz4/lz4).
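+
+A minimal sketch of the low level block API (passing a nil hash table makes
+CompressBlock allocate its own; with dst sized by CompressBlockBound, the
+zero return that signals incompressible data cannot occur):
+
+```
+src := []byte("hello world hello world hello world")
+dst := make([]byte, lz4.CompressBlockBound(len(src)))
+n, err := lz4.CompressBlock(src, dst, nil)
+// handle err; dst[:n] holds the compressed block
+
+out := make([]byte, len(src))
+m, err := lz4.UncompressBlock(dst[:n], out)
+// handle err; out[:m] holds the original bytes
+```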
+
+## Install
+
+Assuming you have the go toolchain installed:
+
+```
+go get github.com/pierrec/lz4
+```
+
+There is a command line interface tool to compress and decompress LZ4 files.
+
+```
+go install github.com/pierrec/lz4/cmd/lz4c
+```
+
+Usage
+
+```
+Usage of lz4c:
+  -version
+        print the program version
+
+Subcommands:
+Compress the given files or from stdin to stdout.
+compress [arguments] [<file name> ...]
+  -bc
+        enable block checksum
+  -l int
+        compression level (0=fastest)
+  -sc
+        disable stream checksum
+  -size string
+        block max size [64K,256K,1M,4M] (default "4M")
+
+Uncompress the given files or from stdin to stdout.
+uncompress [arguments] [<file name> ...]
+
+```
+
+
+## Example
+
+```
+// Compress and uncompress an input string.
+s := "hello world"
+r := strings.NewReader(s)
+
+// The pipe will uncompress the data from the writer.
+pr, pw := io.Pipe()
+zw := lz4.NewWriter(pw)
+zr := lz4.NewReader(pr)
+
+go func() {
+	// Compress the input string.
+	_, _ = io.Copy(zw, r)
+	_ = zw.Close() // Make sure the writer is closed
+	_ = pw.Close() // Terminate the pipe
+}()
+
+_, _ = io.Copy(os.Stdout, zr)
+
+// Output:
+// hello world
+```
+
+## Contributing
+
+Contributions are very welcome, from bug fixes to performance improvements!
+
+- Open an issue with a proper description
+- Send a pull request with appropriate test case(s)
+
+## Contributors
+
+Thanks to all [contributors](https://github.com/pierrec/lz4/graphs/contributors) so far!
+
+Special thanks to [@Zariel](https://github.com/Zariel) for his asm implementation of the decoder.
+
+Special thanks to [@klauspost](https://github.com/klauspost) for his work on optimizing the code.
diff --git a/vendor/github.com/pierrec/lz4/block.go b/vendor/github.com/pierrec/lz4/block.go
new file mode 100644
index 00000000000..664d9be580d
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/block.go
@@ -0,0 +1,413 @@
+package lz4
+
+import (
+	"encoding/binary"
+	"math/bits"
+	"sync"
+)
+
+// blockHash hashes the lower 6 bytes into a value < htSize.
+func blockHash(x uint64) uint32 {
+	const prime6bytes = 227718039650203
+	return uint32(((x << (64 - 48)) * prime6bytes) >> (64 - hashLog))
+}
+
+// CompressBlockBound returns the maximum compressed size of a buffer of size n in the worst (incompressible) case.
+func CompressBlockBound(n int) int {
+	return n + n/255 + 16
+}
+
+// UncompressBlock uncompresses the source buffer into the destination one,
+// and returns the uncompressed size.
+//
+// The destination buffer must be sized appropriately.
+//
+// An error is returned if the source data is invalid or the destination buffer is too small.
+func UncompressBlock(src, dst []byte) (int, error) {
+	if len(src) == 0 {
+		return 0, nil
+	}
+	if di := decodeBlock(dst, src); di >= 0 {
+		return di, nil
+	}
+	return 0, ErrInvalidSourceShortBuffer
+}
+
+// CompressBlock compresses the source buffer into the destination one.
+// This is the fast version of LZ4 compression and also the default one.
+//
+// The argument hashTable is scratch space for a hash table used by the
+// compressor. If provided, it should have length at least 1<<16. If it is
+// shorter (or nil), CompressBlock allocates its own hash table.
+//
+// The size of the compressed data is returned.
+//
+// If the destination buffer size is lower than CompressBlockBound and
+// the compressed size is 0 and no error, then the data is incompressible.
+//
+// An error is returned if the destination buffer is too small.
+func CompressBlock(src, dst []byte, hashTable []int) (_ int, err error) {
+	defer recoverBlock(&err)
+
+	// Return 0, nil only if the destination buffer size is < CompressBlockBound.
+	isNotCompressible := len(dst) < CompressBlockBound(len(src))
+
+	// adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible.
+	// This significantly speeds up incompressible data and usually has very small impact on compression.
+	// bytes to skip =  1 + (bytes since last match >> adaptSkipLog)
+	const adaptSkipLog = 7
+	if len(hashTable) < htSize {
+		htIface := htPool.Get()
+		defer htPool.Put(htIface)
+		hashTable = (*(htIface).(*[htSize]int))[:]
+	}
+	// Prove to the compiler the table has at least htSize elements.
+	// The compiler can see that "uint32() >> hashShift" cannot be out of bounds.
+	hashTable = hashTable[:htSize]
+
+	// si: Current position of the search.
+	// anchor: Position of the current literals.
+	var si, di, anchor int
+	sn := len(src) - mfLimit
+	if sn <= 0 {
+		goto lastLiterals
+	}
+
+	// Fast scan strategy: the hash table only stores the last 4-byte sequences.
+	for si < sn {
+		// Hash the next 6 bytes (sequence)...
+		match := binary.LittleEndian.Uint64(src[si:])
+		h := blockHash(match)
+		h2 := blockHash(match >> 8)
+
+		// We check a match at s, s+1 and s+2 and pick the first one we get.
+		// Checking 3 only requires us to load the source once.
+		ref := hashTable[h]
+		ref2 := hashTable[h2]
+		hashTable[h] = si
+		hashTable[h2] = si + 1
+		offset := si - ref
+
+		// If offset <= 0 we got an old entry in the hash table.
+		if offset <= 0 || offset >= winSize || // Out of window.
+			uint32(match) != binary.LittleEndian.Uint32(src[ref:]) { // Hash collision on different matches.
+			// No match. Start calculating another hash.
+			// The processor can usually do this out-of-order.
+			h = blockHash(match >> 16)
+			ref = hashTable[h]
+
+			// Check the second match at si+1
+			si += 1
+			offset = si - ref2
+
+			if offset <= 0 || offset >= winSize ||
+				uint32(match>>8) != binary.LittleEndian.Uint32(src[ref2:]) {
+				// No match. Check the third match at si+2
+				si += 1
+				offset = si - ref
+				hashTable[h] = si
+
+				if offset <= 0 || offset >= winSize ||
+					uint32(match>>16) != binary.LittleEndian.Uint32(src[ref:]) {
+					// Skip one extra byte (at si+3) before we check 3 matches again.
+					si += 2 + (si-anchor)>>adaptSkipLog
+					continue
+				}
+			}
+		}
+
+		// Match found.
+		lLen := si - anchor // Literal length.
+		// We already matched 4 bytes.
+		mLen := 4
+
+		// Extend backwards if we can, reducing literals.
+		tOff := si - offset - 1
+		for lLen > 0 && tOff >= 0 && src[si-1] == src[tOff] {
+			si--
+			tOff--
+			lLen--
+			mLen++
+		}
+
+		// Add the match length, so we continue search at the end.
+		// Use mLen to store the offset base.
+		si, mLen = si+mLen, si+minMatch
+
+		// Find the longest match by looking in batches of 8 bytes.
+		for si+8 < sn {
+			x := binary.LittleEndian.Uint64(src[si:]) ^ binary.LittleEndian.Uint64(src[si-offset:])
+			if x == 0 {
+				si += 8
+			} else {
+				// Stop at the first non-zero byte.
+				si += bits.TrailingZeros64(x) >> 3
+				break
+			}
+		}
+
+		mLen = si - mLen
+		if mLen < 0xF {
+			dst[di] = byte(mLen)
+		} else {
+			dst[di] = 0xF
+		}
+
+		// Encode literals length.
+		if lLen < 0xF {
+			dst[di] |= byte(lLen << 4)
+		} else {
+			dst[di] |= 0xF0
+			di++
+			l := lLen - 0xF
+			for ; l >= 0xFF; l -= 0xFF {
+				dst[di] = 0xFF
+				di++
+			}
+			dst[di] = byte(l)
+		}
+		di++
+
+		// Literals.
+		copy(dst[di:di+lLen], src[anchor:anchor+lLen])
+		di += lLen + 2
+		anchor = si
+
+		// Encode offset.
+		_ = dst[di] // Bound check elimination.
+		dst[di-2], dst[di-1] = byte(offset), byte(offset>>8)
+
+		// Encode match length part 2.
+		if mLen >= 0xF {
+			for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF {
+				dst[di] = 0xFF
+				di++
+			}
+			dst[di] = byte(mLen)
+			di++
+		}
+		// Check if we can load next values.
+		if si >= sn {
+			break
+		}
+		// Hash match end-2
+		h = blockHash(binary.LittleEndian.Uint64(src[si-2:]))
+		hashTable[h] = si - 2
+	}
+
+lastLiterals:
+	if isNotCompressible && anchor == 0 {
+		// Incompressible.
+		return 0, nil
+	}
+
+	// Last literals.
+	lLen := len(src) - anchor
+	if lLen < 0xF {
+		dst[di] = byte(lLen << 4)
+	} else {
+		dst[di] = 0xF0
+		di++
+		for lLen -= 0xF; lLen >= 0xFF; lLen -= 0xFF {
+			dst[di] = 0xFF
+			di++
+		}
+		dst[di] = byte(lLen)
+	}
+	di++
+
+	// Write the last literals.
+	if isNotCompressible && di >= anchor {
+		// Incompressible.
+		return 0, nil
+	}
+	di += copy(dst[di:di+len(src)-anchor], src[anchor:])
+	return di, nil
+}
+
+// Pool of hash tables for CompressBlock.
+var htPool = sync.Pool{
+	New: func() interface{} {
+		return new([htSize]int)
+	},
+}
+
+// blockHashHC hashes 4 bytes into a value < winSize.
+func blockHashHC(x uint32) uint32 {
+	const hasher uint32 = 2654435761 // Knuth multiplicative hash.
+	return x * hasher >> (32 - winSizeLog)
+}
+
+// CompressBlockHC compresses the source buffer src into the destination dst
+// with max search depth (use 0 or negative value for no max).
+//
+// CompressBlockHC compression ratio is better than CompressBlock but it is also slower.
+//
+// The size of the compressed data is returned.
+//
+// If the destination buffer size is lower than CompressBlockBound and
+// the compressed size is 0 and no error, then the data is incompressible.
+//
+// An error is returned if the destination buffer is too small.
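+//
+// Usage mirrors CompressBlock; a minimal sketch (src is the input block):
+//
+//	dst := make([]byte, lz4.CompressBlockBound(len(src)))
+//	n, err := lz4.CompressBlockHC(src, dst, 0) // depth 0: no maximum search depth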
+func CompressBlockHC(src, dst []byte, depth int) (_ int, err error) {
+	defer recoverBlock(&err)
+
+	// Return 0, nil only if the destination buffer size is < CompressBlockBound.
+	isNotCompressible := len(dst) < CompressBlockBound(len(src))
+
+	// adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible.
+	// This significantly speeds up incompressible data and usually has very small impact on compression.
+	// bytes to skip =  1 + (bytes since last match >> adaptSkipLog)
+	const adaptSkipLog = 7
+
+	var si, di, anchor int
+
+	// hashTable: stores the last position found for a given hash
+	// chainTable: stores previous positions for a given hash
+	var hashTable, chainTable [winSize]int
+
+	if depth <= 0 {
+		depth = winSize
+	}
+
+	sn := len(src) - mfLimit
+	if sn <= 0 {
+		goto lastLiterals
+	}
+
+	for si < sn {
+		// Hash the next 4 bytes (sequence).
+		match := binary.LittleEndian.Uint32(src[si:])
+		h := blockHashHC(match)
+
+		// Follow the chain until out of window and give the longest match.
+		mLen := 0
+		offset := 0
+		for next, try := hashTable[h], depth; try > 0 && next > 0 && si-next < winSize; next = chainTable[next&winMask] {
+			// The first (mLen==0) or next byte (mLen>=minMatch) at current match length
+			// must match to improve on the match length.
+			if src[next+mLen] != src[si+mLen] {
+				continue
+			}
+			ml := 0
+			// Compare the current position with a previous one with the same hash.
+			for ml < sn-si {
+				x := binary.LittleEndian.Uint64(src[next+ml:]) ^ binary.LittleEndian.Uint64(src[si+ml:])
+				if x == 0 {
+					ml += 8
+				} else {
+					// Stop at the first non-zero byte.
+					ml += bits.TrailingZeros64(x) >> 3
+					break
+				}
+			}
+			if ml < minMatch || ml <= mLen {
+				// Match too small (<minMatch) or smaller than the current match.
+				continue
+			}
+			// Found a longer match, keep its position and length.
+			mLen = ml
+			offset = si - next
+			// Try another previous position with the same hash.
+			try--
+		}
+		chainTable[si&winMask] = hashTable[h]
+		hashTable[h] = si
+
+		// No match found.
+		if mLen == 0 {
+			si += 1 + (si-anchor)>>adaptSkipLog
+			continue
+		}
+
+		// Match found.
+		// Update hash/chain tables with overlapping bytes:
+		// si already hashed, add everything from si+1 up to the match length.
+		winStart := si + 1
+		if ws := si + mLen - winSize; ws > winStart {
+			winStart = ws
+		}
+		for si, ml := winStart, si+mLen; si < ml; {
+			match >>= 8
+			match |= uint32(src[si+3]) << 24
+			h := blockHashHC(match)
+			chainTable[si&winMask] = hashTable[h]
+			hashTable[h] = si
+			si++
+		}
+
+		lLen := si - anchor
+		si += mLen
+		mLen -= minMatch // Match length does not include minMatch.
+
+		if mLen < 0xF {
+			dst[di] = byte(mLen)
+		} else {
+			dst[di] = 0xF
+		}
+
+		// Encode literals length.
+		if lLen < 0xF {
+			dst[di] |= byte(lLen << 4)
+		} else {
+			dst[di] |= 0xF0
+			di++
+			l := lLen - 0xF
+			for ; l >= 0xFF; l -= 0xFF {
+				dst[di] = 0xFF
+				di++
+			}
+			dst[di] = byte(l)
+		}
+		di++
+
+		// Literals.
+		copy(dst[di:di+lLen], src[anchor:anchor+lLen])
+		di += lLen
+		anchor = si
+
+		// Encode offset.
+		di += 2
+		dst[di-2], dst[di-1] = byte(offset), byte(offset>>8)
+
+		// Encode match length part 2.
+		if mLen >= 0xF {
+			for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF {
+				dst[di] = 0xFF
+				di++
+			}
+			dst[di] = byte(mLen)
+			di++
+		}
+	}
+
+	if isNotCompressible && anchor == 0 {
+		// Incompressible.
+		return 0, nil
+	}
+
+	// Last literals.
+lastLiterals:
+	lLen := len(src) - anchor
+	if lLen < 0xF {
+		dst[di] = byte(lLen << 4)
+	} else {
+		dst[di] = 0xF0
+		di++
+		lLen -= 0xF
+		for ; lLen >= 0xFF; lLen -= 0xFF {
+			dst[di] = 0xFF
+			di++
+		}
+		dst[di] = byte(lLen)
+	}
+	di++
+
+	// Write the last literals.
+	if isNotCompressible && di >= anchor {
+		// Incompressible.
+		return 0, nil
+	}
+	di += copy(dst[di:di+len(src)-anchor], src[anchor:])
+	return di, nil
+}
diff --git a/vendor/github.com/pierrec/lz4/debug.go b/vendor/github.com/pierrec/lz4/debug.go
new file mode 100644
index 00000000000..bc5e78d40f0
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/debug.go
@@ -0,0 +1,23 @@
+// +build lz4debug
+
+package lz4
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"runtime"
+)
+
+const debugFlag = true
+
+func debug(args ...interface{}) {
+	_, file, line, _ := runtime.Caller(1)
+	file = filepath.Base(file)
+
+	f := fmt.Sprintf("LZ4: %s:%d %s", file, line, args[0])
+	if f[len(f)-1] != '\n' {
+		f += "\n"
+	}
+	fmt.Fprintf(os.Stderr, f, args[1:]...)
+}
diff --git a/vendor/github.com/pierrec/lz4/debug_stub.go b/vendor/github.com/pierrec/lz4/debug_stub.go
new file mode 100644
index 00000000000..44211ad9645
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/debug_stub.go
@@ -0,0 +1,7 @@
+// +build !lz4debug
+
+package lz4
+
+const debugFlag = false
+
+func debug(args ...interface{}) {}
diff --git a/vendor/github.com/pierrec/lz4/decode_amd64.go b/vendor/github.com/pierrec/lz4/decode_amd64.go
new file mode 100644
index 00000000000..43cc14fbe2e
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/decode_amd64.go
@@ -0,0 +1,8 @@
+// +build !appengine
+// +build gc
+// +build !noasm
+
+package lz4
+
+//go:noescape
+func decodeBlock(dst, src []byte) int
diff --git a/vendor/github.com/pierrec/lz4/decode_amd64.s b/vendor/github.com/pierrec/lz4/decode_amd64.s
new file mode 100644
index 00000000000..20fef39759c
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/decode_amd64.s
@@ -0,0 +1,375 @@
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+
+// AX scratch
+// BX scratch
+// CX scratch
+// DX token
+//
+// DI &dst
+// SI &src
+// R8 &dst + len(dst)
+// R9 &src + len(src)
+// R11 &dst
+// R12 short output end
+// R13 short input end
+// func decodeBlock(dst, src []byte) int
+// using 50 bytes of stack currently
+TEXT ·decodeBlock(SB), NOSPLIT, $64-56
+	MOVQ dst_base+0(FP), DI
+	MOVQ DI, R11
+	MOVQ dst_len+8(FP), R8
+	ADDQ DI, R8
+
+	MOVQ src_base+24(FP), SI
+	MOVQ src_len+32(FP), R9
+	ADDQ SI, R9
+
+	// shortcut ends
+	// short output end
+	MOVQ R8, R12
+	SUBQ $32, R12
+	// short input end
+	MOVQ R9, R13
+	SUBQ $16, R13
+
+loop:
+	// for si < len(src)
+	CMPQ SI, R9
+	JGE end
+
+	// token := uint32(src[si])
+	MOVBQZX (SI), DX
+	INCQ SI
+
+	// lit_len = token >> 4
+	// if lit_len > 0
+	// CX = lit_len
+	MOVQ DX, CX
+	SHRQ $4, CX
+
+	// if lit_len != 0xF
+	CMPQ CX, $0xF
+	JEQ lit_len_loop_pre
+	CMPQ DI, R12
+	JGE lit_len_loop_pre
+	CMPQ SI, R13
+	JGE lit_len_loop_pre
+
+	// copy shortcut
+
+	// A two-stage shortcut for the most common case:
+	// 1) If the literal length is 0..14, and there is enough space,
+	// enter the shortcut and copy 16 bytes on behalf of the literals
+	// (in the fast mode, only 8 bytes can be safely copied this way).
+	// 2) Further if the match length is 4..18, copy 18 bytes in a similar
+	// manner; but we ensure that there's enough space in the output for
+	// those 18 bytes earlier, upon entering the shortcut (in other words,
+	// there is a combined check for both stages).
+
+	// copy literal
+	MOVOU (SI), X0
+	MOVOU X0, (DI)
+	ADDQ CX, DI
+	ADDQ CX, SI
+
+	MOVQ DX, CX
+	ANDQ $0xF, CX
+
+	// The second stage: prepare for match copying, decode full info.
+	// If it doesn't work out, the info won't be wasted.
+	// offset := uint16(data[:2])
+	MOVWQZX (SI), DX
+	ADDQ $2, SI
+
+	MOVQ DI, AX
+	SUBQ DX, AX
+	CMPQ AX, DI
+	JGT err_short_buf
+
+	// if we can't do the second stage then jump straight to read the
+	// match length, we already have the offset.
+	CMPQ CX, $0xF
+	JEQ match_len_loop_pre
+	CMPQ DX, $8
+	JLT match_len_loop_pre
+	CMPQ AX, R11
+	JLT err_short_buf
+
+	// memcpy(op + 0, match + 0, 8);
+	MOVQ (AX), BX
+	MOVQ BX, (DI)
+	// memcpy(op + 8, match + 8, 8);
+	MOVQ 8(AX), BX
+	MOVQ BX, 8(DI)
+	// memcpy(op +16, match +16, 2);
+	MOVW 16(AX), BX
+	MOVW BX, 16(DI)
+
+	ADDQ $4, DI // minmatch
+	ADDQ CX, DI
+
+	// shortcut complete, load next token
+	JMP loop
+
+lit_len_loop_pre:
+	// if lit_len > 0
+	CMPQ CX, $0
+	JEQ offset
+	CMPQ CX, $0xF
+	JNE copy_literal
+
+lit_len_loop:
+	// for src[si] == 0xFF
+	CMPB (SI), $0xFF
+	JNE lit_len_finalise
+
+	// bounds check src[si+1]
+	MOVQ SI, AX
+	ADDQ $1, AX
+	CMPQ AX, R9
+	JGT err_short_buf
+
+	// lit_len += 0xFF
+	ADDQ $0xFF, CX
+	INCQ SI
+	JMP lit_len_loop
+
+lit_len_finalise:
+	// lit_len += int(src[si])
+	// si++
+	MOVBQZX (SI), AX
+	ADDQ AX, CX
+	INCQ SI
+
+copy_literal:
+	// bounds check src and dst
+	MOVQ SI, AX
+	ADDQ CX, AX
+	CMPQ AX, R9
+	JGT err_short_buf
+
+	MOVQ DI, AX
+	ADDQ CX, AX
+	CMPQ AX, R8
+	JGT err_short_buf
+
+	// what's a good cutoff to call memmove?
+	CMPQ CX, $16
+	JGT memmove_lit
+
+	// if len(dst[di:]) < 16
+	MOVQ R8, AX
+	SUBQ DI, AX
+	CMPQ AX, $16
+	JLT memmove_lit
+
+	// if len(src[si:]) < 16
+	MOVQ R9, AX
+	SUBQ SI, AX
+	CMPQ AX, $16
+	JLT memmove_lit
+
+	MOVOU (SI), X0
+	MOVOU X0, (DI)
+
+	JMP finish_lit_copy
+
+memmove_lit:
+	// memmove(to, from, len)
+	MOVQ DI, 0(SP)
+	MOVQ SI, 8(SP)
+	MOVQ CX, 16(SP)
+	// spill
+	MOVQ DI, 24(SP)
+	MOVQ SI, 32(SP)
+	MOVQ CX, 40(SP) // need len to inc SI, DI after
+	MOVB DX, 48(SP)
+	CALL runtime·memmove(SB)
+
+	// restore registers
+	MOVQ 24(SP), DI
+	MOVQ 32(SP), SI
+	MOVQ 40(SP), CX
+	MOVB 48(SP), DX
+
+	// recalc initial values
+	MOVQ dst_base+0(FP), R8
+	MOVQ R8, R11
+	ADDQ dst_len+8(FP), R8
+	MOVQ src_base+24(FP), R9
+	ADDQ src_len+32(FP), R9
+	MOVQ R8, R12
+	SUBQ $32, R12
+	MOVQ R9, R13
+	SUBQ $16, R13
+
+finish_lit_copy:
+	ADDQ CX, SI
+	ADDQ CX, DI
+
+	CMPQ SI, R9
+	JGE end
+
+offset:
+	// CX := mLen
+	// free up DX to use for offset
+	MOVQ DX, CX
+
+	MOVQ SI, AX
+	ADDQ $2, AX
+	CMPQ AX, R9
+	JGT err_short_buf
+
+	// offset
+	// DX := int(src[si]) | int(src[si+1])<<8
+	MOVWQZX (SI), DX
+	ADDQ $2, SI
+
+	// 0 offset is invalid
+	CMPQ DX, $0
+	JEQ err_corrupt
+
+	ANDB $0xF, CX
+
+match_len_loop_pre:
+	// if mlen != 0xF
+	CMPB CX, $0xF
+	JNE copy_match
+
+match_len_loop:
+	// for src[si] == 0xFF
+	// lit_len += 0xFF
+	CMPB (SI), $0xFF
+	JNE match_len_finalise
+
+	// bounds check src[si+1]
+	MOVQ SI, AX
+	ADDQ $1, AX
+	CMPQ AX, R9
+	JGT err_short_buf
+
+	ADDQ $0xFF, CX
+	INCQ SI
+	JMP match_len_loop
+
+match_len_finalise:
+	// lit_len += int(src[si])
+	// si++
+	MOVBQZX (SI), AX
+	ADDQ AX, CX
+	INCQ SI
+
+copy_match:
+	// mLen += minMatch
+	ADDQ $4, CX
+
+	// check we have match_len bytes left in dst
+	// di+match_len < len(dst)
+	MOVQ DI, AX
+	ADDQ CX, AX
+	CMPQ AX, R8
+	JGT err_short_buf
+
+	// DX = offset
+	// CX = match_len
+	// BX = &dst + (di - offset)
+	MOVQ DI, BX
+	SUBQ DX, BX
+
+	// check BX is within dst
+	// if BX < &dst
+	CMPQ BX, R11
+	JLT err_short_buf
+
+	// if offset + match_len < di
+	MOVQ BX, AX
+	ADDQ CX, AX
+	CMPQ DI, AX
+	JGT copy_interior_match
+
+	// AX := len(dst[:di])
+	// MOVQ DI, AX
+	// SUBQ R11, AX
+
+	// copy 16 bytes at a time
+	// if di-offset < 16 copy 16-(di-offset) bytes to di
+	// then do the remaining
+
+copy_match_loop:
+	// for match_len >= 0
+	// dst[di] = dst[i]
+	// di++
+	// i++
+	MOVB (BX), AX
+	MOVB AX, (DI)
+	INCQ DI
+	INCQ BX
+	DECQ CX
+
+	CMPQ CX, $0
+	JGT copy_match_loop
+
+	JMP loop
+
+copy_interior_match:
+	CMPQ CX, $16
+	JGT memmove_match
+
+	// if len(dst[di:]) < 16
+	MOVQ R8, AX
+	SUBQ DI, AX
+	CMPQ AX, $16
+	JLT memmove_match
+
+	MOVOU (BX), X0
+	MOVOU X0, (DI)
+
+	ADDQ CX, DI
+	JMP loop
+
+memmove_match:
+	// memmove(to, from, len)
+	MOVQ DI, 0(SP)
+	MOVQ BX, 8(SP)
+	MOVQ CX, 16(SP)
+	// spill
+	MOVQ DI, 24(SP)
+	MOVQ SI, 32(SP)
+	MOVQ CX, 40(SP) // need len to inc SI, DI after
+	CALL runtime·memmove(SB)
+
+	// restore registers
+	MOVQ 24(SP), DI
+	MOVQ 32(SP), SI
+	MOVQ 40(SP), CX
+
+	// recalc initial values
+	MOVQ dst_base+0(FP), R8
+	MOVQ R8, R11 // TODO: make these sensible numbers
+	ADDQ dst_len+8(FP), R8
+	MOVQ src_base+24(FP), R9
+	ADDQ src_len+32(FP), R9
+	MOVQ R8, R12
+	SUBQ $32, R12
+	MOVQ R9, R13
+	SUBQ $16, R13
+
+	ADDQ CX, DI
+	JMP loop
+
+err_corrupt:
+	MOVQ $-1, ret+48(FP)
+	RET
+
+err_short_buf:
+	MOVQ $-2, ret+48(FP)
+	RET
+
+end:
+	SUBQ R11, DI
+	MOVQ DI, ret+48(FP)
+	RET
diff --git a/vendor/github.com/pierrec/lz4/decode_other.go b/vendor/github.com/pierrec/lz4/decode_other.go
new file mode 100644
index 00000000000..919888edf7d
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/decode_other.go
@@ -0,0 +1,98 @@
+// +build !amd64 appengine !gc noasm
+
+package lz4
+
+func decodeBlock(dst, src []byte) (ret int) {
+	const hasError = -2
+	defer func() {
+		if recover() != nil {
+			ret = hasError
+		}
+	}()
+
+	var si, di int
+	for {
+		// Literals and match lengths (token).
+		b := int(src[si])
+		si++
+
+		// Literals.
+		if lLen := b >> 4; lLen > 0 {
+			switch {
+			case lLen < 0xF && si+16 < len(src):
+				// Shortcut 1
+				// if we have enough room in src and dst, and the literals length
+				// is small enough (0..14) then copy all 16 bytes, even if not all
+				// are part of the literals.
+				copy(dst[di:], src[si:si+16])
+				si += lLen
+				di += lLen
+				if mLen := b & 0xF; mLen < 0xF {
+					// Shortcut 2
+					// if the match length (4..18) fits within the literals, then copy
+					// all 18 bytes, even if not all are part of the literals.
+					mLen += 4
+					if offset := int(src[si]) | int(src[si+1])<<8; mLen <= offset {
+						i := di - offset
+						end := i + 18
+						if end > len(dst) {
+							// The remaining buffer may not hold 18 bytes.
+							// See https://github.com/pierrec/lz4/issues/51.
+							end = len(dst)
+						}
+						copy(dst[di:], dst[i:end])
+						si += 2
+						di += mLen
+						continue
+					}
+				}
+			case lLen == 0xF:
+				for src[si] == 0xFF {
+					lLen += 0xFF
+					si++
+				}
+				lLen += int(src[si])
+				si++
+				fallthrough
+			default:
+				copy(dst[di:di+lLen], src[si:si+lLen])
+				si += lLen
+				di += lLen
+			}
+		}
+		if si >= len(src) {
+			return di
+		}
+
+		offset := int(src[si]) | int(src[si+1])<<8
+		if offset == 0 {
+			return hasError
+		}
+		si += 2
+
+		// Match.
+		mLen := b & 0xF
+		if mLen == 0xF {
+			for src[si] == 0xFF {
+				mLen += 0xFF
+				si++
+			}
+			mLen += int(src[si])
+			si++
+		}
+		mLen += minMatch
+
+		// Copy the match.
+		expanded := dst[di-offset:]
+		if mLen > offset {
+			// Efficiently copy the match dst[di-offset:di] into the dst slice.
+			bytesToCopy := offset * (mLen / offset)
+			for n := offset; n <= bytesToCopy+offset; n *= 2 {
+				copy(expanded[n:], expanded[:n])
+			}
+			di += bytesToCopy
+			mLen -= bytesToCopy
+		}
+		di += copy(dst[di:di+mLen], expanded[:mLen])
+	}
+}
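
For orientation, a minimal sketch of the sequence layout this decoder walks: a token byte (high nibble: literal count, low nibble: match length minus 4), the literal bytes, then a 2-byte little-endian match offset. It assumes the package's public `UncompressBlock` wrapper (defined in block.go, not part of this hunk) delegates to `decodeBlock`:

```go
package main

import (
	"fmt"

	"github.com/pierrec/lz4"
)

func main() {
	// Hand-assembled block: token 0x40 (4 literals, match length 4+0),
	// literals "abcd", offset 4 (little-endian), then a final
	// literals-only sequence: token 0x10 with the single literal '!'.
	src := []byte{0x40, 'a', 'b', 'c', 'd', 0x04, 0x00, 0x10, '!'}
	dst := make([]byte, 16) // must be large enough for the decoded output

	n, err := lz4.UncompressBlock(src, dst)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", dst[:n]) // "abcdabcd!": the match re-reads "abcd"
}
```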
diff --git a/vendor/github.com/pierrec/lz4/errors.go b/vendor/github.com/pierrec/lz4/errors.go
new file mode 100644
index 00000000000..1c45d1813ce
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/errors.go
@@ -0,0 +1,30 @@
+package lz4
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	rdebug "runtime/debug"
+)
+
+var (
+	// ErrInvalidSourceShortBuffer is returned by UncompressBlock or CompressBlock when a compressed
+	// block is corrupted or the destination buffer is not large enough for the uncompressed data.
+	ErrInvalidSourceShortBuffer = errors.New("lz4: invalid source or destination buffer too short")
+	// ErrInvalid is returned when reading an invalid LZ4 archive.
+	ErrInvalid = errors.New("lz4: bad magic number")
+	// ErrBlockDependency is returned when attempting to decompress an archive created with block dependency.
+	ErrBlockDependency = errors.New("lz4: block dependency not supported")
+	// ErrUnsupportedSeek is returned when attempting to Seek any way but forward from the current position.
+	ErrUnsupportedSeek = errors.New("lz4: can only seek forward from io.SeekCurrent")
+)
+
+func recoverBlock(e *error) {
+	if r := recover(); r != nil && *e == nil {
+		if debugFlag {
+			fmt.Fprintln(os.Stderr, r)
+			rdebug.PrintStack()
+		}
+		*e = ErrInvalidSourceShortBuffer
+	}
+}
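
These are sentinel errors, so callers can compare against them directly. A small sketch, assuming the `UncompressBlock` wrapper in block.go maps a negative `decodeBlock` result to `ErrInvalidSourceShortBuffer`:

```go
package main

import (
	"fmt"

	"github.com/pierrec/lz4"
)

func main() {
	// The same hand-assembled block as above decodes to 9 bytes,
	// so a 4-byte destination is deliberately too short.
	src := []byte{0x40, 'a', 'b', 'c', 'd', 0x04, 0x00, 0x10, '!'}
	dst := make([]byte, 4)

	_, err := lz4.UncompressBlock(src, dst)
	fmt.Println(err == lz4.ErrInvalidSourceShortBuffer) // true
}
```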
diff --git a/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go b/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go
new file mode 100644
index 00000000000..7a76a6bce2b
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go
@@ -0,0 +1,223 @@
+// Package xxh32 implements the very fast XXH hashing algorithm (32-bit version).
+// (https://github.com/Cyan4973/XXH/)
+package xxh32
+
+import (
+	"encoding/binary"
+)
+
+const (
+	prime1 uint32 = 2654435761
+	prime2 uint32 = 2246822519
+	prime3 uint32 = 3266489917
+	prime4 uint32 = 668265263
+	prime5 uint32 = 374761393
+
+	primeMask   = 0xFFFFFFFF
+	prime1plus2 = uint32((uint64(prime1) + uint64(prime2)) & primeMask) // 606290984
+	prime1minus = uint32((-int64(prime1)) & primeMask)                  // 1640531535
+)
+
+// XXHZero represents an xxhash32 object with seed 0.
+type XXHZero struct {
+	v1       uint32
+	v2       uint32
+	v3       uint32
+	v4       uint32
+	totalLen uint64
+	buf      [16]byte
+	bufused  int
+}
+
+// Sum appends the current hash to b and returns the resulting slice.
+// It does not change the underlying hash state.
+func (xxh XXHZero) Sum(b []byte) []byte {
+	h32 := xxh.Sum32()
+	return append(b, byte(h32), byte(h32>>8), byte(h32>>16), byte(h32>>24))
+}
+
+// Reset resets the Hash to its initial state.
+func (xxh *XXHZero) Reset() {
+	xxh.v1 = prime1plus2
+	xxh.v2 = prime2
+	xxh.v3 = 0
+	xxh.v4 = prime1minus
+	xxh.totalLen = 0
+	xxh.bufused = 0
+}
+
+// Size returns the number of bytes returned by Sum().
+func (xxh *XXHZero) Size() int {
+	return 4
+}
+
+// BlockSize gives the minimum number of bytes accepted by Write().
+func (xxh *XXHZero) BlockSize() int {
+	return 1
+}
+
+// Write adds input bytes to the Hash.
+// It never returns an error.
+func (xxh *XXHZero) Write(input []byte) (int, error) {
+	if xxh.totalLen == 0 {
+		xxh.Reset()
+	}
+	n := len(input)
+	m := xxh.bufused
+
+	xxh.totalLen += uint64(n)
+
+	r := len(xxh.buf) - m
+	if n < r {
+		copy(xxh.buf[m:], input)
+		xxh.bufused += len(input)
+		return n, nil
+	}
+
+	p := 0
+	// Causes compiler to work directly from registers instead of stack:
+	v1, v2, v3, v4 := xxh.v1, xxh.v2, xxh.v3, xxh.v4
+	if m > 0 {
+		// some data left from previous update
+		copy(xxh.buf[xxh.bufused:], input[:r])
+		xxh.bufused += len(input) - r
+
+		// fast rotl(13)
+		buf := xxh.buf[:16] // BCE hint.
+		v1 = rol13(v1+binary.LittleEndian.Uint32(buf[:])*prime2) * prime1
+		v2 = rol13(v2+binary.LittleEndian.Uint32(buf[4:])*prime2) * prime1
+		v3 = rol13(v3+binary.LittleEndian.Uint32(buf[8:])*prime2) * prime1
+		v4 = rol13(v4+binary.LittleEndian.Uint32(buf[12:])*prime2) * prime1
+		p = r
+		xxh.bufused = 0
+	}
+
+	for n := n - 16; p <= n; p += 16 {
+		sub := input[p:][:16] // BCE hint for the compiler.
+		v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1
+		v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1
+		v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1
+		v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1
+	}
+	xxh.v1, xxh.v2, xxh.v3, xxh.v4 = v1, v2, v3, v4
+
+	copy(xxh.buf[xxh.bufused:], input[p:])
+	xxh.bufused += len(input) - p
+
+	return n, nil
+}
+
+// Sum32 returns the 32-bit hash value.
+func (xxh *XXHZero) Sum32() uint32 {
+	h32 := uint32(xxh.totalLen)
+	if h32 >= 16 {
+		h32 += rol1(xxh.v1) + rol7(xxh.v2) + rol12(xxh.v3) + rol18(xxh.v4)
+	} else {
+		h32 += prime5
+	}
+
+	p := 0
+	n := xxh.bufused
+	buf := xxh.buf
+	for n := n - 4; p <= n; p += 4 {
+		h32 += binary.LittleEndian.Uint32(buf[p:p+4]) * prime3
+		h32 = rol17(h32) * prime4
+	}
+	for ; p < n; p++ {
+		h32 += uint32(buf[p]) * prime5
+		h32 = rol11(h32) * prime1
+	}
+
+	h32 ^= h32 >> 15
+	h32 *= prime2
+	h32 ^= h32 >> 13
+	h32 *= prime3
+	h32 ^= h32 >> 16
+
+	return h32
+}
+
+// ChecksumZero returns the 32-bit hash value of input.
+func ChecksumZero(input []byte) uint32 {
+	n := len(input)
+	h32 := uint32(n)
+
+	if n < 16 {
+		h32 += prime5
+	} else {
+		v1 := prime1plus2
+		v2 := prime2
+		v3 := uint32(0)
+		v4 := prime1minus
+		p := 0
+		for n := n - 16; p <= n; p += 16 {
+			sub := input[p:][:16] // BCE hint for the compiler.
+			v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1
+			v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1
+			v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1
+			v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1
+		}
+		input = input[p:]
+		n -= p
+		h32 += rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
+	}
+
+	p := 0
+	for n := n - 4; p <= n; p += 4 {
+		h32 += binary.LittleEndian.Uint32(input[p:p+4]) * prime3
+		h32 = rol17(h32) * prime4
+	}
+	for p < n {
+		h32 += uint32(input[p]) * prime5
+		h32 = rol11(h32) * prime1
+		p++
+	}
+
+	h32 ^= h32 >> 15
+	h32 *= prime2
+	h32 ^= h32 >> 13
+	h32 *= prime3
+	h32 ^= h32 >> 16
+
+	return h32
+}
+
+// Uint32Zero hashes x with seed 0.
+func Uint32Zero(x uint32) uint32 {
+	h := prime5 + 4 + x*prime3
+	h = rol17(h) * prime4
+	h ^= h >> 15
+	h *= prime2
+	h ^= h >> 13
+	h *= prime3
+	h ^= h >> 16
+	return h
+}
+
+func rol1(u uint32) uint32 {
+	return u<<1 | u>>31
+}
+
+func rol7(u uint32) uint32 {
+	return u<<7 | u>>25
+}
+
+func rol11(u uint32) uint32 {
+	return u<<11 | u>>21
+}
+
+func rol12(u uint32) uint32 {
+	return u<<12 | u>>20
+}
+
+func rol13(u uint32) uint32 {
+	return u<<13 | u>>19
+}
+
+func rol17(u uint32) uint32 {
+	return u<<17 | u>>15
+}
+
+func rol18(u uint32) uint32 {
+	return u<<18 | u>>14
+}
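
The streaming state (XXHZero) and the one-shot function produce the same digest. A quick sketch; note the package lives under internal/, so it only compiles from within this module:

```go
package main

import (
	"fmt"

	"github.com/pierrec/lz4/internal/xxh32"
)

func main() {
	data := []byte("hello, xxh32")

	// One-shot hashing.
	fmt.Printf("%08x\n", xxh32.ChecksumZero(data))

	// Streaming: feeding the same bytes in pieces yields the same digest.
	var h xxh32.XXHZero
	_, _ = h.Write(data[:5])
	_, _ = h.Write(data[5:])
	fmt.Printf("%08x\n", h.Sum32())
}
```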
diff --git a/vendor/github.com/pierrec/lz4/lz4.go b/vendor/github.com/pierrec/lz4/lz4.go
new file mode 100644
index 00000000000..a3284bdf708
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/lz4.go
@@ -0,0 +1,116 @@
+// Package lz4 implements reading and writing lz4 compressed data (a frame),
+// as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html.
+//
+// Although the block level compression and decompression functions are exposed and are fully compatible
+// with the lz4 block format definition, they are low level and should not be used directly.
+// For a complete description of an lz4 compressed block, see:
+// http://fastcompression.blogspot.fr/2011/05/lz4-explained.html
+//
+// See https://github.com/Cyan4973/lz4 for the reference C implementation.
+//
+package lz4
+
+import (
+	"math/bits"
+	"sync"
+)
+
+const (
+	// Extension is the LZ4 frame file name extension
+	Extension = ".lz4"
+	// Version is the LZ4 frame format version
+	Version = 1
+
+	frameMagic       uint32 = 0x184D2204
+	frameSkipMagic   uint32 = 0x184D2A50
+	frameMagicLegacy uint32 = 0x184C2102
+
+	// The following constants are used to setup the compression algorithm.
+	minMatch            = 4  // the minimum length of a match sequence (4 bytes)
+	winSizeLog          = 16 // LZ4 64Kb window size limit
+	winSize             = 1 << winSizeLog
+	winMask             = winSize - 1 // 64Kb window of previous data for dependent blocks
+	compressedBlockFlag = 1 << 31
+	compressedBlockMask = compressedBlockFlag - 1
+
+	// hashLog determines the size of the hash table used to quickly find a previous match position.
+	// Its value influences the compression speed and memory usage, the lower the faster,
+	// but at the expense of the compression ratio.
+	// 16 seems to be the best compromise for fast compression.
+	hashLog = 16
+	htSize  = 1 << hashLog
+
+	mfLimit = 10 + minMatch // The last match cannot start within the last 14 bytes.
+)
+
+// map the block max size ID to its value in bytes: 64KB, 256KB, 1MB and 4MB.
+const (
+	blockSize64K = 1 << (16 + 2*iota)
+	blockSize256K
+	blockSize1M
+	blockSize4M
+)
+
+var (
+	// Keep a pool of buffers for each valid block size.
+	bsMapValue = [...]*sync.Pool{
+		newBufferPool(2 * blockSize64K),
+		newBufferPool(2 * blockSize256K),
+		newBufferPool(2 * blockSize1M),
+		newBufferPool(2 * blockSize4M),
+	}
+)
+
+// newBufferPool returns a pool for buffers of the given size.
+func newBufferPool(size int) *sync.Pool {
+	return &sync.Pool{
+		New: func() interface{} {
+			return make([]byte, size)
+		},
+	}
+}
+
+// getBuffer retrieves a buffer of the given size from its pool.
+func getBuffer(size int) []byte {
+	idx := blockSizeValueToIndex(size) - 4
+	return bsMapValue[idx].Get().([]byte)
+}
+
+// putBuffer returns a buffer to its pool.
+func putBuffer(size int, buf []byte) {
+	if cap(buf) > 0 {
+		idx := blockSizeValueToIndex(size) - 4
+		bsMapValue[idx].Put(buf[:cap(buf)])
+	}
+}
+func blockSizeIndexToValue(i byte) int {
+	return 1 << (16 + 2*uint(i))
+}
+func isValidBlockSize(size int) bool {
+	const blockSizeMask = blockSize64K | blockSize256K | blockSize1M | blockSize4M
+
+	return size&blockSizeMask > 0 && bits.OnesCount(uint(size)) == 1
+}
+func blockSizeValueToIndex(size int) byte {
+	return 4 + byte(bits.TrailingZeros(uint(size)>>16)/2)
+}
+
+// Header describes the various flags that can be set on a Writer or obtained from a Reader.
+// The default values match those of the LZ4 frame format definition
+// (http://fastcompression.blogspot.com/2013/04/lz4-streaming-format-final.html).
+//
+// NB. in a Reader, in case of concatenated frames, the Header values may change between Read() calls.
+// It is the caller's responsibility to check them if necessary.
+type Header struct {
+	BlockChecksum    bool   // Compressed blocks checksum flag.
+	NoChecksum       bool   // Frame checksum flag.
+	BlockMaxSize     int    // Size of the uncompressed data block (one of [64KB, 256KB, 1MB, 4MB]). Default=4MB.
+	Size             uint64 // Frame total size. It is _not_ computed by the Writer.
+	CompressionLevel int    // Compression level (higher is better, use 0 for fastest compression).
+	done             bool   // Header processed flag (Read or Write and checked).
+}
+
+// Reset resets the internal "header processed" flag.
+func (h *Header) Reset() {
+	h.done = false
+}
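
The frame descriptor stores BlockMaxSize as an ID in the 4..7 range (the same range readHeader validates below). A standalone sketch of the arithmetic in blockSizeValueToIndex:

```go
package main

import (
	"fmt"
	"math/bits"
)

func main() {
	// 64KB -> 4, 256KB -> 5, 1MB -> 6, 4MB -> 7.
	for _, size := range []int{64 << 10, 256 << 10, 1 << 20, 4 << 20} {
		idx := 4 + bits.TrailingZeros(uint(size)>>16)/2
		fmt.Printf("block max size %7d -> descriptor ID %d\n", size, idx)
	}
}
```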
diff --git a/vendor/github.com/pierrec/lz4/lz4_go1.10.go b/vendor/github.com/pierrec/lz4/lz4_go1.10.go
new file mode 100644
index 00000000000..9a0fb00709d
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/lz4_go1.10.go
@@ -0,0 +1,29 @@
+//+build go1.10
+
+package lz4
+
+import (
+	"fmt"
+	"strings"
+)
+
+func (h Header) String() string {
+	var s strings.Builder
+
+	s.WriteString(fmt.Sprintf("%T{", h))
+	if h.BlockChecksum {
+		s.WriteString("BlockChecksum: true ")
+	}
+	if h.NoChecksum {
+		s.WriteString("NoChecksum: true ")
+	}
+	if bs := h.BlockMaxSize; bs != 0 && bs != 4<<20 {
+		s.WriteString(fmt.Sprintf("BlockMaxSize: %d ", bs))
+	}
+	if l := h.CompressionLevel; l != 0 {
+		s.WriteString(fmt.Sprintf("CompressionLevel: %d ", l))
+	}
+	s.WriteByte('}')
+
+	return s.String()
+}
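
String only prints fields that differ from the frame defaults. A brief sketch of the expected shape:

```go
package main

import (
	"fmt"

	"github.com/pierrec/lz4"
)

func main() {
	h := lz4.Header{BlockChecksum: true, BlockMaxSize: 256 << 10}
	// Prints something like: lz4.Header{BlockChecksum: true BlockMaxSize: 262144 }
	fmt.Println(h.String())
}
```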
diff --git a/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go b/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go
new file mode 100644
index 00000000000..12c761a2e7f
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go
@@ -0,0 +1,29 @@
+//+build !go1.10
+
+package lz4
+
+import (
+	"bytes"
+	"fmt"
+)
+
+func (h Header) String() string {
+	var s bytes.Buffer
+
+	s.WriteString(fmt.Sprintf("%T{", h))
+	if h.BlockChecksum {
+		s.WriteString("BlockChecksum: true ")
+	}
+	if h.NoChecksum {
+		s.WriteString("NoChecksum: true ")
+	}
+	if bs := h.BlockMaxSize; bs != 0 && bs != 4<<20 {
+		s.WriteString(fmt.Sprintf("BlockMaxSize: %d ", bs))
+	}
+	if l := h.CompressionLevel; l != 0 {
+		s.WriteString(fmt.Sprintf("CompressionLevel: %d ", l))
+	}
+	s.WriteByte('}')
+
+	return s.String()
+}
diff --git a/vendor/github.com/pierrec/lz4/reader.go b/vendor/github.com/pierrec/lz4/reader.go
new file mode 100644
index 00000000000..87dd72bd0db
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/reader.go
@@ -0,0 +1,335 @@
+package lz4
+
+import (
+	"encoding/binary"
+	"fmt"
+	"io"
+	"io/ioutil"
+
+	"github.com/pierrec/lz4/internal/xxh32"
+)
+
+// Reader implements the LZ4 frame decoder.
+// The Header is set after the first call to Read().
+// The Header may change between Read() calls in case of concatenated frames.
+type Reader struct {
+	Header
+	// Handler called when a block has been successfully read.
+	// It provides the number of bytes read.
+	OnBlockDone func(size int)
+
+	buf      [8]byte       // Scrap buffer.
+	pos      int64         // Current position in src.
+	src      io.Reader     // Source.
+	zdata    []byte        // Compressed data.
+	data     []byte        // Uncompressed data.
+	idx      int           // Index of unread bytes into data.
+	checksum xxh32.XXHZero // Frame hash.
+	skip     int64         // Bytes to skip before next read.
+	dpos     int64         // Position in dest
+}
+
+// NewReader returns a new LZ4 frame decoder.
+// No access to the underlying io.Reader is performed.
+func NewReader(src io.Reader) *Reader {
+	r := &Reader{src: src}
+	return r
+}
+
+// readHeader checks the frame magic number and parses the frame descriptor.
+// Skippable frames are supported even as a first frame, although the LZ4
+// specification recommends that skippable frames not be used as first frames.
+func (z *Reader) readHeader(first bool) error {
+	defer z.checksum.Reset()
+
+	buf := z.buf[:]
+	for {
+		magic, err := z.readUint32()
+		if err != nil {
+			z.pos += 4
+			if !first && err == io.ErrUnexpectedEOF {
+				return io.EOF
+			}
+			return err
+		}
+		if magic == frameMagic {
+			break
+		}
+		if magic>>8 != frameSkipMagic>>8 {
+			return ErrInvalid
+		}
+		skipSize, err := z.readUint32()
+		if err != nil {
+			return err
+		}
+		z.pos += 4
+		m, err := io.CopyN(ioutil.Discard, z.src, int64(skipSize))
+		if err != nil {
+			return err
+		}
+		z.pos += m
+	}
+
+	// Header.
+	if _, err := io.ReadFull(z.src, buf[:2]); err != nil {
+		return err
+	}
+	z.pos += 8
+
+	b := buf[0]
+	if v := b >> 6; v != Version {
+		return fmt.Errorf("lz4: invalid version: got %d; expected %d", v, Version)
+	}
+	if b>>5&1 == 0 {
+		return ErrBlockDependency
+	}
+	z.BlockChecksum = b>>4&1 > 0
+	frameSize := b>>3&1 > 0
+	z.NoChecksum = b>>2&1 == 0
+
+	bmsID := buf[1] >> 4 & 0x7
+	if bmsID < 4 || bmsID > 7 {
+		return fmt.Errorf("lz4: invalid block max size ID: %d", bmsID)
+	}
+	bSize := blockSizeIndexToValue(bmsID - 4)
+	z.BlockMaxSize = bSize
+
+	// Allocate the compressed/uncompressed buffers.
+	// The compressed buffer cannot exceed the uncompressed one.
+	if n := 2 * bSize; cap(z.zdata) < n {
+		z.zdata = make([]byte, n, n)
+	}
+	if debugFlag {
+		debug("header block max size id=%d size=%d", bmsID, bSize)
+	}
+	z.zdata = z.zdata[:bSize]
+	z.data = z.zdata[:cap(z.zdata)][bSize:]
+	z.idx = len(z.data)
+
+	_, _ = z.checksum.Write(buf[0:2])
+
+	if frameSize {
+		buf := buf[:8]
+		if _, err := io.ReadFull(z.src, buf); err != nil {
+			return err
+		}
+		z.Size = binary.LittleEndian.Uint64(buf)
+		z.pos += 8
+		_, _ = z.checksum.Write(buf)
+	}
+
+	// Header checksum.
+	if _, err := io.ReadFull(z.src, buf[:1]); err != nil {
+		return err
+	}
+	z.pos++
+	if h := byte(z.checksum.Sum32() >> 8 & 0xFF); h != buf[0] {
+		return fmt.Errorf("lz4: invalid header checksum: got %x; expected %x", buf[0], h)
+	}
+
+	z.Header.done = true
+	if debugFlag {
+		debug("header read: %v", z.Header)
+	}
+
+	return nil
+}
+
+// Read decompresses data from the underlying source into the supplied buffer.
+//
+// Since there can be multiple streams concatenated, Header values may
+// change between calls to Read(). If that is the case, no data is actually read from
+// the underlying io.Reader, to allow for potential input buffer resizing.
+func (z *Reader) Read(buf []byte) (int, error) {
+	if debugFlag {
+		debug("Read buf len=%d", len(buf))
+	}
+	if !z.Header.done {
+		if err := z.readHeader(true); err != nil {
+			return 0, err
+		}
+		if debugFlag {
+			debug("header read OK compressed buffer %d / %d uncompressed buffer %d : %d index=%d",
+				len(z.zdata), cap(z.zdata), len(z.data), cap(z.data), z.idx)
+		}
+	}
+
+	if len(buf) == 0 {
+		return 0, nil
+	}
+
+	if z.idx == len(z.data) {
+		// No data ready for reading, process the next block.
+		if debugFlag {
+			debug("reading block from writer")
+		}
+		// Reset uncompressed buffer
+		z.data = z.zdata[:cap(z.zdata)][len(z.zdata):]
+
+		// Block length: 0 = end of frame, highest bit set: uncompressed.
+		bLen, err := z.readUint32()
+		if err != nil {
+			return 0, err
+		}
+		z.pos += 4
+
+		if bLen == 0 {
+			// End of frame reached.
+			if !z.NoChecksum {
+				// Validate the frame checksum.
+				checksum, err := z.readUint32()
+				if err != nil {
+					return 0, err
+				}
+				if debugFlag {
+					debug("frame checksum got=%x / want=%x", z.checksum.Sum32(), checksum)
+				}
+				z.pos += 4
+				if h := z.checksum.Sum32(); checksum != h {
+					return 0, fmt.Errorf("lz4: invalid frame checksum: got %x; expected %x", h, checksum)
+				}
+			}
+
+			// Get ready for the next concatenated frame and keep the position.
+			pos := z.pos
+			z.Reset(z.src)
+			z.pos = pos
+
+			// Since multiple frames can be concatenated, check for more.
+			return 0, z.readHeader(false)
+		}
+
+		if debugFlag {
+			debug("raw block size %d", bLen)
+		}
+		if bLen&compressedBlockFlag > 0 {
+			// Uncompressed block.
+			bLen &= compressedBlockMask
+			if debugFlag {
+				debug("uncompressed block size %d", bLen)
+			}
+			if int(bLen) > cap(z.data) {
+				return 0, fmt.Errorf("lz4: invalid block size: %d", bLen)
+			}
+			z.data = z.data[:bLen]
+			if _, err := io.ReadFull(z.src, z.data); err != nil {
+				return 0, err
+			}
+			z.pos += int64(bLen)
+			if z.OnBlockDone != nil {
+				z.OnBlockDone(int(bLen))
+			}
+
+			if z.BlockChecksum {
+				checksum, err := z.readUint32()
+				if err != nil {
+					return 0, err
+				}
+				z.pos += 4
+
+				if h := xxh32.ChecksumZero(z.data); h != checksum {
+					return 0, fmt.Errorf("lz4: invalid block checksum: got %x; expected %x", h, checksum)
+				}
+			}
+
+		} else {
+			// Compressed block.
+			if debugFlag {
+				debug("compressed block size %d", bLen)
+			}
+			if int(bLen) > cap(z.data) {
+				return 0, fmt.Errorf("lz4: invalid block size: %d", bLen)
+			}
+			zdata := z.zdata[:bLen]
+			if _, err := io.ReadFull(z.src, zdata); err != nil {
+				return 0, err
+			}
+			z.pos += int64(bLen)
+
+			if z.BlockChecksum {
+				checksum, err := z.readUint32()
+				if err != nil {
+					return 0, err
+				}
+				z.pos += 4
+
+				if h := xxh32.ChecksumZero(zdata); h != checksum {
+					return 0, fmt.Errorf("lz4: invalid block checksum: got %x; expected %x", h, checksum)
+				}
+			}
+
+			n, err := UncompressBlock(zdata, z.data)
+			if err != nil {
+				return 0, err
+			}
+			z.data = z.data[:n]
+			if z.OnBlockDone != nil {
+				z.OnBlockDone(n)
+			}
+		}
+
+		if !z.NoChecksum {
+			_, _ = z.checksum.Write(z.data)
+			if debugFlag {
+				debug("current frame checksum %x", z.checksum.Sum32())
+			}
+		}
+		z.idx = 0
+	}
+
+	if z.skip > int64(len(z.data[z.idx:])) {
+		z.skip -= int64(len(z.data[z.idx:]))
+		z.dpos += int64(len(z.data[z.idx:]))
+		z.idx = len(z.data)
+		return 0, nil
+	}
+
+	z.idx += int(z.skip)
+	z.dpos += z.skip
+	z.skip = 0
+
+	n := copy(buf, z.data[z.idx:])
+	z.idx += n
+	z.dpos += int64(n)
+	if debugFlag {
+		debug("copied %d bytes to input", n)
+	}
+
+	return n, nil
+}
+
+// Seek implements io.Seeker, but supports seeking forward from the current
+// position only. Any other seek returns an error. It allows skipping output
+// bytes which aren't needed, which in some scenarios is faster than reading
+// and discarding them.
+// Note this may cause future calls to Read() to read 0 bytes if all of the
+// data they would have returned is skipped.
+func (z *Reader) Seek(offset int64, whence int) (int64, error) {
+	if offset < 0 || whence != io.SeekCurrent {
+		return z.dpos + z.skip, ErrUnsupportedSeek
+	}
+	z.skip += offset
+	return z.dpos + z.skip, nil
+}
+
+// Reset discards the Reader's state and makes it equivalent to the
+// result of its original state from NewReader, but reading from r instead.
+// This permits reusing a Reader rather than allocating a new one.
+func (z *Reader) Reset(r io.Reader) {
+	z.Header = Header{}
+	z.pos = 0
+	z.src = r
+	z.zdata = z.zdata[:0]
+	z.data = z.data[:0]
+	z.idx = 0
+	z.checksum.Reset()
+}
+
+// readUint32 reads a uint32 from the underlying source.
+// It reuses the already allocated scrap buffer to avoid additional allocations.
+func (z *Reader) readUint32() (uint32, error) {
+	buf := z.buf[:4]
+	_, err := io.ReadFull(z.src, buf)
+	x := binary.LittleEndian.Uint32(buf)
+	return x, err
+}
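
A round-trip sketch showing the Reader together with its forward-only Seek; the frame is produced by the Writer from this same diff:

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/pierrec/lz4"
)

func main() {
	// Build a frame to read back.
	var frame bytes.Buffer
	zw := lz4.NewWriter(&frame)
	_, _ = zw.Write([]byte("0123456789"))
	_ = zw.Close()

	zr := lz4.NewReader(&frame)
	// Skip the first 4 decompressed bytes without copying them out.
	if _, err := zr.Seek(4, io.SeekCurrent); err != nil {
		panic(err)
	}
	rest, err := io.ReadAll(zr)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", rest) // 456789
}
```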
diff --git a/vendor/github.com/pierrec/lz4/reader_legacy.go b/vendor/github.com/pierrec/lz4/reader_legacy.go
new file mode 100644
index 00000000000..1670a77d02a
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/reader_legacy.go
@@ -0,0 +1,207 @@
+package lz4
+
+import (
+	"encoding/binary"
+	"fmt"
+	"io"
+)
+
+// ReaderLegacy implements the legacy (LZ4Demo) frame decoder.
+// The Header is set after the first call to Read().
+type ReaderLegacy struct {
+	Header
+	// Handler called when a block has been successfully read.
+	// It provides the number of bytes read.
+	OnBlockDone func(size int)
+
+	lastBlock bool
+	buf       [8]byte   // Scrap buffer.
+	pos       int64     // Current position in src.
+	src       io.Reader // Source.
+	zdata     []byte    // Compressed data.
+	data      []byte    // Uncompressed data.
+	idx       int       // Index of unread bytes into data.
+	skip      int64     // Bytes to skip before next read.
+	dpos      int64     // Position in dest
+}
+
+// NewReaderLegacy returns a new decoder for legacy (LZ4Demo) frames.
+// No access to the underlying io.Reader is performed.
+func NewReaderLegacy(src io.Reader) *ReaderLegacy {
+	r := &ReaderLegacy{src: src}
+	return r
+}
+
+// readLegacyHeader checks the legacy frame magic number and sizes the
+// internal buffers. Legacy frames carry no descriptor and use a fixed
+// 8MB block size.
+func (z *ReaderLegacy) readLegacyHeader() error {
+	z.lastBlock = false
+	magic, err := z.readUint32()
+	if err != nil {
+		z.pos += 4
+		if err == io.ErrUnexpectedEOF {
+			return io.EOF
+		}
+		return err
+	}
+	if magic != frameMagicLegacy {
+		return ErrInvalid
+	}
+	z.pos += 4
+
+	// Legacy has fixed 8MB blocksizes
+	// https://github.com/lz4/lz4/blob/dev/doc/lz4_Frame_format.md#legacy-frame
+	bSize := blockSize4M * 2
+
+	// Allocate the compressed/uncompressed buffers.
+	// The compressed buffer cannot exceed the uncompressed one.
+	if n := 2 * bSize; cap(z.zdata) < n {
+		z.zdata = make([]byte, n, n)
+	}
+	if debugFlag {
+		debug("header block max size size=%d", bSize)
+	}
+	z.zdata = z.zdata[:bSize]
+	z.data = z.zdata[:cap(z.zdata)][bSize:]
+	z.idx = len(z.data)
+
+	z.Header.done = true
+	if debugFlag {
+		debug("header read: %v", z.Header)
+	}
+
+	return nil
+}
+
+// Read decompresses data from the underlying source into the supplied buffer.
+//
+// Since there can be multiple streams concatenated, Header values may
+// change between calls to Read(). If that is the case, no data is actually read from
+// the underlying io.Reader, to allow for potential input buffer resizing.
+func (z *ReaderLegacy) Read(buf []byte) (int, error) {
+	if debugFlag {
+		debug("Read buf len=%d", len(buf))
+	}
+	if !z.Header.done {
+		if err := z.readLegacyHeader(); err != nil {
+			return 0, err
+		}
+		if debugFlag {
+			debug("header read OK compressed buffer %d / %d uncompressed buffer %d : %d index=%d",
+				len(z.zdata), cap(z.zdata), len(z.data), cap(z.data), z.idx)
+		}
+	}
+
+	if len(buf) == 0 {
+		return 0, nil
+	}
+
+	if z.idx == len(z.data) {
+		// No data ready for reading, process the next block.
+		if debugFlag {
+			debug("  reading block from writer %d %d", z.idx, blockSize4M*2)
+		}
+
+		// Reset uncompressed buffer
+		z.data = z.zdata[:cap(z.zdata)][len(z.zdata):]
+
+		bLen, err := z.readUint32()
+		if err != nil {
+			return 0, err
+		}
+		if debugFlag {
+			debug("   bLen %d (0x%x) offset = %d (0x%x)", bLen, bLen, z.pos, z.pos)
+		}
+		z.pos += 4
+
+		// Legacy blocks are always compressed, even when detrimental
+		if debugFlag {
+			debug("   compressed block size %d", bLen)
+		}
+
+		if int(bLen) > cap(z.data) {
+			return 0, fmt.Errorf("lz4: invalid block size: %d", bLen)
+		}
+		zdata := z.zdata[:bLen]
+		if _, err := io.ReadFull(z.src, zdata); err != nil {
+			return 0, err
+		}
+		z.pos += int64(bLen)
+
+		n, err := UncompressBlock(zdata, z.data)
+		if err != nil {
+			return 0, err
+		}
+
+		z.data = z.data[:n]
+		if z.OnBlockDone != nil {
+			z.OnBlockDone(n)
+		}
+
+		z.idx = 0
+
+		// Legacy blocks are fixed to 8MB; if we read a decompressed block
+		// smaller than this, it means we've reached the end.
+		if n < blockSize4M*2 {
+			z.lastBlock = true
+		}
+	}
+
+	if z.skip > int64(len(z.data[z.idx:])) {
+		z.skip -= int64(len(z.data[z.idx:]))
+		z.dpos += int64(len(z.data[z.idx:]))
+		z.idx = len(z.data)
+		return 0, nil
+	}
+
+	z.idx += int(z.skip)
+	z.dpos += z.skip
+	z.skip = 0
+
+	n := copy(buf, z.data[z.idx:])
+	z.idx += n
+	z.dpos += int64(n)
+	if debugFlag {
+		debug("%v] copied %d bytes to input (%d:%d)", z.lastBlock, n, z.idx, len(z.data))
+	}
+	if z.lastBlock && len(z.data) == z.idx {
+		return n, io.EOF
+	}
+	return n, nil
+}
+
+// Seek implements io.Seeker, but supports seeking forward from the current
+// position only. Any other seek returns an error. It allows skipping output
+// bytes which aren't needed, which in some scenarios is faster than reading
+// and discarding them.
+// Note this may cause future calls to Read() to read 0 bytes if all of the
+// data they would have returned is skipped.
+func (z *ReaderLegacy) Seek(offset int64, whence int) (int64, error) {
+	if offset < 0 || whence != io.SeekCurrent {
+		return z.dpos + z.skip, ErrUnsupportedSeek
+	}
+	z.skip += offset
+	return z.dpos + z.skip, nil
+}
+
+// Reset discards the ReaderLegacy's state and makes it equivalent to the
+// result of its original state from NewReaderLegacy, but reading from r instead.
+// This permits reusing a ReaderLegacy rather than allocating a new one.
+func (z *ReaderLegacy) Reset(r io.Reader) {
+	z.Header = Header{}
+	z.pos = 0
+	z.src = r
+	z.zdata = z.zdata[:0]
+	z.data = z.data[:0]
+	z.idx = 0
+}
+
+// readUint32 reads a uint32 from the underlying source.
+// It reuses the already allocated scrap buffer to avoid additional allocations.
+	buf := z.buf[:4]
+	_, err := io.ReadFull(z.src, buf)
+	x := binary.LittleEndian.Uint32(buf)
+	return x, err
+}
diff --git a/vendor/github.com/pierrec/lz4/writer.go b/vendor/github.com/pierrec/lz4/writer.go
new file mode 100644
index 00000000000..6a60a9a6a57
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/writer.go
@@ -0,0 +1,413 @@
+package lz4
+
+import (
+	"encoding/binary"
+	"fmt"
+	"io"
+	"runtime"
+
+	"github.com/pierrec/lz4/internal/xxh32"
+)
+
+// zResult contains the results of compressing a block.
+type zResult struct {
+	size     uint32 // Block header
+	data     []byte // Compressed data
+	checksum uint32 // Data checksum
+}
+
+// Writer implements the LZ4 frame encoder.
+type Writer struct {
+	Header
+	// Handler called when a block has been successfully written out.
+	// It provides the number of bytes written.
+	OnBlockDone func(size int)
+
+	buf       [19]byte      // magic number(4) + header(flags(2)+[Size(8)+DictID(4)]+checksum(1)) does not exceed 19 bytes
+	dst       io.Writer     // Destination.
+	checksum  xxh32.XXHZero // Frame checksum.
+	data      []byte        // Data to be compressed + buffer for compressed data.
+	idx       int           // Index into data.
+	hashtable [winSize]int  // Hash table used in CompressBlock().
+
+	// For concurrency.
+	c   chan chan zResult // Channel for block compression goroutines and writer goroutine.
+	err error             // Any error encountered while writing to the underlying destination.
+}
+
+// NewWriter returns a new LZ4 frame encoder.
+// No access to the underlying io.Writer is performed.
+// The supplied Header is checked at the first Write.
+// It is ok to change it before the first Write but then not until a Reset() is performed.
+func NewWriter(dst io.Writer) *Writer {
+	z := new(Writer)
+	z.Reset(dst)
+	return z
+}
+
+// WithConcurrency sets the number of concurrent goroutines used for compression.
+// A negative value sets the concurrency to GOMAXPROCS.
+func (z *Writer) WithConcurrency(n int) *Writer {
+	switch {
+	case n == 0 || n == 1:
+		z.c = nil
+		return z
+	case n < 0:
+		n = runtime.GOMAXPROCS(0)
+	}
+	z.c = make(chan chan zResult, n)
+	// Writer goroutine managing concurrent block compression goroutines.
+	go func() {
+		// Process next block compression item.
+		for c := range z.c {
+			// Read the next compressed block result.
+			// Waiting here ensures that the blocks are output in the order they were sent.
+			// The incoming channel is always closed as it indicates to the caller that
+			// the block has been processed.
+			res := <-c
+			n := len(res.data)
+			if n == 0 {
+				// Notify the block compression routine that we are done with its result.
+				// This is used when a sentinel block is sent to terminate the compression.
+				close(c)
+				return
+			}
+			// Write the block.
+			if err := z.writeUint32(res.size); err != nil && z.err == nil {
+				z.err = err
+			}
+			if _, err := z.dst.Write(res.data); err != nil && z.err == nil {
+				z.err = err
+			}
+			if z.BlockChecksum {
+				if err := z.writeUint32(res.checksum); err != nil && z.err == nil {
+					z.err = err
+				}
+			}
+			if isCompressed := res.size&compressedBlockFlag == 0; isCompressed {
+				// It is now safe to release the buffer as no longer in use by any goroutine.
+				putBuffer(cap(res.data), res.data)
+			}
+			if h := z.OnBlockDone; h != nil {
+				h(n)
+			}
+			close(c)
+		}
+	}()
+	return z
+}
+
+// newBuffers fetches a pooled buffer whose size matches the one in Header.
+// Its first half holds the data to be compressed; its second half receives the compressed output.
+func (z *Writer) newBuffers() {
+	bSize := z.Header.BlockMaxSize
+	buf := getBuffer(bSize)
+	z.data = buf[:bSize] // Uncompressed buffer is the first half.
+}
+
+// freeBuffers puts the writer's buffers back to the pool.
+func (z *Writer) freeBuffers() {
+	// Put the buffer back into the pool, if any.
+	putBuffer(z.Header.BlockMaxSize, z.data)
+	z.data = nil
+}
+
+// writeHeader builds and writes the header (magic+header) to the underlying io.Writer.
+func (z *Writer) writeHeader() error {
+	// Default to 4Mb if BlockMaxSize is not set.
+	if z.Header.BlockMaxSize == 0 {
+		z.Header.BlockMaxSize = blockSize4M
+	}
+	// The only option that needs to be validated.
+	bSize := z.Header.BlockMaxSize
+	if !isValidBlockSize(z.Header.BlockMaxSize) {
+		return fmt.Errorf("lz4: invalid block max size: %d", bSize)
+	}
+	// Allocate the compressed/uncompressed buffers.
+	// The compressed buffer cannot exceed the uncompressed one.
+	z.newBuffers()
+	z.idx = 0
+
+	// Size is optional.
+	buf := z.buf[:]
+
+	// Set the fixed size data: magic number, block max size and flags.
+	binary.LittleEndian.PutUint32(buf[0:], frameMagic)
+	flg := byte(Version << 6)
+	flg |= 1 << 5 // No block dependency.
+	if z.Header.BlockChecksum {
+		flg |= 1 << 4
+	}
+	if z.Header.Size > 0 {
+		flg |= 1 << 3
+	}
+	if !z.Header.NoChecksum {
+		flg |= 1 << 2
+	}
+	buf[4] = flg
+	buf[5] = blockSizeValueToIndex(z.Header.BlockMaxSize) << 4
+
+	// Current buffer size: magic(4) + flags(1) + block max size (1).
+	n := 6
+	// Optional items.
+	if z.Header.Size > 0 {
+		binary.LittleEndian.PutUint64(buf[n:], z.Header.Size)
+		n += 8
+	}
+
+	// The header checksum includes the flags, block max size and optional Size.
+	buf[n] = byte(xxh32.ChecksumZero(buf[4:n]) >> 8 & 0xFF)
+	z.checksum.Reset()
+
+	// Header ready, write it out.
+	if _, err := z.dst.Write(buf[0 : n+1]); err != nil {
+		return err
+	}
+	z.Header.done = true
+	if debugFlag {
+		debug("wrote header %v", z.Header)
+	}
+
+	return nil
+}
+
+// Write compresses data from the supplied buffer into the underlying io.Writer.
+// Write does not return until the data has been written.
+func (z *Writer) Write(buf []byte) (int, error) {
+	if !z.Header.done {
+		if err := z.writeHeader(); err != nil {
+			return 0, err
+		}
+	}
+	if debugFlag {
+		debug("input buffer len=%d index=%d", len(buf), z.idx)
+	}
+
+	zn := len(z.data)
+	var n int
+	for len(buf) > 0 {
+		if z.idx == 0 && len(buf) >= zn {
+			// Avoid a copy as there is enough data for a block.
+			if err := z.compressBlock(buf[:zn]); err != nil {
+				return n, err
+			}
+			n += zn
+			buf = buf[zn:]
+			continue
+		}
+		// Accumulate the data to be compressed.
+		m := copy(z.data[z.idx:], buf)
+		n += m
+		z.idx += m
+		buf = buf[m:]
+		if debugFlag {
+			debug("%d bytes copied to buf, current index %d", n, z.idx)
+		}
+
+		if z.idx < len(z.data) {
+			// Buffer not filled.
+			if debugFlag {
+				debug("need more data for compression")
+			}
+			return n, nil
+		}
+
+		// Buffer full.
+		if err := z.compressBlock(z.data); err != nil {
+			return n, err
+		}
+		z.idx = 0
+	}
+
+	return n, nil
+}
+
+// compressBlock compresses a block.
+func (z *Writer) compressBlock(data []byte) error {
+	if !z.NoChecksum {
+		_, _ = z.checksum.Write(data)
+	}
+
+	if z.c != nil {
+		c := make(chan zResult)
+		z.c <- c // Send now to guarantee order
+		go writerCompressBlock(c, z.Header, data)
+		return nil
+	}
+
+	zdata := z.data[z.Header.BlockMaxSize:cap(z.data)]
+	// The compressed block size cannot exceed the input's.
+	var zn int
+
+	if level := z.Header.CompressionLevel; level != 0 {
+		zn, _ = CompressBlockHC(data, zdata, level)
+	} else {
+		zn, _ = CompressBlock(data, zdata, z.hashtable[:])
+	}
+
+	var bLen uint32
+	if debugFlag {
+		debug("block compression %d => %d", len(data), zn)
+	}
+	if zn > 0 && zn < len(data) {
+		// Compressible and compressed size smaller than uncompressed: ok!
+		bLen = uint32(zn)
+		zdata = zdata[:zn]
+	} else {
+		// Uncompressed block.
+		bLen = uint32(len(data)) | compressedBlockFlag
+		zdata = data
+	}
+	if debugFlag {
+		debug("block compression to be written len=%d data len=%d", bLen, len(zdata))
+	}
+
+	// Write the block.
+	if err := z.writeUint32(bLen); err != nil {
+		return err
+	}
+	written, err := z.dst.Write(zdata)
+	if err != nil {
+		return err
+	}
+	if h := z.OnBlockDone; h != nil {
+		h(written)
+	}
+
+	if !z.BlockChecksum {
+		if debugFlag {
+			debug("current frame checksum %x", z.checksum.Sum32())
+		}
+		return nil
+	}
+	checksum := xxh32.ChecksumZero(zdata)
+	if debugFlag {
+		debug("block checksum %x", checksum)
+		defer func() { debug("current frame checksum %x", z.checksum.Sum32()) }()
+	}
+	return z.writeUint32(checksum)
+}
+
+// Flush flushes any pending compressed data to the underlying writer.
+// Flush does not return until the data has been written.
+// If the underlying writer returns an error, Flush returns that error.
+func (z *Writer) Flush() error {
+	if debugFlag {
+		debug("flush with index %d", z.idx)
+	}
+	if z.idx == 0 {
+		return nil
+	}
+
+	data := z.data[:z.idx]
+	z.idx = 0
+	if z.c == nil {
+		return z.compressBlock(data)
+	}
+	if !z.NoChecksum {
+		_, _ = z.checksum.Write(data)
+	}
+	c := make(chan zResult)
+	z.c <- c
+	writerCompressBlock(c, z.Header, data)
+	return nil
+}
+
+func (z *Writer) close() error {
+	if z.c == nil {
+		return nil
+	}
+	// Send a sentinel block (no data to compress) to terminate the writer main goroutine.
+	c := make(chan zResult)
+	z.c <- c
+	c <- zResult{}
+	// Wait for the main goroutine to complete.
+	<-c
+	// At this point the main goroutine has shut down or is about to return.
+	z.c = nil
+	return z.err
+}
+
+// Close closes the Writer, flushing any unwritten data to the underlying io.Writer, but does not close the underlying io.Writer.
+func (z *Writer) Close() error {
+	if !z.Header.done {
+		if err := z.writeHeader(); err != nil {
+			return err
+		}
+	}
+	if err := z.Flush(); err != nil {
+		return err
+	}
+	if err := z.close(); err != nil {
+		return err
+	}
+	z.freeBuffers()
+
+	if debugFlag {
+		debug("writing last empty block")
+	}
+	if err := z.writeUint32(0); err != nil {
+		return err
+	}
+	if z.NoChecksum {
+		return nil
+	}
+	checksum := z.checksum.Sum32()
+	if debugFlag {
+		debug("stream checksum %x", checksum)
+	}
+	return z.writeUint32(checksum)
+}
+
+// Reset clears the state of the Writer z such that it is equivalent to its
+// initial state from NewWriter, but instead writing to w.
+// No access to the underlying io.Writer is performed.
+func (z *Writer) Reset(w io.Writer) {
+	n := cap(z.c)
+	_ = z.close()
+	z.freeBuffers()
+	z.Header.Reset()
+	z.dst = w
+	z.checksum.Reset()
+	z.idx = 0
+	z.err = nil
+	// reset hashtable to ensure deterministic output.
+	for i := range z.hashtable {
+		z.hashtable[i] = 0
+	}
+	z.WithConcurrency(n)
+}
+
+// writeUint32 writes a uint32 to the underlying writer.
+func (z *Writer) writeUint32(x uint32) error {
+	buf := z.buf[:4]
+	binary.LittleEndian.PutUint32(buf, x)
+	_, err := z.dst.Write(buf)
+	return err
+}
+
+// writerCompressBlock compresses data into a pooled buffer and sends the
+// result on the supplied channel.
+func writerCompressBlock(c chan zResult, header Header, data []byte) {
+	zdata := getBuffer(header.BlockMaxSize)
+	// The compressed block size cannot exceed the input's.
+	var zn int
+	if level := header.CompressionLevel; level != 0 {
+		zn, _ = CompressBlockHC(data, zdata, level)
+	} else {
+		var hashTable [winSize]int
+		zn, _ = CompressBlock(data, zdata, hashTable[:])
+	}
+	var res zResult
+	if zn > 0 && zn < len(data) {
+		res.size = uint32(zn)
+		res.data = zdata[:zn]
+	} else {
+		res.size = uint32(len(data)) | compressedBlockFlag
+		res.data = data
+	}
+	if header.BlockChecksum {
+		res.checksum = xxh32.ChecksumZero(res.data)
+	}
+	c <- res
+}
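
A sketch of the concurrent path, assuming default settings otherwise: WithConcurrency(-1) allows up to GOMAXPROCS block compressors, while the managing goroutine writes blocks in submission order and fires OnBlockDone:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"

	"github.com/pierrec/lz4"
)

func main() {
	payload := strings.NewReader(strings.Repeat("compress me ", 1<<12))

	var out bytes.Buffer
	zw := lz4.NewWriter(&out).WithConcurrency(-1) // -1 selects GOMAXPROCS workers
	zw.Header.BlockChecksum = true
	zw.OnBlockDone = func(n int) {
		fmt.Println("block written:", n, "bytes")
	}

	if _, err := io.Copy(zw, payload); err != nil {
		panic(err)
	}
	if err := zw.Close(); err != nil { // flushes, stops workers, writes the end mark
		panic(err)
	}
	fmt.Println("frame size:", out.Len())
}
```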
diff --git a/vendor/github.com/pires/go-proxyproto/.gitignore b/vendor/github.com/pires/go-proxyproto/.gitignore
new file mode 100644
index 00000000000..d19ada07d54
--- /dev/null
+++ b/vendor/github.com/pires/go-proxyproto/.gitignore
@@ -0,0 +1,9 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+.idea
+bin
+pkg
diff --git a/vendor/github.com/pires/go-proxyproto/.travis.yml b/vendor/github.com/pires/go-proxyproto/.travis.yml
new file mode 100644
index 00000000000..76e99a6d745
--- /dev/null
+++ b/vendor/github.com/pires/go-proxyproto/.travis.yml
@@ -0,0 +1,15 @@
+language: go
+sudo: false
+go:
+  - 1.11
+install:
+  - go get golang.org/x/tools/cmd/cover
+  - go get github.com/mattn/goveralls
+script:
+  - go fmt
+  - go vet
+  - go test -v -covermode=count -coverprofile=coverage.out
+  - $HOME/gopath/bin/goveralls -coverprofile=coverage.out -service=travis-ci -repotoken $COVERALLS_TOKEN
+env:
+  global:
+    secure: NRDefpPiVrhkRetDbJ1bek+7+Ojwh9dUSAAP4KBw5cqQbEDMUo/fgTHnTBywe4p7zaJ2IX7B2gmQk8zsAHu8D74A8baYzZBOJzpgAGx6GzTSMtLKTX62TKrKGvslru0/e9V/OaOGRuy1ETteuOb/b23rtBqQ7+N0JfC5+9wjH1mmYd6rbeU8bGMzyvXoCYorgf7VNV1KQM+4355pSDR5cvbV1lHfSut6Pw1dcjOahxheXi1YVhohdkQOwvBlSifVritJgwWcUtrb4xW97pZ8TWnHi5TlqSnxtRSKiPq5aojgsAt9ETnouPBhs0cToyteN3xi5N0SWvn5RRs7mPFFwkpvspghWNtqU4/uPRR0NrbcEiYcEFghicoq7pTthP0iP/KsBb7F1mH2YC79uuNMnOgoByKxLjD/TOybhSvyTRt2TldHZwePxcukXwwL7LHALhojsN299KQgIIiMdn9+oXESIzJXwI10ZDEfLPfhX+LHBQylobNqnnFM/tzFyFDGAqDVEn+yc4GVEu+FjpJ/kqDpTDpnUZ7Ui6KJX/VJfGMgwOrMDegOYlm5Cg6xPug0zb08taciTcWByDWOzZfmHcxOt3JyJXFTh49oFK70Xn+C7YNQt7VxfdsjCJ84HEDxxeY/Rp3HowTzjvqjsVVncKtJm0o7epOnY58RKP/GFwg=
diff --git a/vendor/github.com/pires/go-proxyproto/LICENSE b/vendor/github.com/pires/go-proxyproto/LICENSE
new file mode 100644
index 00000000000..8dada3edaf5
--- /dev/null
+++ b/vendor/github.com/pires/go-proxyproto/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright {yyyy} {name of copyright owner}
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/pires/go-proxyproto/README.md b/vendor/github.com/pires/go-proxyproto/README.md
new file mode 100644
index 00000000000..f37c0311b59
--- /dev/null
+++ b/vendor/github.com/pires/go-proxyproto/README.md
@@ -0,0 +1,71 @@
+# go-proxyproto
+
+[![Build Status](https://travis-ci.org/pires/go-proxyproto.svg?branch=master)](https://travis-ci.org/pires/go-proxyproto)
+[![Coverage Status](https://coveralls.io/repos/github/pires/go-proxyproto/badge.svg?branch=master)](https://coveralls.io/github/pires/go-proxyproto?branch=master)
+[![Go Report Card](https://goreportcard.com/badge/github.com/pires/go-proxyproto)](https://goreportcard.com/report/github.com/pires/go-proxyproto)
+
+A Go library implementation of the [PROXY protocol, versions 1 and 2](http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt),
+which provides, as per specification:
+> (...) a convenient way to safely transport connection
+> information such as a client's address across multiple layers of NAT or TCP
+> proxies. It is designed to require little changes to existing components and
+> to limit the performance impact caused by the processing of the transported
+> information.
+
+This library can be used in proxy clients, proxy servers, or both, wherever the protocol needs to be supported.
+Both protocol versions, 1 (text-based) and 2 (binary-based), are supported.
+
+## Installation
+
+```shell
+$ go get -u github.com/pires/go-proxyproto
+```
+
+## Usage
+
+### Client (TODO)
+
+### Server
+
+```go
+package main
+
+import (
+	"log"
+	"net"
+	
+	proxyproto "github.com/pires/go-proxyproto"
+)
+
+func main() {
+	// Create a listener
+	addr := "localhost:9876"
+	list, err := net.Listen("tcp", addr)
+	if err != nil {
+		log.Fatalf("couldn't listen to %q: %q\n", addr, err.Error())
+	}
+
+	// Wrap listener in a proxyproto listener
+	proxyListener := &proxyproto.Listener{Listener: list}
+	defer proxyListener.Close()
+
+	// Wait for a connection and accept it
+	conn, err := proxyListener.Accept()
+	if err != nil {
+		log.Fatalf("couldn't accept connection: %q\n", err.Error())
+	}
+	defer conn.Close()
+
+	// Print connection details
+	if conn.LocalAddr() == nil {
+		log.Fatal("couldn't retrieve local address")
+	}
+	log.Printf("local address: %q", conn.LocalAddr().String())
+
+	if conn.RemoteAddr() == nil {
+		log.Fatal("couldn't retrieve remote address")
+	}
+	log.Printf("remote address: %q", conn.RemoteAddr().String())
+}
+```
+
+## Documentation
+
+[http://godoc.org/github.com/pires/go-proxyproto](http://godoc.org/github.com/pires/go-proxyproto)
diff --git a/vendor/github.com/pires/go-proxyproto/addr_proto.go b/vendor/github.com/pires/go-proxyproto/addr_proto.go
new file mode 100644
index 00000000000..56b91550d2d
--- /dev/null
+++ b/vendor/github.com/pires/go-proxyproto/addr_proto.go
@@ -0,0 +1,71 @@
+package proxyproto
+
+// AddressFamilyAndProtocol represents address family and transport protocol.
+type AddressFamilyAndProtocol byte
+
+const (
+	UNSPEC       = '\x00'
+	TCPv4        = '\x11'
+	UDPv4        = '\x12'
+	TCPv6        = '\x21'
+	UDPv6        = '\x22'
+	UnixStream   = '\x31'
+	UnixDatagram = '\x32'
+)
+
+var supportedTransportProtocol = map[AddressFamilyAndProtocol]bool{
+	TCPv4:        true,
+	UDPv4:        true,
+	TCPv6:        true,
+	UDPv6:        true,
+	UnixStream:   true,
+	UnixDatagram: true,
+}
+
+// IsIPv4 returns true if the address family is IPv4 (AF_INET), false otherwise.
+func (ap AddressFamilyAndProtocol) IsIPv4() bool {
+	return 0x10 == ap&0xF0
+}
+
+// IsIPv6 returns true if the address family is IPv6 (AF_INET6), false otherwise.
+func (ap AddressFamilyAndProtocol) IsIPv6() bool {
+	return 0x20 == ap&0xF0
+}
+
+// IsUnix returns true if the address family is UNIX (AF_UNIX), false otherwise.
+func (ap AddressFamilyAndProtocol) IsUnix() bool {
+	return 0x30 == ap&0xF0
+}
+
+// IsStream returns true if the transport protocol is TCP or STREAM (SOCK_STREAM), false otherwise.
+func (ap AddressFamilyAndProtocol) IsStream() bool {
+	return 0x01 == ap&0x0F
+}
+
+// IsDatagram returns true if the transport protocol is UDP or DGRAM (SOCK_DGRAM), false otherwise.
+func (ap AddressFamilyAndProtocol) IsDatagram() bool {
+	return 0x02 == ap&0x0F
+}
+
+// IsUnspec returns true if the transport protocol or address family is unspecified, false otherwise.
+func (ap AddressFamilyAndProtocol) IsUnspec() bool {
+	return (0x00 == ap&0xF0) || (0x00 == ap&0x0F)
+}
+
+func (ap AddressFamilyAndProtocol) toByte() byte {
+	if ap.IsIPv4() && ap.IsStream() {
+		return TCPv4
+	} else if ap.IsIPv4() && ap.IsDatagram() {
+		return UDPv4
+	} else if ap.IsIPv6() && ap.IsStream() {
+		return TCPv6
+	} else if ap.IsIPv6() && ap.IsDatagram() {
+		return UDPv6
+	} else if ap.IsUnix() && ap.IsStream() {
+		return UnixStream
+	} else if ap.IsUnix() && ap.IsDatagram() {
+		return UnixDatagram
+	}
+
+	return UNSPEC
+}
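
For reference, these constants pack the address family into the high nibble and the transport protocol into the low nibble of a single byte, which is all the `Is*` predicates above mask for. A small illustrative check:

```go
package main

import (
	"fmt"

	proxyproto "github.com/pires/go-proxyproto"
)

func main() {
	// TCPv4 is 0x11: high nibble 0x1 = AF_INET, low nibble 0x1 = SOCK_STREAM.
	ap := proxyproto.AddressFamilyAndProtocol(proxyproto.TCPv4)

	fmt.Println(ap.IsIPv4())     // true  (0x11 & 0xF0 == 0x10)
	fmt.Println(ap.IsStream())   // true  (0x11 & 0x0F == 0x01)
	fmt.Println(ap.IsDatagram()) // false (0x11 & 0x0F != 0x02)
}
```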
diff --git a/vendor/github.com/pires/go-proxyproto/header.go b/vendor/github.com/pires/go-proxyproto/header.go
new file mode 100644
index 00000000000..e2aeee38b90
--- /dev/null
+++ b/vendor/github.com/pires/go-proxyproto/header.go
@@ -0,0 +1,149 @@
+// Package proxyproto implements Proxy Protocol (v1 and v2) parser and writer, as per specification:
+// http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt
+package proxyproto
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"io"
+	"net"
+	"time"
+)
+
+var (
+	// Protocol
+	SIGV1 = []byte{'\x50', '\x52', '\x4F', '\x58', '\x59'}
+	SIGV2 = []byte{'\x0D', '\x0A', '\x0D', '\x0A', '\x00', '\x0D', '\x0A', '\x51', '\x55', '\x49', '\x54', '\x0A'}
+
+	ErrCantReadProtocolVersionAndCommand    = errors.New("Can't read proxy protocol version and command")
+	ErrCantReadAddressFamilyAndProtocol     = errors.New("Can't read address family or protocol")
+	ErrCantReadLength                       = errors.New("Can't read length")
+	ErrCantResolveSourceUnixAddress         = errors.New("Can't resolve source Unix address")
+	ErrCantResolveDestinationUnixAddress    = errors.New("Can't resolve destination Unix address")
+	ErrNoProxyProtocol                      = errors.New("Proxy protocol signature not present")
+	ErrUnknownProxyProtocolVersion          = errors.New("Unknown proxy protocol version")
+	ErrUnsupportedProtocolVersionAndCommand = errors.New("Unsupported proxy protocol version and command")
+	ErrUnsupportedAddressFamilyAndProtocol  = errors.New("Unsupported address family and protocol")
+	ErrInvalidLength                        = errors.New("Invalid length")
+	ErrInvalidAddress                       = errors.New("Invalid address")
+	ErrInvalidPortNumber                    = errors.New("Invalid port number")
+)
+
+// Header is the placeholder for the proxy protocol header.
+type Header struct {
+	Version            byte
+	Command            ProtocolVersionAndCommand
+	TransportProtocol  AddressFamilyAndProtocol
+	SourceAddress      net.IP
+	DestinationAddress net.IP
+	SourcePort         uint16
+	DestinationPort    uint16
+}
+
+// RemoteAddr returns the address of the remote endpoint of the connection.
+func (header *Header) RemoteAddr() net.Addr {
+	return &net.TCPAddr{
+		IP:   header.SourceAddress,
+		Port: int(header.SourcePort),
+	}
+}
+
+// LocalAddr returns the address of the local endpoint of the connection.
+func (header *Header) LocalAddr() net.Addr {
+	return &net.TCPAddr{
+		IP:   header.DestinationAddress,
+		Port: int(header.DestinationPort),
+	}
+}
+
+// EqualTo returns true if headers are equivalent, false otherwise.
+// Deprecated: use EqualsTo instead. This method will eventually be removed.
+func (header *Header) EqualTo(otherHeader *Header) bool {
+	return header.EqualsTo(otherHeader)
+}
+
+// EqualsTo returns true if headers are equivalent, false otherwise.
+func (header *Header) EqualsTo(otherHeader *Header) bool {
+	if otherHeader == nil {
+		return false
+	}
+	if header.Command.IsLocal() {
+		return true
+	}
+	return header.Version == otherHeader.Version &&
+		header.TransportProtocol == otherHeader.TransportProtocol &&
+		header.SourceAddress.String() == otherHeader.SourceAddress.String() &&
+		header.DestinationAddress.String() == otherHeader.DestinationAddress.String() &&
+		header.SourcePort == otherHeader.SourcePort &&
+		header.DestinationPort == otherHeader.DestinationPort
+}
+
+// WriteTo renders a proxy protocol header in a format and writes it to an io.Writer.
+func (header *Header) WriteTo(w io.Writer) (int64, error) {
+	buf, err := header.Format()
+	if err != nil {
+		return 0, err
+	}
+
+	return bytes.NewBuffer(buf).WriteTo(w)
+}
+
+// Format renders a proxy protocol header in a format to write over the wire.
+func (header *Header) Format() ([]byte, error) {
+	switch header.Version {
+	case 1:
+		return header.formatVersion1()
+	case 2:
+		return header.formatVersion2()
+	default:
+		return nil, ErrUnknownProxyProtocolVersion
+	}
+}
+
+// Read identifies the proxy protocol version and reads the remainder of
+// the header accordingly.
+//
+// If the proxy protocol header signature is not present, the reader buffer remains
+// untouched and is safe for reading outside of this code.
+//
+// If the proxy protocol header signature is present but an error is raised while
+// processing the remaining header, assume the reader buffer to be in a corrupt state.
+// Also, this operation will block until enough bytes are available for peeking.
+func Read(reader *bufio.Reader) (*Header, error) {
+	// In order to improve speed for small non-PROXYed packets, take a peek at the first byte alone.
+	if b1, err := reader.Peek(1); err == nil && (bytes.Equal(b1[:1], SIGV1[:1]) || bytes.Equal(b1[:1], SIGV2[:1])) {
+		if signature, err := reader.Peek(5); err == nil && bytes.Equal(signature[:5], SIGV1) {
+			return parseVersion1(reader)
+		} else if signature, err := reader.Peek(12); err == nil && bytes.Equal(signature[:12], SIGV2) {
+			return parseVersion2(reader)
+		}
+	}
+
+	return nil, ErrNoProxyProtocol
+}
+
+// ReadTimeout acts as Read but takes a timeout. If that timeout is reached, it's assumed
+// there's no proxy protocol header.
+func ReadTimeout(reader *bufio.Reader, timeout time.Duration) (*Header, error) {
+	type header struct {
+		h *Header
+		e error
+	}
+	read := make(chan *header, 1)
+
+	go func() {
+		h := &header{}
+		h.h, h.e = Read(reader)
+		read <- h
+	}()
+
+	timer := time.NewTimer(timeout)
+	select {
+	case result := <-read:
+		timer.Stop()
+		return result.h, result.e
+	case <-timer.C:
+		return nil, ErrNoProxyProtocol
+	}
+}
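
When wrapping the whole listener is not desired, `Read` or `ReadTimeout` can be used against a `bufio.Reader` directly. A minimal sketch of a handler doing so (the port, handler name, and timeout are illustrative):

```go
package main

import (
	"bufio"
	"log"
	"net"
	"time"

	proxyproto "github.com/pires/go-proxyproto"
)

func handle(conn net.Conn) {
	defer conn.Close()

	// Read the PROXY header ourselves; if none arrives within the
	// timeout, ReadTimeout reports ErrNoProxyProtocol.
	reader := bufio.NewReader(conn)
	header, err := proxyproto.ReadTimeout(reader, 5*time.Second)
	if err != nil {
		log.Printf("no PROXY header: %v", err)
		return
	}
	log.Printf("original client: %s", header.RemoteAddr())

	// Application data must now be read from reader, not conn,
	// since the bufio.Reader may have buffered bytes past the header.
}

func main() {
	ln, err := net.Listen("tcp", "localhost:9876")
	if err != nil {
		log.Fatal(err)
	}
	for {
		conn, err := ln.Accept()
		if err != nil {
			log.Fatal(err)
		}
		go handle(conn)
	}
}
```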
diff --git a/vendor/github.com/pires/go-proxyproto/protocol.go b/vendor/github.com/pires/go-proxyproto/protocol.go
new file mode 100644
index 00000000000..13b6843af67
--- /dev/null
+++ b/vendor/github.com/pires/go-proxyproto/protocol.go
@@ -0,0 +1,136 @@
+package proxyproto
+
+import (
+	"bufio"
+	"net"
+	"sync"
+	"time"
+)
+
+// Listener is used to wrap an underlying listener,
+// whose connections may be using the HAProxy Proxy Protocol.
+// If the connection is using the protocol, the RemoteAddr() will return
+// the correct client address.
+//
+// Optionally define ProxyHeaderTimeout to set a maximum time to
+// receive the Proxy Protocol Header. Zero means no timeout.
+type Listener struct {
+	Listener           net.Listener
+	ProxyHeaderTimeout time.Duration
+}
+
+// Conn is used to wrap an underlying connection which
+// may be speaking the Proxy Protocol. If it is, the RemoteAddr() will
+// return the address of the client instead of the proxy address.
+type Conn struct {
+	bufReader          *bufio.Reader
+	conn               net.Conn
+	header             *Header
+	once               sync.Once
+	proxyHeaderTimeout time.Duration
+}
+
+// Accept waits for and returns the next connection to the listener.
+func (p *Listener) Accept() (net.Conn, error) {
+	// Get the underlying connection
+	conn, err := p.Listener.Accept()
+	if err != nil {
+		return nil, err
+	}
+	return NewConn(conn, p.ProxyHeaderTimeout), nil
+}
+
+// Close closes the underlying listener.
+func (p *Listener) Close() error {
+	return p.Listener.Close()
+}
+
+// Addr returns the underlying listener's network address.
+func (p *Listener) Addr() net.Addr {
+	return p.Listener.Addr()
+}
+
+// NewConn is used to wrap a net.Conn that may be speaking
+// the proxy protocol into a proxyproto.Conn
+func NewConn(conn net.Conn, timeout time.Duration) *Conn {
+	pConn := &Conn{
+		bufReader:          bufio.NewReader(conn),
+		conn:               conn,
+		proxyHeaderTimeout: timeout,
+	}
+	return pConn
+}
+
+// Read checks for the proxy protocol header before the
+// first read. If there is an error parsing the header,
+// it is returned.
+func (p *Conn) Read(b []byte) (int, error) {
+	var err error
+	p.once.Do(func() {
+		err = p.readHeader()
+	})
+	if err != nil {
+		return 0, err
+	}
+	return p.bufReader.Read(b)
+}
+
+// Write wraps original conn.Write
+func (p *Conn) Write(b []byte) (int, error) {
+	return p.conn.Write(b)
+}
+
+// Close wraps original conn.Close
+func (p *Conn) Close() error {
+	return p.conn.Close()
+}
+
+// LocalAddr returns the address of the server if the proxy
+// protocol is being used, otherwise just returns the address of
+// the socket server.
+func (p *Conn) LocalAddr() net.Addr {
+	p.once.Do(func() { p.readHeader() })
+	if p.header == nil {
+		return p.conn.LocalAddr()
+	}
+
+	return p.header.LocalAddr()
+}
+
+// RemoteAddr returns the address of the client if the proxy
+// protocol is being used, otherwise just returns the address of
+// the socket peer.
+func (p *Conn) RemoteAddr() net.Addr {
+	p.once.Do(func() { p.readHeader() })
+	if p.header == nil {
+		return p.conn.RemoteAddr()
+	}
+
+	return p.header.RemoteAddr()
+}
+
+// SetDeadline wraps original conn.SetDeadline
+func (p *Conn) SetDeadline(t time.Time) error {
+	return p.conn.SetDeadline(t)
+}
+
+// SetReadDeadline wraps original conn.SetReadDeadline
+func (p *Conn) SetReadDeadline(t time.Time) error {
+	return p.conn.SetReadDeadline(t)
+}
+
+// SetWriteDeadline wraps original conn.SetWriteDeadline
+func (p *Conn) SetWriteDeadline(t time.Time) error {
+	return p.conn.SetWriteDeadline(t)
+}
+
+func (p *Conn) readHeader() (err error) {
+	p.header, err = Read(p.bufReader)
+	// For the purpose of this wrapper (shamefully stolen from armon/go-proxyproto),
+	// act as if there was no error when the PROXY protocol is not present.
+	if err == ErrNoProxyProtocol {
+		err = nil
+	}
+
+	return
+}
diff --git a/vendor/github.com/pires/go-proxyproto/v1.go b/vendor/github.com/pires/go-proxyproto/v1.go
new file mode 100644
index 00000000000..ca9c104aa98
--- /dev/null
+++ b/vendor/github.com/pires/go-proxyproto/v1.go
@@ -0,0 +1,116 @@
+package proxyproto
+
+import (
+	"bufio"
+	"bytes"
+	"net"
+	"strconv"
+	"strings"
+)
+
+const (
+	CRLF      = "\r\n"
+	SEPARATOR = " "
+)
+
+func initVersion1() *Header {
+	header := new(Header)
+	header.Version = 1
+	// Command doesn't exist in v1
+	header.Command = PROXY
+	return header
+}
+
+func parseVersion1(reader *bufio.Reader) (*Header, error) {
+	// Make sure we have a v1 header
+	line, err := reader.ReadString('\n')
+	if !strings.HasSuffix(line, CRLF) {
+		return nil, ErrCantReadProtocolVersionAndCommand
+	}
+	tokens := strings.Split(line[:len(line)-2], SEPARATOR)
+	if len(tokens) < 6 {
+		return nil, ErrCantReadProtocolVersionAndCommand
+	}
+
+	header := initVersion1()
+
+	// Read address family and protocol
+	switch tokens[1] {
+	case "TCP4":
+		header.TransportProtocol = TCPv4
+	case "TCP6":
+		header.TransportProtocol = TCPv6
+	default:
+		header.TransportProtocol = UNSPEC
+	}
+
+	// Read addresses and ports
+	header.SourceAddress, err = parseV1IPAddress(header.TransportProtocol, tokens[2])
+	if err != nil {
+		return nil, err
+	}
+	header.DestinationAddress, err = parseV1IPAddress(header.TransportProtocol, tokens[3])
+	if err != nil {
+		return nil, err
+	}
+	header.SourcePort, err = parseV1PortNumber(tokens[4])
+	if err != nil {
+		return nil, err
+	}
+	header.DestinationPort, err = parseV1PortNumber(tokens[5])
+	if err != nil {
+		return nil, err
+	}
+	return header, nil
+}
+
+func (header *Header) formatVersion1() ([]byte, error) {
+	// As of version 1, only "TCP4" ( \x54 \x43 \x50 \x34 ) for TCP over IPv4,
+	// and "TCP6" ( \x54 \x43 \x50 \x36 ) for TCP over IPv6 are allowed.
+	proto := "UNKNOWN"
+	if header.TransportProtocol == TCPv4 {
+		proto = "TCP4"
+	} else if header.TransportProtocol == TCPv6 {
+		proto = "TCP6"
+	}
+
+	var buf bytes.Buffer
+	buf.Write(SIGV1)
+	buf.WriteString(SEPARATOR)
+	buf.WriteString(proto)
+	buf.WriteString(SEPARATOR)
+	buf.WriteString(header.SourceAddress.String())
+	buf.WriteString(SEPARATOR)
+	buf.WriteString(header.DestinationAddress.String())
+	buf.WriteString(SEPARATOR)
+	buf.WriteString(strconv.Itoa(int(header.SourcePort)))
+	buf.WriteString(SEPARATOR)
+	buf.WriteString(strconv.Itoa(int(header.DestinationPort)))
+	buf.WriteString(CRLF)
+
+	return buf.Bytes(), nil
+}
+
+func parseV1PortNumber(portStr string) (port uint16, err error) {
+	_port, _err := strconv.Atoi(portStr)
+	if _err == nil {
+		if _port < 0 || _port > 65535 {
+			err = ErrInvalidPortNumber
+		} else {
+			port = uint16(_port)
+		}
+	} else {
+		err = ErrInvalidPortNumber
+	}
+
+	return
+}
+
+func parseV1IPAddress(protocol AddressFamilyAndProtocol, addrStr string) (addr net.IP, err error) {
+	addr = net.ParseIP(addrStr)
+	tryV4 := addr.To4()
+	if (protocol == TCPv4 && tryV4 == nil) || (protocol == TCPv6 && tryV4 != nil) {
+		err = ErrInvalidAddress
+	}
+	return
+}
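
Concretely, a version 1 header is a single CRLF-terminated ASCII line, which is what `parseVersion1` consumes. A brief sketch feeding such a line through `Read` (the wire string is illustrative):

```go
package main

import (
	"bufio"
	"fmt"
	"strings"

	proxyproto "github.com/pires/go-proxyproto"
)

func main() {
	// Signature, protocol family, source/destination addresses and
	// ports, all on one CRLF-terminated line.
	wire := "PROXY TCP4 10.1.1.1 20.2.2.2 1000 2000\r\n"

	header, err := proxyproto.Read(bufio.NewReader(strings.NewReader(wire)))
	if err != nil {
		panic(err)
	}
	fmt.Println(header.RemoteAddr()) // 10.1.1.1:1000
	fmt.Println(header.LocalAddr())  // 20.2.2.2:2000
}
```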
diff --git a/vendor/github.com/pires/go-proxyproto/v2.go b/vendor/github.com/pires/go-proxyproto/v2.go
new file mode 100644
index 00000000000..c0c83c8f7ef
--- /dev/null
+++ b/vendor/github.com/pires/go-proxyproto/v2.go
@@ -0,0 +1,202 @@
+package proxyproto
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/binary"
+	"io"
+)
+
+var (
+	lengthV4   = uint16(12)
+	lengthV6   = uint16(36)
+	lengthUnix = uint16(218)
+
+	lengthV4Bytes = func() []byte {
+		a := make([]byte, 2)
+		binary.BigEndian.PutUint16(a, lengthV4)
+		return a
+	}()
+	lengthV6Bytes = func() []byte {
+		a := make([]byte, 2)
+		binary.BigEndian.PutUint16(a, lengthV6)
+		return a
+	}()
+	lengthUnixBytes = func() []byte {
+		a := make([]byte, 2)
+		binary.BigEndian.PutUint16(a, lengthUnix)
+		return a
+	}()
+)
+
+type _ports struct {
+	SrcPort uint16
+	DstPort uint16
+}
+
+type _addr4 struct {
+	Src     [4]byte
+	Dst     [4]byte
+	SrcPort uint16
+	DstPort uint16
+}
+
+type _addr6 struct {
+	Src [16]byte
+	Dst [16]byte
+	_ports
+}
+
+type _addrUnix struct {
+	Src [108]byte
+	Dst [108]byte
+}
+
+func parseVersion2(reader *bufio.Reader) (header *Header, err error) {
+	// Skip first 12 bytes (signature)
+	for i := 0; i < 12; i++ {
+		if _, err = reader.ReadByte(); err != nil {
+			return nil, ErrCantReadProtocolVersionAndCommand
+		}
+	}
+
+	header = new(Header)
+	header.Version = 2
+
+	// Read the 13th byte, protocol version and command
+	b13, err := reader.ReadByte()
+	if err != nil {
+		return nil, ErrCantReadProtocolVersionAndCommand
+	}
+	header.Command = ProtocolVersionAndCommand(b13)
+	if _, ok := supportedCommand[header.Command]; !ok {
+		return nil, ErrUnsupportedProtocolVersionAndCommand
+	}
+	// If command is LOCAL, header ends here
+	if header.Command.IsLocal() {
+		return header, nil
+	}
+
+	// Read the 14th byte, address family and protocol
+	b14, err := reader.ReadByte()
+	if err != nil {
+		return nil, ErrCantReadAddressFamilyAndProtocol
+	}
+	header.TransportProtocol = AddressFamilyAndProtocol(b14)
+	if _, ok := supportedTransportProtocol[header.TransportProtocol]; !ok {
+		return nil, ErrUnsupportedAddressFamilyAndProtocol
+	}
+
+	// Make sure there are bytes available as specified in length
+	var length uint16
+	if err := binary.Read(io.LimitReader(reader, 2), binary.BigEndian, &length); err != nil {
+		return nil, ErrCantReadLength
+	}
+	if !header.validateLength(length) {
+		return nil, ErrInvalidLength
+	}
+
+	if _, err := reader.Peek(int(length)); err != nil {
+		return nil, ErrInvalidLength
+	}
+
+	// Length-limited reader for payload section
+	payloadReader := io.LimitReader(reader, int64(length))
+
+	// Read addresses and ports
+	if header.TransportProtocol.IsIPv4() {
+		var addr _addr4
+		if err := binary.Read(payloadReader, binary.BigEndian, &addr); err != nil {
+			return nil, ErrInvalidAddress
+		}
+		header.SourceAddress = addr.Src[:]
+		header.DestinationAddress = addr.Dst[:]
+		header.SourcePort = addr.SrcPort
+		header.DestinationPort = addr.DstPort
+	} else if header.TransportProtocol.IsIPv6() {
+		var addr _addr6
+		if err := binary.Read(payloadReader, binary.BigEndian, &addr); err != nil {
+			return nil, ErrInvalidAddress
+		}
+		header.SourceAddress = addr.Src[:]
+		header.DestinationAddress = addr.Dst[:]
+		header.SourcePort = addr.SrcPort
+		header.DestinationPort = addr.DstPort
+	}
+	// TODO fully support Unix addresses
+	//	else if header.TransportProtocol.IsUnix() {
+	//		var addr _addrUnix
+	//		if err := binary.Read(payloadReader, binary.BigEndian, &addr); err != nil {
+	//			return nil, ErrInvalidAddress
+	//		}
+	//
+	//if header.SourceAddress, err = net.ResolveUnixAddr("unix", string(addr.Src[:])); err != nil {
+	//	return nil, ErrCantResolveSourceUnixAddress
+	//}
+	//if header.DestinationAddress, err = net.ResolveUnixAddr("unix", string(addr.Dst[:])); err != nil {
+	//	return nil, ErrCantResolveDestinationUnixAddress
+	//}
+	//}
+
+	// TODO add encapsulated TLV support
+
+	// Drain the remaining padding
+	payloadReader.Read(make([]byte, length))
+
+	return header, nil
+}
+
+func (header *Header) formatVersion2() ([]byte, error) {
+	var buf bytes.Buffer
+	buf.Write(SIGV2)
+	buf.WriteByte(header.Command.toByte())
+	if !header.Command.IsLocal() {
+		buf.WriteByte(header.TransportProtocol.toByte())
+		// TODO add encapsulated TLV length
+		var addrSrc, addrDst []byte
+		if header.TransportProtocol.IsIPv4() {
+			buf.Write(lengthV4Bytes)
+			addrSrc = header.SourceAddress.To4()
+			addrDst = header.DestinationAddress.To4()
+		} else if header.TransportProtocol.IsIPv6() {
+			buf.Write(lengthV6Bytes)
+			addrSrc = header.SourceAddress.To16()
+			addrDst = header.DestinationAddress.To16()
+		} else if header.TransportProtocol.IsUnix() {
+			buf.Write(lengthUnixBytes)
+			// TODO is below right?
+			addrSrc = []byte(header.SourceAddress.String())
+			addrDst = []byte(header.DestinationAddress.String())
+		}
+		buf.Write(addrSrc)
+		buf.Write(addrDst)
+
+		portSrcBytes := func() []byte {
+			a := make([]byte, 2)
+			binary.BigEndian.PutUint16(a, header.SourcePort)
+			return a
+		}()
+		buf.Write(portSrcBytes)
+
+		portDstBytes := func() []byte {
+			a := make([]byte, 2)
+			binary.BigEndian.PutUint16(a, header.DestinationPort)
+			return a
+		}()
+		buf.Write(portDstBytes)
+
+	}
+
+	return buf.Bytes(), nil
+}
+
+func (header *Header) validateLength(length uint16) bool {
+	if header.TransportProtocol.IsIPv4() {
+		return length >= lengthV4
+	} else if header.TransportProtocol.IsIPv6() {
+		return length >= lengthV6
+	} else if header.TransportProtocol.IsUnix() {
+		return length >= lengthUnix
+	}
+	return false
+}
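
Putting `formatVersion2` together for TCP over IPv4: the 12-byte signature, one version/command byte, one family/protocol byte, a 2-byte big-endian length (12), then the two 4-byte addresses and two 2-byte ports, 28 bytes in total. A sketch confirming that layout (field values are illustrative):

```go
package main

import (
	"fmt"
	"net"

	proxyproto "github.com/pires/go-proxyproto"
)

func main() {
	header := &proxyproto.Header{
		Version:            2,
		Command:            proxyproto.PROXY,
		TransportProtocol:  proxyproto.TCPv4,
		SourceAddress:      net.ParseIP("10.1.1.1"),
		SourcePort:         1000,
		DestinationAddress: net.ParseIP("20.2.2.2"),
		DestinationPort:    2000,
	}

	raw, err := header.Format()
	if err != nil {
		panic(err)
	}
	fmt.Println(len(raw))                     // 28
	fmt.Printf("%#x %#x\n", raw[12], raw[13]) // 0x21 (PROXY) 0x11 (TCP over IPv4)
}
```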
diff --git a/vendor/github.com/pires/go-proxyproto/version_cmd.go b/vendor/github.com/pires/go-proxyproto/version_cmd.go
new file mode 100644
index 00000000000..2ee1a05060e
--- /dev/null
+++ b/vendor/github.com/pires/go-proxyproto/version_cmd.go
@@ -0,0 +1,39 @@
+package proxyproto
+
+// ProtocolVersionAndCommand represents proxy protocol version and command.
+type ProtocolVersionAndCommand byte
+
+const (
+	LOCAL = '\x20'
+	PROXY = '\x21'
+)
+
+var supportedCommand = map[ProtocolVersionAndCommand]bool{
+	LOCAL: true,
+	PROXY: true,
+}
+
+// IsLocal returns true if the protocol version is \x2 and command is LOCAL, false otherwise.
+func (pvc ProtocolVersionAndCommand) IsLocal() bool {
+	return 0x20 == pvc&0xF0 && 0x00 == pvc&0x0F
+}
+
+// IsProxy returns true if the protocol version is \x2 and command is PROXY, false otherwise.
+func (pvc ProtocolVersionAndCommand) IsProxy() bool {
+	return 0x20 == pvc&0xF0 && 0x01 == pvc&0x0F
+}
+
+// IsUnspec returns true if the protocol version or command is unspecified, false otherwise.
+func (pvc ProtocolVersionAndCommand) IsUnspec() bool {
+	return !(pvc.IsLocal() || pvc.IsProxy())
+}
+
+func (pvc ProtocolVersionAndCommand) toByte() byte {
+	if pvc.IsLocal() {
+		return LOCAL
+	} else if pvc.IsProxy() {
+		return PROXY
+	}
+
+	return LOCAL
+}
diff --git a/vendor/github.com/pkg/errors/.gitignore b/vendor/github.com/pkg/errors/.gitignore
new file mode 100644
index 00000000000..daf913b1b34
--- /dev/null
+++ b/vendor/github.com/pkg/errors/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
diff --git a/vendor/github.com/pkg/errors/.travis.yml b/vendor/github.com/pkg/errors/.travis.yml
new file mode 100644
index 00000000000..9159de03e03
--- /dev/null
+++ b/vendor/github.com/pkg/errors/.travis.yml
@@ -0,0 +1,10 @@
+language: go
+go_import_path: github.com/pkg/errors
+go:
+  - 1.11.x
+  - 1.12.x
+  - 1.13.x
+  - tip
+
+script:
+  - make check
diff --git a/vendor/github.com/pkg/errors/LICENSE b/vendor/github.com/pkg/errors/LICENSE
new file mode 100644
index 00000000000..835ba3e755c
--- /dev/null
+++ b/vendor/github.com/pkg/errors/LICENSE
@@ -0,0 +1,23 @@
+Copyright (c) 2015, Dave Cheney <dave@cheney.net>
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+  list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+  this list of conditions and the following disclaimer in the documentation
+  and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/pkg/errors/Makefile b/vendor/github.com/pkg/errors/Makefile
new file mode 100644
index 00000000000..ce9d7cded64
--- /dev/null
+++ b/vendor/github.com/pkg/errors/Makefile
@@ -0,0 +1,44 @@
+PKGS := github.com/pkg/errors
+SRCDIRS := $(shell go list -f '{{.Dir}}' $(PKGS))
+GO := go
+
+check: test vet gofmt misspell unconvert staticcheck ineffassign unparam
+
+test: 
+	$(GO) test $(PKGS)
+
+vet: | test
+	$(GO) vet $(PKGS)
+
+staticcheck:
+	$(GO) get honnef.co/go/tools/cmd/staticcheck
+	staticcheck -checks all $(PKGS)
+
+misspell:
+	$(GO) get github.com/client9/misspell/cmd/misspell
+	misspell \
+		-locale GB \
+		-error \
+		*.md *.go
+
+unconvert:
+	$(GO) get github.com/mdempsky/unconvert
+	unconvert -v $(PKGS)
+
+ineffassign:
+	$(GO) get github.com/gordonklaus/ineffassign
+	find $(SRCDIRS) -name '*.go' | xargs ineffassign
+
+pedantic: check errcheck
+
+unparam:
+	$(GO) get mvdan.cc/unparam
+	unparam ./...
+
+errcheck:
+	$(GO) get github.com/kisielk/errcheck
+	errcheck $(PKGS)
+
+gofmt:  
+	@echo Checking code is gofmted
+	@test -z "$(shell gofmt -s -l -d -e $(SRCDIRS) | tee /dev/stderr)"
diff --git a/vendor/github.com/pkg/errors/README.md b/vendor/github.com/pkg/errors/README.md
new file mode 100644
index 00000000000..54dfdcb12ea
--- /dev/null
+++ b/vendor/github.com/pkg/errors/README.md
@@ -0,0 +1,59 @@
+# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors) [![Sourcegraph](https://sourcegraph.com/github.com/pkg/errors/-/badge.svg)](https://sourcegraph.com/github.com/pkg/errors?badge)
+
+Package errors provides simple error handling primitives.
+
+`go get github.com/pkg/errors`
+
+The traditional error handling idiom in Go is roughly akin to
+```go
+if err != nil {
+        return err
+}
+```
+which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error.
+
+## Adding context to an error
+
+The errors.Wrap function returns a new error that adds context to the original error. For example
+```go
+_, err := ioutil.ReadAll(r)
+if err != nil {
+        return errors.Wrap(err, "read failed")
+}
+```
+## Retrieving the cause of an error
+
+Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`.
+```go
+type causer interface {
+        Cause() error
+}
+```
+`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example:
+```go
+switch err := errors.Cause(err).(type) {
+case *MyError:
+        // handle specifically
+default:
+        // unknown error
+}
+```
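
Putting `Wrap` and `Cause` together, a runnable sketch (the failing operation is simulated):

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

func readConfig(path string) error {
	err := errors.New("permission denied") // stand-in for a real failure
	return errors.Wrap(err, "reading "+path+" failed")
}

func main() {
	err := readConfig("/etc/app.conf")

	// The wrapping message is prepended to the cause's message.
	fmt.Println(err) // reading /etc/app.conf failed: permission denied

	// Cause unwinds the causer chain back to the original error.
	fmt.Println(errors.Cause(err)) // permission denied

	// %+v additionally prints the recorded stack traces.
	fmt.Printf("%+v\n", err)
}
```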
+
+[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors).
+
+## Roadmap
+
+With the upcoming [Go2 error proposals](https://go.googlesource.com/proposal/+/master/design/go2draft.md) this package is moving into maintenance mode. The roadmap for a 1.0 release is as follows:
+
+- 0.9. Remove pre Go 1.9 and Go 1.10 support, address outstanding pull requests (if possible)
+- 1.0. Final release.
+
+## Contributing
+
+Because of the Go2 errors changes, this package is not accepting proposals for new functionality. With that said, we welcome pull requests, bug fixes and issue reports. 
+
+Before sending a PR, please discuss your change by raising an issue.
+
+## License
+
+BSD-2-Clause
diff --git a/vendor/github.com/pkg/errors/appveyor.yml b/vendor/github.com/pkg/errors/appveyor.yml
new file mode 100644
index 00000000000..a932eade024
--- /dev/null
+++ b/vendor/github.com/pkg/errors/appveyor.yml
@@ -0,0 +1,32 @@
+version: build-{build}.{branch}
+
+clone_folder: C:\gopath\src\github.com\pkg\errors
+shallow_clone: true # for startup speed
+
+environment:
+  GOPATH: C:\gopath
+
+platform:
+  - x64
+
+# http://www.appveyor.com/docs/installed-software
+install:
+  # some helpful output for debugging builds
+  - go version
+  - go env
+  # pre-installed MinGW at C:\MinGW is 32bit only
+  # but MSYS2 at C:\msys64 has mingw64
+  - set PATH=C:\msys64\mingw64\bin;%PATH%
+  - gcc --version
+  - g++ --version
+
+build_script:
+  - go install -v ./...
+
+test_script:
+  - set PATH=C:\gopath\bin;%PATH%
+  - go test -v ./...
+
+#artifacts:
+#  - path: '%GOPATH%\bin\*.exe'
+deploy: off
diff --git a/vendor/github.com/pkg/errors/errors.go b/vendor/github.com/pkg/errors/errors.go
new file mode 100644
index 00000000000..161aea25829
--- /dev/null
+++ b/vendor/github.com/pkg/errors/errors.go
@@ -0,0 +1,288 @@
+// Package errors provides simple error handling primitives.
+//
+// The traditional error handling idiom in Go is roughly akin to
+//
+//     if err != nil {
+//             return err
+//     }
+//
+// which when applied recursively up the call stack results in error reports
+// without context or debugging information. The errors package allows
+// programmers to add context to the failure path in their code in a way
+// that does not destroy the original value of the error.
+//
+// Adding context to an error
+//
+// The errors.Wrap function returns a new error that adds context to the
+// original error by recording a stack trace at the point Wrap is called,
+// together with the supplied message. For example
+//
+//     _, err := ioutil.ReadAll(r)
+//     if err != nil {
+//             return errors.Wrap(err, "read failed")
+//     }
+//
+// If additional control is required, the errors.WithStack and
+// errors.WithMessage functions destructure errors.Wrap into its component
+// operations: annotating an error with a stack trace and with a message,
+// respectively.
+//
+// Retrieving the cause of an error
+//
+// Using errors.Wrap constructs a stack of errors, adding context to the
+// preceding error. Depending on the nature of the error it may be necessary
+// to reverse the operation of errors.Wrap to retrieve the original error
+// for inspection. Any error value which implements this interface
+//
+//     type causer interface {
+//             Cause() error
+//     }
+//
+// can be inspected by errors.Cause. errors.Cause will recursively retrieve
+// the topmost error that does not implement causer, which is assumed to be
+// the original cause. For example:
+//
+//     switch err := errors.Cause(err).(type) {
+//     case *MyError:
+//             // handle specifically
+//     default:
+//             // unknown error
+//     }
+//
+// Although the causer interface is not exported by this package, it is
+// considered a part of its stable public interface.
+//
+// Formatted printing of errors
+//
+// All error values returned from this package implement fmt.Formatter and can
+// be formatted by the fmt package. The following verbs are supported:
+//
+//     %s    print the error. If the error has a Cause it will be
+//           printed recursively.
+//     %v    see %s
+//     %+v   extended format. Each Frame of the error's StackTrace will
+//           be printed in detail.
+//
+// Retrieving the stack trace of an error or wrapper
+//
+// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are
+// invoked. This information can be retrieved with the following interface:
+//
+//     type stackTracer interface {
+//             StackTrace() errors.StackTrace
+//     }
+//
+// The returned errors.StackTrace type is defined as
+//
+//     type StackTrace []Frame
+//
+// The Frame type represents a call site in the stack trace. Frame supports
+// the fmt.Formatter interface that can be used for printing information about
+// the stack trace of this error. For example:
+//
+//     if err, ok := err.(stackTracer); ok {
+//             for _, f := range err.StackTrace() {
+//                     fmt.Printf("%+s:%d\n", f, f)
+//             }
+//     }
+//
+// Although the stackTracer interface is not exported by this package, it is
+// considered a part of its stable public interface.
+//
+// See the documentation for Frame.Format for more details.
+package errors
+
+import (
+	"fmt"
+	"io"
+)
+
+// New returns an error with the supplied message.
+// New also records the stack trace at the point it was called.
+func New(message string) error {
+	return &fundamental{
+		msg:   message,
+		stack: callers(),
+	}
+}
+
+// Errorf formats according to a format specifier and returns the string
+// as a value that satisfies error.
+// Errorf also records the stack trace at the point it was called.
+func Errorf(format string, args ...interface{}) error {
+	return &fundamental{
+		msg:   fmt.Sprintf(format, args...),
+		stack: callers(),
+	}
+}
+
+// fundamental is an error that has a message and a stack, but no caller.
+type fundamental struct {
+	msg string
+	*stack
+}
+
+func (f *fundamental) Error() string { return f.msg }
+
+func (f *fundamental) Format(s fmt.State, verb rune) {
+	switch verb {
+	case 'v':
+		if s.Flag('+') {
+			io.WriteString(s, f.msg)
+			f.stack.Format(s, verb)
+			return
+		}
+		fallthrough
+	case 's':
+		io.WriteString(s, f.msg)
+	case 'q':
+		fmt.Fprintf(s, "%q", f.msg)
+	}
+}
+
+// WithStack annotates err with a stack trace at the point WithStack was called.
+// If err is nil, WithStack returns nil.
+func WithStack(err error) error {
+	if err == nil {
+		return nil
+	}
+	return &withStack{
+		err,
+		callers(),
+	}
+}
+
+type withStack struct {
+	error
+	*stack
+}
+
+func (w *withStack) Cause() error { return w.error }
+
+// Unwrap provides compatibility for Go 1.13 error chains.
+func (w *withStack) Unwrap() error { return w.error }
+
+func (w *withStack) Format(s fmt.State, verb rune) {
+	switch verb {
+	case 'v':
+		if s.Flag('+') {
+			fmt.Fprintf(s, "%+v", w.Cause())
+			w.stack.Format(s, verb)
+			return
+		}
+		fallthrough
+	case 's':
+		io.WriteString(s, w.Error())
+	case 'q':
+		fmt.Fprintf(s, "%q", w.Error())
+	}
+}
+
+// Wrap returns an error annotating err with a stack trace
+// at the point Wrap is called, and the supplied message.
+// If err is nil, Wrap returns nil.
+func Wrap(err error, message string) error {
+	if err == nil {
+		return nil
+	}
+	err = &withMessage{
+		cause: err,
+		msg:   message,
+	}
+	return &withStack{
+		err,
+		callers(),
+	}
+}
+
+// Wrapf returns an error annotating err with a stack trace
+// at the point Wrapf is called, and the format specifier.
+// If err is nil, Wrapf returns nil.
+func Wrapf(err error, format string, args ...interface{}) error {
+	if err == nil {
+		return nil
+	}
+	err = &withMessage{
+		cause: err,
+		msg:   fmt.Sprintf(format, args...),
+	}
+	return &withStack{
+		err,
+		callers(),
+	}
+}
+
+// WithMessage annotates err with a new message.
+// If err is nil, WithMessage returns nil.
+func WithMessage(err error, message string) error {
+	if err == nil {
+		return nil
+	}
+	return &withMessage{
+		cause: err,
+		msg:   message,
+	}
+}
+
+// WithMessagef annotates err with the format specifier.
+// If err is nil, WithMessagef returns nil.
+func WithMessagef(err error, format string, args ...interface{}) error {
+	if err == nil {
+		return nil
+	}
+	return &withMessage{
+		cause: err,
+		msg:   fmt.Sprintf(format, args...),
+	}
+}
+
+type withMessage struct {
+	cause error
+	msg   string
+}
+
+func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() }
+func (w *withMessage) Cause() error  { return w.cause }
+
+// Unwrap provides compatibility for Go 1.13 error chains.
+func (w *withMessage) Unwrap() error { return w.cause }
+
+func (w *withMessage) Format(s fmt.State, verb rune) {
+	switch verb {
+	case 'v':
+		if s.Flag('+') {
+			fmt.Fprintf(s, "%+v\n", w.Cause())
+			io.WriteString(s, w.msg)
+			return
+		}
+		fallthrough
+	case 's', 'q':
+		io.WriteString(s, w.Error())
+	}
+}
+
+// Cause returns the underlying cause of the error, if possible.
+// An error value has a cause if it implements the following
+// interface:
+//
+//     type causer interface {
+//            Cause() error
+//     }
+//
+// If the error does not implement Cause, the original error will
+// be returned. If the error is nil, nil will be returned without further
+// investigation.
+func Cause(err error) error {
+	type causer interface {
+		Cause() error
+	}
+
+	for err != nil {
+		cause, ok := err.(causer)
+		if !ok {
+			break
+		}
+		err = cause.Cause()
+	}
+	return err
+}
diff --git a/vendor/github.com/pkg/errors/go113.go b/vendor/github.com/pkg/errors/go113.go
new file mode 100644
index 00000000000..be0d10d0c79
--- /dev/null
+++ b/vendor/github.com/pkg/errors/go113.go
@@ -0,0 +1,38 @@
+// +build go1.13
+
+package errors
+
+import (
+	stderrors "errors"
+)
+
+// Is reports whether any error in err's chain matches target.
+//
+// The chain consists of err itself followed by the sequence of errors obtained by
+// repeatedly calling Unwrap.
+//
+// An error is considered to match a target if it is equal to that target or if
+// it implements a method Is(error) bool such that Is(target) returns true.
+func Is(err, target error) bool { return stderrors.Is(err, target) }
+
+// As finds the first error in err's chain that matches target, and if so, sets
+// target to that error value and returns true.
+//
+// The chain consists of err itself followed by the sequence of errors obtained by
+// repeatedly calling Unwrap.
+//
+// An error matches target if the error's concrete value is assignable to the value
+// pointed to by target, or if the error has a method As(interface{}) bool such that
+// As(target) returns true. In the latter case, the As method is responsible for
+// setting target.
+//
+// As will panic if target is not a non-nil pointer to either a type that implements
+// error, or to any interface type. As returns false if err is nil.
+func As(err error, target interface{}) bool { return stderrors.As(err, target) }
+
+// Unwrap returns the result of calling the Unwrap method on err, if err's
+// type contains an Unwrap method returning error.
+// Otherwise, Unwrap returns nil.
+func Unwrap(err error) error {
+	return stderrors.Unwrap(err)
+}
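
On Go 1.13 and later these shims mean errors wrapped by this package also cooperate with the standard library helpers, since `withStack` and `withMessage` implement `Unwrap`. A brief sketch:

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

var ErrNotFound = errors.New("not found")

func main() {
	err := errors.Wrap(ErrNotFound, "loading user 42")

	// Is walks the Unwrap chain down to the sentinel error.
	fmt.Println(errors.Is(err, ErrNotFound)) // true
}
```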
diff --git a/vendor/github.com/pkg/errors/stack.go b/vendor/github.com/pkg/errors/stack.go
new file mode 100644
index 00000000000..779a8348fb9
--- /dev/null
+++ b/vendor/github.com/pkg/errors/stack.go
@@ -0,0 +1,177 @@
+package errors
+
+import (
+	"fmt"
+	"io"
+	"path"
+	"runtime"
+	"strconv"
+	"strings"
+)
+
+// Frame represents a program counter inside a stack frame.
+// For historical reasons if Frame is interpreted as a uintptr
+// its value represents the program counter + 1.
+type Frame uintptr
+
+// pc returns the program counter for this frame;
+// multiple frames may have the same PC value.
+func (f Frame) pc() uintptr { return uintptr(f) - 1 }
+
+// file returns the full path to the file that contains the
+// function for this Frame's pc.
+func (f Frame) file() string {
+	fn := runtime.FuncForPC(f.pc())
+	if fn == nil {
+		return "unknown"
+	}
+	file, _ := fn.FileLine(f.pc())
+	return file
+}
+
+// line returns the line number of source code of the
+// function for this Frame's pc.
+func (f Frame) line() int {
+	fn := runtime.FuncForPC(f.pc())
+	if fn == nil {
+		return 0
+	}
+	_, line := fn.FileLine(f.pc())
+	return line
+}
+
+// name returns the name of this function, if known.
+func (f Frame) name() string {
+	fn := runtime.FuncForPC(f.pc())
+	if fn == nil {
+		return "unknown"
+	}
+	return fn.Name()
+}
+
+// Format formats the frame according to the fmt.Formatter interface.
+//
+//    %s    source file
+//    %d    source line
+//    %n    function name
+//    %v    equivalent to %s:%d
+//
+// Format accepts flags that alter the printing of some verbs, as follows:
+//
+//    %+s   function name and path of source file relative to the compile time
+//          GOPATH separated by \n\t (<funcname>\n\t<path>)
+//    %+v   equivalent to %+s:%d
+func (f Frame) Format(s fmt.State, verb rune) {
+	switch verb {
+	case 's':
+		switch {
+		case s.Flag('+'):
+			io.WriteString(s, f.name())
+			io.WriteString(s, "\n\t")
+			io.WriteString(s, f.file())
+		default:
+			io.WriteString(s, path.Base(f.file()))
+		}
+	case 'd':
+		io.WriteString(s, strconv.Itoa(f.line()))
+	case 'n':
+		io.WriteString(s, funcname(f.name()))
+	case 'v':
+		f.Format(s, 's')
+		io.WriteString(s, ":")
+		f.Format(s, 'd')
+	}
+}
+
+// MarshalText formats a stacktrace Frame as a text string. The output is the
+// same as that of fmt.Sprintf("%+v", f), but without newlines or tabs.
+func (f Frame) MarshalText() ([]byte, error) {
+	name := f.name()
+	if name == "unknown" {
+		return []byte(name), nil
+	}
+	return []byte(fmt.Sprintf("%s %s:%d", name, f.file(), f.line())), nil
+}
+
+// StackTrace is stack of Frames from innermost (newest) to outermost (oldest).
+type StackTrace []Frame
+
+// Format formats the stack of Frames according to the fmt.Formatter interface.
+//
+//    %s	lists source files for each Frame in the stack
+//    %v	lists the source file and line number for each Frame in the stack
+//
+// Format accepts flags that alter the printing of some verbs, as follows:
+//
+//    %+v   Prints filename, function, and line number for each Frame in the stack.
+func (st StackTrace) Format(s fmt.State, verb rune) {
+	switch verb {
+	case 'v':
+		switch {
+		case s.Flag('+'):
+			for _, f := range st {
+				io.WriteString(s, "\n")
+				f.Format(s, verb)
+			}
+		case s.Flag('#'):
+			fmt.Fprintf(s, "%#v", []Frame(st))
+		default:
+			st.formatSlice(s, verb)
+		}
+	case 's':
+		st.formatSlice(s, verb)
+	}
+}
+
+// formatSlice will format this StackTrace into the given buffer as a slice of
+// Frame, only valid when called with '%s' or '%v'.
+func (st StackTrace) formatSlice(s fmt.State, verb rune) {
+	io.WriteString(s, "[")
+	for i, f := range st {
+		if i > 0 {
+			io.WriteString(s, " ")
+		}
+		f.Format(s, verb)
+	}
+	io.WriteString(s, "]")
+}
+
+// stack represents a stack of program counters.
+type stack []uintptr
+
+func (s *stack) Format(st fmt.State, verb rune) {
+	switch verb {
+	case 'v':
+		switch {
+		case st.Flag('+'):
+			for _, pc := range *s {
+				f := Frame(pc)
+				fmt.Fprintf(st, "\n%+v", f)
+			}
+		}
+	}
+}
+
+func (s *stack) StackTrace() StackTrace {
+	f := make([]Frame, len(*s))
+	for i := 0; i < len(f); i++ {
+		f[i] = Frame((*s)[i])
+	}
+	return f
+}
+
+func callers() *stack {
+	const depth = 32
+	var pcs [depth]uintptr
+	n := runtime.Callers(3, pcs[:])
+	var st stack = pcs[0:n]
+	return &st
+}
+
+// funcname removes the path prefix component of a function's name reported by func.Name().
+func funcname(name string) string {
+	i := strings.LastIndex(name, "/")
+	name = name[i+1:]
+	i = strings.Index(name, ".")
+	return name[i+1:]
+}
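
As the package documentation notes, `stackTracer` is unexported but part of the stable interface, so callers can redeclare it locally to walk the recorded frames. A brief sketch:

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// stackTracer mirrors the unexported interface described in the package docs.
type stackTracer interface {
	StackTrace() errors.StackTrace
}

func main() {
	err := errors.New("boom")

	if st, ok := err.(stackTracer); ok {
		for _, f := range st.StackTrace() {
			fmt.Printf("%+s:%d\n", f, f)
		}
	}
}
```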
diff --git a/vendor/github.com/pmezard/go-difflib/LICENSE b/vendor/github.com/pmezard/go-difflib/LICENSE
new file mode 100644
index 00000000000..c67dad612a3
--- /dev/null
+++ b/vendor/github.com/pmezard/go-difflib/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2013, Patrick Mezard
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+    Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+    The names of its contributors may not be used to endorse or promote
+products derived from this software without specific prior written
+permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/pmezard/go-difflib/difflib/difflib.go b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go
new file mode 100644
index 00000000000..003e99fadb4
--- /dev/null
+++ b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go
@@ -0,0 +1,772 @@
+// Package difflib is a partial port of the Python difflib module.
+//
+// It provides tools to compare sequences of strings and generate textual diffs.
+//
+// The following class and functions have been ported:
+//
+// - SequenceMatcher
+//
+// - unified_diff
+//
+// - context_diff
+//
+// Getting unified diffs was the main goal of the port. Keep in mind this code
+// is mostly suitable to output text differences in a human-friendly way; there
+// are no guarantees that generated diffs are consumable by patch(1).
+package difflib
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"strings"
+)
+
+func min(a, b int) int {
+	if a < b {
+		return a
+	}
+	return b
+}
+
+func max(a, b int) int {
+	if a > b {
+		return a
+	}
+	return b
+}
+
+func calculateRatio(matches, length int) float64 {
+	if length > 0 {
+		return 2.0 * float64(matches) / float64(length)
+	}
+	return 1.0
+}
+
+type Match struct {
+	A    int
+	B    int
+	Size int
+}
+
+type OpCode struct {
+	Tag byte
+	I1  int
+	I2  int
+	J1  int
+	J2  int
+}
+
+// SequenceMatcher compares sequence of strings. The basic
+// algorithm predates, and is a little fancier than, an algorithm
+// published in the late 1980's by Ratcliff and Obershelp under the
+// hyperbolic name "gestalt pattern matching".  The basic idea is to find
+// the longest contiguous matching subsequence that contains no "junk"
+// elements (R-O doesn't address junk).  The same idea is then applied
+// recursively to the pieces of the sequences to the left and to the right
+// of the matching subsequence.  This does not yield minimal edit
+// sequences, but does tend to yield matches that "look right" to people.
+//
+// SequenceMatcher tries to compute a "human-friendly diff" between two
+// sequences.  Unlike e.g. UNIX(tm) diff, the fundamental notion is the
+// longest *contiguous* & junk-free matching subsequence.  That's what
+// catches peoples' eyes.  The Windows(tm) windiff has another interesting
+// notion, pairing up elements that appear uniquely in each sequence.
+// That, and the method here, appear to yield more intuitive difference
+// reports than does diff.  This method appears to be the least vulnerable
+// to synching up on blocks of "junk lines", though (like blank lines in
+// ordinary text files, or maybe "

" lines in HTML files). That may be +// because this is the only method of the 3 that has a *concept* of +// "junk" . +// +// Timing: Basic R-O is cubic time worst case and quadratic time expected +// case. SequenceMatcher is quadratic time for the worst case and has +// expected-case behavior dependent in a complicated way on how many +// elements the sequences have in common; best case time is linear. +type SequenceMatcher struct { + a []string + b []string + b2j map[string][]int + IsJunk func(string) bool + autoJunk bool + bJunk map[string]struct{} + matchingBlocks []Match + fullBCount map[string]int + bPopular map[string]struct{} + opCodes []OpCode +} + +func NewMatcher(a, b []string) *SequenceMatcher { + m := SequenceMatcher{autoJunk: true} + m.SetSeqs(a, b) + return &m +} + +func NewMatcherWithJunk(a, b []string, autoJunk bool, + isJunk func(string) bool) *SequenceMatcher { + + m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk} + m.SetSeqs(a, b) + return &m +} + +// Set two sequences to be compared. +func (m *SequenceMatcher) SetSeqs(a, b []string) { + m.SetSeq1(a) + m.SetSeq2(b) +} + +// Set the first sequence to be compared. The second sequence to be compared is +// not changed. +// +// SequenceMatcher computes and caches detailed information about the second +// sequence, so if you want to compare one sequence S against many sequences, +// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other +// sequences. +// +// See also SetSeqs() and SetSeq2(). +func (m *SequenceMatcher) SetSeq1(a []string) { + if &a == &m.a { + return + } + m.a = a + m.matchingBlocks = nil + m.opCodes = nil +} + +// Set the second sequence to be compared. The first sequence to be compared is +// not changed. +func (m *SequenceMatcher) SetSeq2(b []string) { + if &b == &m.b { + return + } + m.b = b + m.matchingBlocks = nil + m.opCodes = nil + m.fullBCount = nil + m.chainB() +} + +func (m *SequenceMatcher) chainB() { + // Populate line -> index mapping + b2j := map[string][]int{} + for i, s := range m.b { + indices := b2j[s] + indices = append(indices, i) + b2j[s] = indices + } + + // Purge junk elements + m.bJunk = map[string]struct{}{} + if m.IsJunk != nil { + junk := m.bJunk + for s, _ := range b2j { + if m.IsJunk(s) { + junk[s] = struct{}{} + } + } + for s, _ := range junk { + delete(b2j, s) + } + } + + // Purge remaining popular elements + popular := map[string]struct{}{} + n := len(m.b) + if m.autoJunk && n >= 200 { + ntest := n/100 + 1 + for s, indices := range b2j { + if len(indices) > ntest { + popular[s] = struct{}{} + } + } + for s, _ := range popular { + delete(b2j, s) + } + } + m.bPopular = popular + m.b2j = b2j +} + +func (m *SequenceMatcher) isBJunk(s string) bool { + _, ok := m.bJunk[s] + return ok +} + +// Find longest matching block in a[alo:ahi] and b[blo:bhi]. +// +// If IsJunk is not defined: +// +// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where +// alo <= i <= i+k <= ahi +// blo <= j <= j+k <= bhi +// and for all (i',j',k') meeting those conditions, +// k >= k' +// i <= i' +// and if i == i', j <= j' +// +// In other words, of all maximal matching blocks, return one that +// starts earliest in a, and of all those maximal matching blocks that +// start earliest in a, return the one that starts earliest in b. +// +// If IsJunk is defined, first the longest matching block is +// determined as above, but with the additional restriction that no +// junk element appears in the block. 
Then that block is extended as +// far as possible by matching (only) junk elements on both sides. So +// the resulting block never matches on junk except as identical junk +// happens to be adjacent to an "interesting" match. +// +// If no blocks match, return (alo, blo, 0). +func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match { + // CAUTION: stripping common prefix or suffix would be incorrect. + // E.g., + // ab + // acab + // Longest matching block is "ab", but if common prefix is + // stripped, it's "a" (tied with "b"). UNIX(tm) diff does so + // strip, so ends up claiming that ab is changed to acab by + // inserting "ca" in the middle. That's minimal but unintuitive: + // "it's obvious" that someone inserted "ac" at the front. + // Windiff ends up at the same place as diff, but by pairing up + // the unique 'b's and then matching the first two 'a's. + besti, bestj, bestsize := alo, blo, 0 + + // find longest junk-free match + // during an iteration of the loop, j2len[j] = length of longest + // junk-free match ending with a[i-1] and b[j] + j2len := map[int]int{} + for i := alo; i != ahi; i++ { + // look at all instances of a[i] in b; note that because + // b2j has no junk keys, the loop is skipped if a[i] is junk + newj2len := map[int]int{} + for _, j := range m.b2j[m.a[i]] { + // a[i] matches b[j] + if j < blo { + continue + } + if j >= bhi { + break + } + k := j2len[j-1] + 1 + newj2len[j] = k + if k > bestsize { + besti, bestj, bestsize = i-k+1, j-k+1, k + } + } + j2len = newj2len + } + + // Extend the best by non-junk elements on each end. In particular, + // "popular" non-junk elements aren't in b2j, which greatly speeds + // the inner loop above, but also means "the best" match so far + // doesn't contain any junk *or* popular non-junk elements. + for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) && + m.a[besti-1] == m.b[bestj-1] { + besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 + } + for besti+bestsize < ahi && bestj+bestsize < bhi && + !m.isBJunk(m.b[bestj+bestsize]) && + m.a[besti+bestsize] == m.b[bestj+bestsize] { + bestsize += 1 + } + + // Now that we have a wholly interesting match (albeit possibly + // empty!), we may as well suck up the matching junk on each + // side of it too. Can't think of a good reason not to, and it + // saves post-processing the (possibly considerable) expense of + // figuring out what to do with it. In the case of an empty + // interesting match, this is clearly the right thing to do, + // because no other kind of match is possible in the regions. + for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) && + m.a[besti-1] == m.b[bestj-1] { + besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 + } + for besti+bestsize < ahi && bestj+bestsize < bhi && + m.isBJunk(m.b[bestj+bestsize]) && + m.a[besti+bestsize] == m.b[bestj+bestsize] { + bestsize += 1 + } + + return Match{A: besti, B: bestj, Size: bestsize} +} + +// Return list of triples describing matching subsequences. +// +// Each triple is of the form (i, j, n), and means that +// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in +// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are +// adjacent triples in the list, and the second is not the last triple in the +// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe +// adjacent equal blocks. +// +// The last triple is a dummy, (len(a), len(b), 0), and is the only +// triple with n==0. 
+func (m *SequenceMatcher) GetMatchingBlocks() []Match { + if m.matchingBlocks != nil { + return m.matchingBlocks + } + + var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match + matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match { + match := m.findLongestMatch(alo, ahi, blo, bhi) + i, j, k := match.A, match.B, match.Size + if match.Size > 0 { + if alo < i && blo < j { + matched = matchBlocks(alo, i, blo, j, matched) + } + matched = append(matched, match) + if i+k < ahi && j+k < bhi { + matched = matchBlocks(i+k, ahi, j+k, bhi, matched) + } + } + return matched + } + matched := matchBlocks(0, len(m.a), 0, len(m.b), nil) + + // It's possible that we have adjacent equal blocks in the + // matching_blocks list now. + nonAdjacent := []Match{} + i1, j1, k1 := 0, 0, 0 + for _, b := range matched { + // Is this block adjacent to i1, j1, k1? + i2, j2, k2 := b.A, b.B, b.Size + if i1+k1 == i2 && j1+k1 == j2 { + // Yes, so collapse them -- this just increases the length of + // the first block by the length of the second, and the first + // block so lengthened remains the block to compare against. + k1 += k2 + } else { + // Not adjacent. Remember the first block (k1==0 means it's + // the dummy we started with), and make the second block the + // new block to compare against. + if k1 > 0 { + nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) + } + i1, j1, k1 = i2, j2, k2 + } + } + if k1 > 0 { + nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) + } + + nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0}) + m.matchingBlocks = nonAdjacent + return m.matchingBlocks +} + +// Return list of 5-tuples describing how to turn a into b. +// +// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple +// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the +// tuple preceding it, and likewise for j1 == the previous j2. +// +// The tags are characters, with these meanings: +// +// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2] +// +// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case. +// +// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case. +// +// 'e' (equal): a[i1:i2] == b[j1:j2] +func (m *SequenceMatcher) GetOpCodes() []OpCode { + if m.opCodes != nil { + return m.opCodes + } + i, j := 0, 0 + matching := m.GetMatchingBlocks() + opCodes := make([]OpCode, 0, len(matching)) + for _, m := range matching { + // invariant: we've pumped out correct diffs to change + // a[:i] into b[:j], and the next matching block is + // a[ai:ai+size] == b[bj:bj+size]. So we need to pump + // out a diff to change a[i:ai] into b[j:bj], pump out + // the matching block, and move (i,j) beyond the match + ai, bj, size := m.A, m.B, m.Size + tag := byte(0) + if i < ai && j < bj { + tag = 'r' + } else if i < ai { + tag = 'd' + } else if j < bj { + tag = 'i' + } + if tag > 0 { + opCodes = append(opCodes, OpCode{tag, i, ai, j, bj}) + } + i, j = ai+size, bj+size + // the list of matching blocks is terminated by a + // sentinel with size 0 + if size > 0 { + opCodes = append(opCodes, OpCode{'e', ai, i, bj, j}) + } + } + m.opCodes = opCodes + return m.opCodes +} + +// Isolate change clusters by eliminating ranges with no changes. +// +// Return a generator of groups with up to n lines of context. +// Each group is in the same format as returned by GetOpCodes(). 
+func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode { + if n < 0 { + n = 3 + } + codes := m.GetOpCodes() + if len(codes) == 0 { + codes = []OpCode{OpCode{'e', 0, 1, 0, 1}} + } + // Fixup leading and trailing groups if they show no changes. + if codes[0].Tag == 'e' { + c := codes[0] + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2} + } + if codes[len(codes)-1].Tag == 'e' { + c := codes[len(codes)-1] + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)} + } + nn := n + n + groups := [][]OpCode{} + group := []OpCode{} + for _, c := range codes { + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + // End the current group and start a new one whenever + // there is a large range with no changes. + if c.Tag == 'e' && i2-i1 > nn { + group = append(group, OpCode{c.Tag, i1, min(i2, i1+n), + j1, min(j2, j1+n)}) + groups = append(groups, group) + group = []OpCode{} + i1, j1 = max(i1, i2-n), max(j1, j2-n) + } + group = append(group, OpCode{c.Tag, i1, i2, j1, j2}) + } + if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') { + groups = append(groups, group) + } + return groups +} + +// Return a measure of the sequences' similarity (float in [0,1]). +// +// Where T is the total number of elements in both sequences, and +// M is the number of matches, this is 2.0*M / T. +// Note that this is 1 if the sequences are identical, and 0 if +// they have nothing in common. +// +// .Ratio() is expensive to compute if you haven't already computed +// .GetMatchingBlocks() or .GetOpCodes(), in which case you may +// want to try .QuickRatio() or .RealQuickRation() first to get an +// upper bound. +func (m *SequenceMatcher) Ratio() float64 { + matches := 0 + for _, m := range m.GetMatchingBlocks() { + matches += m.Size + } + return calculateRatio(matches, len(m.a)+len(m.b)) +} + +// Return an upper bound on ratio() relatively quickly. +// +// This isn't defined beyond that it is an upper bound on .Ratio(), and +// is faster to compute. +func (m *SequenceMatcher) QuickRatio() float64 { + // viewing a and b as multisets, set matches to the cardinality + // of their intersection; this counts the number of matches + // without regard to order, so is clearly an upper bound + if m.fullBCount == nil { + m.fullBCount = map[string]int{} + for _, s := range m.b { + m.fullBCount[s] = m.fullBCount[s] + 1 + } + } + + // avail[x] is the number of times x appears in 'b' less the + // number of times we've seen it in 'a' so far ... kinda + avail := map[string]int{} + matches := 0 + for _, s := range m.a { + n, ok := avail[s] + if !ok { + n = m.fullBCount[s] + } + avail[s] = n - 1 + if n > 0 { + matches += 1 + } + } + return calculateRatio(matches, len(m.a)+len(m.b)) +} + +// Return an upper bound on ratio() very quickly. +// +// This isn't defined beyond that it is an upper bound on .Ratio(), and +// is faster to compute than either .Ratio() or .QuickRatio(). 
+func (m *SequenceMatcher) RealQuickRatio() float64 { + la, lb := len(m.a), len(m.b) + return calculateRatio(min(la, lb), la+lb) +} + +// Convert range to the "ed" format +func formatRangeUnified(start, stop int) string { + // Per the diff spec at http://www.unix.org/single_unix_specification/ + beginning := start + 1 // lines start numbering with one + length := stop - start + if length == 1 { + return fmt.Sprintf("%d", beginning) + } + if length == 0 { + beginning -= 1 // empty ranges begin at line just before the range + } + return fmt.Sprintf("%d,%d", beginning, length) +} + +// Unified diff parameters +type UnifiedDiff struct { + A []string // First sequence lines + FromFile string // First file name + FromDate string // First file time + B []string // Second sequence lines + ToFile string // Second file name + ToDate string // Second file time + Eol string // Headers end of line, defaults to LF + Context int // Number of context lines +} + +// Compare two sequences of lines; generate the delta as a unified diff. +// +// Unified diffs are a compact way of showing line changes and a few +// lines of context. The number of context lines is set by 'n' which +// defaults to three. +// +// By default, the diff control lines (those with ---, +++, or @@) are +// created with a trailing newline. This is helpful so that inputs +// created from file.readlines() result in diffs that are suitable for +// file.writelines() since both the inputs and outputs have trailing +// newlines. +// +// For inputs that do not have trailing newlines, set the lineterm +// argument to "" so that the output will be uniformly newline free. +// +// The unidiff format normally has a header for filenames and modification +// times. Any or all of these may be specified using strings for +// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'. +// The modification times are normally expressed in the ISO 8601 format. 
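+//
+// A minimal usage sketch (file names and contents are assumed):
+//
+//	diff := UnifiedDiff{
+//	    A:        SplitLines("one\ntwo\nthree\n"),
+//	    B:        SplitLines("one\n2\nthree\n"),
+//	    FromFile: "before.txt",
+//	    ToFile:   "after.txt",
+//	    Context:  3,
+//	}
+//	text, _ := GetUnifiedDiffString(diff)
+//	_ = text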
+func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
+	buf := bufio.NewWriter(writer)
+	defer buf.Flush()
+	wf := func(format string, args ...interface{}) error {
+		_, err := buf.WriteString(fmt.Sprintf(format, args...))
+		return err
+	}
+	ws := func(s string) error {
+		_, err := buf.WriteString(s)
+		return err
+	}
+
+	if len(diff.Eol) == 0 {
+		diff.Eol = "\n"
+	}
+
+	started := false
+	m := NewMatcher(diff.A, diff.B)
+	for _, g := range m.GetGroupedOpCodes(diff.Context) {
+		if !started {
+			started = true
+			fromDate := ""
+			if len(diff.FromDate) > 0 {
+				fromDate = "\t" + diff.FromDate
+			}
+			toDate := ""
+			if len(diff.ToDate) > 0 {
+				toDate = "\t" + diff.ToDate
+			}
+			if diff.FromFile != "" || diff.ToFile != "" {
+				err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol)
+				if err != nil {
+					return err
+				}
+				err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol)
+				if err != nil {
+					return err
+				}
+			}
+		}
+		first, last := g[0], g[len(g)-1]
+		range1 := formatRangeUnified(first.I1, last.I2)
+		range2 := formatRangeUnified(first.J1, last.J2)
+		if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil {
+			return err
+		}
+		for _, c := range g {
+			i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+			if c.Tag == 'e' {
+				for _, line := range diff.A[i1:i2] {
+					if err := ws(" " + line); err != nil {
+						return err
+					}
+				}
+				continue
+			}
+			if c.Tag == 'r' || c.Tag == 'd' {
+				for _, line := range diff.A[i1:i2] {
+					if err := ws("-" + line); err != nil {
+						return err
+					}
+				}
+			}
+			if c.Tag == 'r' || c.Tag == 'i' {
+				for _, line := range diff.B[j1:j2] {
+					if err := ws("+" + line); err != nil {
+						return err
+					}
+				}
+			}
+		}
+	}
+	return nil
+}
+
+// Like WriteUnifiedDiff but returns the diff as a string.
+func GetUnifiedDiffString(diff UnifiedDiff) (string, error) {
+	w := &bytes.Buffer{}
+	err := WriteUnifiedDiff(w, diff)
+	return string(w.Bytes()), err
+}
+
+// Convert range to the "ed" format.
+func formatRangeContext(start, stop int) string {
+	// Per the diff spec at http://www.unix.org/single_unix_specification/
+	beginning := start + 1 // lines start numbering with one
+	length := stop - start
+	if length == 0 {
+		beginning -= 1 // empty ranges begin at line just before the range
+	}
+	if length <= 1 {
+		return fmt.Sprintf("%d", beginning)
+	}
+	return fmt.Sprintf("%d,%d", beginning, beginning+length-1)
+}
+
+type ContextDiff UnifiedDiff
+
+// Compare two sequences of lines; generate the delta as a context diff.
+//
+// Context diffs are a compact way of showing line changes and a few
+// lines of context. The number of context lines is set by diff.Context
+// which defaults to three.
+//
+// By default, the diff control lines (those with *** or ---) are
+// created with a trailing newline.
+//
+// For inputs that do not have trailing newlines, set the diff.Eol
+// argument to "" so that the output will be uniformly newline free.
+//
+// The context diff format normally has a header for filenames and
+// modification times. Any or all of these may be specified using
+// strings for diff.FromFile, diff.ToFile, diff.FromDate, diff.ToDate.
+// The modification times are normally expressed in the ISO 8601 format.
+// If not specified, the strings default to blanks.
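+//
+// A minimal usage sketch (inputs are assumed; ContextDiff shares
+// UnifiedDiff's field layout):
+//
+//	diff := ContextDiff{
+//	    A:        SplitLines("one\ntwo\n"),
+//	    B:        SplitLines("one\nTWO\n"),
+//	    FromFile: "a.txt",
+//	    ToFile:   "b.txt",
+//	    Context:  3,
+//	}
+//	text, _ := GetContextDiffString(diff)
+//	_ = text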
+func WriteContextDiff(writer io.Writer, diff ContextDiff) error {
+	buf := bufio.NewWriter(writer)
+	defer buf.Flush()
+	var diffErr error
+	wf := func(format string, args ...interface{}) {
+		_, err := buf.WriteString(fmt.Sprintf(format, args...))
+		if diffErr == nil && err != nil {
+			diffErr = err
+		}
+	}
+	ws := func(s string) {
+		_, err := buf.WriteString(s)
+		if diffErr == nil && err != nil {
+			diffErr = err
+		}
+	}
+
+	if len(diff.Eol) == 0 {
+		diff.Eol = "\n"
+	}
+
+	prefix := map[byte]string{
+		'i': "+ ",
+		'd': "- ",
+		'r': "! ",
+		'e': "  ",
+	}
+
+	started := false
+	m := NewMatcher(diff.A, diff.B)
+	for _, g := range m.GetGroupedOpCodes(diff.Context) {
+		if !started {
+			started = true
+			fromDate := ""
+			if len(diff.FromDate) > 0 {
+				fromDate = "\t" + diff.FromDate
+			}
+			toDate := ""
+			if len(diff.ToDate) > 0 {
+				toDate = "\t" + diff.ToDate
+			}
+			if diff.FromFile != "" || diff.ToFile != "" {
+				wf("*** %s%s%s", diff.FromFile, fromDate, diff.Eol)
+				wf("--- %s%s%s", diff.ToFile, toDate, diff.Eol)
+			}
+		}
+
+		first, last := g[0], g[len(g)-1]
+		ws("***************" + diff.Eol)
+
+		range1 := formatRangeContext(first.I1, last.I2)
+		wf("*** %s ****%s", range1, diff.Eol)
+		for _, c := range g {
+			if c.Tag == 'r' || c.Tag == 'd' {
+				for _, cc := range g {
+					if cc.Tag == 'i' {
+						continue
+					}
+					for _, line := range diff.A[cc.I1:cc.I2] {
+						ws(prefix[cc.Tag] + line)
+					}
+				}
+				break
+			}
+		}
+
+		range2 := formatRangeContext(first.J1, last.J2)
+		wf("--- %s ----%s", range2, diff.Eol)
+		for _, c := range g {
+			if c.Tag == 'r' || c.Tag == 'i' {
+				for _, cc := range g {
+					if cc.Tag == 'd' {
+						continue
+					}
+					for _, line := range diff.B[cc.J1:cc.J2] {
+						ws(prefix[cc.Tag] + line)
+					}
+				}
+				break
+			}
+		}
+	}
+	return diffErr
+}
+
+// Like WriteContextDiff but returns the diff as a string.
+func GetContextDiffString(diff ContextDiff) (string, error) {
+	w := &bytes.Buffer{}
+	err := WriteContextDiff(w, diff)
+	return string(w.Bytes()), err
+}
+
+// Split a string on "\n" while preserving them. The output can be used
+// as input for UnifiedDiff and ContextDiff structures.
+func SplitLines(s string) []string {
+	lines := strings.SplitAfter(s, "\n")
+	lines[len(lines)-1] += "\n"
+	return lines
+}
diff --git a/vendor/github.com/pmylund/go-cache/CONTRIBUTORS b/vendor/github.com/pmylund/go-cache/CONTRIBUTORS
new file mode 100644
index 00000000000..2b16e997415
--- /dev/null
+++ b/vendor/github.com/pmylund/go-cache/CONTRIBUTORS
@@ -0,0 +1,9 @@
+This is a list of people who have contributed code to go-cache. They, or their
+employers, are the copyright holders of the contributed code. Contributed code
+is subject to the license restrictions listed in LICENSE (as they were when the
+code was contributed.)
+
+Dustin Sallings
+Jason Mooberry
+Sergey Shepelev
+Alex Edwards
diff --git a/vendor/github.com/pmylund/go-cache/LICENSE b/vendor/github.com/pmylund/go-cache/LICENSE
new file mode 100644
index 00000000000..db9903c75c5
--- /dev/null
+++ b/vendor/github.com/pmylund/go-cache/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2012-2017 Patrick Mylund Nielsen and the go-cache contributors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/pmylund/go-cache/README.md b/vendor/github.com/pmylund/go-cache/README.md
new file mode 100644
index 00000000000..c5789cc66cc
--- /dev/null
+++ b/vendor/github.com/pmylund/go-cache/README.md
@@ -0,0 +1,83 @@
+# go-cache
+
+go-cache is an in-memory key:value store/cache similar to memcached that is
+suitable for applications running on a single machine. Its major advantage is
+that, being essentially a thread-safe `map[string]interface{}` with expiration
+times, it doesn't need to serialize or transmit its contents over the network.
+
+Any object can be stored, for a given duration or forever, and the cache can be
+safely used by multiple goroutines.
+
+Although go-cache isn't meant to be used as a persistent datastore, the entire
+cache can be saved to and loaded from a file (using `c.Items()` to retrieve the
+items map to serialize, and `NewFrom()` to create a cache from a deserialized
+one) to recover from downtime quickly. (See the docs for `NewFrom()` for caveats.)
+
+### Installation
+
+`go get github.com/patrickmn/go-cache`
+
+### Usage
+
+```go
+import (
+	"fmt"
+	"github.com/patrickmn/go-cache"
+	"time"
+)
+
+func main() {
+	// Create a cache with a default expiration time of 5 minutes, and which
+	// purges expired items every 10 minutes
+	c := cache.New(5*time.Minute, 10*time.Minute)
+
+	// Set the value of the key "foo" to "bar", with the default expiration time
+	c.Set("foo", "bar", cache.DefaultExpiration)
+
+	// Set the value of the key "baz" to 42, with no expiration time
+	// (the item won't be removed until it is re-set, or removed using
+	// c.Delete("baz"))
+	c.Set("baz", 42, cache.NoExpiration)
+
+	// Get the string associated with the key "foo" from the cache
+	foo, found := c.Get("foo")
+	if found {
+		fmt.Println(foo)
+	}
+
+	// Since Go is statically typed, and cache values can be anything, type
+	// assertion is needed when values are being passed to functions that don't
+	// take arbitrary types (i.e. interface{}). The simplest way to do this for
+	// values which will only be used once--e.g.
for passing to another + // function--is: + foo, found := c.Get("foo") + if found { + MyFunction(foo.(string)) + } + + // This gets tedious if the value is used several times in the same function. + // You might do either of the following instead: + if x, found := c.Get("foo"); found { + foo := x.(string) + // ... + } + // or + var foo string + if x, found := c.Get("foo"); found { + foo = x.(string) + } + // ... + // foo can then be passed around freely as a string + + // Want performance? Store pointers! + c.Set("foo", &MyStruct, cache.DefaultExpiration) + if x, found := c.Get("foo"); found { + foo := x.(*MyStruct) + // ... + } +} +``` + +### Reference + +`godoc` or [http://godoc.org/github.com/patrickmn/go-cache](http://godoc.org/github.com/patrickmn/go-cache) diff --git a/vendor/github.com/pmylund/go-cache/cache.go b/vendor/github.com/pmylund/go-cache/cache.go new file mode 100644 index 00000000000..db88d2f2cb1 --- /dev/null +++ b/vendor/github.com/pmylund/go-cache/cache.go @@ -0,0 +1,1161 @@ +package cache + +import ( + "encoding/gob" + "fmt" + "io" + "os" + "runtime" + "sync" + "time" +) + +type Item struct { + Object interface{} + Expiration int64 +} + +// Returns true if the item has expired. +func (item Item) Expired() bool { + if item.Expiration == 0 { + return false + } + return time.Now().UnixNano() > item.Expiration +} + +const ( + // For use with functions that take an expiration time. + NoExpiration time.Duration = -1 + // For use with functions that take an expiration time. Equivalent to + // passing in the same expiration duration as was given to New() or + // NewFrom() when the cache was created (e.g. 5 minutes.) + DefaultExpiration time.Duration = 0 +) + +type Cache struct { + *cache + // If this is confusing, see the comment at the bottom of New() +} + +type cache struct { + defaultExpiration time.Duration + items map[string]Item + mu sync.RWMutex + onEvicted func(string, interface{}) + janitor *janitor +} + +// Add an item to the cache, replacing any existing item. If the duration is 0 +// (DefaultExpiration), the cache's default expiration time is used. If it is -1 +// (NoExpiration), the item never expires. +func (c *cache) Set(k string, x interface{}, d time.Duration) { + // "Inlining" of set + var e int64 + if d == DefaultExpiration { + d = c.defaultExpiration + } + if d > 0 { + e = time.Now().Add(d).UnixNano() + } + c.mu.Lock() + c.items[k] = Item{ + Object: x, + Expiration: e, + } + // TODO: Calls to mu.Unlock are currently not deferred because defer + // adds ~200 ns (as of go1.) + c.mu.Unlock() +} + +func (c *cache) set(k string, x interface{}, d time.Duration) { + var e int64 + if d == DefaultExpiration { + d = c.defaultExpiration + } + if d > 0 { + e = time.Now().Add(d).UnixNano() + } + c.items[k] = Item{ + Object: x, + Expiration: e, + } +} + +// Add an item to the cache, replacing any existing item, using the default +// expiration. +func (c *cache) SetDefault(k string, x interface{}) { + c.Set(k, x, DefaultExpiration) +} + +// Add an item to the cache only if an item doesn't already exist for the given +// key, or if the existing item has expired. Returns an error otherwise. +func (c *cache) Add(k string, x interface{}, d time.Duration) error { + c.mu.Lock() + _, found := c.get(k) + if found { + c.mu.Unlock() + return fmt.Errorf("Item %s already exists", k) + } + c.set(k, x, d) + c.mu.Unlock() + return nil +} + +// Set a new value for the cache key only if it already exists, and the existing +// item hasn't expired. Returns an error otherwise. 
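+//
+// A short usage sketch (the key and value here are assumed):
+//
+//	if err := c.Replace("session", token, cache.DefaultExpiration); err != nil {
+//	    // the key was missing or expired; store it unconditionally instead
+//	    c.Set("session", token, cache.DefaultExpiration)
+//	}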
+func (c *cache) Replace(k string, x interface{}, d time.Duration) error { + c.mu.Lock() + _, found := c.get(k) + if !found { + c.mu.Unlock() + return fmt.Errorf("Item %s doesn't exist", k) + } + c.set(k, x, d) + c.mu.Unlock() + return nil +} + +// Get an item from the cache. Returns the item or nil, and a bool indicating +// whether the key was found. +func (c *cache) Get(k string) (interface{}, bool) { + c.mu.RLock() + // "Inlining" of get and Expired + item, found := c.items[k] + if !found { + c.mu.RUnlock() + return nil, false + } + if item.Expiration > 0 { + if time.Now().UnixNano() > item.Expiration { + c.mu.RUnlock() + return nil, false + } + } + c.mu.RUnlock() + return item.Object, true +} + +// GetWithExpiration returns an item and its expiration time from the cache. +// It returns the item or nil, the expiration time if one is set (if the item +// never expires a zero value for time.Time is returned), and a bool indicating +// whether the key was found. +func (c *cache) GetWithExpiration(k string) (interface{}, time.Time, bool) { + c.mu.RLock() + // "Inlining" of get and Expired + item, found := c.items[k] + if !found { + c.mu.RUnlock() + return nil, time.Time{}, false + } + + if item.Expiration > 0 { + if time.Now().UnixNano() > item.Expiration { + c.mu.RUnlock() + return nil, time.Time{}, false + } + + // Return the item and the expiration time + c.mu.RUnlock() + return item.Object, time.Unix(0, item.Expiration), true + } + + // If expiration <= 0 (i.e. no expiration time set) then return the item + // and a zeroed time.Time + c.mu.RUnlock() + return item.Object, time.Time{}, true +} + +func (c *cache) get(k string) (interface{}, bool) { + item, found := c.items[k] + if !found { + return nil, false + } + // "Inlining" of Expired + if item.Expiration > 0 { + if time.Now().UnixNano() > item.Expiration { + return nil, false + } + } + return item.Object, true +} + +// Increment an item of type int, int8, int16, int32, int64, uintptr, uint, +// uint8, uint32, or uint64, float32 or float64 by n. Returns an error if the +// item's value is not an integer, if it was not found, or if it is not +// possible to increment it by n. To retrieve the incremented value, use one +// of the specialized methods, e.g. IncrementInt64. +func (c *cache) Increment(k string, n int64) error { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return fmt.Errorf("Item %s not found", k) + } + switch v.Object.(type) { + case int: + v.Object = v.Object.(int) + int(n) + case int8: + v.Object = v.Object.(int8) + int8(n) + case int16: + v.Object = v.Object.(int16) + int16(n) + case int32: + v.Object = v.Object.(int32) + int32(n) + case int64: + v.Object = v.Object.(int64) + n + case uint: + v.Object = v.Object.(uint) + uint(n) + case uintptr: + v.Object = v.Object.(uintptr) + uintptr(n) + case uint8: + v.Object = v.Object.(uint8) + uint8(n) + case uint16: + v.Object = v.Object.(uint16) + uint16(n) + case uint32: + v.Object = v.Object.(uint32) + uint32(n) + case uint64: + v.Object = v.Object.(uint64) + uint64(n) + case float32: + v.Object = v.Object.(float32) + float32(n) + case float64: + v.Object = v.Object.(float64) + float64(n) + default: + c.mu.Unlock() + return fmt.Errorf("The value for %s is not an integer", k) + } + c.items[k] = v + c.mu.Unlock() + return nil +} + +// Increment an item of type float32 or float64 by n. Returns an error if the +// item's value is not floating point, if it was not found, or if it is not +// possible to increment it by n. 
Pass a negative number to decrement the +// value. To retrieve the incremented value, use one of the specialized methods, +// e.g. IncrementFloat64. +func (c *cache) IncrementFloat(k string, n float64) error { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return fmt.Errorf("Item %s not found", k) + } + switch v.Object.(type) { + case float32: + v.Object = v.Object.(float32) + float32(n) + case float64: + v.Object = v.Object.(float64) + n + default: + c.mu.Unlock() + return fmt.Errorf("The value for %s does not have type float32 or float64", k) + } + c.items[k] = v + c.mu.Unlock() + return nil +} + +// Increment an item of type int by n. Returns an error if the item's value is +// not an int, or if it was not found. If there is no error, the incremented +// value is returned. +func (c *cache) IncrementInt(k string, n int) (int, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(int) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an int", k) + } + nv := rv + n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Increment an item of type int8 by n. Returns an error if the item's value is +// not an int8, or if it was not found. If there is no error, the incremented +// value is returned. +func (c *cache) IncrementInt8(k string, n int8) (int8, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(int8) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an int8", k) + } + nv := rv + n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Increment an item of type int16 by n. Returns an error if the item's value is +// not an int16, or if it was not found. If there is no error, the incremented +// value is returned. +func (c *cache) IncrementInt16(k string, n int16) (int16, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(int16) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an int16", k) + } + nv := rv + n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Increment an item of type int32 by n. Returns an error if the item's value is +// not an int32, or if it was not found. If there is no error, the incremented +// value is returned. +func (c *cache) IncrementInt32(k string, n int32) (int32, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(int32) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an int32", k) + } + nv := rv + n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Increment an item of type int64 by n. Returns an error if the item's value is +// not an int64, or if it was not found. If there is no error, the incremented +// value is returned. 
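+//
+// A brief usage sketch (the key and starting value are assumed):
+//
+//	c.Set("hits", int64(0), cache.NoExpiration)
+//	n, err := c.IncrementInt64("hits", 1)
+//	// n is the value after the increment; err is non-nil when the key
+//	// is missing, expired, or not an int64
+//	_, _ = n, err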
+func (c *cache) IncrementInt64(k string, n int64) (int64, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(int64) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an int64", k) + } + nv := rv + n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Increment an item of type uint by n. Returns an error if the item's value is +// not an uint, or if it was not found. If there is no error, the incremented +// value is returned. +func (c *cache) IncrementUint(k string, n uint) (uint, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(uint) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an uint", k) + } + nv := rv + n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Increment an item of type uintptr by n. Returns an error if the item's value +// is not an uintptr, or if it was not found. If there is no error, the +// incremented value is returned. +func (c *cache) IncrementUintptr(k string, n uintptr) (uintptr, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(uintptr) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an uintptr", k) + } + nv := rv + n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Increment an item of type uint8 by n. Returns an error if the item's value +// is not an uint8, or if it was not found. If there is no error, the +// incremented value is returned. +func (c *cache) IncrementUint8(k string, n uint8) (uint8, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(uint8) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an uint8", k) + } + nv := rv + n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Increment an item of type uint16 by n. Returns an error if the item's value +// is not an uint16, or if it was not found. If there is no error, the +// incremented value is returned. +func (c *cache) IncrementUint16(k string, n uint16) (uint16, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(uint16) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an uint16", k) + } + nv := rv + n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Increment an item of type uint32 by n. Returns an error if the item's value +// is not an uint32, or if it was not found. If there is no error, the +// incremented value is returned. +func (c *cache) IncrementUint32(k string, n uint32) (uint32, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(uint32) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an uint32", k) + } + nv := rv + n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Increment an item of type uint64 by n. Returns an error if the item's value +// is not an uint64, or if it was not found. 
If there is no error, the +// incremented value is returned. +func (c *cache) IncrementUint64(k string, n uint64) (uint64, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(uint64) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an uint64", k) + } + nv := rv + n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Increment an item of type float32 by n. Returns an error if the item's value +// is not an float32, or if it was not found. If there is no error, the +// incremented value is returned. +func (c *cache) IncrementFloat32(k string, n float32) (float32, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(float32) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an float32", k) + } + nv := rv + n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Increment an item of type float64 by n. Returns an error if the item's value +// is not an float64, or if it was not found. If there is no error, the +// incremented value is returned. +func (c *cache) IncrementFloat64(k string, n float64) (float64, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(float64) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an float64", k) + } + nv := rv + n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Decrement an item of type int, int8, int16, int32, int64, uintptr, uint, +// uint8, uint32, or uint64, float32 or float64 by n. Returns an error if the +// item's value is not an integer, if it was not found, or if it is not +// possible to decrement it by n. To retrieve the decremented value, use one +// of the specialized methods, e.g. DecrementInt64. +func (c *cache) Decrement(k string, n int64) error { + // TODO: Implement Increment and Decrement more cleanly. + // (Cannot do Increment(k, n*-1) for uints.) + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return fmt.Errorf("Item not found") + } + switch v.Object.(type) { + case int: + v.Object = v.Object.(int) - int(n) + case int8: + v.Object = v.Object.(int8) - int8(n) + case int16: + v.Object = v.Object.(int16) - int16(n) + case int32: + v.Object = v.Object.(int32) - int32(n) + case int64: + v.Object = v.Object.(int64) - n + case uint: + v.Object = v.Object.(uint) - uint(n) + case uintptr: + v.Object = v.Object.(uintptr) - uintptr(n) + case uint8: + v.Object = v.Object.(uint8) - uint8(n) + case uint16: + v.Object = v.Object.(uint16) - uint16(n) + case uint32: + v.Object = v.Object.(uint32) - uint32(n) + case uint64: + v.Object = v.Object.(uint64) - uint64(n) + case float32: + v.Object = v.Object.(float32) - float32(n) + case float64: + v.Object = v.Object.(float64) - float64(n) + default: + c.mu.Unlock() + return fmt.Errorf("The value for %s is not an integer", k) + } + c.items[k] = v + c.mu.Unlock() + return nil +} + +// Decrement an item of type float32 or float64 by n. Returns an error if the +// item's value is not floating point, if it was not found, or if it is not +// possible to decrement it by n. Pass a negative number to decrement the +// value. 
To retrieve the decremented value, use one of the specialized methods, +// e.g. DecrementFloat64. +func (c *cache) DecrementFloat(k string, n float64) error { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return fmt.Errorf("Item %s not found", k) + } + switch v.Object.(type) { + case float32: + v.Object = v.Object.(float32) - float32(n) + case float64: + v.Object = v.Object.(float64) - n + default: + c.mu.Unlock() + return fmt.Errorf("The value for %s does not have type float32 or float64", k) + } + c.items[k] = v + c.mu.Unlock() + return nil +} + +// Decrement an item of type int by n. Returns an error if the item's value is +// not an int, or if it was not found. If there is no error, the decremented +// value is returned. +func (c *cache) DecrementInt(k string, n int) (int, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(int) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an int", k) + } + nv := rv - n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Decrement an item of type int8 by n. Returns an error if the item's value is +// not an int8, or if it was not found. If there is no error, the decremented +// value is returned. +func (c *cache) DecrementInt8(k string, n int8) (int8, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(int8) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an int8", k) + } + nv := rv - n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Decrement an item of type int16 by n. Returns an error if the item's value is +// not an int16, or if it was not found. If there is no error, the decremented +// value is returned. +func (c *cache) DecrementInt16(k string, n int16) (int16, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(int16) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an int16", k) + } + nv := rv - n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Decrement an item of type int32 by n. Returns an error if the item's value is +// not an int32, or if it was not found. If there is no error, the decremented +// value is returned. +func (c *cache) DecrementInt32(k string, n int32) (int32, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(int32) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an int32", k) + } + nv := rv - n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Decrement an item of type int64 by n. Returns an error if the item's value is +// not an int64, or if it was not found. If there is no error, the decremented +// value is returned. 
+func (c *cache) DecrementInt64(k string, n int64) (int64, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(int64) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an int64", k) + } + nv := rv - n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Decrement an item of type uint by n. Returns an error if the item's value is +// not an uint, or if it was not found. If there is no error, the decremented +// value is returned. +func (c *cache) DecrementUint(k string, n uint) (uint, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(uint) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an uint", k) + } + nv := rv - n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Decrement an item of type uintptr by n. Returns an error if the item's value +// is not an uintptr, or if it was not found. If there is no error, the +// decremented value is returned. +func (c *cache) DecrementUintptr(k string, n uintptr) (uintptr, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(uintptr) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an uintptr", k) + } + nv := rv - n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Decrement an item of type uint8 by n. Returns an error if the item's value is +// not an uint8, or if it was not found. If there is no error, the decremented +// value is returned. +func (c *cache) DecrementUint8(k string, n uint8) (uint8, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(uint8) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an uint8", k) + } + nv := rv - n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Decrement an item of type uint16 by n. Returns an error if the item's value +// is not an uint16, or if it was not found. If there is no error, the +// decremented value is returned. +func (c *cache) DecrementUint16(k string, n uint16) (uint16, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(uint16) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an uint16", k) + } + nv := rv - n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Decrement an item of type uint32 by n. Returns an error if the item's value +// is not an uint32, or if it was not found. If there is no error, the +// decremented value is returned. +func (c *cache) DecrementUint32(k string, n uint32) (uint32, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(uint32) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an uint32", k) + } + nv := rv - n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Decrement an item of type uint64 by n. Returns an error if the item's value +// is not an uint64, or if it was not found. 
If there is no error, the +// decremented value is returned. +func (c *cache) DecrementUint64(k string, n uint64) (uint64, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(uint64) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an uint64", k) + } + nv := rv - n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Decrement an item of type float32 by n. Returns an error if the item's value +// is not an float32, or if it was not found. If there is no error, the +// decremented value is returned. +func (c *cache) DecrementFloat32(k string, n float32) (float32, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(float32) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an float32", k) + } + nv := rv - n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Decrement an item of type float64 by n. Returns an error if the item's value +// is not an float64, or if it was not found. If there is no error, the +// decremented value is returned. +func (c *cache) DecrementFloat64(k string, n float64) (float64, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(float64) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an float64", k) + } + nv := rv - n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Delete an item from the cache. Does nothing if the key is not in the cache. +func (c *cache) Delete(k string) { + c.mu.Lock() + v, evicted := c.delete(k) + c.mu.Unlock() + if evicted { + c.onEvicted(k, v) + } +} + +func (c *cache) delete(k string) (interface{}, bool) { + if c.onEvicted != nil { + if v, found := c.items[k]; found { + delete(c.items, k) + return v.Object, true + } + } + delete(c.items, k) + return nil, false +} + +type keyAndValue struct { + key string + value interface{} +} + +// Delete all expired items from the cache. +func (c *cache) DeleteExpired() { + var evictedItems []keyAndValue + now := time.Now().UnixNano() + c.mu.Lock() + for k, v := range c.items { + // "Inlining" of expired + if v.Expiration > 0 && now > v.Expiration { + ov, evicted := c.delete(k) + if evicted { + evictedItems = append(evictedItems, keyAndValue{k, ov}) + } + } + } + c.mu.Unlock() + for _, v := range evictedItems { + c.onEvicted(v.key, v.value) + } +} + +// Sets an (optional) function that is called with the key and value when an +// item is evicted from the cache. (Including when it is deleted manually, but +// not when it is overwritten.) Set to nil to disable. +func (c *cache) OnEvicted(f func(string, interface{})) { + c.mu.Lock() + c.onEvicted = f + c.mu.Unlock() +} + +// Write the cache's items (using Gob) to an io.Writer. +// +// NOTE: This method is deprecated in favor of c.Items() and NewFrom() (see the +// documentation for NewFrom().) 
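+//
+// A hedged sketch of the preferred Items()/NewFrom() round trip (the
+// encoding step and the `loaded` map are assumed):
+//
+//	snapshot := c.Items() // unexpired entries only
+//	// ... gob-encode snapshot somewhere, later decode it into loaded ...
+//	restored := cache.NewFrom(5*time.Minute, 10*time.Minute, loaded)
+//	_, _ = snapshot, restored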
+func (c *cache) Save(w io.Writer) (err error) { + enc := gob.NewEncoder(w) + defer func() { + if x := recover(); x != nil { + err = fmt.Errorf("Error registering item types with Gob library") + } + }() + c.mu.RLock() + defer c.mu.RUnlock() + for _, v := range c.items { + gob.Register(v.Object) + } + err = enc.Encode(&c.items) + return +} + +// Save the cache's items to the given filename, creating the file if it +// doesn't exist, and overwriting it if it does. +// +// NOTE: This method is deprecated in favor of c.Items() and NewFrom() (see the +// documentation for NewFrom().) +func (c *cache) SaveFile(fname string) error { + fp, err := os.Create(fname) + if err != nil { + return err + } + err = c.Save(fp) + if err != nil { + fp.Close() + return err + } + return fp.Close() +} + +// Add (Gob-serialized) cache items from an io.Reader, excluding any items with +// keys that already exist (and haven't expired) in the current cache. +// +// NOTE: This method is deprecated in favor of c.Items() and NewFrom() (see the +// documentation for NewFrom().) +func (c *cache) Load(r io.Reader) error { + dec := gob.NewDecoder(r) + items := map[string]Item{} + err := dec.Decode(&items) + if err == nil { + c.mu.Lock() + defer c.mu.Unlock() + for k, v := range items { + ov, found := c.items[k] + if !found || ov.Expired() { + c.items[k] = v + } + } + } + return err +} + +// Load and add cache items from the given filename, excluding any items with +// keys that already exist in the current cache. +// +// NOTE: This method is deprecated in favor of c.Items() and NewFrom() (see the +// documentation for NewFrom().) +func (c *cache) LoadFile(fname string) error { + fp, err := os.Open(fname) + if err != nil { + return err + } + err = c.Load(fp) + if err != nil { + fp.Close() + return err + } + return fp.Close() +} + +// Copies all unexpired items in the cache into a new map and returns it. +func (c *cache) Items() map[string]Item { + c.mu.RLock() + defer c.mu.RUnlock() + m := make(map[string]Item, len(c.items)) + now := time.Now().UnixNano() + for k, v := range c.items { + // "Inlining" of Expired + if v.Expiration > 0 { + if now > v.Expiration { + continue + } + } + m[k] = v + } + return m +} + +// Returns the number of items in the cache. This may include items that have +// expired, but have not yet been cleaned up. +func (c *cache) ItemCount() int { + c.mu.RLock() + n := len(c.items) + c.mu.RUnlock() + return n +} + +// Delete all items from the cache. +func (c *cache) Flush() { + c.mu.Lock() + c.items = map[string]Item{} + c.mu.Unlock() +} + +type janitor struct { + Interval time.Duration + stop chan bool +} + +func (j *janitor) Run(c *cache) { + ticker := time.NewTicker(j.Interval) + for { + select { + case <-ticker.C: + c.DeleteExpired() + case <-j.stop: + ticker.Stop() + return + } + } +} + +func stopJanitor(c *Cache) { + c.janitor.stop <- true +} + +func runJanitor(c *cache, ci time.Duration) { + j := &janitor{ + Interval: ci, + stop: make(chan bool), + } + c.janitor = j + go j.Run(c) +} + +func newCache(de time.Duration, m map[string]Item) *cache { + if de == 0 { + de = -1 + } + c := &cache{ + defaultExpiration: de, + items: m, + } + return c +} + +func newCacheWithJanitor(de time.Duration, ci time.Duration, m map[string]Item) *Cache { + c := newCache(de, m) + // This trick ensures that the janitor goroutine (which--granted it + // was enabled--is running DeleteExpired on c forever) does not keep + // the returned C object from being garbage collected. 
When it is + // garbage collected, the finalizer stops the janitor goroutine, after + // which c can be collected. + C := &Cache{c} + if ci > 0 { + runJanitor(c, ci) + runtime.SetFinalizer(C, stopJanitor) + } + return C +} + +// Return a new cache with a given default expiration duration and cleanup +// interval. If the expiration duration is less than one (or NoExpiration), +// the items in the cache never expire (by default), and must be deleted +// manually. If the cleanup interval is less than one, expired items are not +// deleted from the cache before calling c.DeleteExpired(). +func New(defaultExpiration, cleanupInterval time.Duration) *Cache { + items := make(map[string]Item) + return newCacheWithJanitor(defaultExpiration, cleanupInterval, items) +} + +// Return a new cache with a given default expiration duration and cleanup +// interval. If the expiration duration is less than one (or NoExpiration), +// the items in the cache never expire (by default), and must be deleted +// manually. If the cleanup interval is less than one, expired items are not +// deleted from the cache before calling c.DeleteExpired(). +// +// NewFrom() also accepts an items map which will serve as the underlying map +// for the cache. This is useful for starting from a deserialized cache +// (serialized using e.g. gob.Encode() on c.Items()), or passing in e.g. +// make(map[string]Item, 500) to improve startup performance when the cache +// is expected to reach a certain minimum size. +// +// Only the cache's methods synchronize access to this map, so it is not +// recommended to keep any references to the map around after creating a cache. +// If need be, the map can be accessed at a later point using c.Items() (subject +// to the same caveat.) +// +// Note regarding serialization: When using e.g. gob, make sure to +// gob.Register() the individual types stored in the cache before encoding a +// map retrieved with c.Items(), and to register those same types before +// decoding a blob containing an items map. +func NewFrom(defaultExpiration, cleanupInterval time.Duration, items map[string]Item) *Cache { + return newCacheWithJanitor(defaultExpiration, cleanupInterval, items) +} diff --git a/vendor/github.com/pmylund/go-cache/sharded.go b/vendor/github.com/pmylund/go-cache/sharded.go new file mode 100644 index 00000000000..bcc0538bcc7 --- /dev/null +++ b/vendor/github.com/pmylund/go-cache/sharded.go @@ -0,0 +1,192 @@ +package cache + +import ( + "crypto/rand" + "math" + "math/big" + insecurerand "math/rand" + "os" + "runtime" + "time" +) + +// This is an experimental and unexported (for now) attempt at making a cache +// with better algorithmic complexity than the standard one, namely by +// preventing write locks of the entire cache when an item is added. As of the +// time of writing, the overhead of selecting buckets results in cache +// operations being about twice as slow as for the standard cache with small +// total cache sizes, and faster for larger ones. +// +// See cache_test.go for a few benchmarks. + +type unexportedShardedCache struct { + *shardedCache +} + +type shardedCache struct { + seed uint32 + m uint32 + cs []*cache + janitor *shardedJanitor +} + +// djb2 with better shuffling. 5x faster than FNV with the hash.Hash overhead. +func djb33(seed uint32, k string) uint32 { + var ( + l = uint32(len(k)) + d = 5381 + seed + l + i = uint32(0) + ) + // Why is all this 5x faster than a for loop? 
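+	// (Presumably manual four-way unrolling: it amortizes the loop
+	// bounds check and keeps d in a register, with the remainder
+	// handled by the switch below. This is an inference, not an
+	// upstream claim.)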
+ if l >= 4 { + for i < l-4 { + d = (d * 33) ^ uint32(k[i]) + d = (d * 33) ^ uint32(k[i+1]) + d = (d * 33) ^ uint32(k[i+2]) + d = (d * 33) ^ uint32(k[i+3]) + i += 4 + } + } + switch l - i { + case 1: + case 2: + d = (d * 33) ^ uint32(k[i]) + case 3: + d = (d * 33) ^ uint32(k[i]) + d = (d * 33) ^ uint32(k[i+1]) + case 4: + d = (d * 33) ^ uint32(k[i]) + d = (d * 33) ^ uint32(k[i+1]) + d = (d * 33) ^ uint32(k[i+2]) + } + return d ^ (d >> 16) +} + +func (sc *shardedCache) bucket(k string) *cache { + return sc.cs[djb33(sc.seed, k)%sc.m] +} + +func (sc *shardedCache) Set(k string, x interface{}, d time.Duration) { + sc.bucket(k).Set(k, x, d) +} + +func (sc *shardedCache) Add(k string, x interface{}, d time.Duration) error { + return sc.bucket(k).Add(k, x, d) +} + +func (sc *shardedCache) Replace(k string, x interface{}, d time.Duration) error { + return sc.bucket(k).Replace(k, x, d) +} + +func (sc *shardedCache) Get(k string) (interface{}, bool) { + return sc.bucket(k).Get(k) +} + +func (sc *shardedCache) Increment(k string, n int64) error { + return sc.bucket(k).Increment(k, n) +} + +func (sc *shardedCache) IncrementFloat(k string, n float64) error { + return sc.bucket(k).IncrementFloat(k, n) +} + +func (sc *shardedCache) Decrement(k string, n int64) error { + return sc.bucket(k).Decrement(k, n) +} + +func (sc *shardedCache) Delete(k string) { + sc.bucket(k).Delete(k) +} + +func (sc *shardedCache) DeleteExpired() { + for _, v := range sc.cs { + v.DeleteExpired() + } +} + +// Returns the items in the cache. This may include items that have expired, +// but have not yet been cleaned up. If this is significant, the Expiration +// fields of the items should be checked. Note that explicit synchronization +// is needed to use a cache and its corresponding Items() return values at +// the same time, as the maps are shared. +func (sc *shardedCache) Items() []map[string]Item { + res := make([]map[string]Item, len(sc.cs)) + for i, v := range sc.cs { + res[i] = v.Items() + } + return res +} + +func (sc *shardedCache) Flush() { + for _, v := range sc.cs { + v.Flush() + } +} + +type shardedJanitor struct { + Interval time.Duration + stop chan bool +} + +func (j *shardedJanitor) Run(sc *shardedCache) { + j.stop = make(chan bool) + tick := time.Tick(j.Interval) + for { + select { + case <-tick: + sc.DeleteExpired() + case <-j.stop: + return + } + } +} + +func stopShardedJanitor(sc *unexportedShardedCache) { + sc.janitor.stop <- true +} + +func runShardedJanitor(sc *shardedCache, ci time.Duration) { + j := &shardedJanitor{ + Interval: ci, + } + sc.janitor = j + go j.Run(sc) +} + +func newShardedCache(n int, de time.Duration) *shardedCache { + max := big.NewInt(0).SetUint64(uint64(math.MaxUint32)) + rnd, err := rand.Int(rand.Reader, max) + var seed uint32 + if err != nil { + os.Stderr.Write([]byte("WARNING: go-cache's newShardedCache failed to read from the system CSPRNG (/dev/urandom or equivalent.) Your system's security may be compromised. 
Continuing with an insecure seed.\n"))
+		seed = insecurerand.Uint32()
+	} else {
+		seed = uint32(rnd.Uint64())
+	}
+	sc := &shardedCache{
+		seed: seed,
+		m:    uint32(n),
+		cs:   make([]*cache, n),
+	}
+	for i := 0; i < n; i++ {
+		c := &cache{
+			defaultExpiration: de,
+			items:             map[string]Item{},
+		}
+		sc.cs[i] = c
+	}
+	return sc
+}
+
+func unexportedNewSharded(defaultExpiration, cleanupInterval time.Duration, shards int) *unexportedShardedCache {
+	if defaultExpiration == 0 {
+		defaultExpiration = -1
+	}
+	sc := newShardedCache(shards, defaultExpiration)
+	SC := &unexportedShardedCache{sc}
+	if cleanupInterval > 0 {
+		runShardedJanitor(sc, cleanupInterval)
+		runtime.SetFinalizer(SC, stopShardedJanitor)
+	}
+	return SC
+}
diff --git a/vendor/github.com/qri-io/jsonpointer/.gitignore b/vendor/github.com/qri-io/jsonpointer/.gitignore
new file mode 100644
index 00000000000..f45e81b44f0
--- /dev/null
+++ b/vendor/github.com/qri-io/jsonpointer/.gitignore
@@ -0,0 +1,2 @@
+.DS_Store
+coverage.txt
\ No newline at end of file
diff --git a/vendor/github.com/qri-io/jsonpointer/CHANGELOG.md b/vendor/github.com/qri-io/jsonpointer/CHANGELOG.md
new file mode 100644
index 00000000000..8be0635a57e
--- /dev/null
+++ b/vendor/github.com/qri-io/jsonpointer/CHANGELOG.md
@@ -0,0 +1,29 @@
+# (2020-05-06)
+
+This is an update to jsonpointer. It adds usability functions and options for performance-optimized use.
+
+### Bug Fixes
+
+* **Test:** fix failing tests when using go 1.14. [c51da06](https://github.com/qri-io/jsonpointer/commit/c51da06b3a9796e12c0a8309b728b015c01387c0)
+
+### Features
+
+* **Head,Tail,IsEmpty:** added methods to get the first token, all tokens after the head, and to check if a given pointer is empty [c51da06](https://github.com/qri-io/jsonpointer/commit/c51da06b3a9796e12c0a8309b728b015c01387c0)
+* **RawDescendant,NewPointer:** methods that allow appending directly to the current pointer without safety checks, and a way to create a pointer with pre-allocated memory for performance-intensive use cases [c51da06](https://github.com/qri-io/jsonpointer/commit/c51da06b3a9796e12c0a8309b728b015c01387c0)
+
+# (2019-05-23)
+
+This is the first proper release of jsonpointer. In preparation for go 1.13, in which `go.mod` files and go modules are the primary way to handle go dependencies, we are going to do an official release of all our modules. This will be version v0.1.0 of jsonpointer.
+ +### Bug Fixes + +* **Parse:** fix incorrect handling of empty url fragment strings ([5919095](https://github.com/qri-io/jsonpointer/commit/5919095)) + + +### Features + +* **Descendant,WalkJSON:** added pointer descendant method, experimental WalkJSON func ([707e879](https://github.com/qri-io/jsonpointer/commit/707e879)) +* initial commit ([448ab45](https://github.com/qri-io/jsonpointer/commit/448ab45)) + + + diff --git a/vendor/github.com/qri-io/jsonpointer/LICENSE b/vendor/github.com/qri-io/jsonpointer/LICENSE new file mode 100644 index 00000000000..75c1a28d709 --- /dev/null +++ b/vendor/github.com/qri-io/jsonpointer/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2017 Brendan O'Brien + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/qri-io/jsonpointer/Makefile b/vendor/github.com/qri-io/jsonpointer/Makefile new file mode 100644 index 00000000000..d025612f4e4 --- /dev/null +++ b/vendor/github.com/qri-io/jsonpointer/Makefile @@ -0,0 +1,3 @@ +# Let's keep all our changelog commands the same across all our packages: +update-changelog: + conventional-changelog -p angular -i CHANGELOG.md -s \ No newline at end of file diff --git a/vendor/github.com/qri-io/jsonpointer/README.md b/vendor/github.com/qri-io/jsonpointer/README.md new file mode 100644 index 00000000000..3f973accbe5 --- /dev/null +++ b/vendor/github.com/qri-io/jsonpointer/README.md @@ -0,0 +1,62 @@ +[![Qri](https://img.shields.io/badge/made%20by-qri-magenta.svg?style=flat-square)](https://qri.io) +[![GoDoc](https://godoc.org/github.com/qri-io/jsonpointer?status.svg)](http://godoc.org/github.com/qri-io/jsonpointer) +[![License](https://img.shields.io/github/license/qri-io/jsonpointer.svg?style=flat-square)](./LICENSE) +[![Codecov](https://img.shields.io/codecov/c/github/qri-io/jsonpointer.svg?style=flat-square)](https://codecov.io/gh/qri-io/jsonpointer) +[![CI](https://img.shields.io/circleci/project/github/qri-io/jsonpointer.svg?style=flat-square)](https://circleci.com/gh/qri-io/jsonpointer) +[![Go Report Card](https://goreportcard.com/badge/github.com/qri-io/jsonpointer)](https://goreportcard.com/report/github.com/qri-io/jsonpointer) + + +# jsonpointer +golang implementation of [IETF RFC6901](https://tools.ietf.org/html/rfc6901): +_JSON Pointer defines a string syntax for identifying a specific value within a JavaScript Object Notation (JSON) document._ + +### Installation +install with: +`go get -u github.com/qri-io/jsonpointer` + + +### Usage +Here's a quick example pulled from 
the [godoc](https://godoc.org/github.com/qri-io/jsonpointer):
+
+```go
+import (
+	"encoding/json"
+	"fmt"
+	"github.com/qri-io/jsonpointer"
+)
+
+var document = []byte(`{
+  "foo": {
+    "bar": {
+      "baz": [0,"hello!"]
+    }
+  }
+}`)
+
+func main() {
+	parsed := map[string]interface{}{}
+	// be sure to handle errors in real-world code!
+	json.Unmarshal(document, &parsed)
+
+	// parse a json pointer. Pointers can also be url fragments
+	// the following are equivalent pointers:
+	// "/foo/bar/baz/1"
+	// "#/foo/bar/baz/1"
+	// "http://example.com/document.json#/foo/bar/baz/1"
+	ptr, _ := jsonpointer.Parse("/foo/bar/baz/1")
+
+	// evaluate the pointer against the document
+	// evaluation always starts at the root of the document
+	got, _ := ptr.Eval(parsed)
+
+	fmt.Println(got)
+	// Output: hello!
+}
+
+```
+
+### License
+MIT
+
+### Issues & Contributions
+Contributions & Issues are more than welcome! Everything happens over on this repo's [github page](https://github.com/qri-io/jsonpointer)
\ No newline at end of file
diff --git a/vendor/github.com/qri-io/jsonpointer/codecov.yml b/vendor/github.com/qri-io/jsonpointer/codecov.yml
new file mode 100644
index 00000000000..1a90176d045
--- /dev/null
+++ b/vendor/github.com/qri-io/jsonpointer/codecov.yml
@@ -0,0 +1,9 @@
+codecov:
+  ci:
+    - "ci/circle-ci"
+  notify:
+    require_ci_to_pass: no
+    after_n_builds: 2
+coverage:
+  range: "80...100"
+comment: off
\ No newline at end of file
diff --git a/vendor/github.com/qri-io/jsonpointer/go.mod b/vendor/github.com/qri-io/jsonpointer/go.mod
new file mode 100644
index 00000000000..a00555d67cd
--- /dev/null
+++ b/vendor/github.com/qri-io/jsonpointer/go.mod
@@ -0,0 +1 @@
+module github.com/qri-io/jsonpointer
diff --git a/vendor/github.com/qri-io/jsonpointer/pointer.go b/vendor/github.com/qri-io/jsonpointer/pointer.go
new file mode 100644
index 00000000000..13f3ccdccdc
--- /dev/null
+++ b/vendor/github.com/qri-io/jsonpointer/pointer.go
@@ -0,0 +1,185 @@
+// Package jsonpointer implements IETF rfc6901
+// JSON Pointers are a string syntax for
+// identifying a specific value within a JavaScript Object Notation
+// (JSON) document [RFC4627]. JSON Pointer is intended to be easily
+// expressed in JSON string values as well as Uniform Resource
+// Identifier (URI) [RFC3986] fragment identifiers.
+//
+// this package is intended to work like net/url from the go
+// standard library
+package jsonpointer
+
+import (
+	"fmt"
+	"net/url"
+	"strconv"
+	"strings"
+)
+
+const defaultPointerAllocationSize = 32
+
+// Parse parses str into a Pointer structure.
+// str may be a pointer or a url string.
+// If a url string, Parse will use the URL's fragment component
+// (the bit after the '#' symbol)
+func Parse(str string) (Pointer, error) {
+	// fast paths that skip url parse step
+	if len(str) == 0 || str == "#" {
+		return Pointer{}, nil
+	} else if str[0] == '/' {
+		return parse(str)
+	}
+
+	u, err := url.Parse(str)
+	if err != nil {
+		return nil, err
+	}
+	return parse(u.Fragment)
+}
+
+// IsEmpty is a utility function to check if the Pointer
+// is empty / nil equivalent
+func (p Pointer) IsEmpty() bool {
+	return len(p) == 0
+}
+
+// Head returns the root of the Pointer
+func (p Pointer) Head() *string {
+	if len(p) == 0 {
+		return nil
+	}
+	return &p[0]
+}
+
+// Tail returns everything after the Pointer head
+func (p Pointer) Tail() Pointer {
+	return Pointer(p[1:])
+}
+
+// The ABNF syntax of a JSON Pointer is:
+// json-pointer    = *( "/" reference-token )
+// reference-token = *( unescaped / escaped )
+// unescaped       = %x00-2E / %x30-7D / %x7F-10FFFF
+//   ; %x2F ('/') and %x7E ('~') are excluded from 'unescaped'
+// escaped         = "~" ( "0" / "1" )
+//   ; representing '~' and '/', respectively
+func parse(str string) (Pointer, error) {
+	if len(str) == 0 {
+		return Pointer{}, nil
+	}
+
+	if str[0] != '/' {
+		return nil, fmt.Errorf("non-empty references must begin with a '/' character")
+	}
+	str = str[1:]
+
+	toks := strings.Split(str, separator)
+	for i, t := range toks {
+		toks[i] = unescapeToken(t)
+	}
+	return Pointer(toks), nil
+}
+
+// Pointer represents a parsed JSON pointer
+type Pointer []string
+
+// NewPointer creates a Pointer with a pre-allocated block of memory
+// to avoid repeated slice expansions
+func NewPointer() Pointer {
+	return make([]string, 0, defaultPointerAllocationSize)
+}
+
+// String implements the stringer interface for Pointer,
+// giving the escaped string
+func (p Pointer) String() (str string) {
+	for _, tok := range p {
+		str += "/" + escapeToken(tok)
+	}
+	return
+}
+
+// Eval evaluates a json pointer against a given root JSON document
+// Evaluation of a JSON Pointer begins with a reference to the root
+// value of a JSON document and completes with a reference to some value
+// within the document. Each reference token in the JSON Pointer is
+// evaluated sequentially.
+func (p Pointer) Eval(data interface{}) (result interface{}, err error) {
+	result = data
+	for _, tok := range p {
+		if result, err = p.evalToken(tok, result); err != nil {
+			return nil, err
+		}
+	}
+	return
+}
+
+// Descendant returns a new pointer to a descendant of the current pointer
+// parsing the input path into components
+func (p Pointer) Descendant(path string) (Pointer, error) {
+	if !strings.HasPrefix(path, "/") {
+		path = "/" + path
+	}
+	dpath, err := parse(path)
+	if err != nil {
+		return p, err
+	}
+
+	if p.String() == "/" {
+		return dpath, nil
+	}
+
+	return append(p, dpath...), nil
+}
+
+// RawDescendant extends the pointer with one or more path tokens.
+// The function itself is unsafe as it doesn't fully parse the input
+// and assumes the user is directly managing the pointer.
+// This allows for much faster pointer management.
+func (p Pointer) RawDescendant(path ...string) Pointer {
+	return append(p, path...)
+}
+
+// Evaluation of each reference token begins by decoding any escaped
+// character sequence. This is performed by first transforming any
+// occurrence of the sequence '~1' to '/', and then transforming any
+// occurrence of the sequence '~0' to '~'. By performing the
+// substitutions in this order, an implementation avoids the error of
+// turning '~01' first into '~1' and then into '/', which would be
+// incorrect (the string '~01' correctly becomes '~1' after
+// transformation).
+// The reference token then modifies which value is referenced according
+// to the following scheme:
+func (p Pointer) evalToken(tok string, data interface{}) (interface{}, error) {
+	switch ch := data.(type) {
+	case map[string]interface{}:
+		return ch[tok], nil
+	case []interface{}:
+		i, err := strconv.Atoi(tok)
+		if err != nil {
+			return nil, fmt.Errorf("invalid array index: %s", tok)
+		}
+		if i >= len(ch) {
+			return nil, fmt.Errorf("index %d exceeds array length of %d", i, len(ch))
+		}
+		return ch[i], nil
+	default:
+		return nil, fmt.Errorf("invalid JSON pointer: %s", p.String())
+	}
+}
+
+const (
+	separator        = "/"
+	escapedSeparator = "~1"
+	tilde            = "~"
+	escapedTilde     = "~0"
+)
+
+func unescapeToken(tok string) string {
+	tok = strings.Replace(tok, escapedSeparator, separator, -1)
+	return strings.Replace(tok, escapedTilde, tilde, -1)
+}
+
+func escapeToken(tok string) string {
+	tok = strings.Replace(tok, tilde, escapedTilde, -1)
+	return strings.Replace(tok, separator, escapedSeparator, -1)
+}
diff --git a/vendor/github.com/qri-io/jsonpointer/traversal.go b/vendor/github.com/qri-io/jsonpointer/traversal.go
new file mode 100644
index 00000000000..c7453bfe17e
--- /dev/null
+++ b/vendor/github.com/qri-io/jsonpointer/traversal.go
@@ -0,0 +1,99 @@
+package jsonpointer
+
+import (
+	"reflect"
+)
+
+// JSONContainer returns any existing child value for a given JSON property string
+type JSONContainer interface {
+	// JSONProp takes a string reference for a given JSON property.
+	// implementations must return any matching property of that name,
+	// or nil if no such subproperty exists.
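+	// For example (a hypothetical sketch, not part of the upstream docs):
+	// a wrapper over map[string]interface{}{"a": 1} would return 1 from
+	// JSONProp("a") and nil from JSONProp("missing").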
+	// Note that implementations on slice-types are expected to convert
+	// prop to an integer value
+	JSONProp(prop string) interface{}
+}
+
+// JSONParent is an interface that enables tree traversal by listing
+// all immediate children of an object
+type JSONParent interface {
+	// JSONProps should return all immediate children of this element
+	// with json property names as keys, go types as values
+	// Note that implementations on slice-types are expected to convert
+	// integers to string keys
+	JSONProps() map[string]interface{}
+}
+
+// WalkJSON calls visit on all elements in a tree of decoded json
+func WalkJSON(tree interface{}, visit func(elem interface{}) error) error {
+	if tree == nil {
+		return nil
+	}
+
+	if err := visit(tree); err != nil {
+		return err
+	}
+
+	if con, ok := tree.(JSONParent); ok {
+		for _, ch := range con.JSONProps() {
+			if err := WalkJSON(ch, visit); err != nil {
+				return err
+			}
+		}
+		return nil
+	}
+
+	// fast-path for common json types
+	switch t := tree.(type) {
+	case map[string]interface{}:
+		for _, val := range t {
+			if err := WalkJSON(val, visit); err != nil {
+				return err
+			}
+		}
+		return nil
+	case []interface{}:
+		for _, val := range t {
+			if err := WalkJSON(val, visit); err != nil {
+				return err
+			}
+		}
+		return nil
+	}
+
+	return walkValue(reflect.ValueOf(tree), visit)
+}
+
+func walkValue(v reflect.Value, visit func(elem interface{}) error) error {
+	switch v.Kind() {
+	case reflect.Invalid:
+		return nil
+	case reflect.Ptr:
+		if !v.IsNil() {
+			walkValue(v.Elem(), visit)
+		}
+	case reflect.Map:
+		for _, key := range v.MapKeys() {
+			mi := v.MapIndex(key)
+			if mi.CanInterface() {
+				WalkJSON(mi.Interface(), visit)
+			}
+		}
+	case reflect.Struct:
+		// t := v.Type()
+		// TypeOf returns the reflection Type that represents the dynamic type of variable.
+		// If variable is a nil interface value, TypeOf returns nil.
+		for i := 0; i < v.NumField(); i++ {
+			f := v.Field(i)
+			// fmt.Printf("%d: %s %s %s = %v\n", i, t.Field(i).Name, f.Type(), t.Field(i).Tag.Get("json"), f.CanInterface())
+			if f.CanInterface() {
+				WalkJSON(f.Interface(), visit)
+			}
+		}
+	case reflect.Slice, reflect.Array:
+		for i := 0; i < v.Len(); i++ {
+			WalkJSON(v.Index(i).Interface(), visit)
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/qri-io/jsonschema/.gitignore b/vendor/github.com/qri-io/jsonschema/.gitignore
new file mode 100644
index 00000000000..c378da30b36
--- /dev/null
+++ b/vendor/github.com/qri-io/jsonschema/.gitignore
@@ -0,0 +1,3 @@
+.DS_Store
+coverage.txt
+.vscode
\ No newline at end of file
diff --git a/vendor/github.com/qri-io/jsonschema/CHANGELOG.md b/vendor/github.com/qri-io/jsonschema/CHANGELOG.md
new file mode 100644
index 00000000000..eac1c48c30a
--- /dev/null
+++ b/vendor/github.com/qri-io/jsonschema/CHANGELOG.md
@@ -0,0 +1,75 @@
+# [](https://github.com/qri-io/jsonschema/compare/v0.2.0...v) (2021-03-29)
+
+
+### Bug Fixes
+
+* **error:** show error message for `minLength` ([#73](https://github.com/qri-io/jsonschema/issues/73)) ([0995c6b](https://github.com/qri-io/jsonschema/commit/0995c6b04506cc858dee03b166771edeeab95f64))
+
+
+### Features
+
+* **resolve:** file URI resolution ([#90](https://github.com/qri-io/jsonschema/issues/90)) ([dbc3af1](https://github.com/qri-io/jsonschema/commit/dbc3af1d666cc034a9ac89f10fba0ad6d5cb6c8e))
+* **type:** support additional number types ([#72](https://github.com/qri-io/jsonschema/issues/72)) ([9874480](https://github.com/qri-io/jsonschema/commit/9874480d05ec5edf3e0c19873bd2bd4fb322b3fe))
+
+
+
+# [](https://github.com/qri-io/jsonschema/compare/v0.1.2...v) (2020-05-21)
+
+This is release v0.2.0. It's a rework of the jsonschema implementation that offers better support for the spec, equal or better performance depending on the keyword, the ability to easily extend it with your own keywords, and, finally, draft2019_09 support.
+
+### Features
+
+* **jsonschema:** reworking json schema (migration to draft2019_09) ([bb2a1cf](https://github.com/qri-io/jsonschema/commit/bb2a1cf423024a5144c05dcced8f1226fd7e65b9))
+
+
+# [](https://github.com/qri-io/jsonschema/compare/v0.1.1...v) (2020-05-21)
+
+This is a patch release of jsonschema to mark v0.1.2. Its purpose is to provide a stable v0.1 version for managing dependencies, as the upcoming v0.2.0 will break much of the existing API.
+
+### Bug Fixes
+
+* Typo ([#52](https://github.com/qri-io/jsonschema/issues/52)) ([9f11b79](https://github.com/qri-io/jsonschema/commit/9f11b79125715650da0b4932b3ca66328b508ac7))
+
+
+### Features
+
+* **type:** identify custom struct as objects ([c1722b7](https://github.com/qri-io/jsonschema/commit/c1722b720fafa56f0514e08063b5a3c6baa73863))
+
+
+
+# (2019-05-23)
+
+This is the first proper release of jsonschema. In preparation for go 1.13, in which `go.mod` files and go modules are the primary way to handle go dependencies, we are going to do an official release of all our modules. This will be version v0.1.1 of jsonschema.
+
+
+### Bug Fixes
+
+* **jsonschema:** Handle empty url fragment "#", add unit tests. ([ca0e82f](https://github.com/qri-io/jsonschema/commit/ca0e82f))
+* An issue where if $id starts with # caused a slice bounds out of range panic while Unmarshaling ([9f6179a](https://github.com/qri-io/jsonschema/commit/9f6179a))
+* **$comment:** add support for $comment keyword, add $comment to testschema_test ExampleBasic() ([#33](https://github.com/qri-io/jsonschema/issues/33)) ([3313399](https://github.com/qri-io/jsonschema/commit/3313399))
+* **const error:** error reports what const must equal instead of supplied value ([9b9427b](https://github.com/qri-io/jsonschema/commit/9b9427b)), closes [#34](https://github.com/qri-io/jsonschema/issues/34)
+
+
+### Features
+
+* **IfThenElse:** implement If/Then/Else, cleanup ([bef9c1e](https://github.com/qri-io/jsonschema/commit/bef9c1e))
+* **json.Marshaler:** marshal schemas back to json properly. ([f7d8215](https://github.com/qri-io/jsonschema/commit/f7d8215))
+* **jsonschema:** Change to TopLevelType function, more general. ([4a66928](https://github.com/qri-io/jsonschema/commit/4a66928))
+* **jsonschema:** Cleanup mistakes, test for unknown schema type. ([9ab452b](https://github.com/qri-io/jsonschema/commit/9ab452b))
+* **jsonschema:** Field to tell if RootSchema is an array or object. ([8bd68f0](https://github.com/qri-io/jsonschema/commit/8bd68f0))
+* **jsonschema format:** added iri, iri-ref, regex format validators ([06217c5](https://github.com/qri-io/jsonschema/commit/06217c5))
+* **jsonschema format:** added iri, iri-ref, regex format validators ([4e5183a](https://github.com/qri-io/jsonschema/commit/4e5183a))
+* **jsonschema format:** added jsonpointer, reljsonpointer validators ([6205399](https://github.com/qri-io/jsonschema/commit/6205399))
+* **refs:** first signs of life on refs working properly ([435c766](https://github.com/qri-io/jsonschema/commit/435c766))
+* **ValError:** overhaul and upgrade error collection & reporting ([66b03e6](https://github.com/qri-io/jsonschema/commit/66b03e6))
+* added format validators for datetime, date, email, ipv4/6 and some others ([3394369](https://github.com/qri-io/jsonschema/commit/3394369))
+* added format validators for datetime, date, email, ipv4/6 and some others ([5bf895c](https://github.com/qri-io/jsonschema/commit/5bf895c))
+* added Must func for easier schema declaration in Go. ([2874aff](https://github.com/qri-io/jsonschema/commit/2874aff))
+* **jsonschema format:** added jsonpointer, reljsonpointer validators ([d787e78](https://github.com/qri-io/jsonschema/commit/d787e78))
+* first pass of draft7 test suite passing ([263a72d](https://github.com/qri-io/jsonschema/commit/263a72d))
+* initial commit ([b620f19](https://github.com/qri-io/jsonschema/commit/b620f19))
+* initial support for local references ([a99baf2](https://github.com/qri-io/jsonschema/commit/a99baf2))
+* return multiple errors on validation call. ([00b42a8](https://github.com/qri-io/jsonschema/commit/00b42a8)), closes [#15](https://github.com/qri-io/jsonschema/issues/15)
+
+
+
diff --git a/vendor/github.com/qri-io/jsonschema/DEVELOPERS.md b/vendor/github.com/qri-io/jsonschema/DEVELOPERS.md
new file mode 100644
index 00000000000..1f37c649a1e
--- /dev/null
+++ b/vendor/github.com/qri-io/jsonschema/DEVELOPERS.md
@@ -0,0 +1,125 @@
+# Developing go-jsonschema
+
+* [Development Setup](#setup)
+* [Commit Message Guidelines](#commits)
+* [Writing Documentation](#documentation)
+
+## Development Setup
+
+This document describes how to set up your development environment to build and test jsonschema.
+
+### Installing Dependencies
+
+Before you can build jsonschema, you must install and configure the following dependencies on your
+machine:
+
+* [Git](http://git-scm.com/): The [Github Guide to
+  Installing Git][git-setup] is a good source of information.
+
+* [The Go Programming Language](https://golang.org): see golang.org to get started
+
+### Forking jsonschema on Github
+
+To contribute code to jsonschema, you must have a GitHub account so you can push code to your own
+fork of jsonschema and open Pull Requests in the [GitHub Repository][github].
+
+To create a GitHub account, follow the instructions [here](https://github.com/signup/free).
+Afterwards, go ahead and [fork](http://help.github.com/forking) the
+[jsonschema repository][github].
+
+### Building jsonschema
+
+To build jsonschema, clone the source code repository and build the package with the Go toolchain:
+
+```shell
+# Clone your Github repository:
+git clone https://github.com/<username>/jsonschema.git
+
+# Go to the jsonschema directory:
+cd jsonschema
+
+# Build the jsonschema package:
+go install
+```
+
+
+## Git Commit Guidelines
+
+We have very precise rules over how our git commit messages can be formatted. This leads to **more
+readable messages** that are easy to follow when looking through the **project history**. But also,
+we use the git commit messages to **generate the Qri change log**.
+
+### Commit Message Format
+Each commit message consists of a **header**, a **body** and a **footer**. The header has a special
+format that includes a **type**, a **scope** and a **subject**:
+
+```
+<type>(<scope>): <subject>
+<BLANK LINE>
+<body>
+<BLANK LINE>