diff --git a/.github/workflows/on-pull-request.yaml b/.github/workflows/on-pull-request.yaml index 756c893..44f2e58 100644 --- a/.github/workflows/on-pull-request.yaml +++ b/.github/workflows/on-pull-request.yaml @@ -2,13 +2,9 @@ name: CI on: push: - branches: - - master - - main + branches: [ master, main ] pull_request: - branches: - - master - - main + branches: [ master, main ] jobs: on-pull-request: @@ -16,23 +12,23 @@ jobs: strategy: matrix: go-version: - - 1.18.x - - 1.19.x + - 1.22.x + - 1.23.x os: [ ubuntu-latest ] runs-on: ${{ matrix.os }} steps: - name: Checkout - uses: actions/checkout@master + uses: actions/checkout@v4 - name: Set up Go - uses: actions/setup-go@v2 + uses: actions/setup-go@v5 with: go-version: ${{ matrix.go-version }} - run: go env - name: Cache deps - uses: actions/cache@v2 + uses: actions/cache@v4 with: path: ~/go/pkg/mod key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} @@ -43,4 +39,4 @@ jobs: run: go mod download - name: Test - run: go test ./... \ No newline at end of file + run: go test ./... diff --git a/example_test.go b/example_test.go index 9fd306e..7af08c7 100644 --- a/example_test.go +++ b/example_test.go @@ -9,7 +9,7 @@ import ( "github.com/mailgun/groupcache/v2" ) -func ExampleUsage() { +func ExampleGroup() { /* // Keep track of peers in our cluster and add our instance to the pool `http://localhost:8080` pool := groupcache.NewHTTPPoolOpts("http://localhost:8080", &groupcache.HTTPPoolOptions{}) diff --git a/go.mod b/go.mod index 72812e3..a0da164 100644 --- a/go.mod +++ b/go.mod @@ -1,19 +1,29 @@ module github.com/mailgun/groupcache/v2 -go 1.19 +go 1.22 require ( - github.com/golang/protobuf v1.5.2 + github.com/golang/protobuf v1.5.4 + github.com/mailgun/holster/v4 v4.20.3 + github.com/prometheus/client_golang v1.20.5 + github.com/prometheus/client_model v0.6.1 + github.com/prometheus/common v0.61.0 github.com/segmentio/fasthash v1.0.3 - github.com/sirupsen/logrus v1.9.0 - github.com/stretchr/testify v1.8.1 + github.com/sirupsen/logrus v1.9.2 + github.com/stretchr/testify v1.10.0 ) require ( + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect + github.com/klauspost/compress v1.17.9 // indirect + github.com/kr/text v0.2.0 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8 // indirect - golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f // indirect - google.golang.org/protobuf v1.28.1 // indirect + github.com/prometheus/procfs v0.15.1 // indirect + golang.org/x/sys v0.28.0 // indirect + google.golang.org/protobuf v1.35.2 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index e1d0665..2d3aa30 100644 --- a/go.sum +++ b/go.sum @@ -1,37 +1,59 @@ +github.com/ahmetb/go-linq v3.0.0+incompatible h1:qQkjjOXKrKOTy83X8OpRmnKflXKQIL/mC/gMVVDMhOA= +github.com/ahmetb/go-linq v3.0.0+incompatible/go.mod h1:PFffvbdbtw+QTB0WKRP0cNht7vnCfnGlEpak/DVg5cY= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/creack/pty v1.1.9/go.mod 
h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/mailgun/holster/v4 v4.20.3 h1:FwHxBvuoWEqEpZGeNCLuk/oAHyNs3+ksGoCW0qbiHyo= +github.com/mailgun/holster/v4 v4.20.3/go.mod h1:HuFVoS8qOhceEBL4czXnVzp0bQrrIkLeX30IAll5hQ0= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= +github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFSRQQ= +github.com/prometheus/common v0.61.0/go.mod h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/segmentio/fasthash v1.0.3 h1:EI9+KE1EwvMLBWwjpRDc+fEM+prwxDYbslddQGtrmhM= github.com/segmentio/fasthash 
v1.0.3/go.mod h1:waKX8l2N8yckOgmSsXJi7x1ZfdKZ4x7KRMzBtS3oedY= -github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= -github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sirupsen/logrus v1.9.2 h1:oxx1eChJGI6Uks2ZC4W1zpLlVgqB8ner4EuQwV4Ik1Y= +github.com/sirupsen/logrus v1.9.2/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8 h1:h+EGohizhe9XlX18rfpa8k8RAc5XyaeamM+0VHRd4lc= -golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f h1:uF6paiQQebLeSXkrTqHqz0MXhXXS1KgF41eUdBNvxK0= -golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= +google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/groupcache.go b/groupcache.go index 541e89a..787f1dc 100644 --- a/groupcache.go +++ b/groupcache.go @@ -76,7 +76,7 @@ var ( initPeerServer func() ) -// GetGroup returns the named group previously created with NewGroup, or +// 
GetGroup returns the named group previously created with NewGroup or // nil if there's no such group. func GetGroup(name string) *Group { mu.RLock() @@ -85,6 +85,18 @@ func GetGroup(name string) *Group { return g } +// GetGroups returns all groups previously created with NewGroup or nil if no +// groups. +func GetGroups() []*Group { + list := make([]*Group, 0, len(groups)) + mu.RLock() + for _, group := range groups { + list = append(list, group) + } + mu.RUnlock() + return list +} + // NewGroup creates a coordinated group-aware Getter from a Getter. // // The returned Getter tries (but does not guarantee) to run only one diff --git a/promexporter/LICENSE b/promexporter/LICENSE new file mode 100644 index 0000000..e2d2644 --- /dev/null +++ b/promexporter/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 Grafana Labs + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/promexporter/README.md b/promexporter/README.md new file mode 100644 index 0000000..5a7b5e4 --- /dev/null +++ b/promexporter/README.md @@ -0,0 +1,47 @@ +# Prometheus Groupcache Exporter + +This exporter extracts statistics from group instances and exports as Prometheus metrics. + +## Example + +```go +import ( + "github.com/mailgun/groupcache/v2" + "github.com/mailgun/groupcache/v2/promexporter" + "github.com/prometheus/client_golang/prometheus" +) + +// ... + +collector := promexporter.NewExporter() +prometheus.MustRegister(collector) + +// Collector will discover newly created group. 
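+//
+// Illustrative, not part of this change: the registered collector is typically
+// served with the standard client_golang handler, e.g.
+//   http.Handle("/metrics", promhttp.Handler())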
+group := groupcache.NewGroup("mygroup", cacheSize, getter) +``` + +## Metrics + +### Exported from groupcache's `CacheStats()` +- `groupcache_cache_bytes{group,type="main|hot"}`: Gauge of current bytes in use +- `groupcache_cache_evictions_nonexpired_total{group,type="main|hot"}`: Count of cache evictions for non-expired keys due to memory full +- `groupcache_cache_evictions_total{group,type="main|hot"}`: Count of cache evictions +- `groupcache_cache_gets_total{group,type="main|hot"}`: Count of cache gets +- `groupcache_cache_hits_total{group,type="main|hot"}`: Count of cache hits +- `groupcache_cache_items{group,type="main|hot"}`: Gauge of current items in use + +### Exported from groupcache's `Stats()` +- `groupcache_get_from_peers_latency_lower{group}`: Represent slowest duration to request value from peers +- `groupcache_gets_total{group}`: Count of cache gets (including from peers, from either main or hot caches) +- `groupcache_hits_total{group}`: Count of cache hits (from either main or hot caches) +- `groupcache_loads_deduped_total{group}`: Count of loads after singleflight +- `groupcache_loads_total{group}`: Count of (gets - hits) +- `groupcache_local_load_errs_total{group}`: Count of load errors from local cache +- `groupcache_local_loads_total{group}`: Count of loads from local cache +- `groupcache_peer_errors_total{group}`: Count of errors from peers +- `groupcache_peer_loads_total{group}`: Count of loads or cache hits from peers +- `groupcache_server_requests_total{group}`: Count of gets received from peers + +## Attribution + +This package source originated from https://github.com/udhos/groupcache_exporter. See LICENSE for MIT license details impacting the contents of this package directory in addition to the LICENSE at the root of this repo for co-existing Apache license details. diff --git a/promexporter/exporter.go b/promexporter/exporter.go new file mode 100644 index 0000000..7597168 --- /dev/null +++ b/promexporter/exporter.go @@ -0,0 +1,316 @@ +// Package promexporter exports Prometheus metrics for groupcache. +package promexporter + +import ( + "github.com/mailgun/groupcache/v2" + "github.com/prometheus/client_golang/prometheus" +) + +// Exporter implements interface prometheus.Collector to extract metrics from groupcache. +type Exporter struct { + namespace string + labels map[string]string + groupProvider GroupProvider + groupGets *prometheus.Desc + groupHits *prometheus.Desc + groupGetFromPeersLatencyLower *prometheus.Desc + groupPeerLoads *prometheus.Desc + groupPeerErrors *prometheus.Desc + groupLoads *prometheus.Desc + groupLoadsDeduped *prometheus.Desc + groupLocalLoads *prometheus.Desc + groupLocalLoadErrs *prometheus.Desc + groupServerRequests *prometheus.Desc + cacheBytes *prometheus.Desc + cacheItems *prometheus.Desc + cacheGets *prometheus.Desc + cacheHits *prometheus.Desc + cacheEvictions *prometheus.Desc + cacheEvictionsNonExpired *prometheus.Desc +} + +// GroupStatistics is a plugable interface to extract metrics from a groupcache implementation. +// GroupStatistics is used by Exporter to collect the group statistics. +// The user must provide a concrete implementation of this interface that knows how to +// extract group statistics from the actual groupcache implementation. 
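+// Within this package, the unexported statsAdapter (statsadapter.go) is the
+// default implementation, backed by a group's Stats counters and CacheStats().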
+type GroupStatistics interface { + // Name returns the group's name + Name() string + + // Gets represents any Get request, including from peers + Gets() int64 + + // CacheHits represents either cache was good + CacheHits() int64 + + // GetFromPeersLatencyLower represents slowest duration to request value from peers + GetFromPeersLatencyLower() float64 + + // PeerLoads represents either remote load or remote cache hit (not an error) + PeerLoads() int64 + + // PeerErrors represents a count of errors from peers + PeerErrors() int64 + + // Loads represents (gets - cacheHits) + Loads() int64 + + // LoadsDeduped represents after singleflight + LoadsDeduped() int64 + + // LocalLoads represents total good local loads + LocalLoads() int64 + + // LocalLoadErrs represents total bad local loads + LocalLoadErrs() int64 + + // ServerRequests represents gets that came over the network from peers + ServerRequests() int64 + + MainCacheItems() int64 + MainCacheBytes() int64 + MainCacheGets() int64 + MainCacheHits() int64 + MainCacheEvictions() int64 + MainCacheEvictionsNonExpired() int64 + + HotCacheItems() int64 + HotCacheBytes() int64 + HotCacheGets() int64 + HotCacheHits() int64 + HotCacheEvictions() int64 + HotCacheEvictionsNonExpired() int64 +} + +type Option interface { + apply(*Exporter) +} + +type namespaceOption struct { + namespace string +} + +type labelsOption struct { + labels map[string]string +} + +type groupsOption struct { + provider GroupProvider +} + +type GroupProvider interface { + Groups() []*groupcache.Group +} + +type allGroupsProvider struct{} + +// NewExporter creates Exporter. +// namespace is usually the empty string. +func NewExporter(opts ...Option) *Exporter { + const subsystem = "groupcache" + + e := &Exporter{ + groupProvider: new(allGroupsProvider), + } + for _, opt := range opts { + opt.apply(e) + } + e.groupGets = prometheus.NewDesc( + prometheus.BuildFQName(e.namespace, subsystem, "gets_total"), + "Count of cache gets (including from peers, from either main or hot cache)", + []string{"group"}, + e.labels, + ) + e.groupHits = prometheus.NewDesc( + prometheus.BuildFQName(e.namespace, subsystem, "hits_total"), + "Count of cache hits (from either main or hot cache)", + []string{"group"}, + e.labels, + ) + e.groupGetFromPeersLatencyLower = prometheus.NewDesc( + prometheus.BuildFQName(e.namespace, subsystem, "get_from_peers_latency_lower"), + "Represent slowest duration to request value from peers", + []string{"group"}, + e.labels, + ) + e.groupPeerLoads = prometheus.NewDesc( + prometheus.BuildFQName(e.namespace, subsystem, "peer_loads_total"), + "Count of loads or cache hits from peers", + []string{"group"}, + e.labels, + ) + e.groupPeerErrors = prometheus.NewDesc( + prometheus.BuildFQName(e.namespace, subsystem, "peer_errors_total"), + "Count of errors from peers", + []string{"group"}, + e.labels, + ) + e.groupLoads = prometheus.NewDesc( + prometheus.BuildFQName(e.namespace, subsystem, "loads_total"), + "Count of (gets - hits)", + []string{"group"}, + e.labels, + ) + e.groupLoadsDeduped = prometheus.NewDesc( + prometheus.BuildFQName(e.namespace, subsystem, "loads_deduped_total"), + "Count of loads after singleflight", + []string{"group"}, + e.labels, + ) + e.groupLocalLoads = prometheus.NewDesc( + prometheus.BuildFQName(e.namespace, subsystem, "local_loads_total"), + "Count of loads from local cache", + []string{"group"}, + e.labels, + ) + e.groupLocalLoadErrs = prometheus.NewDesc( + prometheus.BuildFQName(e.namespace, subsystem, "local_load_errs_total"), + "Count of load 
errors from local cache", + []string{"group"}, + e.labels, + ) + e.groupServerRequests = prometheus.NewDesc( + prometheus.BuildFQName(e.namespace, subsystem, "server_requests_total"), + "Count of gets received from peers", + []string{"group"}, + e.labels, + ) + e.cacheBytes = prometheus.NewDesc( + prometheus.BuildFQName(e.namespace, subsystem, "cache_bytes"), + "Gauge of current bytes in use", + []string{"group", "type"}, + e.labels, + ) + e.cacheItems = prometheus.NewDesc( + prometheus.BuildFQName(e.namespace, subsystem, "cache_items"), + "Gauge of current items in use", + []string{"group", "type"}, + e.labels, + ) + e.cacheGets = prometheus.NewDesc( + prometheus.BuildFQName(e.namespace, subsystem, "cache_gets_total"), + "Count of cache gets", + []string{"group", "type"}, + e.labels, + ) + e.cacheHits = prometheus.NewDesc( + prometheus.BuildFQName(e.namespace, subsystem, "cache_hits_total"), + "Count of cache hits", + []string{"group", "type"}, + e.labels, + ) + e.cacheEvictions = prometheus.NewDesc( + prometheus.BuildFQName(e.namespace, subsystem, "cache_evictions_total"), + "Count of cache evictions", + []string{"group", "type"}, + e.labels, + ) + e.cacheEvictionsNonExpired = prometheus.NewDesc( + prometheus.BuildFQName(e.namespace, subsystem, "cache_evictions_nonexpired_total"), + "Count of cache evictions for non-expired keys due to memory full", + []string{"group", "type"}, + e.labels, + ) + return e +} + +func (e *Exporter) getStatGroups() []GroupStatistics { + groups := e.groupProvider.Groups() + statgroups := make([]GroupStatistics, 0, len(groups)) + for _, g := range groups { + statgroups = append(statgroups, newStatsAdapter(g)) + } + return statgroups +} + +// Describe sends metrics descriptors. +func (e *Exporter) Describe(ch chan<- *prometheus.Desc) { + ch <- e.groupGets + ch <- e.groupHits + ch <- e.groupGetFromPeersLatencyLower + ch <- e.groupPeerLoads + ch <- e.groupPeerErrors + ch <- e.groupLoads + ch <- e.groupLoadsDeduped + ch <- e.groupLocalLoads + ch <- e.groupLocalLoadErrs + ch <- e.groupServerRequests + ch <- e.cacheBytes + ch <- e.cacheItems + ch <- e.cacheGets + ch <- e.cacheHits + ch <- e.cacheEvictions + ch <- e.cacheEvictionsNonExpired +} + +// Collect is called by the Prometheus registry when collecting metrics. 
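+// It emits one set of const metrics per group returned by the configured
+// GroupProvider, covering both the "main" and "hot" caches.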
+func (e *Exporter) Collect(ch chan<- prometheus.Metric) { + for _, group := range e.getStatGroups() { + e.collectFromGroup(ch, group) + } +} + +func (e *Exporter) collectFromGroup(ch chan<- prometheus.Metric, stats GroupStatistics) { + e.collectStats(ch, stats) + e.collectCacheStats(ch, stats) +} + +func (e *Exporter) collectStats(ch chan<- prometheus.Metric, stats GroupStatistics) { + ch <- prometheus.MustNewConstMetric(e.groupGets, prometheus.CounterValue, float64(stats.Gets()), stats.Name()) + ch <- prometheus.MustNewConstMetric(e.groupHits, prometheus.CounterValue, float64(stats.CacheHits()), stats.Name()) + ch <- prometheus.MustNewConstMetric(e.groupGetFromPeersLatencyLower, prometheus.GaugeValue, stats.GetFromPeersLatencyLower(), stats.Name()) + ch <- prometheus.MustNewConstMetric(e.groupPeerLoads, prometheus.CounterValue, float64(stats.PeerLoads()), stats.Name()) + ch <- prometheus.MustNewConstMetric(e.groupPeerErrors, prometheus.CounterValue, float64(stats.PeerErrors()), stats.Name()) + ch <- prometheus.MustNewConstMetric(e.groupLoads, prometheus.CounterValue, float64(stats.Loads()), stats.Name()) + ch <- prometheus.MustNewConstMetric(e.groupLoadsDeduped, prometheus.CounterValue, float64(stats.LoadsDeduped()), stats.Name()) + ch <- prometheus.MustNewConstMetric(e.groupLocalLoads, prometheus.CounterValue, float64(stats.LocalLoads()), stats.Name()) + ch <- prometheus.MustNewConstMetric(e.groupLocalLoadErrs, prometheus.CounterValue, float64(stats.LocalLoadErrs()), stats.Name()) + ch <- prometheus.MustNewConstMetric(e.groupServerRequests, prometheus.CounterValue, float64(stats.ServerRequests()), stats.Name()) +} + +func (e *Exporter) collectCacheStats(ch chan<- prometheus.Metric, stats GroupStatistics) { + ch <- prometheus.MustNewConstMetric(e.cacheItems, prometheus.GaugeValue, float64(stats.MainCacheItems()), stats.Name(), "main") + ch <- prometheus.MustNewConstMetric(e.cacheBytes, prometheus.GaugeValue, float64(stats.MainCacheBytes()), stats.Name(), "main") + ch <- prometheus.MustNewConstMetric(e.cacheGets, prometheus.CounterValue, float64(stats.MainCacheGets()), stats.Name(), "main") + ch <- prometheus.MustNewConstMetric(e.cacheHits, prometheus.CounterValue, float64(stats.MainCacheHits()), stats.Name(), "main") + ch <- prometheus.MustNewConstMetric(e.cacheEvictions, prometheus.CounterValue, float64(stats.MainCacheEvictions()), stats.Name(), "main") + ch <- prometheus.MustNewConstMetric(e.cacheEvictionsNonExpired, prometheus.CounterValue, float64(stats.MainCacheEvictionsNonExpired()), stats.Name(), "main") + + ch <- prometheus.MustNewConstMetric(e.cacheItems, prometheus.GaugeValue, float64(stats.HotCacheItems()), stats.Name(), "hot") + ch <- prometheus.MustNewConstMetric(e.cacheBytes, prometheus.GaugeValue, float64(stats.HotCacheBytes()), stats.Name(), "hot") + ch <- prometheus.MustNewConstMetric(e.cacheGets, prometheus.CounterValue, float64(stats.HotCacheGets()), stats.Name(), "hot") + ch <- prometheus.MustNewConstMetric(e.cacheHits, prometheus.CounterValue, float64(stats.HotCacheHits()), stats.Name(), "hot") + ch <- prometheus.MustNewConstMetric(e.cacheEvictions, prometheus.CounterValue, float64(stats.HotCacheEvictions()), stats.Name(), "hot") + ch <- prometheus.MustNewConstMetric(e.cacheEvictionsNonExpired, prometheus.CounterValue, float64(stats.HotCacheEvictionsNonExpired()), stats.Name(), "hot") +} + +// Set namespace of exported metrics. Namespace serves as the prefix to metric names, like: `_groupcache_cache_bytes`. 
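+// For example (illustrative): NewExporter(WithNamespace("myapp")) produces
+// metric names such as myapp_groupcache_cache_bytes.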
+func WithNamespace(namespace string) Option { + return &namespaceOption{namespace: namespace} +} + +func (o *namespaceOption) apply(e *Exporter) { + e.namespace = o.namespace +} + +// Set labels added to exported metrics. +func WithLabels(labels map[string]string) Option { + return &labelsOption{labels: labels} +} + +func (o *labelsOption) apply(e *Exporter) { + e.labels = o.labels +} + +// Set function used to get groups to export. Called on every scrape. +func WithGroups(groups GroupProvider) Option { + return &groupsOption{provider: groups} +} + +func (o *groupsOption) apply(e *Exporter) { + e.groupProvider = o.provider +} + +func (gp *allGroupsProvider) Groups() []*groupcache.Group { + return groupcache.GetGroups() +} diff --git a/promexporter/exporter_test.go b/promexporter/exporter_test.go new file mode 100644 index 0000000..7c42917 --- /dev/null +++ b/promexporter/exporter_test.go @@ -0,0 +1,412 @@ +package promexporter_test + +import ( + "bytes" + "context" + "fmt" + "io" + "net" + "net/http" + "sort" + "sync" + "testing" + "time" + + "github.com/mailgun/groupcache/v2" + "github.com/mailgun/groupcache/v2/promexporter" + "github.com/mailgun/holster/v4/retry" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + "github.com/prometheus/client_golang/prometheus/testutil/promlint" + promdto "github.com/prometheus/client_model/go" + "github.com/prometheus/common/expfmt" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type MetricInfo struct { + Name string + Labels prometheus.Labels +} + +const ( + pingRoute = "/ping" + metricsRoute = "/metrics" + ttl = time.Minute +) + +func TestExporter(t *testing.T) { + // Given + var wg sync.WaitGroup + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + // Setup groupcache. + group := newGroup(t, t.Name()) + defer groupcache.DeregisterGroup(group.Name()) + gcexporter := promexporter.NewExporter() + registry := prometheus.NewRegistry() + registry.MustRegister(gcexporter) + + // Setup metrics HTTP server. + httpSrv, err := startMetricsServer(ctx, t, registry, &wg) + require.NoError(t, err) + defer func() { + // Tear down. + t.Log("HTTP server shutting down...") + err = httpSrv.Shutdown(ctx) + require.NoError(t, err) + wg.Wait() + }() + + // When + metricsContent, err := getMetrics(ctx, httpSrv) + require.NoError(t, err) + + // Then + // Parse metrics and assert expected values are found. 
+ mfs, err := parseMetricsContent(metricsContent) + require.NoError(t, err) + expectedMetrics := []MetricInfo{ + {Name: "groupcache_cache_bytes", Labels: prometheus.Labels{"group": group.Name(), "type": "main"}}, + {Name: "groupcache_cache_bytes", Labels: prometheus.Labels{"group": group.Name(), "type": "hot"}}, + {Name: "groupcache_cache_evictions_nonexpired_total", Labels: prometheus.Labels{"group": group.Name(), "type": "main"}}, + {Name: "groupcache_cache_evictions_nonexpired_total", Labels: prometheus.Labels{"group": group.Name(), "type": "hot"}}, + {Name: "groupcache_cache_evictions_total", Labels: prometheus.Labels{"group": group.Name(), "type": "main"}}, + {Name: "groupcache_cache_evictions_total", Labels: prometheus.Labels{"group": group.Name(), "type": "hot"}}, + {Name: "groupcache_cache_gets_total", Labels: prometheus.Labels{"group": group.Name(), "type": "main"}}, + {Name: "groupcache_cache_gets_total", Labels: prometheus.Labels{"group": group.Name(), "type": "hot"}}, + {Name: "groupcache_cache_hits_total", Labels: prometheus.Labels{"group": group.Name(), "type": "main"}}, + {Name: "groupcache_cache_hits_total", Labels: prometheus.Labels{"group": group.Name(), "type": "hot"}}, + {Name: "groupcache_cache_items", Labels: prometheus.Labels{"group": group.Name(), "type": "main"}}, + {Name: "groupcache_cache_items", Labels: prometheus.Labels{"group": group.Name(), "type": "hot"}}, + {Name: "groupcache_get_from_peers_latency_lower", Labels: prometheus.Labels{"group": group.Name()}}, + {Name: "groupcache_gets_total", Labels: prometheus.Labels{"group": group.Name()}}, + {Name: "groupcache_hits_total", Labels: prometheus.Labels{"group": group.Name()}}, + {Name: "groupcache_loads_deduped_total", Labels: prometheus.Labels{"group": group.Name()}}, + {Name: "groupcache_loads_total", Labels: prometheus.Labels{"group": group.Name()}}, + {Name: "groupcache_local_load_errs_total", Labels: prometheus.Labels{"group": group.Name()}}, + {Name: "groupcache_local_loads_total", Labels: prometheus.Labels{"group": group.Name()}}, + {Name: "groupcache_peer_errors_total", Labels: prometheus.Labels{"group": group.Name()}}, + {Name: "groupcache_peer_loads_total", Labels: prometheus.Labels{"group": group.Name()}}, + {Name: "groupcache_server_requests_total", Labels: prometheus.Labels{"group": group.Name()}}, + } + for _, expectedMetric := range expectedMetrics { + testName := fmt.Sprintf("Metric exported %s{%s}", expectedMetric.Name, labelsToString(expectedMetric.Labels)) + t.Run(testName, func(t *testing.T) { + assertContainsMetric(t, expectedMetric, mfs) + }) + } + + t.Run("Lint", func(t *testing.T) { + linter := promlint.New(bytes.NewReader(metricsContent)) + problems, err := linter.Lint() + require.NoError(t, err) + for _, problem := range problems { + assert.Fail(t, fmt.Sprintf("%#v", problem)) + } + }) +} + +func TestExporterWithNamespace(t *testing.T) { + // Given + var wg sync.WaitGroup + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + // Setup groupcache. + group := newGroup(t, t.Name()) + defer groupcache.DeregisterGroup(group.Name()) + gcexporter := promexporter.NewExporter(promexporter.WithNamespace("mynamespace")) + registry := prometheus.NewRegistry() + registry.MustRegister(gcexporter) + + // Setup metrics HTTP server. + httpSrv, err := startMetricsServer(ctx, t, registry, &wg) + require.NoError(t, err) + defer func() { + // Tear down. 
+ t.Log("HTTP server shutting down...") + err = httpSrv.Shutdown(ctx) + require.NoError(t, err) + wg.Wait() + }() + + // When + metricsContent, err := getMetrics(ctx, httpSrv) + require.NoError(t, err) + + // Then + // Parse metrics and assert expected values are found. + mfs, err := parseMetricsContent(metricsContent) + require.NoError(t, err) + expectedMetrics := []MetricInfo{ + {Name: "mynamespace_groupcache_cache_bytes", Labels: prometheus.Labels{"group": group.Name(), "type": "main"}}, + {Name: "mynamespace_groupcache_cache_bytes", Labels: prometheus.Labels{"group": group.Name(), "type": "hot"}}, + {Name: "mynamespace_groupcache_gets_total", Labels: prometheus.Labels{"group": group.Name()}}, + } + for _, expectedMetric := range expectedMetrics { + testName := fmt.Sprintf("Metric exported %s{%s}", expectedMetric.Name, labelsToString(expectedMetric.Labels)) + t.Run(testName, func(t *testing.T) { + assertContainsMetric(t, expectedMetric, mfs) + }) + } +} + +func TestExporterWithLabels(t *testing.T) { + // Given + var wg sync.WaitGroup + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + // Setup groupcache. + group := newGroup(t, t.Name()) + defer groupcache.DeregisterGroup(group.Name()) + gcexporter := promexporter.NewExporter(promexporter.WithLabels(map[string]string{ + "accountid": "0001", + "dc": "west", + })) + registry := prometheus.NewRegistry() + registry.MustRegister(gcexporter) + + // Setup metrics HTTP server. + httpSrv, err := startMetricsServer(ctx, t, registry, &wg) + require.NoError(t, err) + defer func() { + // Tear down. + t.Log("HTTP server shutting down...") + err = httpSrv.Shutdown(ctx) + require.NoError(t, err) + wg.Wait() + }() + + // When + metricsContent, err := getMetrics(ctx, httpSrv) + require.NoError(t, err) + + // Then + // Parse metrics and assert expected values are found. + mfs, err := parseMetricsContent(metricsContent) + require.NoError(t, err) + expectedMetrics := []MetricInfo{ + {Name: "groupcache_cache_bytes", Labels: prometheus.Labels{"group": group.Name(), "type": "main", "accountid": "0001", "dc": "west"}}, + {Name: "groupcache_cache_bytes", Labels: prometheus.Labels{"group": group.Name(), "type": "hot", "accountid": "0001", "dc": "west"}}, + {Name: "groupcache_gets_total", Labels: prometheus.Labels{"group": group.Name(), "accountid": "0001", "dc": "west"}}, + } + for _, expectedMetric := range expectedMetrics { + assertContainsMetric(t, expectedMetric, mfs) + } +} + +type TestGroupProvider struct { + groups []*groupcache.Group +} + +func (gp *TestGroupProvider) Groups() []*groupcache.Group { + return gp.groups +} + +func TestExporterWithGroups(t *testing.T) { + // Given + var wg sync.WaitGroup + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + // Setup groupcache. + group1 := newGroup(t, t.Name()+"_1") + group2 := newGroup(t, t.Name()+"_2") + group3 := newGroup(t, t.Name()+"_3") + defer func() { + groupcache.DeregisterGroup(group1.Name()) + groupcache.DeregisterGroup(group2.Name()) + groupcache.DeregisterGroup(group3.Name()) + }() + groupProvider := &TestGroupProvider{ + groups: []*groupcache.Group{group1, group2}, + } + gcexporter := promexporter.NewExporter(promexporter.WithGroups(groupProvider)) + registry := prometheus.NewRegistry() + registry.MustRegister(gcexporter) + + // Setup metrics HTTP server. + httpSrv, err := startMetricsServer(ctx, t, registry, &wg) + require.NoError(t, err) + defer func() { + // Tear down. 
+ t.Log("HTTP server shutting down...") + err = httpSrv.Shutdown(ctx) + require.NoError(t, err) + wg.Wait() + }() + + // When + metricsContent, err := getMetrics(ctx, httpSrv) + require.NoError(t, err) + + // Then + // Parse metrics and assert expected values are found. + mfs, err := parseMetricsContent(metricsContent) + require.NoError(t, err) + expectedMetrics := []MetricInfo{ + {Name: "groupcache_cache_bytes", Labels: prometheus.Labels{"group": group1.Name(), "type": "main"}}, + {Name: "groupcache_cache_bytes", Labels: prometheus.Labels{"group": group1.Name(), "type": "hot"}}, + {Name: "groupcache_gets_total", Labels: prometheus.Labels{"group": group1.Name()}}, + {Name: "groupcache_cache_bytes", Labels: prometheus.Labels{"group": group2.Name(), "type": "main"}}, + {Name: "groupcache_cache_bytes", Labels: prometheus.Labels{"group": group2.Name(), "type": "hot"}}, + {Name: "groupcache_gets_total", Labels: prometheus.Labels{"group": group2.Name()}}, + } + for _, expectedMetric := range expectedMetrics { + assertContainsMetric(t, expectedMetric, mfs) + } + // Assert unexpected values are not found. + unexpectedMetrics := []MetricInfo{ + {Name: "groupcache_cache_bytes", Labels: prometheus.Labels{"group": group3.Name(), "type": "main"}}, + {Name: "groupcache_cache_bytes", Labels: prometheus.Labels{"group": group3.Name(), "type": "hot"}}, + {Name: "groupcache_gets_total", Labels: prometheus.Labels{"group": group3.Name()}}, + } + for _, unexpectedMetric := range unexpectedMetrics { + assertNotContainsMetric(t, unexpectedMetric, mfs) + } +} + +// Create new test group. +func newGroup(t *testing.T, name string) *groupcache.Group { + getter := func(_ context.Context, key string, dest groupcache.Sink) error { + err := dest.SetString("foobar", time.Now().Add(ttl)) + require.NoError(t, err) + return nil + } + return groupcache.NewGroup(name, 10_000, groupcache.GetterFunc(getter)) +} + +// Start an HTTP server on a dynamic port. 
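+// The registry-backed promhttp handler is mounted at /metrics, and /ping is
+// polled by waitForReady until the listener accepts requests.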
+func startMetricsServer(ctx context.Context, t *testing.T, registry *prometheus.Registry, wg *sync.WaitGroup) (*http.Server, error) { + mux := http.NewServeMux() + mux.HandleFunc(pingRoute, pingHandler) + // mux.Handle(metricsRoute, promhttp.Handler()) + mux.Handle(metricsRoute, promhttp.HandlerFor(registry, promhttp.HandlerOpts{})) + listener, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + return nil, err + } + httpSrv := &http.Server{ + Addr: listener.Addr().(*net.TCPAddr).AddrPort().String(), + Handler: mux, + ReadHeaderTimeout: time.Minute, + } + wg.Add(1) + go func() { + _ = httpSrv.Serve(listener) + wg.Done() + }() + err = waitForReady(ctx, httpSrv) + t.Logf("HTTP server ready at %s", httpSrv.Addr) + return httpSrv, nil +} + +func waitForReady(ctx context.Context, httpSrv *http.Server) error { + httpClt := http.DefaultClient + return retry.Until(ctx, retry.Interval(20*time.Millisecond), func(ctx context.Context, _ int) error { + pingURL := fmt.Sprintf("http://%s%s", httpSrv.Addr, pingRoute) + ctx2, cancel2 := context.WithTimeout(ctx, 10*time.Second) + defer cancel2() + rq, err := http.NewRequestWithContext(ctx2, http.MethodGet, pingURL, http.NoBody) + if err != nil { + return err + } + rs, err := httpClt.Do(rq) + if err != nil { + return err + } + rs.Body.Close() + return nil + }) +} + +func pingHandler(writer http.ResponseWriter, rq *http.Request) { + writer.WriteHeader(http.StatusOK) +} + +func getURL(ctx context.Context, u string) (*http.Response, error) { + rq, err := http.NewRequestWithContext(ctx, http.MethodGet, u, http.NoBody) + if err != nil { + return nil, err + } + return http.DefaultClient.Do(rq) +} + +// Request metrics endpoint and return content. +func getMetrics(ctx context.Context, httpSrv *http.Server) ([]byte, error) { + rs, err := getURL(ctx, fmt.Sprintf("http://%s%s", httpSrv.Addr, metricsRoute)) + if err != nil { + return nil, err + } + content, err := io.ReadAll(rs.Body) + rs.Body.Close() + return content, err +} + +// Parse metrics content into Prometheus metric structures. +func parseMetricsContent(content []byte) (map[string]*promdto.MetricFamily, error) { + var tp expfmt.TextParser + return tp.TextToMetricFamilies(bytes.NewReader(content)) +} + +func containsMetric(mi MetricInfo, mfs map[string]*promdto.MetricFamily) bool { + mf, ok := mfs[mi.Name] + if !ok { + return false + } + +LM1: + for _, metric := range mf.Metric { + LM2: + for key, value := range mi.Labels { + for _, label := range metric.Label { + if label.Name == nil || label.Value == nil { + continue + } + if *label.Name == key && *label.Value == value { + // Label match, go to next expected label. + continue LM2 + } + } + // Expected label not found. + continue LM1 + } + // All labels match. + return true + } + // No metrics match. + return false +} + +// Assert expected metric name and labels are present. +func assertContainsMetric(t *testing.T, expected MetricInfo, mfs map[string]*promdto.MetricFamily) { + assert.True(t, containsMetric(expected, mfs), "Metric not found: %s", expected.String()) +} + +// Assert expected metric name and labels are not present. 
+func assertNotContainsMetric(t *testing.T, unexpected MetricInfo, mfs map[string]*promdto.MetricFamily) { + assert.False(t, containsMetric(unexpected, mfs), "Metric unexpectedly found: %s", unexpected.String()) +} + +func labelsToString(labels prometheus.Labels) string { + keys := make([]string, 0, len(labels)) + for key := range labels { + keys = append(keys, key) + } + sort.Strings(keys) + var buf bytes.Buffer + for i, key := range keys { + if i > 0 { + buf.WriteString(",") + } + buf.WriteString(fmt.Sprintf("%s=%q", key, labels[key])) + } + return buf.String() +} + +func (mi *MetricInfo) String() string { + return fmt.Sprintf("%s{%s}", mi.Name, labelsToString(mi.Labels)) +} diff --git a/promexporter/statsadapter.go b/promexporter/statsadapter.go new file mode 100644 index 0000000..2b0cacf --- /dev/null +++ b/promexporter/statsadapter.go @@ -0,0 +1,134 @@ +package promexporter + +import ( + "github.com/mailgun/groupcache/v2" +) + +// Group implements interface GroupStatistics to extract metrics from groupcache group. +type statsAdapter struct { + group *groupcache.Group +} + +// New creates a new Group. +func newStatsAdapter(group *groupcache.Group) *statsAdapter { + return &statsAdapter{group: group} +} + +// Name returns the group's name +func (g *statsAdapter) Name() string { + return g.group.Name() +} + +// Gets represents any Get request, including from peers +func (g *statsAdapter) Gets() int64 { + return g.group.Stats.Gets.Get() +} + +// CacheHits represents either cache was good +func (g *statsAdapter) CacheHits() int64 { + return g.group.Stats.CacheHits.Get() +} + +// GetFromPeersLatencyLower represents slowest duration to request value from peers +func (g *statsAdapter) GetFromPeersLatencyLower() float64 { + latencyMs := g.group.Stats.GetFromPeersLatencyLower.Get() + if latencyMs == 0 { + return 0 + } + return float64(latencyMs) / 1000 +} + +// PeerLoads represents either remote load or remote cache hit (not an error) +func (g *statsAdapter) PeerLoads() int64 { + return g.group.Stats.PeerLoads.Get() +} + +// PeerErrors represents a count of errors from peers +func (g *statsAdapter) PeerErrors() int64 { + return g.group.Stats.PeerErrors.Get() +} + +// Loads represents (gets - cacheHits) +func (g *statsAdapter) Loads() int64 { + return g.group.Stats.Loads.Get() +} + +// LoadsDeduped represents after singleflight +func (g *statsAdapter) LoadsDeduped() int64 { + return g.group.Stats.LoadsDeduped.Get() +} + +// LocalLoads represents total good local loads +func (g *statsAdapter) LocalLoads() int64 { + return g.group.Stats.LocalLoads.Get() +} + +// LocalLoadErrs represents total bad local loads +func (g *statsAdapter) LocalLoadErrs() int64 { + return g.group.Stats.LocalLoadErrs.Get() +} + +// ServerRequests represents gets that came over the network from peers +func (g *statsAdapter) ServerRequests() int64 { + return g.group.Stats.ServerRequests.Get() +} + +// MainCacheItems represents number of items in the main cache +func (g *statsAdapter) MainCacheItems() int64 { + return g.group.CacheStats(groupcache.MainCache).Items +} + +// MainCacheBytes represents number of bytes in the main cache +func (g *statsAdapter) MainCacheBytes() int64 { + return g.group.CacheStats(groupcache.MainCache).Bytes +} + +// MainCacheGets represents number of get requests in the main cache +func (g *statsAdapter) MainCacheGets() int64 { + return g.group.CacheStats(groupcache.MainCache).Gets +} + +// MainCacheHits represents number of hit in the main cache +func (g *statsAdapter) MainCacheHits() int64 { + 
	return g.group.CacheStats(groupcache.MainCache).Hits
+}
+
+// MainCacheEvictions represents number of evictions in the main cache
+func (g *statsAdapter) MainCacheEvictions() int64 {
+	return g.group.CacheStats(groupcache.MainCache).Evictions
+}
+
+// MainCacheEvictionsNonExpired represents number of evictions for non-expired keys in the main cache
+func (g *statsAdapter) MainCacheEvictionsNonExpired() int64 {
+	return 0
+}
+
+// HotCacheItems represents number of items in the hot cache
+func (g *statsAdapter) HotCacheItems() int64 {
+	return g.group.CacheStats(groupcache.HotCache).Items
+}
+
+// HotCacheBytes represents number of bytes in the hot cache
+func (g *statsAdapter) HotCacheBytes() int64 {
+	return g.group.CacheStats(groupcache.HotCache).Bytes
+}
+
+// HotCacheGets represents number of get requests in the hot cache
+func (g *statsAdapter) HotCacheGets() int64 {
+	return g.group.CacheStats(groupcache.HotCache).Gets
+}
+
+// HotCacheHits represents number of hits in the hot cache
+func (g *statsAdapter) HotCacheHits() int64 {
+	return g.group.CacheStats(groupcache.HotCache).Hits
+}
+
+// HotCacheEvictions represents number of evictions in the hot cache
+func (g *statsAdapter) HotCacheEvictions() int64 {
+	return g.group.CacheStats(groupcache.HotCache).Evictions
+}
+
+// HotCacheEvictionsNonExpired represents number of evictions for non-expired keys in the hot cache
+func (g *statsAdapter) HotCacheEvictionsNonExpired() int64 {
+	return 0
+}
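For reference, the pieces added in this diff compose roughly as follows. This is a hedged sketch assuming standard prometheus/client_golang wiring; the group name, cache size, getter, namespace, labels, and listen address are illustrative and not taken from this change.

```go
package main

import (
	"context"
	"log"
	"net/http"
	"time"

	"github.com/mailgun/groupcache/v2"
	"github.com/mailgun/groupcache/v2/promexporter"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Hypothetical getter: fills the sink and caches the value for one minute.
	getter := groupcache.GetterFunc(func(_ context.Context, key string, dest groupcache.Sink) error {
		return dest.SetString("value-for-"+key, time.Now().Add(time.Minute))
	})

	// Groups created via NewGroup are discovered by the exporter's default
	// GroupProvider (groupcache.GetGroups).
	groupcache.NewGroup("mygroup", 64<<20, getter)

	// Register the exporter on a dedicated registry with an optional namespace
	// and constant labels.
	registry := prometheus.NewRegistry()
	registry.MustRegister(promexporter.NewExporter(
		promexporter.WithNamespace("myapp"),
		promexporter.WithLabels(map[string]string{"dc": "west"}),
	))

	// Serve the metrics for Prometheus to scrape.
	http.Handle("/metrics", promhttp.HandlerFor(registry, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":9090", nil))
}
```

With the namespace option shown, a scrape would expose series such as myapp_groupcache_gets_total{group="mygroup",dc="west"}.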